Pileus Git - ~andy/linux/commitdiff
Merge branch 'next-queue' into next
author James Morris <jmorris@namei.org>
Thu, 9 Feb 2012 06:02:34 +0000 (17:02 +1100)
committer James Morris <jmorris@namei.org>
Thu, 9 Feb 2012 06:02:34 +0000 (17:02 +1100)
1502 files changed:
Documentation/DocBook/device-drivers.tmpl
Documentation/DocBook/deviceiobook.tmpl
Documentation/DocBook/media/dvb/dvbproperty.xml
Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
Documentation/DocBook/media/v4l/vidioc-g-frequency.xml
Documentation/DocBook/media/v4l/vidioc-g-input.xml
Documentation/DocBook/media/v4l/vidioc-g-output.xml
Documentation/acpi/apei/einj.txt
Documentation/coccinelle.txt
Documentation/devices.txt
Documentation/devicetree/bindings/dma/atmel-dma.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/omap-i2c.txt [new file with mode: 0644]
Documentation/dmaengine.txt
Documentation/driver-model/devres.txt
Documentation/feature-removal-schedule.txt
Documentation/hwmon/it87
Documentation/hwmon/lm63
Documentation/hwmon/sysfs-interface
Documentation/input/event-codes.txt
Documentation/ioctl/ioctl-number.txt
Documentation/kbuild/makefiles.txt
Documentation/kernel-parameters.txt
Documentation/pinctrl.txt
Documentation/power/basic-pm-debugging.txt
Documentation/power/freezing-of-tasks.txt
Documentation/scsi/ChangeLog.megaraid_sas
Documentation/scsi/LICENSE.qla4xxx
Documentation/stable_kernel_rules.txt
Documentation/sysctl/kernel.txt
Documentation/target/tcm_mod_builder.py
Documentation/thermal/sysfs-api.txt
Documentation/video4linux/v4l2-controls.txt
Documentation/virtual/00-INDEX
MAINTAINERS
Makefile
arch/arm/Kconfig
arch/arm/Makefile
arch/arm/boot/Makefile
arch/arm/common/gic.c
arch/arm/configs/imx_v6_v7_defconfig [moved from arch/arm/configs/mx5_defconfig with 80% similarity]
arch/arm/configs/mx3_defconfig [deleted file]
arch/arm/include/asm/assembler.h
arch/arm/include/asm/domain.h
arch/arm/include/asm/futex.h
arch/arm/include/asm/gpio.h
arch/arm/include/asm/kprobes.h
arch/arm/include/asm/memblock.h
arch/arm/include/asm/ptrace.h
arch/arm/include/asm/smp.h
arch/arm/include/asm/smp_plat.h
arch/arm/include/asm/thread_info.h
arch/arm/include/asm/tlb.h
arch/arm/include/asm/uaccess.h
arch/arm/include/asm/unified.h
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-common.S
arch/arm/kernel/head.S
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/setup.c
arch/arm/kernel/signal.c
arch/arm/kernel/smp.c
arch/arm/kernel/smp_twd.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/lib/getuser.S
arch/arm/lib/putuser.S
arch/arm/lib/uaccess.S
arch/arm/mach-at91/Kconfig
arch/arm/mach-at91/Makefile
arch/arm/mach-at91/at91cap9.c
arch/arm/mach-at91/at91sam9260.c
arch/arm/mach-at91/at91sam9261.c
arch/arm/mach-at91/at91sam9263.c
arch/arm/mach-at91/at91sam9_alt_reset.S
arch/arm/mach-at91/at91sam9g45.c
arch/arm/mach-at91/at91sam9g45_reset.S [new file with mode: 0644]
arch/arm/mach-at91/at91sam9rl.c
arch/arm/mach-at91/generic.h
arch/arm/mach-at91/include/mach/at91_rstc.h
arch/arm/mach-at91/include/mach/at91cap9.h
arch/arm/mach-at91/include/mach/at91cap9_ddrsdr.h [deleted file]
arch/arm/mach-at91/include/mach/at91sam9260.h
arch/arm/mach-at91/include/mach/at91sam9261.h
arch/arm/mach-at91/include/mach/at91sam9263.h
arch/arm/mach-at91/include/mach/at91sam9_ddrsdr.h
arch/arm/mach-at91/include/mach/at91sam9g45.h
arch/arm/mach-at91/include/mach/at91sam9rl.h
arch/arm/mach-at91/include/mach/board.h
arch/arm/mach-at91/pm.c
arch/arm/mach-at91/pm.h
arch/arm/mach-at91/pm_slowclock.S
arch/arm/mach-at91/setup.c
arch/arm/mach-bcmring/arch.c
arch/arm/mach-bcmring/dma.c
arch/arm/mach-bcmring/include/mach/dma.h
arch/arm/mach-davinci/board-da850-evm.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mach-davinci/board-dm644x-evm.c
arch/arm/mach-davinci/board-dm646x-evm.c
arch/arm/mach-davinci/board-neuros-osd2.c
arch/arm/mach-davinci/board-omapl138-hawk.c
arch/arm/mach-davinci/board-sffsdr.c
arch/arm/mach-davinci/da850.c
arch/arm/mach-ep93xx/include/mach/dma.h
arch/arm/mach-exynos/headsmp.S
arch/arm/mach-exynos/hotplug.c
arch/arm/mach-exynos/mach-origen.c
arch/arm/mach-exynos/platsmp.c
arch/arm/mach-highbank/highbank.c
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/Makefile
arch/arm/mach-imx/Makefile.boot
arch/arm/mach-imx/clock-imx6q.c
arch/arm/mach-imx/clock-mx51-mx53.c [moved from arch/arm/mach-mx5/clock-mx51-mx53.c with 99% similarity]
arch/arm/mach-imx/cpu-imx5.c [moved from arch/arm/mach-mx5/cpu.c with 100% similarity]
arch/arm/mach-imx/cpu_op-mx51.c [moved from arch/arm/mach-mx5/cpu_op-mx51.c with 100% similarity]
arch/arm/mach-imx/cpu_op-mx51.h [moved from arch/arm/mach-mx5/cpu_op-mx51.h with 100% similarity]
arch/arm/mach-imx/crm-regs-imx5.h [moved from arch/arm/mach-mx5/crm_regs.h with 100% similarity]
arch/arm/mach-imx/devices-imx50.h [moved from arch/arm/mach-mx5/devices-imx50.h with 100% similarity]
arch/arm/mach-imx/devices-imx51.h [moved from arch/arm/mach-mx5/devices-imx51.h with 100% similarity]
arch/arm/mach-imx/devices-imx53.h [moved from arch/arm/mach-mx5/devices-imx53.h with 100% similarity]
arch/arm/mach-imx/efika.h [moved from arch/arm/mach-mx5/efika.h with 100% similarity]
arch/arm/mach-imx/ehci-imx5.c [moved from arch/arm/mach-mx5/ehci.c with 100% similarity]
arch/arm/mach-imx/eukrea_mbimx51-baseboard.c [moved from arch/arm/mach-mx5/eukrea_mbimx51-baseboard.c with 100% similarity]
arch/arm/mach-imx/eukrea_mbimxsd-baseboard.c [moved from arch/arm/mach-mx5/eukrea_mbimxsd-baseboard.c with 100% similarity]
arch/arm/mach-imx/imx51-dt.c [moved from arch/arm/mach-mx5/imx51-dt.c with 100% similarity]
arch/arm/mach-imx/imx53-dt.c [moved from arch/arm/mach-mx5/imx53-dt.c with 100% similarity]
arch/arm/mach-imx/mach-cpuimx51.c [moved from arch/arm/mach-mx5/board-cpuimx51.c with 100% similarity]
arch/arm/mach-imx/mach-cpuimx51sd.c [moved from arch/arm/mach-mx5/board-cpuimx51sd.c with 100% similarity]
arch/arm/mach-imx/mach-mx31_3ds.c
arch/arm/mach-imx/mach-mx31moboard.c
arch/arm/mach-imx/mach-mx50_rdp.c [moved from arch/arm/mach-mx5/board-mx50_rdp.c with 100% similarity]
arch/arm/mach-imx/mach-mx51_3ds.c [moved from arch/arm/mach-mx5/board-mx51_3ds.c with 100% similarity]
arch/arm/mach-imx/mach-mx51_babbage.c [moved from arch/arm/mach-mx5/board-mx51_babbage.c with 100% similarity]
arch/arm/mach-imx/mach-mx51_efikamx.c [moved from arch/arm/mach-mx5/board-mx51_efikamx.c with 100% similarity]
arch/arm/mach-imx/mach-mx51_efikasb.c [moved from arch/arm/mach-mx5/board-mx51_efikasb.c with 100% similarity]
arch/arm/mach-imx/mach-mx53_ard.c [moved from arch/arm/mach-mx5/board-mx53_ard.c with 99% similarity]
arch/arm/mach-imx/mach-mx53_evk.c [moved from arch/arm/mach-mx5/board-mx53_evk.c with 99% similarity]
arch/arm/mach-imx/mach-mx53_loco.c [moved from arch/arm/mach-mx5/board-mx53_loco.c with 99% similarity]
arch/arm/mach-imx/mach-mx53_smd.c [moved from arch/arm/mach-mx5/board-mx53_smd.c with 99% similarity]
arch/arm/mach-imx/mach-pcm037.c
arch/arm/mach-imx/mm-imx5.c [moved from arch/arm/mach-mx5/mm.c with 100% similarity]
arch/arm/mach-imx/mx51_efika.c [moved from arch/arm/mach-mx5/mx51_efika.c with 100% similarity]
arch/arm/mach-imx/pm-imx5.c [moved from arch/arm/mach-mx5/system.c with 58% similarity]
arch/arm/mach-imx/src.c
arch/arm/mach-msm/headsmp.S
arch/arm/mach-msm/hotplug.c
arch/arm/mach-msm/platsmp.c
arch/arm/mach-msm/vreg.c
arch/arm/mach-mx5/Kconfig [deleted file]
arch/arm/mach-mx5/Makefile [deleted file]
arch/arm/mach-mx5/Makefile.boot [deleted file]
arch/arm/mach-mx5/pm-imx5.c [deleted file]
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/board-4430sdp.c
arch/arm/mach-omap2/board-omap4panda.c
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/display.c
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-omap2/hsmmc.c
arch/arm/mach-omap2/io.c
arch/arm/mach-omap2/omap-secure.c
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-omap2/omap_hwmod_2xxx_3xxx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
arch/arm/mach-omap2/omap_hwmod_44xx_data.c
arch/arm/mach-omap2/prm2xxx_3xxx.c
arch/arm/mach-omap2/smartreflex.c
arch/arm/mach-omap2/timer.c
arch/arm/mach-picoxcell/time.c
arch/arm/mach-pxa/devices.c
arch/arm/mach-pxa/pxa25x.c
arch/arm/mach-pxa/pxa27x.c
arch/arm/mach-pxa/pxa300.c
arch/arm/mach-pxa/pxa320.c
arch/arm/mach-pxa/pxa3xx.c
arch/arm/mach-pxa/pxa95x.c
arch/arm/mach-realview/hotplug.c
arch/arm/mach-realview/include/mach/board-eb.h
arch/arm/mach-realview/include/mach/board-pb11mp.h
arch/arm/mach-realview/platsmp.c
arch/arm/mach-realview/realview_eb.c
arch/arm/mach-realview/realview_pb11mp.c
arch/arm/mach-s3c64xx/include/mach/crag6410.h
arch/arm/mach-s3c64xx/mach-crag6410.c
arch/arm/mach-s3c64xx/pm.c
arch/arm/mach-sa1100/assabet.c
arch/arm/mach-sa1100/cerf.c
arch/arm/mach-sa1100/clock.c
arch/arm/mach-sa1100/collie.c
arch/arm/mach-sa1100/cpu-sa1100.c
arch/arm/mach-sa1100/generic.c
arch/arm/mach-sa1100/include/mach/mcp.h
arch/arm/mach-sa1100/jornada720_ssp.c
arch/arm/mach-sa1100/lart.c
arch/arm/mach-sa1100/shannon.c
arch/arm/mach-sa1100/simpad.c
arch/arm/mach-shmobile/pm-sh7372.c
arch/arm/mach-shmobile/setup-sh7372.c
arch/arm/mach-shmobile/smp-r8a7779.c
arch/arm/mach-shmobile/smp-sh73a0.c
arch/arm/mach-ux500/Kconfig
arch/arm/mach-ux500/board-mop500-sdi.c
arch/arm/mach-ux500/cache-l2x0.c
arch/arm/mach-ux500/headsmp.S
arch/arm/mach-ux500/hotplug.c
arch/arm/mach-ux500/platsmp.c
arch/arm/mach-ux500/usb.c
arch/arm/mach-vexpress/ct-ca9x4.c
arch/arm/mach-vexpress/hotplug.c
arch/arm/mach-vexpress/platsmp.c
arch/arm/mach-w90x900/clksel.c
arch/arm/mach-w90x900/cpu.c
arch/arm/mach-w90x900/dev.c
arch/arm/mach-w90x900/mfp.c
arch/arm/mm/Kconfig
arch/arm/mm/init.c
arch/arm/mm/proc-v7.S
arch/arm/plat-mxc/Kconfig
arch/arm/plat-mxc/include/mach/iomux-v1.h
arch/arm/plat-mxc/include/mach/mx3fb.h
arch/arm/plat-nomadik/include/plat/ste_dma40.h
arch/arm/plat-omap/devices.c
arch/arm/plat-s3c24xx/cpu.c
arch/arm/plat-samsung/dma-ops.c
arch/arm/plat-samsung/include/plat/dma-ops.h
arch/arm/plat-samsung/include/plat/dma.h
arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
arch/arm/plat-versatile/headsmp.S
arch/arm/plat-versatile/platsmp.c
arch/avr32/Kconfig
arch/ia64/include/asm/ptrace.h
arch/ia64/kernel/acpi.c
arch/ia64/kernel/ptrace.c
arch/m68k/atari/config.c
arch/m68k/include/asm/irq.h
arch/m68k/kernel/process_mm.c
arch/m68k/kernel/process_no.c
arch/m68k/kernel/traps.c
arch/m68k/mm/cache.c
arch/microblaze/Kconfig
arch/microblaze/boot/Makefile
arch/microblaze/include/asm/atomic.h
arch/microblaze/include/asm/ptrace.h
arch/microblaze/kernel/ptrace.c
arch/mips/Kconfig
arch/mips/include/asm/ptrace.h
arch/mips/kernel/ptrace.c
arch/mips/lib/iomap-pci.c
arch/openrisc/boot/Makefile
arch/powerpc/boot/Makefile
arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
arch/powerpc/boot/dts/p1020rdb.dtsi
arch/powerpc/boot/dts/p1021mds.dts
arch/powerpc/boot/dts/p2020ds.dtsi
arch/powerpc/boot/dts/p2020rdb.dts
arch/powerpc/include/asm/ptrace.h
arch/powerpc/kernel/crash.c
arch/powerpc/kernel/legacy_serial.c
arch/powerpc/kernel/ptrace.c
arch/powerpc/platforms/85xx/p1022_ds.c
arch/powerpc/platforms/powernv/pci-ioda.c
arch/powerpc/platforms/pseries/Kconfig
arch/powerpc/sysdev/fsl_pci.c
arch/s390/Makefile
arch/s390/include/asm/kexec.h
arch/s390/include/asm/ptrace.h
arch/s390/kernel/ptrace.c
arch/s390/kernel/vmlinux.lds.S
arch/score/kernel/entry.S
arch/sh/Kconfig
arch/sh/drivers/pci/pci.c
arch/sh/include/asm/ptrace_32.h
arch/sh/include/asm/ptrace_64.h
arch/sh/kernel/ptrace_32.c
arch/sh/kernel/ptrace_64.c
arch/sparc/Kconfig
arch/sparc/include/asm/ptrace.h
arch/sparc/kernel/ptrace_64.c
arch/sparc/kernel/sun4m_irq.c
arch/sparc/lib/divdi3.S
arch/um/Makefile
arch/um/kernel/ptrace.c
arch/x86/.gitignore
arch/x86/Kconfig
arch/x86/Makefile
arch/x86/boot/compressed/misc.c
arch/x86/ia32/Makefile
arch/x86/ia32/ia32entry.S
arch/x86/ia32/nosyscall.c [new file with mode: 0644]
arch/x86/ia32/syscall_ia32.c [new file with mode: 0644]
arch/x86/include/asm/Kbuild
arch/x86/include/asm/cmpxchg.h
arch/x86/include/asm/cpufeature.h
arch/x86/include/asm/ia32_unistd.h
arch/x86/include/asm/kvm_emulate.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/syscall.h
arch/x86/include/asm/unistd.h
arch/x86/include/asm/unistd_32.h [deleted file]
arch/x86/include/asm/unistd_64.h [deleted file]
arch/x86/include/asm/uv/uv_bau.h
arch/x86/include/asm/uv/uv_hub.h
arch/x86/kernel/Makefile
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/dumpstack.c
arch/x86/kernel/dumpstack_64.c
arch/x86/kernel/e820.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/entry_64.S
arch/x86/kernel/microcode_amd.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/reboot.c
arch/x86/kernel/syscall_32.c [new file with mode: 0644]
arch/x86/kernel/syscall_64.c
arch/x86/kernel/syscall_table_32.S [deleted file]
arch/x86/kernel/tsc.c
arch/x86/kernel/vm86_32.c
arch/x86/kvm/emulate.c
arch/x86/kvm/x86.c
arch/x86/lib/x86-opcode-map.txt
arch/x86/mm/fault.c
arch/x86/mm/srat.c
arch/x86/net/bpf_jit_comp.c
arch/x86/platform/uv/tlb_uv.c
arch/x86/platform/uv/uv_irq.c
arch/x86/syscalls/Makefile [new file with mode: 0644]
arch/x86/syscalls/syscall_32.tbl [new file with mode: 0644]
arch/x86/syscalls/syscall_64.tbl [new file with mode: 0644]
arch/x86/syscalls/syscallhdr.sh [new file with mode: 0644]
arch/x86/syscalls/syscalltbl.sh [new file with mode: 0644]
arch/x86/um/Makefile
arch/x86/um/shared/sysdep/ptrace.h
arch/x86/um/sys_call_table_32.S [deleted file]
arch/x86/um/sys_call_table_32.c [new file with mode: 0644]
arch/x86/um/sys_call_table_64.c
arch/x86/um/user-offsets.c
arch/x86/xen/spinlock.c
arch/xtensa/include/asm/string.h
arch/xtensa/kernel/ptrace.c
block/cfq-iosched.c
crypto/sha512_generic.c
drivers/acpi/Makefile
drivers/acpi/acpica/Makefile
drivers/acpi/acpica/accommon.h
drivers/acpi/acpica/acconfig.h
drivers/acpi/acpica/acdebug.h
drivers/acpi/acpica/acdispat.h
drivers/acpi/acpica/acevents.h
drivers/acpi/acpica/acglobal.h
drivers/acpi/acpica/achware.h
drivers/acpi/acpica/acinterp.h
drivers/acpi/acpica/aclocal.h
drivers/acpi/acpica/acmacros.h
drivers/acpi/acpica/acnamesp.h
drivers/acpi/acpica/acobject.h
drivers/acpi/acpica/acopcode.h
drivers/acpi/acpica/acparser.h
drivers/acpi/acpica/acpredef.h
drivers/acpi/acpica/acresrc.h
drivers/acpi/acpica/acstruct.h
drivers/acpi/acpica/actables.h
drivers/acpi/acpica/acutils.h
drivers/acpi/acpica/amlcode.h
drivers/acpi/acpica/amlresrc.h
drivers/acpi/acpica/dsargs.c
drivers/acpi/acpica/dscontrol.c
drivers/acpi/acpica/dsfield.c
drivers/acpi/acpica/dsinit.c
drivers/acpi/acpica/dsmethod.c
drivers/acpi/acpica/dsmthdat.c
drivers/acpi/acpica/dsobject.c
drivers/acpi/acpica/dsopcode.c
drivers/acpi/acpica/dsutils.c
drivers/acpi/acpica/dswexec.c
drivers/acpi/acpica/dswload.c
drivers/acpi/acpica/dswload2.c
drivers/acpi/acpica/dswscope.c
drivers/acpi/acpica/dswstate.c
drivers/acpi/acpica/evevent.c
drivers/acpi/acpica/evglock.c
drivers/acpi/acpica/evgpe.c
drivers/acpi/acpica/evgpeblk.c
drivers/acpi/acpica/evgpeinit.c
drivers/acpi/acpica/evgpeutil.c
drivers/acpi/acpica/evmisc.c
drivers/acpi/acpica/evregion.c
drivers/acpi/acpica/evrgnini.c
drivers/acpi/acpica/evsci.c
drivers/acpi/acpica/evxface.c
drivers/acpi/acpica/evxfevnt.c
drivers/acpi/acpica/evxfgpe.c
drivers/acpi/acpica/evxfregn.c
drivers/acpi/acpica/exconfig.c
drivers/acpi/acpica/exconvrt.c
drivers/acpi/acpica/excreate.c
drivers/acpi/acpica/exdebug.c
drivers/acpi/acpica/exdump.c
drivers/acpi/acpica/exfield.c
drivers/acpi/acpica/exfldio.c
drivers/acpi/acpica/exmisc.c
drivers/acpi/acpica/exmutex.c
drivers/acpi/acpica/exnames.c
drivers/acpi/acpica/exoparg1.c
drivers/acpi/acpica/exoparg2.c
drivers/acpi/acpica/exoparg3.c
drivers/acpi/acpica/exoparg6.c
drivers/acpi/acpica/exprep.c
drivers/acpi/acpica/exregion.c
drivers/acpi/acpica/exresnte.c
drivers/acpi/acpica/exresolv.c
drivers/acpi/acpica/exresop.c
drivers/acpi/acpica/exstore.c
drivers/acpi/acpica/exstoren.c
drivers/acpi/acpica/exstorob.c
drivers/acpi/acpica/exsystem.c
drivers/acpi/acpica/exutils.c
drivers/acpi/acpica/hwacpi.c
drivers/acpi/acpica/hwgpe.c
drivers/acpi/acpica/hwpci.c
drivers/acpi/acpica/hwregs.c
drivers/acpi/acpica/hwsleep.c
drivers/acpi/acpica/hwtimer.c
drivers/acpi/acpica/hwvalid.c
drivers/acpi/acpica/hwxface.c
drivers/acpi/acpica/nsaccess.c
drivers/acpi/acpica/nsalloc.c
drivers/acpi/acpica/nsdump.c
drivers/acpi/acpica/nsdumpdv.c
drivers/acpi/acpica/nseval.c
drivers/acpi/acpica/nsinit.c
drivers/acpi/acpica/nsload.c
drivers/acpi/acpica/nsnames.c
drivers/acpi/acpica/nsobject.c
drivers/acpi/acpica/nsparse.c
drivers/acpi/acpica/nspredef.c
drivers/acpi/acpica/nsrepair.c
drivers/acpi/acpica/nsrepair2.c
drivers/acpi/acpica/nssearch.c
drivers/acpi/acpica/nsutils.c
drivers/acpi/acpica/nswalk.c
drivers/acpi/acpica/nsxfeval.c
drivers/acpi/acpica/nsxfname.c
drivers/acpi/acpica/nsxfobj.c
drivers/acpi/acpica/psargs.c
drivers/acpi/acpica/psloop.c
drivers/acpi/acpica/psopcode.c
drivers/acpi/acpica/psparse.c
drivers/acpi/acpica/psscope.c
drivers/acpi/acpica/pstree.c
drivers/acpi/acpica/psutils.c
drivers/acpi/acpica/pswalk.c
drivers/acpi/acpica/psxface.c
drivers/acpi/acpica/rsaddr.c
drivers/acpi/acpica/rscalc.c
drivers/acpi/acpica/rscreate.c
drivers/acpi/acpica/rsdump.c
drivers/acpi/acpica/rsinfo.c
drivers/acpi/acpica/rsio.c
drivers/acpi/acpica/rsirq.c
drivers/acpi/acpica/rslist.c
drivers/acpi/acpica/rsmemory.c
drivers/acpi/acpica/rsmisc.c
drivers/acpi/acpica/rsserial.c [new file with mode: 0644]
drivers/acpi/acpica/rsutils.c
drivers/acpi/acpica/rsxface.c
drivers/acpi/acpica/tbfadt.c
drivers/acpi/acpica/tbfind.c
drivers/acpi/acpica/tbinstal.c
drivers/acpi/acpica/tbutils.c
drivers/acpi/acpica/tbxface.c
drivers/acpi/acpica/tbxfroot.c
drivers/acpi/acpica/utaddress.c [new file with mode: 0644]
drivers/acpi/acpica/utalloc.c
drivers/acpi/acpica/utcopy.c
drivers/acpi/acpica/utdebug.c
drivers/acpi/acpica/utdecode.c
drivers/acpi/acpica/utdelete.c
drivers/acpi/acpica/uteval.c
drivers/acpi/acpica/utglobal.c
drivers/acpi/acpica/utids.c
drivers/acpi/acpica/utinit.c
drivers/acpi/acpica/utlock.c
drivers/acpi/acpica/utmath.c
drivers/acpi/acpica/utmisc.c
drivers/acpi/acpica/utmutex.c
drivers/acpi/acpica/utobject.c
drivers/acpi/acpica/utosi.c
drivers/acpi/acpica/utresrc.c
drivers/acpi/acpica/utstate.c
drivers/acpi/acpica/utxface.c
drivers/acpi/acpica/utxferror.c
drivers/acpi/acpica/utxfmutex.c [new file with mode: 0644]
drivers/acpi/apei/apei-base.c
drivers/acpi/apei/apei-internal.h
drivers/acpi/apei/einj.c
drivers/acpi/apei/erst.c
drivers/acpi/apei/ghes.c
drivers/acpi/apei/hest.c
drivers/acpi/atomicio.c [deleted file]
drivers/acpi/numa.c
drivers/acpi/nvs.c
drivers/acpi/osl.c
drivers/acpi/processor_core.c
drivers/acpi/processor_driver.c
drivers/acpi/sleep.c
drivers/ata/ata_piix.c
drivers/ata/libata-core.c
drivers/ata/libata-transport.c
drivers/ata/pata_bf54x.c
drivers/ata/sata_fsl.c
drivers/base/Makefile
drivers/base/bus.c
drivers/base/core.c
drivers/base/firmware_class.c
drivers/base/power/domain.c
drivers/base/power/domain_governor.c
drivers/base/regmap/regmap.c
drivers/base/sys.c [deleted file]
drivers/bcma/bcma_private.h
drivers/bcma/host_pci.c
drivers/bcma/main.c
drivers/block/Kconfig
drivers/block/Makefile
drivers/block/nvme.c [new file with mode: 0644]
drivers/block/rbd.c
drivers/char/agp/backend.c
drivers/char/random.c
drivers/char/tpm/tpm.c
drivers/char/tpm/tpm.h
drivers/dma/Kconfig
drivers/dma/Makefile
drivers/dma/amba-pl08x.c
drivers/dma/at_hdmac.c
drivers/dma/at_hdmac_regs.h
drivers/dma/coh901318.c
drivers/dma/coh901318_lli.c
drivers/dma/coh901318_lli.h
drivers/dma/dmaengine.c
drivers/dma/dmatest.c
drivers/dma/dw_dmac.c
drivers/dma/dw_dmac_regs.h
drivers/dma/ep93xx_dma.c
drivers/dma/fsldma.c
drivers/dma/imx-dma.c
drivers/dma/imx-sdma.c
drivers/dma/intel_mid_dma.c
drivers/dma/intel_mid_dma_regs.h
drivers/dma/iop-adma.c
drivers/dma/ipu/ipu_idmac.c
drivers/dma/mpc512x_dma.c
drivers/dma/mxs-dma.c
drivers/dma/pch_dma.c
drivers/dma/pl330.c
drivers/dma/shdma.c
drivers/dma/sirf-dma.c [new file with mode: 0644]
drivers/dma/ste_dma40.c
drivers/dma/ste_dma40_ll.h
drivers/dma/timb_dma.c
drivers/dma/txx9dmac.c
drivers/firewire/ohci.c
drivers/gpio/Kconfig
drivers/gpio/gpio-lpc32xx.c
drivers/gpio/gpio-ml-ioh.c
drivers/gpio/gpio-pch.c
drivers/gpio/gpio-samsung.c
drivers/gpio/gpio-tps65910.c
drivers/gpu/drm/drm_auth.c
drivers/gpu/drm/drm_fops.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/exynos/Kconfig
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/gma500/framebuffer.c
drivers/gpu/drm/gma500/gtt.c
drivers/gpu/drm/i810/i810_dma.c
drivers/gpu/drm/i810/i810_drv.c
drivers/gpu/drm/i810/i810_drv.h
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_bios.h
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_tv.c
drivers/gpu/drm/nouveau/nouveau_bios.h
drivers/gpu/drm/nouveau/nouveau_bo.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drv.c
drivers/gpu/drm/nouveau/nouveau_gem.c
drivers/gpu/drm/nouveau/nouveau_mxm.c
drivers/gpu/drm/nouveau/nv50_pm.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreend.h
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/r600_blit_kms.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_bios.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_encoders.c
drivers/gpu/drm/radeon/radeon_i2c.c
drivers/gpu/drm/radeon/radeon_irq_kms.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_ring.c
drivers/gpu/drm/sis/sis_drv.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
drivers/hid/hid-hyperv.c
drivers/hid/hid-wacom.c
drivers/hid/hid-wiimote-core.c
drivers/hid/usbhid/hiddev.c
drivers/hwmon/Kconfig
drivers/hwmon/adm1031.c
drivers/hwmon/coretemp.c
drivers/hwmon/f71805f.c
drivers/hwmon/it87.c
drivers/hwmon/lm63.c
drivers/hwmon/lm90.c
drivers/hwmon/max1111.c
drivers/hwmon/sht15.c
drivers/hwmon/w83627ehf.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/i2c-eg20t.c
drivers/i2c/busses/i2c-omap.c
drivers/idle/intel_idle.c
drivers/infiniband/Kconfig
drivers/infiniband/Makefile
drivers/infiniband/core/ucma.c
drivers/infiniband/core/uverbs_cmd.c
drivers/infiniband/core/verbs.c
drivers/infiniband/hw/ipath/ipath_fs.c
drivers/infiniband/hw/mlx4/mad.c
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/nes/nes.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_cm.h
drivers/infiniband/hw/nes/nes_context.h
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_mgt.c
drivers/infiniband/hw/nes/nes_mgt.h
drivers/infiniband/hw/nes/nes_nic.c
drivers/infiniband/hw/nes/nes_user.h
drivers/infiniband/hw/nes/nes_utils.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/nes/nes_verbs.h
drivers/infiniband/hw/qib/qib_iba6120.c
drivers/infiniband/hw/qib/qib_pcie.c
drivers/infiniband/ulp/srpt/Kconfig [new file with mode: 0644]
drivers/infiniband/ulp/srpt/Makefile [new file with mode: 0644]
drivers/infiniband/ulp/srpt/ib_dm_mad.h [new file with mode: 0644]
drivers/infiniband/ulp/srpt/ib_srpt.c [new file with mode: 0644]
drivers/infiniband/ulp/srpt/ib_srpt.h [new file with mode: 0644]
drivers/input/evdev.c
drivers/input/keyboard/twl4030_keypad.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/serio/serio_raw.c
drivers/iommu/amd_iommu.c
drivers/iommu/msm_iommu.c
drivers/leds/Kconfig
drivers/leds/Makefile
drivers/leds/leds-lm3530.c
drivers/leds/leds-ot200.c [new file with mode: 0644]
drivers/md/dm-raid.c
drivers/md/md.c
drivers/media/common/tuners/tuner-xc2028.c
drivers/media/common/tuners/xc4000.c
drivers/media/dvb/dvb-core/dvb_frontend.c
drivers/media/dvb/dvb-usb/anysee.c
drivers/media/dvb/dvb-usb/cinergyT2-fe.c
drivers/media/dvb/dvb-usb/dib0700.h
drivers/media/dvb/dvb-usb/dib0700_core.c
drivers/media/dvb/dvb-usb/dib0700_devices.c
drivers/media/dvb/frontends/cxd2820r.h
drivers/media/dvb/frontends/cxd2820r_core.c
drivers/media/dvb/frontends/ds3000.c
drivers/media/dvb/frontends/mb86a20s.c
drivers/media/dvb/frontends/tda18271c2dd.c
drivers/media/video/as3645a.c
drivers/media/video/atmel-isi.c
drivers/media/video/cx18/cx18-fileops.c
drivers/media/video/cx231xx/cx231xx-cards.c
drivers/media/video/cx23885/cx23885-cards.c
drivers/media/video/cx23885/cx23885-dvb.c
drivers/media/video/cx23885/cx23885-video.c
drivers/media/video/cx88/cx88-cards.c
drivers/media/video/em28xx/em28xx-dvb.c
drivers/media/video/ivtv/ivtv-driver.c
drivers/media/video/ivtv/ivtv-driver.h
drivers/media/video/ivtv/ivtv-fileops.c
drivers/media/video/ivtv/ivtv-ioctl.c
drivers/media/video/ivtv/ivtv-irq.c
drivers/media/video/ivtv/ivtv-streams.c
drivers/media/video/ivtv/ivtv-yuv.c
drivers/media/video/mx3_camera.c
drivers/media/video/omap/omap_vout.c
drivers/media/video/pwc/pwc-ctrl.c
drivers/media/video/pwc/pwc-dec1.c
drivers/media/video/pwc/pwc-dec1.h
drivers/media/video/pwc/pwc-dec23.c
drivers/media/video/pwc/pwc-dec23.h
drivers/media/video/pwc/pwc-if.c
drivers/media/video/pwc/pwc-misc.c
drivers/media/video/pwc/pwc-v4l.c
drivers/media/video/pwc/pwc.h
drivers/media/video/s5p-fimc/fimc-capture.c
drivers/media/video/s5p-fimc/fimc-core.c
drivers/media/video/s5p-fimc/fimc-mdevice.c
drivers/media/video/s5p-g2d/g2d.c
drivers/media/video/s5p-jpeg/jpeg-core.c
drivers/media/video/s5p-mfc/s5p_mfc.c
drivers/media/video/s5p-mfc/s5p_mfc_dec.c
drivers/media/video/saa7164/saa7164-cards.c
drivers/media/video/timblogiw.c
drivers/media/video/tlg2300/pd-main.c
drivers/media/video/v4l2-ctrls.c
drivers/media/video/v4l2-ioctl.c
drivers/media/video/zoran/zoran_driver.c
drivers/mfd/mcp-core.c
drivers/mfd/mcp-sa11x0.c
drivers/mfd/twl6040-core.c
drivers/mfd/ucb1x00-core.c
drivers/mfd/ucb1x00-ts.c
drivers/misc/carma/carma-fpga-program.c
drivers/misc/lkdtm.c
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/mmci.c
drivers/mmc/host/mxcmmc.c
drivers/mmc/host/mxs-mmc.c
drivers/mmc/host/sh_mmcif.c
drivers/mmc/host/tmio_mmc_dma.c
drivers/mtd/mtdcore.c
drivers/mtd/nand/atmel_nand.c
drivers/mtd/nand/gpmi-nand/gpmi-lib.c
drivers/mtd/nand/nand_base.c
drivers/net/bonding/bond_alb.c
drivers/net/dsa/mv88e6060.c
drivers/net/dsa/mv88e6123_61_65.c
drivers/net/dsa/mv88e6131.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/faraday/ftgmac100.c
drivers/net/ethernet/faraday/ftmac100.c
drivers/net/ethernet/ibm/ehea/ehea_ethtool.c
drivers/net/ethernet/ibm/ehea/ehea_main.c
drivers/net/ethernet/ibm/ehea/ehea_qmr.c
drivers/net/ethernet/intel/igb/Makefile
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_82575.h
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/e1000_mac.c
drivers/net/ethernet/intel/igb/e1000_mac.h
drivers/net/ethernet/intel/igb/e1000_mbx.c
drivers/net/ethernet/intel/igb/e1000_mbx.h
drivers/net/ethernet/intel/igb/e1000_nvm.c
drivers/net/ethernet/intel/igb/e1000_nvm.h
drivers/net/ethernet/intel/igb/e1000_phy.c
drivers/net/ethernet/intel/igb/e1000_phy.h
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb.h
drivers/net/ethernet/intel/igb/igb_ethtool.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igbvf/ethtool.c
drivers/net/ethernet/intel/igbvf/netdev.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbevf/ethtool.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf.h
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/intel/ixgbevf/mbx.c
drivers/net/ethernet/intel/ixgbevf/vf.c
drivers/net/ethernet/intel/ixgbevf/vf.h
drivers/net/ethernet/marvell/mv643xx_eth.c
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/fw.c
drivers/net/ethernet/mellanox/mlx4/fw.h
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/mr.c
drivers/net/ethernet/mellanox/mlx4/pd.c
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx4/profile.c
drivers/net/ethernet/mellanox/mlx4/qp.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/mellanox/mlx4/srq.c
drivers/net/ethernet/micrel/ks8842.c
drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/stmicro/stmmac/stmmac_mdio.c
drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macvlan.c
drivers/net/phy/mdio_bus.c
drivers/net/team/team.c
drivers/net/wireless/ath/ath9k/ar9003_mac.c
drivers/net/wireless/ath/ath9k/mac.c
drivers/net/wireless/b43/Kconfig
drivers/net/wireless/b43/main.c
drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
drivers/net/wireless/brcm80211/brcmsmac/mac80211_if.c
drivers/net/wireless/brcm80211/brcmsmac/main.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/iwlwifi/iwl-scan.c
drivers/net/wireless/iwlwifi/iwl-trans-pcie-rx.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/rt2x00/rt2800pci.c
drivers/net/xen-netfront.c
drivers/pci/pci.c
drivers/pcmcia/ds.c
drivers/pcmcia/sa1111_generic.c
drivers/pinctrl/core.c
drivers/pinctrl/core.h
drivers/pinctrl/pinconf.c
drivers/pinctrl/pinconf.h
drivers/pinctrl/pinmux.c
drivers/pinctrl/pinmux.h
drivers/regulator/core.c
drivers/regulator/of_regulator.c
drivers/rtc/Kconfig
drivers/rtc/rtc-sa1100.c
drivers/s390/block/dasd.c
drivers/s390/block/dasd_alias.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_int.h
drivers/scsi/Kconfig
drivers/scsi/bfa/bfa_defs_svc.h
drivers/scsi/bfa/bfa_fc.h
drivers/scsi/bfa/bfa_fcpim.c
drivers/scsi/bfa/bfa_fcpim.h
drivers/scsi/bfa/bfa_svc.h
drivers/scsi/bfa/bfad.c
drivers/scsi/bfa/bfad_attr.c
drivers/scsi/bfa/bfad_bsg.c
drivers/scsi/bfa/bfad_drv.h
drivers/scsi/bfa/bfad_im.c
drivers/scsi/bfa/bfad_im.h
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/device_handler/scsi_dh_alua.c
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe.h
drivers/scsi/hpsa.c
drivers/scsi/isci/firmware/Makefile [deleted file]
drivers/scsi/isci/firmware/README [deleted file]
drivers/scsi/isci/firmware/create_fw.c [deleted file]
drivers/scsi/isci/firmware/create_fw.h [deleted file]
drivers/scsi/isci/host.c
drivers/scsi/isci/host.h
drivers/scsi/isci/init.c
drivers/scsi/isci/isci.h
drivers/scsi/isci/phy.c
drivers/scsi/isci/port.c
drivers/scsi/isci/port.h
drivers/scsi/isci/port_config.c
drivers/scsi/isci/probe_roms.c
drivers/scsi/isci/probe_roms.h
drivers/scsi/isci/remote_device.c
drivers/scsi/isci/task.c
drivers/scsi/isci/task.h
drivers/scsi/libfc/fc_disc.c
drivers/scsi/libfc/fc_elsct.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_lport.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/mac_esp.c
drivers/scsi/mac_scsi.c
drivers/scsi/megaraid.c
drivers/scsi/megaraid/megaraid_sas.h
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/megaraid/megaraid_sas_fp.c
drivers/scsi/qla4xxx/ql4_def.h
drivers/scsi/qla4xxx/ql4_init.c
drivers/scsi/qla4xxx/ql4_mbx.c
drivers/scsi/qla4xxx/ql4_nx.c
drivers/scsi/qla4xxx/ql4_nx.h
drivers/scsi/qla4xxx/ql4_os.c
drivers/scsi/qla4xxx/ql4_version.h
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/sg.c
drivers/scsi/sym53c8xx_2/sym_glue.c
drivers/spi/Kconfig
drivers/spi/spi-dw-mid.c
drivers/spi/spi-ep93xx.c
drivers/spi/spi-pl022.c
drivers/spi/spi-topcliff-pch.c
drivers/staging/media/go7007/go7007-usb.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_configfs.c
drivers/target/iscsi/iscsi_target_core.h
drivers/target/iscsi/iscsi_target_device.c
drivers/target/iscsi/iscsi_target_erl0.c
drivers/target/iscsi/iscsi_target_erl1.c
drivers/target/iscsi/iscsi_target_erl2.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_nego.c
drivers/target/iscsi/iscsi_target_nodeattrib.c
drivers/target/iscsi/iscsi_target_stat.c
drivers/target/iscsi/iscsi_target_tmr.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/loopback/tcm_loop.c
drivers/target/loopback/tcm_loop.h
drivers/target/target_core_alua.c
drivers/target/target_core_cdb.c
drivers/target/target_core_cdb.h [deleted file]
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_fabric_configfs.c
drivers/target/target_core_fabric_lib.c
drivers/target/target_core_file.c
drivers/target/target_core_hba.c
drivers/target/target_core_hba.h [deleted file]
drivers/target/target_core_iblock.c
drivers/target/target_core_internal.h [new file with mode: 0644]
drivers/target/target_core_pr.c
drivers/target/target_core_pr.h
drivers/target/target_core_pscsi.c
drivers/target/target_core_rd.c
drivers/target/target_core_stat.c
drivers/target/target_core_stat.h [deleted file]
drivers/target/target_core_tmr.c
drivers/target/target_core_tpg.c
drivers/target/target_core_transport.c
drivers/target/target_core_ua.c
drivers/target/tcm_fc/tfc_cmd.c
drivers/target/tcm_fc/tfc_conf.c
drivers/target/tcm_fc/tfc_io.c
drivers/target/tcm_fc/tfc_sess.c
drivers/thermal/thermal_sys.c
drivers/tty/serial/8250/8250.c [moved from drivers/tty/serial/8250.c with 100% similarity]
drivers/tty/serial/8250/8250.h [moved from drivers/tty/serial/8250.h with 100% similarity]
drivers/tty/serial/8250/8250_accent.c [moved from drivers/tty/serial/8250_accent.c with 100% similarity]
drivers/tty/serial/8250/8250_acorn.c [moved from drivers/tty/serial/8250_acorn.c with 100% similarity]
drivers/tty/serial/8250/8250_boca.c [moved from drivers/tty/serial/8250_boca.c with 100% similarity]
drivers/tty/serial/8250/8250_dw.c [moved from drivers/tty/serial/8250_dw.c with 100% similarity]
drivers/tty/serial/8250/8250_early.c [moved from drivers/tty/serial/8250_early.c with 100% similarity]
drivers/tty/serial/8250/8250_exar_st16c554.c [moved from drivers/tty/serial/8250_exar_st16c554.c with 100% similarity]
drivers/tty/serial/8250/8250_fourport.c [moved from drivers/tty/serial/8250_fourport.c with 100% similarity]
drivers/tty/serial/8250/8250_fsl.c [moved from drivers/tty/serial/8250_fsl.c with 100% similarity]
drivers/tty/serial/8250/8250_gsc.c [moved from drivers/tty/serial/8250_gsc.c with 100% similarity]
drivers/tty/serial/8250/8250_hp300.c [moved from drivers/tty/serial/8250_hp300.c with 100% similarity]
drivers/tty/serial/8250/8250_hub6.c [moved from drivers/tty/serial/8250_hub6.c with 100% similarity]
drivers/tty/serial/8250/8250_mca.c [moved from drivers/tty/serial/8250_mca.c with 100% similarity]
drivers/tty/serial/8250/8250_pci.c [moved from drivers/tty/serial/8250_pci.c with 100% similarity]
drivers/tty/serial/8250/8250_pnp.c [moved from drivers/tty/serial/8250_pnp.c with 100% similarity]
drivers/tty/serial/8250/Kconfig [new file with mode: 0644]
drivers/tty/serial/8250/Makefile [new file with mode: 0644]
drivers/tty/serial/8250/m32r_sio.c [moved from drivers/tty/serial/m32r_sio.c with 100% similarity]
drivers/tty/serial/8250/m32r_sio.h [moved from drivers/tty/serial/m32r_sio.h with 100% similarity]
drivers/tty/serial/8250/m32r_sio_reg.h [moved from drivers/tty/serial/m32r_sio_reg.h with 100% similarity]
drivers/tty/serial/8250/serial_cs.c [moved from drivers/tty/serial/serial_cs.c with 100% similarity]
drivers/tty/serial/Kconfig
drivers/tty/serial/Makefile
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/jsm/jsm_driver.c
drivers/tty/serial/max3107-aava.c [deleted file]
drivers/tty/serial/omap-serial.c
drivers/tty/serial/pch_uart.c
drivers/tty/serial/serial_core.c
drivers/tty/serial/sh-sci.c
drivers/tty/tty_port.c
drivers/tty/vt/vt_ioctl.c
drivers/usb/class/cdc-wdm.c
drivers/usb/dwc3/ep0.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/composite.c
drivers/usb/gadget/epautoconf.c
drivers/usb/gadget/f_mass_storage.c
drivers/usb/gadget/fsl_udc_core.c
drivers/usb/gadget/langwell_udc.c
drivers/usb/gadget/langwell_udc.h
drivers/usb/gadget/storage_common.c
drivers/usb/host/ehci-fsl.c
drivers/usb/host/ehci-pci.c
drivers/usb/host/ehci-xilinx-of.c
drivers/usb/host/ohci-at91.c
drivers/usb/host/ohci-dbg.c
drivers/usb/host/ohci-pci.c
drivers/usb/host/xhci-ring.c
drivers/usb/misc/emi26.c
drivers/usb/misc/emi62.c
drivers/usb/misc/usbsevseg.c
drivers/usb/musb/davinci.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/omap2430.c
drivers/usb/musb/ux500_dma.c
drivers/usb/otg/Kconfig
drivers/usb/otg/Makefile
drivers/usb/otg/langwell_otg.c [deleted file]
drivers/usb/otg/mv_otg.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/renesas_usbhs/mod_gadget.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/io_ti.c
drivers/usb/serial/kobil_sct.c
drivers/usb/serial/option.c
drivers/usb/serial/qcaux.c
drivers/usb/storage/realtek_cr.c
drivers/usb/usb-skeleton.c
drivers/usb/wusbcore/Kconfig
drivers/vhost/net.c
drivers/video/atmel_lcdfb.c
drivers/video/backlight/adp8860_bl.c
drivers/video/backlight/adp8870_bl.c
drivers/video/backlight/l4f00242t03.c
drivers/video/fsl-diu-fb.c
drivers/video/intelfb/intelfbdrv.c
drivers/video/macfb.c
drivers/video/mx3fb.c
drivers/video/omap2/dss/dispc.c
drivers/video/omap2/dss/dsi.c
drivers/video/omap2/dss/dss.c
drivers/video/omap2/dss/hdmi.c
drivers/video/omap2/dss/rfbi.c
drivers/video/omap2/dss/ti_hdmi.h
drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
drivers/video/omap2/dss/venc.c
drivers/virtio/virtio_ring.c
drivers/watchdog/dw_wdt.c
drivers/watchdog/iTCO_wdt.c
drivers/watchdog/imx2_wdt.c
drivers/watchdog/nuc900_wdt.c
drivers/watchdog/omap_wdt.c
drivers/watchdog/pnx4008_wdt.c
drivers/watchdog/stmp3xxx_wdt.c
drivers/watchdog/via_wdt.c
drivers/watchdog/wafer5823wdt.c
drivers/watchdog/wm8350_wdt.c
drivers/xen/biomerge.c
drivers/xen/grant-table.c
drivers/xen/xen-balloon.c
firmware/Makefile
firmware/isci/isci_firmware.bin.ihex [deleted file]
fs/btrfs/Kconfig
fs/btrfs/Makefile
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/btrfs_inode.h
fs/btrfs/check-integrity.c [new file with mode: 0644]
fs/btrfs/check-integrity.h [new file with mode: 0644]
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-inode.c
fs/btrfs/delayed-ref.c
fs/btrfs/delayed-ref.h
fs/btrfs/disk-io.c
fs/btrfs/disk-io.h
fs/btrfs/export.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/extent_io.h
fs/btrfs/file.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode-map.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ioctl.h
fs/btrfs/locking.c
fs/btrfs/relocation.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/transaction.c
fs/btrfs/tree-log.c
fs/btrfs/ulist.c [new file with mode: 0644]
fs/btrfs/ulist.h [new file with mode: 0644]
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/btrfs/xattr.c
fs/ceph/caps.c
fs/ceph/dir.c
fs/ceph/mds_client.c
fs/ceph/mds_client.h
fs/ceph/xattr.c
fs/cifs/Kconfig
fs/cifs/cifs_debug.c
fs/cifs/cifs_spnego.c
fs/cifs/cifs_unicode.c
fs/cifs/cifs_unicode.h
fs/cifs/cifsacl.c
fs/cifs/cifsencrypt.c
fs/cifs/cifsglob.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/readdir.c
fs/cifs/sess.c
fs/cifs/smbencrypt.c
fs/debugfs/file.c
fs/ecryptfs/crypto.c
fs/ecryptfs/ecryptfs_kernel.h
fs/ecryptfs/inode.c
fs/ecryptfs/keystore.c
fs/ecryptfs/miscdev.c
fs/ecryptfs/mmap.c
fs/ecryptfs/read_write.c
fs/exec.c
fs/ext2/ioctl.c
fs/inode.c
fs/jbd/checkpoint.c
fs/jbd/recovery.c
fs/jffs2/erase.c
fs/logfs/dev_mtd.c
fs/logfs/dir.c
fs/logfs/file.c
fs/logfs/gc.c
fs/logfs/inode.c
fs/logfs/journal.c
fs/logfs/logfs.h
fs/logfs/readwrite.c
fs/logfs/segment.c
fs/logfs/super.c
fs/namei.c
fs/nfs/blocklayout/blocklayout.c
fs/nfs/blocklayout/blocklayout.h
fs/nfs/blocklayout/extents.c
fs/nfs/callback.h
fs/nfs/callback_xdr.c
fs/nfs/nfs4filelayoutdev.c
fs/nfs/nfs4proc.c
fs/nilfs2/ioctl.c
fs/proc/base.c
fs/proc/stat.c
fs/proc/task_mmu.c
fs/qnx4/inode.c
fs/quota/dquot.c
fs/super.c
fs/sysfs/file.c
fs/sysfs/inode.c
fs/xfs/xfs_aops.c
fs/xfs/xfs_attr.c
fs/xfs/xfs_attr_leaf.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_dfrag.c
fs/xfs/xfs_file.c
fs/xfs/xfs_fs_subr.c
fs/xfs/xfs_iget.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_inode.h
fs/xfs/xfs_inode_item.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_qm_syscalls.c
fs/xfs/xfs_super.c
fs/xfs/xfs_sync.c
fs/xfs/xfs_trace.h
fs/xfs/xfs_vnodeops.c
include/acpi/acnames.h
include/acpi/acpi_numa.h
include/acpi/acpiosxf.h
include/acpi/acpixf.h
include/acpi/acrestyp.h
include/acpi/actbl.h
include/acpi/actbl1.h
include/acpi/actbl3.h [new file with mode: 0644]
include/acpi/actypes.h
include/acpi/atomicio.h [deleted file]
include/acpi/processor.h
include/asm-generic/pci_iomap.h
include/drm/drmP.h
include/keys/user-type.h
include/linux/acpi.h
include/linux/acpi_io.h
include/linux/amba/pl08x.h
include/linux/audit.h
include/linux/bcma/bcma.h
include/linux/binfmts.h
include/linux/cpuidle.h
include/linux/device.h
include/linux/digsig.h
include/linux/dmaengine.h
include/linux/dw_dmac.h
include/linux/freezer.h
include/linux/fs.h
include/linux/gpio_keys.h
include/linux/if_team.h
include/linux/kexec.h
include/linux/key.h
include/linux/kref.h
include/linux/lp8727.h [changed mode: 0755->0644]
include/linux/mfd/mcp.h
include/linux/mfd/twl6040.h
include/linux/mfd/ucb1x00.h
include/linux/migrate.h
include/linux/migrate_mode.h [new file with mode: 0644]
include/linux/miscdevice.h
include/linux/mlx4/device.h
include/linux/mod_devicetable.h
include/linux/mpi.h
include/linux/mtd/gpmi-nand.h [new file with mode: 0644]
include/linux/mtd/mtd.h
include/linux/netfilter/nf_conntrack_common.h
include/linux/netfilter/xt_CT.h
include/linux/nvme.h [new file with mode: 0644]
include/linux/perf_event.h
include/linux/pm_qos.h
include/linux/ptrace.h
include/linux/quota.h
include/linux/res_counter.h
include/linux/sched.h
include/linux/sh_dma.h
include/linux/shmem_fs.h
include/linux/sirfsoc_dma.h [new file with mode: 0644]
include/linux/snmp.h
include/linux/suspend.h
include/linux/swap.h
include/linux/sysdev.h [deleted file]
include/linux/thermal.h
include/linux/tty_driver.h
include/linux/usb.h
include/linux/usb/langwell_otg.h [deleted file]
include/media/tuner.h
include/net/bluetooth/hci.h
include/net/cfg80211.h
include/net/flow.h
include/net/netns/generic.h
include/net/netprio_cgroup.h
include/net/sock.h
include/net/tcp.h
include/scsi/libfc.h
include/sound/core.h
include/target/target_core_backend.h [new file with mode: 0644]
include/target/target_core_base.h
include/target/target_core_device.h [deleted file]
include/target/target_core_fabric.h [moved from include/target/target_core_fabric_ops.h with 50% similarity]
include/target/target_core_fabric_lib.h [deleted file]
include/target/target_core_tmr.h [deleted file]
include/target/target_core_tpg.h [deleted file]
include/target/target_core_transport.h [deleted file]
include/trace/events/btrfs.h
include/video/omapdss.h
init/Kconfig
ipc/mqueue.c
ipc/shm.c
kernel/Makefile
kernel/audit.c
kernel/audit.h
kernel/auditfilter.c
kernel/auditsc.c
kernel/capability.c
kernel/events/callchain.c
kernel/events/core.c
kernel/exit.c
kernel/fork.c
kernel/kprobes.c
kernel/power/power.h
kernel/power/process.c
kernel/power/snapshot.c
kernel/power/swap.c
kernel/power/user.c
kernel/rcutorture.c
kernel/res_counter.c
kernel/sched/core.c
kernel/sched/cpupri.c
kernel/sched/fair.c
kernel/sched/rt.c
kernel/seccomp.c
kernel/tracepoint.c
kernel/watchdog.c
lib/Kconfig
lib/Makefile
lib/bug.c
lib/clz_tab.c [new file with mode: 0644]
lib/digsig.c
lib/mpi/longlong.h
lib/mpi/mpi-bit.c
lib/mpi/mpi-div.c
lib/mpi/mpi-pow.c
lib/mpi/mpicoder.c
lib/mpi/mpih-div.c
lib/mpi/mpiutil.c
lib/pci_iomap.c
mm/compaction.c
mm/filemap.c
mm/filemap_xip.c
mm/huge_memory.c
mm/hugetlb.c
mm/kmemleak.c
mm/memblock.c
mm/memcontrol.c
mm/memory.c
mm/migrate.c
mm/page_alloc.c
mm/process_vm_access.c
mm/shmem.c
mm/swap.c
mm/vmscan.c
net/bluetooth/hci_core.c
net/bridge/br_fdb.c
net/caif/caif_dev.c
net/caif/caif_usb.c
net/caif/cfcnfg.c
net/ceph/ceph_common.c
net/ceph/mon_client.c
net/core/dev.c
net/core/ethtool.c
net/core/flow_dissector.c
net/core/net-sysfs.c
net/core/net_namespace.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/secure_seq.c
net/core/sock.c
net/ipv4/inet_connection_sock.c
net/ipv4/inetpeer.c
net/ipv4/ip_gre.c
net/ipv4/ipconfig.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/sysctl_net_ipv4.c
net/ipv4/tcp.c
net/ipv4/tcp_bic.c
net/ipv4/tcp_cubic.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_output.c
net/ipv4/udp_diag.c
net/ipv6/addrconf.c
net/ipv6/datagram.c
net/ipv6/proc.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/l2tp/l2tp_ip.c
net/llc/af_llc.c
net/mac80211/cfg.c
net/mac80211/debugfs_key.c
net/mac80211/ibss.c
net/mac80211/iface.c
net/mac80211/mesh_hwmp.c
net/mac80211/mesh_plink.c
net/mac80211/mlme.c
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/tx.c
net/mac80211/wpa.c
net/mac80211/wpa.h
net/netfilter/ipset/ip_set_core.c
net/netfilter/nf_conntrack_helper.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/xt_CT.c
net/netfilter/xt_hashlimit.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/flow.c
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport.c
net/rds/af_rds.c
net/sched/sch_netem.c
net/sunrpc/auth_generic.c
net/unix/af_unix.c
scripts/Makefile.headersinst
scripts/Makefile.lib
scripts/checkpatch.pl
scripts/checksyscalls.sh
scripts/coccicheck
scripts/coccinelle/api/devm_request_and_ioremap.cocci [new file with mode: 0644]
scripts/coccinelle/api/kstrdup.cocci
scripts/coccinelle/api/memdup.cocci
scripts/coccinelle/api/memdup_user.cocci
scripts/coccinelle/free/devm_free.cocci [new file with mode: 0644]
scripts/coccinelle/free/kfree.cocci
scripts/coccinelle/iterators/fen.cocci
scripts/coccinelle/iterators/itnull.cocci
scripts/coccinelle/locks/call_kern.cocci
scripts/coccinelle/locks/flags.cocci
scripts/coccinelle/locks/mini_lock.cocci
scripts/coccinelle/misc/doubleinit.cocci
scripts/coccinelle/null/eno.cocci
scripts/dtc/dtc.c
scripts/dtc/srcpos.c
scripts/dtc/srcpos.h
scripts/genksyms/Makefile
scripts/kconfig/Makefile
scripts/kconfig/confdata.c
scripts/kconfig/expr.h
scripts/kconfig/gconf.c
scripts/kconfig/lkc.h
scripts/kconfig/mconf.c
scripts/kconfig/merge_config.sh [new file with mode: 0644]
scripts/kernel-doc
scripts/mod/file2alias.c
scripts/tags.sh
security/integrity/Kconfig
security/integrity/Makefile
security/integrity/ima/ima_audit.c
security/integrity/ima/ima_policy.c
security/integrity/integrity.h
security/keys/encrypted-keys/encrypted.c
security/keys/encrypted-keys/masterkey_trusted.c
security/keys/gc.c
security/keys/internal.h
security/keys/key.c
security/keys/keyring.c
security/keys/trusted.c
security/keys/user_defined.c
security/lsm_audit.c
security/tomoyo/util.c
sound/atmel/abdac.c
sound/atmel/ac97c.c
sound/core/Kconfig
sound/core/compress_offload.c
sound/isa/sb/emu8000_patch.c
sound/pci/au88x0/au88x0.c
sound/pci/au88x0/au88x0.h
sound/pci/au88x0/au88x0_pcm.c
sound/pci/hda/alc880_quirks.c
sound/pci/hda/alc882_quirks.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_jack.c
sound/pci/hda/patch_ca0132.c
sound/pci/hda/patch_cirrus.c
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/hda/patch_via.c
sound/pci/oxygen/oxygen_mixer.c
sound/pci/oxygen/xonar_wm87x6.c
sound/pci/ymfpci/ymfpci.c
sound/pci/ymfpci/ymfpci_main.c
sound/soc/codecs/cs42l73.c
sound/soc/codecs/sgtl5000.c
sound/soc/codecs/tlv320aic32x4.c
sound/soc/codecs/wm2000.c
sound/soc/codecs/wm5100.c
sound/soc/codecs/wm8958-dsp2.c
sound/soc/codecs/wm8962.c
sound/soc/codecs/wm8993.c
sound/soc/codecs/wm8994.c
sound/soc/codecs/wm8996.c
sound/soc/codecs/wm8996.h
sound/soc/codecs/wm_hubs.c
sound/soc/ep93xx/ep93xx-pcm.c
sound/soc/imx/imx-pcm-dma-mx2.c
sound/soc/mxs/mxs-pcm.c
sound/soc/mxs/mxs-saif.c
sound/soc/nuc900/nuc900-ac97.c
sound/soc/samsung/dma.c
sound/soc/samsung/neo1973_wm8753.c
sound/soc/sh/siu_pcm.c
sound/soc/soc-core.c
sound/soc/soc-dapm.c
sound/soc/txx9/txx9aclc.c
sound/usb/quirks-table.h
tools/perf/Makefile
tools/perf/builtin-probe.c
tools/perf/builtin-top.c
tools/perf/util/header.c
tools/perf/util/probe-event.c
tools/perf/util/symbol.c
tools/perf/util/trace-event-parse.c
tools/perf/util/ui/browsers/hists.c
tools/perf/util/ui/helpline.c
tools/perf/util/util.h
tools/power/x86/turbostat/turbostat.c
tools/testing/ktest/compare-ktest-sample.pl
tools/testing/ktest/ktest.pl
tools/testing/ktest/sample.conf
virt/kvm/kvm_main.c

Documentation/DocBook/device-drivers.tmpl
index b638e50cf8f604b8bacb3a4a0f974653ca055071..9c27e5125dd26efb5ca49c4b2bc9beadb4d559d7 100644 (file)
@@ -50,7 +50,9 @@
 
      <sect1><title>Delaying, scheduling, and timer routines</title>
 !Iinclude/linux/sched.h
-!Ekernel/sched.c
+!Ekernel/sched/core.c
+!Ikernel/sched/cpupri.c
+!Ikernel/sched/fair.c
 !Iinclude/linux/completion.h
 !Ekernel/timer.c
      </sect1>
@@ -100,9 +102,12 @@ X!Iinclude/linux/kobject.h
 !Iinclude/linux/device.h
      </sect1>
      <sect1><title>Device Drivers Base</title>
+!Idrivers/base/init.c
 !Edrivers/base/driver.c
 !Edrivers/base/core.c
+!Edrivers/base/syscore.c
 !Edrivers/base/class.c
+!Idrivers/base/node.c
 !Edrivers/base/firmware_class.c
 !Edrivers/base/transport_class.c
 <!-- Cannot be included, because
@@ -111,13 +116,18 @@ X!Iinclude/linux/kobject.h
      exceed allowed 44 characters maximum
 X!Edrivers/base/attribute_container.c
 -->
-!Edrivers/base/sys.c
+!Edrivers/base/dd.c
 <!--
 X!Edrivers/base/interface.c
 -->
 !Iinclude/linux/platform_device.h
 !Edrivers/base/platform.c
 !Edrivers/base/bus.c
+     </sect1>
+     <sect1><title>Device Drivers DMA Management</title>
+!Edrivers/base/dma-buf.c
+!Edrivers/base/dma-coherent.c
+!Edrivers/base/dma-mapping.c
      </sect1>
      <sect1><title>Device Drivers Power Management</title>
 !Edrivers/base/power/main.c
@@ -216,9 +226,8 @@ X!Isound/sound_firmware.c
 
   <chapter id="uart16x50">
      <title>16x50 UART Driver</title>
-!Iinclude/linux/serial_core.h
 !Edrivers/tty/serial/serial_core.c
-!Edrivers/tty/serial/8250.c
+!Edrivers/tty/serial/8250/8250.c
   </chapter>
 
   <chapter id="fbdev">
Documentation/DocBook/deviceiobook.tmpl
index c1ed6a49e598cd992552fa6e3b0da70f59aa7829..54199a0dcf9adc325d61a168d54fa913742faebf 100644 (file)
@@ -317,7 +317,7 @@ CPU B:  spin_unlock_irqrestore(&amp;dev_lock, flags)
   <chapter id="pubfunctions">
      <title>Public Functions Provided</title>
 !Iarch/x86/include/asm/io.h
-!Elib/iomap.c
+!Elib/pci_iomap.c
   </chapter>
 
 </book>
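
The hunk above points the device I/O book at lib/pci_iomap.c instead of the removed lib/iomap.c reference. As a rough illustration of the interface that file documents — a sketch, not part of this patch, with a made-up register offset — a PCI driver might map and read a BAR like this:

#include <linux/pci.h>
#include <linux/io.h>

/* Minimal sketch: map BAR 0 with pci_iomap() (documented via
 * lib/pci_iomap.c) and read one register.  The 0x10 offset is a
 * hypothetical example value, not from the patch. */
static int example_read_status(struct pci_dev *pdev, u32 *status)
{
	void __iomem *regs = pci_iomap(pdev, 0, 0);	/* 0 = map whole BAR */

	if (!regs)
		return -ENOMEM;

	*status = ioread32(regs + 0x10);		/* hypothetical offset */
	pci_iounmap(pdev, regs);
	return 0;
}
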
Documentation/DocBook/media/dvb/dvbproperty.xml
index ffee1fbbc001ae316f40be9def3b2456e20c9118..c7a4ca51785980264b37b19b3b63987a3871fe33 100644 (file)
@@ -163,14 +163,16 @@ get/set up to 64 properties. The actual meaning of each property is described on
        <section id="DTV-FREQUENCY">
                <title><constant>DTV_FREQUENCY</constant></title>
 
-               <para>Central frequency of the channel, in HZ.</para>
+               <para>Central frequency of the channel.</para>
 
                <para>Notes:</para>
-               <para>1)For ISDB-T, the channels are usually transmitted with an offset of 143kHz.
+               <para>1)For satellital delivery systems, it is measured in kHz.
+                       For the other ones, it is measured in Hz.</para>
+               <para>2)For ISDB-T, the channels are usually transmitted with an offset of 143kHz.
                        E.g. a valid frequncy could be 474143 kHz. The stepping is bound to the bandwidth of
                        the channel which is 6MHz.</para>
 
-               <para>2)As in ISDB-Tsb the channel consists of only one or three segments the
+               <para>3)As in ISDB-Tsb the channel consists of only one or three segments the
                        frequency step is 429kHz, 3*429 respectively. As for ISDB-T the
                        central frequency of the channel is expected.</para>
        </section>
@@ -735,14 +737,10 @@ typedef enum fe_hierarchy {
                        <listitem><para><link linkend="DTV-TUNE"><constant>DTV_TUNE</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-CLEAR"><constant>DTV_CLEAR</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-FREQUENCY"><constant>DTV_FREQUENCY</constant></link></para></listitem>
-                       <listitem><para><link linkend="DTV-MODULATION"><constant>DTV_MODULATION</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-BANDWIDTH-HZ"><constant>DTV_BANDWIDTH_HZ</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-INVERSION"><constant>DTV_INVERSION</constant></link></para></listitem>
-                       <listitem><para><link linkend="DTV-CODE-RATE-HP"><constant>DTV_CODE_RATE_HP</constant></link></para></listitem>
-                       <listitem><para><link linkend="DTV-CODE-RATE-LP"><constant>DTV_CODE_RATE_LP</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-GUARD-INTERVAL"><constant>DTV_GUARD_INTERVAL</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-TRANSMISSION-MODE"><constant>DTV_TRANSMISSION_MODE</constant></link></para></listitem>
-                       <listitem><para><link linkend="DTV-HIERARCHY"><constant>DTV_HIERARCHY</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-ISDBT-LAYER-ENABLED"><constant>DTV_ISDBT_LAYER_ENABLED</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-ISDBT-PARTIAL-RECEPTION"><constant>DTV_ISDBT_PARTIAL_RECEPTION</constant></link></para></listitem>
                        <listitem><para><link linkend="DTV-ISDBT-SOUND-BROADCASTING"><constant>DTV_ISDBT_SOUND_BROADCASTING</constant></link></para></listitem>
Documentation/DocBook/media/v4l/vidioc-g-ext-ctrls.xml
index 6f1f9a629dc34d1508bd550342d810f05f2b5028..b17a7aac6997482379e1b43c8b0d7e5cf914e1f0 100644 (file)
@@ -183,7 +183,12 @@ applications must set the array to zero.</entry>
            <entry>__u32</entry>
            <entry><structfield>ctrl_class</structfield></entry>
            <entry>The control class to which all controls belong, see
-<xref linkend="ctrl-class" />.</entry>
+<xref linkend="ctrl-class" />. Drivers that use a kernel framework for handling
+controls will also accept a value of 0 here, meaning that the controls can
+belong to any control class. Whether drivers support this can be tested by setting
+<structfield>ctrl_class</structfield> to 0 and calling <constant>VIDIOC_TRY_EXT_CTRLS</constant>
+with a <structfield>count</structfield> of 0. If that succeeds, then the driver
+supports this feature.</entry>
          </row>
          <row>
            <entry>__u32</entry>
@@ -194,10 +199,13 @@ also be zero.</entry>
          <row>
            <entry>__u32</entry>
            <entry><structfield>error_idx</structfield></entry>
-           <entry>Set by the driver in case of an error. It is the
-index of the control causing the error or equal to 'count' when the
-error is not associated with a particular control. Undefined when the
-ioctl returns 0 (success).</entry>
+           <entry>Set by the driver in case of an error. If it is equal
+to <structfield>count</structfield>, then no actual changes were made to
+controls. In other words, the error was not associated with setting a particular
+control. If it is another value, then only the controls up to <structfield>error_idx-1</structfield>
+were modified and control <structfield>error_idx</structfield> is the one that
+caused the error. The <structfield>error_idx</structfield> value is undefined
+if the ioctl returned 0 (success).</entry>
          </row>
          <row>
            <entry>__u32</entry>
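
The table entry above describes probing whether a driver accepts ctrl_class set to 0 by calling VIDIOC_TRY_EXT_CTRLS with a count of 0. A minimal sketch of that probe, not part of the patch itself, could be:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

/* Returns non-zero if the driver uses the control framework and
 * accepts a zero control class in struct v4l2_ext_controls. */
static int supports_any_ctrl_class(int fd)
{
	struct v4l2_ext_controls ctrls;

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.ctrl_class = 0;	/* "any class" */
	ctrls.count = 0;	/* no controls, just probing */

	return ioctl(fd, VIDIOC_TRY_EXT_CTRLS, &ctrls) == 0;
}
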
Documentation/DocBook/media/v4l/vidioc-g-fbuf.xml
index 93817f33703305beae49acafa52b1bceb0613dd2..7c63815e7afd0eb964a1848513dea5be14eb677e 100644 (file)
@@ -364,15 +364,20 @@ capability and it is cleared otherwise.</entry>
          <row>
            <entry><constant>V4L2_FBUF_FLAG_OVERLAY</constant></entry>
            <entry>0x0002</entry>
-           <entry>The frame buffer is an overlay surface the same
-size as the capture. [?]</entry>
-         </row>
-         <row>
-           <entry spanname="hspan">The purpose of
-<constant>V4L2_FBUF_FLAG_OVERLAY</constant> was never quite clear.
-Most drivers seem to ignore this flag. For compatibility with the
-<wordasword>bttv</wordasword> driver applications should set the
-<constant>V4L2_FBUF_FLAG_OVERLAY</constant> flag.</entry>
+           <entry>If this flag is set for a video capture device, then the
+driver will set the initial overlay size to cover the full framebuffer size,
+otherwise the existing overlay size (as set by &VIDIOC-S-FMT;) will be used.
+
+Only one video capture driver (bttv) supports this flag. The use of this flag
+for capture devices is deprecated. There is no way to detect which drivers
+support this flag, so the only reliable method of setting the overlay size is
+through &VIDIOC-S-FMT;.
+
+If this flag is set for a video output device, then the video output overlay
+window is relative to the top-left corner of the framebuffer and restricted
+to the size of the framebuffer. If it is cleared, then the video output
+overlay window is relative to the video output display.
+            </entry>
          </row>
          <row>
            <entry><constant>V4L2_FBUF_FLAG_CHROMAKEY</constant></entry>
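
A minimal sketch of toggling this flag, assuming an open video output node fd
(VIDIOC_S_FBUF may require elevated privileges):

        struct v4l2_framebuffer fb;

        if (ioctl(fd, VIDIOC_G_FBUF, &fb) == 0) {
                /* Make the output overlay window relative to the framebuffer. */
                fb.flags |= V4L2_FBUF_FLAG_OVERLAY;
                ioctl(fd, VIDIOC_S_FBUF, &fb);
        }
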
index 16431813bebd253f87965029088ee733328c36e5..66e9a5257861ab1c5f65b197e3bc7e52304659e6 100644 (file)
@@ -98,8 +98,11 @@ the &v4l2-output; <structfield>modulator</structfield> field and the
            <entry>&v4l2-tuner-type;</entry>
            <entry><structfield>type</structfield></entry>
            <entry>The tuner type. This is the same value as in the
-&v4l2-tuner; <structfield>type</structfield> field. The field is not
-applicable to modulators, &ie; ignored by drivers.</entry>
+&v4l2-tuner; <structfield>type</structfield> field. The type must be set
+to <constant>V4L2_TUNER_RADIO</constant> for <filename>/dev/radioX</filename>
+device nodes, and to <constant>V4L2_TUNER_ANALOG_TV</constant>
+for all others. The field is not applicable to modulators, &ie; ignored
+by drivers.</entry>
          </row>
          <row>
            <entry>__u32</entry>
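
A minimal sketch of such a call for a /dev/radioX node, assuming the tuner
reports V4L2_TUNER_CAP_LOW (62.5 Hz units) and an already-open descriptor fd:

        struct v4l2_frequency freq;

        memset(&freq, 0, sizeof(freq));
        freq.tuner = 0;
        freq.type = V4L2_TUNER_RADIO;   /* radio node; V4L2_TUNER_ANALOG_TV elsewhere */
        freq.frequency = 95 * 16000;    /* 95 MHz in 62.5 Hz units */

        if (ioctl(fd, VIDIOC_S_FREQUENCY, &freq) < 0)
                perror("VIDIOC_S_FREQUENCY");
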
index 08ae82f131f2b952df2a0f5488b49d0747776153..1d43065090dd1087be1c26a8f54804ac8a4ee2c2 100644 (file)
@@ -61,8 +61,8 @@ desired input in an integer and call the
 <constant>VIDIOC_S_INPUT</constant> ioctl with a pointer to this
 integer. Side effects are possible. For example inputs may support
 different video standards, so the driver may implicitly switch the
-current standard. It is good practice to select an input before
-querying or negotiating any other parameters.</para>
+current standard. Because of these possible side effects applications
+must select an input before querying or negotiating any other parameters.</para>
 
     <para>Information about video inputs is available using the
 &VIDIOC-ENUMINPUT; ioctl.</para>
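
A minimal sketch of this sequence, assuming an open descriptor fd and an input
index obtained from VIDIOC_ENUMINPUT:

        int input = 0;  /* index of the desired input */

        if (ioctl(fd, VIDIOC_S_INPUT, &input) < 0)
                perror("VIDIOC_S_INPUT");

        /* Only now query or negotiate standards, formats, controls, ... */
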
index fd45f1c13ccf445b89e582036b40c3a7a94ab86c..4533068ecb8ad5dd221512d5f6dc8a7266b66ecb 100644 (file)
@@ -61,8 +61,9 @@ desired output in an integer and call the
 <constant>VIDIOC_S_OUTPUT</constant> ioctl with a pointer to this integer.
 Side effects are possible. For example outputs may support different
 video standards, so the driver may implicitly switch the current
-standard. It is good practice to select an output before querying or
-negotiating any other parameters.</para>
+standard. Because of these possible side effects applications
+must select an output before querying or negotiating any other parameters.</para>
 
     <para>Information about video outputs is available using the
 &VIDIOC-ENUMOUTPUT; ioctl.</para>
index 5cc699ba5453479ea13a028236f0d568063edb1e..e7cc363972173ca0177de6bfb7ce3f38de89bde2 100644 (file)
@@ -47,20 +47,53 @@ directory apei/einj. The following files are provided.
 
 - param1
   This file is used to set the first error parameter value. Effect of
-  parameter depends on error_type specified. For memory error, this is
-  physical memory address.  Only available if param_extension module
-  parameter is specified.
+  parameter depends on error_type specified.
 
 - param2
   This file is used to set the second error parameter value. Effect of
-  parameter depends on error_type specified. For memory error, this is
-  physical memory address mask.  Only available if param_extension
-  module parameter is specified.
+  parameter depends on error_type specified.
+
+BIOS versions based on the ACPI 4.0 specification have limited options
+to control where the errors are injected.  Your BIOS may support an
+extension (enabled with the param_extension=1 module parameter, or
+boot command line einj.param_extension=1). This allows the address
+and mask for memory injections to be specified by the param1 and
+param2 files in apei/einj.
+
+BIOS versions using the ACPI 5.0 specification have more control over
+the target of the injection. For processor related errors (type 0x1,
+0x2 and 0x4) the APICID of the target should be provided using the
+param1 file in apei/einj. For memory errors (type 0x8, 0x10 and 0x20)
+the address is set using param1 with a mask in param2 (0x0 is equivalent
+to all ones). For PCI express errors (type 0x40, 0x80 and 0x100) the
+segment, bus, device and function are specified using param1:
+
+         31     24 23    16 15    11 10      8  7        0
+       +-------------------------------------------------+
+       | segment |   bus  | device | function | reserved |
+       +-------------------------------------------------+
+
+An ACPI 5.0 BIOS may also allow vendor specific errors to be injected.
+In this case a file named vendor will contain identifying information
+from the BIOS that hopefully will allow an application wishing to use
+the vendor specific extension to tell that it is running on a BIOS
+that supports it. All vendor extensions have the 0x80000000 bit set in
+error_type. A file vendor_flags controls the interpretation of param1
+and param2 (1 = PROCESSOR, 2 = MEMORY, 4 = PCI). See your BIOS vendor
+documentation for details (and expect changes to this API if vendors'
+creativity in using this feature expands beyond our expectations).
+
+Example:
+# cd /sys/kernel/debug/apei/einj
+# cat available_error_type             # See which errors can be injected
+0x00000002     Processor Uncorrectable non-fatal
+0x00000008     Memory Correctable
+0x00000010     Memory Uncorrectable non-fatal
+# echo 0x12345000 > param1             # Set memory address for injection
+# echo 0xfffffffffffff000 > param2     # Mask - anywhere in this page
+# echo 0x8 > error_type                        # Choose correctable memory error
+# echo 1 > error_inject                        # Inject now
 
-Injecting parameter support is a BIOS version specific extension, that
-is, it only works on some BIOS version.  If you want to use it, please
-make sure your BIOS version has the proper support and specify
-"param_extension=y" in module parameter.
 
 For more information about EINJ, please refer to ACPI specification
-version 4.0, section 17.5.
+version 4.0, section 17.5 and ACPI 5.0, section 18.6.
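
A small helper sketch for composing param1 according to the layout above; the
function name is illustrative only:

        /* Pack segment/bus/device/function into the param1 layout shown above. */
        static unsigned int einj_pcie_param1(unsigned int seg, unsigned int bus,
                                             unsigned int dev, unsigned int fn)
        {
                return (seg & 0xff) << 24 |
                       (bus & 0xff) << 16 |
                       (dev & 0x1f) << 11 |
                       (fn  & 0x07) << 8;
        }
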
index 96b690348ba14290f10235b94d6e13e4cc2d329d..cf44eb6499b418c3fd28339b52d53b3467cbfe23 100644 (file)
@@ -102,9 +102,15 @@ or
        make coccicheck COCCI=<my_SP.cocci> MODE=report
 
 
- Using Coccinelle on (modified) files
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Controlling Which Files are Processed by Coccinelle
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+By default the entire kernel source tree is checked.
+
+To apply Coccinelle to a specific directory, M= can be used.
+For example, to check drivers/net/wireless/ one may write:
 
+    make coccicheck M=drivers/net/wireless/
+    
 To apply Coccinelle on a file basis, instead of a directory basis, the
 following command may be used:
 
index cec8864ce4e8cbb4089b68279255c51645938554..00383186d8fb3e2c0fc4d5a12852844cb5b2721f 100644 (file)
@@ -447,6 +447,9 @@ Your cooperation is appreciated.
                234 = /dev/btrfs-control        Btrfs control device
                235 = /dev/autofs       Autofs control device
                236 = /dev/mapper/control       Device-Mapper control device
+               237 = /dev/loop-control Loopback control device
+               238 = /dev/vhost-net    Host kernel accelerator for virtio net
+
                240-254                 Reserved for local use
                255                     Reserved for MISC_DYNAMIC_MINOR
 
diff --git a/Documentation/devicetree/bindings/dma/atmel-dma.txt b/Documentation/devicetree/bindings/dma/atmel-dma.txt
new file mode 100644 (file)
index 0000000..3c046ee
--- /dev/null
@@ -0,0 +1,14 @@
+* Atmel Direct Memory Access Controller (DMA)
+
+Required properties:
+- compatible: Should be "atmel,<chip>-dma"
+- reg: Should contain DMA registers location and length
+- interrupts: Should contain DMA interrupt
+
+Examples:
+
+dma@ffffec00 {
+       compatible = "atmel,at91sam9g45-dma";
+       reg = <0xffffec00 0x200>;
+       interrupts = <21>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/omap-i2c.txt b/Documentation/devicetree/bindings/i2c/omap-i2c.txt
new file mode 100644 (file)
index 0000000..56564aa
--- /dev/null
@@ -0,0 +1,30 @@
+I2C for OMAP platforms
+
+Required properties :
+- compatible : Must be "ti,omap3-i2c" or "ti,omap4-i2c"
+- ti,hwmods : Must be "i2c<n>", n being the instance number (1-based)
+- #address-cells = <1>;
+- #size-cells = <0>;
+
+Recommended properties :
+- clock-frequency : Desired I2C bus clock frequency in Hz. If not specified,
+  the default 100 kHz frequency will be used.
+
+Optional properties:
+- Child nodes conforming to i2c bus binding
+
+Note: The current implementation fetches the base address, irq and dma
+from the omap hwmod database during device registration.
+The future plan is to migrate the hwmod database contents into the device
+tree blob so that all the required data will be taken from the device tree
+dts file.
+
+Examples :
+
+i2c1: i2c@0 {
+    compatible = "ti,omap3-i2c";
+    #address-cells = <1>;
+    #size-cells = <0>;
+    ti,hwmods = "i2c1";
+    clock-frequency = <400000>;
+};
index 94b7e0f96b38fa8086ea14f7cd88718b45e84d70..bbe6cb3d1856b8943dd04d3449af9d39d5c1e7dc 100644 (file)
@@ -75,6 +75,10 @@ The slave DMA usage consists of following steps:
    slave_sg    - DMA a list of scatter gather buffers from/to a peripheral
    dma_cyclic  - Perform a cyclic DMA operation from/to a peripheral till the
                  operation is explicitly stopped.
+   interleaved_dma - This is common to Slave as well as M2M clients. For slave
+                channels the address of the device's fifo may already be known
+                to the driver. Various types of operations can be expressed by
+                setting appropriate values in the 'dma_interleaved_template'
+                members (see the sketch below).
 
    A non-NULL return of this transfer API represents a "descriptor" for
    the given transaction.
@@ -89,6 +93,10 @@ The slave DMA usage consists of following steps:
                struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
                size_t period_len, enum dma_data_direction direction);
 
+       struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+               struct dma_chan *chan, struct dma_interleaved_template *xt,
+               unsigned long flags);
+
    The peripheral driver is expected to have mapped the scatterlist for
    the DMA operation prior to calling device_prep_slave_sg, and must
    keep the scatterlist mapped until the DMA operation has completed.
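
A rough sketch of a single-frame interleaved transfer, assuming the
dma_interleaved_template layout from <linux/dmaengine.h>; buf_dma, fifo_dma,
len, chan and flags are placeholders a driver would already have:

        struct dma_async_tx_descriptor *desc;
        struct dma_interleaved_template *xt;

        xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
        if (!xt)
                return -ENOMEM;         /* or whatever error path the driver uses */

        xt->dir = DMA_MEM_TO_DEV;
        xt->src_start = buf_dma;        /* DMA address of the memory buffer */
        xt->dst_start = fifo_dma;       /* device fifo, typically known to the driver */
        xt->src_inc = true;             /* walk through memory ... */
        xt->dst_inc = false;            /* ... but keep writing the same fifo address */
        xt->numf = 1;                   /* one frame ... */
        xt->frame_size = 1;             /* ... made of a single chunk */
        xt->sgl[0].size = len;
        xt->sgl[0].icg = 0;

        desc = chan->device->device_prep_interleaved_dma(chan, xt, flags);
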
index 10c64c8a13d4f2aec7bebb0bd6a44ce401aa78cc..41c0c5d1ba145a392e17d42dc4ce8ac9c3c41a49 100644 (file)
@@ -233,6 +233,10 @@ certainly invest a bit more effort into libata core layer).
   6. List of managed interfaces
   -----------------------------
 
+MEM
+  devm_kzalloc()
+  devm_kfree()
+
 IO region
   devm_request_region()
   devm_request_mem_region()
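
A minimal sketch of the devm_kzalloc() pattern; struct foo_priv and
foo_probe() are made-up names:

        static int foo_probe(struct platform_device *pdev)
        {
                struct foo_priv *priv;

                priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
                if (!priv)
                        return -ENOMEM;

                platform_set_drvdata(pdev, priv);

                /* No kfree() in the error paths or in the remove() callback:
                 * the allocation is released automatically when the device
                 * is detached from the driver. */
                return 0;
        }
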
index d725c0dfe032f0692ab9de9a4358dfdcdd711f39..a0ffac029a0dc703322d922f3ec59435e3315ef3 100644 (file)
@@ -439,17 +439,6 @@ Who:       Jean Delvare <khali@linux-fr.org>
 
 ----------------------------
 
-What:  For VIDIOC_S_FREQUENCY the type field must match the device node's type.
-       If not, return -EINVAL.
-When:  3.2
-Why:   It makes no sense to switch the tuner to radio mode by calling
-       VIDIOC_S_FREQUENCY on a video node, or to switch the tuner to tv mode by
-       calling VIDIOC_S_FREQUENCY on a radio node. This is the first step of a
-       move to more consistent handling of tv and radio tuners.
-Who:   Hans Verkuil <hans.verkuil@cisco.com>
-
-----------------------------
-
 What:  Opening a radio device node will no longer automatically switch the
        tuner mode from tv to radio.
 When:  3.3
@@ -521,3 +510,17 @@ Why:       The pci_scan_bus_parented() interface creates a new root bus.  The
        convert to using pci_scan_root_bus() so they can supply a list of
        bus resources when the bus is created.
 Who:   Bjorn Helgaas <bhelgaas@google.com>
+
+----------------------------
+
+What:  The CAP9 SoC family will be removed
+When:  3.4
+Files: arch/arm/mach-at91/at91cap9.c
+       arch/arm/mach-at91/at91cap9_devices.c
+       arch/arm/mach-at91/include/mach/at91cap9.h
+       arch/arm/mach-at91/include/mach/at91cap9_matrix.h
+       arch/arm/mach-at91/include/mach/at91cap9_ddrsdr.h
+       arch/arm/mach-at91/board-cap9adk.c
+Why:   The code is not actively maintained and platforms are now hard to find.
+Who:   Nicolas Ferre <nicolas.ferre@atmel.com>
+       Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcrosoft.com>
index 6f496a5867324cc5750f90ecd0fd91d3547b554d..23b7def21ba8bc5fe9e797e2b4cce1b94245d969 100644 (file)
@@ -26,6 +26,10 @@ Supported chips:
     Prefix: 'it8721'
     Addresses scanned: from Super I/O config space (8 I/O ports)
     Datasheet: Not publicly available
+  * IT8728F
+    Prefix: 'it8728'
+    Addresses scanned: from Super I/O config space (8 I/O ports)
+    Datasheet: Not publicly available
   * SiS950   [clone of IT8705F]
     Prefix: 'it87'
     Addresses scanned: from Super I/O config space (8 I/O ports)
@@ -71,7 +75,7 @@ Description
 -----------
 
 This driver implements support for the IT8705F, IT8712F, IT8716F,
-IT8718F, IT8720F, IT8721F, IT8726F, IT8758E and SiS950 chips.
+IT8718F, IT8720F, IT8721F, IT8726F, IT8728F, IT8758E and SiS950 chips.
 
 These chips are 'Super I/O chips', supporting floppy disks, infrared ports,
 joysticks and other miscellaneous stuff. For hardware monitoring, they
@@ -105,6 +109,9 @@ The IT8726F is just bit enhanced IT8716F with additional hardware
 for AMD power sequencing. Therefore the chip will appear as IT8716F
 to userspace applications.
 
+The IT8728F is considered compatible with the IT8721F, until a datasheet
+becomes available (hopefully).
+
 Temperatures are measured in degrees Celsius. An alarm is triggered once
 when the Overtemperature Shutdown limit is crossed.
 
@@ -121,8 +128,8 @@ alarm is triggered if the voltage has crossed a programmable minimum or
 maximum limit. Note that minimum in this case always means 'closest to
 zero'; this is important for negative voltage measurements. All voltage
 inputs can measure voltages between 0 and 4.08 volts, with a resolution of
-0.016 volt (except IT8721F/IT8758E: 0.012 volt.) The battery voltage in8 does
-not have limit registers.
+0.016 volt (except IT8721F/IT8758E and IT8728F: 0.012 volt.) The battery
+voltage in8 does not have limit registers.
 
 On the IT8721F/IT8758E, some voltage inputs are internal and scaled inside
 the chip (in7, in8 and optionally in3). The driver handles this transparently
index b9843eab1afb107d6ffb13239b6df435bd7c8c54..4d30d209881a2861ad95c07208a7179ead236b03 100644 (file)
@@ -12,6 +12,11 @@ Supported chips:
     Addresses scanned: I2C 0x18 and 0x4e
     Datasheet: Publicly available at the National Semiconductor website
                http://www.national.com/pf/LM/LM64.html
+  * National Semiconductor LM96163
+    Prefix: 'lm96163'
+    Addresses scanned: I2C 0x4c
+    Datasheet: Publicly available at the National Semiconductor website
+               http://www.national.com/pf/LM/LM96163.html
 
 Author: Jean Delvare <khali@linux-fr.org>
 
@@ -49,16 +54,24 @@ value for measuring the speed of the fan. It can measure fan speeds down to
 Note that the pin used for fan monitoring is shared with an alert out
 function. Depending on how the board designer wanted to use the chip, fan
 speed monitoring will or will not be possible. The proper chip configuration
-is left to the BIOS, and the driver will blindly trust it.
+is left to the BIOS, and the driver will blindly trust it. Only the original
+LM63 suffers from this limitation; the LM64 and LM96163 have separate pins
+for fan monitoring and alert out. On the LM64, monitoring is always enabled;
+on the LM96163 it can be disabled.
 
 A PWM output can be used to control the speed of the fan. The LM63 has two
 PWM modes: manual and automatic. Automatic mode is not fully implemented yet
 (you cannot define your custom PWM/temperature curve), and mode change isn't
 supported either.
 
-The lm63 driver will not update its values more frequently than every
-second; reading them more often will do no harm, but will return 'old'
-values.
+The lm63 driver will not update its values more frequently than configured with
+the update_interval sysfs attribute; reading them more often will do no harm,
+but will return 'old' values. Values in the automatic fan control lookup table
+(attributes pwm1_auto_*) have their own independent lifetime of 5 seconds.
 
 The LM64 is effectively an LM63 with GPIO lines. The driver does not
 support these GPIO lines at present.
+
+The LM96163 is an enhanced version of LM63 with improved temperature accuracy
+and better PWM resolution. For LM96163, the external temperature sensor type is
+configurable as CPU embedded diode(1) or 3904 transistor(2).
index a4aa8f600e09643beb6a2b505c9c08d00f28166a..1f4dd855a299350ec707fc9ec34630baef29c689 100644 (file)
@@ -304,7 +304,7 @@ value (fastest fan speed) wins.
 temp[1-*]_type Sensor type selection.
                Integers 1 to 6
                RW
-               1: PII/Celeron Diode
+               1: CPU embedded diode
                2: 3904 transistor
                3: thermal diode
                4: thermistor
index 23fcb05175be70e0d140febdc153751763f38d04..53305bd08182dac8db030112ed9dadf1bf99efc9 100644 (file)
@@ -17,11 +17,11 @@ reports supported by a device are also provided by sysfs in
 class/input/event*/device/capabilities/, and the properties of a device are
 provided in class/input/event*/device/properties.
 
-Types:
-==========
-Types are groupings of codes under a logical input construct. Each type has a
-set of applicable codes to be used in generating events. See the Codes section
-for details on valid codes for each type.
+Event types:
+===========
+Event types are groupings of codes under a logical input construct. Each
+type has a set of applicable codes to be used in generating events. See the
+Codes section for details on valid codes for each type.
 
 * EV_SYN:
   - Used as markers to separate events. Events may be separated in time or in
@@ -63,9 +63,9 @@ for details on valid codes for each type.
 * EV_FF_STATUS:
   - Used to receive force feedback device status.
 
-Codes:
-==========
-Codes define the precise type of event.
+Event codes:
+===========
+Event codes define the precise type of event.
 
 EV_SYN:
 ----------
@@ -220,6 +220,56 @@ EV_PWR:
 EV_PWR events are a special type of event used specifically for power
 management. Its usage is not well defined. To be addressed later.
 
+Device properties:
+=================
+Normally, userspace sets up an input device based on the data it emits,
+i.e., the event types. In the case of two devices emitting the same event
+types, additional information can be provided in the form of device
+properties.
+
+INPUT_PROP_DIRECT + INPUT_PROP_POINTER:
+--------------------------------------
+The INPUT_PROP_DIRECT property indicates that device coordinates should be
+directly mapped to screen coordinates (not taking into account trivial
+transformations, such as scaling, flipping and rotating). Non-direct input
+devices require non-trivial transformation, such as absolute to relative
+transformation for touchpads. Typical direct input devices: touchscreens,
+drawing tablets; non-direct devices: touchpads, mice.
+
+The INPUT_PROP_POINTER property indicates that the device is not transposed
+on the screen and thus requires use of an on-screen pointer to trace user's
+movements.  Typical pointer devices: touchpads, tablets, mice; non-pointer
+device: touchscreen.
+
+If neither INPUT_PROP_DIRECT nor INPUT_PROP_POINTER is set, the property is
+considered undefined and the device type should be deduced in the
+traditional way, using emitted event types.
+
+INPUT_PROP_BUTTONPAD:
+--------------------
+For touchpads where the button is placed beneath the surface, such that
+pressing down on the pad causes a button click, this property should be
+set. Common in clickpad notebooks and macbooks from 2009 and onwards.
+
+Originally, the buttonpad property was coded into the bcm5974 driver
+version field under the name integrated button. For backwards
+compatibility, both methods need to be checked in userspace.
+
+INPUT_PROP_SEMI_MT:
+------------------
+Some touchpads, most common between 2008 and 2011, can detect the presence
+of multiple contacts without resolving the individual positions; only the
+number of contacts and a rectangular shape is known. For such
+touchpads, the semi-mt property should be set.
+
+Depending on the device, the rectangle may enclose all touches, like a
+bounding box, or just some of them, for instance the two most recent
+touches. The diversity makes the rectangle of limited use, but some
+gestures can normally be extracted from it.
+
+If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
+device.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
@@ -240,6 +290,8 @@ used to report when a touch is active on the screen.
 BTN_{MOUSE,LEFT,MIDDLE,RIGHT} must not be reported as the result of touch
 contact. BTN_TOOL_<name> events should be reported where possible.
 
+For new hardware, INPUT_PROP_DIRECT should be set.
+
 Trackpads:
 ----------
 Legacy trackpads that only provide relative position information must report
@@ -250,6 +302,8 @@ location of the touch. BTN_TOUCH should be used to report when a touch is active
 on the trackpad. Where multi-finger support is available, BTN_TOOL_<name> should
 be used to report the number of touches active on the trackpad.
 
+For new hardware, INPUT_PROP_POINTER should be set.
+
 Tablets:
 ----------
 BTN_TOOL_<name> events must be reported when a stylus or other tool is active on
@@ -260,3 +314,5 @@ button may be used for buttons on the tablet except BTN_{MOUSE,LEFT}.
 BTN_{0,1,2,etc} are good generic codes for unlabeled buttons. Do not use
 meaningful buttons, like BTN_FORWARD, unless the button is labeled for that
 purpose on the device.
+
+For new hardware, both INPUT_PROP_DIRECT and INPUT_PROP_POINTER should be set.
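
A rough userspace sketch that reads the property bitmap with the EVIOCGPROP
ioctl from <linux/input.h> and applies the rules above, assuming an open
/dev/input/eventX descriptor fd (error handling omitted):

        unsigned long props[INPUT_PROP_CNT / (8 * sizeof(long)) + 1] = { 0 };

        ioctl(fd, EVIOCGPROP(sizeof(props)), props);

        if (props[INPUT_PROP_DIRECT / (8 * sizeof(long))] &
            (1UL << (INPUT_PROP_DIRECT % (8 * sizeof(long)))))
                ; /* map device coordinates straight to the screen */
        else if (props[INPUT_PROP_POINTER / (8 * sizeof(long))] &
                 (1UL << (INPUT_PROP_POINTER % (8 * sizeof(long)))))
                ; /* drive an on-screen pointer */
        else
                ; /* fall back to deducing the type from the event types */
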
index 54078ed96b3751724acaf0bd3fdab95df962480f..4840334ea97b30705df13a2c642e976a83621800 100644 (file)
@@ -149,6 +149,7 @@ Code  Seq#(hex)     Include File            Comments
 'M'    01-03   drivers/scsi/megaraid/megaraid_sas.h
 'M'    00-0F   drivers/video/fsl-diu-fb.h      conflict!
 'N'    00-1F   drivers/usb/scanner.h
+'N'    40-7F   drivers/block/nvme.c
 'O'     00-06   mtd/ubi-user.h         UBI
 'P'    all     linux/soundcard.h       conflict!
 'P'    60-6F   sound/sscape_ioctl.h    conflict!
index f47cdefb4d1efc17d4e515df31996f134674649e..ab0a984530d87848435571d857b201484591f73c 100644 (file)
@@ -33,14 +33,15 @@ This document describes the Linux kernel Makefiles.
 
        === 6 Architecture Makefiles
           --- 6.1 Set variables to tweak the build to the architecture
-          --- 6.2 Add prerequisites to archprepare:
-          --- 6.3 List directories to visit when descending
-          --- 6.4 Architecture-specific boot images
-          --- 6.5 Building non-kbuild targets
-          --- 6.6 Commands useful for building a boot image
-          --- 6.7 Custom kbuild commands
-          --- 6.8 Preprocessing linker scripts
-          --- 6.9 Generic header files
+          --- 6.2 Add prerequisites to archheaders:
+          --- 6.3 Add prerequisites to archprepare:
+          --- 6.4 List directories to visit when descending
+          --- 6.5 Architecture-specific boot images
+          --- 6.6 Building non-kbuild targets
+          --- 6.7 Commands useful for building a boot image
+          --- 6.8 Custom kbuild commands
+          --- 6.9 Preprocessing linker scripts
+          --- 6.10 Generic header files
 
        === 7 Kbuild syntax for exported headers
                --- 7.1 header-y
@@ -252,7 +253,7 @@ more details, with real examples.
        This will create a library lib.a based on delay.o. For kbuild to
        actually recognize that there is a lib.a being built, the directory
        shall be listed in libs-y.
-       See also "6.3 List directories to visit when descending".
+       See also "6.4 List directories to visit when descending".
 
        Use of lib-y is normally restricted to lib/ and arch/*/lib.
 
@@ -974,7 +975,20 @@ When kbuild executes, the following steps are followed (roughly):
        $(KBUILD_ARFLAGS) set by the top level Makefile to "D" (deterministic
        mode) if this option is supported by $(AR).
 
---- 6.2 Add prerequisites to archprepare:
+--- 6.2 Add prerequisites to archheaders:
+
+       The archheaders: rule is used to generate header files that
+       may be installed into user space by "make headers_install" or
+       "make headers_install_all".  In order to support
+       "make headers_install_all", this target has to be able to run
+       on an unconfigured tree, or a tree configured for another
+       architecture.
+
+       It is run before "make archprepare" when run on the
+       architecture itself.
+
+
+--- 6.3 Add prerequisites to archprepare:
 
        The archprepare: rule is used to list prerequisites that need to be
        built before starting to descend down in the subdirectories.
@@ -990,7 +1004,7 @@ When kbuild executes, the following steps are followed (roughly):
        generating offset header files.
 
 
---- 6.3 List directories to visit when descending
+--- 6.4 List directories to visit when descending
 
        An arch Makefile cooperates with the top Makefile to define variables
        which specify how to build the vmlinux file.  Note that there is no
@@ -1019,7 +1033,7 @@ When kbuild executes, the following steps are followed (roughly):
                drivers-$(CONFIG_OPROFILE)  += arch/sparc64/oprofile/
 
 
---- 6.4 Architecture-specific boot images
+--- 6.5 Architecture-specific boot images
 
        An arch Makefile specifies goals that take the vmlinux file, compress
        it, wrap it in bootstrapping code, and copy the resulting files
@@ -1070,7 +1084,7 @@ When kbuild executes, the following steps are followed (roughly):
 
        When "make" is executed without arguments, bzImage will be built.
 
---- 6.5 Building non-kbuild targets
+--- 6.6 Building non-kbuild targets
 
     extra-y
 
@@ -1090,7 +1104,7 @@ When kbuild executes, the following steps are followed (roughly):
        shall be built, but shall not be linked as part of built-in.o.
 
 
---- 6.6 Commands useful for building a boot image
+--- 6.7 Commands useful for building a boot image
 
        Kbuild provides a few macros that are useful when building a
        boot image.
@@ -1112,7 +1126,7 @@ When kbuild executes, the following steps are followed (roughly):
        always be built.
        Assignments to $(targets) are without $(obj)/ prefix.
        if_changed may be used in conjunction with custom commands as
-       defined in 6.7 "Custom kbuild commands".
+       defined in 6.8 "Custom kbuild commands".
 
        Note: It is a typical mistake to forget the FORCE prerequisite.
        Another common pitfall is that whitespace is sometimes
@@ -1171,7 +1185,7 @@ When kbuild executes, the following steps are followed (roughly):
                $(obj)/%.dtb: $(src)/%.dts
                        $(call cmd,dtc)
 
---- 6.7 Custom kbuild commands
+--- 6.8 Custom kbuild commands
 
        When kbuild is executing with KBUILD_VERBOSE=0, then only a shorthand
        of a command is normally displayed.
@@ -1198,7 +1212,7 @@ When kbuild executes, the following steps are followed (roughly):
        will be displayed with "make KBUILD_VERBOSE=0".
 
 
---- 6.8 Preprocessing linker scripts
+--- 6.9 Preprocessing linker scripts
 
        When the vmlinux image is built, the linker script
        arch/$(ARCH)/kernel/vmlinux.lds is used.
@@ -1228,7 +1242,7 @@ When kbuild executes, the following steps are followed (roughly):
        The kbuild infrastructure for *lds file are used in several
        architecture-specific files.
 
---- 6.9 Generic header files
+--- 6.10 Generic header files
 
        The directory include/asm-generic contains the header files
        that may be shared between individual architectures.
index b29f3c416296ef880203a02c1d9b8c707f09206a..033d4e69b43b107d9780ec959105823e6cefd07c 100644 (file)
@@ -1059,6 +1059,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        By default, super page will be supported if Intel IOMMU
                        has the capability. With this option, super page will
                        not be supported.
+
+       intel_idle.max_cstate=  [KNL,HW,ACPI,X86]
+                       0       disables intel_idle and fall back on acpi_idle.
+                       1 to 6  specify maximum depth of C-state.
+
        intremap=       [X86-64, Intel-IOMMU]
                        on      enable Interrupt Remapping (default)
                        off     disable Interrupt Remapping
index 6727b92bc2fb9db3f700d3056c1360e0297c09b5..150fd3833d0bfa114e39ce8d8d6687425aace459 100644 (file)
@@ -857,42 +857,41 @@ case), we define a mapping like this:
 
 ...
 {
-       .name "2bit"
+       .name "2bit"
        .ctrl_dev_name = "pinctrl-foo",
        .function = "mmc0",
        .group = "mmc0_1_grp",
        .dev_name = "foo-mmc.0",
 },
 {
-       .name "4bit"
+       .name "4bit"
        .ctrl_dev_name = "pinctrl-foo",
        .function = "mmc0",
        .group = "mmc0_1_grp",
        .dev_name = "foo-mmc.0",
 },
 {
-       .name "4bit"
+       .name "4bit"
        .ctrl_dev_name = "pinctrl-foo",
        .function = "mmc0",
        .group = "mmc0_2_grp",
        .dev_name = "foo-mmc.0",
 },
 {
-       .name "8bit"
+       .name "8bit"
        .ctrl_dev_name = "pinctrl-foo",
-       .function = "mmc0",
        .group = "mmc0_1_grp",
        .dev_name = "foo-mmc.0",
 },
 {
-       .name "8bit"
+       .name "8bit"
        .ctrl_dev_name = "pinctrl-foo",
        .function = "mmc0",
        .group = "mmc0_2_grp",
        .dev_name = "foo-mmc.0",
 },
 {
-       .name "8bit"
+       .name "8bit"
        .ctrl_dev_name = "pinctrl-foo",
        .function = "mmc0",
        .group = "mmc0_3_grp",
@@ -995,7 +994,7 @@ This is enabled by simply setting the .hog_on_boot field in the map to true,
 like this:
 
 {
-       .name "POWERMAP"
+       .name "POWERMAP"
        .ctrl_dev_name = "pinctrl-foo",
        .function = "power_func",
        .hog_on_boot = true,
@@ -1025,7 +1024,7 @@ it, disables and releases it, and muxes it in on the pins defined by group B:
 
 foo_switch()
 {
-       struct pinmux pmx;
+       struct pinmux *pmx;
 
        /* Enable on position A */
        pmx = pinmux_get(&device, "spi0-pos-A");
index 40a4c65f380a10479a7a805d58310be97b1a2ee1..262acf56fa79b6cb5973b5e408e6bcd8b437b33d 100644 (file)
@@ -15,7 +15,7 @@ test at least a couple of times in a row for confidence.  [This is necessary,
 because some problems only show up on a second attempt at suspending and
 resuming the system.]  Moreover, hibernating in the "reboot" and "shutdown"
 modes causes the PM core to skip some platform-related callbacks which on ACPI
-systems might be necessary to make hibernation work.  Thus, if you machine fails
+systems might be necessary to make hibernation work.  Thus, if your machine fails
 to hibernate or resume in the "reboot" mode, you should try the "platform" mode:
 
 # echo platform > /sys/power/disk
index 6ccb68f68da685a39cefc0c2bf0609924c292b38..ebd7490ef1df8eefdbaeb94ad656aeb091f9368f 100644 (file)
@@ -120,10 +120,10 @@ So in practice, the 'at all' may become a 'why freeze kernel threads?' and
 freezing user threads I don't find really objectionable."
 
 Still, there are kernel threads that may want to be freezable.  For example, if
-a kernel that belongs to a device driver accesses the device directly, it in
-principle needs to know when the device is suspended, so that it doesn't try to
-access it at that time.  However, if the kernel thread is freezable, it will be
-frozen before the driver's .suspend() callback is executed and it will be
+a kernel thread that belongs to a device driver accesses the device directly, it
+in principle needs to know when the device is suspended, so that it doesn't try
+to access it at that time.  However, if the kernel thread is freezable, it will
+be frozen before the driver's .suspend() callback is executed and it will be
 thawed after the driver's .resume() callback has run, so it won't be accessing
 the device while it's suspended.
 
index 64adb98b181c717f763bff24109bc8bf9aff104a..57566bacb4c56c8182f0e9b16bafe74c9b1233ab 100644 (file)
@@ -1,3 +1,13 @@
+Release Date    : Fri. Jan 6, 2012 17:00:00 PST 2010 -
+                       (emaild-id:megaraidlinux@lsi.com)
+                       Adam Radford
+Current Version : 00.00.06.14-rc1
+Old Version     : 00.00.06.12-rc1
+    1. Fix reglockFlags for degraded raid5/6 for MR 9360/9380.
+    2. Mask off flags in ioctl path to prevent memory scribble with older
+       MegaCLI versions.
+    3. Remove poll_mode_io module paramater, sysfs node, and associated code.
+-------------------------------------------------------------------------------
 Release Date    : Wed. Oct 5, 2011 17:00:00 PST 2010 -
                        (emaild-id:megaraidlinux@lsi.com)
                        Adam Radford
index 494980e404912a861a238423e9189708549eaa66..ab899591ecb7a93dfe6c8b936042c3b8f400bd42 100644 (file)
@@ -1,32 +1,11 @@
 Copyright (c) 2003-2011 QLogic Corporation
-QLogic Linux iSCSI HBA Driver
+QLogic Linux iSCSI Driver
 
 This program includes a device driver for Linux 3.x.
 You may modify and redistribute the device driver code under the
 GNU General Public License (a copy of which is attached hereto as
 Exhibit A) published by the Free Software Foundation (version 2).
 
-REGARDLESS OF WHAT LICENSING MECHANISM IS USED OR APPLICABLE,
-THIS PROGRAM IS PROVIDED BY QLOGIC CORPORATION "AS IS'' AND ANY
-EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
-PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR
-BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
-TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
-POSSIBILITY OF SUCH DAMAGE.
-
-USER ACKNOWLEDGES AND AGREES THAT USE OF THIS PROGRAM WILL NOT
-CREATE OR GIVE GROUNDS FOR A LICENSE BY IMPLICATION, ESTOPPEL, OR
-OTHERWISE IN ANY INTELLECTUAL PROPERTY RIGHTS (PATENT, COPYRIGHT,
-TRADE SECRET, MASK WORK, OR OTHER PROPRIETARY RIGHT) EMBODIED IN
-ANY OTHER QLOGIC HARDWARE OR SOFTWARE EITHER SOLELY OR IN
-COMBINATION WITH THIS PROGRAM.
-
 
 EXHIBIT A
 
index 21fd05c28e738e146b313081bbd4294687ddabdf..f0ab5cf28fcae0a1ff0783bf8a4ddc477447f585 100644 (file)
@@ -25,7 +25,8 @@ Procedure for submitting patches to the -stable tree:
 
  - Send the patch, after verifying that it follows the above rules, to
    stable@vger.kernel.org.  You must note the upstream commit ID in the
-   changelog of your submission.
+   changelog of your submission, as well as the kernel version you wish
+   it to be applied to.
  - To have the patch automatically included in the stable tree, add the tag
      Cc: stable@vger.kernel.org
    in the sign-off area. Once the patch is merged it will be applied to
index 8c20fbd8b42dd922daa92f223bbefa9ffcc4f8e3..6d78841fd41677d4f81dbcb53eed7ab8602cddb4 100644 (file)
@@ -601,6 +601,8 @@ can be ORed together:
         instead of using the one provided by the hardware.
  512 - A kernel warning has occurred.
 1024 - A module from drivers/staging was loaded.
+2048 - The system is working around a severe firmware bug.
+4096 - An out-of-tree module has been loaded.
 
 ==============================================================
 
index 7ef9b843d529a5fadbe05eb19ad6c34fd8293cd5..6e21b8b52638a1f0ab014cd46b532d341ba17619 100755 (executable)
@@ -230,14 +230,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "#include <linux/ctype.h>\n"
        buf += "#include <asm/unaligned.h>\n\n"
        buf += "#include <target/target_core_base.h>\n"
-       buf += "#include <target/target_core_transport.h>\n"
-       buf += "#include <target/target_core_fabric_ops.h>\n"
+       buf += "#include <target/target_core_fabric.h>\n"
        buf += "#include <target/target_core_fabric_configfs.h>\n"
-       buf += "#include <target/target_core_fabric_lib.h>\n"
-       buf += "#include <target/target_core_device.h>\n"
-       buf += "#include <target/target_core_tpg.h>\n"
        buf += "#include <target/target_core_configfs.h>\n"
-       buf += "#include <target/target_core_base.h>\n"
        buf += "#include <target/configfs_macros.h>\n\n"
        buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
        buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
@@ -260,7 +255,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
        buf += "                return ERR_PTR(-EINVAL); */\n"
        buf += "        se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
-       buf += "        if (!(se_nacl_new))\n"
+       buf += "        if (!se_nacl_new)\n"
        buf += "                return ERR_PTR(-ENOMEM);\n"
        buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
        buf += "        nexus_depth = 1;\n"
@@ -308,7 +303,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
        buf += "                return ERR_PTR(-EINVAL);\n\n"
        buf += "        tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
-       buf += "        if (!(tpg)) {\n"
+       buf += "        if (!tpg) {\n"
        buf += "                printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
        buf += "                return ERR_PTR(-ENOMEM);\n"
        buf += "        }\n"
@@ -344,7 +339,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
        buf += "                return ERR_PTR(-EINVAL); */\n\n"
        buf += "        " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
-       buf += "        if (!(" + fabric_mod_port + ")) {\n"
+       buf += "        if (!" + fabric_mod_port + ") {\n"
        buf += "                printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
        buf += "                return ERR_PTR(-ENOMEM);\n"
        buf += "        }\n"
@@ -352,7 +347,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        if proto_ident == "FC" or proto_ident == "SAS":
                buf += "        " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
 
-       buf += "        /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "__NAMELEN, wwpn); */\n\n"
+       buf += "        /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
        buf += "        return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
        buf += "}\n\n"
        buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
@@ -391,8 +386,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .tpg_alloc_fabric_acl           = " + fabric_mod_name + "_alloc_fabric_acl,\n"
        buf += "        .tpg_release_fabric_acl         = " + fabric_mod_name + "_release_fabric_acl,\n"
        buf += "        .tpg_get_inst_index             = " + fabric_mod_name + "_tpg_get_inst_index,\n"
-       buf += "        .release_cmd_to_pool            = " + fabric_mod_name + "_release_cmd,\n"
-       buf += "        .release_cmd_direct             = " + fabric_mod_name + "_release_cmd,\n"
+       buf += "        .release_cmd                    = " + fabric_mod_name + "_release_cmd,\n"
        buf += "        .shutdown_session               = " + fabric_mod_name + "_shutdown_session,\n"
        buf += "        .close_session                  = " + fabric_mod_name + "_close_session,\n"
        buf += "        .stop_session                   = " + fabric_mod_name + "_stop_session,\n"
@@ -405,14 +399,12 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        .set_default_node_attributes    = " + fabric_mod_name + "_set_default_node_attrs,\n"
        buf += "        .get_task_tag                   = " + fabric_mod_name + "_get_task_tag,\n"
        buf += "        .get_cmd_state                  = " + fabric_mod_name + "_get_cmd_state,\n"
-       buf += "        .new_cmd_failure                = " + fabric_mod_name + "_new_cmd_failure,\n"
        buf += "        .queue_data_in                  = " + fabric_mod_name + "_queue_data_in,\n"
        buf += "        .queue_status                   = " + fabric_mod_name + "_queue_status,\n"
        buf += "        .queue_tm_rsp                   = " + fabric_mod_name + "_queue_tm_rsp,\n"
        buf += "        .get_fabric_sense_len           = " + fabric_mod_name + "_get_fabric_sense_len,\n"
        buf += "        .set_fabric_sense_len           = " + fabric_mod_name + "_set_fabric_sense_len,\n"
        buf += "        .is_state_remove                = " + fabric_mod_name + "_is_state_remove,\n"
-       buf += "        .pack_lun                       = " + fabric_mod_name + "_pack_lun,\n"
        buf += "        /*\n"
        buf += "         * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
        buf += "         */\n"
@@ -439,9 +431,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "         * Register the top level struct config_item_type with TCM core\n"
        buf += "         */\n"
        buf += "        fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
-       buf += "        if (!(fabric)) {\n"
+       buf += "        if (IS_ERR(fabric)) {\n"
        buf += "                printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
-       buf += "                return -ENOMEM;\n"
+       buf += "                return PTR_ERR(fabric);\n"
        buf += "        }\n"
        buf += "        /*\n"
        buf += "         * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
@@ -475,9 +467,9 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "        printk(KERN_INFO \"" +  fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
        buf += "        return 0;\n"
        buf += "};\n\n"
-       buf += "static void " + fabric_mod_name + "_deregister_configfs(void)\n"
+       buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
        buf += "{\n"
-       buf += "        if (!(" + fabric_mod_name + "_fabric_configfs))\n"
+       buf += "        if (!" + fabric_mod_name + "_fabric_configfs)\n"
        buf += "                return;\n\n"
        buf += "        target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
        buf += "        " + fabric_mod_name + "_fabric_configfs = NULL;\n"
@@ -492,17 +484,15 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "                return ret;\n\n"
        buf += "        return 0;\n"
        buf += "};\n\n"
-       buf += "static void " + fabric_mod_name + "_exit(void)\n"
+       buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
        buf += "{\n"
        buf += "        " + fabric_mod_name + "_deregister_configfs();\n"
        buf += "};\n\n"
 
-       buf += "#ifdef MODULE\n"
        buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
        buf += "MODULE_LICENSE(\"GPL\");\n"
        buf += "module_init(" + fabric_mod_name + "_init);\n"
        buf += "module_exit(" + fabric_mod_name + "_exit);\n"
-       buf += "#endif\n"
 
        ret = p.write(buf)
        if ret:
@@ -514,7 +504,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
 
 def tcm_mod_scan_fabric_ops(tcm_dir):
 
-       fabric_ops_api = tcm_dir + "include/target/target_core_fabric_ops.h"
+       fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
 
        print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
        process_fo = 0;
@@ -579,11 +569,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
        buf += "#include <scsi/scsi_cmnd.h>\n"
        buf += "#include <scsi/libfc.h>\n\n"
        buf += "#include <target/target_core_base.h>\n"
-       buf += "#include <target/target_core_transport.h>\n"
-       buf += "#include <target/target_core_fabric_ops.h>\n"
-       buf += "#include <target/target_core_fabric_lib.h>\n"
-       buf += "#include <target/target_core_device.h>\n"
-       buf += "#include <target/target_core_tpg.h>\n"
+       buf += "#include <target/target_core_fabric.h>\n"
        buf += "#include <target/target_core_configfs.h>\n\n"
        buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
        buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
@@ -788,7 +774,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        buf += "{\n"
                        buf += "        struct " + fabric_mod_name + "_nacl *nacl;\n\n"
                        buf += "        nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
-                       buf += "        if (!(nacl)) {\n"
+                       buf += "        if (!nacl) {\n"
                        buf += "                printk(KERN_ERR \"Unable to alocate struct " + fabric_mod_name + "_nacl\\n\");\n"
                        buf += "                return NULL;\n"
                        buf += "        }\n\n"
@@ -815,7 +801,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        buf += "}\n\n"
                        bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
 
-               if re.search('release_cmd_to_pool', fo):
+               if re.search('\*release_cmd\)\(', fo):
                        buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
                        buf += "        return;\n"
@@ -899,13 +885,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        buf += "}\n\n"
                        bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
 
-               if re.search('new_cmd_failure\)\(', fo):
-                       buf += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *se_cmd)\n"
-                       buf += "{\n"
-                       buf += "        return;\n"
-                       buf += "}\n\n"
-                       bufi += "void " + fabric_mod_name + "_new_cmd_failure(struct se_cmd *);\n"
-
                if re.search('queue_data_in\)\(', fo):
                        buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
                        buf += "{\n"
@@ -948,15 +927,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
                        buf += "}\n\n"
                        bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
 
-               if re.search('pack_lun\)\(', fo):
-                       buf += "u64 " + fabric_mod_name + "_pack_lun(unsigned int lun)\n"
-                       buf += "{\n"
-                       buf += "        WARN_ON(lun >= 256);\n"
-                       buf += "        /* Caller wants this byte-swapped */\n"
-                       buf += "        return cpu_to_le64((lun & 0xff) << 8);\n"
-                       buf += "}\n\n"
-                       bufi += "u64 " + fabric_mod_name + "_pack_lun(unsigned int);\n"
-
 
        ret = p.write(buf)
        if ret:
index b61e46f449aa1e0bf376de21ddf2eec55c196a39..1733ab947a95d3849ff650e7252cfb1d0cb99d11 100644 (file)
@@ -284,7 +284,7 @@ method, the sys I/F structure will be built like this:
 The framework includes a simple notification mechanism, in the form of a
 netlink event. Netlink socket initialization is done during the _init_
 of the framework. Drivers which intend to use the notification mechanism
-just need to call generate_netlink_event() with two arguments viz
+just need to call thermal_generate_netlink_event() with two arguments viz
 (originator, event). Typically the originator will be an integer assigned
 to a thermal_zone_device when it registers itself with the framework. The
 event will be one of:{THERMAL_AUX0, THERMAL_AUX1, THERMAL_CRITICAL,
index 26aa0573933e8f5f970f7f18b3f53f0afd37eb6d..e2492a9d1027b95be62b9fde675b043643a7f01c 100644 (file)
@@ -666,27 +666,6 @@ a control of this type whenever the first control belonging to a new control
 class is added.
 
 
-Differences from the Spec
-=========================
-
-There are a few places where the framework acts slightly differently from the
-V4L2 Specification. Those differences are described in this section. We will
-have to see whether we need to adjust the spec or not.
-
-1) It is no longer required to have all controls contained in a
-v4l2_ext_control array be from the same control class. The framework will be
-able to handle any type of control in the array. You need to set ctrl_class
-to 0 in order to enable this. If ctrl_class is non-zero, then it will still
-check that all controls belong to that control class.
-
-If you set ctrl_class to 0 and count to 0, then it will only return an error
-if there are no controls at all.
-
-2) Clarified the way error_idx works. For get and set it will be equal to
-count if nothing was done yet. If it is less than count then only the controls
-up to error_idx-1 were successfully applied.
-
-
 Proposals for Extensions
 ========================
 
index 8e601991d91c6b4e0247cd9b7a5735f3df7661b3..924bd462675e90ab222cc45d8824f734be72612c 100644 (file)
@@ -4,8 +4,6 @@ Virtualization support in the Linux kernel.
        - this file.
 kvm/
        - Kernel Virtual Machine.  See also http://linux-kvm.org
-lguest/
-       - Extremely simple hypervisor for experimental/educational use.
 uml/
        - User Mode Linux, builds/runs Linux kernel as a userspace program.
 virtio.txt
index 2a90101309d1b396a9f8417a52dd55256b333731..55ca0bea142c58e3eb8574084c770059de5fb779 100644 (file)
@@ -159,7 +159,7 @@ S:  Maintained
 F:     drivers/net/ethernet/realtek/r8169.c
 
 8250/16?50 (AND CLONE UARTS) SERIAL DRIVER
-M:     Greg Kroah-Hartman <gregkh@suse.de>
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     linux-serial@vger.kernel.org
 W:     http://serial.sourceforge.net
 S:     Maintained
@@ -745,6 +745,7 @@ M:  Barry Song <baohua.song@csr.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-prima2/
+F:     drivers/dma/sirf-dma*
 
 ARM/EBSA110 MACHINE SUPPORT
 M:     Russell King <linux@arm.linux.org.uk>
@@ -788,12 +789,6 @@ F: arch/arm/mach-mx*/
 F:     arch/arm/mach-imx/
 F:     arch/arm/plat-mxc/
 
-ARM/FREESCALE IMX51
-M:     Amit Kucheria <amit.kucheria@canonical.com>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Maintained
-F:     arch/arm/mach-mx5/
-
 ARM/FREESCALE IMX6
 M:     Shawn Guo <shawn.guo@linaro.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1411,6 +1406,7 @@ F:        net/ax25/
 B43 WIRELESS DRIVER
 M:     Stefano Brivio <stefano.brivio@polimi.it>
 L:     linux-wireless@vger.kernel.org
+L:     b43-dev@lists.infradead.org (moderated for non-subscribers)
 W:     http://linuxwireless.org/en/users/Drivers/b43
 S:     Maintained
 F:     drivers/net/wireless/b43/
@@ -1587,6 +1583,13 @@ L:       linux-scsi@vger.kernel.org
 S:     Supported
 F:     drivers/scsi/bnx2fc/
 
+BROADCOM SPECIFIC AMBA DRIVER (BCMA)
+M:     Rafał Miłecki <zajec5@gmail.com>
+L:     linux-wireless@vger.kernel.org
+S:     Maintained
+F:     drivers/bcma/
+F:     include/linux/bcma/
+
 BROCADE BFA FC SCSI DRIVER
 M:     Jing Huang <huangj@brocade.com>
 L:     linux-scsi@vger.kernel.org
@@ -1774,9 +1777,9 @@ X:        net/wireless/wext*
 
 CHAR and MISC DRIVERS
 M:     Arnd Bergmann <arnd@arndb.de>
-M:     Greg Kroah-Hartman <greg@kroah.com>
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
-S:     Maintained
+S:     Supported
 F:     drivers/char/*
 F:     drivers/misc/*
 
@@ -2237,6 +2240,17 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/teigland/dlm.git
 S:     Supported
 F:     fs/dlm/
 
+DMA BUFFER SHARING FRAMEWORK
+M:     Sumit Semwal <sumit.semwal@linaro.org>
+S:     Maintained
+L:     linux-media@vger.kernel.org
+L:     dri-devel@lists.freedesktop.org
+L:     linaro-mm-sig@lists.linaro.org
+F:     drivers/base/dma-buf*
+F:     include/linux/dma-buf*
+F:     Documentation/dma-buf-sharing.txt
+T:     git git://git.linaro.org/people/sumitsemwal/linux-dma-buf.git
+
 DMA GENERIC OFFLOAD ENGINE SUBSYSTEM
 M:     Vinod Koul <vinod.koul@intel.com>
 M:     Dan Williams <dan.j.williams@intel.com>
@@ -2267,7 +2281,7 @@ F:        drivers/acpi/dock.c
 DOCUMENTATION
 M:     Randy Dunlap <rdunlap@xenotime.net>
 L:     linux-doc@vger.kernel.org
-T:     quilt http://userweb.kernel.org/~rdunlap/kernel-doc-patches/current/
+T:     quilt http://xenotime.net/kernel-doc-patches/current/
 S:     Maintained
 F:     Documentation/
 
@@ -2300,7 +2314,7 @@ F:        lib/lru_cache.c
 F:     Documentation/blockdev/drbd/
 
 DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
-M:     Greg Kroah-Hartman <gregkh@suse.de>
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core-2.6.git
 S:     Supported
 F:     Documentation/kobject.txt
@@ -2330,6 +2344,9 @@ F:        include/drm/i915*
 
 DRM DRIVERS FOR EXYNOS
 M:     Inki Dae <inki.dae@samsung.com>
+M:     Joonyoung Shim <jy0922.shim@samsung.com>
+M:     Seung-Woo Kim <sw0312.kim@samsung.com>
+M:     Kyungmin Park <kyungmin.park@samsung.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Supported
 F:     drivers/gpu/drm/exynos
@@ -2382,7 +2399,7 @@ F:        net/bridge/netfilter/ebt*.c
 
 ECRYPT FILE SYSTEM
 M:     Tyler Hicks <tyhicks@canonical.com>
-M:     Dustin Kirkland <kirkland@canonical.com>
+M:     Dustin Kirkland <dustin.kirkland@gazzang.com>
 L:     ecryptfs@vger.kernel.org
 W:     https://launchpad.net/ecryptfs
 S:     Supported
@@ -3969,11 +3986,11 @@ M:      Rusty Russell <rusty@rustcorp.com.au>
 L:     lguest@lists.ozlabs.org
 W:     http://lguest.ozlabs.org/
 S:     Odd Fixes
-F:     Documentation/virtual/lguest/
+F:     arch/x86/include/asm/lguest*.h
 F:     arch/x86/lguest/
 F:     drivers/lguest/
 F:     include/linux/lguest*.h
-F:     arch/x86/include/asm/lguest*.h
+F:     tools/lguest/
 
 LINUX FOR IBM pSERIES (RS/6000)
 M:     Paul Mackerras <paulus@au.ibm.com>
@@ -4113,10 +4130,11 @@ L:      linux-ntfs-dev@lists.sourceforge.net
 W:     http://www.linux-ntfs.org/content/view/19/37/
 S:     Maintained
 F:     Documentation/ldm.txt
-F:     fs/partitions/ldm.*
+F:     block/partitions/ldm.*
 
 LogFS
 M:     Joern Engel <joern@logfs.org>
+M:     Prasad Joshi <prasadjoshi.linux@gmail.com>
 L:     logfs@logfs.org
 W:     logfs.org
 S:     Maintained
@@ -4258,13 +4276,6 @@ S:       Orphan
 F:     drivers/video/matrox/matroxfb_*
 F:     include/linux/matroxfb.h
 
-MAX1668 TEMPERATURE SENSOR DRIVER
-M:     "David George" <david.george@ska.ac.za>
-L:     lm-sensors@lm-sensors.org
-S:     Maintained
-F:     Documentation/hwmon/max1668
-F:     drivers/hwmon/max1668.c
-
 MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
 M:     "Hans J. Koch" <hjk@hansjkoch.de>
 L:     lm-sensors@lm-sensors.org
@@ -5616,7 +5627,7 @@ W:        http://www.ibm.com/developerworks/linux/linux390/
 S:     Supported
 F:     arch/s390/
 F:     drivers/s390/
-F:     fs/partitions/ibm.c
+F:     block/partitions/ibm.c
 F:     Documentation/s390/
 F:     Documentation/DocBook/s390*
 
@@ -5846,7 +5857,7 @@ F:        drivers/mmc/host/sdhci-spear.c
 SECURITY SUBSYSTEM
 M:     James Morris <jmorris@namei.org>
 L:     linux-security-module@vger.kernel.org (suggested Cc:)
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/security-testing-2.6.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
 W:     http://security.wiki.kernel.org/
 S:     Supported
 F:     security/
@@ -6116,13 +6127,6 @@ S:       Maintained
 F:     drivers/ssb/
 F:     include/linux/ssb/
 
-BROADCOM SPECIFIC AMBA DRIVER (BCMA)
-M:     Rafał Miłecki <zajec5@gmail.com>
-L:     linux-wireless@vger.kernel.org
-S:     Maintained
-F:     drivers/bcma/
-F:     include/linux/bcma/
-
 SONY VAIO CONTROL DEVICE DRIVER
 M:     Mattia Dongili <malattia@linux.it>
 L:     platform-driver-x86@vger.kernel.org
@@ -6266,15 +6270,15 @@ S:      Maintained
 F:     arch/alpha/kernel/srm_env.c
 
 STABLE BRANCH
-M:     Greg Kroah-Hartman <greg@kroah.com>
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     stable@vger.kernel.org
-S:     Maintained
+S:     Supported
 
 STAGING SUBSYSTEM
-M:     Greg Kroah-Hartman <gregkh@suse.de>
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging.git
 L:     devel@driverdev.osuosl.org
-S:     Maintained
+S:     Supported
 F:     drivers/staging/
 
 STAGING - AGERE HERMES II and II.5 WIRELESS DRIVERS
@@ -6386,11 +6390,6 @@ M:       Omar Ramirez Luna <omar.ramirez@ti.com>
 S:     Odd Fixes
 F:     drivers/staging/tidspbridge/
 
-STAGING - TRIDENT TVMASTER TMxxxx USB VIDEO CAPTURE DRIVERS
-L:     linux-media@vger.kernel.org
-S:     Odd Fixes
-F:     drivers/staging/tm6000/
-
 STAGING - USB ENE SM/MS CARD READER DRIVER
 M:     Al Cho <acho@novell.com>
 S:     Odd Fixes
@@ -6659,10 +6658,10 @@ S:      Maintained
 K:     ^Subject:.*(?i)trivial
 
 TTY LAYER
-M:     Greg Kroah-Hartman <gregkh@suse.de>
-S:     Maintained
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/tty-2.6.git
-F:     drivers/tty/*
+F:     drivers/tty/
 F:     drivers/tty/serial/serial_core.c
 F:     include/linux/serial_core.h
 F:     include/linux/serial.h
@@ -6948,7 +6947,7 @@ S:        Maintained
 F:     drivers/usb/serial/digi_acceleport.c
 
 USB SERIAL DRIVER
-M:     Greg Kroah-Hartman <gregkh@suse.de>
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     linux-usb@vger.kernel.org
 S:     Supported
 F:     Documentation/usb/usb-serial.txt
@@ -6963,9 +6962,8 @@ S:        Maintained
 F:     drivers/usb/serial/empeg.c
 
 USB SERIAL KEYSPAN DRIVER
-M:     Greg Kroah-Hartman <greg@kroah.com>
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     linux-usb@vger.kernel.org
-W:     http://www.kroah.com/linux/
 S:     Maintained
 F:     drivers/usb/serial/*keyspan*
 
@@ -6993,7 +6991,7 @@ F:        Documentation/video4linux/sn9c102.txt
 F:     drivers/media/video/sn9c102/
 
 USB SUBSYSTEM
-M:     Greg Kroah-Hartman <gregkh@suse.de>
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 L:     linux-usb@vger.kernel.org
 W:     http://www.linux-usb.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/usb-2.6.git
@@ -7080,7 +7078,7 @@ F:        fs/hppfs/
 
 USERSPACE I/O (UIO)
 M:     "Hans J. Koch" <hjk@hansjkoch.de>
-M:     Greg Kroah-Hartman <gregkh@suse.de>
+M:     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 S:     Maintained
 F:     Documentation/DocBook/uio-howto.tmpl
 F:     drivers/uio/
@@ -7198,7 +7196,7 @@ S:        Maintained
 F:     drivers/net/vmxnet3/
 
 VMware PVSCSI driver
-M:     Alok Kataria <akataria@vmware.com>
+M:     Arvind Kumar <arvindkumar@vmware.com>
 M:     VMware PV-Drivers <pv-drivers@vmware.com>
 L:     linux-scsi@vger.kernel.org
 S:     Maintained
@@ -7355,6 +7353,7 @@ S:        Supported
 F:     Documentation/hwmon/wm83??
 F:     arch/arm/mach-s3c64xx/mach-crag6410*
 F:     drivers/leds/leds-wm83*.c
+F:     drivers/hwmon/wm83??-hwmon.c
 F:     drivers/input/misc/wm831x-on.c
 F:     drivers/input/touchscreen/wm831x-ts.c
 F:     drivers/input/touchscreen/wm97*.c
index adddd11c3b3b8a2827dc4f591b573b1fde5768a9..7c44b676b77a95739a700036173e3addb2946336 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
-PATCHLEVEL = 2
+PATCHLEVEL = 3
 SUBLEVEL = 0
-EXTRAVERSION =
+EXTRAVERSION = -rc3
 NAME = Saber-toothed Squirrel
 
 # *DOCUMENTATION*
@@ -312,7 +312,7 @@ endif
 # If the user is running make -s (silent mode), suppress echoing of
 # commands
 
-ifneq ($(findstring s,$(MAKEFLAGS)),)
+ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
   quiet=silent_
 endif
 
@@ -442,7 +442,7 @@ asm-generic:
 
 no-dot-config-targets := clean mrproper distclean \
                         cscope gtags TAGS tags help %docs check% coccicheck \
-                        include/linux/version.h headers_% \
+                        include/linux/version.h headers_% archheaders \
                         kernelversion %src-pkg
 
 config-targets := 0
@@ -979,7 +979,7 @@ prepare1: prepare2 include/linux/version.h include/generated/utsrelease.h \
                    include/config/auto.conf
        $(cmd_crmodverdir)
 
-archprepare: prepare1 scripts_basic
+archprepare: archheaders prepare1 scripts_basic
 
 prepare0: archprepare FORCE
        $(Q)$(MAKE) $(build)=.
@@ -1046,8 +1046,11 @@ hdr-inst := -rR -f $(srctree)/scripts/Makefile.headersinst obj
 # If we do an all arch process set dst to asm-$(hdr-arch)
 hdr-dst = $(if $(KBUILD_HEADERS), dst=include/asm-$(hdr-arch), dst=include/asm)
 
+PHONY += archheaders
+archheaders:
+
 PHONY += __headers
-__headers: include/linux/version.h scripts_basic asm-generic FORCE
+__headers: include/linux/version.h scripts_basic asm-generic archheaders FORCE
        $(Q)$(MAKE) $(build)=scripts build_unifdef
 
 PHONY += headers_install_all
index 24626b0419ee97e963e68329a8eb6769360b46ea..a48aecc17eacc2e3d3f5cf4b0ff4183f29b33440 100644 (file)
@@ -754,7 +754,7 @@ config ARCH_SA1100
        select ARCH_HAS_CPUFREQ
        select CPU_FREQ
        select GENERIC_CLOCKEVENTS
-       select CLKDEV_LOOKUP
+       select HAVE_CLK
        select HAVE_SCHED_CLOCK
        select TICK_ONESHOT
        select ARCH_REQUIRE_GPIOLIB
@@ -825,7 +825,6 @@ config ARCH_S5PC100
        select HAVE_CLK
        select CLKDEV_LOOKUP
        select CPU_V7
-       select ARM_L1_CACHE_SHIFT_6
        select ARCH_USES_GETTIMEOFFSET
        select HAVE_S3C2410_I2C if I2C
        select HAVE_S3C_RTC if RTC_CLASS
@@ -842,7 +841,6 @@ config ARCH_S5PV210
        select HAVE_CLK
        select CLKDEV_LOOKUP
        select CLKSRC_MMIO
-       select ARM_L1_CACHE_SHIFT_6
        select ARCH_HAS_CPUFREQ
        select GENERIC_CLOCKEVENTS
        select HAVE_SCHED_CLOCK
index 40319d91bb7fb7fd9381c8f60cc2423230d83c0b..1683bfb9166fa0d8d1124a2d23f5d5d347cdc4a0 100644 (file)
@@ -160,7 +160,6 @@ machine-$(CONFIG_ARCH_MSM)          := msm
 machine-$(CONFIG_ARCH_MV78XX0)         := mv78xx0
 machine-$(CONFIG_ARCH_IMX_V4_V5)       := imx
 machine-$(CONFIG_ARCH_IMX_V6_V7)       := imx
-machine-$(CONFIG_ARCH_MX5)             := mx5
 machine-$(CONFIG_ARCH_MXS)             := mxs
 machine-$(CONFIG_ARCH_NETX)            := netx
 machine-$(CONFIG_ARCH_NOMADIK)         := nomadik
index 5df26a9976a26c10ddfcf0f788302dd2caeb8769..fc871e719aae8bd6e94d5602591f2948f568f8e9 100644 (file)
@@ -59,9 +59,11 @@ $(obj)/zImage:       $(obj)/compressed/vmlinux FORCE
 
 endif
 
+targets += $(dtb-y)
+
 # Rule to build device tree blobs
-$(obj)/%.dtb: $(src)/dts/%.dts
-       $(call cmd,dtc)
+$(obj)/%.dtb: $(src)/dts/%.dts FORCE
+       $(call if_changed_dep,dtc)
 
 $(obj)/dtbs: $(addprefix $(obj)/, $(dtb-y))
 
index b2dc2dd7f1df6d25fbba306e0822563883b18859..c47d6199b784c1f7aa3c4838dc9aea2d5cf576a0 100644 (file)
@@ -41,6 +41,7 @@
 
 #include <asm/irq.h>
 #include <asm/exception.h>
+#include <asm/smp_plat.h>
 #include <asm/mach/irq.h>
 #include <asm/hardware/gic.h>
 
@@ -352,11 +353,7 @@ static void __init gic_dist_init(struct gic_chip_data *gic)
        unsigned int gic_irqs = gic->gic_irqs;
        struct irq_domain *domain = &gic->domain;
        void __iomem *base = gic_data_dist_base(gic);
-       u32 cpu = 0;
-
-#ifdef CONFIG_SMP
-       cpu = cpu_logical_map(smp_processor_id());
-#endif
+       u32 cpu = cpu_logical_map(smp_processor_id());
 
        cpumask = 1 << cpu;
        cpumask |= cpumask << 8;
similarity index 80%
rename from arch/arm/configs/mx5_defconfig
rename to arch/arm/configs/imx_v6_v7_defconfig
index d0d8dfece37ee7073023b7ea3f1fade5ffebc1d0..3a4fb2e5fc68fb53c54467d466da94d295703f72 100644 (file)
@@ -3,6 +3,7 @@ CONFIG_EXPERIMENTAL=y
 CONFIG_KERNEL_LZO=y
 CONFIG_SYSVIPC=y
 CONFIG_LOG_BUF_SHIFT=18
+CONFIG_CGROUPS=y
 CONFIG_RELAY=y
 CONFIG_EXPERT=y
 # CONFIG_SLUB_DEBUG is not set
@@ -14,20 +15,31 @@ CONFIG_MODULE_SRCVERSION_ALL=y
 # CONFIG_LBDAF is not set
 # CONFIG_BLK_DEV_BSG is not set
 CONFIG_ARCH_MXC=y
-CONFIG_ARCH_MX5=y
-CONFIG_MACH_MX51_BABBAGE=y
+CONFIG_MACH_MX31LILLY=y
+CONFIG_MACH_MX31LITE=y
+CONFIG_MACH_PCM037=y
+CONFIG_MACH_PCM037_EET=y
+CONFIG_MACH_MX31_3DS=y
+CONFIG_MACH_MX31MOBOARD=y
+CONFIG_MACH_QONG=y
+CONFIG_MACH_ARMADILLO5X0=y
+CONFIG_MACH_KZM_ARM11_01=y
+CONFIG_MACH_PCM043=y
+CONFIG_MACH_MX35_3DS=y
+CONFIG_MACH_EUKREA_CPUIMX35=y
+CONFIG_MACH_VPR200=y
+CONFIG_MACH_IMX51_DT=y
 CONFIG_MACH_MX51_3DS=y
 CONFIG_MACH_EUKREA_CPUIMX51=y
 CONFIG_MACH_EUKREA_CPUIMX51SD=y
 CONFIG_MACH_MX51_EFIKAMX=y
 CONFIG_MACH_MX51_EFIKASB=y
-CONFIG_MACH_MX53_EVK=y
-CONFIG_MACH_MX53_SMD=y
-CONFIG_MACH_MX53_LOCO=y
-CONFIG_MACH_MX53_ARD=y
+CONFIG_MACH_IMX53_DT=y
+CONFIG_SOC_IMX6Q=y
 CONFIG_MXC_PWM=y
 CONFIG_NO_HZ=y
 CONFIG_HIGH_RES_TIMERS=y
+CONFIG_SMP=y
 CONFIG_VMSPLIT_2G=y
 CONFIG_PREEMPT_VOLUNTARY=y
 CONFIG_AEABI=y
@@ -49,7 +61,7 @@ CONFIG_IP_PNP_DHCP=y
 # CONFIG_INET_XFRM_MODE_TUNNEL is not set
 # CONFIG_INET_XFRM_MODE_BEET is not set
 # CONFIG_INET_LRO is not set
-# CONFIG_IPV6 is not set
+CONFIG_IPV6=y
 # CONFIG_WIRELESS is not set
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
@@ -68,24 +80,20 @@ CONFIG_SCSI_SCAN_ASYNC=y
 CONFIG_ATA=y
 CONFIG_PATA_IMX=y
 CONFIG_NETDEVICES=y
-CONFIG_MII=m
-CONFIG_MARVELL_PHY=y
-CONFIG_DAVICOM_PHY=y
-CONFIG_QSEMI_PHY=y
-CONFIG_LXT_PHY=y
-CONFIG_CICADA_PHY=y
-CONFIG_VITESSE_PHY=y
-CONFIG_SMSC_PHY=y
-CONFIG_BROADCOM_PHY=y
-CONFIG_ICPLUS_PHY=y
-CONFIG_REALTEK_PHY=y
-CONFIG_NATIONAL_PHY=y
-CONFIG_STE10XP=y
-CONFIG_LSI_ET1011C_PHY=y
-CONFIG_MICREL_PHY=y
-CONFIG_NET_ETHERNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
+# CONFIG_NET_VENDOR_BROADCOM is not set
+# CONFIG_NET_VENDOR_CHELSIO is not set
+# CONFIG_NET_VENDOR_FARADAY is not set
+CONFIG_FEC=y
+# CONFIG_NET_VENDOR_INTEL is not set
+# CONFIG_NET_VENDOR_MARVELL is not set
+# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_MICROCHIP is not set
+# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_SEEQ is not set
+CONFIG_SMC91X=y
+CONFIG_SMC911X=y
+CONFIG_SMSC911X=y
+# CONFIG_NET_VENDOR_STMICRO is not set
 # CONFIG_WLAN is not set
 # CONFIG_INPUT_MOUSEDEV_PSAUX is not set
 CONFIG_INPUT_EVDEV=y
@@ -124,7 +132,6 @@ CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_MXC=y
 CONFIG_USB_STORAGE=y
 CONFIG_MMC=y
-CONFIG_MMC_BLOCK=m
 CONFIG_MMC_SDHCI=y
 CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_ESDHC_IMX=y
@@ -133,6 +140,8 @@ CONFIG_LEDS_CLASS=y
 CONFIG_RTC_CLASS=y
 CONFIG_RTC_INTF_DEV_UIE_EMUL=y
 CONFIG_RTC_MXC=y
+CONFIG_DMADEVICES=y
+CONFIG_IMX_SDMA=y
 CONFIG_EXT2_FS=y
 CONFIG_EXT2_FS_XATTR=y
 CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/configs/mx3_defconfig b/arch/arm/configs/mx3_defconfig
deleted file mode 100644 (file)
index cb0717f..0000000
+++ /dev/null
@@ -1,144 +0,0 @@
-CONFIG_EXPERIMENTAL=y
-CONFIG_SYSVIPC=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_EXPERT=y
-CONFIG_SLAB=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODULE_FORCE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-CONFIG_ARCH_MXC=y
-CONFIG_MACH_MX31ADS_WM1133_EV1=y
-CONFIG_MACH_MX31LILLY=y
-CONFIG_MACH_MX31LITE=y
-CONFIG_MACH_PCM037=y
-CONFIG_MACH_PCM037_EET=y
-CONFIG_MACH_MX31_3DS=y
-CONFIG_MACH_MX31MOBOARD=y
-CONFIG_MACH_QONG=y
-CONFIG_MACH_ARMADILLO5X0=y
-CONFIG_MACH_KZM_ARM11_01=y
-CONFIG_MACH_PCM043=y
-CONFIG_MACH_MX35_3DS=y
-CONFIG_MACH_EUKREA_CPUIMX35=y
-CONFIG_MXC_IRQ_PRIOR=y
-CONFIG_MXC_PWM=y
-CONFIG_ARM_ERRATA_411920=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_PREEMPT=y
-CONFIG_AEABI=y
-CONFIG_ZBOOT_ROM_TEXT=0x0
-CONFIG_ZBOOT_ROM_BSS=0x0
-CONFIG_CMDLINE="noinitrd console=ttymxc0,115200 root=/dev/mtdblock2 rw ip=off"
-CONFIG_VFP=y
-CONFIG_PM_DEBUG=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
-# CONFIG_INET_XFRM_MODE_TUNNEL is not set
-# CONFIG_INET_XFRM_MODE_BEET is not set
-# CONFIG_INET_LRO is not set
-# CONFIG_INET_DIAG is not set
-# CONFIG_IPV6 is not set
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_FW_LOADER=m
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_CHAR=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_CFI=y
-CONFIG_MTD_PHYSMAP=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_MXC=y
-CONFIG_MTD_UBI=y
-# CONFIG_BLK_DEV is not set
-CONFIG_MISC_DEVICES=y
-CONFIG_EEPROM_AT24=y
-CONFIG_NETDEVICES=y
-CONFIG_SMSC_PHY=y
-CONFIG_NET_ETHERNET=y
-CONFIG_SMSC911X=y
-CONFIG_DNET=y
-# CONFIG_NETDEV_1000 is not set
-# CONFIG_NETDEV_10000 is not set
-# CONFIG_INPUT_MOUSEDEV is not set
-# CONFIG_KEYBOARD_ATKBD is not set
-CONFIG_KEYBOARD_IMX=y
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO is not set
-# CONFIG_VT is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_8250=m
-CONFIG_SERIAL_8250_EXTENDED=y
-CONFIG_SERIAL_8250_SHARE_IRQ=y
-CONFIG_SERIAL_IMX=y
-CONFIG_SERIAL_IMX_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_I2C=y
-CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_IMX=y
-CONFIG_SPI=y
-CONFIG_W1=y
-CONFIG_W1_MASTER_MXC=y
-CONFIG_W1_SLAVE_THERM=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-CONFIG_IMX2_WDT=y
-CONFIG_MFD_WM8350_I2C=y
-CONFIG_REGULATOR=y
-CONFIG_REGULATOR_WM8350=y
-CONFIG_MEDIA_SUPPORT=y
-CONFIG_VIDEO_DEV=y
-# CONFIG_RC_CORE is not set
-# CONFIG_MEDIA_TUNER_CUSTOMISE is not set
-CONFIG_SOC_CAMERA=y
-CONFIG_SOC_CAMERA_MT9M001=y
-CONFIG_SOC_CAMERA_MT9M111=y
-CONFIG_SOC_CAMERA_MT9T031=y
-CONFIG_SOC_CAMERA_MT9V022=y
-CONFIG_SOC_CAMERA_TW9910=y
-CONFIG_SOC_CAMERA_OV772X=y
-CONFIG_VIDEO_MX3=y
-# CONFIG_RADIO_ADAPTERS is not set
-CONFIG_FB=y
-CONFIG_SOUND=y
-CONFIG_SND=y
-# CONFIG_SND_ARM is not set
-# CONFIG_SND_SPI is not set
-CONFIG_SND_SOC=y
-CONFIG_SND_IMX_SOC=y
-CONFIG_SND_MXC_SOC_WM1133_EV1=y
-CONFIG_SND_SOC_PHYCORE_AC97=y
-CONFIG_SND_SOC_EUKREA_TLV320=y
-CONFIG_USB=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MXC=y
-CONFIG_USB_GADGET=m
-CONFIG_USB_FSL_USB2=m
-CONFIG_USB_G_SERIAL=m
-CONFIG_USB_ULPI=y
-CONFIG_MMC=y
-CONFIG_MMC_MXC=y
-CONFIG_RTC_CLASS=y
-CONFIG_RTC_MXC=y
-CONFIG_DMADEVICES=y
-# CONFIG_DNOTIFY is not set
-CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
-CONFIG_UBIFS_FS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V3=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-# CONFIG_ENABLE_WARN_DEPRECATED is not set
-# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_SYSCTL_SYSCALL_CHECK=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
index b6e65dedfd716db5e336451868af811e34a684dc..62f8095d46de8f4f2b4fad93c4338ac5df54fa7e 100644 (file)
  */
 #ifdef CONFIG_THUMB2_KERNEL
 
-       .macro  usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
+       .macro  usraccoff, instr, reg, ptr, inc, off, cond, abort, t=TUSER()
 9999:
        .if     \inc == 1
        \instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
 
 #else  /* !CONFIG_THUMB2_KERNEL */
 
-       .macro  usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
+       .macro  usracc, instr, reg, ptr, inc, cond, rept, abort, t=TUSER()
        .rept   \rept
 9999:
        .if     \inc == 1
index af18ceaacf5d2ec2bafe51de722387fb3249ac4e..b5dc173d336f93f88d7fa63e0b0c91def5393255 100644 (file)
@@ -83,9 +83,9 @@
  * instructions (inline assembly)
  */
 #ifdef CONFIG_CPU_USE_DOMAINS
-#define T(instr)       #instr "t"
+#define TUSER(instr)   #instr "t"
 #else
-#define T(instr)       #instr
+#define TUSER(instr)   #instr
 #endif
 
 #else /* __ASSEMBLY__ */
@@ -95,9 +95,9 @@
  * instructions
  */
 #ifdef CONFIG_CPU_USE_DOMAINS
-#define T(instr)       instr ## t
+#define TUSER(instr)   instr ## t
 #else
-#define T(instr)       instr
+#define TUSER(instr)   instr
 #endif
 
 #endif /* __ASSEMBLY__ */
index 253cc86318bf84907e8f003bac9dc09ae72fab6b..7be54690aeec0f38be6bb2738a8a7df8a243021d 100644 (file)
@@ -75,9 +75,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)        \
        __asm__ __volatile__(                                   \
-       "1:     " T(ldr) "      %1, [%3]\n"                     \
+       "1:     " TUSER(ldr) "  %1, [%3]\n"                     \
        "       " insn "\n"                                     \
-       "2:     " T(str) "      %0, [%3]\n"                     \
+       "2:     " TUSER(str) "  %0, [%3]\n"                     \
        "       mov     %0, #0\n"                               \
        __futex_atomic_ex_table("%5")                           \
        : "=&r" (ret), "=&r" (oldval), "=&r" (tmp)              \
@@ -95,10 +95,10 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                return -EFAULT;
 
        __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
-       "1:     " T(ldr) "      %1, [%4]\n"
+       "1:     " TUSER(ldr) "  %1, [%4]\n"
        "       teq     %1, %2\n"
        "       it      eq      @ explicit IT needed for the 2b label\n"
-       "2:     " T(streq) "    %3, [%4]\n"
+       "2:     " TUSER(streq) "        %3, [%4]\n"
        __futex_atomic_ex_table("%5")
        : "+r" (ret), "=&r" (val)
        : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
index 7151753b0989f922c5a0e9d29fce5e057ae2ad5e..c402e9b31f4c61148ee5466b29f9ec176f11b78f 100644 (file)
@@ -2,7 +2,7 @@
 #define _ARCH_ARM_GPIO_H
 
 #if CONFIG_ARCH_NR_GPIO > 0
-#define ARCH_NR_GPIO CONFIG_ARCH_NR_GPIO
+#define ARCH_NR_GPIOS CONFIG_ARCH_NR_GPIO
 #endif
 
 /* not all ARM platforms necessarily support this API ... */
index feec86768f9c967c946a74ff23ed898d6207543c..f82ec22eeb1174a3ba0b431da119868cd8a1d36d 100644 (file)
@@ -24,7 +24,6 @@
 #define MAX_INSN_SIZE                  2
 #define MAX_STACK_SIZE                 64      /* 32 would probably be OK */
 
-#define regs_return_value(regs)                ((regs)->ARM_r0)
 #define flush_insn_slot(p)             do { } while (0)
 #define kretprobe_blacklist_size       0
 
index b8da2e415e4eb21a870cbb52333b5400a50ad163..00ca5f92648ea56616afe8bfbd4a25ff8f0b0e27 100644 (file)
@@ -6,4 +6,6 @@ struct machine_desc;
 
 extern void arm_memblock_init(struct meminfo *, struct machine_desc *);
 
+phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);
+
 #endif
index 96187ff58c247cf61953ff8d06922412f0acaca2..451808ba1211f2a547af60b35545b220d69886a9 100644 (file)
@@ -189,6 +189,11 @@ static inline int valid_user_regs(struct pt_regs *regs)
        return 0;
 }
 
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->ARM_r0;
+}
+
 #define instruction_pointer(regs)      (regs)->ARM_pc
 
 #ifdef CONFIG_SMP
index 1e5717afc4ac007e94447cf91ab88bdee9f7f1ad..ae29293270a3d8c298f5359c17ff64443882729d 100644 (file)
@@ -70,12 +70,6 @@ extern void platform_secondary_init(unsigned int cpu);
  */
 extern void platform_smp_prepare_cpus(unsigned int);
 
-/*
- * Logical CPU mapping.
- */
-extern int __cpu_logical_map[NR_CPUS];
-#define cpu_logical_map(cpu)   __cpu_logical_map[cpu]
-
 /*
  * Initial data for bringing up a secondary CPU.
  */
index f24c1b9e211dd180a6caf548260110a33f75b33c..558d6c80aca9cc292c5d3d50e99ee705535fe868 100644 (file)
@@ -43,4 +43,10 @@ static inline int cache_ops_need_broadcast(void)
 }
 #endif
 
+/*
+ * Logical CPU mapping.
+ */
+extern int __cpu_logical_map[];
+#define cpu_logical_map(cpu)   __cpu_logical_map[cpu]
+
 #endif
index 0f30c3a78fc10815668b56c2ae5789ea38b4fdd9..d4c24d412a8ddbdaba9f11a6217cd6a8b592c22c 100644 (file)
@@ -129,6 +129,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
 /*
  * thread information flags:
  *  TIF_SYSCALL_TRACE  - syscall trace active
+ *  TIF_SYSCALL_AUDIT  - syscall auditing active
  *  TIF_SIGPENDING     - signal pending
  *  TIF_NEED_RESCHED   - rescheduling necessary
  *  TIF_NOTIFY_RESUME  - callback before returning to user
@@ -139,6 +140,7 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define TIF_NEED_RESCHED       1
 #define TIF_NOTIFY_RESUME      2       /* callback before returning to user */
 #define TIF_SYSCALL_TRACE      8
+#define TIF_SYSCALL_AUDIT      9
 #define TIF_POLLING_NRFLAG     16
 #define TIF_USING_IWMMXT       17
 #define TIF_MEMDIE             18      /* is terminating due to OOM killer */
@@ -149,11 +151,15 @@ extern void vfp_flush_hwstate(struct thread_info *);
 #define _TIF_NEED_RESCHED      (1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME     (1 << TIF_NOTIFY_RESUME)
 #define _TIF_SYSCALL_TRACE     (1 << TIF_SYSCALL_TRACE)
+#define _TIF_SYSCALL_AUDIT     (1 << TIF_SYSCALL_AUDIT)
 #define _TIF_POLLING_NRFLAG    (1 << TIF_POLLING_NRFLAG)
 #define _TIF_USING_IWMMXT      (1 << TIF_USING_IWMMXT)
 #define _TIF_RESTORE_SIGMASK   (1 << TIF_RESTORE_SIGMASK)
 #define _TIF_SECCOMP           (1 << TIF_SECCOMP)
 
+/* Checks for any syscall work in entry-common.S */
+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
+
 /*
  * Change these and you break ASM code in entry-common.S
  */
index 5d3ed7e38561dd43553d1d3bf1b2af07e0d2b747..314d4664eae7d9976a5fe656f74918cb8d15ab8b 100644 (file)
@@ -198,7 +198,15 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
        unsigned long addr)
 {
        pgtable_page_dtor(pte);
-       tlb_add_flush(tlb, addr);
+
+       /*
+        * With the classic ARM MMU, a pte page has two corresponding pmd
+        * entries, each covering 1MB.
+        */
+       addr &= PMD_MASK;
+       tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
+       tlb_add_flush(tlb, addr + SZ_1M);
+
        tlb_remove_page(tlb, pte);
 }
 
index b293616a1a1a8d6cf92a5ae1560994c7e3d66e46..2958976d867b4b9795a090cee9a6b9cacb646631 100644 (file)
@@ -227,7 +227,7 @@ do {                                                                        \
 
 #define __get_user_asm_byte(x,addr,err)                                \
        __asm__ __volatile__(                                   \
-       "1:     " T(ldrb) "     %1,[%2],#0\n"                   \
+       "1:     " TUSER(ldrb) " %1,[%2],#0\n"                   \
        "2:\n"                                                  \
        "       .pushsection .fixup,\"ax\"\n"                   \
        "       .align  2\n"                                    \
@@ -263,7 +263,7 @@ do {                                                                        \
 
 #define __get_user_asm_word(x,addr,err)                                \
        __asm__ __volatile__(                                   \
-       "1:     " T(ldr) "      %1,[%2],#0\n"                   \
+       "1:     " TUSER(ldr) "  %1,[%2],#0\n"                   \
        "2:\n"                                                  \
        "       .pushsection .fixup,\"ax\"\n"                   \
        "       .align  2\n"                                    \
@@ -308,7 +308,7 @@ do {                                                                        \
 
 #define __put_user_asm_byte(x,__pu_addr,err)                   \
        __asm__ __volatile__(                                   \
-       "1:     " T(strb) "     %1,[%2],#0\n"                   \
+       "1:     " TUSER(strb) " %1,[%2],#0\n"                   \
        "2:\n"                                                  \
        "       .pushsection .fixup,\"ax\"\n"                   \
        "       .align  2\n"                                    \
@@ -341,7 +341,7 @@ do {                                                                        \
 
 #define __put_user_asm_word(x,__pu_addr,err)                   \
        __asm__ __volatile__(                                   \
-       "1:     " T(str) "      %1,[%2],#0\n"                   \
+       "1:     " TUSER(str) "  %1,[%2],#0\n"                   \
        "2:\n"                                                  \
        "       .pushsection .fixup,\"ax\"\n"                   \
        "       .align  2\n"                                    \
@@ -366,10 +366,10 @@ do {                                                                      \
 
 #define __put_user_asm_dword(x,__pu_addr,err)                  \
        __asm__ __volatile__(                                   \
- ARM(  "1:     " T(str) "      " __reg_oper1 ", [%1], #4\n"    )       \
- ARM(  "2:     " T(str) "      " __reg_oper0 ", [%1]\n"        )       \
- THUMB(        "1:     " T(str) "      " __reg_oper1 ", [%1]\n"        )       \
- THUMB(        "2:     " T(str) "      " __reg_oper0 ", [%1, #4]\n"    )       \
+ ARM(  "1:     " TUSER(str) "  " __reg_oper1 ", [%1], #4\n"    ) \
+ ARM(  "2:     " TUSER(str) "  " __reg_oper0 ", [%1]\n"        ) \
+ THUMB(        "1:     " TUSER(str) "  " __reg_oper1 ", [%1]\n"        ) \
+ THUMB(        "2:     " TUSER(str) "  " __reg_oper0 ", [%1, #4]\n"    ) \
        "3:\n"                                                  \
        "       .pushsection .fixup,\"ax\"\n"                   \
        "       .align  2\n"                                    \
index bc631161e9c6d29eb0f9e73d408da4ae4c13da0e..f5989f46b4d2d450f18b24faa946de750394ec13 100644 (file)
@@ -37,8 +37,8 @@
 #define THUMB(x...)    x
 #ifdef __ASSEMBLY__
 #define W(instr)       instr.w
-#endif
 #define BSYM(sym)      sym + 1
+#endif
 
 #else  /* !CONFIG_THUMB2_KERNEL */
 
@@ -49,8 +49,8 @@
 #define THUMB(x...)
 #ifdef __ASSEMBLY__
 #define W(instr)       instr
-#endif
 #define BSYM(sym)      sym
+#endif
 
 #endif /* CONFIG_THUMB2_KERNEL */
 
index 3a456c6c70056f9267f592792193d2176c045802..be16a48007b4b79fc5db0d3e48b016242b68d760 100644 (file)
@@ -790,7 +790,7 @@ __kuser_cmpxchg64:                          @ 0xffff0f60
        smp_dmb arm
        rsbs    r0, r3, #0                      @ set returned val and C flag
        ldmfd   sp!, {r4, r5, r6, r7}
-       bx      lr
+       usr_ret lr
 
 #elif !defined(CONFIG_SMP)
 
index b2a27b6b0046ee6c1e0f0928c3269ab5f6c63cd2..9fd0ba90c1d29ff78ed04ca80580e1c8be89cd0a 100644 (file)
@@ -87,7 +87,7 @@ ENTRY(ret_from_fork)
        get_thread_info tsk
        ldr     r1, [tsk, #TI_FLAGS]            @ check for syscall tracing
        mov     why, #1
-       tst     r1, #_TIF_SYSCALL_TRACE         @ are we tracing syscalls?
+       tst     r1, #_TIF_SYSCALL_WORK          @ are we tracing syscalls?
        beq     ret_slow_syscall
        mov     r1, sp
        mov     r0, #1                          @ trace exit [IP = 1]
@@ -149,6 +149,11 @@ ENDPROC(ret_from_fork)
 #endif
 #endif
 
+.macro mcount_adjust_addr rd, rn
+       bic     \rd, \rn, #1            @ clear the Thumb bit if present
+       sub     \rd, \rd, #MCOUNT_INSN_SIZE
+.endm
+
 .macro __mcount suffix
        mcount_enter
        ldr     r0, =ftrace_trace_function
@@ -173,8 +178,7 @@ ENDPROC(ret_from_fork)
        mcount_exit
 
 1:     mcount_get_lr   r1                      @ lr of instrumented func
-       mov     r0, lr                          @ instrumented function
-       sub     r0, r0, #MCOUNT_INSN_SIZE
+       mcount_adjust_addr      r0, lr          @ instrumented function
        adr     lr, BSYM(2f)
        mov     pc, r2
 2:     mcount_exit
@@ -184,8 +188,7 @@ ENDPROC(ret_from_fork)
        mcount_enter
 
        mcount_get_lr   r1                      @ lr of instrumented func
-       mov     r0, lr                          @ instrumented function
-       sub     r0, r0, #MCOUNT_INSN_SIZE
+       mcount_adjust_addr      r0, lr          @ instrumented function
 
        .globl ftrace_call\suffix
 ftrace_call\suffix:
@@ -205,11 +208,11 @@ ftrace_graph_call\suffix:
 #ifdef CONFIG_DYNAMIC_FTRACE
        @ called from __ftrace_caller, saved in mcount_enter
        ldr     r1, [sp, #16]           @ instrumented routine (func)
+       mcount_adjust_addr      r1, r1
 #else
        @ called from __mcount, untouched in lr
-       mov     r1, lr                  @ instrumented routine (func)
+       mcount_adjust_addr      r1, lr  @ instrumented routine (func)
 #endif
-       sub     r1, r1, #MCOUNT_INSN_SIZE
        mov     r2, fp                  @ frame pointer
        bl      prepare_ftrace_return
        mcount_exit
@@ -443,7 +446,7 @@ ENTRY(vector_swi)
 1:
 #endif
 
-       tst     r10, #_TIF_SYSCALL_TRACE                @ are we tracing syscalls?
+       tst     r10, #_TIF_SYSCALL_WORK         @ are we tracing syscalls?
        bne     __sys_trace
 
        cmp     scno, #NR_syscalls              @ check upper syscall limit
index 14e277d2ff911ec7690c4223c5acea4b602e3450..6d579114406673fd1b97c09f0cdcbb7263b044da 100644 (file)
@@ -99,6 +99,14 @@ ENTRY(stext)
  THUMB( it     eq )            @ force fixup-able long branch encoding
        beq     __error_p                       @ yes, error 'p'
 
+#ifdef CONFIG_ARM_LPAE
+       mrc     p15, 0, r3, c0, c1, 4           @ read ID_MMFR0
+       and     r3, r3, #0xf                    @ extract VMSA support
+       cmp     r3, #5                          @ long-descriptor translation table format?
+ THUMB( it     lo )                            @ force fixup-able long branch encoding
+       blo     __error_p                       @ only classic page table format
+#endif
+
 #ifndef CONFIG_XIP_KERNEL
        adr     r3, 2f
        ldmia   r3, {r4, r8}
index 460bbbb6b88536ba18ded4983b6ee18baf1a4bf4..6933244c68f964ffed73f11cfbdaac1ca13b2f74 100644 (file)
@@ -469,6 +469,20 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
+       [C(NODE)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
 };
 
 /*
@@ -579,6 +593,20 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
+       [C(NODE)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
 };
 
 /*
index 483727ad68923a3e27af6d693c9132db0148af7f..e33870ff0ac092b25e31a49e3074899ee0ac6339 100644 (file)
@@ -699,10 +699,13 @@ static int vfp_set(struct task_struct *target,
 {
        int ret;
        struct thread_info *thread = task_thread_info(target);
-       struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
+       struct vfp_hard_struct new_vfp;
        const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
        const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);
 
+       vfp_sync_hwstate(thread);
+       new_vfp = thread->vfpstate.hard;
+
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
                                  &new_vfp.fpregs,
                                  user_fpregs_offset,
@@ -723,9 +726,8 @@ static int vfp_set(struct task_struct *target,
        if (ret)
                return ret;
 
-       vfp_sync_hwstate(thread);
-       thread->vfpstate.hard = new_vfp;
        vfp_flush_hwstate(thread);
+       thread->vfpstate.hard = new_vfp;
 
        return 0;
 }
@@ -906,11 +908,6 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
 {
        unsigned long ip;
 
-       if (!test_thread_flag(TIF_SYSCALL_TRACE))
-               return scno;
-       if (!(current->ptrace & PT_PTRACED))
-               return scno;
-
        /*
         * Save IP.  IP is used to denote syscall entry/exit:
         *  IP = 0 -> entry, = 1 -> exit
@@ -918,6 +915,17 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
        ip = regs->ARM_ip;
        regs->ARM_ip = why;
 
+       if (!ip)
+               audit_syscall_exit(regs);
+       else
+               audit_syscall_entry(AUDIT_ARCH_ARMEB, scno, regs->ARM_r0,
+                                   regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
+
+       if (!test_thread_flag(TIF_SYSCALL_TRACE))
+               return scno;
+       if (!(current->ptrace & PT_PTRACED))
+               return scno;
+
        current_thread_info()->syscall = scno;
 
        /* the 0x80 provides a way for the tracing parent to distinguish
index 129fbd55bde85612de3900035f05fd8bd85cfeee..a255c39612ca3cfa10bddb7c7728216efeeb04d5 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/init.h>
 #include <linux/kexec.h>
 #include <linux/of_fdt.h>
-#include <linux/crash_dump.h>
 #include <linux/root_dev.h>
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
@@ -160,7 +159,7 @@ static struct resource mem_res[] = {
                .flags = IORESOURCE_MEM
        },
        {
-               .name = "Kernel text",
+               .name = "Kernel code",
                .start = 0,
                .end = 0,
                .flags = IORESOURCE_MEM
@@ -427,6 +426,20 @@ void cpu_init(void)
            : "r14");
 }
 
+int __cpu_logical_map[NR_CPUS];
+
+void __init smp_setup_processor_id(void)
+{
+       int i;
+       u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
+
+       cpu_logical_map(0) = cpu;
+       for (i = 1; i < NR_CPUS; ++i)
+               cpu_logical_map(i) = i == cpu ? 0 : i;
+
+       printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
+}
+
 static void __init setup_processor(void)
 {
        struct proc_info_list *list;
index 0340224cf73c5c9db2d16185bd698a6ba6518fb8..9e617bd4a146250d7d3f453d50a45431fa261e08 100644 (file)
@@ -227,6 +227,8 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
        if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
                return -EINVAL;
 
+       vfp_flush_hwstate(thread);
+
        /*
         * Copy the floating point registers. There can be unused
         * registers see asm/hwcap.h for details.
@@ -251,9 +253,6 @@ static int restore_vfp_context(struct vfp_sigframe __user *frame)
        __get_user_error(h->fpinst, &frame->ufp_exc.fpinst, err);
        __get_user_error(h->fpinst2, &frame->ufp_exc.fpinst2, err);
 
-       if (!err)
-               vfp_flush_hwstate(thread);
-
        return err ? -EFAULT : 0;
 }
 
index 57db122a4f629bb53247fde4f2b15076838661c0..cdeb727527d39768587ffa3dd9946073aeaa6853 100644 (file)
@@ -233,20 +233,6 @@ void __ref cpu_die(void)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-int __cpu_logical_map[NR_CPUS];
-
-void __init smp_setup_processor_id(void)
-{
-       int i;
-       u32 cpu = is_smp() ? read_cpuid_mpidr() & 0xff : 0;
-
-       cpu_logical_map(0) = cpu;
-       for (i = 1; i < NR_CPUS; ++i)
-               cpu_logical_map(i) = i == cpu ? 0 : i;
-
-       printk(KERN_INFO "Booting Linux on physical CPU %d\n", cpu);
-}
-
 /*
  * Called by both boot and secondaries to move global data into
  * per-processor storage.
@@ -443,9 +429,7 @@ static DEFINE_PER_CPU(struct clock_event_device, percpu_clockevent);
 static void ipi_timer(void)
 {
        struct clock_event_device *evt = &__get_cpu_var(percpu_clockevent);
-       irq_enter();
        evt->event_handler(evt);
-       irq_exit();
 }
 
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
@@ -548,7 +532,9 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
 
        switch (ipinr) {
        case IPI_TIMER:
+               irq_enter();
                ipi_timer();
+               irq_exit();
                break;
 
        case IPI_RESCHEDULE:
@@ -556,15 +542,21 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
                break;
 
        case IPI_CALL_FUNC:
+               irq_enter();
                generic_smp_call_function_interrupt();
+               irq_exit();
                break;
 
        case IPI_CALL_FUNC_SINGLE:
+               irq_enter();
                generic_smp_call_function_single_interrupt();
+               irq_exit();
                break;
 
        case IPI_CPU_STOP:
+               irq_enter();
                ipi_cpu_stop(cpu);
+               irq_exit();
                break;
 
        default:
index c8e938553d478015d57d436ec43908b81b460d3a..4285daa077b0b8105f2267d107e9c80836ff01bc 100644 (file)
@@ -252,6 +252,8 @@ void __cpuinit twd_timer_setup(struct clock_event_device *clk)
        else
                twd_calibrate_rate();
 
+       __raw_writel(0, twd_base + TWD_TIMER_CONTROL);
+
        clk->name = "local_timer";
        clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
                        CLOCK_EVT_FEAT_C3STOP;
index f76e75548670e97eeac9112a8e48b53e37f9cc91..1e19691e040650625103ddcbdee6ffceb5a77f1c 100644 (file)
@@ -4,6 +4,7 @@
  */
 
 #include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
 #include <asm/thread_info.h>
 #include <asm/memory.h>
 #include <asm/page.h>
@@ -181,7 +182,7 @@ SECTIONS
        }
 #endif
 
-       PERCPU_SECTION(32)
+       PERCPU_SECTION(L1_CACHE_BYTES)
 
 #ifdef CONFIG_XIP_KERNEL
        __data_loc = ALIGN(4);          /* location in binary */
@@ -212,13 +213,13 @@ SECTIONS
 #endif
 
                NOSAVE_DATA
-               CACHELINE_ALIGNED_DATA(32)
-               READ_MOSTLY_DATA(32)
+               CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
+               READ_MOSTLY_DATA(L1_CACHE_BYTES)
 
                /*
                 * The exception fixup table (might need resorting at runtime)
                 */
-               . = ALIGN(32);
+               . = ALIGN(4);
                __start___ex_table = .;
 #ifdef CONFIG_MMU
                *(__ex_table)
index 1b049cd7a49a8badab511ed2b0532d1f7e76b733..11093a7c3e32289e95a8c100cc01ef2bbb8d7101 100644 (file)
 #include <asm/domain.h>
 
 ENTRY(__get_user_1)
-1:     T(ldrb) r2, [r0]
+1: TUSER(ldrb) r2, [r0]
        mov     r0, #0
        mov     pc, lr
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
 #ifdef CONFIG_THUMB2_KERNEL
-2:     T(ldrb) r2, [r0]
-3:     T(ldrb) r3, [r0, #1]
+2: TUSER(ldrb) r2, [r0]
+3: TUSER(ldrb) r3, [r0, #1]
 #else
-2:     T(ldrb) r2, [r0], #1
-3:     T(ldrb) r3, [r0]
+2: TUSER(ldrb) r2, [r0], #1
+3: TUSER(ldrb) r3, [r0]
 #endif
 #ifndef __ARMEB__
        orr     r2, r2, r3, lsl #8
@@ -54,7 +54,7 @@ ENTRY(__get_user_2)
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
-4:     T(ldr)  r2, [r0]
+4: TUSER(ldr)  r2, [r0]
        mov     r0, #0
        mov     pc, lr
 ENDPROC(__get_user_4)
index c023fc11e86c7b603eaaa08270e7ea429ecfab5f..7db25990c589f3d98554d9aee47cf7b5c3c486fd 100644 (file)
@@ -31,7 +31,7 @@
 #include <asm/domain.h>
 
 ENTRY(__put_user_1)
-1:     T(strb) r2, [r0]
+1: TUSER(strb) r2, [r0]
        mov     r0, #0
        mov     pc, lr
 ENDPROC(__put_user_1)
@@ -40,19 +40,19 @@ ENTRY(__put_user_2)
        mov     ip, r2, lsr #8
 #ifdef CONFIG_THUMB2_KERNEL
 #ifndef __ARMEB__
-2:     T(strb) r2, [r0]
-3:     T(strb) ip, [r0, #1]
+2: TUSER(strb) r2, [r0]
+3: TUSER(strb) ip, [r0, #1]
 #else
-2:     T(strb) ip, [r0]
-3:     T(strb) r2, [r0, #1]
+2: TUSER(strb) ip, [r0]
+3: TUSER(strb) r2, [r0, #1]
 #endif
 #else  /* !CONFIG_THUMB2_KERNEL */
 #ifndef __ARMEB__
-2:     T(strb) r2, [r0], #1
-3:     T(strb) ip, [r0]
+2: TUSER(strb) r2, [r0], #1
+3: TUSER(strb) ip, [r0]
 #else
-2:     T(strb) ip, [r0], #1
-3:     T(strb) r2, [r0]
+2: TUSER(strb) ip, [r0], #1
+3: TUSER(strb) r2, [r0]
 #endif
 #endif /* CONFIG_THUMB2_KERNEL */
        mov     r0, #0
@@ -60,18 +60,18 @@ ENTRY(__put_user_2)
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
-4:     T(str)  r2, [r0]
+4: TUSER(str)  r2, [r0]
        mov     r0, #0
        mov     pc, lr
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
 #ifdef CONFIG_THUMB2_KERNEL
-5:     T(str)  r2, [r0]
-6:     T(str)  r3, [r0, #4]
+5: TUSER(str)  r2, [r0]
+6: TUSER(str)  r3, [r0, #4]
 #else
-5:     T(str)  r2, [r0], #4
-6:     T(str)  r3, [r0]
+5: TUSER(str)  r2, [r0], #4
+6: TUSER(str)  r3, [r0]
 #endif
        mov     r0, #0
        mov     pc, lr
index d0ece2aeb70dfc380d689a85a141a28a5917ec20..5c908b1cb8ed5db3eeabfb89f7f659f99d2d6f76 100644 (file)
                rsb     ip, ip, #4
                cmp     ip, #2
                ldrb    r3, [r1], #1
-USER(          T(strb) r3, [r0], #1)                   @ May fault
+USER(  TUSER(  strb)   r3, [r0], #1)                   @ May fault
                ldrgeb  r3, [r1], #1
-USER(          T(strgeb) r3, [r0], #1)                 @ May fault
+USER(  TUSER(  strgeb) r3, [r0], #1)                   @ May fault
                ldrgtb  r3, [r1], #1
-USER(          T(strgtb) r3, [r0], #1)                 @ May fault
+USER(  TUSER(  strgtb) r3, [r0], #1)                   @ May fault
                sub     r2, r2, ip
                b       .Lc2u_dest_aligned
 
@@ -59,7 +59,7 @@ ENTRY(__copy_to_user)
                addmi   ip, r2, #4
                bmi     .Lc2u_0nowords
                ldr     r3, [r1], #4
-USER(          T(str)  r3, [r0], #4)                   @ May fault
+USER(  TUSER(  str)    r3, [r0], #4)                   @ May fault
                mov     ip, r0, lsl #32 - PAGE_SHIFT    @ On each page, use a ld/st??t instruction
                rsb     ip, ip, #0
                movs    ip, ip, lsr #32 - PAGE_SHIFT
@@ -88,18 +88,18 @@ USER(               T(str)  r3, [r0], #4)                   @ May fault
                stmneia r0!, {r3 - r4}                  @ Shouldnt fault
                tst     ip, #4
                ldrne   r3, [r1], #4
-               T(strne) r3, [r0], #4                   @ Shouldnt fault
+       TUSER(  strne) r3, [r0], #4                     @ Shouldnt fault
                ands    ip, ip, #3
                beq     .Lc2u_0fupi
 .Lc2u_0nowords:        teq     ip, #0
                beq     .Lc2u_finished
 .Lc2u_nowords: cmp     ip, #2
                ldrb    r3, [r1], #1
-USER(          T(strb) r3, [r0], #1)                   @ May fault
+USER(  TUSER(  strb)   r3, [r0], #1)                   @ May fault
                ldrgeb  r3, [r1], #1
-USER(          T(strgeb) r3, [r0], #1)                 @ May fault
+USER(  TUSER(  strgeb) r3, [r0], #1)                   @ May fault
                ldrgtb  r3, [r1], #1
-USER(          T(strgtb) r3, [r0], #1)                 @ May fault
+USER(  TUSER(  strgtb) r3, [r0], #1)                   @ May fault
                b       .Lc2u_finished
 
 .Lc2u_not_enough:
@@ -120,7 +120,7 @@ USER(               T(strgtb) r3, [r0], #1)                 @ May fault
                mov     r3, r7, pull #8
                ldr     r7, [r1], #4
                orr     r3, r3, r7, push #24
-USER(          T(str)  r3, [r0], #4)                   @ May fault
+USER(  TUSER(  str)    r3, [r0], #4)                   @ May fault
                mov     ip, r0, lsl #32 - PAGE_SHIFT
                rsb     ip, ip, #0
                movs    ip, ip, lsr #32 - PAGE_SHIFT
@@ -155,18 +155,18 @@ USER(             T(str)  r3, [r0], #4)                   @ May fault
                movne   r3, r7, pull #8
                ldrne   r7, [r1], #4
                orrne   r3, r3, r7, push #24
-               T(strne) r3, [r0], #4                   @ Shouldnt fault
+       TUSER(  strne) r3, [r0], #4                     @ Shouldnt fault
                ands    ip, ip, #3
                beq     .Lc2u_1fupi
 .Lc2u_1nowords:        mov     r3, r7, get_byte_1
                teq     ip, #0
                beq     .Lc2u_finished
                cmp     ip, #2
-USER(          T(strb) r3, [r0], #1)                   @ May fault
+USER(  TUSER(  strb)   r3, [r0], #1)                   @ May fault
                movge   r3, r7, get_byte_2
-USER(          T(strgeb) r3, [r0], #1)                 @ May fault
+USER(  TUSER(  strgeb) r3, [r0], #1)                   @ May fault
                movgt   r3, r7, get_byte_3
-USER(          T(strgtb) r3, [r0], #1)                 @ May fault
+USER(  TUSER(  strgtb) r3, [r0], #1)                   @ May fault
                b       .Lc2u_finished
 
 .Lc2u_2fupi:   subs    r2, r2, #4
@@ -175,7 +175,7 @@ USER(               T(strgtb) r3, [r0], #1)                 @ May fault
                mov     r3, r7, pull #16
                ldr     r7, [r1], #4
                orr     r3, r3, r7, push #16
-USER(          T(str)  r3, [r0], #4)                   @ May fault
+USER(  TUSER(  str)    r3, [r0], #4)                   @ May fault
                mov     ip, r0, lsl #32 - PAGE_SHIFT
                rsb     ip, ip, #0
                movs    ip, ip, lsr #32 - PAGE_SHIFT
@@ -210,18 +210,18 @@ USER(             T(str)  r3, [r0], #4)                   @ May fault
                movne   r3, r7, pull #16
                ldrne   r7, [r1], #4
                orrne   r3, r3, r7, push #16
-               T(strne) r3, [r0], #4                   @ Shouldnt fault
+       TUSER(  strne) r3, [r0], #4                     @ Shouldnt fault
                ands    ip, ip, #3
                beq     .Lc2u_2fupi
 .Lc2u_2nowords:        mov     r3, r7, get_byte_2
                teq     ip, #0
                beq     .Lc2u_finished
                cmp     ip, #2
-USER(          T(strb) r3, [r0], #1)                   @ May fault
+USER(  TUSER(  strb)   r3, [r0], #1)                   @ May fault
                movge   r3, r7, get_byte_3
-USER(          T(strgeb) r3, [r0], #1)                 @ May fault
+USER(  TUSER(  strgeb) r3, [r0], #1)                   @ May fault
                ldrgtb  r3, [r1], #0
-USER(          T(strgtb) r3, [r0], #1)                 @ May fault
+USER(  TUSER(  strgtb) r3, [r0], #1)                   @ May fault
                b       .Lc2u_finished
 
 .Lc2u_3fupi:   subs    r2, r2, #4
@@ -230,7 +230,7 @@ USER(               T(strgtb) r3, [r0], #1)                 @ May fault
                mov     r3, r7, pull #24
                ldr     r7, [r1], #4
                orr     r3, r3, r7, push #8
-USER(          T(str)  r3, [r0], #4)                   @ May fault
+USER(  TUSER(  str)    r3, [r0], #4)                   @ May fault
                mov     ip, r0, lsl #32 - PAGE_SHIFT
                rsb     ip, ip, #0
                movs    ip, ip, lsr #32 - PAGE_SHIFT
@@ -265,18 +265,18 @@ USER(             T(str)  r3, [r0], #4)                   @ May fault
                movne   r3, r7, pull #24
                ldrne   r7, [r1], #4
                orrne   r3, r3, r7, push #8
-               T(strne) r3, [r0], #4                   @ Shouldnt fault
+       TUSER(  strne) r3, [r0], #4                     @ Shouldnt fault
                ands    ip, ip, #3
                beq     .Lc2u_3fupi
 .Lc2u_3nowords:        mov     r3, r7, get_byte_3
                teq     ip, #0
                beq     .Lc2u_finished
                cmp     ip, #2
-USER(          T(strb) r3, [r0], #1)                   @ May fault
+USER(  TUSER(  strb)   r3, [r0], #1)                   @ May fault
                ldrgeb  r3, [r1], #1
-USER(          T(strgeb) r3, [r0], #1)                 @ May fault
+USER(  TUSER(  strgeb) r3, [r0], #1)                   @ May fault
                ldrgtb  r3, [r1], #0
-USER(          T(strgtb) r3, [r0], #1)                 @ May fault
+USER(  TUSER(  strgtb) r3, [r0], #1)                   @ May fault
                b       .Lc2u_finished
 ENDPROC(__copy_to_user)
 
@@ -295,11 +295,11 @@ ENDPROC(__copy_to_user)
 .Lcfu_dest_not_aligned:
                rsb     ip, ip, #4
                cmp     ip, #2
-USER(          T(ldrb) r3, [r1], #1)                   @ May fault
+USER(  TUSER(  ldrb)   r3, [r1], #1)                   @ May fault
                strb    r3, [r0], #1
-USER(          T(ldrgeb) r3, [r1], #1)                 @ May fault
+USER(  TUSER(  ldrgeb) r3, [r1], #1)                   @ May fault
                strgeb  r3, [r0], #1
-USER(          T(ldrgtb) r3, [r1], #1)                 @ May fault
+USER(  TUSER(  ldrgtb) r3, [r1], #1)                   @ May fault
                strgtb  r3, [r0], #1
                sub     r2, r2, ip
                b       .Lcfu_dest_aligned
@@ -322,7 +322,7 @@ ENTRY(__copy_from_user)
 .Lcfu_0fupi:   subs    r2, r2, #4
                addmi   ip, r2, #4
                bmi     .Lcfu_0nowords
-USER(          T(ldr)  r3, [r1], #4)
+USER(  TUSER(  ldr)    r3, [r1], #4)
                str     r3, [r0], #4
                mov     ip, r1, lsl #32 - PAGE_SHIFT    @ On each page, use a ld/st??t instruction
                rsb     ip, ip, #0
@@ -351,18 +351,18 @@ USER(             T(ldr)  r3, [r1], #4)
                ldmneia r1!, {r3 - r4}                  @ Shouldnt fault
                stmneia r0!, {r3 - r4}
                tst     ip, #4
-               T(ldrne) r3, [r1], #4                   @ Shouldnt fault
+       TUSER(  ldrne) r3, [r1], #4                     @ Shouldnt fault
                strne   r3, [r0], #4
                ands    ip, ip, #3
                beq     .Lcfu_0fupi
 .Lcfu_0nowords:        teq     ip, #0
                beq     .Lcfu_finished
 .Lcfu_nowords: cmp     ip, #2
-USER(          T(ldrb) r3, [r1], #1)                   @ May fault
+USER(  TUSER(  ldrb)   r3, [r1], #1)                   @ May fault
                strb    r3, [r0], #1
-USER(          T(ldrgeb) r3, [r1], #1)                 @ May fault
+USER(  TUSER(  ldrgeb) r3, [r1], #1)                   @ May fault
                strgeb  r3, [r0], #1
-USER(          T(ldrgtb) r3, [r1], #1)                 @ May fault
+USER(  TUSER(  ldrgtb) r3, [r1], #1)                   @ May fault
                strgtb  r3, [r0], #1
                b       .Lcfu_finished
 
@@ -375,7 +375,7 @@ USER(               T(ldrgtb) r3, [r1], #1)                 @ May fault
 
 .Lcfu_src_not_aligned:
                bic     r1, r1, #3
-USER(          T(ldr)  r7, [r1], #4)                   @ May fault
+USER(  TUSER(  ldr)    r7, [r1], #4)                   @ May fault
                cmp     ip, #2
                bgt     .Lcfu_3fupi
                beq     .Lcfu_2fupi
@@ -383,7 +383,7 @@ USER(               T(ldr)  r7, [r1], #4)                   @ May fault
                addmi   ip, r2, #4
                bmi     .Lcfu_1nowords
                mov     r3, r7, pull #8
-USER(          T(ldr)  r7, [r1], #4)                   @ May fault
+USER(  TUSER(  ldr)    r7, [r1], #4)                   @ May fault
                orr     r3, r3, r7, push #24
                str     r3, [r0], #4
                mov     ip, r1, lsl #32 - PAGE_SHIFT
@@ -418,7 +418,7 @@ USER(               T(ldr)  r7, [r1], #4)                   @ May fault
                stmneia r0!, {r3 - r4}
                tst     ip, #4
                movne   r3, r7, pull #8
-USER(          T(ldrne) r7, [r1], #4)                  @ May fault
+USER(  TUSER(  ldrne) r7, [r1], #4)                    @ May fault
                orrne   r3, r3, r7, push #24
                strne   r3, [r0], #4
                ands    ip, ip, #3
@@ -438,7 +438,7 @@ USER(               T(ldrne) r7, [r1], #4)                  @ May fault
                addmi   ip, r2, #4
                bmi     .Lcfu_2nowords
                mov     r3, r7, pull #16
-USER(          T(ldr)  r7, [r1], #4)                   @ May fault
+USER(  TUSER(  ldr)    r7, [r1], #4)                   @ May fault
                orr     r3, r3, r7, push #16
                str     r3, [r0], #4
                mov     ip, r1, lsl #32 - PAGE_SHIFT
@@ -474,7 +474,7 @@ USER(               T(ldr)  r7, [r1], #4)                   @ May fault
                stmneia r0!, {r3 - r4}
                tst     ip, #4
                movne   r3, r7, pull #16
-USER(          T(ldrne) r7, [r1], #4)                  @ May fault
+USER(  TUSER(  ldrne) r7, [r1], #4)                    @ May fault
                orrne   r3, r3, r7, push #16
                strne   r3, [r0], #4
                ands    ip, ip, #3
@@ -486,7 +486,7 @@ USER(               T(ldrne) r7, [r1], #4)                  @ May fault
                strb    r3, [r0], #1
                movge   r3, r7, get_byte_3
                strgeb  r3, [r0], #1
-USER(          T(ldrgtb) r3, [r1], #0)                 @ May fault
+USER(  TUSER(  ldrgtb) r3, [r1], #0)                   @ May fault
                strgtb  r3, [r0], #1
                b       .Lcfu_finished
 
@@ -494,7 +494,7 @@ USER(               T(ldrgtb) r3, [r1], #0)                 @ May fault
                addmi   ip, r2, #4
                bmi     .Lcfu_3nowords
                mov     r3, r7, pull #24
-USER(          T(ldr)  r7, [r1], #4)                   @ May fault
+USER(  TUSER(  ldr)    r7, [r1], #4)                   @ May fault
                orr     r3, r3, r7, push #8
                str     r3, [r0], #4
                mov     ip, r1, lsl #32 - PAGE_SHIFT
@@ -529,7 +529,7 @@ USER(               T(ldr)  r7, [r1], #4)                   @ May fault
                stmneia r0!, {r3 - r4}
                tst     ip, #4
                movne   r3, r7, pull #24
-USER(          T(ldrne) r7, [r1], #4)                  @ May fault
+USER(  TUSER(  ldrne) r7, [r1], #4)                    @ May fault
                orrne   r3, r3, r7, push #8
                strne   r3, [r0], #4
                ands    ip, ip, #3
@@ -539,9 +539,9 @@ USER(               T(ldrne) r7, [r1], #4)                  @ May fault
                beq     .Lcfu_finished
                cmp     ip, #2
                strb    r3, [r0], #1
-USER(          T(ldrgeb) r3, [r1], #1)                 @ May fault
+USER(  TUSER(  ldrgeb) r3, [r1], #1)                   @ May fault
                strgeb  r3, [r0], #1
-USER(          T(ldrgtb) r3, [r1], #1)                 @ May fault
+USER(  TUSER(  ldrgtb) r3, [r1], #1)                   @ May fault
                strgtb  r3, [r0], #1
                b       .Lcfu_finished
 ENDPROC(__copy_from_user)
index 4f991f2952846fb89d36be0758c255a4014b575a..71feb00a1e995de0991b54189569b04c1cf02f06 100644 (file)
@@ -18,6 +18,12 @@ config HAVE_AT91_USART4
 config HAVE_AT91_USART5
        bool
 
+config AT91_SAM9_ALT_RESET
+       bool
+
+config AT91_SAM9G45_RESET
+       bool
+
 menu "Atmel AT91 System-on-Chip"
 
 choice
@@ -39,6 +45,7 @@ config ARCH_AT91SAM9260
        select HAVE_AT91_USART4
        select HAVE_AT91_USART5
        select HAVE_NET_MACB
+       select AT91_SAM9_ALT_RESET
 
 config ARCH_AT91SAM9261
        bool "AT91SAM9261"
@@ -46,6 +53,7 @@ config ARCH_AT91SAM9261
        select GENERIC_CLOCKEVENTS
        select HAVE_FB_ATMEL
        select HAVE_AT91_DBGU0
+       select AT91_SAM9_ALT_RESET
 
 config ARCH_AT91SAM9G10
        bool "AT91SAM9G10"
@@ -53,6 +61,7 @@ config ARCH_AT91SAM9G10
        select GENERIC_CLOCKEVENTS
        select HAVE_AT91_DBGU0
        select HAVE_FB_ATMEL
+       select AT91_SAM9_ALT_RESET
 
 config ARCH_AT91SAM9263
        bool "AT91SAM9263"
@@ -61,6 +70,7 @@ config ARCH_AT91SAM9263
        select HAVE_FB_ATMEL
        select HAVE_NET_MACB
        select HAVE_AT91_DBGU1
+       select AT91_SAM9_ALT_RESET
 
 config ARCH_AT91SAM9RL
        bool "AT91SAM9RL"
@@ -69,6 +79,7 @@ config ARCH_AT91SAM9RL
        select HAVE_AT91_USART3
        select HAVE_FB_ATMEL
        select HAVE_AT91_DBGU0
+       select AT91_SAM9_ALT_RESET
 
 config ARCH_AT91SAM9G20
        bool "AT91SAM9G20"
@@ -79,6 +90,7 @@ config ARCH_AT91SAM9G20
        select HAVE_AT91_USART4
        select HAVE_AT91_USART5
        select HAVE_NET_MACB
+       select AT91_SAM9_ALT_RESET
 
 config ARCH_AT91SAM9G45
        bool "AT91SAM9G45"
@@ -88,6 +100,7 @@ config ARCH_AT91SAM9G45
        select HAVE_FB_ATMEL
        select HAVE_NET_MACB
        select HAVE_AT91_DBGU1
+       select AT91_SAM9G45_RESET
 
 config ARCH_AT91CAP9
        bool "AT91CAP9"
@@ -96,6 +109,7 @@ config ARCH_AT91CAP9
        select HAVE_FB_ATMEL
        select HAVE_NET_MACB
        select HAVE_AT91_DBGU1
+       select AT91_SAM9G45_RESET
 
 config ARCH_AT91X40
        bool "AT91x40"
index 242174f9f3554c6323075229eceef94a9f759225..705e1fbded3919112efb5858ec921d49e05d1eaa 100644 (file)
@@ -8,15 +8,17 @@ obj-n         :=
 obj-           :=
 
 obj-$(CONFIG_AT91_PMC_UNIT)    += clock.o
+obj-$(CONFIG_AT91_SAM9_ALT_RESET) += at91sam9_alt_reset.o
+obj-$(CONFIG_AT91_SAM9G45_RESET) += at91sam9g45_reset.o
 
 # CPU-specific support
 obj-$(CONFIG_ARCH_AT91RM9200)  += at91rm9200.o at91rm9200_time.o at91rm9200_devices.o
-obj-$(CONFIG_ARCH_AT91SAM9260) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o at91sam9_alt_reset.o
-obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o at91sam9_alt_reset.o
-obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o at91sam9_alt_reset.o
-obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o at91sam9_alt_reset.o
-obj-$(CONFIG_ARCH_AT91SAM9RL)  += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o at91sam9_alt_reset.o
-obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o at91sam9_alt_reset.o
+obj-$(CONFIG_ARCH_AT91SAM9260) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9261) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G10) += at91sam9261.o at91sam926x_time.o at91sam9261_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9263) += at91sam9263.o at91sam926x_time.o at91sam9263_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9RL)  += at91sam9rl.o at91sam926x_time.o at91sam9rl_devices.o sam9_smc.o
+obj-$(CONFIG_ARCH_AT91SAM9G20) += at91sam9260.o at91sam926x_time.o at91sam9260_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91SAM9G45) += at91sam9g45.o at91sam926x_time.o at91sam9g45_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91CAP9)    += at91cap9.o at91sam926x_time.o at91cap9_devices.o sam9_smc.o
 obj-$(CONFIG_ARCH_AT91X40)     += at91x40.o at91x40_time.o
index edb879ac04c8e30c3d2b814f24497ae86075e7be..a42edc25a87e693aea85b2b494515c14e4d32c42 100644 (file)
@@ -21,7 +21,6 @@
 #include <mach/cpu.h>
 #include <mach/at91cap9.h>
 #include <mach/at91_pmc.h>
-#include <mach/at91_rstc.h>
 
 #include "soc.h"
 #include "generic.h"
@@ -314,11 +313,6 @@ static struct at91_gpio_bank at91cap9_gpio[] __initdata = {
        }
 };
 
-static void at91cap9_restart(char mode, const char *cmd)
-{
-       at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
-}
-
 /* --------------------------------------------------------------------
  *  AT91CAP9 processor initialization
  * -------------------------------------------------------------------- */
@@ -331,13 +325,14 @@ static void __init at91cap9_map_io(void)
 static void __init at91cap9_ioremap_registers(void)
 {
        at91_ioremap_shdwc(AT91CAP9_BASE_SHDWC);
+       at91_ioremap_rstc(AT91CAP9_BASE_RSTC);
        at91sam926x_ioremap_pit(AT91CAP9_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91CAP9_BASE_SMC);
 }
 
 static void __init at91cap9_initialize(void)
 {
-       arm_pm_restart = at91cap9_restart;
+       arm_pm_restart = at91sam9g45_restart;
        at91_extern_irq = (1 << AT91CAP9_ID_IRQ0) | (1 << AT91CAP9_ID_IRQ1);
 
        /* Register GPIO subsystem */
index 5e46e4a96430d90e793343115cbce42001e5cecc..d4036ba43612afe85f7b74ce58f55ae35e1882d0 100644 (file)
@@ -323,6 +323,7 @@ static void __init at91sam9260_map_io(void)
 static void __init at91sam9260_ioremap_registers(void)
 {
        at91_ioremap_shdwc(AT91SAM9260_BASE_SHDWC);
+       at91_ioremap_rstc(AT91SAM9260_BASE_RSTC);
        at91sam926x_ioremap_pit(AT91SAM9260_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9260_BASE_SMC);
 }
index b85b9ea6017071252a670fdb6e22cb222336055d..023c2ff138df8a9228b4cb45c514d6e02f938918 100644 (file)
@@ -281,6 +281,7 @@ static void __init at91sam9261_map_io(void)
 static void __init at91sam9261_ioremap_registers(void)
 {
        at91_ioremap_shdwc(AT91SAM9261_BASE_SHDWC);
+       at91_ioremap_rstc(AT91SAM9261_BASE_RSTC);
        at91sam926x_ioremap_pit(AT91SAM9261_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9261_BASE_SMC);
 }
index 79e3669b1117cbdf7ae18fd685647625766da513..75e876c258afed33b87ba50ccdb8dafee2523d79 100644 (file)
@@ -301,6 +301,7 @@ static void __init at91sam9263_map_io(void)
 static void __init at91sam9263_ioremap_registers(void)
 {
        at91_ioremap_shdwc(AT91SAM9263_BASE_SHDWC);
+       at91_ioremap_rstc(AT91SAM9263_BASE_RSTC);
        at91sam926x_ioremap_pit(AT91SAM9263_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9263_BASE_SMC0);
        at91sam9_ioremap_smc(1, AT91SAM9263_BASE_SMC1);
index d3f931c5942e9078bcb31803e50a6fefed398c8b..518e42377171c8fb25e5888c8863f7b363721a99 100644 (file)
@@ -23,7 +23,8 @@
                        .globl  at91sam9_alt_restart
 
 at91sam9_alt_restart:  ldr     r0, .at91_va_base_sdramc        @ preload constants
-                       ldr     r1, .at91_va_base_rstc_cr
+                       ldr     r1, =at91_rstc_base
+                       ldr     r1, [r1]
 
                        mov     r2, #1
                        mov     r3, #AT91_SDRAMC_LPCB_POWER_DOWN
@@ -33,11 +34,9 @@ at91sam9_alt_restart:        ldr     r0, .at91_va_base_sdramc        @ preload constants
 
                        str     r2, [r0, #AT91_SDRAMC_TR]       @ disable SDRAM access
                        str     r3, [r0, #AT91_SDRAMC_LPR]      @ power down SDRAM
-                       str     r4, [r1]                        @ reset processor
+                       str     r4, [r1, #AT91_RSTC_CR]         @ reset processor
 
                        b       .
 
 .at91_va_base_sdramc:
        .word AT91_VA_BASE_SYS + AT91_SDRAMC0
-.at91_va_base_rstc_cr:
-       .word AT91_VA_BASE_SYS + AT91_RSTC_CR
index 7032dd32cdf0fbc207dc265958e70faeb1e32143..1cb6a96b1c1e3a7ec44cbde4129992b1f54da5eb 100644 (file)
@@ -18,7 +18,6 @@
 #include <asm/mach/map.h>
 #include <mach/at91sam9g45.h>
 #include <mach/at91_pmc.h>
-#include <mach/at91_rstc.h>
 #include <mach/cpu.h>
 
 #include "soc.h"
@@ -318,11 +317,6 @@ static struct at91_gpio_bank at91sam9g45_gpio[] __initdata = {
        }
 };
 
-static void at91sam9g45_restart(char mode, const char *cmd)
-{
-       at91_sys_write(AT91_RSTC_CR, AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
-}
-
 /* --------------------------------------------------------------------
  *  AT91SAM9G45 processor initialization
  * -------------------------------------------------------------------- */
@@ -336,6 +330,7 @@ static void __init at91sam9g45_map_io(void)
 static void __init at91sam9g45_ioremap_registers(void)
 {
        at91_ioremap_shdwc(AT91SAM9G45_BASE_SHDWC);
+       at91_ioremap_rstc(AT91SAM9G45_BASE_RSTC);
        at91sam926x_ioremap_pit(AT91SAM9G45_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9G45_BASE_SMC);
 }
diff --git a/arch/arm/mach-at91/at91sam9g45_reset.S b/arch/arm/mach-at91/at91sam9g45_reset.S
new file mode 100644 (file)
index 0000000..0468be1
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * reset AT91SAM9G45 as per errata
+ *
+ * Copyright (C) 2011 Jean-Christophe PLAGNIOL-VILLARD <plagnioj@jcosoft.com>
+ *
+ * unless the SDRAM is cleanly shutdown before we hit the
+ * reset register it can be left driving the data bus and
+ * killing the chance of a subsequent boot from NAND
+ *
+ * GPLv2 Only
+ */
+
+#include <linux/linkage.h>
+#include <mach/hardware.h>
+#include <mach/at91sam9_ddrsdr.h>
+#include <mach/at91_rstc.h>
+
+                       .arm
+
+                       .globl  at91sam9g45_restart
+
+at91sam9g45_restart:
+                       ldr     r0, .at91_va_base_sdramc0       @ preload constants
+                       ldr     r1, =at91_rstc_base
+                       ldr     r1, [r1]
+
+                       mov     r2, #1
+                       mov     r3, #AT91_DDRSDRC_LPCB_POWER_DOWN
+                       ldr     r4, =AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST
+
+                       .balign 32                              @ align to cache line
+
+                       str     r2, [r0, #AT91_DDRSDRC_RTR]     @ disable DDR0 access
+                       str     r3, [r0, #AT91_DDRSDRC_LPR]     @ power down DDR0
+                       str     r4, [r1, #AT91_RSTC_CR]         @ reset processor
+
+                       b       .
+
+.at91_va_base_sdramc0:
+       .word AT91_VA_BASE_SYS + AT91_DDRSDRC0
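
The .balign 32 above keeps the three stores within a single cache line, so no further DDR fetches are issued once the controller has been put into power-down; otherwise the dying SDRAM could keep driving the data bus and break the next NAND boot, as the file's header comment notes. A hedged C-level paraphrase of the sequence, using the register accessors that appear elsewhere in this series (the real code must stay in assembly to get the cache-line guarantee):

        /* paraphrase only -- not part of this diff */
        at91_ramc_write(0, AT91_DDRSDRC_RTR, 1);                        /* stop refresh */
        at91_ramc_write(0, AT91_DDRSDRC_LPR, AT91_DDRSDRC_LPCB_POWER_DOWN);
        at91_rstc_write(AT91_RSTC_CR,
                        AT91_RSTC_KEY | AT91_RSTC_PERRST | AT91_RSTC_PROCRST);

SoCs that need this sequence (AT91SAM9G45, AT91CAP9) point arm_pm_restart at at91sam9g45_restart from their initialize hook, as the at91cap9.c hunk above shows.
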
index d6bcb1da11dfbc004d0c59890d60fef8d3dde27f..d2c91a841cb8c5fe73b1d5a6682e9d6626cb4f74 100644 (file)
@@ -286,6 +286,7 @@ static void __init at91sam9rl_map_io(void)
 static void __init at91sam9rl_ioremap_registers(void)
 {
        at91_ioremap_shdwc(AT91SAM9RL_BASE_SHDWC);
+       at91_ioremap_rstc(AT91SAM9RL_BASE_RSTC);
        at91sam926x_ioremap_pit(AT91SAM9RL_BASE_PIT);
        at91sam9_ioremap_smc(0, AT91SAM9RL_BASE_SMC);
 }
index 4866b8180d66610d17d6a0576424e19a751995a0..594133451c0c88498d50d30b223f22341c132cb2 100644 (file)
@@ -58,7 +58,9 @@ extern void at91_irq_suspend(void);
 extern void at91_irq_resume(void);
 
 /* reset */
+extern void at91_ioremap_rstc(u32 base_addr);
 extern void at91sam9_alt_restart(char, const char *);
+extern void at91sam9g45_restart(char, const char *);
 
 /* shutdown */
 extern void at91_ioremap_shdwc(u32 base_addr);
index cbd2bf052c1f0e64937146e48ed5521f321d0c75..875fa336800ba3848b04362fe0db4caa73cb4865 100644 (file)
 #ifndef AT91_RSTC_H
 #define AT91_RSTC_H
 
-#define AT91_RSTC_CR           (AT91_RSTC + 0x00)      /* Reset Controller Control Register */
+#ifndef __ASSEMBLY__
+extern void __iomem *at91_rstc_base;
+
+#define at91_rstc_read(field) \
+       __raw_readl(at91_rstc_base + field)
+
+#define at91_rstc_write(field, value) \
+       __raw_writel(value, at91_rstc_base + field);
+#else
+.extern at91_rstc_base
+#endif
+
+#define AT91_RSTC_CR           0x00                    /* Reset Controller Control Register */
 #define                AT91_RSTC_PROCRST       (1 << 0)                /* Processor Reset */
 #define                AT91_RSTC_PERRST        (1 << 2)                /* Peripheral Reset */
 #define                AT91_RSTC_EXTRST        (1 << 3)                /* External Reset */
 #define                AT91_RSTC_KEY           (0xa5 << 24)            /* KEY Password */
 
-#define AT91_RSTC_SR           (AT91_RSTC + 0x04)      /* Reset Controller Status Register */
+#define AT91_RSTC_SR           0x04                    /* Reset Controller Status Register */
 #define                AT91_RSTC_URSTS         (1 << 0)                /* User Reset Status */
 #define                AT91_RSTC_RSTTYP        (7 << 8)                /* Reset Type */
 #define                        AT91_RSTC_RSTTYP_GENERAL        (0 << 8)
@@ -33,7 +45,7 @@
 #define                AT91_RSTC_NRSTL         (1 << 16)               /* NRST Pin Level */
 #define                AT91_RSTC_SRCMP         (1 << 17)               /* Software Reset Command in Progress */
 
-#define AT91_RSTC_MR           (AT91_RSTC + 0x08)      /* Reset Controller Mode Register */
+#define AT91_RSTC_MR           0x08                    /* Reset Controller Mode Register */
 #define                AT91_RSTC_URSTEN        (1 << 0)                /* User Reset Enable */
 #define                AT91_RSTC_URSTIEN       (1 << 4)                /* User Reset Interrupt Enable */
 #define                AT91_RSTC_ERSTL         (0xf << 8)              /* External Reset Length */
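
With AT91_RSTC_CR/SR/MR turned into plain offsets, the reset controller is now reached through the ioremapped at91_rstc_base rather than the fixed AT91_RSTC system-register window. A short usage sketch, mirroring the pm.c and restart hunks elsewhere in this diff (at91_ioremap_rstc() must have been called first so at91_rstc_base is valid):

        /* read the cause of the last reset */
        reset_type = at91_rstc_read(AT91_RSTC_SR) & AT91_RSTC_RSTTYP;

        /* request a processor + peripheral reset */
        at91_rstc_write(AT91_RSTC_CR,
                        AT91_RSTC_KEY | AT91_RSTC_PROCRST | AT91_RSTC_PERRST);
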
index 4c0e2f6011d70cc2ef93c349e70d02d9fc740065..61d952902f2b7ebe3a76ac3c1d40abe44c1f1282 100644 (file)
@@ -83,7 +83,6 @@
 #define AT91_DDRSDRC0  (0xffffe600 - AT91_BASE_SYS)
 #define AT91_MATRIX    (0xffffea00 - AT91_BASE_SYS)
 #define AT91_PMC       (0xfffffc00 - AT91_BASE_SYS)
-#define AT91_RSTC      (0xfffffd00 - AT91_BASE_SYS)
 #define AT91_GPBR      (cpu_is_at91cap9_revB() ?       \
                        (0xfffffd50 - AT91_BASE_SYS) :  \
                        (0xfffffd60 - AT91_BASE_SYS))
@@ -96,6 +95,7 @@
 #define AT91CAP9_BASE_PIOB     0xfffff400
 #define AT91CAP9_BASE_PIOC     0xfffff600
 #define AT91CAP9_BASE_PIOD     0xfffff800
+#define AT91CAP9_BASE_RSTC     0xfffffd00
 #define AT91CAP9_BASE_SHDWC    0xfffffd10
 #define AT91CAP9_BASE_RTT      0xfffffd20
 #define AT91CAP9_BASE_PIT      0xfffffd30
diff --git a/arch/arm/mach-at91/include/mach/at91cap9_ddrsdr.h b/arch/arm/mach-at91/include/mach/at91cap9_ddrsdr.h
deleted file mode 100644 (file)
index 976f4a6..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * arch/arm/mach-at91/include/mach/at91cap9_ddrsdr.h
- *
- *  (C) 2008 Andrew Victor
- *
- * DDR/SDR Controller (DDRSDRC) - System peripherals registers.
- * Based on AT91CAP9 datasheet revision B.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef AT91CAP9_DDRSDR_H
-#define AT91CAP9_DDRSDR_H
-
-#define AT91_DDRSDRC_MR                0x00    /* Mode Register */
-#define                AT91_DDRSDRC_MODE       (0xf << 0)              /* Command Mode */
-#define                        AT91_DDRSDRC_MODE_NORMAL                0
-#define                        AT91_DDRSDRC_MODE_NOP           1
-#define                        AT91_DDRSDRC_MODE_PRECHARGE     2
-#define                        AT91_DDRSDRC_MODE_LMR           3
-#define                        AT91_DDRSDRC_MODE_REFRESH       4
-#define                        AT91_DDRSDRC_MODE_EXT_LMR       5
-#define                        AT91_DDRSDRC_MODE_DEEP          6
-
-#define AT91_DDRSDRC_RTR       0x04    /* Refresh Timer Register */
-#define                AT91_DDRSDRC_COUNT      (0xfff << 0)            /* Refresh Timer Counter */
-
-#define AT91_DDRSDRC_CR                0x08    /* Configuration Register */
-#define                AT91_DDRSDRC_NC         (3 << 0)                /* Number of Column Bits */
-#define                        AT91_DDRSDRC_NC_SDR8    (0 << 0)
-#define                        AT91_DDRSDRC_NC_SDR9    (1 << 0)
-#define                        AT91_DDRSDRC_NC_SDR10   (2 << 0)
-#define                        AT91_DDRSDRC_NC_SDR11   (3 << 0)
-#define                        AT91_DDRSDRC_NC_DDR9    (0 << 0)
-#define                        AT91_DDRSDRC_NC_DDR10   (1 << 0)
-#define                        AT91_DDRSDRC_NC_DDR11   (2 << 0)
-#define                        AT91_DDRSDRC_NC_DDR12   (3 << 0)
-#define                AT91_DDRSDRC_NR         (3 << 2)                /* Number of Row Bits */
-#define                        AT91_DDRSDRC_NR_11      (0 << 2)
-#define                        AT91_DDRSDRC_NR_12      (1 << 2)
-#define                        AT91_DDRSDRC_NR_13      (2 << 2)
-#define                AT91_DDRSDRC_CAS        (7 << 4)                /* CAS Latency */
-#define                        AT91_DDRSDRC_CAS_2      (2 << 4)
-#define                        AT91_DDRSDRC_CAS_3      (3 << 4)
-#define                        AT91_DDRSDRC_CAS_25     (6 << 4)
-#define                AT91_DDRSDRC_DLL        (1 << 7)                /* Reset DLL */
-#define                AT91_DDRSDRC_DICDS      (1 << 8)                /* Output impedance control */
-
-#define AT91_DDRSDRC_T0PR      0x0C    /* Timing 0 Register */
-#define                AT91_DDRSDRC_TRAS       (0xf <<  0)             /* Active to Precharge delay */
-#define                AT91_DDRSDRC_TRCD       (0xf <<  4)             /* Row to Column delay */
-#define                AT91_DDRSDRC_TWR        (0xf <<  8)             /* Write recovery delay */
-#define                AT91_DDRSDRC_TRC        (0xf << 12)             /* Row cycle delay */
-#define                AT91_DDRSDRC_TRP        (0xf << 16)             /* Row precharge delay */
-#define                AT91_DDRSDRC_TRRD       (0xf << 20)             /* Active BankA to BankB */
-#define                AT91_DDRSDRC_TWTR       (1   << 24)             /* Internal Write to Read delay */
-#define                AT91_DDRSDRC_TMRD       (0xf << 28)             /* Load mode to active/refresh delay */
-
-#define AT91_DDRSDRC_T1PR      0x10    /* Timing 1 Register */
-#define                AT91_DDRSDRC_TRFC       (0x1f << 0)             /* Row Cycle Delay */
-#define                AT91_DDRSDRC_TXSNR      (0xff << 8)             /* Exit self-refresh to non-read */
-#define                AT91_DDRSDRC_TXSRD      (0xff << 16)            /* Exit self-refresh to read */
-#define                AT91_DDRSDRC_TXP        (0xf  << 24)            /* Exit power-down delay */
-
-#define AT91_DDRSDRC_LPR       0x18    /* Low Power Register */
-#define                AT91_DDRSDRC_LPCB               (3 << 0)        /* Low-power Configurations */
-#define                        AT91_DDRSDRC_LPCB_DISABLE               0
-#define                        AT91_DDRSDRC_LPCB_SELF_REFRESH          1
-#define                        AT91_DDRSDRC_LPCB_POWER_DOWN            2
-#define                        AT91_DDRSDRC_LPCB_DEEP_POWER_DOWN       3
-#define                AT91_DDRSDRC_CLKFR              (1 << 2)        /* Clock Frozen */
-#define                AT91_DDRSDRC_PASR               (7 << 4)        /* Partial Array Self Refresh */
-#define                AT91_DDRSDRC_TCSR               (3 << 8)        /* Temperature Compensated Self Refresh */
-#define                AT91_DDRSDRC_DS                 (3 << 10)       /* Drive Strength */
-#define                AT91_DDRSDRC_TIMEOUT            (3 << 12)       /* Time to define when Low Power Mode is enabled */
-#define                        AT91_DDRSDRC_TIMEOUT_0_CLK_CYCLES       (0 << 12)
-#define                        AT91_DDRSDRC_TIMEOUT_64_CLK_CYCLES      (1 << 12)
-#define                        AT91_DDRSDRC_TIMEOUT_128_CLK_CYCLES     (2 << 12)
-
-#define AT91_DDRSDRC_MDR       0x1C    /* Memory Device Register */
-#define                AT91_DDRSDRC_MD         (3 << 0)                /* Memory Device Type */
-#define                        AT91_DDRSDRC_MD_SDR             0
-#define                        AT91_DDRSDRC_MD_LOW_POWER_SDR   1
-#define                        AT91_DDRSDRC_MD_DDR             2
-#define                        AT91_DDRSDRC_MD_LOW_POWER_DDR   3
-
-#define AT91_DDRSDRC_DLLR      0x20    /* DLL Information Register */
-#define                AT91_DDRSDRC_MDINC      (1 << 0)                /* Master Delay increment */
-#define                AT91_DDRSDRC_MDDEC      (1 << 1)                /* Master Delay decrement */
-#define                AT91_DDRSDRC_MDOVF      (1 << 2)                /* Master Delay Overflow */
-#define                AT91_DDRSDRC_SDCOVF     (1 << 3)                /* Slave Delay Correction Overflow */
-#define                AT91_DDRSDRC_SDCUDF     (1 << 4)                /* Slave Delay Correction Underflow */
-#define                AT91_DDRSDRC_SDERF      (1 << 5)                /* Slave Delay Correction error */
-#define                AT91_DDRSDRC_MDVAL      (0xff <<  8)            /* Master Delay value */
-#define                AT91_DDRSDRC_SDVAL      (0xff << 16)            /* Slave Delay value */
-#define                AT91_DDRSDRC_SDCVAL     (0xff << 24)            /* Slave Delay Correction value */
-
-/* Register access macros */
-#define at91_ramc_read(num, reg) \
-       at91_sys_read(AT91_DDRSDRC##num + reg)
-#define at91_ramc_write(num, reg, value) \
-       at91_sys_write(AT91_DDRSDRC##num + reg, value)
-
-
-#endif
index f937c476bb67d6a584b08b122178caa0979cb405..fa5ca278adebf726a2cf849e162e33a82bc101b6 100644 (file)
@@ -83,7 +83,6 @@
 #define AT91_SDRAMC0   (0xffffea00 - AT91_BASE_SYS)
 #define AT91_MATRIX    (0xffffee00 - AT91_BASE_SYS)
 #define AT91_PMC       (0xfffffc00 - AT91_BASE_SYS)
-#define AT91_RSTC      (0xfffffd00 - AT91_BASE_SYS)
 #define AT91_GPBR      (0xfffffd50 - AT91_BASE_SYS)
 
 #define AT91SAM9260_BASE_ECC   0xffffe800
@@ -92,6 +91,7 @@
 #define AT91SAM9260_BASE_PIOA  0xfffff400
 #define AT91SAM9260_BASE_PIOB  0xfffff600
 #define AT91SAM9260_BASE_PIOC  0xfffff800
+#define AT91SAM9260_BASE_RSTC  0xfffffd00
 #define AT91SAM9260_BASE_SHDWC 0xfffffd10
 #define AT91SAM9260_BASE_RTT   0xfffffd20
 #define AT91SAM9260_BASE_PIT   0xfffffd30
index 175604e261becd42ba06aa5b7dc9150cf3eb7fb1..7cde2d36570eeee50049e54ff4a2f61ac853a69c 100644 (file)
@@ -68,7 +68,6 @@
 #define AT91_SDRAMC0   (0xffffea00 - AT91_BASE_SYS)
 #define AT91_MATRIX    (0xffffee00 - AT91_BASE_SYS)
 #define AT91_PMC       (0xfffffc00 - AT91_BASE_SYS)
-#define AT91_RSTC      (0xfffffd00 - AT91_BASE_SYS)
 #define AT91_GPBR      (0xfffffd50 - AT91_BASE_SYS)
 
 #define AT91SAM9261_BASE_SMC   0xffffec00
@@ -76,6 +75,7 @@
 #define AT91SAM9261_BASE_PIOA  0xfffff400
 #define AT91SAM9261_BASE_PIOB  0xfffff600
 #define AT91SAM9261_BASE_PIOC  0xfffff800
+#define AT91SAM9261_BASE_RSTC  0xfffffd00
 #define AT91SAM9261_BASE_SHDWC 0xfffffd10
 #define AT91SAM9261_BASE_RTT   0xfffffd20
 #define AT91SAM9261_BASE_PIT   0xfffffd30
index 80c915002d835a91e87d420efdb1c2ac422a8803..5949abda962b2affffaa44c59bfbf4e7138e610f 100644 (file)
@@ -78,7 +78,6 @@
 #define AT91_SDRAMC1   (0xffffe800 - AT91_BASE_SYS)
 #define AT91_MATRIX    (0xffffec00 - AT91_BASE_SYS)
 #define AT91_PMC       (0xfffffc00 - AT91_BASE_SYS)
-#define AT91_RSTC      (0xfffffd00 - AT91_BASE_SYS)
 #define AT91_GPBR      (0xfffffd60 - AT91_BASE_SYS)
 
 #define AT91SAM9263_BASE_ECC0  0xffffe000
@@ -91,6 +90,7 @@
 #define AT91SAM9263_BASE_PIOC  0xfffff600
 #define AT91SAM9263_BASE_PIOD  0xfffff800
 #define AT91SAM9263_BASE_PIOE  0xfffffa00
+#define AT91SAM9263_BASE_RSTC  0xfffffd00
 #define AT91SAM9263_BASE_SHDWC 0xfffffd10
 #define AT91SAM9263_BASE_RTT0  0xfffffd20
 #define AT91SAM9263_BASE_PIT   0xfffffd30
index d27b15ba8ebf81c74792c6b0cd2f5a12f33cb664..e2f8da8ce5bc81f8b0447b0529c5b9338d5c447f 100644 (file)
 #define                        AT91_DDRSDRC_CAS_25     (6 << 4)
 #define                AT91_DDRSDRC_RST_DLL    (1 << 7)                /* Reset DLL */
 #define                AT91_DDRSDRC_DICDS      (1 << 8)                /* Output impedance control */
-#define                AT91_DDRSDRC_DIS_DLL    (1 << 9)                /* Disable DLL */
-#define                AT91_DDRSDRC_OCD        (1 << 12)               /* Off-Chip Driver */
-#define                AT91_DDRSDRC_DQMS       (1 << 16)               /* Mask Data is Shared */
-#define                AT91_DDRSDRC_ACTBST     (1 << 18)               /* Active Bank X to Burst Stop Read Access Bank Y */
+#define                AT91_DDRSDRC_DIS_DLL    (1 << 9)                /* Disable DLL [SAM9 Only] */
+#define                AT91_DDRSDRC_OCD        (1 << 12)               /* Off-Chip Driver [SAM9 Only] */
+#define                AT91_DDRSDRC_DQMS       (1 << 16)               /* Mask Data is Shared [SAM9 Only] */
+#define                AT91_DDRSDRC_ACTBST     (1 << 18)               /* Active Bank X to Burst Stop Read Access Bank Y [SAM9 Only] */
 
 #define AT91_DDRSDRC_T0PR      0x0C    /* Timing 0 Register */
 #define                AT91_DDRSDRC_TRAS       (0xf <<  0)             /* Active to Precharge delay */
@@ -59,7 +59,8 @@
 #define                AT91_DDRSDRC_TRP        (0xf << 16)             /* Row precharge delay */
 #define                AT91_DDRSDRC_TRRD       (0xf << 20)             /* Active BankA to BankB */
 #define                AT91_DDRSDRC_TWTR       (0x7 << 24)             /* Internal Write to Read delay */
-#define                AT91_DDRSDRC_RED_WRRD   (0x1 << 27)             /* Reduce Write to Read Delay */
+#define                AT91CAP9_DDRSDRC_TWTR   (1   << 24)             /* Internal Write to Read delay */
+#define                AT91_DDRSDRC_RED_WRRD   (0x1 << 27)             /* Reduce Write to Read Delay [SAM9 Only] */
 #define                AT91_DDRSDRC_TMRD       (0xf << 28)             /* Load mode to active/refresh delay */
 
 #define AT91_DDRSDRC_T1PR      0x10    /* Timing 1 Register */
 #define                AT91_DDRSDRC_TXSRD      (0xff << 16)            /* Exit self-refresh to read */
 #define                AT91_DDRSDRC_TXP        (0xf  << 24)            /* Exit power-down delay */
 
-#define AT91_DDRSDRC_T2PR      0x14    /* Timing 2 Register */
+#define AT91_DDRSDRC_T2PR      0x14    /* Timing 2 Register [SAM9 Only] */
 #define                AT91_DDRSDRC_TXARD      (0xf  << 0)             /* Exit active power down delay to read command in mode "Fast Exit" */
 #define                AT91_DDRSDRC_TXARDS     (0xf  << 4)             /* Exit active power down delay to read command in mode "Slow Exit" */
 #define                AT91_DDRSDRC_TRPA       (0xf  << 8)             /* Row Precharge All delay */
 #define                AT91_DDRSDRC_TRTP       (0x7  << 12)            /* Read to Precharge delay */
 
 #define AT91_DDRSDRC_LPR       0x1C    /* Low Power Register */
+#define AT91CAP9_DDRSDRC_LPR   0x18    /* Low Power Register */
 #define                AT91_DDRSDRC_LPCB       (3 << 0)                /* Low-power Configurations */
 #define                        AT91_DDRSDRC_LPCB_DISABLE               0
 #define                        AT91_DDRSDRC_LPCB_SELF_REFRESH          1
 #define                AT91_DDRSDRC_UPD_MR     (3 << 20)        /* Update load mode register and extended mode register */
 
 #define AT91_DDRSDRC_MDR       0x20    /* Memory Device Register */
+#define AT91CAP9_DDRSDRC_MDR   0x1C    /* Memory Device Register */
 #define                AT91_DDRSDRC_MD         (3 << 0)                /* Memory Device Type */
 #define                        AT91_DDRSDRC_MD_SDR             0
 #define                        AT91_DDRSDRC_MD_LOW_POWER_SDR   1
+#define                        AT91CAP9_DDRSDRC_MD_DDR         2
 #define                        AT91_DDRSDRC_MD_LOW_POWER_DDR   3
-#define                        AT91_DDRSDRC_MD_DDR2            6
+#define                        AT91_DDRSDRC_MD_DDR2            6       /* [SAM9 Only] */
 #define                AT91_DDRSDRC_DBW        (1 << 4)                /* Data Bus Width */
 #define                        AT91_DDRSDRC_DBW_32BITS         (0 <<  4)
 #define                        AT91_DDRSDRC_DBW_16BITS         (1 <<  4)
 
 #define AT91_DDRSDRC_DLL       0x24    /* DLL Information Register */
+#define AT91CAP9_DDRSDRC_DLL   0x20    /* DLL Information Register */
 #define                AT91_DDRSDRC_MDINC      (1 << 0)                /* Master Delay increment */
 #define                AT91_DDRSDRC_MDDEC      (1 << 1)                /* Master Delay decrement */
 #define                AT91_DDRSDRC_MDOVF      (1 << 2)                /* Master Delay Overflow */
+#define                AT91CAP9_DDRSDRC_SDCOVF (1 << 3)                /* Slave Delay Correction Overflow */
+#define                AT91CAP9_DDRSDRC_SDCUDF (1 << 4)                /* Slave Delay Correction Underflow */
+#define                AT91CAP9_DDRSDRC_SDERF  (1 << 5)                /* Slave Delay Correction error */
 #define                AT91_DDRSDRC_MDVAL      (0xff <<  8)            /* Master Delay value */
+#define                AT91CAP9_DDRSDRC_SDVAL  (0xff << 16)            /* Slave Delay value */
+#define                AT91CAP9_DDRSDRC_SDCVAL (0xff << 24)            /* Slave Delay Correction value */
 
-#define AT91_DDRSDRC_HS                0x2C    /* High Speed Register */
+#define AT91_DDRSDRC_HS                0x2C    /* High Speed Register [SAM9 Only] */
 #define                AT91_DDRSDRC_DIS_ATCP_RD        (1 << 2)        /* Anticip read access is disabled */
 
 #define AT91_DDRSDRC_DELAY(n)  (0x30 + (0x4 * (n)))    /* Delay I/O Register n */
 
-#define AT91_DDRSDRC_WPMR      0xE4    /* Write Protect Mode Register */
+#define AT91_DDRSDRC_WPMR      0xE4    /* Write Protect Mode Register [SAM9 Only] */
 #define                AT91_DDRSDRC_WP         (1 << 0)                /* Write protect enable */
 #define                AT91_DDRSDRC_WPKEY      (0xffffff << 8)         /* Write protect key */
 #define                AT91_DDRSDRC_KEY        (0x444452 << 8)         /* Write protect key = "DDR" */
 
-#define AT91_DDRSDRC_WPSR      0xE8    /* Write Protect Status Register */
+#define AT91_DDRSDRC_WPSR      0xE8    /* Write Protect Status Register [SAM9 Only] */
 #define                AT91_DDRSDRC_WPVS       (1 << 0)                /* Write protect violation status */
 #define                AT91_DDRSDRC_WPVSRC     (0xffff << 8)           /* Write protect violation source */
 
index f0c23c960dece748b5453e735c66e710c205a54d..dd9c95ea0862d7233c4d669b2262c05cd6d3021d 100644 (file)
@@ -90,7 +90,6 @@
 #define AT91_DDRSDRC0  (0xffffe600 - AT91_BASE_SYS)
 #define AT91_MATRIX    (0xffffea00 - AT91_BASE_SYS)
 #define AT91_PMC       (0xfffffc00 - AT91_BASE_SYS)
-#define AT91_RSTC      (0xfffffd00 - AT91_BASE_SYS)
 #define AT91_GPBR      (0xfffffd60 - AT91_BASE_SYS)
 
 #define AT91SAM9G45_BASE_ECC   0xffffe200
 #define AT91SAM9G45_BASE_PIOC  0xfffff600
 #define AT91SAM9G45_BASE_PIOD  0xfffff800
 #define AT91SAM9G45_BASE_PIOE  0xfffffa00
+#define AT91SAM9G45_BASE_RSTC  0xfffffd00
 #define AT91SAM9G45_BASE_SHDWC 0xfffffd10
 #define AT91SAM9G45_BASE_RTT   0xfffffd20
 #define AT91SAM9G45_BASE_PIT   0xfffffd30
index 2bb359e60b97f5b71367ac3ffe832b255f90be3d..d7bead7118da85873c4ef43388432ce80f24a9f1 100644 (file)
@@ -72,7 +72,6 @@
 #define AT91_SDRAMC0   (0xffffea00 - AT91_BASE_SYS)
 #define AT91_MATRIX    (0xffffee00 - AT91_BASE_SYS)
 #define AT91_PMC       (0xfffffc00 - AT91_BASE_SYS)
-#define AT91_RSTC      (0xfffffd00 - AT91_BASE_SYS)
 #define AT91_SCKCR     (0xfffffd50 - AT91_BASE_SYS)
 #define AT91_GPBR      (0xfffffd60 - AT91_BASE_SYS)
 
@@ -84,6 +83,7 @@
 #define AT91SAM9RL_BASE_PIOB   0xfffff600
 #define AT91SAM9RL_BASE_PIOC   0xfffff800
 #define AT91SAM9RL_BASE_PIOD   0xfffffa00
+#define AT91SAM9RL_BASE_RSTC   0xfffffd00
 #define AT91SAM9RL_BASE_SHDWC  0xfffffd10
 #define AT91SAM9RL_BASE_RTT    0xfffffd20
 #define AT91SAM9RL_BASE_PIT    0xfffffd30
index d0b377b21bd7d76da84a19db3c05c6d7304e62c0..3b33f07b1e1189ab4a49644741715731b095461f 100644 (file)
@@ -88,7 +88,7 @@ extern void __init at91_add_device_eth(struct macb_platform_data *data);
 struct at91_usbh_data {
        u8              ports;          /* number of ports on root hub */
        int             vbus_pin[2];    /* port power-control pin */
-       u8              vbus_pin_inverted;
+       u8              vbus_pin_active_low[2];
        u8              overcurrent_supported;
        int             overcurrent_pin[2];
        u8              overcurrent_status[2];
index 62ad95556c367f7ab7f0311428b6475070aeab02..1606379ac28462dd33f31ba8e0756a9d6ec4d21a 100644 (file)
@@ -34,7 +34,6 @@
 /*
  * Show the reason for the previous system reset.
  */
-#if defined(AT91_RSTC)
 
 #include <mach/at91_rstc.h>
 #include <mach/at91_shdwc.h>
@@ -58,10 +57,10 @@ static void __init show_reset_status(void)
        char *reason, *r2 = reset;
        u32 reset_type, wake_type;
 
-       if (!at91_shdwc_base)
+       if (!at91_shdwc_base || !at91_rstc_base)
                return;
 
-       reset_type = at91_sys_read(AT91_RSTC_SR) & AT91_RSTC_RSTTYP;
+       reset_type = at91_rstc_read(AT91_RSTC_SR) & AT91_RSTC_RSTTYP;
        wake_type = at91_shdwc_read(AT91_SHDW_SR);
 
        switch (reset_type) {
@@ -102,10 +101,6 @@ static void __init show_reset_status(void)
        }
        pr_info("AT91: Starting after %s %s\n", reason, r2);
 }
-#else
-static void __init show_reset_status(void) {}
-#endif
-
 
 static int at91_pm_valid_state(suspend_state_t state)
 {
index ce9a206991111672d6d1a0a235855c8849adf842..7eb40d24242f2ae54fbd9ab8197d6166f98381eb 100644 (file)
@@ -25,21 +25,21 @@ static inline u32 sdram_selfrefresh_enable(void)
                                                                : : "r" (0))
 
 #elif defined(CONFIG_ARCH_AT91CAP9)
-#include <mach/at91cap9_ddrsdr.h>
+#include <mach/at91sam9_ddrsdr.h>
 
 
 static inline u32 sdram_selfrefresh_enable(void)
 {
        u32 saved_lpr, lpr;
 
-       saved_lpr = at91_ramc_read(0, AT91_DDRSDRC_LPR);
+       saved_lpr = at91_ramc_read(0, AT91CAP9_DDRSDRC_LPR);
 
        lpr = saved_lpr & ~AT91_DDRSDRC_LPCB;
-       at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr | AT91_DDRSDRC_LPCB_SELF_REFRESH);
+       at91_ramc_write(0, AT91CAP9_DDRSDRC_LPR, lpr | AT91_DDRSDRC_LPCB_SELF_REFRESH);
        return saved_lpr;
 }
 
-#define sdram_selfrefresh_disable(saved_lpr)   at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr)
+#define sdram_selfrefresh_disable(saved_lpr)   at91_ramc_write(0, AT91CAP9_DDRSDRC_LPR, saved_lpr)
 #define wait_for_interrupt_enable()            cpu_do_idle()
 
 #elif defined(CONFIG_ARCH_AT91SAM9G45)
index f7922a436172a063bf188af5fc2b24cad4ab148e..92dfb8461392a66541a1370de2c7dce11c03a039 100644 (file)
@@ -18,9 +18,8 @@
 
 #if defined(CONFIG_ARCH_AT91RM9200)
 #include <mach/at91rm9200_mc.h>
-#elif defined(CONFIG_ARCH_AT91CAP9)
-#include <mach/at91cap9_ddrsdr.h>
-#elif defined(CONFIG_ARCH_AT91SAM9G45)
+#elif defined(CONFIG_ARCH_AT91CAP9) \
+       || defined(CONFIG_ARCH_AT91SAM9G45)
 #include <mach/at91sam9_ddrsdr.h>
 #else
 #include <mach/at91sam9_sdramc.h>
index 8bdcc3cb6012bf723ab46c27ca44f63d81d22da9..69d3fc4c46f372ff99c2468f5e8eda6cc110bfd5 100644 (file)
@@ -29,9 +29,12 @@ EXPORT_SYMBOL(at91_soc_initdata);
 void __init at91rm9200_set_type(int type)
 {
        if (type == ARCH_REVISON_9200_PQFP)
-               at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA;
-       else
                at91_soc_initdata.subtype = AT91_SOC_RM9200_PQFP;
+       else
+               at91_soc_initdata.subtype = AT91_SOC_RM9200_BGA;
+
+       pr_info("AT91: filled in soc subtype: %s\n",
+               at91_get_soc_subtype(&at91_soc_initdata));
 }
 
 void __init at91_init_irq_default(void)
@@ -281,6 +284,15 @@ void __init at91_ioremap_shdwc(u32 base_addr)
        pm_power_off = at91sam9_poweroff;
 }
 
+void __iomem *at91_rstc_base;
+
+void __init at91_ioremap_rstc(u32 base_addr)
+{
+       at91_rstc_base = ioremap(base_addr, 16);
+       if (!at91_rstc_base)
+               panic("Impossible to ioremap at91_rstc_base\n");
+}
+
 void __init at91_initialize(unsigned long main_clock)
 {
        at91_boot_soc.ioremap_registers();
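
at91_ioremap_rstc() maps 16 bytes at the given physical base, enough to cover the CR (0x00), SR (0x04) and MR (0x08) registers used through at91_rstc_read()/at91_rstc_write(), and panics if the mapping fails. Each SoC calls it from its ioremap_registers hook with its own RSTC base, as the earlier hunks show; for example (sketch only):

        at91_ioremap_rstc(AT91SAM9260_BASE_RSTC);       /* 0xfffffd00 */
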
index 9e5e7552498c362c7880b76d3e820640cddfc555..45c97b1ee9b1d59ba923a69b50e94337b48da993 100644 (file)
@@ -194,6 +194,6 @@ MACHINE_START(BCMRING, "BCMRING")
        .init_early = bcmring_init_early,
        .init_irq = bcmring_init_irq,
        .timer = &bcmring_timer,
-       .init_machine = bcmring_init_machine
+       .init_machine = bcmring_init_machine,
        .restart = bcmring_restart,
 MACHINE_END
index 1a1a27dd56544d0684bebc29d40b69bdff2d190f..1024396797e16f6c839383da409731bfaef494d3 100644 (file)
 
 #include <mach/timer.h>
 
-#include <linux/mm.h>
 #include <linux/pfn.h>
 #include <linux/atomic.h>
 #include <linux/sched.h>
 #include <mach/dma.h>
 
-/* I don't quite understand why dc4 fails when this is set to 1 and DMA is enabled */
-/* especially since dc4 doesn't use kmalloc'd memory. */
-
-#define ALLOW_MAP_OF_KMALLOC_MEMORY 0
-
 /* ---- Public Variables ------------------------------------------------- */
 
 /* ---- Private Constants and Types -------------------------------------- */
 #define CONTROLLER_FROM_HANDLE(handle)    (((handle) >> 4) & 0x0f)
 #define CHANNEL_FROM_HANDLE(handle)       ((handle) & 0x0f)
 
-#define DMA_MAP_DEBUG   0
-
-#if DMA_MAP_DEBUG
-#   define  DMA_MAP_PRINT(fmt, args...)   printk("%s: " fmt, __func__,  ## args)
-#else
-#   define  DMA_MAP_PRINT(fmt, args...)
-#endif
 
 /* ---- Private Variables ------------------------------------------------ */
 
 static DMA_Global_t gDMA;
 static struct proc_dir_entry *gDmaDir;
 
-static atomic_t gDmaStatMemTypeKmalloc = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeVmalloc = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeUser = ATOMIC_INIT(0);
-static atomic_t gDmaStatMemTypeCoherent = ATOMIC_INIT(0);
-
 #include "dma_device.c"
 
 /* ---- Private Function Prototypes -------------------------------------- */
 
 /* ---- Functions  ------------------------------------------------------- */
 
-/****************************************************************************/
-/**
-*   Displays information for /proc/dma/mem-type
-*/
-/****************************************************************************/
-
-static int dma_proc_read_mem_type(char *buf, char **start, off_t offset,
-                                 int count, int *eof, void *data)
-{
-       int len = 0;
-
-       len += sprintf(buf + len, "dma_map_mem statistics\n");
-       len +=
-           sprintf(buf + len, "coherent: %d\n",
-                   atomic_read(&gDmaStatMemTypeCoherent));
-       len +=
-           sprintf(buf + len, "kmalloc:  %d\n",
-                   atomic_read(&gDmaStatMemTypeKmalloc));
-       len +=
-           sprintf(buf + len, "vmalloc:  %d\n",
-                   atomic_read(&gDmaStatMemTypeVmalloc));
-       len +=
-           sprintf(buf + len, "user:     %d\n",
-                   atomic_read(&gDmaStatMemTypeUser));
-
-       return len;
-}
-
 /****************************************************************************/
 /**
 *   Displays information for /proc/dma/channels
@@ -846,8 +800,6 @@ int dma_init(void)
                                       dma_proc_read_channels, NULL);
                create_proc_read_entry("devices", 0, gDmaDir,
                                       dma_proc_read_devices, NULL);
-               create_proc_read_entry("mem-type", 0, gDmaDir,
-                                      dma_proc_read_mem_type, NULL);
        }
 
 out:
@@ -1565,767 +1517,3 @@ int dma_set_device_handler(DMA_Device_t dev,    /* Device to set the callback for.
 }
 
 EXPORT_SYMBOL(dma_set_device_handler);
-
-/****************************************************************************/
-/**
-*   Initializes a memory mapping structure
-*/
-/****************************************************************************/
-
-int dma_init_mem_map(DMA_MemMap_t *memMap)
-{
-       memset(memMap, 0, sizeof(*memMap));
-
-       sema_init(&memMap->lock, 1);
-
-       return 0;
-}
-
-EXPORT_SYMBOL(dma_init_mem_map);
-
-/****************************************************************************/
-/**
-*   Releases any memory currently being held by a memory mapping structure.
-*/
-/****************************************************************************/
-
-int dma_term_mem_map(DMA_MemMap_t *memMap)
-{
-       down(&memMap->lock);    /* Just being paranoid */
-
-       /* Free up any allocated memory */
-
-       up(&memMap->lock);
-       memset(memMap, 0, sizeof(*memMap));
-
-       return 0;
-}
-
-EXPORT_SYMBOL(dma_term_mem_map);
-
-/****************************************************************************/
-/**
-*   Looks at a memory address and categorizes it.
-*
-*   @return One of the values from the DMA_MemType_t enumeration.
-*/
-/****************************************************************************/
-
-DMA_MemType_t dma_mem_type(void *addr)
-{
-       unsigned long addrVal = (unsigned long)addr;
-
-       if (addrVal >= CONSISTENT_BASE) {
-               /* NOTE: DMA virtual memory space starts at 0xFFxxxxxx */
-
-               /* dma_alloc_xxx pages are physically and virtually contiguous */
-
-               return DMA_MEM_TYPE_DMA;
-       }
-
-       /* Technically, we could add one more classification. Addresses between VMALLOC_END */
-       /* and the beginning of the DMA virtual address could be considered to be I/O space. */
-       /* Right now, nobody cares about this particular classification, so we ignore it. */
-
-       if (is_vmalloc_addr(addr)) {
-               /* Address comes from the vmalloc'd region. Pages are virtually */
-               /* contiguous but NOT physically contiguous */
-
-               return DMA_MEM_TYPE_VMALLOC;
-       }
-
-       if (addrVal >= PAGE_OFFSET) {
-               /* PAGE_OFFSET is typically 0xC0000000 */
-
-               /* kmalloc'd pages are physically contiguous */
-
-               return DMA_MEM_TYPE_KMALLOC;
-       }
-
-       return DMA_MEM_TYPE_USER;
-}
-
-EXPORT_SYMBOL(dma_mem_type);
-
-/****************************************************************************/
-/**
-*   Looks at a memory address and determines if we support DMA'ing to/from
-*   that type of memory.
-*
-*   @return boolean -
-*               return value != 0 means dma supported
-*               return value == 0 means dma not supported
-*/
-/****************************************************************************/
-
-int dma_mem_supports_dma(void *addr)
-{
-       DMA_MemType_t memType = dma_mem_type(addr);
-
-       return (memType == DMA_MEM_TYPE_DMA)
-#if ALLOW_MAP_OF_KMALLOC_MEMORY
-           || (memType == DMA_MEM_TYPE_KMALLOC)
-#endif
-           || (memType == DMA_MEM_TYPE_USER);
-}
-
-EXPORT_SYMBOL(dma_mem_supports_dma);
-
-/****************************************************************************/
-/**
-*   Maps in a memory region such that it can be used for performing a DMA.
-*
-*   @return
-*/
-/****************************************************************************/
-
-int dma_map_start(DMA_MemMap_t *memMap,        /* Stores state information about the map */
-                 enum dma_data_direction dir   /* Direction that the mapping will be going */
-    ) {
-       int rc;
-
-       down(&memMap->lock);
-
-       DMA_MAP_PRINT("memMap: %p\n", memMap);
-
-       if (memMap->inUse) {
-               printk(KERN_ERR "%s: memory map %p is already being used\n",
-                      __func__, memMap);
-               rc = -EBUSY;
-               goto out;
-       }
-
-       memMap->inUse = 1;
-       memMap->dir = dir;
-       memMap->numRegionsUsed = 0;
-
-       rc = 0;
-
-out:
-
-       DMA_MAP_PRINT("returning %d", rc);
-
-       up(&memMap->lock);
-
-       return rc;
-}
-
-EXPORT_SYMBOL(dma_map_start);
-
-/****************************************************************************/
-/**
-*   Adds a segment of memory to a memory map. Each segment is both
-*   physically and virtually contiguous.
-*
-*   @return     0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-static int dma_map_add_segment(DMA_MemMap_t *memMap,   /* Stores state information about the map */
-                              DMA_Region_t *region,    /* Region that the segment belongs to */
-                              void *virtAddr,  /* Virtual address of the segment being added */
-                              dma_addr_t physAddr,     /* Physical address of the segment being added */
-                              size_t numBytes  /* Number of bytes of the segment being added */
-    ) {
-       DMA_Segment_t *segment;
-
-       DMA_MAP_PRINT("memMap:%p va:%p pa:0x%x #:%d\n", memMap, virtAddr,
-                     physAddr, numBytes);
-
-       /* Sanity check */
-
-       if (((unsigned long)virtAddr < (unsigned long)region->virtAddr)
-           || (((unsigned long)virtAddr + numBytes)) >
-           ((unsigned long)region->virtAddr + region->numBytes)) {
-               printk(KERN_ERR
-                      "%s: virtAddr %p is outside region @ %p len: %d\n",
-                      __func__, virtAddr, region->virtAddr, region->numBytes);
-               return -EINVAL;
-       }
-
-       if (region->numSegmentsUsed > 0) {
-               /* Check to see if this segment is physically contiguous with the previous one */
-
-               segment = &region->segment[region->numSegmentsUsed - 1];
-
-               if ((segment->physAddr + segment->numBytes) == physAddr) {
-                       /* It is - just add on to the end */
-
-                       DMA_MAP_PRINT("appending %d bytes to last segment\n",
-                                     numBytes);
-
-                       segment->numBytes += numBytes;
-
-                       return 0;
-               }
-       }
-
-       /* Reallocate to hold more segments, if required. */
-
-       if (region->numSegmentsUsed >= region->numSegmentsAllocated) {
-               DMA_Segment_t *newSegment;
-               size_t oldSize =
-                   region->numSegmentsAllocated * sizeof(*newSegment);
-               int newAlloc = region->numSegmentsAllocated + 4;
-               size_t newSize = newAlloc * sizeof(*newSegment);
-
-               newSegment = kmalloc(newSize, GFP_KERNEL);
-               if (newSegment == NULL) {
-                       return -ENOMEM;
-               }
-               memcpy(newSegment, region->segment, oldSize);
-               memset(&((uint8_t *) newSegment)[oldSize], 0,
-                      newSize - oldSize);
-               kfree(region->segment);
-
-               region->numSegmentsAllocated = newAlloc;
-               region->segment = newSegment;
-       }
-
-       segment = &region->segment[region->numSegmentsUsed];
-       region->numSegmentsUsed++;
-
-       segment->virtAddr = virtAddr;
-       segment->physAddr = physAddr;
-       segment->numBytes = numBytes;
-
-       DMA_MAP_PRINT("returning success\n");
-
-       return 0;
-}
-
-/****************************************************************************/
-/**
-*   Adds a region of memory to a memory map. Each region is virtually
-*   contiguous, but not necessarily physically contiguous.
-*
-*   @return     0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_add_region(DMA_MemMap_t *memMap,   /* Stores state information about the map */
-                      void *mem,       /* Virtual address that we want to get a map of */
-                      size_t numBytes  /* Number of bytes being mapped */
-    ) {
-       unsigned long addr = (unsigned long)mem;
-       unsigned int offset;
-       int rc = 0;
-       DMA_Region_t *region;
-       dma_addr_t physAddr;
-
-       down(&memMap->lock);
-
-       DMA_MAP_PRINT("memMap:%p va:%p #:%d\n", memMap, mem, numBytes);
-
-       if (!memMap->inUse) {
-               printk(KERN_ERR "%s: Make sure you call dma_map_start first\n",
-                      __func__);
-               rc = -EINVAL;
-               goto out;
-       }
-
-       /* Reallocate to hold more regions. */
-
-       if (memMap->numRegionsUsed >= memMap->numRegionsAllocated) {
-               DMA_Region_t *newRegion;
-               size_t oldSize =
-                   memMap->numRegionsAllocated * sizeof(*newRegion);
-               int newAlloc = memMap->numRegionsAllocated + 4;
-               size_t newSize = newAlloc * sizeof(*newRegion);
-
-               newRegion = kmalloc(newSize, GFP_KERNEL);
-               if (newRegion == NULL) {
-                       rc = -ENOMEM;
-                       goto out;
-               }
-               memcpy(newRegion, memMap->region, oldSize);
-               memset(&((uint8_t *) newRegion)[oldSize], 0, newSize - oldSize);
-
-               kfree(memMap->region);
-
-               memMap->numRegionsAllocated = newAlloc;
-               memMap->region = newRegion;
-       }
-
-       region = &memMap->region[memMap->numRegionsUsed];
-       memMap->numRegionsUsed++;
-
-       offset = addr & ~PAGE_MASK;
-
-       region->memType = dma_mem_type(mem);
-       region->virtAddr = mem;
-       region->numBytes = numBytes;
-       region->numSegmentsUsed = 0;
-       region->numLockedPages = 0;
-       region->lockedPages = NULL;
-
-       switch (region->memType) {
-       case DMA_MEM_TYPE_VMALLOC:
-               {
-                       atomic_inc(&gDmaStatMemTypeVmalloc);
-
-                       /* printk(KERN_ERR "%s: vmalloc'd pages are not supported\n", __func__); */
-
-                       /* vmalloc'd pages are not physically contiguous */
-
-                       rc = -EINVAL;
-                       break;
-               }
-
-       case DMA_MEM_TYPE_KMALLOC:
-               {
-                       atomic_inc(&gDmaStatMemTypeKmalloc);
-
-                       /* kmalloc'd pages are physically contiguous, so they'll have exactly */
-                       /* one segment */
-
-#if ALLOW_MAP_OF_KMALLOC_MEMORY
-                       physAddr =
-                           dma_map_single(NULL, mem, numBytes, memMap->dir);
-                       rc = dma_map_add_segment(memMap, region, mem, physAddr,
-                                                numBytes);
-#else
-                       rc = -EINVAL;
-#endif
-                       break;
-               }
-
-       case DMA_MEM_TYPE_DMA:
-               {
-                       /* dma_alloc_xxx pages are physically contiguous */
-
-                       atomic_inc(&gDmaStatMemTypeCoherent);
-
-                       physAddr = (vmalloc_to_pfn(mem) << PAGE_SHIFT) + offset;
-
-                       dma_sync_single_for_cpu(NULL, physAddr, numBytes,
-                                               memMap->dir);
-                       rc = dma_map_add_segment(memMap, region, mem, physAddr,
-                                                numBytes);
-                       break;
-               }
-
-       case DMA_MEM_TYPE_USER:
-               {
-                       size_t firstPageOffset;
-                       size_t firstPageSize;
-                       struct page **pages;
-                       struct task_struct *userTask;
-
-                       atomic_inc(&gDmaStatMemTypeUser);
-
-#if 1
-                       /* If the pages are user pages, then the dma_mem_map_set_user_task function */
-                       /* must have been previously called. */
-
-                       if (memMap->userTask == NULL) {
-                               printk(KERN_ERR
-                                      "%s: must call dma_mem_map_set_user_task when using user-mode memory\n",
-                                      __func__);
-                               return -EINVAL;
-                       }
-
-                       /* User pages need to be locked. */
-
-                       firstPageOffset =
-                           (unsigned long)region->virtAddr & (PAGE_SIZE - 1);
-                       firstPageSize = PAGE_SIZE - firstPageOffset;
-
-                       region->numLockedPages = (firstPageOffset
-                                                 + region->numBytes +
-                                                 PAGE_SIZE - 1) / PAGE_SIZE;
-                       pages =
-                           kmalloc(region->numLockedPages *
-                                   sizeof(struct page *), GFP_KERNEL);
-
-                       if (pages == NULL) {
-                               region->numLockedPages = 0;
-                               return -ENOMEM;
-                       }
-
-                       userTask = memMap->userTask;
-
-                       down_read(&userTask->mm->mmap_sem);
-                       rc = get_user_pages(userTask,   /* task */
-                                           userTask->mm,       /* mm */
-                                           (unsigned long)region->virtAddr,    /* start */
-                                           region->numLockedPages,     /* len */
-                                           memMap->dir == DMA_FROM_DEVICE,     /* write */
-                                           0,  /* force */
-                                           pages,      /* pages (array of pointers to page) */
-                                           NULL);      /* vmas */
-                       up_read(&userTask->mm->mmap_sem);
-
-                       if (rc != region->numLockedPages) {
-                               kfree(pages);
-                               region->numLockedPages = 0;
-
-                               if (rc >= 0) {
-                                       rc = -EINVAL;
-                               }
-                       } else {
-                               uint8_t *virtAddr = region->virtAddr;
-                               size_t bytesRemaining;
-                               int pageIdx;
-
-                               rc = 0; /* Since get_user_pages returns +ve number */
-
-                               region->lockedPages = pages;
-
-                               /* We've locked the user pages. Now we need to walk them and figure */
-                               /* out the physical addresses. */
-
-                               /* The first page may be partial */
-
-                               dma_map_add_segment(memMap,
-                                                   region,
-                                                   virtAddr,
-                                                   PFN_PHYS(page_to_pfn
-                                                            (pages[0])) +
-                                                   firstPageOffset,
-                                                   firstPageSize);
-
-                               virtAddr += firstPageSize;
-                               bytesRemaining =
-                                   region->numBytes - firstPageSize;
-
-                               for (pageIdx = 1;
-                                    pageIdx < region->numLockedPages;
-                                    pageIdx++) {
-                                       size_t bytesThisPage =
-                                           (bytesRemaining >
-                                            PAGE_SIZE ? PAGE_SIZE :
-                                            bytesRemaining);
-
-                                       DMA_MAP_PRINT
-                                           ("pageIdx:%d pages[pageIdx]=%p pfn=%u phys=%u\n",
-                                            pageIdx, pages[pageIdx],
-                                            page_to_pfn(pages[pageIdx]),
-                                            PFN_PHYS(page_to_pfn
-                                                     (pages[pageIdx])));
-
-                                       dma_map_add_segment(memMap,
-                                                           region,
-                                                           virtAddr,
-                                                           PFN_PHYS(page_to_pfn
-                                                                    (pages
-                                                                     [pageIdx])),
-                                                           bytesThisPage);
-
-                                       virtAddr += bytesThisPage;
-                                       bytesRemaining -= bytesThisPage;
-                               }
-                       }
-#else
-                       printk(KERN_ERR
-                              "%s: User mode pages are not yet supported\n",
-                              __func__);
-
-                       /* user pages are not physically contiguous */
-
-                       rc = -EINVAL;
-#endif
-                       break;
-               }
-
-       default:
-               {
-                       printk(KERN_ERR "%s: Unsupported memory type: %d\n",
-                              __func__, region->memType);
-
-                       rc = -EINVAL;
-                       break;
-               }
-       }
-
-       if (rc != 0) {
-               memMap->numRegionsUsed--;
-       }
-
-out:
-
-       DMA_MAP_PRINT("returning %d\n", rc);
-
-       up(&memMap->lock);
-
-       return rc;
-}
-
-EXPORT_SYMBOL(dma_map_add_segment);
-
-/****************************************************************************/
-/**
-*   Maps in a memory region such that it can be used for performing a DMA.
-*
-*   @return     0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_mem(DMA_MemMap_t *memMap,  /* Stores state information about the map */
-               void *mem,      /* Virtual address that we want to get a map of */
-               size_t numBytes,        /* Number of bytes being mapped */
-               enum dma_data_direction dir     /* Direction that the mapping will be going */
-    ) {
-       int rc;
-
-       rc = dma_map_start(memMap, dir);
-       if (rc == 0) {
-               rc = dma_map_add_region(memMap, mem, numBytes);
-               if (rc < 0) {
-                       /* Since the add fails, this function will fail, and the caller won't */
-                       /* call unmap, so we need to do it here. */
-
-                       dma_unmap(memMap, 0);
-               }
-       }
-
-       return rc;
-}
-
-EXPORT_SYMBOL(dma_map_mem);
-
-/****************************************************************************/
-/**
-*   Set up a descriptor ring for a given memory map.
-*
-*   It is assumed that the descriptor ring has already been initialized, and
-*   this routine will only reallocate a new descriptor ring if the existing
-*   one is too small.
-*
-*   @return     0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_create_descriptor_ring(DMA_Device_t dev,   /* DMA device (where the ring is stored) */
-                                  DMA_MemMap_t *memMap,        /* Memory map that will be used */
-                                  dma_addr_t devPhysAddr       /* Physical address of device */
-    ) {
-       int rc;
-       int numDescriptors;
-       DMA_DeviceAttribute_t *devAttr;
-       DMA_Region_t *region;
-       DMA_Segment_t *segment;
-       dma_addr_t srcPhysAddr;
-       dma_addr_t dstPhysAddr;
-       int regionIdx;
-       int segmentIdx;
-
-       devAttr = &DMA_gDeviceAttribute[dev];
-
-       down(&memMap->lock);
-
-       /* Figure out how many descriptors we need */
-
-       numDescriptors = 0;
-       for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
-               region = &memMap->region[regionIdx];
-
-               for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
-                    segmentIdx++) {
-                       segment = &region->segment[segmentIdx];
-
-                       if (memMap->dir == DMA_TO_DEVICE) {
-                               srcPhysAddr = segment->physAddr;
-                               dstPhysAddr = devPhysAddr;
-                       } else {
-                               srcPhysAddr = devPhysAddr;
-                               dstPhysAddr = segment->physAddr;
-                       }
-
-                       rc =
-                            dma_calculate_descriptor_count(dev, srcPhysAddr,
-                                                           dstPhysAddr,
-                                                           segment->
-                                                           numBytes);
-                       if (rc < 0) {
-                               printk(KERN_ERR
-                                      "%s: dma_calculate_descriptor_count failed: %d\n",
-                                      __func__, rc);
-                               goto out;
-                       }
-                       numDescriptors += rc;
-               }
-       }
-
-       /* Adjust the size of the ring, if it isn't big enough */
-
-       if (numDescriptors > devAttr->ring.descriptorsAllocated) {
-               dma_free_descriptor_ring(&devAttr->ring);
-               rc =
-                    dma_alloc_descriptor_ring(&devAttr->ring,
-                                              numDescriptors);
-               if (rc < 0) {
-                       printk(KERN_ERR
-                              "%s: dma_alloc_descriptor_ring failed: %d\n",
-                              __func__, rc);
-                       goto out;
-               }
-       } else {
-               rc =
-                    dma_init_descriptor_ring(&devAttr->ring,
-                                             numDescriptors);
-               if (rc < 0) {
-                       printk(KERN_ERR
-                              "%s: dma_init_descriptor_ring failed: %d\n",
-                              __func__, rc);
-                       goto out;
-               }
-       }
-
-       /* Populate the descriptors */
-
-       for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
-               region = &memMap->region[regionIdx];
-
-               for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
-                    segmentIdx++) {
-                       segment = &region->segment[segmentIdx];
-
-                       if (memMap->dir == DMA_TO_DEVICE) {
-                               srcPhysAddr = segment->physAddr;
-                               dstPhysAddr = devPhysAddr;
-                       } else {
-                               srcPhysAddr = devPhysAddr;
-                               dstPhysAddr = segment->physAddr;
-                       }
-
-                       rc =
-                            dma_add_descriptors(&devAttr->ring, dev,
-                                                srcPhysAddr, dstPhysAddr,
-                                                segment->numBytes);
-                       if (rc < 0) {
-                               printk(KERN_ERR
-                                      "%s: dma_add_descriptors failed: %d\n",
-                                      __func__, rc);
-                               goto out;
-                       }
-               }
-       }
-
-       rc = 0;
-
-out:
-
-       up(&memMap->lock);
-       return rc;
-}
-
-EXPORT_SYMBOL(dma_map_create_descriptor_ring);
-
-/****************************************************************************/
-/**
-*   Maps in a memory region such that it can be used for performing a DMA.
-*
-*   @return
-*/
-/****************************************************************************/
-
-int dma_unmap(DMA_MemMap_t *memMap,    /* Stores state information about the map */
-             int dirtied       /* non-zero if any of the pages were modified */
-    ) {
-
-       int rc = 0;
-       int regionIdx;
-       int segmentIdx;
-       DMA_Region_t *region;
-       DMA_Segment_t *segment;
-
-       down(&memMap->lock);
-
-       for (regionIdx = 0; regionIdx < memMap->numRegionsUsed; regionIdx++) {
-               region = &memMap->region[regionIdx];
-
-               for (segmentIdx = 0; segmentIdx < region->numSegmentsUsed;
-                    segmentIdx++) {
-                       segment = &region->segment[segmentIdx];
-
-                       switch (region->memType) {
-                       case DMA_MEM_TYPE_VMALLOC:
-                               {
-                                       printk(KERN_ERR
-                                              "%s: vmalloc'd pages are not yet supported\n",
-                                              __func__);
-                                       rc = -EINVAL;
-                                       goto out;
-                               }
-
-                       case DMA_MEM_TYPE_KMALLOC:
-                               {
-#if ALLOW_MAP_OF_KMALLOC_MEMORY
-                                       dma_unmap_single(NULL,
-                                                        segment->physAddr,
-                                                        segment->numBytes,
-                                                        memMap->dir);
-#endif
-                                       break;
-                               }
-
-                       case DMA_MEM_TYPE_DMA:
-                               {
-                                       dma_sync_single_for_cpu(NULL,
-                                                               segment->
-                                                               physAddr,
-                                                               segment->
-                                                               numBytes,
-                                                               memMap->dir);
-                                       break;
-                               }
-
-                       case DMA_MEM_TYPE_USER:
-                               {
-                                       /* Nothing to do here. */
-
-                                       break;
-                               }
-
-                       default:
-                               {
-                                       printk(KERN_ERR
-                                              "%s: Unsupported memory type: %d\n",
-                                              __func__, region->memType);
-                                       rc = -EINVAL;
-                                       goto out;
-                               }
-                       }
-
-                       segment->virtAddr = NULL;
-                       segment->physAddr = 0;
-                       segment->numBytes = 0;
-               }
-
-               if (region->numLockedPages > 0) {
-                       int pageIdx;
-
-                       /* Some user pages were locked. We need to go and unlock them now. */
-
-                       for (pageIdx = 0; pageIdx < region->numLockedPages;
-                            pageIdx++) {
-                               struct page *page =
-                                   region->lockedPages[pageIdx];
-
-                               if (memMap->dir == DMA_FROM_DEVICE) {
-                                       SetPageDirty(page);
-                               }
-                               page_cache_release(page);
-                       }
-                       kfree(region->lockedPages);
-                       region->numLockedPages = 0;
-                       region->lockedPages = NULL;
-               }
-
-               region->memType = DMA_MEM_TYPE_NONE;
-               region->virtAddr = NULL;
-               region->numBytes = 0;
-               region->numSegmentsUsed = 0;
-       }
-       memMap->userTask = NULL;
-       memMap->numRegionsUsed = 0;
-       memMap->inUse = 0;
-
-out:
-       up(&memMap->lock);
-
-       return rc;
-}
-
-EXPORT_SYMBOL(dma_unmap);
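
For reference, a minimal usage sketch (not part of this commit) of how the DMA_MemMap_t helpers deleted above were typically driven, reconstructed only from the removed code itself; the device handle, device register address and buffer arguments are illustrative assumptions:

/*
 * Minimal usage sketch (assumption, reconstructed from the removed code
 * above; not part of this commit).  "dev" is a DMA_Device_t handle,
 * "devPhysAddr" the device data register, "buf"/"len" a kernel buffer.
 */
static int example_dma_to_device(DMA_Device_t dev, dma_addr_t devPhysAddr,
                                 void *buf, size_t len)
{
        DMA_MemMap_t memMap;
        int rc;

        rc = dma_init_mem_map(&memMap);         /* one-time init of the map */
        if (rc != 0)
                return rc;

        /* Lock and map the buffer; on failure dma_map_mem() unmaps itself. */
        rc = dma_map_mem(&memMap, buf, len, DMA_TO_DEVICE);
        if (rc != 0)
                goto term;

        /* Build the descriptor ring from the mapped segments. */
        rc = dma_map_create_descriptor_ring(dev, &memMap, devPhysAddr);

        /* (Starting the transfer itself is outside this section.) */

        /* Pages were not written by the device, so dirtied == 0. */
        dma_unmap(&memMap, 0);
term:
        dma_term_mem_map(&memMap);
        return rc;
}
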
index 1f2c5319c05656294170621f0653778115441e25..72543781207b2f2a70e5bfbe5d84f44bec3239a7 100644 (file)
 /* ---- Include Files ---------------------------------------------------- */
 
 #include <linux/kernel.h>
-#include <linux/wait.h>
 #include <linux/semaphore.h>
 #include <csp/dmacHw.h>
 #include <mach/timer.h>
-#include <linux/scatterlist.h>
-#include <linux/dma-mapping.h>
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
 
 /* ---- Constants and Types ---------------------------------------------- */
 
@@ -111,78 +105,6 @@ typedef struct {
 
 } DMA_DescriptorRing_t;
 
-/****************************************************************************
-*
-*   The DMA_MemType_t and DMA_MemMap_t are helper structures used to setup
-*   DMA chains from a variety of memory sources.
-*
-*****************************************************************************/
-
-#define DMA_MEM_MAP_MIN_SIZE    4096   /* Pages less than this size are better */
-                                       /* off not being DMA'd. */
-
-typedef enum {
-       DMA_MEM_TYPE_NONE,      /* Not a valid setting */
-       DMA_MEM_TYPE_VMALLOC,   /* Memory came from vmalloc call */
-       DMA_MEM_TYPE_KMALLOC,   /* Memory came from kmalloc call */
-       DMA_MEM_TYPE_DMA,       /* Memory came from dma_alloc_xxx call */
-       DMA_MEM_TYPE_USER,      /* Memory came from user space. */
-
-} DMA_MemType_t;
-
-/* A segment represents a physically and virtually contiguous chunk of memory. */
-/* i.e. each segment can be DMA'd */
-/* A user of the DMA code will add memory regions. Each region may need to be */
-/* represented by one or more segments. */
-
-typedef struct {
-       void *virtAddr;         /* Virtual address used for this segment */
-       dma_addr_t physAddr;    /* Physical address this segment maps to */
-       size_t numBytes;        /* Size of the segment, in bytes */
-
-} DMA_Segment_t;
-
-/* A region represents a virtually contiguous chunk of memory, which may be */
-/* made up of multiple segments. */
-
-typedef struct {
-       DMA_MemType_t memType;
-       void *virtAddr;
-       size_t numBytes;
-
-       /* Each region (virtually contiguous) consists of one or more segments. Each */
-       /* segment is virtually and physically contiguous. */
-
-       int numSegmentsUsed;
-       int numSegmentsAllocated;
-       DMA_Segment_t *segment;
-
-       /* When a region corresponds to user memory, we need to lock all of the pages */
-       /* down before we can figure out the physical addresses. The lockedPage array contains */
-       /* the pages that were locked, and which subsequently need to be unlocked once the */
-       /* memory is unmapped. */
-
-       unsigned numLockedPages;
-       struct page **lockedPages;
-
-} DMA_Region_t;
-
-typedef struct {
-       int inUse;              /* Is this mapping currently being used? */
-       struct semaphore lock;  /* Acquired when using this structure */
-       enum dma_data_direction dir;    /* Direction this transfer is intended for */
-
-       /* In the event that we're mapping user memory, we need to know which task */
-       /* the memory is for, so that we can obtain the correct mm locks. */
-
-       struct task_struct *userTask;
-
-       int numRegionsUsed;
-       int numRegionsAllocated;
-       DMA_Region_t *region;
-
-} DMA_MemMap_t;
-
 /****************************************************************************
 *
 *   The DMA_DeviceAttribute_t contains information which describes a
@@ -568,124 +490,6 @@ int dma_alloc_double_dst_descriptors(DMA_Handle_t handle, /* DMA Handle */
                                     size_t numBytes    /* Number of bytes in each destination buffer */
     );
 
-/****************************************************************************/
-/**
-*   Initializes a DMA_MemMap_t data structure
-*/
-/****************************************************************************/
-
-int dma_init_mem_map(DMA_MemMap_t *memMap      /* Stores state information about the map */
-    );
-
-/****************************************************************************/
-/**
-*   Releases any memory currently being held by a memory mapping structure.
-*/
-/****************************************************************************/
-
-int dma_term_mem_map(DMA_MemMap_t *memMap      /* Stores state information about the map */
-    );
-
-/****************************************************************************/
-/**
-*   Looks at a memory address and categorizes it.
-*
-*   @return One of the values from the DMA_MemType_t enumeration.
-*/
-/****************************************************************************/
-
-DMA_MemType_t dma_mem_type(void *addr);
-
-/****************************************************************************/
-/**
-*   Sets the process (aka userTask) associated with a mem map. This is
-*   required if user-mode segments will be added to the mapping.
-*/
-/****************************************************************************/
-
-static inline void dma_mem_map_set_user_task(DMA_MemMap_t *memMap,
-                                            struct task_struct *task)
-{
-       memMap->userTask = task;
-}
-
-/****************************************************************************/
-/**
-*   Looks at a memory address and determines if we support DMA'ing to/from
-*   that type of memory.
-*
-*   @return boolean -
-*               return value != 0 means dma supported
-*               return value == 0 means dma not supported
-*/
-/****************************************************************************/
-
-int dma_mem_supports_dma(void *addr);
-
-/****************************************************************************/
-/**
-*   Initializes a memory map for use. Since this function acquires a
-*   semaphore within the memory map, it is VERY important that dma_unmap
-*   be called when you're finished using the map.
-*/
-/****************************************************************************/
-
-int dma_map_start(DMA_MemMap_t *memMap,        /* Stores state information about the map */
-                 enum dma_data_direction dir   /* Direction that the mapping will be going */
-    );
-
-/****************************************************************************/
-/**
-*   Adds a segment of memory to a memory map.
-*
-*   @return     0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_add_region(DMA_MemMap_t *memMap,   /* Stores state information about the map */
-                      void *mem,       /* Virtual address that we want to get a map of */
-                      size_t numBytes  /* Number of bytes being mapped */
-    );
-
-/****************************************************************************/
-/**
-*   Creates a descriptor ring from a memory mapping.
-*
-*   @return 0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_create_descriptor_ring(DMA_Device_t dev,   /* DMA device (where the ring is stored) */
-                                  DMA_MemMap_t *memMap,        /* Memory map that will be used */
-                                  dma_addr_t devPhysAddr       /* Physical address of device */
-    );
-
-/****************************************************************************/
-/**
-*   Maps in a memory region such that it can be used for performing a DMA.
-*
-*   @return     0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_map_mem(DMA_MemMap_t *memMap,  /* Stores state information about the map */
-               void *addr,     /* Virtual address that we want to get a map of */
-               size_t count,   /* Number of bytes being mapped */
-               enum dma_data_direction dir     /* Direction that the mapping will be going */
-    );
-
-/****************************************************************************/
-/**
-*   Unmaps a memory region previously mapped for DMA.
-*
-*   @return     0 on success, error code otherwise.
-*/
-/****************************************************************************/
-
-int dma_unmap(DMA_MemMap_t *memMap,    /* Stores state information about the map */
-             int dirtied       /* non-zero if any of the pages were modified */
-    );
-
 /****************************************************************************/
 /**
 *   Initiates a transfer when the descriptors have already been set up.
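
When a region came from user space, the removed API additionally required naming the owning task before mapping, so the pages could be pinned under the correct mm and released (and dirtied) again at dma_unmap() time. A hedged sketch, reconstructed from the removed declarations above; the __user buffer and the cast are assumptions for illustration:

/*
 * Sketch only (assumption, not part of this commit): mapping a user-space
 * buffer with the removed helpers.  current (<linux/sched.h>) is assumed.
 */
static int example_map_user_buffer(DMA_MemMap_t *memMap,
                                   void __user *ubuf, size_t len)
{
        /* Record the owning task first; required for user-mode segments. */
        dma_mem_map_set_user_task(memMap, current);

        /* Pins the user pages; dma_unmap(memMap, 1) later marks them dirty
         * and releases them. */
        return dma_map_mem(memMap, (void __force *)ubuf, len,
                           DMA_FROM_DEVICE);
}
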
index 6b22b543a83f0f6f393a2fbf9e23324c2bc7b9d1..d5088900af6c89a41eae892fb80be4d88fc8a73d 100644 (file)
@@ -44,7 +44,7 @@
 #include <mach/aemif.h>
 #include <mach/spi.h>
 
-#define DA850_EVM_PHY_ID               "0:00"
+#define DA850_EVM_PHY_ID               "davinci_mdio-0:00"
 #define DA850_LCD_PWR_PIN              GPIO_TO_PIN(2, 8)
 #define DA850_LCD_BL_PIN               GPIO_TO_PIN(2, 15)
 
index 346e1de2f5a857ac16fb9c2ecb5b85af9c4537d9..849311d3cb7c184f59adb7df85fe36876b4e3cb3 100644 (file)
@@ -54,7 +54,7 @@ static inline int have_tvp7002(void)
        return 0;
 }
 
-#define DM365_EVM_PHY_ID               "0:01"
+#define DM365_EVM_PHY_ID               "davinci_mdio-0:01"
 /*
  * A MAX-II CPLD is used for various board control functions.
  */
index a64b49cfedcad5f495ac2e360751e045036ca2cd..1247ecdcf752d57c7d6aa476a699b34c158a4324 100644 (file)
@@ -40,7 +40,7 @@
 #include <mach/usb.h>
 #include <mach/aemif.h>
 
-#define DM644X_EVM_PHY_ID              "0:01"
+#define DM644X_EVM_PHY_ID              "davinci_mdio-0:01"
 #define LXT971_PHY_ID  (0x001378e2)
 #define LXT971_PHY_MASK        (0xfffffff0)
 
index 64017558860bd0c127a9c5178d8aacd47229a62f..872ac69fa0490af84501ac13d73a792bf5f6aea7 100644 (file)
@@ -736,7 +736,7 @@ static struct davinci_uart_config uart_config __initdata = {
        .enabled_uarts = (1 << 0),
 };
 
-#define DM646X_EVM_PHY_ID              "0:01"
+#define DM646X_EVM_PHY_ID              "davinci_mdio-0:01"
 /*
  * The following EDMA channels/slots are not being used by drivers (for
  * example: Timer, GPIO, UART events etc) on dm646x, hence they are being
index 6c4a16415d476f58fbdadb88308cb803a9019956..8d34f513d41507b7bf2562739d8f824cc1e40c35 100644 (file)
@@ -39,7 +39,7 @@
 #include <mach/mmc.h>
 #include <mach/usb.h>
 
-#define NEUROS_OSD2_PHY_ID             "0:01"
+#define NEUROS_OSD2_PHY_ID             "davinci_mdio-0:01"
 #define LXT971_PHY_ID                  0x001378e2
 #define LXT971_PHY_MASK                        0xfffffff0
 
index e7c0c7c534937132929cf4cdd760fb4c63f4118b..45e815760a27fd0cfc0f2965590eb90a1779b240 100644 (file)
@@ -21,7 +21,7 @@
 #include <mach/da8xx.h>
 #include <mach/mux.h>
 
-#define HAWKBOARD_PHY_ID               "0:07"
+#define HAWKBOARD_PHY_ID               "davinci_mdio-0:07"
 #define DA850_HAWK_MMCSD_CD_PIN                GPIO_TO_PIN(3, 12)
 #define DA850_HAWK_MMCSD_WP_PIN                GPIO_TO_PIN(3, 13)
 
index 0b136a831c59563100312646d0b1b1324e8c4e20..31da3c5b2ba37f4934c09fa50bc2aa1cad775631 100644 (file)
@@ -42,7 +42,7 @@
 #include <mach/mux.h>
 #include <mach/usb.h>
 
-#define SFFSDR_PHY_ID          "0:01"
+#define SFFSDR_PHY_ID          "davinci_mdio-0:01"
 static struct mtd_partition davinci_sffsdr_nandflash_partition[] = {
        /* U-Boot Environment: Block 0
         * UBL:                Block 1
index 0ed7fdb64efbf5211a8e5b8fc19bc5cf56fc3b97..992c4c4101856db5f8f503cdd34aa5fbd5196806 100644 (file)
@@ -153,34 +153,6 @@ static struct clk pll1_sysclk3 = {
        .div_reg        = PLLDIV3,
 };
 
-static struct clk pll1_sysclk4 = {
-       .name           = "pll1_sysclk4",
-       .parent         = &pll1_clk,
-       .flags          = CLK_PLL,
-       .div_reg        = PLLDIV4,
-};
-
-static struct clk pll1_sysclk5 = {
-       .name           = "pll1_sysclk5",
-       .parent         = &pll1_clk,
-       .flags          = CLK_PLL,
-       .div_reg        = PLLDIV5,
-};
-
-static struct clk pll1_sysclk6 = {
-       .name           = "pll0_sysclk6",
-       .parent         = &pll0_clk,
-       .flags          = CLK_PLL,
-       .div_reg        = PLLDIV6,
-};
-
-static struct clk pll1_sysclk7 = {
-       .name           = "pll1_sysclk7",
-       .parent         = &pll1_clk,
-       .flags          = CLK_PLL,
-       .div_reg        = PLLDIV7,
-};
-
 static struct clk i2c0_clk = {
        .name           = "i2c0",
        .parent         = &pll0_aux_clk,
@@ -397,10 +369,6 @@ static struct clk_lookup da850_clks[] = {
        CLK(NULL,               "pll1_aux",     &pll1_aux_clk),
        CLK(NULL,               "pll1_sysclk2", &pll1_sysclk2),
        CLK(NULL,               "pll1_sysclk3", &pll1_sysclk3),
-       CLK(NULL,               "pll1_sysclk4", &pll1_sysclk4),
-       CLK(NULL,               "pll1_sysclk5", &pll1_sysclk5),
-       CLK(NULL,               "pll1_sysclk6", &pll1_sysclk6),
-       CLK(NULL,               "pll1_sysclk7", &pll1_sysclk7),
        CLK("i2c_davinci.1",    NULL,           &i2c0_clk),
        CLK(NULL,               "timer0",       &timerp64_0_clk),
        CLK("watchdog",         NULL,           &timerp64_1_clk),
index 46d4d876e6fb853e9cf051ed3f88edc852c8a57d..e82c642fa53cd49ac88afc412b4194319fc3303f 100644 (file)
@@ -37,7 +37,7 @@
  */
 struct ep93xx_dma_data {
        int                             port;
-       enum dma_data_direction         direction;
+       enum dma_transfer_direction     direction;
        const char                      *name;
 };
 
@@ -80,14 +80,14 @@ static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
  * channel supports given DMA direction. Only M2P channels have such
  * limitation, for M2M channels the direction is configurable.
  */
-static inline enum dma_data_direction
+static inline enum dma_transfer_direction
 ep93xx_dma_chan_direction(struct dma_chan *chan)
 {
        if (!ep93xx_dma_chan_is_m2p(chan))
                return DMA_NONE;
 
        /* even channels are for TX, odd for RX */
-       return (chan->chan_id % 2 == 0) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+       return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
 }
 
 #endif /* __ASM_ARCH_DMA_H */
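
The hunk above swaps the old dma_data_direction constants for the new dma_transfer_direction ones. As a hedged aside (the helper name is made up; only the value mapping is taken from the hunk), the correspondence is:

/*
 * Sketch (assumption, not part of this hunk): the old -> new mapping used
 * above, written out as a helper for clarity.
 */
static inline enum dma_transfer_direction
example_to_transfer_direction(enum dma_data_direction dir)
{
        return dir == DMA_TO_DEVICE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
}
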
index 3cdeb3647542592a4a62f98e0c6b647da358b0d4..5364d4bfa8bc79efe6d51b27ff361811d98b8227 100644 (file)
@@ -36,6 +36,8 @@ pen:  ldr     r7, [r6]
         * should now contain the SVC stack for this core
         */
        b       secondary_startup
+ENDPROC(exynos4_secondary_startup)
 
+       .align 2
 1:     .long   .
        .long   pen_release
index da70e7e3993740e88f1bb473bfc593d8f809b287..dd1ad55524c97e0d28f270e93e10f00dab7327b8 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/io.h>
 
 #include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
 
 #include <mach/regs-pmu.h>
 
index 2b11e046d3919dc393a2875b528abf34e3664d85..0679b8ad2d1e1a4263521f805d7afd246334a037 100644 (file)
@@ -597,7 +597,8 @@ static struct s3c_fb_pd_win origen_fb_win0 = {
 static struct s3c_fb_platdata origen_lcd_pdata __initdata = {
        .win[0]         = &origen_fb_win0,
        .vidcon0        = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
-       .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
+       .vidcon1        = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
+                               VIDCON1_INV_VCLK,
        .setup_gpio     = exynos4_fimd0_gpio_setup_24bpp,
 };
 
index 60bc45e3e7099045560f71bf4aaf29664341e488..0f2035a1eb6e5f31cf7bdbd16270af5efd4abee4 100644 (file)
@@ -23,8 +23,8 @@
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/gic.h>
+#include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
-#include <asm/unified.h>
 
 #include <mach/hardware.h>
 #include <mach/regs-clock.h>
@@ -137,7 +137,7 @@ int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
        while (time_before(jiffies, timeout)) {
                smp_rmb();
 
-               __raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)),
+               __raw_writel(virt_to_phys(exynos4_secondary_startup),
                        CPU1_BOOT_REG);
                gic_raise_softirq(cpumask_of(cpu), 1);
 
@@ -192,6 +192,6 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
         * until it receives a soft interrupt, and then the
         * secondary CPU branches to this address.
         */
-       __raw_writel(BSYM(virt_to_phys(exynos4_secondary_startup)),
+       __raw_writel(virt_to_phys(exynos4_secondary_startup),
                        CPU1_BOOT_REG);
 }
index 804c4a55f8038c75cbf0731168ee6d0d36de004a..8394d512a40227e0d5526fd6cc731137f1d108d6 100644 (file)
@@ -25,7 +25,7 @@
 #include <linux/smp.h>
 
 #include <asm/cacheflush.h>
-#include <asm/unified.h>
+#include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
 #include <asm/hardware/arm_timer.h>
 #include <asm/hardware/timer-sp.h>
@@ -73,10 +73,8 @@ static void __init highbank_map_io(void)
 
 void highbank_set_cpu_jump(int cpu, void *jump_addr)
 {
-#ifdef CONFIG_SMP
        cpu = cpu_logical_map(cpu);
-#endif
-       writel(BSYM(virt_to_phys(jump_addr)), HB_JUMP_TABLE_VIRT(cpu));
+       writel(virt_to_phys(jump_addr), HB_JUMP_TABLE_VIRT(cpu));
        __cpuc_flush_dcache_area(HB_JUMP_TABLE_VIRT(cpu), 16);
        outer_clean_range(HB_JUMP_TABLE_PHYS(cpu),
                          HB_JUMP_TABLE_PHYS(cpu) + 15);
index 0e6de366c6482026157656ad3ea9f8866299ba50..4defb97bbfc866400fe57fe4c5178a778c30a91f 100644 (file)
@@ -22,6 +22,18 @@ config ARCH_MX25
 config MACH_MX27
        bool
 
+config ARCH_MX5
+       bool
+
+config ARCH_MX50
+       bool
+
+config ARCH_MX51
+       bool
+
+config ARCH_MX53
+       bool
+
 config SOC_IMX1
        bool
        select ARCH_MX1
@@ -73,6 +85,31 @@ config SOC_IMX35
        select MXC_AVIC
        select SMP_ON_UP if SMP
 
+config SOC_IMX5
+       select CPU_V7
+       select MXC_TZIC
+       select ARCH_MXC_IOMUX_V3
+       select ARCH_MXC_AUDMUX_V2
+       select ARCH_HAS_CPUFREQ
+       select ARCH_MX5
+       bool
+
+config SOC_IMX50
+       bool
+       select SOC_IMX5
+       select ARCH_MX50
+
+config SOC_IMX51
+       bool
+       select SOC_IMX5
+       select ARCH_MX5
+       select ARCH_MX51
+
+config SOC_IMX53
+       bool
+       select SOC_IMX5
+       select ARCH_MX5
+       select ARCH_MX53
 
 if ARCH_IMX_V4_V5
 
@@ -592,6 +629,207 @@ config MACH_VPR200
          Include support for VPR200 platform. This includes specific
          configurations for the board and its peripherals.
 
+comment "i.MX5 platforms:"
+
+config MACH_MX50_RDP
+       bool "Support MX50 reference design platform"
+       depends on BROKEN
+       select SOC_IMX50
+       select IMX_HAVE_PLATFORM_IMX_I2C
+       select IMX_HAVE_PLATFORM_IMX_UART
+       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+       select IMX_HAVE_PLATFORM_SPI_IMX
+       help
+         Include support for MX50 reference design platform (RDP) board. This
+         includes specific configurations for the board and its peripherals.
+
+comment "i.MX51 machines:"
+
+config MACH_IMX51_DT
+       bool "Support i.MX51 platforms from device tree"
+       select SOC_IMX51
+       select USE_OF
+       select MACH_MX51_BABBAGE
+       help
+         Include support for Freescale i.MX51 based platforms
+         using the device tree for discovery
+
+config MACH_MX51_BABBAGE
+       bool "Support MX51 BABBAGE platforms"
+       select SOC_IMX51
+       select IMX_HAVE_PLATFORM_FSL_USB2_UDC
+       select IMX_HAVE_PLATFORM_IMX2_WDT
+       select IMX_HAVE_PLATFORM_IMX_I2C
+       select IMX_HAVE_PLATFORM_IMX_UART
+       select IMX_HAVE_PLATFORM_MXC_EHCI
+       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+       select IMX_HAVE_PLATFORM_SPI_IMX
+       help
+         Include support for MX51 Babbage platform, also known as MX51EVK in
+         u-boot. This includes specific configurations for the board and its
+         peripherals.
+
+config MACH_MX51_3DS
+       bool "Support MX51PDK (3DS)"
+       select SOC_IMX51
+       select IMX_HAVE_PLATFORM_IMX2_WDT
+       select IMX_HAVE_PLATFORM_IMX_KEYPAD
+       select IMX_HAVE_PLATFORM_IMX_UART
+       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+       select IMX_HAVE_PLATFORM_SPI_IMX
+       select MXC_DEBUG_BOARD
+       help
+         Include support for MX51PDK (3DS) platform. This includes specific
+         configurations for the board and its peripherals.
+
+config MACH_EUKREA_CPUIMX51
+       bool "Support Eukrea CPUIMX51 module"
+       select SOC_IMX51
+       select IMX_HAVE_PLATFORM_FSL_USB2_UDC
+       select IMX_HAVE_PLATFORM_IMX_I2C
+       select IMX_HAVE_PLATFORM_IMX_UART
+       select IMX_HAVE_PLATFORM_MXC_EHCI
+       select IMX_HAVE_PLATFORM_MXC_NAND
+       select IMX_HAVE_PLATFORM_SPI_IMX
+       help
+         Include support for Eukrea CPUIMX51 platform. This includes
+         specific configurations for the module and its peripherals.
+
+choice
+       prompt "Baseboard"
+       depends on MACH_EUKREA_CPUIMX51
+       default MACH_EUKREA_MBIMX51_BASEBOARD
+
+config MACH_EUKREA_MBIMX51_BASEBOARD
+       prompt "Eukrea MBIMX51 development board"
+       bool
+       select IMX_HAVE_PLATFORM_IMX_KEYPAD
+       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+       select LEDS_GPIO_REGISTER
+       help
+         This adds board specific devices that can be found on Eukrea's
+         MBIMX51 evaluation board.
+
+endchoice
+
+config MACH_EUKREA_CPUIMX51SD
+       bool "Support Eukrea CPUIMX51SD module"
+       select SOC_IMX51
+       select IMX_HAVE_PLATFORM_FSL_USB2_UDC
+       select IMX_HAVE_PLATFORM_IMX_I2C
+       select IMX_HAVE_PLATFORM_IMX_UART
+       select IMX_HAVE_PLATFORM_MXC_EHCI
+       select IMX_HAVE_PLATFORM_MXC_NAND
+       select IMX_HAVE_PLATFORM_SPI_IMX
+       help
+         Include support for Eukrea CPUIMX51SD platform. This includes
+         specific configurations for the module and its peripherals.
+
+choice
+       prompt "Baseboard"
+       depends on MACH_EUKREA_CPUIMX51SD
+       default MACH_EUKREA_MBIMXSD51_BASEBOARD
+
+config MACH_EUKREA_MBIMXSD51_BASEBOARD
+       prompt "Eukrea MBIMXSD development board"
+       bool
+       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+       select LEDS_GPIO_REGISTER
+       help
+         This adds board specific devices that can be found on Eukrea's
+         MBIMXSD evaluation board.
+
+endchoice
+
+config MX51_EFIKA_COMMON
+       bool
+       select SOC_IMX51
+       select IMX_HAVE_PLATFORM_IMX_UART
+       select IMX_HAVE_PLATFORM_MXC_EHCI
+       select IMX_HAVE_PLATFORM_PATA_IMX
+       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+       select IMX_HAVE_PLATFORM_SPI_IMX
+       select MXC_ULPI if USB_ULPI
+
+config MACH_MX51_EFIKAMX
+       bool "Support MX51 Genesi Efika MX nettop"
+       select LEDS_GPIO_REGISTER
+       select MX51_EFIKA_COMMON
+       help
+         Include support for Genesi Efika MX nettop. This includes specific
+         configurations for the board and its peripherals.
+
+config MACH_MX51_EFIKASB
+       bool "Support MX51 Genesi Efika Smartbook"
+       select LEDS_GPIO_REGISTER
+       select MX51_EFIKA_COMMON
+       help
+         Include support for Genesi Efika Smartbook. This includes specific
+         configurations for the board and its peripherals.
+
+comment "i.MX53 machines:"
+
+config MACH_IMX53_DT
+       bool "Support i.MX53 platforms from device tree"
+       select SOC_IMX53
+       select USE_OF
+       select MACH_MX53_ARD
+       select MACH_MX53_EVK
+       select MACH_MX53_LOCO
+       select MACH_MX53_SMD
+       help
+         Include support for Freescale i.MX53 based platforms
+         using the device tree for discovery
+
+config MACH_MX53_EVK
+       bool "Support MX53 EVK platforms"
+       select SOC_IMX53
+       select IMX_HAVE_PLATFORM_IMX2_WDT
+       select IMX_HAVE_PLATFORM_IMX_UART
+       select IMX_HAVE_PLATFORM_IMX_I2C
+       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+       select IMX_HAVE_PLATFORM_SPI_IMX
+       select LEDS_GPIO_REGISTER
+       help
+         Include support for MX53 EVK platform. This includes specific
+         configurations for the board and its peripherals.
+
+config MACH_MX53_SMD
+       bool "Support MX53 SMD platforms"
+       select SOC_IMX53
+       select IMX_HAVE_PLATFORM_IMX2_WDT
+       select IMX_HAVE_PLATFORM_IMX_I2C
+       select IMX_HAVE_PLATFORM_IMX_UART
+       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+       help
+         Include support for MX53 SMD platform. This includes specific
+         configurations for the board and its peripherals.
+
+config MACH_MX53_LOCO
+       bool "Support MX53 LOCO platforms"
+       select SOC_IMX53
+       select IMX_HAVE_PLATFORM_IMX2_WDT
+       select IMX_HAVE_PLATFORM_IMX_I2C
+       select IMX_HAVE_PLATFORM_IMX_UART
+       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+       select IMX_HAVE_PLATFORM_GPIO_KEYS
+       select LEDS_GPIO_REGISTER
+       help
+         Include support for MX53 LOCO platform. This includes specific
+         configurations for the board and its peripherals.
+
+config MACH_MX53_ARD
+       bool "Support MX53 ARD platforms"
+       select SOC_IMX53
+       select IMX_HAVE_PLATFORM_IMX2_WDT
+       select IMX_HAVE_PLATFORM_IMX_I2C
+       select IMX_HAVE_PLATFORM_IMX_UART
+       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
+       select IMX_HAVE_PLATFORM_GPIO_KEYS
+       help
+         Include support for MX53 ARD platform. This includes specific
+         configurations for the board and its peripherals.
+
 comment "i.MX6 family:"
 
 config SOC_IMX6Q
index f5920c24f7d7eab486a8a340e4e6b6b785038483..55db9c488f2b408162e4bcca36f44d7cdb444ac0 100644 (file)
@@ -11,6 +11,8 @@ obj-$(CONFIG_SOC_IMX27) += clock-imx27.o mm-imx27.o ehci-imx27.o
 obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clock-imx31.o iomux-imx31.o ehci-imx31.o
 obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clock-imx35.o ehci-imx35.o
 
+obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clock-mx51-mx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
+
 # Support for CMOS sensor interface
 obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o
 
@@ -75,3 +77,22 @@ obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o
 ifeq ($(CONFIG_PM),y)
 obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
 endif
+
+# i.MX5 based machines
+obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o
+obj-$(CONFIG_MACH_MX51_3DS) += mach-mx51_3ds.o
+obj-$(CONFIG_MACH_MX53_EVK) += mach-mx53_evk.o
+obj-$(CONFIG_MACH_MX53_SMD) += mach-mx53_smd.o
+obj-$(CONFIG_MACH_MX53_LOCO) += mach-mx53_loco.o
+obj-$(CONFIG_MACH_MX53_ARD) += mach-mx53_ard.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX51) += mach-cpuimx51.o
+obj-$(CONFIG_MACH_EUKREA_MBIMX51_BASEBOARD) += eukrea_mbimx51-baseboard.o
+obj-$(CONFIG_MACH_EUKREA_CPUIMX51SD) += mach-cpuimx51sd.o
+obj-$(CONFIG_MACH_EUKREA_MBIMXSD51_BASEBOARD) += eukrea_mbimxsd-baseboard.o
+obj-$(CONFIG_MX51_EFIKA_COMMON) += mx51_efika.o
+obj-$(CONFIG_MACH_MX51_EFIKAMX) += mach-mx51_efikamx.o
+obj-$(CONFIG_MACH_MX51_EFIKASB) += mach-mx51_efikasb.o
+obj-$(CONFIG_MACH_MX50_RDP) += mach-mx50_rdp.o
+
+obj-$(CONFIG_MACH_IMX51_DT) += imx51-dt.o
+obj-$(CONFIG_MACH_IMX53_DT) += imx53-dt.o
index 5f4d06af491262a6ac6d75d7bcdeaee74684a16b..6dfdbcc83afd7d4fb9e8245dfceed489e0c6970e 100644 (file)
@@ -22,6 +22,18 @@ zreladdr-$(CONFIG_SOC_IMX35) += 0x80008000
 params_phys-$(CONFIG_SOC_IMX35)        := 0x80000100
 initrd_phys-$(CONFIG_SOC_IMX35)        := 0x80800000
 
+zreladdr-$(CONFIG_SOC_IMX50)   += 0x70008000
+params_phys-$(CONFIG_SOC_IMX50)        := 0x70000100
+initrd_phys-$(CONFIG_SOC_IMX50)        := 0x70800000
+
+zreladdr-$(CONFIG_SOC_IMX51)   += 0x90008000
+params_phys-$(CONFIG_SOC_IMX51)        := 0x90000100
+initrd_phys-$(CONFIG_SOC_IMX51)        := 0x90800000
+
+zreladdr-$(CONFIG_SOC_IMX53)   += 0x70008000
+params_phys-$(CONFIG_SOC_IMX53)        := 0x70000100
+initrd_phys-$(CONFIG_SOC_IMX53)        := 0x70800000
+
 zreladdr-$(CONFIG_SOC_IMX6Q)   += 0x10008000
 params_phys-$(CONFIG_SOC_IMX6Q)        := 0x10000100
 initrd_phys-$(CONFIG_SOC_IMX6Q)        := 0x10800000
index 9273c2a24b540a12646c406ac82dedb6c27ca28b..2d88f8b9a454994bee6841cda4828d81286bac5e 100644 (file)
@@ -814,6 +814,16 @@ DEF_PFD(pll3_pfd_540m, PFD_480, PFD1, &pll3_usb_otg);
 DEF_PFD(pll3_pfd_508m, PFD_480, PFD2, &pll3_usb_otg);
 DEF_PFD(pll3_pfd_454m, PFD_480, PFD3, &pll3_usb_otg);
 
+static unsigned long twd_clk_get_rate(struct clk *clk)
+{
+       return clk_get_rate(clk->parent) / 2;
+}
+
+static struct clk twd_clk = {
+       .parent = &arm_clk,
+       .get_rate = twd_clk_get_rate,
+};
+
 static unsigned long pll2_200m_get_rate(struct clk *clk)
 {
        return clk_get_rate(clk->parent) / 2;
@@ -1894,6 +1904,7 @@ static struct clk_lookup lookups[] = {
        _REGISTER_CLOCK("20ec000.sdma", NULL, sdma_clk),
        _REGISTER_CLOCK("20bc000.wdog", NULL, dummy_clk),
        _REGISTER_CLOCK("20c0000.wdog", NULL, dummy_clk),
+       _REGISTER_CLOCK("smp_twd", NULL, twd_clk),
        _REGISTER_CLOCK(NULL, "ckih", ckih_clk),
        _REGISTER_CLOCK(NULL, "ckil_clk", ckil_clk),
        _REGISTER_CLOCK(NULL, "aips_tz1_clk", aips_tz1_clk),
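
The new "smp_twd" lookup presumably exists so the local-timer (TWD) code can look the clock up by device name and arrive at half the ARM clock rate; a hedged sketch of that consumer side (an assumption — the consumer is not part of this hunk):

/*
 * Sketch (assumption, not shown in this patch): how a consumer would pick
 * up the "smp_twd" lookup registered above.
 */
static unsigned long example_twd_rate(void)
{
        struct clk *twd = clk_get_sys("smp_twd", NULL);

        /* twd_clk_get_rate() above returns the arm_clk rate / 2 */
        return IS_ERR(twd) ? 0 : clk_get_rate(twd);
}
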
similarity index 99%
rename from arch/arm/mach-mx5/clock-mx51-mx53.c
rename to arch/arm/mach-imx/clock-mx51-mx53.c
index 4cb276977190e98174c70419a64a63696179ac73..08470504a088915f1cd0eea8e7ca1a3b25a41e32 100644 (file)
@@ -23,7 +23,7 @@
 #include <mach/common.h>
 #include <mach/clock.h>
 
-#include "crm_regs.h"
+#include "crm-regs-imx5.h"
 
 /* External clock values passed-in by the board code */
 static unsigned long external_high_reference, external_low_reference;
index 89c33258639f89a9b075a2f9bf6ca0bf41b7a0e8..4d1aab154400fb43e4e0de2ea38e49c932d278e0 100644 (file)
@@ -36,6 +36,7 @@
 #include <asm/mach/time.h>
 #include <asm/memory.h>
 #include <asm/mach/map.h>
+#include <asm/memblock.h>
 #include <mach/common.h>
 #include <mach/iomux-mx3.h>
 #include <mach/3ds_debugboard.h>
@@ -754,10 +755,8 @@ static struct sys_timer mx31_3ds_timer = {
 static void __init mx31_3ds_reserve(void)
 {
        /* reserve MX31_3DS_CAMERA_BUF_SIZE bytes for mx3-camera */
-       mx3_camera_base = memblock_alloc(MX31_3DS_CAMERA_BUF_SIZE,
+       mx3_camera_base = arm_memblock_steal(MX31_3DS_CAMERA_BUF_SIZE,
                                         MX31_3DS_CAMERA_BUF_SIZE);
-       memblock_free(mx3_camera_base, MX31_3DS_CAMERA_BUF_SIZE);
-       memblock_remove(mx3_camera_base, MX31_3DS_CAMERA_BUF_SIZE);
 }
 
 MACHINE_START(MX31_3DS, "Freescale MX31PDK (3DS)")
index b95981dacb2bcef2ff8138b59d32f58d85d3a394..f225262b5c38551cb6ec424e01cb71139f9a551f 100644 (file)
@@ -41,6 +41,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 #include <asm/mach/map.h>
+#include <asm/memblock.h>
 #include <mach/board-mx31moboard.h>
 #include <mach/common.h>
 #include <mach/hardware.h>
@@ -584,10 +585,8 @@ struct sys_timer mx31moboard_timer = {
 static void __init mx31moboard_reserve(void)
 {
        /* reserve 4 MiB for mx3-camera */
-       mx3_camera_base = memblock_alloc(MX3_CAMERA_BUF_SIZE,
+       mx3_camera_base = arm_memblock_steal(MX3_CAMERA_BUF_SIZE,
                        MX3_CAMERA_BUF_SIZE);
-       memblock_free(mx3_camera_base, MX3_CAMERA_BUF_SIZE);
-       memblock_remove(mx3_camera_base, MX3_CAMERA_BUF_SIZE);
 }
 
 MACHINE_START(MX31MOBOARD, "EPFL Mobots mx31moboard")
similarity index 99%
rename from arch/arm/mach-mx5/board-mx53_ard.c
rename to arch/arm/mach-imx/mach-mx53_ard.c
index 5f224f1c3eb638f6076e62b342956c4e2eaebe6d..753f4fc9ec04ae1ddf908a7ba9b124d7f27b4379 100644 (file)
@@ -32,7 +32,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 
-#include "crm_regs.h"
 #include "devices-imx53.h"
 
 #define ARD_ETHERNET_INT_B     IMX_GPIO_NR(2, 31)
@@ -189,8 +188,10 @@ static int weim_cs_config(void)
                return -ENOMEM;
 
        iomuxc_base = ioremap(MX53_IOMUXC_BASE_ADDR, SZ_4K);
-       if (!iomuxc_base)
+       if (!iomuxc_base) {
+               iounmap(weim_base);
                return -ENOMEM;
+       }
 
        /* CS1 timings for LAN9220 */
        writel(0x20001, (weim_base + 0x18));
similarity index 99%
rename from arch/arm/mach-mx5/board-mx53_evk.c
rename to arch/arm/mach-imx/mach-mx53_evk.c
index d6ce137896d6adb69a167323dc9046d1b0b10718..5a72188b9cdb6e3ba8479a7b473c04a77d92080d 100644 (file)
@@ -37,7 +37,6 @@
 #define EVK_ECSPI1_CS1         IMX_GPIO_NR(3, 19)
 #define MX53EVK_LED            IMX_GPIO_NR(7, 7)
 
-#include "crm_regs.h"
 #include "devices-imx53.h"
 
 static iomux_v3_cfg_t mx53_evk_pads[] = {
similarity index 99%
rename from arch/arm/mach-mx5/board-mx53_loco.c
rename to arch/arm/mach-imx/mach-mx53_loco.c
index fd8b524e1c58223c53f3b03bd5533a46dfec302d..37f67cac15a4daad2d803266503f6b2dfa81d4d0 100644 (file)
@@ -32,7 +32,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 
-#include "crm_regs.h"
 #include "devices-imx53.h"
 
 #define MX53_LOCO_POWER                        IMX_GPIO_NR(1, 8)
similarity index 99%
rename from arch/arm/mach-mx5/board-mx53_smd.c
rename to arch/arm/mach-imx/mach-mx53_smd.c
index 22c53c9b18aaa4ac6199068e04f07df535d42cb7..8e972c5c3e138bfebe6244b3da450937d1ede945 100644 (file)
@@ -31,7 +31,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 
-#include "crm_regs.h"
 #include "devices-imx53.h"
 
 #define SMD_FEC_PHY_RST                IMX_GPIO_NR(7, 6)
index d7e151669ed347591e1687de4c628156aa56820d..e48854b9d9907a7c76095b5531c3a9f63eb0db23 100644 (file)
@@ -39,6 +39,7 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/time.h>
 #include <asm/mach/map.h>
+#include <asm/memblock.h>
 #include <mach/common.h>
 #include <mach/hardware.h>
 #include <mach/iomux-mx3.h>
@@ -680,10 +681,8 @@ struct sys_timer pcm037_timer = {
 static void __init pcm037_reserve(void)
 {
        /* reserve 4 MiB for mx3-camera */
-       mx3_camera_base = memblock_alloc(MX3_CAMERA_BUF_SIZE,
+       mx3_camera_base = arm_memblock_steal(MX3_CAMERA_BUF_SIZE,
                        MX3_CAMERA_BUF_SIZE);
-       memblock_free(mx3_camera_base, MX3_CAMERA_BUF_SIZE);
-       memblock_remove(mx3_camera_base, MX3_CAMERA_BUF_SIZE);
 }
 
 MACHINE_START(PCM037, "Phytec Phycore pcm037")
similarity index 58%
rename from arch/arm/mach-mx5/system.c
rename to arch/arm/mach-imx/pm-imx5.c
index 5eebfaad1226d5384509e6cc2eaf982bb5d9c44d..6dc0934480577363a2d49a4a5646d69d6cc62729 100644 (file)
@@ -1,8 +1,6 @@
 /*
- * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- */
-
-/*
+ *  Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
  * The code contained herein is licensed under the GNU General Public
  * License. You may obtain a copy of the GNU General Public License
  * Version 2 or later at the following locations:
  * http://www.opensource.org/licenses/gpl-license.html
  * http://www.gnu.org/copyleft/gpl.html
  */
-#include <linux/platform_device.h>
+#include <linux/suspend.h>
+#include <linux/clk.h>
 #include <linux/io.h>
-#include <mach/hardware.h>
+#include <linux/err.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
 #include <mach/common.h>
-#include "crm_regs.h"
+#include <mach/hardware.h>
+#include "crm-regs-imx5.h"
+
+static struct clk *gpc_dvfs_clk;
 
-/* set cpu low power mode before WFI instruction. This function is called
-  * mx5 because it can be used for mx50, mx51, and mx53.*/
+/*
+ * set cpu low power mode before WFI instruction. This function is called
+ * mx5 because it can be used for mx50, mx51, and mx53.
+ */
 void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
 {
        u32 plat_lpc, arm_srpgcr, ccm_clpcr;
@@ -80,3 +86,68 @@ void mx5_cpu_lp_set(enum mxc_cpu_pwr_mode mode)
                __raw_writel(empgc1, MXC_SRPG_EMPGC1_SRPGCR);
        }
 }
+
+static int mx5_suspend_prepare(void)
+{
+       return clk_enable(gpc_dvfs_clk);
+}
+
+static int mx5_suspend_enter(suspend_state_t state)
+{
+       switch (state) {
+       case PM_SUSPEND_MEM:
+               mx5_cpu_lp_set(STOP_POWER_OFF);
+               break;
+       case PM_SUSPEND_STANDBY:
+               mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (state == PM_SUSPEND_MEM) {
+               local_flush_tlb_all();
+               flush_cache_all();
+
+               /*clear the EMPGC0/1 bits */
+               __raw_writel(0, MXC_SRPG_EMPGC0_SRPGCR);
+               __raw_writel(0, MXC_SRPG_EMPGC1_SRPGCR);
+       }
+       cpu_do_idle();
+       return 0;
+}
+
+static void mx5_suspend_finish(void)
+{
+       clk_disable(gpc_dvfs_clk);
+}
+
+static int mx5_pm_valid(suspend_state_t state)
+{
+       return (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX);
+}
+
+static const struct platform_suspend_ops mx5_suspend_ops = {
+       .valid = mx5_pm_valid,
+       .prepare = mx5_suspend_prepare,
+       .enter = mx5_suspend_enter,
+       .finish = mx5_suspend_finish,
+};
+
+static int __init mx5_pm_init(void)
+{
+       if (!cpu_is_mx51() && !cpu_is_mx53())
+               return 0;
+
+       if (gpc_dvfs_clk == NULL)
+               gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
+
+       if (!IS_ERR(gpc_dvfs_clk)) {
+               if (cpu_is_mx51())
+                       suspend_set_ops(&mx5_suspend_ops);
+       } else
+               return -EPERM;
+
+       return 0;
+}
+device_initcall(mx5_pm_init);
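
mx5_suspend_enter() above folds the suspend-state handling and the low-power-mode selection together; as a hedged restatement (the helper is illustrative, not part of the patch), the mapping it implements is simply:

/*
 * Sketch (assumption, not part of this patch): the state -> low-power mode
 * mapping implemented by mx5_suspend_enter() above.
 */
static enum mxc_cpu_pwr_mode example_mx5_state_to_mode(suspend_state_t state)
{
        return state == PM_SUSPEND_MEM ? STOP_POWER_OFF
                                       : WAIT_UNCLOCKED_POWER_OFF;
}
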
index 4bde04f99e38ceda85fd4cb43f89bf2eb080ead7..e15f1555c59b1ebd2712ba8b8c31772c340b06ef 100644 (file)
@@ -15,7 +15,7 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/smp.h>
-#include <asm/unified.h>
+#include <asm/smp_plat.h>
 
 #define SRC_SCR                                0x000
 #define SRC_GPR1                       0x020
 
 static void __iomem *src_base;
 
-#ifndef CONFIG_SMP
-#define cpu_logical_map(cpu)           0
-#endif
-
 void imx_enable_cpu(int cpu, bool enable)
 {
        u32 mask, val;
@@ -43,7 +39,7 @@ void imx_enable_cpu(int cpu, bool enable)
 void imx_set_cpu_jump(int cpu, void *jump_addr)
 {
        cpu = cpu_logical_map(cpu);
-       writel_relaxed(BSYM(virt_to_phys(jump_addr)),
+       writel_relaxed(virt_to_phys(jump_addr),
                       src_base + SRC_GPR1 + cpu * 8);
 }
 
index 0c631a9f8647f5b8ec35dd5998da65bc937860d0..bcd5af223deabaf48451bf3a4d67cea23c82566e 100644 (file)
@@ -34,6 +34,7 @@ pen:  ldr     r7, [r6]
         * should now contain the SVC stack for this core
         */
        b       secondary_startup
+ENDPROC(msm_secondary_startup)
 
        .align
 1:     .long   .
index 41c252de0215b3c66aaad2c7074d90230b431c19..a446fc14221f6f0394806fb32af411ef2742e85e 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/smp.h>
 
 #include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
 
 extern volatile int pen_release;
 
index 0b3e357c4c8c8166e144a023a059336bddb660d7..db0117ec55f4ad32a10d154d1eb2c41654a73bac 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/cacheflush.h>
 #include <asm/cputype.h>
 #include <asm/mach-types.h>
+#include <asm/smp_plat.h>
 
 #include <mach/msm_iomap.h>
 
index a9103bc6615f01ac526d8b085939fb4f20433ffb..bd66ed04d6dc0edfdc489a56a7f1e9fb84dbafc1 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/debugfs.h>
+#include <linux/module.h>
 #include <linux/string.h>
 #include <mach/vreg.h>
 
diff --git a/arch/arm/mach-mx5/Kconfig b/arch/arm/mach-mx5/Kconfig
deleted file mode 100644 (file)
index af0c212..0000000
+++ /dev/null
@@ -1,244 +0,0 @@
-if ARCH_MX5
-
-# ARCH_MX5/50/53 are left to mark places where prevent multi-soc in single
-# image. So for most time, SOC_IMX50/51/53 should be used.
-
-config ARCH_MX51
-       bool
-
-config ARCH_MX50
-       bool
-
-config ARCH_MX53
-       bool
-
-config SOC_IMX50
-       bool
-       select CPU_V7
-       select ARM_L1_CACHE_SHIFT_6
-       select MXC_TZIC
-       select ARCH_MXC_IOMUX_V3
-       select ARCH_MXC_AUDMUX_V2
-       select ARCH_HAS_CPUFREQ
-       select ARCH_MX50
-
-config SOC_IMX51
-       bool
-       select CPU_V7
-       select ARM_L1_CACHE_SHIFT_6
-       select MXC_TZIC
-       select ARCH_MXC_IOMUX_V3
-       select ARCH_MXC_AUDMUX_V2
-       select ARCH_HAS_CPUFREQ
-       select ARCH_MX51
-
-config SOC_IMX53
-       bool
-       select CPU_V7
-       select ARM_L1_CACHE_SHIFT_6
-       select MXC_TZIC
-       select ARCH_MXC_IOMUX_V3
-       select ARCH_MX53
-
-#comment "i.MX50 machines:"
-
-config MACH_MX50_RDP
-       bool "Support MX50 reference design platform"
-       depends on BROKEN
-       select SOC_IMX50
-       select IMX_HAVE_PLATFORM_IMX_I2C
-       select IMX_HAVE_PLATFORM_IMX_UART
-       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-       select IMX_HAVE_PLATFORM_SPI_IMX
-       help
-         Include support for MX50 reference design platform (RDP) board. This
-         includes specific configurations for the board and its peripherals.
-
-comment "i.MX51 machines:"
-
-config MACH_IMX51_DT
-       bool "Support i.MX51 platforms from device tree"
-       select SOC_IMX51
-       select USE_OF
-       select MACH_MX51_BABBAGE
-       help
-         Include support for Freescale i.MX51 based platforms
-         using the device tree for discovery
-
-config MACH_MX51_BABBAGE
-       bool "Support MX51 BABBAGE platforms"
-       select SOC_IMX51
-       select IMX_HAVE_PLATFORM_FSL_USB2_UDC
-       select IMX_HAVE_PLATFORM_IMX2_WDT
-       select IMX_HAVE_PLATFORM_IMX_I2C
-       select IMX_HAVE_PLATFORM_IMX_UART
-       select IMX_HAVE_PLATFORM_MXC_EHCI
-       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-       select IMX_HAVE_PLATFORM_SPI_IMX
-       help
-         Include support for MX51 Babbage platform, also known as MX51EVK in
-         u-boot. This includes specific configurations for the board and its
-         peripherals.
-
-config MACH_MX51_3DS
-       bool "Support MX51PDK (3DS)"
-       select SOC_IMX51
-       select IMX_HAVE_PLATFORM_IMX2_WDT
-       select IMX_HAVE_PLATFORM_IMX_KEYPAD
-       select IMX_HAVE_PLATFORM_IMX_UART
-       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-       select IMX_HAVE_PLATFORM_SPI_IMX
-       select MXC_DEBUG_BOARD
-       help
-         Include support for MX51PDK (3DS) platform. This includes specific
-         configurations for the board and its peripherals.
-
-config MACH_EUKREA_CPUIMX51
-       bool "Support Eukrea CPUIMX51 module"
-       select SOC_IMX51
-       select IMX_HAVE_PLATFORM_FSL_USB2_UDC
-       select IMX_HAVE_PLATFORM_IMX_I2C
-       select IMX_HAVE_PLATFORM_IMX_UART
-       select IMX_HAVE_PLATFORM_MXC_EHCI
-       select IMX_HAVE_PLATFORM_MXC_NAND
-       select IMX_HAVE_PLATFORM_SPI_IMX
-       help
-         Include support for Eukrea CPUIMX51 platform. This includes
-         specific configurations for the module and its peripherals.
-
-choice
-       prompt "Baseboard"
-       depends on MACH_EUKREA_CPUIMX51
-       default MACH_EUKREA_MBIMX51_BASEBOARD
-
-config MACH_EUKREA_MBIMX51_BASEBOARD
-       prompt "Eukrea MBIMX51 development board"
-       bool
-       select IMX_HAVE_PLATFORM_IMX_KEYPAD
-       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-       select LEDS_GPIO_REGISTER
-       help
-         This adds board specific devices that can be found on Eukrea's
-         MBIMX51 evaluation board.
-
-endchoice
-
-config MACH_EUKREA_CPUIMX51SD
-       bool "Support Eukrea CPUIMX51SD module"
-       select SOC_IMX51
-       select IMX_HAVE_PLATFORM_FSL_USB2_UDC
-       select IMX_HAVE_PLATFORM_IMX_I2C
-       select IMX_HAVE_PLATFORM_IMX_UART
-       select IMX_HAVE_PLATFORM_MXC_EHCI
-       select IMX_HAVE_PLATFORM_MXC_NAND
-       select IMX_HAVE_PLATFORM_SPI_IMX
-       help
-         Include support for Eukrea CPUIMX51SD platform. This includes
-         specific configurations for the module and its peripherals.
-
-choice
-       prompt "Baseboard"
-       depends on MACH_EUKREA_CPUIMX51SD
-       default MACH_EUKREA_MBIMXSD51_BASEBOARD
-
-config MACH_EUKREA_MBIMXSD51_BASEBOARD
-       prompt "Eukrea MBIMXSD development board"
-       bool
-       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-       select LEDS_GPIO_REGISTER
-       help
-         This adds board specific devices that can be found on Eukrea's
-         MBIMXSD evaluation board.
-
-endchoice
-
-config MX51_EFIKA_COMMON
-       bool
-       select SOC_IMX51
-       select IMX_HAVE_PLATFORM_IMX_UART
-       select IMX_HAVE_PLATFORM_MXC_EHCI
-       select IMX_HAVE_PLATFORM_PATA_IMX
-       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-       select IMX_HAVE_PLATFORM_SPI_IMX
-       select MXC_ULPI if USB_ULPI
-
-config MACH_MX51_EFIKAMX
-       bool "Support MX51 Genesi Efika MX nettop"
-       select LEDS_GPIO_REGISTER
-       select MX51_EFIKA_COMMON
-       help
-         Include support for Genesi Efika MX nettop. This includes specific
-         configurations for the board and its peripherals.
-
-config MACH_MX51_EFIKASB
-       bool "Support MX51 Genesi Efika Smartbook"
-       select LEDS_GPIO_REGISTER
-       select MX51_EFIKA_COMMON
-       help
-         Include support for Genesi Efika Smartbook. This includes specific
-         configurations for the board and its peripherals.
-
-comment "i.MX53 machines:"
-
-config MACH_IMX53_DT
-       bool "Support i.MX53 platforms from device tree"
-       select SOC_IMX53
-       select USE_OF
-       select MACH_MX53_ARD
-       select MACH_MX53_EVK
-       select MACH_MX53_LOCO
-       select MACH_MX53_SMD
-       help
-         Include support for Freescale i.MX53 based platforms
-         using the device tree for discovery
-
-config MACH_MX53_EVK
-       bool "Support MX53 EVK platforms"
-       select SOC_IMX53
-       select IMX_HAVE_PLATFORM_IMX2_WDT
-       select IMX_HAVE_PLATFORM_IMX_UART
-       select IMX_HAVE_PLATFORM_IMX_I2C
-       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-       select IMX_HAVE_PLATFORM_SPI_IMX
-       select LEDS_GPIO_REGISTER
-       help
-         Include support for MX53 EVK platform. This includes specific
-         configurations for the board and its peripherals.
-
-config MACH_MX53_SMD
-       bool "Support MX53 SMD platforms"
-       select SOC_IMX53
-       select IMX_HAVE_PLATFORM_IMX2_WDT
-       select IMX_HAVE_PLATFORM_IMX_I2C
-       select IMX_HAVE_PLATFORM_IMX_UART
-       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-       help
-         Include support for MX53 SMD platform. This includes specific
-         configurations for the board and its peripherals.
-
-config MACH_MX53_LOCO
-       bool "Support MX53 LOCO platforms"
-       select SOC_IMX53
-       select IMX_HAVE_PLATFORM_IMX2_WDT
-       select IMX_HAVE_PLATFORM_IMX_I2C
-       select IMX_HAVE_PLATFORM_IMX_UART
-       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-       select IMX_HAVE_PLATFORM_GPIO_KEYS
-       select LEDS_GPIO_REGISTER
-       help
-         Include support for MX53 LOCO platform. This includes specific
-         configurations for the board and its peripherals.
-
-config MACH_MX53_ARD
-       bool "Support MX53 ARD platforms"
-       select SOC_IMX53
-       select IMX_HAVE_PLATFORM_IMX2_WDT
-       select IMX_HAVE_PLATFORM_IMX_I2C
-       select IMX_HAVE_PLATFORM_IMX_UART
-       select IMX_HAVE_PLATFORM_SDHCI_ESDHC_IMX
-       select IMX_HAVE_PLATFORM_GPIO_KEYS
-       help
-         Include support for MX53 ARD platform. This includes specific
-         configurations for the board and its peripherals.
-
-endif
diff --git a/arch/arm/mach-mx5/Makefile b/arch/arm/mach-mx5/Makefile
deleted file mode 100644 (file)
index 0fc6080..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-# Makefile for the linux kernel.
-#
-
-# Object file lists.
-obj-y   := cpu.o mm.o clock-mx51-mx53.o ehci.o system.o
-
-obj-$(CONFIG_PM) += pm-imx5.o
-obj-$(CONFIG_CPU_FREQ_IMX)    += cpu_op-mx51.o
-obj-$(CONFIG_MACH_MX51_BABBAGE) += board-mx51_babbage.o
-obj-$(CONFIG_MACH_MX51_3DS) += board-mx51_3ds.o
-obj-$(CONFIG_MACH_MX53_EVK) += board-mx53_evk.o
-obj-$(CONFIG_MACH_MX53_SMD) += board-mx53_smd.o
-obj-$(CONFIG_MACH_MX53_LOCO) += board-mx53_loco.o
-obj-$(CONFIG_MACH_MX53_ARD) += board-mx53_ard.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX51) += board-cpuimx51.o
-obj-$(CONFIG_MACH_EUKREA_MBIMX51_BASEBOARD) += eukrea_mbimx51-baseboard.o
-obj-$(CONFIG_MACH_EUKREA_CPUIMX51SD) += board-cpuimx51sd.o
-obj-$(CONFIG_MACH_EUKREA_MBIMXSD51_BASEBOARD) += eukrea_mbimxsd-baseboard.o
-obj-$(CONFIG_MX51_EFIKA_COMMON) += mx51_efika.o
-obj-$(CONFIG_MACH_MX51_EFIKAMX) += board-mx51_efikamx.o
-obj-$(CONFIG_MACH_MX51_EFIKASB) += board-mx51_efikasb.o
-obj-$(CONFIG_MACH_MX50_RDP) += board-mx50_rdp.o
-
-obj-$(CONFIG_MACH_IMX51_DT) += imx51-dt.o
-obj-$(CONFIG_MACH_IMX53_DT) += imx53-dt.o
diff --git a/arch/arm/mach-mx5/Makefile.boot b/arch/arm/mach-mx5/Makefile.boot
deleted file mode 100644 (file)
index ca207ca..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
-   zreladdr-$(CONFIG_ARCH_MX50)        += 0x70008000
-params_phys-$(CONFIG_ARCH_MX50)        := 0x70000100
-initrd_phys-$(CONFIG_ARCH_MX50)        := 0x70800000
-   zreladdr-$(CONFIG_ARCH_MX51)        += 0x90008000
-params_phys-$(CONFIG_ARCH_MX51)        := 0x90000100
-initrd_phys-$(CONFIG_ARCH_MX51)        := 0x90800000
-   zreladdr-$(CONFIG_ARCH_MX53)        += 0x70008000
-params_phys-$(CONFIG_ARCH_MX53)        := 0x70000100
-initrd_phys-$(CONFIG_ARCH_MX53)        := 0x70800000
diff --git a/arch/arm/mach-mx5/pm-imx5.c b/arch/arm/mach-mx5/pm-imx5.c
deleted file mode 100644 (file)
index 98052fc..0000000
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- *  Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
- *
- * The code contained herein is licensed under the GNU General Public
- * License. You may obtain a copy of the GNU General Public License
- * Version 2 or later at the following locations:
- *
- * http://www.opensource.org/licenses/gpl-license.html
- * http://www.gnu.org/copyleft/gpl.html
- */
-#include <linux/suspend.h>
-#include <linux/clk.h>
-#include <linux/io.h>
-#include <linux/err.h>
-#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
-#include <mach/common.h>
-#include <mach/hardware.h>
-#include "crm_regs.h"
-
-static struct clk *gpc_dvfs_clk;
-
-static int mx5_suspend_prepare(void)
-{
-       return clk_enable(gpc_dvfs_clk);
-}
-
-static int mx5_suspend_enter(suspend_state_t state)
-{
-       switch (state) {
-       case PM_SUSPEND_MEM:
-               mx5_cpu_lp_set(STOP_POWER_OFF);
-               break;
-       case PM_SUSPEND_STANDBY:
-               mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
-               break;
-       default:
-               return -EINVAL;
-       }
-
-       if (state == PM_SUSPEND_MEM) {
-               local_flush_tlb_all();
-               flush_cache_all();
-
-               /*clear the EMPGC0/1 bits */
-               __raw_writel(0, MXC_SRPG_EMPGC0_SRPGCR);
-               __raw_writel(0, MXC_SRPG_EMPGC1_SRPGCR);
-       }
-       cpu_do_idle();
-       return 0;
-}
-
-static void mx5_suspend_finish(void)
-{
-       clk_disable(gpc_dvfs_clk);
-}
-
-static int mx5_pm_valid(suspend_state_t state)
-{
-       return (state > PM_SUSPEND_ON && state <= PM_SUSPEND_MAX);
-}
-
-static const struct platform_suspend_ops mx5_suspend_ops = {
-       .valid = mx5_pm_valid,
-       .prepare = mx5_suspend_prepare,
-       .enter = mx5_suspend_enter,
-       .finish = mx5_suspend_finish,
-};
-
-static int __init mx5_pm_init(void)
-{
-       if (gpc_dvfs_clk == NULL)
-               gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
-
-       if (!IS_ERR(gpc_dvfs_clk)) {
-               if (cpu_is_mx51())
-                       suspend_set_ops(&mx5_suspend_ops);
-       } else
-               return -EPERM;
-
-       return 0;
-}
-device_initcall(mx5_pm_init);
index 904bd1dfcd2e26b194f24ebe62eee9a377a05016..d965da45160e67c4e03056a7d8c0d72178575042 100644 (file)
@@ -33,7 +33,6 @@ config ARCH_OMAP3
        default y
        select CPU_V7
        select USB_ARCH_HAS_EHCI
-       select ARM_L1_CACHE_SHIFT_6 if !ARCH_OMAP4
        select ARCH_HAS_OPP
        select PM_OPP if PM
        select ARM_CPU_SUSPEND if PM
@@ -214,13 +213,12 @@ config MACH_OMAP3_PANDORA
        depends on ARCH_OMAP3
        default y
        select OMAP_PACKAGE_CBB
-       select REGULATOR_FIXED_VOLTAGE
+       select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config MACH_OMAP3_TOUCHBOOK
        bool "OMAP3 Touch Book"
        depends on ARCH_OMAP3
        default y
-       select BACKLIGHT_CLASS_DEVICE
 
 config MACH_OMAP_3430SDP
        bool "OMAP 3430 SDP board"
@@ -266,7 +264,7 @@ config MACH_OMAP_ZOOM2
        select SERIAL_8250
        select SERIAL_CORE_CONSOLE
        select SERIAL_8250_CONSOLE
-       select REGULATOR_FIXED_VOLTAGE
+       select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config MACH_OMAP_ZOOM3
        bool "OMAP3630 Zoom3 board"
@@ -276,7 +274,7 @@ config MACH_OMAP_ZOOM3
        select SERIAL_8250
        select SERIAL_CORE_CONSOLE
        select SERIAL_8250_CONSOLE
-       select REGULATOR_FIXED_VOLTAGE
+       select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config MACH_CM_T35
        bool "CompuLab CM-T35/CM-T3730 modules"
@@ -335,7 +333,7 @@ config MACH_OMAP_4430SDP
        depends on ARCH_OMAP4
        select OMAP_PACKAGE_CBL
        select OMAP_PACKAGE_CBS
-       select REGULATOR_FIXED_VOLTAGE
+       select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config MACH_OMAP4_PANDA
        bool "OMAP4 Panda Board"
@@ -343,7 +341,7 @@ config MACH_OMAP4_PANDA
        depends on ARCH_OMAP4
        select OMAP_PACKAGE_CBL
        select OMAP_PACKAGE_CBS
-       select REGULATOR_FIXED_VOLTAGE
+       select REGULATOR_FIXED_VOLTAGE if REGULATOR
 
 config OMAP3_EMU
        bool "OMAP3 debugging peripherals"
@@ -366,8 +364,8 @@ config OMAP3_SDRC_AC_TIMING
          going on could result in system crashes;
 
 config OMAP4_ERRATA_I688
-       bool "OMAP4 errata: Async Bridge Corruption"
-       depends on ARCH_OMAP4
+       bool "OMAP4 errata: Async Bridge Corruption (BROKEN)"
+       depends on ARCH_OMAP4 && BROKEN
        select ARCH_HAS_BARRIERS
        help
          If data is stalled inside the asynchronous bridge because of back
index 39fba9df17fba3209396cba2d8be2d59553c8ab5..21fc876486601ff13f24220ef78512dc586a2e87 100644 (file)
@@ -52,8 +52,9 @@
 #define ETH_KS8851_QUART               138
 #define OMAP4_SFH7741_SENSOR_OUTPUT_GPIO       184
 #define OMAP4_SFH7741_ENABLE_GPIO              188
-#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */
+#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */
 #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
+#define HDMI_GPIO_HPD  63 /* Hotplug detect */
 #define DISPLAY_SEL_GPIO       59      /* LCD2/PicoDLP switch */
 #define DLP_POWER_ON_GPIO      40
 
@@ -603,8 +604,9 @@ static void __init omap_sfh7741prox_init(void)
 }
 
 static struct gpio sdp4430_hdmi_gpios[] = {
-       { HDMI_GPIO_HPD,        GPIOF_OUT_INIT_HIGH,    "hdmi_gpio_hpd"   },
+       { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" },
        { HDMI_GPIO_LS_OE,      GPIOF_OUT_INIT_HIGH,    "hdmi_gpio_ls_oe" },
+       { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" },
 };
 
 static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
@@ -621,8 +623,7 @@ static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev)
 
 static void sdp4430_panel_disable_hdmi(struct omap_dss_device *dssdev)
 {
-       gpio_free(HDMI_GPIO_LS_OE);
-       gpio_free(HDMI_GPIO_HPD);
+       gpio_free_array(sdp4430_hdmi_gpios, ARRAY_SIZE(sdp4430_hdmi_gpios));
 }
 
 static struct nokia_dsi_panel_data dsi1_panel = {
@@ -738,6 +739,10 @@ static void sdp4430_lcd_init(void)
                pr_err("%s: Could not get lcd2_reset_gpio\n", __func__);
 }
 
+static struct omap_dss_hdmi_data sdp4430_hdmi_data = {
+       .hpd_gpio = HDMI_GPIO_HPD,
+};
+
 static struct omap_dss_device sdp4430_hdmi_device = {
        .name = "hdmi",
        .driver_name = "hdmi_panel",
@@ -745,6 +750,7 @@ static struct omap_dss_device sdp4430_hdmi_device = {
        .platform_enable = sdp4430_panel_enable_hdmi,
        .platform_disable = sdp4430_panel_disable_hdmi,
        .channel = OMAP_DSS_CHANNEL_DIGIT,
+       .data = &sdp4430_hdmi_data,
 };
 
 static struct picodlp_panel_data sdp4430_picodlp_pdata = {
@@ -829,6 +835,10 @@ static void omap_4430sdp_display_init(void)
                omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP);
        else
                omap_hdmi_init(0);
+
+       omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
+       omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
+       omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
 }
 
 #ifdef CONFIG_OMAP_MUX
index 30ad40db2cf39558bdefc6bf40169114919ed6e7..b7779c206a90af0d9747f71a3d6d5f8963a90362 100644 (file)
@@ -51,8 +51,9 @@
 #define GPIO_HUB_NRESET                62
 #define GPIO_WIFI_PMENA                43
 #define GPIO_WIFI_IRQ          53
-#define HDMI_GPIO_HPD 60 /* Hot plug pin for HDMI */
+#define HDMI_GPIO_CT_CP_HPD 60 /* HPD mode enable/disable */
 #define HDMI_GPIO_LS_OE 41 /* Level shifter for HDMI */
+#define HDMI_GPIO_HPD  63 /* Hotplug detect */
 
 /* wl127x BT, FM, GPS connectivity chip */
 static int wl1271_gpios[] = {46, -1, -1};
@@ -413,8 +414,9 @@ int __init omap4_panda_dvi_init(void)
 }
 
 static struct gpio panda_hdmi_gpios[] = {
-       { HDMI_GPIO_HPD,        GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd"   },
+       { HDMI_GPIO_CT_CP_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ct_cp_hpd" },
        { HDMI_GPIO_LS_OE,      GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
+       { HDMI_GPIO_HPD, GPIOF_DIR_IN, "hdmi_gpio_hpd" },
 };
 
 static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
@@ -431,10 +433,13 @@ static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev)
 
 static void omap4_panda_panel_disable_hdmi(struct omap_dss_device *dssdev)
 {
-       gpio_free(HDMI_GPIO_LS_OE);
-       gpio_free(HDMI_GPIO_HPD);
+       gpio_free_array(panda_hdmi_gpios, ARRAY_SIZE(panda_hdmi_gpios));
 }
 
+static struct omap_dss_hdmi_data omap4_panda_hdmi_data = {
+       .hpd_gpio = HDMI_GPIO_HPD,
+};
+
 static struct omap_dss_device  omap4_panda_hdmi_device = {
        .name = "hdmi",
        .driver_name = "hdmi_panel",
@@ -442,6 +447,7 @@ static struct omap_dss_device  omap4_panda_hdmi_device = {
        .platform_enable = omap4_panda_panel_enable_hdmi,
        .platform_disable = omap4_panda_panel_disable_hdmi,
        .channel = OMAP_DSS_CHANNEL_DIGIT,
+       .data = &omap4_panda_hdmi_data,
 };
 
 static struct omap_dss_device *omap4_panda_dss_devices[] = {
@@ -473,6 +479,10 @@ void omap4_panda_display_init(void)
                omap_hdmi_init(OMAP_HDMI_SDA_SCL_EXTERNAL_PULLUP);
        else
                omap_hdmi_init(0);
+
+       omap_mux_init_gpio(HDMI_GPIO_LS_OE, OMAP_PIN_OUTPUT);
+       omap_mux_init_gpio(HDMI_GPIO_CT_CP_HPD, OMAP_PIN_OUTPUT);
+       omap_mux_init_gpio(HDMI_GPIO_HPD, OMAP_PIN_INPUT_PULLDOWN);
 }
 
 static void __init omap4_panda_init(void)
index 46dfd1ae8f71a6001f90ecdcea237103f906dcc4..283d11eae693115b42d2bbcca9176f019274960c 100644 (file)
@@ -28,7 +28,6 @@
 #include <plat/board.h>
 #include <plat/mcbsp.h>
 #include <plat/mmc.h>
-#include <plat/iommu.h>
 #include <plat/dma.h>
 #include <plat/omap_hwmod.h>
 #include <plat/omap_device.h>
@@ -128,6 +127,10 @@ static struct platform_device omap2cam_device = {
 };
 #endif
 
+#if defined(CONFIG_IOMMU_API)
+
+#include <plat/iommu.h>
+
 static struct resource omap3isp_resources[] = {
        {
                .start          = OMAP3430_ISP_BASE,
@@ -224,6 +227,15 @@ int omap3_init_camera(struct isp_platform_data *pdata)
        return platform_device_register(&omap3isp_device);
 }
 
+#else /* !CONFIG_IOMMU_API */
+
+int omap3_init_camera(struct isp_platform_data *pdata)
+{
+       return 0;
+}
+
+#endif
+
 static inline void omap_init_camera(void)
 {
 #if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE)
@@ -393,6 +405,7 @@ static int omap_mcspi_init(struct omap_hwmod *oh, void *unused)
                        break;
        default:
                        pr_err("Invalid McSPI Revision value\n");
+                       kfree(pdata);
                        return -EINVAL;
        }
 
index 3c446d1a1781fe3d819b2732ea28e2426c2a1348..3677b1f58b85f32c25e9c4f1e886a0e259ee9102 100644 (file)
@@ -103,12 +103,8 @@ static void omap4_hdmi_mux_pads(enum omap_hdmi_flags flags)
        u32 reg;
        u16 control_i2c_1;
 
-       /* PAD0_HDMI_HPD_PAD1_HDMI_CEC */
-       omap_mux_init_signal("hdmi_hpd",
-                       OMAP_PIN_INPUT_PULLUP);
        omap_mux_init_signal("hdmi_cec",
                        OMAP_PIN_INPUT_PULLUP);
-       /* PAD0_HDMI_DDC_SCL_PAD1_HDMI_DDC_SDA */
        omap_mux_init_signal("hdmi_ddc_scl",
                        OMAP_PIN_INPUT_PULLUP);
        omap_mux_init_signal("hdmi_ddc_sda",
index 130034bf01d5f880541976cf3210de7e74293ce1..dfffbbf4c009624c87375b6322a2a7285cd8a2e7 100644 (file)
@@ -528,7 +528,13 @@ int gpmc_cs_configure(int cs, int cmd, int wval)
 
        case GPMC_CONFIG_DEV_SIZE:
                regval  = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1);
+
+               /* clear 2 target bits */
+               regval &= ~GPMC_CONFIG1_DEVICESIZE(3);
+
+               /* set the proper value */
                regval |= GPMC_CONFIG1_DEVICESIZE(wval);
+
                gpmc_cs_write_reg(cs, GPMC_CS_CONFIG1, regval);
                break;
 
index bd844af13af56106d7fe5511dcaa6d2ebf6f7afa..ad0adb5a1e0eb62b025abc56006d068f92771d32 100644 (file)
@@ -175,14 +175,15 @@ static void hsmmc2_select_input_clk_src(struct omap_mmc_platform_data *mmc)
 {
        u32 reg;
 
-       if (mmc->slots[0].internal_clock) {
-               reg = omap_ctrl_readl(control_devconf1_offset);
+       reg = omap_ctrl_readl(control_devconf1_offset);
+       if (mmc->slots[0].internal_clock)
                reg |= OMAP2_MMCSDIO2ADPCLKISEL;
-               omap_ctrl_writel(reg, control_devconf1_offset);
-       }
+       else
+               reg &= ~OMAP2_MMCSDIO2ADPCLKISEL;
+       omap_ctrl_writel(reg, control_devconf1_offset);
 }
 
-static void hsmmc23_before_set_reg(struct device *dev, int slot,
+static void hsmmc2_before_set_reg(struct device *dev, int slot,
                                   int power_on, int vdd)
 {
        struct omap_mmc_platform_data *mmc = dev->platform_data;
@@ -407,14 +408,13 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
                        c->caps &= ~MMC_CAP_8_BIT_DATA;
                        c->caps |= MMC_CAP_4_BIT_DATA;
                }
-               /* FALLTHROUGH */
-       case 3:
                if (mmc->slots[0].features & HSMMC_HAS_PBIAS) {
                        /* off-chip level shifting, or none */
-                       mmc->slots[0].before_set_reg = hsmmc23_before_set_reg;
+                       mmc->slots[0].before_set_reg = hsmmc2_before_set_reg;
                        mmc->slots[0].after_set_reg = NULL;
                }
                break;
+       case 3:
        case 4:
        case 5:
                mmc->slots[0].before_set_reg = NULL;
index 3f174d51f67fb8e4f41f01ebbbe577c892999449..eb50c29fb6448e1404eef693e8f74c5e10641aed 100644 (file)
@@ -388,7 +388,7 @@ static void __init omap_hwmod_init_postsetup(void)
        omap_pm_if_early_init();
 }
 
-#ifdef CONFIG_ARCH_OMAP2
+#ifdef CONFIG_SOC_OMAP2420
 void __init omap2420_init_early(void)
 {
        omap2_set_globals_242x();
@@ -400,7 +400,9 @@ void __init omap2420_init_early(void)
        omap_hwmod_init_postsetup();
        omap2420_clk_init();
 }
+#endif
 
+#ifdef CONFIG_SOC_OMAP2430
 void __init omap2430_init_early(void)
 {
        omap2_set_globals_243x();
index 69f3c72d959b61e6cefe7c62462fc30f05a0b15f..d8f8ef40290f4dc4c803e69ce87fe5111baed499 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/memblock.h>
 
 #include <asm/cacheflush.h>
+#include <asm/memblock.h>
 
 #include <mach/omap-secure.h>
 
@@ -57,20 +58,10 @@ u32 omap_secure_dispatcher(u32 idx, u32 flag, u32 nargs, u32 arg1, u32 arg2,
 /* Allocate the memory to save secure ram */
 int __init omap_secure_ram_reserve_memblock(void)
 {
-       phys_addr_t paddr;
        u32 size = OMAP_SECURE_RAM_STORAGE;
 
        size = ALIGN(size, SZ_1M);
-       paddr = memblock_alloc(size, SZ_1M);
-       if (!paddr) {
-               pr_err("%s: failed to reserve %x bytes\n",
-                               __func__, size);
-               return -ENOMEM;
-       }
-       memblock_free(paddr, size);
-       memblock_remove(paddr, size);
-
-       omap_secure_memblock_base = paddr;
+       omap_secure_memblock_base = arm_memblock_steal(size, SZ_1M);
 
        return 0;
 }
index bc16c818c6b72e77ae276976c887b7a85647a9df..40a8fbc07e4b766717b35e8b69f88794c1335df7 100644 (file)
@@ -20,6 +20,7 @@
 #include <asm/hardware/gic.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/map.h>
+#include <asm/memblock.h>
 
 #include <plat/irqs.h>
 #include <plat/sram.h>
@@ -61,13 +62,8 @@ static int __init omap_barriers_init(void)
                return -ENODEV;
 
        size = ALIGN(PAGE_SIZE, SZ_1M);
-       paddr = memblock_alloc(size, SZ_1M);
-       if (!paddr) {
-               pr_err("%s: failed to reserve 4 Kbytes\n", __func__);
-               return -ENOMEM;
-       }
-       memblock_free(paddr, size);
-       memblock_remove(paddr, size);
+       paddr = arm_memblock_steal(size, SZ_1M);
+
        dram_io_desc[0].virtual = OMAP4_DRAM_BARRIER_VA;
        dram_io_desc[0].pfn = __phys_to_pfn(paddr);
        dram_io_desc[0].length = size;
index c11273da5dcc33f046e94babfdb6f34c1d6f2778..f08e442af3976d371bda89390c6529fe25839604 100644 (file)
@@ -55,27 +55,6 @@ struct omap_hwmod_class omap2_dss_hwmod_class = {
        .reset  = omap_dss_reset,
 };
 
-/*
- * 'dispc' class
- * display controller
- */
-
-static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
-       .rev_offs       = 0x0000,
-       .sysc_offs      = 0x0010,
-       .syss_offs      = 0x0014,
-       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
-                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
-       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-                          MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
-       .sysc_fields    = &omap_hwmod_sysc_type1,
-};
-
-struct omap_hwmod_class omap2_dispc_hwmod_class = {
-       .name   = "dispc",
-       .sysc   = &omap2_dispc_sysc,
-};
-
 /*
  * 'rfbi' class
  * remote frame buffer interface
index 177dee20faef1ef79b8bacdcde89a4b8e86f1946..2a6729741b069c2fd7633bbfb14dab1432c471d7 100644 (file)
@@ -28,6 +28,28 @@ struct omap_hwmod_dma_info omap2xxx_dss_sdma_chs[] = {
        { .name = "dispc", .dma_req = 5 },
        { .dma_req = -1 }
 };
+
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap2_dispc_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+                          MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+struct omap_hwmod_class omap2_dispc_hwmod_class = {
+       .name   = "dispc",
+       .sysc   = &omap2_dispc_sysc,
+};
+
 /* OMAP2xxx Timer Common */
 static struct omap_hwmod_class_sysconfig omap2xxx_timer_sysc = {
        .rev_offs       = 0x0000,
index 5324e8d93bc0262d9019db6feb7f81fdc42a8f44..3c8dd928628efd7c01cad5a1d2743c7ddc34a660 100644 (file)
@@ -1480,6 +1480,28 @@ static struct omap_hwmod omap3xxx_dss_core_hwmod = {
        .masters_cnt    = ARRAY_SIZE(omap3xxx_dss_masters),
 };
 
+/*
+ * 'dispc' class
+ * display controller
+ */
+
+static struct omap_hwmod_class_sysconfig omap3_dispc_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .syss_offs      = 0x0014,
+       .sysc_flags     = (SYSC_HAS_SIDLEMODE | SYSC_HAS_MIDLEMODE |
+                          SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+                          SYSC_HAS_ENAWAKEUP),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+                          MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
+       .sysc_fields    = &omap_hwmod_sysc_type1,
+};
+
+static struct omap_hwmod_class omap3_dispc_hwmod_class = {
+       .name   = "dispc",
+       .sysc   = &omap3_dispc_sysc,
+};
+
 /* l4_core -> dss_dispc */
 static struct omap_hwmod_ocp_if omap3xxx_l4_core__dss_dispc = {
        .master         = &omap3xxx_l4_core_hwmod,
@@ -1503,7 +1525,7 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_dispc_slaves[] = {
 
 static struct omap_hwmod omap3xxx_dss_dispc_hwmod = {
        .name           = "dss_dispc",
-       .class          = &omap2_dispc_hwmod_class,
+       .class          = &omap3_dispc_hwmod_class,
        .mpu_irqs       = omap2_dispc_irqs,
        .main_clk       = "dss1_alwon_fck",
        .prcm           = {
@@ -3523,12 +3545,6 @@ static __initdata struct omap_hwmod *omap3xxx_hwmods[] = {
        &omap3xxx_uart2_hwmod,
        &omap3xxx_uart3_hwmod,
 
-       /* dss class */
-       &omap3xxx_dss_dispc_hwmod,
-       &omap3xxx_dss_dsi1_hwmod,
-       &omap3xxx_dss_rfbi_hwmod,
-       &omap3xxx_dss_venc_hwmod,
-
        /* i2c class */
        &omap3xxx_i2c1_hwmod,
        &omap3xxx_i2c2_hwmod,
@@ -3635,6 +3651,15 @@ static __initdata struct omap_hwmod *am35xx_hwmods[] = {
        NULL
 };
 
+static __initdata struct omap_hwmod *omap3xxx_dss_hwmods[] = {
+       /* dss class */
+       &omap3xxx_dss_dispc_hwmod,
+       &omap3xxx_dss_dsi1_hwmod,
+       &omap3xxx_dss_rfbi_hwmod,
+       &omap3xxx_dss_venc_hwmod,
+       NULL
+};
+
 int __init omap3xxx_hwmod_init(void)
 {
        int r;
@@ -3708,6 +3733,21 @@ int __init omap3xxx_hwmod_init(void)
 
        if (h)
                r = omap_hwmod_register(h);
+       if (r < 0)
+               return r;
+
+       /*
+        * DSS code presumes that dss_core hwmod is handled first,
+        * _before_ any other DSS related hwmods so register common
+        * DSS hwmods last to ensure that dss_core is already registered.
+        * Otherwise strange things may happen: for example, if dispc is
+        * handled before dss_core and DSS is enabled in the bootloader,
+        * DISPC will be reset with its outputs enabled, which sometimes
+        * leads to an unrecoverable L3 error.
+        * XXX The long-term fix to this is to ensure modules are set up
+        * in dependency order in the hwmod core code.
+        */
+       r = omap_hwmod_register(omap3xxx_dss_hwmods);
 
        return r;
 }
index f9f1510817603332292348eedccca1155ac80895..ef0524c10a840296b089f4e3ca81d291f737a503 100644 (file)
@@ -1031,6 +1031,7 @@ static struct omap_hwmod_dma_info omap44xx_dmic_sdma_reqs[] = {
 
 static struct omap_hwmod_addr_space omap44xx_dmic_addrs[] = {
        {
+               .name           = "mpu",
                .pa_start       = 0x4012e000,
                .pa_end         = 0x4012e07f,
                .flags          = ADDR_TYPE_RT
@@ -1049,6 +1050,7 @@ static struct omap_hwmod_ocp_if omap44xx_l4_abe__dmic = {
 
 static struct omap_hwmod_addr_space omap44xx_dmic_dma_addrs[] = {
        {
+               .name           = "dma",
                .pa_start       = 0x4902e000,
                .pa_end         = 0x4902e07f,
                .flags          = ADDR_TYPE_RT
index c1c4d86a79a8e5f7ff32b2739928963be7a4d4e2..9ce765407ad55d5ac9190af77cdb48338c338752 100644 (file)
@@ -19,6 +19,7 @@
 #include "common.h"
 #include <plat/cpu.h>
 #include <plat/prcm.h>
+#include <plat/irqs.h>
 
 #include "vp.h"
 
index 9dd93453e563eaa666b2640663d529dc9102c928..7e755bb0ffc4c6a9bbfa906609cc370e383fdbf1 100644 (file)
@@ -897,7 +897,7 @@ static int __init omap_sr_probe(struct platform_device *pdev)
                ret = sr_late_init(sr_info);
                if (ret) {
                        pr_warning("%s: Error in SR late init\n", __func__);
-                       return ret;
+                       goto err_iounmap;
                }
        }
 
index 6eeff0e0ae01932d0e841444754d913a30e748b5..5c9acea957619196d502baaa3fd591e96a1db01d 100644 (file)
@@ -270,7 +270,7 @@ static struct clocksource clocksource_gpt = {
 static u32 notrace dmtimer_read_sched_clock(void)
 {
        if (clksrc.reserved)
-               return __omap_dm_timer_read_counter(clksrc.io_base, 1);
+               return __omap_dm_timer_read_counter(&clksrc, 1);
 
        return 0;
 }
index 6c89cf8ab22eece43dd765896bf0e53824f8753e..2ecba6743b8e66b050f7bb79665878a34878484b 100644 (file)
@@ -67,7 +67,7 @@ static void picoxcell_add_clocksource(struct device_node *source_timer)
 
 static void __iomem *sched_io_base;
 
-unsigned u32 notrace picoxcell_read_sched_clock(void)
+static u32 picoxcell_read_sched_clock(void)
 {
        return __raw_readl(sched_io_base);
 }
index 18fd177073f4a54862e17a60f8d2b9c459df3c1f..5bc13121eac5d15eb239e3992b18f90720f67416 100644 (file)
@@ -415,29 +415,9 @@ static struct resource pxa_rtc_resources[] = {
        },
 };
 
-static struct resource sa1100_rtc_resources[] = {
-       [0] = {
-               .start  = 0x40900000,
-               .end    = 0x409000ff,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = IRQ_RTC1Hz,
-               .end    = IRQ_RTC1Hz,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [2] = {
-               .start  = IRQ_RTCAlrm,
-               .end    = IRQ_RTCAlrm,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
 struct platform_device sa1100_device_rtc = {
        .name           = "sa1100-rtc",
        .id             = -1,
-       .num_resources  = ARRAY_SIZE(sa1100_rtc_resources),
-       .resource       = sa1100_rtc_resources,
 };
 
 struct platform_device pxa_device_rtc = {
index adf058fa97ee56665cc6d56f33d7ec8513de6abb..91e4f6c037661420e5f3e02e30af9174eef9edfd 100644 (file)
@@ -209,8 +209,6 @@ static struct clk_lookup pxa25x_clkregs[] = {
        INIT_CLKREG(&clk_pxa25x_gpio11, NULL, "GPIO11_CLK"),
        INIT_CLKREG(&clk_pxa25x_gpio12, NULL, "GPIO12_CLK"),
        INIT_CLKREG(&clk_pxa25x_mem, "pxa2xx-pcmcia", NULL),
-       INIT_CLKREG(&clk_dummy, "pxa-gpio", NULL),
-       INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
 };
 
 static struct clk_lookup pxa25x_hwuart_clkreg =
index 180bd8675d4b01a6044b6a32a0301e34c29e98c1..aed6cbcf386641e45147d67cb036301e9a6a54e6 100644 (file)
@@ -230,8 +230,6 @@ static struct clk_lookup pxa27x_clkregs[] = {
        INIT_CLKREG(&clk_pxa27x_im, NULL, "IMCLK"),
        INIT_CLKREG(&clk_pxa27x_memc, NULL, "MEMCLK"),
        INIT_CLKREG(&clk_pxa27x_mem, "pxa2xx-pcmcia", NULL),
-       INIT_CLKREG(&clk_dummy, "pxa-gpio", NULL),
-       INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
 };
 
 #ifdef CONFIG_PM
index 0388eda7878aa9ac3490188cc4646c5d357b6733..40bb16501d8601789875c03bbef88dd87519c925 100644 (file)
@@ -89,7 +89,6 @@ static DEFINE_PXA3_CKEN(gcu, PXA300_GCU, 0, 0);
 static struct clk_lookup common_clkregs[] = {
        INIT_CLKREG(&clk_common_nand, "pxa3xx-nand", NULL),
        INIT_CLKREG(&clk_gcu, "pxa3xx-gcu", NULL),
-       INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
 };
 
 static DEFINE_PXA3_CKEN(pxa310_mmc3, MMC3, 19500000, 0);
index d487e1ff4c9a45e699f367e5e18be19709cdca17..8d614ecd8e998d3d663187656dd108ca29405e62 100644 (file)
@@ -83,7 +83,6 @@ static DEFINE_PXA3_CKEN(gcu, PXA320_GCU, 0, 0);
 static struct clk_lookup pxa320_clkregs[] = {
        INIT_CLKREG(&clk_pxa320_nand, "pxa3xx-nand", NULL),
        INIT_CLKREG(&clk_gcu, "pxa3xx-gcu", NULL),
-       INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
 };
 
 static int __init pxa320_init(void)
index f107c71c7589f186918fb1c7e080f84ec067261d..4f402afa6609c0ed584100d951347ec1272f8531 100644 (file)
@@ -67,7 +67,6 @@ static struct clk_lookup pxa3xx_clkregs[] = {
        INIT_CLKREG(&clk_pxa3xx_pout, NULL, "CLK_POUT"),
        /* Power I2C clock is always on */
        INIT_CLKREG(&clk_dummy, "pxa3xx-pwri2c.1", NULL),
-       INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
        INIT_CLKREG(&clk_pxa3xx_lcd, "pxa2xx-fb", NULL),
        INIT_CLKREG(&clk_pxa3xx_camera, NULL, "CAMCLK"),
        INIT_CLKREG(&clk_pxa3xx_ac97, NULL, "AC97CLK"),
index fccc644702e6d4b05b67f75a55b33c7839756dc2..d082a583df78a14c0bc4074db270bb8ce2393ca1 100644 (file)
@@ -217,7 +217,6 @@ static struct clk_lookup pxa95x_clkregs[] = {
        INIT_CLKREG(&clk_pxa95x_pout, NULL, "CLK_POUT"),
        /* Power I2C clock is always on */
        INIT_CLKREG(&clk_dummy, "pxa3xx-pwri2c.1", NULL),
-       INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
        INIT_CLKREG(&clk_pxa95x_lcd, "pxa2xx-fb", NULL),
        INIT_CLKREG(&clk_pxa95x_ffuart, "pxa2xx-uart.0", NULL),
        INIT_CLKREG(&clk_pxa95x_btuart, "pxa2xx-uart.1", NULL),
index ac1aed2a8da4c7a27c84def422a76f51540b45ef..eb55f05bef3a1554b1a65485eed8fd3bb529853a 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 
 #include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
 
 extern volatile int pen_release;
 
index 794a8d91a6a62110872c1a989f167b10a90f595c..124bce6b4d7ba55df09bc262a3caf2acaf58b0fe 100644 (file)
 #define REALVIEW_EB_USB_BASE           0x4F000000      /* USB */
 
 #ifdef CONFIG_REALVIEW_EB_ARM11MP_REVB
-#define REALVIEW_EB11MP_SCU_BASE       0x10100000      /* SCU registers */
-#define REALVIEW_EB11MP_GIC_CPU_BASE   0x10100100      /* Generic interrupt controller CPU interface */
-#define REALVIEW_EB11MP_TWD_BASE       0x10100600
-#define REALVIEW_EB11MP_GIC_DIST_BASE  0x10101000      /* Generic interrupt controller distributor */
+#define REALVIEW_EB11MP_PRIV_MEM_BASE  0x1F000000
 #define REALVIEW_EB11MP_L220_BASE      0x10102000      /* L220 registers */
 #define REALVIEW_EB11MP_SYS_PLD_CTRL1  0xD8            /* Register offset for MPCore sysctl */
 #else
-#define REALVIEW_EB11MP_SCU_BASE       0x1F000000      /* SCU registers */
-#define REALVIEW_EB11MP_GIC_CPU_BASE   0x1F000100      /* Generic interrupt controller CPU interface */
-#define REALVIEW_EB11MP_TWD_BASE       0x1F000600
-#define REALVIEW_EB11MP_GIC_DIST_BASE  0x1F001000      /* Generic interrupt controller distributor */
+#define REALVIEW_EB11MP_PRIV_MEM_BASE  0x1F000000
 #define REALVIEW_EB11MP_L220_BASE      0x1F002000      /* L220 registers */
 #define REALVIEW_EB11MP_SYS_PLD_CTRL1  0x74            /* Register offset for MPCore sysctl */
 #endif
 
+#define REALVIEW_EB11MP_PRIV_MEM_SIZE  SZ_8K
+#define REALVIEW_EB11MP_PRIV_MEM_OFF(x)        (REALVIEW_EB11MP_PRIV_MEM_BASE + (x))
+
+#define REALVIEW_EB11MP_SCU_BASE       REALVIEW_EB11MP_PRIV_MEM_OFF(0)         /* SCU registers */
+#define REALVIEW_EB11MP_GIC_CPU_BASE   REALVIEW_EB11MP_PRIV_MEM_OFF(0x0100)    /* Generic interrupt controller CPU interface */
+#define REALVIEW_EB11MP_TWD_BASE       REALVIEW_EB11MP_PRIV_MEM_OFF(0x0600)
+#define REALVIEW_EB11MP_GIC_DIST_BASE  REALVIEW_EB11MP_PRIV_MEM_OFF(0x1000)    /* Generic interrupt controller distributor */
+
 /*
  * Core tile identification (REALVIEW_SYS_PROCID)
  */
index 7abf918b77e9fec5af2f7334e277c2dc82684565..aa2d4e02ea2ca5142558255eef93ca6757f066ef 100644 (file)
@@ -75,6 +75,8 @@
 /*
  * Testchip peripheral and fpga gic regions
  */
+#define REALVIEW_TC11MP_PRIV_MEM_BASE          0x1F000000
+#define REALVIEW_TC11MP_PRIV_MEM_SIZE          SZ_8K
 #define REALVIEW_TC11MP_SCU_BASE               0x1F000000      /* IRQ, Test chip */
 #define REALVIEW_TC11MP_GIC_CPU_BASE           0x1F000100      /* Test chip interrupt controller CPU interface */
 #define REALVIEW_TC11MP_TWD_BASE               0x1F000600
index e83c654a58d0f4540445840e1b7f1af377c12bff..17c878ddbc70d1da5a63b31a6d1bba663841c704 100644 (file)
@@ -17,7 +17,6 @@
 #include <asm/hardware/gic.h>
 #include <asm/mach-types.h>
 #include <asm/smp_scu.h>
-#include <asm/unified.h>
 
 #include <mach/board-eb.h>
 #include <mach/board-pb11mp.h>
@@ -75,6 +74,6 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
         * until it receives a soft interrupt, and then the
         * secondary CPU branches to this address.
         */
-       __raw_writel(BSYM(virt_to_phys(versatile_secondary_startup)),
+       __raw_writel(virt_to_phys(versatile_secondary_startup),
                     __io_address(REALVIEW_SYS_FLAGSSET));
 }
index e62962117763879cb469c172d7f464b6339608d3..9578145f2df031f33d8e13c277e405073cc3d197 100644 (file)
@@ -91,14 +91,9 @@ static struct map_desc realview_eb_io_desc[] __initdata = {
 
 static struct map_desc realview_eb11mp_io_desc[] __initdata = {
        {
-               .virtual        = IO_ADDRESS(REALVIEW_EB11MP_SCU_BASE),
-               .pfn            = __phys_to_pfn(REALVIEW_EB11MP_SCU_BASE),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
-       }, {
-               .virtual        = IO_ADDRESS(REALVIEW_EB11MP_GIC_DIST_BASE),
-               .pfn            = __phys_to_pfn(REALVIEW_EB11MP_GIC_DIST_BASE),
-               .length         = SZ_4K,
+               .virtual        = IO_ADDRESS(REALVIEW_EB11MP_PRIV_MEM_BASE),
+               .pfn            = __phys_to_pfn(REALVIEW_EB11MP_PRIV_MEM_BASE),
+               .length         = REALVIEW_EB11MP_PRIV_MEM_SIZE,
                .type           = MT_DEVICE,
        }, {
                .virtual        = IO_ADDRESS(REALVIEW_EB11MP_L220_BASE),
index 127a3fd42ab13db2e8430b55166a66df9b46cfae..2147335f66f5d63bcc38772983aff10fb7d27166 100644 (file)
@@ -64,15 +64,10 @@ static struct map_desc realview_pb11mp_io_desc[] __initdata = {
                .pfn            = __phys_to_pfn(REALVIEW_PB11MP_GIC_DIST_BASE),
                .length         = SZ_4K,
                .type           = MT_DEVICE,
-       }, {
-               .virtual        = IO_ADDRESS(REALVIEW_TC11MP_GIC_CPU_BASE),
-               .pfn            = __phys_to_pfn(REALVIEW_TC11MP_GIC_CPU_BASE),
-               .length         = SZ_4K,
-               .type           = MT_DEVICE,
-       }, {
-               .virtual        = IO_ADDRESS(REALVIEW_TC11MP_GIC_DIST_BASE),
-               .pfn            = __phys_to_pfn(REALVIEW_TC11MP_GIC_DIST_BASE),
-               .length         = SZ_4K,
+       }, {    /* Maps the SCU, GIC CPU interface, TWD, GIC DIST */
+               .virtual        = IO_ADDRESS(REALVIEW_TC11MP_PRIV_MEM_BASE),
+               .pfn            = __phys_to_pfn(REALVIEW_TC11MP_PRIV_MEM_BASE),
+               .length         = REALVIEW_TC11MP_PRIV_MEM_SIZE,
                .type           = MT_DEVICE,
        }, {
                .virtual        = IO_ADDRESS(REALVIEW_SCTL_BASE),
index 5d55ab018b6b3f74b1946a67d0efd84a534e3caf..4cb2f951f1e9155d9b4434fac10ff4ea330e9039 100644 (file)
@@ -21,5 +21,6 @@
 #define CODEC_GPIO_BASE                        (GPIO_BOARD_START + 8)
 #define GLENFARCLAS_PMIC_GPIO_BASE     (GPIO_BOARD_START + 32)
 #define BANFF_PMIC_GPIO_BASE           (GPIO_BOARD_START + 64)
+#define MMGPIO_GPIO_BASE               (GPIO_BOARD_START + 96)
 
 #endif
index 1cc91d794c973e538e52a9678bff72fa4e1b8d0e..8077f650eb0e6c291ac607aff70a02b35dfa62b8 100644 (file)
@@ -260,6 +260,7 @@ static struct platform_device crag6410_dm9k_device = {
 
 static struct resource crag6410_mmgpio_resource[] = {
        [0] = {
+               .name   = "dat",
                .start  = S3C64XX_PA_XM0CSN4 + 1,
                .end    = S3C64XX_PA_XM0CSN4 + 1,
                .flags  = IORESOURCE_MEM,
@@ -272,7 +273,7 @@ static struct platform_device crag6410_mmgpio = {
        .resource       = crag6410_mmgpio_resource,
        .num_resources  = ARRAY_SIZE(crag6410_mmgpio_resource),
        .dev.platform_data = &(struct bgpio_pdata) {
-               .base   = -1,
+               .base   = MMGPIO_GPIO_BASE,
        },
 };
 
@@ -328,7 +329,6 @@ static struct platform_device wallvdd_device = {
 
 static struct platform_device *crag6410_devices[] __initdata = {
        &s3c_device_hsmmc0,
-       &s3c_device_hsmmc1,
        &s3c_device_hsmmc2,
        &s3c_device_i2c0,
        &s3c_device_i2c1,
@@ -355,7 +355,7 @@ static struct platform_device *crag6410_devices[] __initdata = {
 
 static struct pca953x_platform_data crag6410_pca_data = {
        .gpio_base      = PCA935X_GPIO_BASE,
-       .irq_base       = 0,
+       .irq_base       = -1,
 };
 
 /* VDDARM is controlled by DVS1 connected to GPK(0) */
@@ -683,12 +683,6 @@ static struct s3c_sdhci_platdata crag6410_hsmmc2_pdata = {
        .cd_type                = S3C_SDHCI_CD_PERMANENT,
 };
 
-static struct s3c_sdhci_platdata crag6410_hsmmc1_pdata = {
-       .max_width              = 4,
-       .cd_type                = S3C_SDHCI_CD_GPIO,
-       .ext_cd_gpio            = S3C64XX_GPF(11),
-};
-
 static void crag6410_cfg_sdhci0(struct platform_device *dev, int width)
 {
        /* Set all the necessary GPG pins to special-function 2 */
@@ -723,7 +717,6 @@ static void __init crag6410_machine_init(void)
        gpio_direction_output(S3C64XX_GPF(10), 1);
 
        s3c_sdhci0_set_platdata(&crag6410_hsmmc0_pdata);
-       s3c_sdhci1_set_platdata(&crag6410_hsmmc1_pdata);
        s3c_sdhci2_set_platdata(&crag6410_hsmmc2_pdata);
 
        s3c_i2c0_set_platdata(&i2c0_pdata);
index 055dac90e0e247c95258ecd412e47bb2297de1cc..7d3e81b9dd06229034603aa260064384d8d44a5d 100644 (file)
@@ -346,23 +346,10 @@ int __init s3c64xx_pm_init(void)
 
 static __init int s3c64xx_pm_initcall(void)
 {
-       u32 val;
-
        pm_cpu_prep = s3c64xx_pm_prepare;
        pm_cpu_sleep = s3c64xx_cpu_suspend;
        pm_uart_udivslot = 1;
 
-       /*
-        * Unconditionally disable power domains that contain only
-        * blocks which have no mainline driver support.
-        */
-       val = __raw_readl(S3C64XX_NORMAL_CFG);
-       val &= ~(S3C64XX_NORMALCFG_DOMAIN_G_ON |
-                S3C64XX_NORMALCFG_DOMAIN_V_ON |
-                S3C64XX_NORMALCFG_DOMAIN_I_ON |
-                S3C64XX_NORMALCFG_DOMAIN_P_ON);
-       __raw_writel(val, S3C64XX_NORMAL_CFG);
-
 #ifdef CONFIG_S3C_PM_DEBUG_LED_SMDK
        gpio_request(S3C64XX_GPN(12), "DEBUG_LED0");
        gpio_request(S3C64XX_GPN(13), "DEBUG_LED1");
index 5bc6b3837b2033cbaccf57b69b91e07ffd0fe76f..0c4b76ab4d8eba0037e305d5d8f9b17b36074fa7 100644 (file)
@@ -202,7 +202,6 @@ static struct irda_platform_data assabet_irda_data = {
 static struct mcp_plat_data assabet_mcp_data = {
        .mccr0          = MCCR0_ADM,
        .sclk_rate      = 11981000,
-       .codec          = "ucb1x00",
 };
 
 static void __init assabet_init(void)
@@ -253,17 +252,6 @@ static void __init assabet_init(void)
        sa11x0_register_mtd(&assabet_flash_data, assabet_flash_resources,
                            ARRAY_SIZE(assabet_flash_resources));
        sa11x0_register_irda(&assabet_irda_data);
-
-       /*
-        * Setup the PPC unit correctly.
-        */
-       PPDR &= ~PPC_RXD4;
-       PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
-       PSDR |= PPC_RXD4;
-       PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-       PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
-       ASSABET_BCR_set(ASSABET_BCR_CODEC_RST);
        sa11x0_register_mcp(&assabet_mcp_data);
 }
 
@@ -280,7 +268,7 @@ static void __init map_sa1100_gpio_regs( void )
        int prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO);
        pmd_t *pmd;
 
-       pmd = pmd_offset(pgd_offset_k(virt), virt);
+       pmd = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
        *pmd = __pmd(phys | prot);
        flush_pmd_entry(pmd);
 }
index d12d0f48b1dc4bec59f9a9939edc6c93e5f3ddcb..11bb6d0b9be377b6c926f3e759a03a21d2e9ffff 100644 (file)
@@ -124,23 +124,12 @@ static void __init cerf_map_io(void)
 static struct mcp_plat_data cerf_mcp_data = {
        .mccr0          = MCCR0_ADM,
        .sclk_rate      = 11981000,
-       .codec          = "ucb1x00",
 };
 
 static void __init cerf_init(void)
 {
        platform_add_devices(cerf_devices, ARRAY_SIZE(cerf_devices));
        sa11x0_register_mtd(&cerf_flash_data, &cerf_flash_resource, 1);
-
-       /*
-        * Setup the PPC unit correctly.
-        */
-       PPDR &= ~PPC_RXD4;
-       PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
-       PSDR |= PPC_RXD4;
-       PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-       PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
        sa11x0_register_mcp(&cerf_mcp_data);
 }
 
index d6df9f6c9f7e42d9d9d06a1401c89bc5a47b246d..dab3c6347a8f2d8e80bafba18b6916f8f97161f9 100644 (file)
 #include <linux/clk.h>
 #include <linux/spinlock.h>
 #include <linux/mutex.h>
-#include <linux/io.h>
-#include <linux/clkdev.h>
 
 #include <mach/hardware.h>
 
-struct clkops {
-       void                    (*enable)(struct clk *);
-       void                    (*disable)(struct clk *);
-       unsigned long           (*getrate)(struct clk *);
-};
-
+/*
+ * Very simple clock implementation - we only have one clock to deal with.
+ */
 struct clk {
-       const struct clkops     *ops;
-       unsigned long           rate;
        unsigned int            enabled;
 };
 
-#define INIT_CLKREG(_clk, _devname, _conname)          \
-       {                                               \
-               .clk            = _clk,                 \
-               .dev_id         = _devname,             \
-               .con_id         = _conname,             \
-       }
-
-#define DEFINE_CLK(_name, _ops, _rate)                 \
-struct clk clk_##_name = {                             \
-               .ops    = _ops,                         \
-               .rate   = _rate,                        \
-       }
-
-static DEFINE_SPINLOCK(clocks_lock);
-
-static void clk_gpio27_enable(struct clk *clk)
+static void clk_gpio27_enable(void)
 {
        /*
         * First, set up the 3.6864MHz clock on GPIO 27 for the SA-1111:
@@ -54,22 +32,38 @@ static void clk_gpio27_enable(struct clk *clk)
        TUCR = TUCR_3_6864MHz;
 }
 
-static void clk_gpio27_disable(struct clk *clk)
+static void clk_gpio27_disable(void)
 {
        TUCR = 0;
        GPDR &= ~GPIO_32_768kHz;
        GAFR &= ~GPIO_32_768kHz;
 }
 
+static struct clk clk_gpio27;
+
+static DEFINE_SPINLOCK(clocks_lock);
+
+struct clk *clk_get(struct device *dev, const char *id)
+{
+       const char *devname = dev_name(dev);
+
+       return strcmp(devname, "sa1111.0") ? ERR_PTR(-ENOENT) : &clk_gpio27;
+}
+EXPORT_SYMBOL(clk_get);
+
+void clk_put(struct clk *clk)
+{
+}
+EXPORT_SYMBOL(clk_put);
+
 int clk_enable(struct clk *clk)
 {
        unsigned long flags;
 
        spin_lock_irqsave(&clocks_lock, flags);
        if (clk->enabled++ == 0)
-               clk->ops->enable(clk);
+               clk_gpio27_enable();
        spin_unlock_irqrestore(&clocks_lock, flags);
-
        return 0;
 }
 EXPORT_SYMBOL(clk_enable);
@@ -82,48 +76,13 @@ void clk_disable(struct clk *clk)
 
        spin_lock_irqsave(&clocks_lock, flags);
        if (--clk->enabled == 0)
-               clk->ops->disable(clk);
+               clk_gpio27_disable();
        spin_unlock_irqrestore(&clocks_lock, flags);
 }
 EXPORT_SYMBOL(clk_disable);
 
 unsigned long clk_get_rate(struct clk *clk)
 {
-       unsigned long rate;
-
-       rate = clk->rate;
-       if (clk->ops->getrate)
-               rate = clk->ops->getrate(clk);
-
-       return rate;
+       return 3686400;
 }
 EXPORT_SYMBOL(clk_get_rate);
-
-const struct clkops clk_gpio27_ops = {
-       .enable         = clk_gpio27_enable,
-       .disable        = clk_gpio27_disable,
-};
-
-static void clk_dummy_enable(struct clk *clk) { }
-static void clk_dummy_disable(struct clk *clk) { }
-
-const struct clkops clk_dummy_ops = {
-       .enable         = clk_dummy_enable,
-       .disable        = clk_dummy_disable,
-};
-
-static DEFINE_CLK(gpio27, &clk_gpio27_ops, 3686400);
-static DEFINE_CLK(dummy, &clk_dummy_ops, 0);
-
-static struct clk_lookup sa11xx_clkregs[] = {
-       INIT_CLKREG(&clk_gpio27, "sa1111.0", NULL),
-       INIT_CLKREG(&clk_dummy, "sa1100-rtc", NULL),
-};
-
-static int __init sa11xx_clk_init(void)
-{
-       clkdev_add_table(sa11xx_clkregs, ARRAY_SIZE(sa11xx_clkregs));
-       return 0;
-}
-
-postcore_initcall(sa11xx_clk_init);
index c483912d08af4535ce964992a19d66f511c80c21..fd5652118ed19565d5754ffdd16f1cfe0db59011 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/timer.h>
 #include <linux/gpio.h>
 #include <linux/pda_power.h>
-#include <linux/mfd/ucb1x00.h>
 
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
@@ -86,15 +85,10 @@ static struct scoop_pcmcia_config collie_pcmcia_config = {
        .num_devs       = 1,
 };
 
-static struct ucb1x00_plat_data collie_ucb1x00_data = {
-       .gpio_base      = COLLIE_TC35143_GPIO_BASE,
-};
-
 static struct mcp_plat_data collie_mcp_data = {
        .mccr0          = MCCR0_ADM | MCCR0_ExtClk,
        .sclk_rate      = 9216000,
-       .codec          = "ucb1x00",
-       .codec_pdata    = &collie_ucb1x00_data,
+       .gpio_base      = COLLIE_TC35143_GPIO_BASE,
 };
 
 /*
@@ -144,8 +138,6 @@ static struct pda_power_pdata collie_power_data = {
 static struct resource collie_power_resource[] = {
        {
                .name           = "ac",
-               .start          = gpio_to_irq(COLLIE_GPIO_AC_IN),
-               .end            = gpio_to_irq(COLLIE_GPIO_AC_IN),
                .flags          = IORESOURCE_IRQ |
                                  IORESOURCE_IRQ_HIGHEDGE |
                                  IORESOURCE_IRQ_LOWEDGE,
@@ -347,7 +339,8 @@ static void __init collie_init(void)
 
        GPSR |= _COLLIE_GPIO_UCB1x00_RESET;
 
-
+       collie_power_resource[0].start = gpio_to_irq(COLLIE_GPIO_AC_IN);
+       collie_power_resource[0].end = gpio_to_irq(COLLIE_GPIO_AC_IN);
        platform_scoop_config = &collie_pcmcia_config;
 
        ret = platform_add_devices(devices, ARRAY_SIZE(devices));
@@ -357,16 +350,6 @@ static void __init collie_init(void)
 
        sa11x0_register_mtd(&collie_flash_data, collie_flash_resources,
                            ARRAY_SIZE(collie_flash_resources));
-
-       /*
-        * Setup the PPC unit correctly.
-        */
-       PPDR &= ~PPC_RXD4;
-       PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
-       PSDR |= PPC_RXD4;
-       PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-       PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
        sa11x0_register_mcp(&collie_mcp_data);
 
        sharpsl_save_param();
index aaa8acf76b7b551e29af1e1d04f38f6bba970b2c..19b2053f5af4146a68184ce4c40571cfa3353e67 100644 (file)
@@ -228,7 +228,7 @@ static int __init sa1100_cpu_init(struct cpufreq_policy *policy)
        return 0;
 }
 
-static struct cpufreq_driver sa1100_driver = {
+static struct cpufreq_driver sa1100_driver __refdata = {
        .flags          = CPUFREQ_STICKY,
        .verify         = sa11x0_verify_speed,
        .target         = sa1100_target,
index e3a28ca2a7b754fff9b095eb5c58e113525b16df..bb10ee2cb89f11f82c801d7f9c1d8ced11c1c3b7 100644 (file)
@@ -217,15 +217,10 @@ static struct platform_device sa11x0uart3_device = {
 static struct resource sa11x0mcp_resources[] = {
        [0] = {
                .start  = __PREG(Ser4MCCR0),
-               .end    = __PREG(Ser4MCCR0) + 0x1C - 1,
+               .end    = __PREG(Ser4MCCR0) + 0xffff,
                .flags  = IORESOURCE_MEM,
        },
        [1] = {
-               .start  = __PREG(Ser4MCCR1),
-               .end    = __PREG(Ser4MCCR1) + 0x4 - 1,
-               .flags  = IORESOURCE_MEM,
-       },
-       [2] = {
                .start  = IRQ_Ser4MCP,
                .end    = IRQ_Ser4MCP,
                .flags  = IORESOURCE_IRQ,
@@ -350,29 +345,9 @@ void sa11x0_register_irda(struct irda_platform_data *irda)
        sa11x0_register_device(&sa11x0ir_device, irda);
 }
 
-static struct resource sa11x0rtc_resources[] = {
-       [0] = {
-               .start  = 0x90010000,
-               .end    = 0x900100ff,
-               .flags  = IORESOURCE_MEM,
-       },
-       [1] = {
-               .start  = IRQ_RTC1Hz,
-               .end    = IRQ_RTC1Hz,
-               .flags  = IORESOURCE_IRQ,
-       },
-       [2] = {
-               .start  = IRQ_RTCAlrm,
-               .end    = IRQ_RTCAlrm,
-               .flags  = IORESOURCE_IRQ,
-       },
-};
-
 static struct platform_device sa11x0rtc_device = {
        .name           = "sa1100-rtc",
        .id             = -1,
-       .resource       = sa11x0rtc_resources,
-       .num_resources  = ARRAY_SIZE(sa11x0rtc_resources),
 };
 
 static struct platform_device *sa11x0_devices[] __initdata = {
index 586cec898b35ae8578025727e3ec4e2b679964e0..ed1a331508a754bf2e802f537689a97ee2b524c5 100644 (file)
@@ -17,8 +17,6 @@ struct mcp_plat_data {
        u32 mccr1;
        unsigned int sclk_rate;
        int gpio_base;
-       const char *codec;
-       void *codec_pdata;
 };
 
 #endif
index f50b00bd18a053603f98b77a5e25678b11f25b96..b412fc09c80cb30038e513c1b44021d1dabb42ff 100644 (file)
@@ -198,3 +198,5 @@ static int __init jornada_ssp_init(void)
 {
        return platform_driver_register(&jornadassp_driver);
 }
+
+module_init(jornada_ssp_init);
index d117ceab6215c4f8d814504aa313bacf17460b45..af4e2761f3dbf4a6254bfab53f98080334e5f387 100644 (file)
 static struct mcp_plat_data lart_mcp_data = {
        .mccr0          = MCCR0_ADM,
        .sclk_rate      = 11981000,
-       .codec          = "ucb1x00",
 };
 
 static void __init lart_init(void)
 {
-       /*
-        * Setup the PPC unit correctly.
-        */
-       PPDR &= ~PPC_RXD4;
-       PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
-       PSDR |= PPC_RXD4;
-       PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-       PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
        sa11x0_register_mcp(&lart_mcp_data);
 }
 
index 748d34435b3f070000f2afcfc420253d85ccb060..318b2b766a0b3ee7b8921c2904b65c0fdd551c8c 100644 (file)
@@ -55,22 +55,11 @@ static struct resource shannon_flash_resource = {
 static struct mcp_plat_data shannon_mcp_data = {
        .mccr0          = MCCR0_ADM,
        .sclk_rate      = 11981000,
-       .codec          = "ucb1x00",
 };
 
 static void __init shannon_init(void)
 {
        sa11x0_register_mtd(&shannon_flash_data, &shannon_flash_resource, 1);
-
-       /*
-        * Setup the PPC unit correctly.
-        */
-       PPDR &= ~PPC_RXD4;
-       PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
-       PSDR |= PPC_RXD4;
-       PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-       PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
        sa11x0_register_mcp(&shannon_mcp_data);
 }
 
index 458ececefa58a122332de40798d3df7aae3d1ea2..e17c04d6e32428af6aa7c8eb1fbc35ca6d7a02d8 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/mtd/partitions.h>
 #include <linux/io.h>
 #include <linux/gpio.h>
-#include <linux/mfd/ucb1x00.h>
 
 #include <asm/irq.h>
 #include <mach/hardware.h>
@@ -188,15 +187,10 @@ static struct resource simpad_flash_resources [] = {
        }
 };
 
-static struct ucb1x00_plat_data simpad_ucb1x00_data = {
-       .gpio_base      = SIMPAD_UCB1X00_GPIO_BASE,
-};
-
 static struct mcp_plat_data simpad_mcp_data = {
        .mccr0          = MCCR0_ADM,
        .sclk_rate      = 11981000,
-       .codec          = "ucb1300",
-       .codec_pdata    = &simpad_ucb1x00_data,
+       .gpio_base      = SIMPAD_UCB1X00_GPIO_BASE,
 };
 
 
@@ -384,16 +378,6 @@ static int __init simpad_init(void)
 
        sa11x0_register_mtd(&simpad_flash_data, simpad_flash_resources,
                              ARRAY_SIZE(simpad_flash_resources));
-
-       /*
-        * Setup the PPC unit correctly.
-        */
-       PPDR &= ~PPC_RXD4;
-       PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
-       PSDR |= PPC_RXD4;
-       PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-       PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
-
        sa11x0_register_mcp(&simpad_mcp_data);
 
        ret = platform_add_devices(devices, ARRAY_SIZE(devices));
index 77b8fc12fc2f1ab0000ae734fff09624ba7cbf44..fcf8b1761aef723c6f3e5f3bd1f2b42cf436e04a 100644 (file)
@@ -276,7 +276,7 @@ static int sh7372_a3sp_suspend(void)
         * Serial consoles make use of SCIF hardware located in A3SP,
         * keep such power domain on if "no_console_suspend" is set.
         */
-       return console_suspend_enabled ? -EBUSY : 0;
+       return console_suspend_enabled ? 0 : -EBUSY;
 }
 
 struct sh7372_pm_domain sh7372_a3sp = {
index 1ea89be63e29e1d13e7783d0d7f2ff31f64057a9..a83cf51fc09906a13018472b4f563baebaf7bcb5 100644 (file)
@@ -445,31 +445,39 @@ static const struct sh_dmae_slave_config sh7372_dmae_slaves[] = {
        },
 };
 
+#define SH7372_CHCLR 0x220
+
 static const struct sh_dmae_channel sh7372_dmae_channels[] = {
        {
                .offset = 0,
                .dmars = 0,
                .dmars_bit = 0,
+               .chclr_offset = SH7372_CHCLR + 0,
        }, {
                .offset = 0x10,
                .dmars = 0,
                .dmars_bit = 8,
+               .chclr_offset = SH7372_CHCLR + 0x10,
        }, {
                .offset = 0x20,
                .dmars = 4,
                .dmars_bit = 0,
+               .chclr_offset = SH7372_CHCLR + 0x20,
        }, {
                .offset = 0x30,
                .dmars = 4,
                .dmars_bit = 8,
+               .chclr_offset = SH7372_CHCLR + 0x30,
        }, {
                .offset = 0x50,
                .dmars = 8,
                .dmars_bit = 0,
+               .chclr_offset = SH7372_CHCLR + 0x50,
        }, {
                .offset = 0x60,
                .dmars = 8,
                .dmars_bit = 8,
+               .chclr_offset = SH7372_CHCLR + 0x60,
        }
 };
 
@@ -487,6 +495,7 @@ static struct sh_dmae_pdata dma_platform_data = {
        .ts_shift       = ts_shift,
        .ts_shift_num   = ARRAY_SIZE(ts_shift),
        .dmaor_init     = DMAOR_DME,
+       .chclr_present  = 1,
 };
 
 /* Resource order important! */
@@ -494,7 +503,7 @@ static struct resource sh7372_dmae0_resources[] = {
        {
                /* Channel registers and DMAOR */
                .start  = 0xfe008020,
-               .end    = 0xfe00808f,
+               .end    = 0xfe00828f,
                .flags  = IORESOURCE_MEM,
        },
        {
@@ -522,7 +531,7 @@ static struct resource sh7372_dmae1_resources[] = {
        {
                /* Channel registers and DMAOR */
                .start  = 0xfe018020,
-               .end    = 0xfe01808f,
+               .end    = 0xfe01828f,
                .flags  = IORESOURCE_MEM,
        },
        {
@@ -550,7 +559,7 @@ static struct resource sh7372_dmae2_resources[] = {
        {
                /* Channel registers and DMAOR */
                .start  = 0xfe028020,
-               .end    = 0xfe02808f,
+               .end    = 0xfe02828f,
                .flags  = IORESOURCE_MEM,
        },
        {
@@ -653,6 +662,7 @@ static struct sh_dmae_pdata usb_dma0_platform_data = {
        .dmaor_is_32bit = 1,
        .needs_tend_set = 1,
        .no_dmars       = 1,
+       .slave_only     = 1,
 };
 
 static struct resource sh7372_usb_dmae0_resources[] = {
@@ -714,6 +724,7 @@ static struct sh_dmae_pdata usb_dma1_platform_data = {
        .dmaor_is_32bit = 1,
        .needs_tend_set = 1,
        .no_dmars       = 1,
+       .slave_only     = 1,
 };
 
 static struct resource sh7372_usb_dmae1_resources[] = {
index cc97ef892d1b337e1e056f6172196860a6e00dc8..4fe2e9eaf5016e643aec46e6e1652828ad9e3ea9 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/delay.h>
 #include <mach/common.h>
 #include <mach/r8a7779.h>
+#include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
 #include <asm/smp_twd.h>
 #include <asm/hardware/gic.h>
index be1ade76ccc81bb580523df640e2a3522638d510..0d159d64a34521a90b6b5f16ba14a8d294c5264f 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/spinlock.h>
 #include <linux/io.h>
 #include <mach/common.h>
+#include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
 #include <asm/smp_twd.h>
 #include <asm/hardware/gic.h>
index a3e0c8692f0d1ddabd689f35c86f6e358c5d49ba..52af00446a6335ff9fd5bb1fccbd9a3d8805caae 100644 (file)
@@ -7,6 +7,7 @@ config UX500_SOC_COMMON
        select HAS_MTU
        select ARM_ERRATA_753970
        select ARM_ERRATA_754322
+       select ARM_ERRATA_764369
 
 menu "Ux500 SoC"
 
index 23be34b3bb6e8a9f2e41547c2d18b724c1a17609..5dde4d4ebe882f37a6d826f008c5ffd4666c5a36 100644 (file)
@@ -261,6 +261,8 @@ void __init mop500_sdi_init(void)
 
 void __init snowball_sdi_init(void)
 {
+       /* On Snowball MMC_CAP_SD_HIGHSPEED isn't supported (Hardware issue?) */
+       mop500_sdi0_data.capabilities &= ~MMC_CAP_SD_HIGHSPEED;
        /* On-board eMMC */
        db8500_add_sdi4(&mop500_sdi4_data, U8500_SDI_V2_PERIPHID);
        /* External Micro SD slot */
index 122ddde00ba78deabf82db84273705598ffda34e..da5569d83d58d87216438bf65aa15de86a6f71ea 100644 (file)
 
 static void __iomem *l2x0_base;
 
-static inline void ux500_cache_wait(void __iomem *reg, unsigned long mask)
-{
-       /* wait for the operation to complete */
-       while (readl_relaxed(reg) & mask)
-               cpu_relax();
-}
-
-static inline void ux500_cache_sync(void)
-{
-       writel_relaxed(0, l2x0_base + L2X0_CACHE_SYNC);
-       ux500_cache_wait(l2x0_base + L2X0_CACHE_SYNC, 1);
-}
-
-/*
- * The L2 cache cannot be turned off in the non-secure world.
- * Dummy until a secure service is in place.
- */
-static void ux500_l2x0_disable(void)
-{
-}
-
-/*
- * This is only called when doing a kexec, just after turning off the L2
- * and L1 cache, and it is surrounded by a spinlock in the generic version.
- * However, we're not really turning off the L2 cache right now and the
- * PL310 does not support exclusive accesses (used to implement the spinlock).
- * So, the invalidation needs to be done without the spinlock.
- */
-static void ux500_l2x0_inv_all(void)
-{
-       uint32_t l2x0_way_mask = (1<<16) - 1;   /* Bitmask of active ways */
-
-       /* invalidate all ways */
-       writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
-       ux500_cache_wait(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
-       ux500_cache_sync();
-}
-
 static int __init ux500_l2x0_unlock(void)
 {
        int i;
@@ -85,9 +47,13 @@ static int __init ux500_l2x0_init(void)
        /* 64KB way size, 8 way associativity, force WA */
        l2x0_init(l2x0_base, 0x3e060000, 0xc0000fff);
 
-       /* Override invalidate function */
-       outer_cache.disable = ux500_l2x0_disable;
-       outer_cache.inv_all = ux500_l2x0_inv_all;
+       /*
+        * We can't disable the L2 cache, as we are running in non-secure
+        * mode. Currently this only seems to be called on the kexec path,
+        * so override outer_cache.disable with a nasty NULL assignment
+        * until we have some SMI service available.
+        */
+       outer_cache.disable = NULL;
 
        return 0;
 }
index 64fa451edcfd486bdbaa0163f55e93399597d8ed..08da5589bcd8a60179cc458dba05f735ea6f919f 100644 (file)
@@ -32,6 +32,8 @@ pen:  ldr     r7, [r6]
         * should now contain the SVC stack for this core
         */
        b       secondary_startup
+ENDPROC(u8500_secondary_startup)
 
+       .align 2
 1:     .long   .
        .long   pen_release
index 572015e57cd997f6b355de1a835404a94df12abc..c76f0f456f045d8baaaf2c9b228633f98d63186d 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 
 #include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
 
 extern volatile int pen_release;
 
index a19e398dade35d6e9c14c75da381f89d3a3420a2..d2058ef8345fd4518874d2ab409ae3daa376d5df 100644 (file)
@@ -19,6 +19,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/hardware/gic.h>
+#include <asm/smp_plat.h>
 #include <asm/smp_scu.h>
 #include <mach/hardware.h>
 #include <mach/setup.h>
index 0a01cbdfe06339492c8aa21632d3223af3bf4ef1..9f9e1c203061dc8cbe0cff19cd69420462b05810 100644 (file)
@@ -95,13 +95,7 @@ static struct musb_hdrc_config musb_hdrc_config = {
 };
 
 static struct musb_hdrc_platform_data musb_platform_data = {
-#if defined(CONFIG_USB_MUSB_OTG)
        .mode = MUSB_OTG,
-#elif defined(CONFIG_USB_MUSB_PERIPHERAL)
-       .mode = MUSB_PERIPHERAL,
-#else /* defined(CONFIG_USB_MUSB_HOST) */
-       .mode = MUSB_HOST,
-#endif
        .config = &musb_hdrc_config,
        .board_data = &musb_board_data,
 };
index 2b1e836a76ed77b7f39eaeaecfdd67ab2d3832f0..b1e87c184e54b3a0f3096353871bdf6f00d97027 100644 (file)
@@ -217,7 +217,7 @@ static void __init ct_ca9x4_init(void)
 }
 
 #ifdef CONFIG_SMP
-static void ct_ca9x4_init_cpu_map(void)
+static void __init ct_ca9x4_init_cpu_map(void)
 {
        int i, ncores = scu_get_core_count(MMIO_P2V(A9_MPCORE_SCU));
 
@@ -233,7 +233,7 @@ static void ct_ca9x4_init_cpu_map(void)
        set_smp_cross_call(gic_raise_softirq);
 }
 
-static void ct_ca9x4_smp_enable(unsigned int max_cpus)
+static void __init ct_ca9x4_smp_enable(unsigned int max_cpus)
 {
        scu_enable(MMIO_P2V(A9_MPCORE_SCU));
 }
index 813ee08f96e6a3d7f7052a45be2e92e68e347b3c..3034a4dab4a1a3a927ace6f2364393c5fbbcfbe4 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/smp.h>
 
 #include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
 #include <asm/system.h>
 
 extern volatile int pen_release;
index 2b5f7ac001a3326a160c346b9a3a0706d9e3c858..124ffb16909382f1383673783812fd5a1354f70c 100644 (file)
@@ -13,8 +13,6 @@
 #include <linux/smp.h>
 #include <linux/io.h>
 
-#include <asm/unified.h>
-
 #include <mach/motherboard.h>
 #define V2M_PA_CS7 0x10000000
 
@@ -46,6 +44,6 @@ void __init platform_smp_prepare_cpus(unsigned int max_cpus)
         * secondary CPU branches to this address.
         */
        writel(~0, MMIO_P2V(V2M_SYS_FLAGSCLR));
-       writel(BSYM(virt_to_phys(versatile_secondary_startup)),
+       writel(virt_to_phys(versatile_secondary_startup),
                MMIO_P2V(V2M_SYS_FLAGSSET));
 }
index 3de4a5211c3b1702ac0aa1fb126c4b69f6e918c4..06d867dce55161df7d0ab34dfd11d0027e64795c 100644 (file)
@@ -48,7 +48,7 @@ static void clock_source_select(const char *dev_id, unsigned int clkval)
                offset = ATAOFFSET;
        else if (strcmp(dev_id, "nuc900-lcd") == 0)
                offset = LCDOFFSET;
-       else if (strcmp(dev_id, "nuc900-audio") == 0)
+       else if (strcmp(dev_id, "nuc900-ac97") == 0)
                offset = AUDOFFSET;
        else
                offset = CPUOFFSET;
index 604e1db266e8402d5f85c018c3870e15794c8bef..9a06619929090c43dd4993bfeeb3878319ac901b 100644 (file)
@@ -79,7 +79,7 @@ static DEFINE_CLK(timer4, 23);
 
 static struct clk_lookup nuc900_clkregs[] = {
        DEF_CLKLOOK(&clk_lcd, "nuc900-lcd", NULL),
-       DEF_CLKLOOK(&clk_audio, "nuc900-audio", NULL),
+       DEF_CLKLOOK(&clk_audio, "nuc900-ac97", NULL),
        DEF_CLKLOOK(&clk_fmi, "nuc900-fmi", NULL),
        DEF_CLKLOOK(&clk_ms, "nuc900-fmi", "MS"),
        DEF_CLKLOOK(&clk_sd, "nuc900-fmi", "SD"),
index 5b0c38abacc107a7eae8bd35ec8f03666f3422e7..78110befb7a9132d6da3353fdec6721b421894ed 100644 (file)
@@ -501,8 +501,8 @@ static struct resource nuc900_ac97_resource[] = {
 
 };
 
-struct platform_device nuc900_device_audio = {
-       .name           = "nuc900-audio",
+struct platform_device nuc900_device_ac97 = {
+       .name           = "nuc900-ac97",
        .id             = -1,
        .num_resources  = ARRAY_SIZE(nuc900_ac97_resource),
        .resource       = nuc900_ac97_resource,
@@ -523,7 +523,7 @@ static struct platform_device *nuc900_public_dev[] __initdata = {
        &nuc900_device_emc,
        &nuc900_device_spi,
        &nuc900_device_wdt,
-       &nuc900_device_audio,
+       &nuc900_device_ac97,
 };
 
 /* Provide adding specific CPU platform devices API */
index 9dd74612bb8707560abd06e6adc250f7ecd54bf2..c58d142b8a467134d193d2252ce52a361e314fcf 100644 (file)
@@ -155,7 +155,7 @@ void mfp_set_groupg(struct device *dev, const char *subname)
        } else if (strcmp(dev_id, "nuc900-i2c1") == 0) {
                mfpen &= ~(GPIOG2TO3);
                mfpen |= ENI2C1;/*enable i2c1*/
-       } else if (strcmp(dev_id, "nuc900-audio") == 0) {
+       } else if (strcmp(dev_id, "nuc900-ac97") == 0) {
                mfpen &= ~(GPIOG22TO23);
                mfpen |= ENAC97;/*enable AC97*/
        } else if (strcmp(dev_id, "nuc900-mmc-port1") == 0) {
index 4cefb57d9ed2d79a9ac59e6d3f252dc1076d56a3..1a3ca2488164033ea4328b32ce50c517cf4e908b 100644 (file)
@@ -882,6 +882,7 @@ config CACHE_XSC3L2
 
 config ARM_L1_CACHE_SHIFT_6
        bool
+       default y if CPU_V7
        help
          Setting ARM L1 cache line size to 64 Bytes.
 
index e34ea8adc1f928fe8ebeadc677aba06fbd8b4f47..5dc7d127a40fba7e8e16c6dd54abade74a45d5aa 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/memblock.h>
 
 #include <asm/mach-types.h>
+#include <asm/memblock.h>
 #include <asm/prom.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
@@ -307,6 +308,21 @@ static void arm_memory_present(void)
 }
 #endif
 
+static bool arm_memblock_steal_permitted = true;
+
+phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
+{
+       phys_addr_t phys;
+
+       BUG_ON(!arm_memblock_steal_permitted);
+
+       phys = memblock_alloc(size, align);
+       memblock_free(phys, size);
+       memblock_remove(phys, size);
+
+       return phys;
+}
+
 void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
        int i;
@@ -349,6 +365,7 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
        if (mdesc->reserve)
                mdesc->reserve();
 
+       arm_memblock_steal_permitted = false;
        memblock_allow_resize();
        memblock_dump_all();
 }
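
arm_memblock_steal() wraps the alloc/free/remove sequence that platforms previously open-coded to carve memory out of the linear map, and the new arm_memblock_steal_permitted flag limits it to the window before memblock resizing is allowed, i.e. to the machine's reserve() hook invoked just above. A hypothetical caller (board name and sizes are invented) might look like this:

/* Sketch: a machine_desc->reserve() hook stealing a 1 MiB carveout */
static void __init example_board_reserve(void)
{
        phys_addr_t base;

        base = arm_memblock_steal(SZ_1M, SZ_1M);
        if (!base)
                pr_err("example: failed to steal carveout memory\n");
        /* hand 'base' to whatever coprocessor or framebuffer needs it */
}
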
index 7e9b5bf910c199cba4fc2fce9bead46e91d470b8..0404ccbb8aa3ee39f0a8084c55879fb464115ca2 100644 (file)
@@ -148,10 +148,6 @@ ENDPROC(cpu_v7_do_resume)
  *     Initialise TLB, Caches, and MMU state ready to switch the MMU
  *     on.  Return in r0 the new CP15 C1 control register setting.
  *
- *     We automatically detect if we have a Harvard cache, and use the
- *     Harvard cache control instructions insead of the unified cache
- *     control instructions.
- *
  *     This should be able to cover all ARMv7 cores.
  *
  *     It is assumed that:
@@ -251,9 +247,7 @@ __v7_setup:
 #endif
 
 3:     mov     r10, #0
-#ifdef HARVARD_CACHE
        mcr     p15, 0, r10, c7, c5, 0          @ I+BTB cache invalidate
-#endif
        dsb
 #ifdef CONFIG_MMU
        mcr     p15, 0, r10, c8, c7, 0          @ invalidate I + D TLBs
@@ -329,16 +323,6 @@ __v7_ca5mp_proc_info:
        __v7_proc __v7_ca5mp_setup
        .size   __v7_ca5mp_proc_info, . - __v7_ca5mp_proc_info
 
-       /*
-        * ARM Ltd. Cortex A7 processor.
-        */
-       .type   __v7_ca7mp_proc_info, #object
-__v7_ca7mp_proc_info:
-       .long   0x410fc070
-       .long   0xff0ffff0
-       __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
-       .size   __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
-
        /*
         * ARM Ltd. Cortex A9 processor.
         */
@@ -350,6 +334,16 @@ __v7_ca9mp_proc_info:
        .size   __v7_ca9mp_proc_info, . - __v7_ca9mp_proc_info
 #endif /* CONFIG_ARM_LPAE */
 
+       /*
+        * ARM Ltd. Cortex A7 processor.
+        */
+       .type   __v7_ca7mp_proc_info, #object
+__v7_ca7mp_proc_info:
+       .long   0x410fc070
+       .long   0xff0ffff0
+       __v7_proc __v7_ca7mp_setup, hwcaps = HWCAP_IDIV
+       .size   __v7_ca7mp_proc_info, . - __v7_ca7mp_proc_info
+
        /*
         * ARM Ltd. Cortex A15 processor.
         */
index b30708e28c1de1560fb886e30b35d305fb25738d..dcebb1230f7fd3cd6f6e492c97507a483dcef3b8 100644 (file)
@@ -17,26 +17,17 @@ config ARCH_IMX_V4_V5
          and ARMv5 SoCs
 
 config ARCH_IMX_V6_V7
-       bool "i.MX3, i.MX6"
+       bool "i.MX3, i.MX5, i.MX6"
        select AUTO_ZRELADDR if !ZBOOT_ROM
        select ARM_PATCH_PHYS_VIRT
        select MIGHT_HAVE_CACHE_L2X0
        help
-         This enables support for systems based on the Freescale i.MX3 and i.MX6
-         family.
-
-config ARCH_MX5
-       bool "i.MX50, i.MX51, i.MX53"
-       select AUTO_ZRELADDR if !ZBOOT_ROM
-       select ARM_PATCH_PHYS_VIRT
-       help
-         This enables support for machines using Freescale's i.MX50 and i.MX53
-         processors.
+         This enables support for systems based on the Freescale i.MX3, i.MX5
+         and i.MX6 family.
 
 endchoice
 
 source "arch/arm/mach-imx/Kconfig"
-source "arch/arm/mach-mx5/Kconfig"
 
 endmenu
 
index 6fa8a707b9a0347778a0513f05c953f16defd70f..f7d18046c04ffd49e52f678da6270c288f8958e0 100644 (file)
@@ -96,6 +96,6 @@ extern int mxc_gpio_mode(int gpio_mode);
 extern int mxc_gpio_setup_multiple_pins(const int *pin_list, unsigned count,
                const char *label);
 
-extern int __init imx_iomuxv1_init(void __iomem *base, int numports);
+extern int imx_iomuxv1_init(void __iomem *base, int numports);
 
 #endif /* __MACH_IOMUX_V1_H__ */
index ac24c5c4bc83c48cfe005fb2cea0e9bd7d2fc89e..fdbe60001542615595b5f178f6856d8d5c0455bb 100644 (file)
 #define FB_SYNC_SWAP_RGB       0x04000000
 #define FB_SYNC_CLK_SEL_EN     0x02000000
 
+/*
+ * Specify the way your display is connected. The IPU can arbitrarily
+ * map the internal colors to the external data lines. We only support
+ * the following mappings at the moment.
+ */
+enum disp_data_mapping {
+       /* blue -> d[0..5], green -> d[6..11], red -> d[12..17] */
+       IPU_DISP_DATA_MAPPING_RGB666,
+       /* blue -> d[0..4], green -> d[5..10], red -> d[11..15] */
+       IPU_DISP_DATA_MAPPING_RGB565,
+       /* blue -> d[0..7], green -> d[8..15], red -> d[16..23] */
+       IPU_DISP_DATA_MAPPING_RGB888,
+};
+
 /**
  * struct mx3fb_platform_data - mx3fb platform data
  *
@@ -33,6 +47,7 @@ struct mx3fb_platform_data {
        const char                      *name;
        const struct fb_videomode       *mode;
        int                             num_modes;
+       enum disp_data_mapping          disp_data_fmt;
 };
 
 #endif
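
Board code is now expected to state how the display's data lines are wired via the new disp_data_fmt field. A hypothetical board descriptor (panel name and mode values are invented) could look like:

static const struct fb_videomode example_modes[] = {
        { .name = "example-wvga", .refresh = 60, .xres = 800, .yres = 480, },
};

static struct mx3fb_platform_data example_fb_pdata = {
        .name           = "example-panel",
        .mode           = example_modes,
        .num_modes      = ARRAY_SIZE(example_modes),
        .disp_data_fmt  = IPU_DISP_DATA_MAPPING_RGB666,        /* 18-bit wiring */
};
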
index 685c78716d95234d1cb955b69d23a0bd929d107b..fd0ee84c45d1bc66b51e0a5527f75d9981040eeb 100644 (file)
@@ -113,7 +113,8 @@ struct stedma40_half_channel_info {
  * @dst_dev_type: Dst device type
  * @src_info: Parameters for dst half channel
  * @dst_info: Parameters for dst half channel
- *
+ * @use_fixed_channel: if true, use physical channel specified by phy_channel
+ * @phy_channel: physical channel to use, only if use_fixed_channel is true
  *
  * This structure has to be filled by the client drivers.
  * It is recommended to do all dma configurations for clients in the machine.
@@ -129,6 +130,9 @@ struct stedma40_chan_cfg {
        int                                      dst_dev_type;
        struct stedma40_half_channel_info        src_info;
        struct stedma40_half_channel_info        dst_info;
+
+       bool                                     use_fixed_channel;
+       int                                      phy_channel;
 };
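
A hedged client-side example of the two new fields (the device type and channel number are invented): a driver that must run on one particular physical channel, for instance to use the ESRAM-backed LCLA enabled further down, pins it like this:

static struct stedma40_chan_cfg example_chan_cfg = {
        .dst_dev_type           = 14,   /* assumed DMA device type */
        .use_fixed_channel      = true,
        .phy_channel            = 0,    /* pin the transfer to physical channel 0 */
};
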
 
 /**
@@ -153,6 +157,7 @@ struct stedma40_platform_data {
        struct stedma40_chan_cfg        *memcpy_conf_phy;
        struct stedma40_chan_cfg        *memcpy_conf_log;
        int                              disabled_channels[STEDMA40_MAX_PHYS];
+       bool                             use_esram_lcla;
 };
 
 #ifdef CONFIG_STE_DMA40
@@ -187,7 +192,7 @@ static inline struct
 dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
                                            dma_addr_t addr,
                                            unsigned int size,
-                                           enum dma_data_direction direction,
+                                           enum dma_transfer_direction direction,
                                            unsigned long flags)
 {
        struct scatterlist sg;
@@ -209,7 +214,7 @@ static inline struct
 dma_async_tx_descriptor *stedma40_slave_mem(struct dma_chan *chan,
                                            dma_addr_t addr,
                                            unsigned int size,
-                                           enum dma_data_direction direction,
+                                           enum dma_transfer_direction direction,
                                            unsigned long flags)
 {
        return NULL;
index 19719329a47b0fb96a4b75e42f8103f505c00c84..60278f47c0bdd71953cc5c1483a68a7a02a0b50f 100644 (file)
@@ -20,6 +20,7 @@
 #include <mach/hardware.h>
 #include <asm/mach-types.h>
 #include <asm/mach/map.h>
+#include <asm/memblock.h>
 
 #include <plat/tc.h>
 #include <plat/board.h>
@@ -164,14 +165,12 @@ void __init omap_dsp_reserve_sdram_memblock(void)
        if (!size)
                return;
 
-       paddr = memblock_alloc(size, SZ_1M);
+       paddr = arm_memblock_steal(size, SZ_1M);
        if (!paddr) {
                pr_err("%s: failed to reserve %x bytes\n",
                                __func__, size);
                return;
        }
-       memblock_free(paddr, size);
-       memblock_remove(paddr, size);
 
        omap_dsp_phys_mempool_base = paddr;
 }
index 1121df13e15f52de91d7d30ee41399950e60ee72..21f1fda8b661ee7021782921b98b6cb637547284 100644 (file)
@@ -38,8 +38,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
 
-#include <mach/system-reset.h>
-
 #include <mach/regs-gpio.h>
 #include <plat/regs-serial.h>
 
index 2cded872f22b3debd634282a408d67169ceff981..0747c77a2fd53d0d2a66a1f1f2377b3d7e7722f6 100644 (file)
@@ -37,14 +37,14 @@ static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
                                (void *)dma_ch;
        chan = dma_request_channel(mask, pl330_filter, filter_param);
 
-       if (info->direction == DMA_FROM_DEVICE) {
+       if (info->direction == DMA_DEV_TO_MEM) {
                memset(&slave_config, 0, sizeof(struct dma_slave_config));
                slave_config.direction = info->direction;
                slave_config.src_addr = info->fifo;
                slave_config.src_addr_width = info->width;
                slave_config.src_maxburst = 1;
                dmaengine_slave_config(chan, &slave_config);
-       } else if (info->direction == DMA_TO_DEVICE) {
+       } else if (info->direction == DMA_MEM_TO_DEV) {
                memset(&slave_config, 0, sizeof(struct dma_slave_config));
                slave_config.direction = info->direction;
                slave_config.dst_addr = info->fifo;
index 22eafc310bd7858a8fb10b39087fc6064e78e04c..71a6827c7706b21e10200bd834b5dbaad1fa9e45 100644 (file)
 #define __SAMSUNG_DMA_OPS_H_ __FILE__
 
 #include <linux/dmaengine.h>
+#include <mach/dma.h>
 
 struct samsung_dma_prep_info {
        enum dma_transaction_type cap;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        dma_addr_t buf;
        unsigned long period;
        unsigned long len;
@@ -27,7 +28,7 @@ struct samsung_dma_prep_info {
 
 struct samsung_dma_info {
        enum dma_transaction_type cap;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        enum dma_slave_buswidth width;
        dma_addr_t fifo;
        struct s3c2410_dma_client *client;
index b9061128abdef3bcf76a2e9cdd4bab76732b4fd5..7b02143ccd9a2236b1ba0f32cc47dfb54d8b3d54 100644 (file)
@@ -10,6 +10,9 @@
  * published by the Free Software Foundation.
 */
 
+#ifndef __PLAT_DMA_H
+#define __PLAT_DMA_H
+
 #include <linux/dma-mapping.h>
 
 enum s3c2410_dma_buffresult {
@@ -122,5 +125,6 @@ extern int s3c2410_dma_getposition(enum dma_ch channel,
 extern int s3c2410_dma_set_opfn(enum dma_ch, s3c2410_dma_opfn_t rtn);
 extern int s3c2410_dma_set_buffdone_fn(enum dma_ch, s3c2410_dma_cbfn_t rtn);
 
-
 #include <plat/dma-ops.h>
+
+#endif
index aea68b60ef98af7bac8c5841536c8150d803888f..fa95e9a009729f41ddd1c39174e4f6908f515226 100644 (file)
@@ -11,6 +11,8 @@
 #ifndef __S3C64XX_PLAT_SPI_H
 #define __S3C64XX_PLAT_SPI_H
 
+struct platform_device;
+
 /**
  * struct s3c64xx_spi_csinfo - ChipSelect description
  * @fb_delay: Slave specific feedback delay.
index d397a1fb2f5414ac7337d9aca3e8d328bf39f4c8..dd703ef09b8d9d623d83e4a131c12e87e329e838 100644 (file)
@@ -38,3 +38,4 @@ pen:  ldr     r7, [r6]
        .align
 1:     .long   .
        .long   pen_release
+ENDPROC(versatile_secondary_startup)
index 92f18d372b69ea24e062fb40b9f508b09d64dbf8..49c7db48c7f13d21b5211f1900ff99d639c4df8f 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/smp.h>
 
 #include <asm/cacheflush.h>
+#include <asm/smp_plat.h>
 #include <asm/hardware/gic.h>
 
 /*
index 197e96f7040594fa1994f6d7f61edddff6ccd65d..3dea7231f637c5d09e8c545fdee437a550b67c10 100644 (file)
@@ -8,6 +8,7 @@ config AVR32
        select HAVE_KPROBES
        select HAVE_GENERIC_HARDIRQS
        select GENERIC_IRQ_PROBE
+       select GENERIC_ATOMIC64
        select HARDIRQS_SW_RESEND
        select GENERIC_IRQ_SHOW
        select ARCH_HAVE_NMI_SAFE_CMPXCHG
index f5cb27614e35df7c4027094e7ac1d6626091021c..68c98f5b3ca625aaca9f342addd31c68c48c27af 100644 (file)
@@ -246,7 +246,18 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
        return regs->ar_bspstore;
 }
 
-#define regs_return_value(regs) ((regs)->r8)
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+       return regs->r10 != -1;
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       if (is_syscall_success(regs))
+               return regs->r8;
+       else
+               return -regs->r8;
+}
 
 /* Conserve space in histogram by encoding slot bits in address
  * bits 2 and 3 rather than bits 0 and 1.
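
With is_syscall_success() and the signed regs_return_value() in place, arch-independent audit code no longer needs per-architecture open coding of the error check. A hedged sketch of how the generic exit path can consume them (the __audit_syscall_exit() signature is assumed):

static inline void audit_exit_sketch(struct pt_regs *regs)
{
        int success = is_syscall_success(regs) ? AUDITSC_SUCCESS
                                               : AUDITSC_FAILURE;

        __audit_syscall_exit(success, regs_return_value(regs));
}
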
index bfb4d01e0e519ed74a2a941214b4426627307134..5207035dc061bb5c567275efa0e0b58d2149a54d 100644 (file)
@@ -429,22 +429,24 @@ static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
 static struct acpi_table_slit __initdata *slit_table;
 cpumask_t early_cpu_possible_map = CPU_MASK_NONE;
 
-static int get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
+static int __init
+get_processor_proximity_domain(struct acpi_srat_cpu_affinity *pa)
 {
        int pxm;
 
        pxm = pa->proximity_domain_lo;
-       if (ia64_platform_is("sn2"))
+       if (ia64_platform_is("sn2") || acpi_srat_revision >= 2)
                pxm += pa->proximity_domain_hi[0] << 8;
        return pxm;
 }
 
-static int get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
+static int __init
+get_memory_proximity_domain(struct acpi_srat_mem_affinity *ma)
 {
        int pxm;
 
        pxm = ma->proximity_domain;
-       if (!ia64_platform_is("sn2"))
+       if (!ia64_platform_is("sn2") && acpi_srat_revision <= 1)
                pxm &= 0xff;
 
        return pxm;
index 8848f43d819e55ba91bf07fc6ae8756f88e7ad36..dad91661ddf96e8b54aa4fbeafb9190879a65ccf 100644 (file)
@@ -1246,15 +1246,8 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
        if (test_thread_flag(TIF_RESTORE_RSE))
                ia64_sync_krbs();
 
-       if (unlikely(current->audit_context)) {
-               long syscall;
-               int arch;
 
-               syscall = regs.r15;
-               arch = AUDIT_ARCH_IA64;
-
-               audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
-       }
+       audit_syscall_entry(AUDIT_ARCH_IA64, regs.r15, arg0, arg1, arg2, arg3);
 
        return 0;
 }
@@ -1268,14 +1261,7 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3,
 {
        int step;
 
-       if (unlikely(current->audit_context)) {
-               int success = AUDITSC_RESULT(regs.r10);
-               long result = regs.r8;
-
-               if (success != AUDITSC_SUCCESS)
-                       result = -result;
-               audit_syscall_exit(success, result);
-       }
+       audit_syscall_exit(&regs);
 
        step = test_thread_flag(TIF_SINGLESTEP);
        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
index 4203d101363cf23074e7ce0e22a6f19579bf92ba..c4ac15c4f065ce97d0f86aafeb148efd5734e7ba 100644 (file)
@@ -414,9 +414,9 @@ void __init config_atari(void)
                                         * FDC val = 4 -> Supervisor only */
                asm volatile ("\n"
                        "       .chip   68030\n"
-                       "       pmove   %0@,%/tt1\n"
+                       "       pmove   %0,%/tt1\n"
                        "       .chip   68k"
-                       : : "a" (&tt1_val));
+                       : : "m" (tt1_val));
        } else {
                asm volatile ("\n"
                        "       .chip   68040\n"
@@ -569,10 +569,10 @@ static void atari_reset(void)
                        : "d0");
        } else
                asm volatile ("\n"
-                       "       pmove   %0@,%%tc\n"
+                       "       pmove   %0,%%tc\n"
                        "       jmp     %1@"
                        : /* no outputs */
-                       : "a" (&tc_val), "a" (reset_addr));
+                       : "m" (tc_val), "a" (reset_addr));
 }
 
 
index 0e89fa05de0e60bda81f5df2056106c49160e3e8..c1155f0e22cc2615a0e97f5745dd3c5d7d77aba9 100644 (file)
 
 #define IRQ_USER       8
 
-/*
- * various flags for request_irq() - the Amiga now uses the standard
- * mechanism like all other architectures - IRQF_DISABLED and
- * IRQF_SHARED are your friends.
- */
-#ifndef MACH_AMIGA_ONLY
-#define IRQ_FLG_LOCK   (0x0001)        /* handler is not replaceable   */
-#define IRQ_FLG_REPLACE        (0x0002)        /* replace existing handler     */
-#define IRQ_FLG_FAST   (0x0004)
-#define IRQ_FLG_SLOW   (0x0008)
-#define IRQ_FLG_STD    (0x8000)        /* internally used              */
-#endif
-
 struct irq_data;
 struct irq_chip;
 struct irq_desc;
index 125f34e00bf01e8409634a1c3dbfd1d2d294f123..099283ee1a8fd0810672f2a36ad038786799dc5d 100644 (file)
@@ -172,7 +172,7 @@ void flush_thread(void)
 
        current->thread.fs = __USER_DS;
        if (!FPU_IS_EMU)
-               asm volatile ("frestore %0@" : : "a" (&zero) : "memory");
+               asm volatile("frestore %0": :"m" (zero));
 }
 
 /*
index 69c1803fcf1bed00b4a2ba6a60956992543ffac1..5e1078cabe0e54bf10d4537aa86eaa11c8b0e8a5 100644 (file)
@@ -163,8 +163,8 @@ void flush_thread(void)
 #ifdef CONFIG_FPU
        if (!FPU_IS_EMU)
                asm volatile (".chip 68k/68881\n\t"
-                             "frestore %0@\n\t"
-                             ".chip 68k" : : "a" (&zero));
+                             "frestore %0\n\t"
+                             ".chip 68k" : : "m" (zero));
 #endif
 }
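
The m68k hunks in this series repeatedly make the same inline-asm conversion, so one side-by-side sketch may help (this assumes an FPU-equipped 68k build and is not code from the patch): the old form passes a pointer in an address register and dereferences it by hand with "%0@", while the new form hands gcc an "m" operand so the compiler emits the addressing mode itself.

static inline void frestore_null_both_ways(void)
{
        unsigned long zero = 0;         /* a null frame: first word is zero */

        /* old style: address register plus explicit "@" indirection */
        asm volatile ("frestore %0@" : : "a" (&zero) : "memory");

        /* new style: memory operand, gcc picks the addressing mode */
        asm volatile ("frestore %0" : : "m" (zero));
}
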
 
index a76452ca964ef6e538ee6d5181b4341c7679dd48..daaa9187654ca6706278e79e5502d0a9f4f44a82 100644 (file)
@@ -552,13 +552,13 @@ static inline void bus_error030 (struct frame *fp)
 
 #ifdef DEBUG
                asm volatile ("ptestr %3,%2@,#7,%0\n\t"
-                             "pmove %%psr,%1@"
-                             : "=a&" (desc)
-                             : "a" (&temp), "a" (addr), "d" (ssw));
+                             "pmove %%psr,%1"
+                             : "=a&" (desc), "=m" (temp)
+                             : "a" (addr), "d" (ssw));
 #else
                asm volatile ("ptestr %2,%1@,#7\n\t"
-                             "pmove %%psr,%0@"
-                             : : "a" (&temp), "a" (addr), "d" (ssw));
+                             "pmove %%psr,%0"
+                             : "=m" (temp) : "a" (addr), "d" (ssw));
 #endif
                mmusr = temp;
 
@@ -605,20 +605,18 @@ static inline void bus_error030 (struct frame *fp)
                               !(ssw & RW) ? "write" : "read", addr,
                               fp->ptregs.pc, ssw);
                        asm volatile ("ptestr #1,%1@,#0\n\t"
-                                     "pmove %%psr,%0@"
-                                     : /* no outputs */
-                                     : "a" (&temp), "a" (addr));
+                                     "pmove %%psr,%0"
+                                     : "=m" (temp)
+                                     : "a" (addr));
                        mmusr = temp;
 
                        printk ("level 0 mmusr is %#x\n", mmusr);
 #if 0
-                       asm volatile ("pmove %%tt0,%0@"
-                                     : /* no outputs */
-                                     : "a" (&tlong));
+                       asm volatile ("pmove %%tt0,%0"
+                                     : "=m" (tlong));
                        printk("tt0 is %#lx, ", tlong);
-                       asm volatile ("pmove %%tt1,%0@"
-                                     : /* no outputs */
-                                     : "a" (&tlong));
+                       asm volatile ("pmove %%tt1,%0"
+                                     : "=m" (tlong));
                        printk("tt1 is %#lx\n", tlong);
 #endif
 #ifdef DEBUG
@@ -668,13 +666,13 @@ static inline void bus_error030 (struct frame *fp)
 
 #ifdef DEBUG
        asm volatile ("ptestr #1,%2@,#7,%0\n\t"
-                     "pmove %%psr,%1@"
-                     : "=a&" (desc)
-                     : "a" (&temp), "a" (addr));
+                     "pmove %%psr,%1"
+                     : "=a&" (desc), "=m" (temp)
+                     : "a" (addr));
 #else
        asm volatile ("ptestr #1,%1@,#7\n\t"
-                     "pmove %%psr,%0@"
-                     : : "a" (&temp), "a" (addr));
+                     "pmove %%psr,%0"
+                     : "=m" (temp) : "a" (addr));
 #endif
        mmusr = temp;
 
index 95d0bf66e2e22e72b272e11b48f5d41914b5be38..3d84c1f2ffb2ef8765b5adae4bd8d334b8c78b6d 100644 (file)
@@ -52,9 +52,9 @@ static unsigned long virt_to_phys_slow(unsigned long vaddr)
                unsigned long *descaddr;
 
                asm volatile ("ptestr %3,%2@,#7,%0\n\t"
-                             "pmove %%psr,%1@"
-                             : "=a&" (descaddr)
-                             : "a" (&mmusr), "a" (vaddr), "d" (get_fs().seg));
+                             "pmove %%psr,%1"
+                             : "=a&" (descaddr), "=m" (mmusr)
+                             : "a" (vaddr), "d" (get_fs().seg));
                if (mmusr & (MMU_I|MMU_B|MMU_L))
                        return 0;
                descaddr = phys_to_virt((unsigned long)descaddr);
index 74f23a460ba2f4415a1657fd7288b318c469683d..c8d6efb99dbf668b7f283f23624c2f50f632c442 100644 (file)
@@ -19,6 +19,7 @@ config MICROBLAZE
        select GENERIC_IRQ_SHOW
        select GENERIC_PCI_IOMAP
        select GENERIC_CPU_DEVICES
+       select GENERIC_ATOMIC64
 
 config SWAP
        def_bool n
index 4c4e58ef0cb628ed3ca0f542536872036f93da02..0c796cf8158629abf23adb530c17ba2a8ceb9be5 100644 (file)
@@ -53,6 +53,6 @@ $(obj)/simpleImage.%: vmlinux FORCE
 DTC_FLAGS := -p 1024
 
 $(obj)/%.dtb: $(src)/dts/%.dts FORCE
-       $(call cmd,dtc)
+       $(call if_changed_dep,dtc)
 
 clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub
index 6d2e1d418be74f7dbc3797e82a497b8bcab93bc8..615f53992c654e4f78625bf8708d5d92c7c12b09 100644 (file)
@@ -2,6 +2,7 @@
 #define _ASM_MICROBLAZE_ATOMIC_H
 
 #include <asm-generic/atomic.h>
+#include <asm-generic/atomic64.h>
 
 /*
  * Atomically test *v and decrement if it is greater than 0.
index 816bee64b1961d202f35a1aa9c4d2be0fc1d640e..94e92c8058592f786cfd6fac942c90b308664e85 100644 (file)
@@ -61,6 +61,11 @@ struct pt_regs {
 #define instruction_pointer(regs)      ((regs)->pc)
 #define profile_pc(regs)               instruction_pointer(regs)
 
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->r3;
+}
+
 #else /* __KERNEL__ */
 
 /* pt_regs offsets used by gdbserver etc in ptrace syscalls */
index 043cb58f9c443e72843fa44a9ebd399442a965b5..6eb2aa927d8966b842f388b219ae19b8dadd7309 100644 (file)
@@ -147,10 +147,8 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
                 */
                ret = -1L;
 
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(EM_MICROBLAZE, regs->r12,
-                                   regs->r5, regs->r6,
-                                   regs->r7, regs->r8);
+       audit_syscall_entry(EM_MICROBLAZE, regs->r12, regs->r5, regs->r6,
+                           regs->r7, regs->r8);
 
        return ret ?: regs->r12;
 }
@@ -159,8 +157,7 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
 {
        int step;
 
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(regs->r3), regs->r3);
+       audit_syscall_exit(regs);
 
        step = test_thread_flag(TIF_SINGLESTEP);
        if (step || test_thread_flag(TIF_SYSCALL_TRACE))
index c4c1312473fbf6cd9104cc3d206846bcd38b1b4b..5ab6e89603c56bc461de1b4390ada67482749c2e 100644 (file)
@@ -2356,6 +2356,7 @@ config PCI
        depends on HW_HAS_PCI
        select PCI_DOMAINS
        select GENERIC_PCI_IOMAP
+       select NO_GENERIC_PCI_IOPORT_MAP
        help
          Find out whether you have a PCI motherboard. PCI is the name of a
          bus system, i.e. the way the CPU talks to the other stuff inside
index 7b99c670e478ed9ab79998762c839489cefb66a1..4b7f5252d2fd31ba156723c23db11e57e983442b 100644 (file)
@@ -137,7 +137,19 @@ extern int ptrace_set_watch_regs(struct task_struct *child,
  */
 #define user_mode(regs) (((regs)->cp0_status & KU_MASK) == KU_USER)
 
-#define regs_return_value(_regs) ((_regs)->regs[2])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+       return !regs->regs[7];
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       if (is_syscall_success(regs))
+               return regs->regs[2];
+       else
+               return -regs->regs[2];
+}
+
 #define instruction_pointer(regs) ((regs)->cp0_epc)
 #define profile_pc(regs) instruction_pointer(regs)
 
index 4e6ea1ffad46617b3e1c0880c4061371f7aa5885..7786b608d9322289ce23a83eb5205b50b57f3cb6 100644 (file)
@@ -560,10 +560,9 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
        }
 
 out:
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(audit_arch(), regs->regs[2],
-                                   regs->regs[4], regs->regs[5],
-                                   regs->regs[6], regs->regs[7]);
+       audit_syscall_entry(audit_arch(), regs->regs[2],
+                           regs->regs[4], regs->regs[5],
+                           regs->regs[6], regs->regs[7]);
 }
 
 /*
@@ -572,9 +571,7 @@ out:
  */
 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 {
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(regs->regs[7]),
-                                  -regs->regs[2]);
+       audit_syscall_exit(regs);
 
        if (!(current->ptrace & PT_PTRACED))
                return;
index 2635b1a9633385568677b9a121f50adb6f96d8b7..fd35daa45314a370b89f521e3ec401d39e5dfdc9 100644 (file)
@@ -10,8 +10,8 @@
 #include <linux/module.h>
 #include <asm/io.h>
 
-static void __iomem *ioport_map_pci(struct pci_dev *dev,
-                                     unsigned long port, unsigned int nr)
+void __iomem *__pci_ioport_map(struct pci_dev *dev,
+                              unsigned long port, unsigned int nr)
 {
        struct pci_controller *ctrl = dev->bus->sysdata;
        unsigned long base = ctrl->io_map_base;
index 98ca185097a519bb4bc58938ee848234e81db3b4..09958358601a3637ec4d811826b3f84fea1a6f96 100644 (file)
@@ -11,5 +11,5 @@ clean-files := *.dtb.S
 
 #DTC_FLAGS ?= -p 1024
 
-$(obj)/%.dtb: $(src)/dts/%.dts
-       $(call cmd,dtc)
+$(obj)/%.dtb: $(src)/dts/%.dts FORCE
+       $(call if_changed_dep,dtc)
index 15986e70799ccbcb78d7aafc924d01bcf167a7a9..8844a17ce8ede20213d2c64d25d0d764587e834e 100644 (file)
@@ -345,8 +345,8 @@ $(obj)/treeImage.%: vmlinux $(obj)/%.dtb $(wrapperbits)
        $(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb)
 
 # Rule to build device tree blobs
-$(obj)/%.dtb: $(src)/dts/%.dts
-       $(call cmd,dtc)
+$(obj)/%.dtb: $(src)/dts/%.dts FORCE
+       $(call if_changed_dep,dtc)
 
 # If there isn't a platform selected then just strip the vmlinux.
 ifeq (,$(image-y))
index 89af6263770701e71a3e8e4c9e9d3db3c2b657d0..b37da56018b6340a6d514a85644d5131960de9ff 100644 (file)
        };
 
 /include/ "pq3-esdhc-0.dtsi"
+       sdhc@2e000 {
+               compatible = "fsl,mpc8536-esdhc", "fsl,esdhc";
+       };
+
 /include/ "pq3-sec3.0-0.dtsi"
 /include/ "pq3-mpic.dtsi"
 /include/ "pq3-mpic-timer-B.dtsi"
index bd9e163c764b33e858e835684f46b69c369e751b..a97d1263372ccbf9350f5db9ee29b9483886642a 100644 (file)
 /include/ "pq3-usb2-dr-0.dtsi"
 /include/ "pq3-esdhc-0.dtsi"
        sdhc@2e000 {
-               fsl,sdhci-auto-cmd12;
+               compatible = "fsl,p1010-esdhc", "fsl,esdhc";
+               sdhci,auto-cmd12;
        };
 
 /include/ "pq3-sec4.4-0.dtsi"
index fc924c5ffebe2245a65abf26bf71031f0ca8d078..5de5fc351314a627605c0bd0389f05ae452e1714 100644 (file)
 /include/ "pq3-usb2-dr-1.dtsi"
 
 /include/ "pq3-esdhc-0.dtsi"
+       sdhc@2e000 {
+               compatible = "fsl,p1020-esdhc", "fsl,esdhc";
+               sdhci,auto-cmd12;
+       };
 /include/ "pq3-sec3.3-0.dtsi"
 
 /include/ "pq3-mpic.dtsi"
index 16239b199d0a35b1d338590898582d9215cf525a..ff9ed1d879297bfe0975cd7261abdc9eaab7a57f 100644 (file)
 
 /include/ "pq3-esdhc-0.dtsi"
        sdhc@2e000 {
-               fsl,sdhci-auto-cmd12;
+               compatible = "fsl,p1022-esdhc", "fsl,esdhc";
+               sdhci,auto-cmd12;
        };
 
 /include/ "pq3-sec3.3-0.dtsi"
index c041050561a7f17a284e45b0c00119eb33ab4411..332e9e75e6c2f706b370d9f7524594df25c5c387 100644 (file)
 /include/ "pq3-etsec1-1.dtsi"
 /include/ "pq3-etsec1-2.dtsi"
 /include/ "pq3-esdhc-0.dtsi"
+       sdhc@2e000 {
+               compatible = "fsl,p2020-esdhc", "fsl,esdhc";
+       };
+
 /include/ "pq3-sec3.1-0.dtsi"
 /include/ "pq3-mpic.dtsi"
 /include/ "pq3-mpic-timer-B.dtsi"
index b5bd86f4baf21b810f4b38fef873b7e4775eb00e..1fb7e0e0940f0cb098843b78207c6117efeffab7 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * P1020 RDB Device Tree Source stub (no addresses or top-level ranges)
  *
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
 
        usb@22000 {
                phy_type = "ulpi";
+               dr_mode = "host";
        };
 
-       /* USB2 is shared with localbus, so it must be disabled
-          by default. We can't put 'status = "disabled";' here
-          since U-Boot doesn't clear the status property when
-          it enables USB2. OTOH, U-Boot does create a new node
-          when there isn't any. So, just comment it out.
+       /* USB2 is shared with the localbus. It is used
+          only in the case of SPI and SD boot, after the
+          appropriate device-tree fixup has been done by U-Boot. */
        usb@23000 {
                phy_type = "ulpi";
+               dr_mode = "host";
        };
-       */
 
        mdio@24000 {
                phy0: ethernet-phy@0 {
index d9540791e4342034ca9509826104a9ae06ab32d2..97116f198a37fa1c793e1fb09d11090bd7f51515 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * P1021 MDS Device Tree Source
  *
- * Copyright 2010 Freescale Semiconductor Inc.
+ * Copyright 2010,2012 Freescale Semiconductor Inc.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the
 
                usb@22000 {
                        phy_type = "ulpi";
+                       dr_mode = "host";
                };
 
                mdio@24000 {
index c1cf6cef4dd60265586419252e0f67828ee6ddf2..d3b939c573b007e5ca25ad89f58523abb3db56cc 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * P2020DS Device Tree Source stub (no addresses or top-level ranges)
  *
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011-2012 Freescale Semiconductor Inc.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are met:
 &board_soc {
        usb@22000 {
                phy_type = "ulpi";
+               dr_mode = "host";
        };
 
        mdio@24520 {
index 26759a5917129505e02c00070d7c067453b002de..eb8a6aa2bda5f019ae06311a9e753f12cf2891b0 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * P2020 RDB Device Tree Source
  *
- * Copyright 2009-2011 Freescale Semiconductor Inc.
+ * Copyright 2009-2012 Freescale Semiconductor Inc.
  *
  * This program is free software; you can redistribute  it and/or modify it
  * under  the terms of  the GNU General  Public License as published by the
 
                usb@22000 {
                        phy_type = "ulpi";
+                       dr_mode = "host";
                };
 
                mdio@24520 {
index 48223f9b8728d5e1433c6d13ab9c1c583780d2cb..78a205162fd7d48ad0b1a3de78bd42453dae5c0a 100644 (file)
@@ -86,7 +86,18 @@ struct pt_regs {
 #define instruction_pointer(regs) ((regs)->nip)
 #define user_stack_pointer(regs) ((regs)->gpr[1])
 #define kernel_stack_pointer(regs) ((regs)->gpr[1])
-#define regs_return_value(regs) ((regs)->gpr[3])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+       return !(regs->ccr & 0x10000000);
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       if (is_syscall_success(regs))
+               return regs->gpr[3];
+       else
+               return -regs->gpr[3];
+}
 
 #ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *regs);
index 28be3452e67ad75157940bc7ee29a6fd3f0c0195..abef75176c079f2f72f71d6af4f10cec4095deca 100644 (file)
@@ -46,7 +46,6 @@
 
 /* This keeps a track of which one is the crashing cpu. */
 int crashing_cpu = -1;
-static atomic_t cpus_in_crash;
 static int time_to_dump;
 
 #define CRASH_HANDLER_MAX 3
@@ -66,6 +65,7 @@ static int handle_fault(struct pt_regs *regs)
 
 #ifdef CONFIG_SMP
 
+static atomic_t cpus_in_crash;
 void crash_ipi_callback(struct pt_regs *regs)
 {
        static cpumask_t cpus_state_saved = CPU_MASK_NONE;
index 3fea3689527e963955f802ac7abf2a8471ddfd24..bedd12e1cfbcc0b636eea99f1ca18728938ed2f4 100644 (file)
@@ -442,8 +442,10 @@ static void __init fixup_port_irq(int index,
 
        port->irq = virq;
 
+#ifdef CONFIG_SERIAL_8250_FSL
        if (of_device_is_compatible(np, "fsl,ns16550"))
                port->handle_irq = fsl8250_handle_irq;
+#endif
 }
 
 static void __init fixup_port_pio(int index,
index 5de73dbd15c7e404a16257ae5239e9b1af293f65..5b43325402bcc8e5435c1e2804685e81cb24df72 100644 (file)
@@ -1724,22 +1724,20 @@ long do_syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->gpr[0]);
 
-       if (unlikely(current->audit_context)) {
 #ifdef CONFIG_PPC64
-               if (!is_32bit_task())
-                       audit_syscall_entry(AUDIT_ARCH_PPC64,
-                                           regs->gpr[0],
-                                           regs->gpr[3], regs->gpr[4],
-                                           regs->gpr[5], regs->gpr[6]);
-               else
+       if (!is_32bit_task())
+               audit_syscall_entry(AUDIT_ARCH_PPC64,
+                                   regs->gpr[0],
+                                   regs->gpr[3], regs->gpr[4],
+                                   regs->gpr[5], regs->gpr[6]);
+       else
 #endif
-                       audit_syscall_entry(AUDIT_ARCH_PPC,
-                                           regs->gpr[0],
-                                           regs->gpr[3] & 0xffffffff,
-                                           regs->gpr[4] & 0xffffffff,
-                                           regs->gpr[5] & 0xffffffff,
-                                           regs->gpr[6] & 0xffffffff);
-       }
+               audit_syscall_entry(AUDIT_ARCH_PPC,
+                                   regs->gpr[0],
+                                   regs->gpr[3] & 0xffffffff,
+                                   regs->gpr[4] & 0xffffffff,
+                                   regs->gpr[5] & 0xffffffff,
+                                   regs->gpr[6] & 0xffffffff);
 
        return ret ?: regs->gpr[0];
 }
@@ -1748,9 +1746,7 @@ void do_syscall_trace_leave(struct pt_regs *regs)
 {
        int step;
 
-       if (unlikely(current->audit_context))
-               audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
-                                  regs->result);
+       audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->result);
index bb3d84f4046f53342bc0390c02359a73a34d4866..b0984ada3f83c41db76a56f3adbbd7779af6d7c1 100644 (file)
@@ -25,6 +25,7 @@
 
 #include <sysdev/fsl_soc.h>
 #include <sysdev/fsl_pci.h>
+#include <asm/udbg.h>
 #include <asm/fsl_guts.h>
 #include "smp.h"
 
index f31162cfdaa9762f71cafe41e0f815e92181f6fd..5e155dfc4320ab011589d678408002dd05503a96 100644 (file)
@@ -204,11 +204,10 @@ static void __devinit pnv_ioda_offset_bus(struct pci_bus *bus,
        pr_devel("  -> OBR %s [%x] +%016llx\n",
                 bus->self ? pci_name(bus->self) : "root", flags, offset);
 
-       for (i = 0; i < 2; i++) {
-               r = bus->resource[i];
+       pci_bus_for_each_resource(bus, r, i) {
                if (r && (r->flags & flags)) {
-                       bus->resource[i]->start += offset;
-                       bus->resource[i]->end += offset;
+                       r->start += offset;
+                       r->end += offset;
                }
        }
        list_for_each_entry(dev, &bus->devices, bus_list)
@@ -288,12 +287,17 @@ static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
         * assignment algorithm is going to be uber-trivial for now, we
         * can try to be smarter later at filling out holes.
         */
-       start = bus->self ? 0 : bus->resource[bres]->start;
-
-       /* Don't hand out IO 0 */
-       if ((flags & IORESOURCE_IO) && !bus->self)
-               start += 0x1000;
-
+       if (bus->self) {
+               /* No offset for downstream bridges */
+               start = 0;
+       } else {
+               /* Offset from the root */
+               if (flags & IORESOURCE_IO)
+                       /* Don't hand out IO 0 */
+                       start = hose->io_resource.start + 0x1000;
+               else
+                       start = hose->mem_resources[0].start;
+       }
        while(!list_empty(&head)) {
                w = list_first_entry(&head, struct resource_wrap, link);
                list_del(&w->link);
@@ -321,13 +325,20 @@ static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
  empty:
        /* Only setup P2P's, not the PHB itself */
        if (bus->self) {
-               WARN_ON(bus->resource[bres] == NULL);
-               bus->resource[bres]->start = 0;
-               bus->resource[bres]->flags = (*size) ? flags : 0;
-               bus->resource[bres]->end = (*size) ? (*size - 1) : 0;
+               struct resource *res = bus->resource[bres];
+
+               if (WARN_ON(res == NULL))
+                       return;
 
-               /* Clear prefetch bus resources for now */
-               bus->resource[2]->flags = 0;
+               /*
+                * FIXME: We should probably export and call
+                * pci_bridge_check_ranges() to properly re-initialize
+                * the PCI portion of the flags here, and to detect
+                * what the bridge actually supports.
+                */
+               res->start = 0;
+               res->flags = (*size) ? flags : 0;
+               res->end = (*size) ? (*size - 1) : 0;
        }
 
        pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n",
index ae7b6d41fed363580575aa4ea6b56d053b702c84..31f22c1f657dbf4c7e2f68ca25002a24f485b83d 100644 (file)
@@ -122,7 +122,7 @@ config DTL
          Say N if you are unsure.
 
 config PSERIES_IDLE
-       tristate "Cpuidle driver for pSeries platforms"
+       bool "Cpuidle driver for pSeries platforms"
        depends on CPU_IDLE
        depends on PPC_PSERIES
        default y
index 3b61e8cf3421fd2ce2f132a42e25c2eb411e9d6f..30eb17ecad493f8d459beb2d6c60ed882c86a326 100644 (file)
@@ -205,12 +205,12 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
 
        if (paddr_hi == paddr_lo) {
                pr_err("%s: No outbound window space\n", name);
-               return ;
+               goto out;
        }
 
        if (paddr_lo == 0) {
                pr_err("%s: No space for inbound window\n", name);
-               return ;
+               goto out;
        }
 
        /* setup PCSRBAR/PEXCSRBAR */
@@ -357,6 +357,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
                        (u64)hose->dma_window_size);
        }
 
+out:
        iounmap(pci);
 }
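
The shape of this fix, illustrated with invented helper names: every early return taken after the controller registers have been mapped now funnels through a label that unmaps them, so the error paths no longer leak the ioremap()ed region.

static int __init atmu_setup_sketch(struct device_node *np)
{
        void __iomem *regs;
        int ret = 0;

        regs = of_iomap(np, 0);
        if (!regs)
                return -ENOMEM;

        if (!window_available(regs)) {  /* hypothetical sanity check */
                ret = -ENODEV;
                goto out;               /* do not leak the mapping */
        }

        program_windows(regs);          /* hypothetical window programming */
out:
        iounmap(regs);
        return ret;
}
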
 
index e9f353341693615249c9ef76501920c457360e8a..0ad2f1e1ce9ec48928696f428a9063968ea04490 100644 (file)
@@ -88,7 +88,6 @@ KBUILD_CFLAGS += -pipe -fno-strength-reduce -Wno-sign-compare
 KBUILD_AFLAGS  += $(aflags-y)
 
 OBJCOPYFLAGS   := -O binary
-LDFLAGS_vmlinux := -e start
 
 head-y         := arch/s390/kernel/head.o
 head-y         += arch/s390/kernel/$(if $(CONFIG_64BIT),head64.o,head31.o)
index cf4e47b0948c3c9b25aa5d6f310566667615d14a..3f30dac804ea7ee92808bb5a1f51e16482462951 100644 (file)
 /* The native architecture */
 #define KEXEC_ARCH KEXEC_ARCH_S390
 
+/*
+ * Size for s390x ELF notes per CPU
+ *
+ * Seven notes plus zero note at the end: prstatus, fpregset, timer,
+ * tod_cmp, tod_reg, control regs, and prefix
+ */
+#define KEXEC_NOTE_BYTES \
+       (ALIGN(sizeof(struct elf_note), 4) * 8 + \
+        ALIGN(sizeof("CORE"), 4) * 7 + \
+        ALIGN(sizeof(struct elf_prstatus), 4) + \
+        ALIGN(sizeof(elf_fpregset_t), 4) + \
+        ALIGN(sizeof(u64), 4) + \
+        ALIGN(sizeof(u64), 4) + \
+        ALIGN(sizeof(u32), 4) + \
+        ALIGN(sizeof(u64) * 16, 4) + \
+        ALIGN(sizeof(u32), 4) \
+       )
+
 /* Provide a dummy definition to avoid build failures. */
 static inline void crash_setup_regs(struct pt_regs *newregs,
                                        struct pt_regs *oldregs) { }
index 56da355678f4aa7feed766a016b84282381451ba..aeb77f01798504cd250aebbd8748516b8c64e113 100644 (file)
@@ -541,9 +541,13 @@ struct user_regs_struct
 #define user_mode(regs) (((regs)->psw.mask & PSW_MASK_PSTATE) != 0)
 #define instruction_pointer(regs) ((regs)->psw.addr & PSW_ADDR_INSN)
 #define user_stack_pointer(regs)((regs)->gprs[15])
-#define regs_return_value(regs)((regs)->gprs[2])
 #define profile_pc(regs) instruction_pointer(regs)
 
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->gprs[2];
+}
+
 int regs_query_register_offset(const char *name);
 const char *regs_query_register_name(unsigned int offset);
 unsigned long regs_get_register(struct pt_regs *regs, unsigned int offset);
index 573bc29551ef471fee58b89d02df0ea0ff956503..9d82ed4bcb273a91bd6c4875d204218da2d420a2 100644 (file)
@@ -740,20 +740,17 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->gprs[2]);
 
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(is_compat_task() ?
-                                       AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
-                                   regs->gprs[2], regs->orig_gpr2,
-                                   regs->gprs[3], regs->gprs[4],
-                                   regs->gprs[5]);
+       audit_syscall_entry(is_compat_task() ?
+                               AUDIT_ARCH_S390 : AUDIT_ARCH_S390X,
+                           regs->gprs[2], regs->orig_gpr2,
+                           regs->gprs[3], regs->gprs[4],
+                           regs->gprs[5]);
        return ret ?: regs->gprs[2];
 }
 
 asmlinkage void do_syscall_trace_exit(struct pt_regs *regs)
 {
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(regs->gprs[2]),
-                                  regs->gprs[2]);
+       audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->gprs[2]);
index e4c79ebb40e628850fd230fc94a0c7fdc77aaeb1..21109c63eb12961483134141c67bfc8c2b8f8315 100644 (file)
@@ -9,12 +9,12 @@
 #ifndef CONFIG_64BIT
 OUTPUT_FORMAT("elf32-s390", "elf32-s390", "elf32-s390")
 OUTPUT_ARCH(s390)
-ENTRY(_start)
+ENTRY(startup)
 jiffies = jiffies_64 + 4;
 #else
 OUTPUT_FORMAT("elf64-s390", "elf64-s390", "elf64-s390")
 OUTPUT_ARCH(s390:64-bit)
-ENTRY(_start)
+ENTRY(startup)
 jiffies = jiffies_64;
 #endif
 
index 577abba3fac64a092e0bac9e47b9a51cee7ed415..83bb96079c43c513a832c6dd41a8de32a9cdffe6 100644 (file)
@@ -408,7 +408,7 @@ ENTRY(handle_sys)
        sw      r9, [r0, PT_EPC]
 
        cmpi.c  r27, __NR_syscalls      # check syscall number
-       bgtu    illegal_syscall
+       bgeu    illegal_syscall
 
        slli    r8, r27, 2              # get syscall routine
        la      r11, sys_call_table
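
The one-letter branch change fixes an off-by-one: valid syscall numbers run from 0 to __NR_syscalls - 1, so a number equal to __NR_syscalls must also be rejected, which "branch if greater than" did not do. In C terms (sketch only):

static inline bool syscall_nr_is_valid(unsigned long nr)
{
        return nr < __NR_syscalls;      /* ">=" now branches to illegal_syscall */
}
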
index 3c8db65c89e5583ef16907f9d44eb0a9a74461ff..713fb58ca50724309d14f326c047bc4443df4772 100644 (file)
@@ -859,6 +859,7 @@ config PCI
        depends on SYS_SUPPORTS_PCI
        select PCI_DOMAINS
        select GENERIC_PCI_IOMAP
+       select NO_GENERIC_PCI_IOPORT_MAP
        help
          Find out whether you have a PCI motherboard. PCI is the name of a
          bus system, i.e. the way the CPU talks to the other stuff inside
index 8f18dd090a66021b2a17051a0ade4f03f8ae71b4..1e7b0e2e764d1ae1319dd5a63ec563c9af9f1120 100644 (file)
@@ -356,8 +356,8 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 
 #ifndef CONFIG_GENERIC_IOMAP
 
-static void __iomem *ioport_map_pci(struct pci_dev *dev,
-                                   unsigned long port, unsigned int nr)
+void __iomem *__pci_ioport_map(struct pci_dev *dev,
+                              unsigned long port, unsigned int nr)
 {
        struct pci_channel *chan = dev->sysdata;
 
index 6c2239cca1a2d86204a52ecef53ab5514f8a6073..2d3e906aa72252d3b827a12405546465127d5f78 100644 (file)
@@ -76,7 +76,10 @@ struct pt_dspregs {
 #ifdef __KERNEL__
 
 #define MAX_REG_OFFSET         offsetof(struct pt_regs, tra)
-#define regs_return_value(_regs)       ((_regs)->regs[0])
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->regs[0];
+}
 
 #endif /* __KERNEL__ */
 
index bf9be7764d69f250978300cfb5c3e556e0d71e76..eb3fcceaf64b7bb5942a081637e78c5d479bad86 100644 (file)
@@ -13,7 +13,10 @@ struct pt_regs {
 #ifdef __KERNEL__
 
 #define MAX_REG_OFFSET         offsetof(struct pt_regs, tregs[7])
-#define regs_return_value(_regs)       ((_regs)->regs[3])
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->regs[3];
+}
 
 #endif /* __KERNEL__ */
 
index 92b3c276339a3a50d95a027c1d2a2e902a507ee3..a3e651563763aaabf76818e68729a53bc98aeaf7 100644 (file)
@@ -518,10 +518,9 @@ asmlinkage long do_syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->regs[0]);
 
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(audit_arch(), regs->regs[3],
-                                   regs->regs[4], regs->regs[5],
-                                   regs->regs[6], regs->regs[7]);
+       audit_syscall_entry(audit_arch(), regs->regs[3],
+                           regs->regs[4], regs->regs[5],
+                           regs->regs[6], regs->regs[7]);
 
        return ret ?: regs->regs[0];
 }
@@ -530,9 +529,7 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
 {
        int step;
 
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(regs->regs[0]),
-                                  regs->regs[0]);
+       audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->regs[0]);
index c8f97649f354b5f4b80366b79a95c9a9a0f4d4bb..3d0080b5c976bb9b19fbbc8ac5116239c7d841e3 100644 (file)
@@ -536,10 +536,9 @@ asmlinkage long long do_syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->regs[9]);
 
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(audit_arch(), regs->regs[1],
-                                   regs->regs[2], regs->regs[3],
-                                   regs->regs[4], regs->regs[5]);
+       audit_syscall_entry(audit_arch(), regs->regs[1],
+                           regs->regs[2], regs->regs[3],
+                           regs->regs[4], regs->regs[5]);
 
        return ret ?: regs->regs[9];
 }
@@ -548,9 +547,7 @@ asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
 {
        int step;
 
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(regs->regs[9]),
-                                  regs->regs[9]);
+       audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->regs[9]);
index 96657992a72eef8f69580cc9cdb12e2c184639ec..ca5580e4d813711f272c57ddb6b5f43428ed99f9 100644 (file)
@@ -33,6 +33,7 @@ config SPARC
 config SPARC32
        def_bool !64BIT
        select GENERIC_ATOMIC64
+       select CLZ_TAB
 
 config SPARC64
        def_bool 64BIT
index a0e1bcf843a1ed5f9931a89c6cc63d4cd636d4ae..c00c3b5c2806edfac2b2b7f44f79045ae1bfec01 100644 (file)
@@ -207,7 +207,15 @@ do {       current_thread_info()->syscall_noerror = 1; \
 #define instruction_pointer(regs) ((regs)->tpc)
 #define instruction_pointer_set(regs, val) ((regs)->tpc = (val))
 #define user_stack_pointer(regs) ((regs)->u_regs[UREG_FP])
-#define regs_return_value(regs) ((regs)->u_regs[UREG_I0])
+static inline int is_syscall_success(struct pt_regs *regs)
+{
+       return !(regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY));
+}
+
+static inline long regs_return_value(struct pt_regs *regs)
+{
+       return regs->u_regs[UREG_I0];
+}
 #ifdef CONFIG_SMP
 extern unsigned long profile_pc(struct pt_regs *);
 #else
index 96ee50a806613b782ce11e8627f1911ede3fde3f..9388844cd88c5afa47f3a24027019a96d11895ba 100644 (file)
@@ -1071,32 +1071,22 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->u_regs[UREG_G1]);
 
-       if (unlikely(current->audit_context) && !ret)
-               audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
-                                    AUDIT_ARCH_SPARC :
-                                    AUDIT_ARCH_SPARC64),
-                                   regs->u_regs[UREG_G1],
-                                   regs->u_regs[UREG_I0],
-                                   regs->u_regs[UREG_I1],
-                                   regs->u_regs[UREG_I2],
-                                   regs->u_regs[UREG_I3]);
+       audit_syscall_entry((test_thread_flag(TIF_32BIT) ?
+                            AUDIT_ARCH_SPARC :
+                            AUDIT_ARCH_SPARC64),
+                           regs->u_regs[UREG_G1],
+                           regs->u_regs[UREG_I0],
+                           regs->u_regs[UREG_I1],
+                           regs->u_regs[UREG_I2],
+                           regs->u_regs[UREG_I3]);
 
        return ret;
 }
 
 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
 {
-#ifdef CONFIG_AUDITSYSCALL
-       if (unlikely(current->audit_context)) {
-               unsigned long tstate = regs->tstate;
-               int result = AUDITSC_SUCCESS;
+       audit_syscall_exit(regs);
 
-               if (unlikely(tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
-                       result = AUDITSC_FAILURE;
-
-               audit_syscall_exit(result, regs->u_regs[UREG_I0]);
-       }
-#endif
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->u_regs[UREG_G1]);
 
index 422c16dad1f66610f4d90b8723b652c658cfcae3..e61165161dd33d1df3f6b104edbf183f15136ebb 100644 (file)
@@ -399,6 +399,9 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn)
        timers_global = (void __iomem *)
                (unsigned long) addr[num_cpu_timers];
 
+       /* Every per-cpu timer works in timer mode */
+       sbus_writel(0x00000000, &timers_global->timer_config);
+
        sbus_writel((((1000000/HZ) + 1) << 10), &timers_global->l10_limit);
 
        master_l10_counter = &timers_global->l10_count;
index 681b3683da9e6134d065d8d719a691738f01653d..d74bc0925f2d2fc0ffbfb419f01e55fcd10d4019 100644 (file)
@@ -17,23 +17,9 @@ along with GNU CC; see the file COPYING.  If not, write to
 the Free Software Foundation, 59 Temple Place - Suite 330,
 Boston, MA 02111-1307, USA.  */
 
-       .data
-       .align 8
-       .globl  __clz_tab
-__clz_tab:
-       .byte   0,1,2,2,3,3,3,3,4,4,4,4,4,4,4,4,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5
-       .byte   6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6
-       .byte   7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
-       .byte   7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
-       .byte   8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
-       .byte   8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
-       .byte   8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
-       .byte   8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8,8
-       .size    __clz_tab,256
-       .global .udiv
-
        .text
        .align 4
+       .global .udiv
        .globl __divdi3
 __divdi3:
        save %sp,-104,%sp
index 7730af6ec13feba4898ca3a8e85e3e416c57c33d..28688e6d96d795c3f860f5343524584b0cefb994 100644 (file)
@@ -64,7 +64,8 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
 #This will adjust *FLAGS accordingly to the platform.
 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
 
-KBUILD_CPPFLAGS += -I$(srctree)/$(HOST_DIR)/include
+KBUILD_CPPFLAGS += -I$(srctree)/$(HOST_DIR)/include \
+                  -I$(HOST_DIR)/include/generated
 
 # -Derrno=kernel_errno - This turns all kernel references to errno into
 # kernel_errno to separate them from the libc errno.  This allows -fno-common
@@ -96,6 +97,10 @@ endef
 
 KBUILD_KCONFIG := $(HOST_DIR)/um/Kconfig
 
+archheaders:
+       $(Q)$(MAKE) -C '$(srctree)' KBUILD_SRC= \
+               ARCH=$(SUBARCH) O='$(objtree)' archheaders
+
 archprepare: include/generated/user_constants.h
 
 LINK-$(CONFIG_LD_SCRIPT_STATIC) += -static
@@ -135,7 +140,7 @@ archclean:
 
 # Generated files
 
-$(HOST_DIR)/um/user-offsets.s: FORCE
+$(HOST_DIR)/um/user-offsets.s: __headers FORCE
        $(Q)$(MAKE) $(build)=$(HOST_DIR)/um $@
 
 define filechk_gen-asm-offsets
index c9da32b0c707015c19db39b49785cd8b0610a35a..06b190390505f848a095648d2c555bd9d996604d 100644 (file)
@@ -167,17 +167,15 @@ void syscall_trace(struct uml_pt_regs *regs, int entryexit)
        int is_singlestep = (current->ptrace & PT_DTRACE) && entryexit;
        int tracesysgood;
 
-       if (unlikely(current->audit_context)) {
-               if (!entryexit)
-                       audit_syscall_entry(HOST_AUDIT_ARCH,
-                                           UPT_SYSCALL_NR(regs),
-                                           UPT_SYSCALL_ARG1(regs),
-                                           UPT_SYSCALL_ARG2(regs),
-                                           UPT_SYSCALL_ARG3(regs),
-                                           UPT_SYSCALL_ARG4(regs));
-               else audit_syscall_exit(AUDITSC_RESULT(UPT_SYSCALL_RET(regs)),
-                                       UPT_SYSCALL_RET(regs));
-       }
+       if (!entryexit)
+               audit_syscall_entry(HOST_AUDIT_ARCH,
+                                   UPT_SYSCALL_NR(regs),
+                                   UPT_SYSCALL_ARG1(regs),
+                                   UPT_SYSCALL_ARG2(regs),
+                                   UPT_SYSCALL_ARG3(regs),
+                                   UPT_SYSCALL_ARG4(regs));
+       else
+               audit_syscall_exit(regs);
 
        /* Fake a debug trap */
        if (is_singlestep)
index 028079065af6c55ac518969a6b5c1d8ae321b6c3..7cab8c08e6d1d2eee802397c9bc17ea0f7b1bbc1 100644 (file)
@@ -1,3 +1,4 @@
 boot/compressed/vmlinux
 tools/test_get_len
+tools/insn_sanity
 
index 6c14ecd851d0b3ae9d32cbd73ce97419b1cb4a48..5bed94e189fab8ef9bf24f038763ee90bc45b798 100644 (file)
@@ -125,16 +125,6 @@ config HAVE_LATENCYTOP_SUPPORT
 config MMU
        def_bool y
 
-config ZONE_DMA
-       bool "DMA memory allocation support" if EXPERT
-       default y
-       help
-         DMA memory allocation support allows devices with less than 32-bit
-         addressing to allocate within the first 16MB of address space.
-         Disable if no such devices will be used.
-
-         If unsure, say Y.
-
 config SBUS
        bool
 
@@ -255,6 +245,16 @@ source "kernel/Kconfig.freezer"
 
 menu "Processor type and features"
 
+config ZONE_DMA
+       bool "DMA memory allocation support" if EXPERT
+       default y
+       help
+         DMA memory allocation support allows devices with less than 32-bit
+         addressing to allocate within the first 16MB of address space.
+         Disable if no such devices will be used.
+
+         If unsure, say Y.
+
 source "kernel/time/Kconfig"
 
 config SMP
@@ -360,7 +360,6 @@ config X86_NUMACHIP
        depends on NUMA
        depends on SMP
        depends on X86_X2APIC
-       depends on !EDAC_AMD64
        ---help---
          Adds support for Numascale NumaChip large-SMP systems. Needed to
          enable more than ~168 cores.
index b02e509072a790b1fbea3387f8749b5326beb822..209ba1294592c406bd735df889f43f6272acbddb 100644 (file)
@@ -117,6 +117,12 @@ KBUILD_CFLAGS += $(call cc-option,-mno-sse -mno-mmx -mno-sse2 -mno-3dnow,)
 KBUILD_CFLAGS += $(mflags-y)
 KBUILD_AFLAGS += $(mflags-y)
 
+###
+# Syscall table generation
+
+archheaders:
+       $(Q)$(MAKE) $(build)=arch/x86/syscalls all
+
 ###
 # Kernel objects
 
index 3a19d04cebebe920e78269693c97637a19404b3b..7116dcba0c9ed2df2afd680c4407d7f57c0244a7 100644 (file)
@@ -321,6 +321,8 @@ static void parse_elf(void *output)
                default: /* Ignore other PT_* */ break;
                }
        }
+
+       free(phdrs);
 }
 
 asmlinkage void decompress_kernel(void *rmode, memptr heap,
index 52d0ccfcf6eafbc84fe0d66270b165b34101639d..455646e0e53282ad97721c4c04420de09e36f3d2 100644 (file)
@@ -3,6 +3,7 @@
 #
 
 obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o
+obj-$(CONFIG_IA32_EMULATION) += nosyscall.o syscall_ia32.o
 
 sysv-$(CONFIG_SYSVIPC) := ipc32.o
 obj-$(CONFIG_IA32_EMULATION) += $(sysv-y)
index 3e274564f6bf0d89f06b489484eb7d86faa049d6..e3e734005e19c1849dfb6f6be7a5b26f2fc9b374 100644 (file)
@@ -14,6 +14,7 @@
 #include <asm/segment.h>
 #include <asm/irqflags.h>
 #include <linux/linkage.h>
+#include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -27,8 +28,6 @@
 
        .section .entry.text, "ax"
 
-#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
-
        .macro IA32_ARG_FIXUP noebp=0
        movl    %edi,%r8d
        .if \noebp
@@ -191,7 +190,7 @@ sysexit_from_sys_call:
        movl %ebx,%edx                  /* 3rd arg: 1st syscall arg */
        movl %eax,%esi                  /* 2nd arg: syscall number */
        movl $AUDIT_ARCH_I386,%edi      /* 1st arg: audit arch */
-       call audit_syscall_entry
+       call __audit_syscall_entry
        movl RAX-ARGOFFSET(%rsp),%eax   /* reload syscall number */
        cmpq $(IA32_NR_syscalls-1),%rax
        ja ia32_badsys
@@ -208,12 +207,13 @@ sysexit_from_sys_call:
        TRACE_IRQS_ON
        sti
        movl %eax,%esi          /* second arg, syscall return value */
-       cmpl $0,%eax            /* is it < 0? */
-       setl %al                /* 1 if so, 0 if not */
+       cmpl $-MAX_ERRNO,%eax   /* is it an error ? */
+       jbe 1f
+       movslq %eax, %rsi       /* if error sign extend to 64 bits */
+1:     setbe %al               /* 1 if error, 0 if not */
        movzbl %al,%edi         /* zero-extend that into %edi */
-       inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
-       call audit_syscall_exit
-       movl RAX-ARGOFFSET(%rsp),%eax   /* reload syscall return value */
+       call __audit_syscall_exit
+       movq RAX-ARGOFFSET(%rsp),%rax   /* reload syscall return value */
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
        cli
        TRACE_IRQS_OFF
@@ -447,9 +447,6 @@ ia32_badsys:
        movq $-ENOSYS,%rax
        jmp ia32_sysret
 
-quiet_ni_syscall:
-       movq $-ENOSYS,%rax
-       ret
        CFI_ENDPROC
        
        .macro PTREGSCALL label, func, arg
@@ -494,357 +491,3 @@ ia32_ptregs_common:
        jmp  ia32_sysret        /* misbalances the return cache */
        CFI_ENDPROC
 END(ia32_ptregs_common)
-
-       .section .rodata,"a"
-       .align 8
-ia32_sys_call_table:
-       .quad sys_restart_syscall
-       .quad sys_exit
-       .quad stub32_fork
-       .quad sys_read
-       .quad sys_write
-       .quad compat_sys_open           /* 5 */
-       .quad sys_close
-       .quad sys32_waitpid
-       .quad sys_creat
-       .quad sys_link
-       .quad sys_unlink                /* 10 */
-       .quad stub32_execve
-       .quad sys_chdir
-       .quad compat_sys_time
-       .quad sys_mknod
-       .quad sys_chmod         /* 15 */
-       .quad sys_lchown16
-       .quad quiet_ni_syscall                  /* old break syscall holder */
-       .quad sys_stat
-       .quad sys32_lseek
-       .quad sys_getpid                /* 20 */
-       .quad compat_sys_mount  /* mount  */
-       .quad sys_oldumount     /* old_umount  */
-       .quad sys_setuid16
-       .quad sys_getuid16
-       .quad compat_sys_stime  /* stime */             /* 25 */
-       .quad compat_sys_ptrace /* ptrace */
-       .quad sys_alarm
-       .quad sys_fstat /* (old)fstat */
-       .quad sys_pause
-       .quad compat_sys_utime  /* 30 */
-       .quad quiet_ni_syscall  /* old stty syscall holder */
-       .quad quiet_ni_syscall  /* old gtty syscall holder */
-       .quad sys_access
-       .quad sys_nice  
-       .quad quiet_ni_syscall  /* 35 */        /* old ftime syscall holder */
-       .quad sys_sync
-       .quad sys32_kill
-       .quad sys_rename
-       .quad sys_mkdir
-       .quad sys_rmdir         /* 40 */
-       .quad sys_dup
-       .quad sys_pipe
-       .quad compat_sys_times
-       .quad quiet_ni_syscall                  /* old prof syscall holder */
-       .quad sys_brk           /* 45 */
-       .quad sys_setgid16
-       .quad sys_getgid16
-       .quad sys_signal
-       .quad sys_geteuid16
-       .quad sys_getegid16     /* 50 */
-       .quad sys_acct
-       .quad sys_umount                        /* new_umount */
-       .quad quiet_ni_syscall                  /* old lock syscall holder */
-       .quad compat_sys_ioctl
-       .quad compat_sys_fcntl64                /* 55 */
-       .quad quiet_ni_syscall                  /* old mpx syscall holder */
-       .quad sys_setpgid
-       .quad quiet_ni_syscall                  /* old ulimit syscall holder */
-       .quad sys_olduname
-       .quad sys_umask         /* 60 */
-       .quad sys_chroot
-       .quad compat_sys_ustat
-       .quad sys_dup2
-       .quad sys_getppid
-       .quad sys_getpgrp               /* 65 */
-       .quad sys_setsid
-       .quad sys32_sigaction
-       .quad sys_sgetmask
-       .quad sys_ssetmask
-       .quad sys_setreuid16    /* 70 */
-       .quad sys_setregid16
-       .quad sys32_sigsuspend
-       .quad compat_sys_sigpending
-       .quad sys_sethostname
-       .quad compat_sys_setrlimit      /* 75 */
-       .quad compat_sys_old_getrlimit  /* old_getrlimit */
-       .quad compat_sys_getrusage
-       .quad compat_sys_gettimeofday
-       .quad compat_sys_settimeofday
-       .quad sys_getgroups16   /* 80 */
-       .quad sys_setgroups16
-       .quad compat_sys_old_select
-       .quad sys_symlink
-       .quad sys_lstat
-       .quad sys_readlink              /* 85 */
-       .quad sys_uselib
-       .quad sys_swapon
-       .quad sys_reboot
-       .quad compat_sys_old_readdir
-       .quad sys32_mmap                /* 90 */
-       .quad sys_munmap
-       .quad sys_truncate
-       .quad sys_ftruncate
-       .quad sys_fchmod
-       .quad sys_fchown16              /* 95 */
-       .quad sys_getpriority
-       .quad sys_setpriority
-       .quad quiet_ni_syscall                  /* old profil syscall holder */
-       .quad compat_sys_statfs
-       .quad compat_sys_fstatfs                /* 100 */
-       .quad sys_ioperm
-       .quad compat_sys_socketcall
-       .quad sys_syslog
-       .quad compat_sys_setitimer
-       .quad compat_sys_getitimer      /* 105 */
-       .quad compat_sys_newstat
-       .quad compat_sys_newlstat
-       .quad compat_sys_newfstat
-       .quad sys_uname
-       .quad stub32_iopl               /* 110 */
-       .quad sys_vhangup
-       .quad quiet_ni_syscall  /* old "idle" system call */
-       .quad sys32_vm86_warning        /* vm86old */ 
-       .quad compat_sys_wait4
-       .quad sys_swapoff               /* 115 */
-       .quad compat_sys_sysinfo
-       .quad sys32_ipc
-       .quad sys_fsync
-       .quad stub32_sigreturn
-       .quad stub32_clone              /* 120 */
-       .quad sys_setdomainname
-       .quad sys_newuname
-       .quad sys_modify_ldt
-       .quad compat_sys_adjtimex
-       .quad sys32_mprotect            /* 125 */
-       .quad compat_sys_sigprocmask
-       .quad quiet_ni_syscall          /* create_module */
-       .quad sys_init_module
-       .quad sys_delete_module
-       .quad quiet_ni_syscall          /* 130  get_kernel_syms */
-       .quad sys32_quotactl
-       .quad sys_getpgid
-       .quad sys_fchdir
-       .quad quiet_ni_syscall  /* bdflush */
-       .quad sys_sysfs         /* 135 */
-       .quad sys_personality
-       .quad quiet_ni_syscall  /* for afs_syscall */
-       .quad sys_setfsuid16
-       .quad sys_setfsgid16
-       .quad sys_llseek                /* 140 */
-       .quad compat_sys_getdents
-       .quad compat_sys_select
-       .quad sys_flock
-       .quad sys_msync
-       .quad compat_sys_readv          /* 145 */
-       .quad compat_sys_writev
-       .quad sys_getsid
-       .quad sys_fdatasync
-       .quad compat_sys_sysctl /* sysctl */
-       .quad sys_mlock         /* 150 */
-       .quad sys_munlock
-       .quad sys_mlockall
-       .quad sys_munlockall
-       .quad sys_sched_setparam
-       .quad sys_sched_getparam   /* 155 */
-       .quad sys_sched_setscheduler
-       .quad sys_sched_getscheduler
-       .quad sys_sched_yield
-       .quad sys_sched_get_priority_max
-       .quad sys_sched_get_priority_min  /* 160 */
-       .quad sys32_sched_rr_get_interval
-       .quad compat_sys_nanosleep
-       .quad sys_mremap
-       .quad sys_setresuid16
-       .quad sys_getresuid16   /* 165 */
-       .quad sys32_vm86_warning        /* vm86 */ 
-       .quad quiet_ni_syscall  /* query_module */
-       .quad sys_poll
-       .quad quiet_ni_syscall /* old nfsservctl */
-       .quad sys_setresgid16   /* 170 */
-       .quad sys_getresgid16
-       .quad sys_prctl
-       .quad stub32_rt_sigreturn
-       .quad sys32_rt_sigaction
-       .quad sys32_rt_sigprocmask      /* 175 */
-       .quad sys32_rt_sigpending
-       .quad compat_sys_rt_sigtimedwait
-       .quad sys32_rt_sigqueueinfo
-       .quad sys_rt_sigsuspend
-       .quad sys32_pread               /* 180 */
-       .quad sys32_pwrite
-       .quad sys_chown16
-       .quad sys_getcwd
-       .quad sys_capget
-       .quad sys_capset
-       .quad stub32_sigaltstack
-       .quad sys32_sendfile
-       .quad quiet_ni_syscall          /* streams1 */
-       .quad quiet_ni_syscall          /* streams2 */
-       .quad stub32_vfork            /* 190 */
-       .quad compat_sys_getrlimit
-       .quad sys_mmap_pgoff
-       .quad sys32_truncate64
-       .quad sys32_ftruncate64
-       .quad sys32_stat64              /* 195 */
-       .quad sys32_lstat64
-       .quad sys32_fstat64
-       .quad sys_lchown
-       .quad sys_getuid
-       .quad sys_getgid                /* 200 */
-       .quad sys_geteuid
-       .quad sys_getegid
-       .quad sys_setreuid
-       .quad sys_setregid
-       .quad sys_getgroups     /* 205 */
-       .quad sys_setgroups
-       .quad sys_fchown
-       .quad sys_setresuid
-       .quad sys_getresuid
-       .quad sys_setresgid     /* 210 */
-       .quad sys_getresgid
-       .quad sys_chown
-       .quad sys_setuid
-       .quad sys_setgid
-       .quad sys_setfsuid              /* 215 */
-       .quad sys_setfsgid
-       .quad sys_pivot_root
-       .quad sys_mincore
-       .quad sys_madvise
-       .quad compat_sys_getdents64     /* 220 getdents64 */
-       .quad compat_sys_fcntl64        
-       .quad quiet_ni_syscall          /* tux */
-       .quad quiet_ni_syscall          /* security */
-       .quad sys_gettid        
-       .quad sys32_readahead   /* 225 */
-       .quad sys_setxattr
-       .quad sys_lsetxattr
-       .quad sys_fsetxattr
-       .quad sys_getxattr
-       .quad sys_lgetxattr     /* 230 */
-       .quad sys_fgetxattr
-       .quad sys_listxattr
-       .quad sys_llistxattr
-       .quad sys_flistxattr
-       .quad sys_removexattr   /* 235 */
-       .quad sys_lremovexattr
-       .quad sys_fremovexattr
-       .quad sys_tkill
-       .quad sys_sendfile64 
-       .quad compat_sys_futex          /* 240 */
-       .quad compat_sys_sched_setaffinity
-       .quad compat_sys_sched_getaffinity
-       .quad sys_set_thread_area
-       .quad sys_get_thread_area
-       .quad compat_sys_io_setup       /* 245 */
-       .quad sys_io_destroy
-       .quad compat_sys_io_getevents
-       .quad compat_sys_io_submit
-       .quad sys_io_cancel
-       .quad sys32_fadvise64           /* 250 */
-       .quad quiet_ni_syscall  /* free_huge_pages */
-       .quad sys_exit_group
-       .quad sys32_lookup_dcookie
-       .quad sys_epoll_create
-       .quad sys_epoll_ctl             /* 255 */
-       .quad sys_epoll_wait
-       .quad sys_remap_file_pages
-       .quad sys_set_tid_address
-       .quad compat_sys_timer_create
-       .quad compat_sys_timer_settime  /* 260 */
-       .quad compat_sys_timer_gettime
-       .quad sys_timer_getoverrun
-       .quad sys_timer_delete
-       .quad compat_sys_clock_settime
-       .quad compat_sys_clock_gettime  /* 265 */
-       .quad compat_sys_clock_getres
-       .quad compat_sys_clock_nanosleep
-       .quad compat_sys_statfs64
-       .quad compat_sys_fstatfs64
-       .quad sys_tgkill                /* 270 */
-       .quad compat_sys_utimes
-       .quad sys32_fadvise64_64
-       .quad quiet_ni_syscall  /* sys_vserver */
-       .quad sys_mbind
-       .quad compat_sys_get_mempolicy  /* 275 */
-       .quad sys_set_mempolicy
-       .quad compat_sys_mq_open
-       .quad sys_mq_unlink
-       .quad compat_sys_mq_timedsend
-       .quad compat_sys_mq_timedreceive        /* 280 */
-       .quad compat_sys_mq_notify
-       .quad compat_sys_mq_getsetattr
-       .quad compat_sys_kexec_load     /* reserved for kexec */
-       .quad compat_sys_waitid
-       .quad quiet_ni_syscall          /* 285: sys_altroot */
-       .quad sys_add_key
-       .quad sys_request_key
-       .quad sys_keyctl
-       .quad sys_ioprio_set
-       .quad sys_ioprio_get            /* 290 */
-       .quad sys_inotify_init
-       .quad sys_inotify_add_watch
-       .quad sys_inotify_rm_watch
-       .quad sys_migrate_pages
-       .quad compat_sys_openat         /* 295 */
-       .quad sys_mkdirat
-       .quad sys_mknodat
-       .quad sys_fchownat
-       .quad compat_sys_futimesat
-       .quad sys32_fstatat             /* 300 */
-       .quad sys_unlinkat
-       .quad sys_renameat
-       .quad sys_linkat
-       .quad sys_symlinkat
-       .quad sys_readlinkat            /* 305 */
-       .quad sys_fchmodat
-       .quad sys_faccessat
-       .quad compat_sys_pselect6
-       .quad compat_sys_ppoll
-       .quad sys_unshare               /* 310 */
-       .quad compat_sys_set_robust_list
-       .quad compat_sys_get_robust_list
-       .quad sys_splice
-       .quad sys32_sync_file_range
-       .quad sys_tee                   /* 315 */
-       .quad compat_sys_vmsplice
-       .quad compat_sys_move_pages
-       .quad sys_getcpu
-       .quad sys_epoll_pwait
-       .quad compat_sys_utimensat      /* 320 */
-       .quad compat_sys_signalfd
-       .quad sys_timerfd_create
-       .quad sys_eventfd
-       .quad sys32_fallocate
-       .quad compat_sys_timerfd_settime        /* 325 */
-       .quad compat_sys_timerfd_gettime
-       .quad compat_sys_signalfd4
-       .quad sys_eventfd2
-       .quad sys_epoll_create1
-       .quad sys_dup3                          /* 330 */
-       .quad sys_pipe2
-       .quad sys_inotify_init1
-       .quad compat_sys_preadv
-       .quad compat_sys_pwritev
-       .quad compat_sys_rt_tgsigqueueinfo      /* 335 */
-       .quad sys_perf_event_open
-       .quad compat_sys_recvmmsg
-       .quad sys_fanotify_init
-       .quad sys32_fanotify_mark
-       .quad sys_prlimit64             /* 340 */
-       .quad sys_name_to_handle_at
-       .quad compat_sys_open_by_handle_at
-       .quad compat_sys_clock_adjtime
-       .quad sys_syncfs
-       .quad compat_sys_sendmmsg       /* 345 */
-       .quad sys_setns
-       .quad compat_sys_process_vm_readv
-       .quad compat_sys_process_vm_writev
-ia32_syscall_end:
diff --git a/arch/x86/ia32/nosyscall.c b/arch/x86/ia32/nosyscall.c
new file mode 100644 (file)
index 0000000..51ecd5b
--- /dev/null
@@ -0,0 +1,7 @@
+#include <linux/kernel.h>
+#include <linux/errno.h>
+
+long compat_ni_syscall(void)
+{
+       return -ENOSYS;
+}
diff --git a/arch/x86/ia32/syscall_ia32.c b/arch/x86/ia32/syscall_ia32.c
new file mode 100644 (file)
index 0000000..4754ba0
--- /dev/null
@@ -0,0 +1,25 @@
+/* System call table for ia32 emulation. */
+
+#include <linux/linkage.h>
+#include <linux/sys.h>
+#include <linux/cache.h>
+#include <asm/asm-offsets.h>
+
+#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void compat(void) ;
+#include <asm/syscalls_32.h>
+#undef __SYSCALL_I386
+
+#define __SYSCALL_I386(nr, sym, compat) [nr] = compat,
+
+typedef void (*sys_call_ptr_t)(void);
+
+extern void compat_ni_syscall(void);
+
+const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = {
+       /*
+        * Smells like a compiler bug -- it doesn't work
+        * when the & below is removed.
+        */
+       [0 ... __NR_ia32_syscall_max] = &compat_ni_syscall,
+#include <asm/syscalls_32.h>
+};
index 6fa90a845e4ca9aa741034195f6a7026077bcadc..b57e6a43a37a1f9ebca3f4a4966f54b31753e605 100644 (file)
@@ -19,7 +19,8 @@ header-y += processor-flags.h
 header-y += ptrace-abi.h
 header-y += sigcontext32.h
 header-y += ucontext.h
-header-y += unistd_32.h
-header-y += unistd_64.h
 header-y += vm86.h
 header-y += vsyscall.h
+
+genhdr-y += unistd_32.h
+genhdr-y += unistd_64.h
index 0c9fa2745f13e4c1f242d3f37813f25849aebd6d..b3b7332629096849d11866c017c4444f8d156f9e 100644 (file)
@@ -145,13 +145,13 @@ extern void __add_wrong_size(void)
 
 #ifdef __HAVE_ARCH_CMPXCHG
 #define cmpxchg(ptr, old, new)                                         \
-       __cmpxchg((ptr), (old), (new), sizeof(*ptr))
+       __cmpxchg(ptr, old, new, sizeof(*(ptr)))
 
 #define sync_cmpxchg(ptr, old, new)                                    \
-       __sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))
+       __sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))
 
 #define cmpxchg_local(ptr, old, new)                                   \
-       __cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
+       __cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
 #endif
 
 /*
index 17c5d4bdee5ed5124c7be91bf8d4ef73b39b7c2f..8d67d428b0f993f79031bb5f9702bd44adc8001a 100644 (file)
 #define X86_FEATURE_WDT                (6*32+13) /* Watchdog timer */
 #define X86_FEATURE_LWP                (6*32+15) /* Light Weight Profiling */
 #define X86_FEATURE_FMA4       (6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_TCE                (6*32+17) /* translation cache extension */
 #define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
 #define X86_FEATURE_TBM                (6*32+21) /* trailing bit manipulations */
 #define X86_FEATURE_TOPOEXT    (6*32+22) /* topology extensions CPUID leafs */
index 976f6ecd2ce691da49d58e433f0b76b0348622b2..b0d5716ca1e4b6fd76b232554e7778c60d5c89c3 100644 (file)
@@ -2,17 +2,10 @@
 #define _ASM_X86_IA32_UNISTD_H
 
 /*
- * This file contains the system call numbers of the ia32 port,
+ * This file contains the system call numbers of the ia32 compat ABI,
  * this is for the kernel only.
- * Only add syscalls here where some part of the kernel needs to know
- * the number. This should be otherwise in sync with asm-x86/unistd_32.h. -AK
  */
-
-#define __NR_ia32_restart_syscall 0
-#define __NR_ia32_exit           1
-#define __NR_ia32_read           3
-#define __NR_ia32_write                  4
-#define __NR_ia32_sigreturn    119
-#define __NR_ia32_rt_sigreturn 173
+#define __SYSCALL_ia32_NR(x) (x)
+#include <asm/unistd_32_ia32.h>
 
 #endif /* _ASM_X86_IA32_UNISTD_H */
index ab4092e3214ecea7d02e3827fe596ed662243214..7b9cfc4878afc374dacb4852312be4fcba35384e 100644 (file)
@@ -190,6 +190,9 @@ struct x86_emulate_ops {
        int (*intercept)(struct x86_emulate_ctxt *ctxt,
                         struct x86_instruction_info *info,
                         enum x86_intercept_stage stage);
+
+       bool (*get_cpuid)(struct x86_emulate_ctxt *ctxt,
+                        u32 *eax, u32 *ebx, u32 *ecx, u32 *edx);
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -298,6 +301,19 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_PROT     (X86EMUL_MODE_PROT16|X86EMUL_MODE_PROT32| \
                               X86EMUL_MODE_PROT64)
 
+/* CPUID vendors */
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx 0x68747541
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx 0x444d4163
+#define X86EMUL_CPUID_VENDOR_AuthenticAMD_edx 0x69746e65
+
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx 0x69444d41
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx 0x21726574
+#define X86EMUL_CPUID_VENDOR_AMDisbetterI_edx 0x74656273
+
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_ebx 0x756e6547
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_ecx 0x6c65746e
+#define X86EMUL_CPUID_VENDOR_GenuineIntel_edx 0x49656e69
+
 enum x86_intercept_stage {
        X86_ICTP_NONE = 0,   /* Allow zero-init to not match anything */
        X86_ICPT_PRE_EXCEPT,
index f35ce43c1a7785bd0d555f0b3f6a65ab417bba3b..6aefb14cbbc5d9c40dcbf89322a5d4a77103b48e 100644 (file)
@@ -151,7 +151,7 @@ static inline void enable_p5_mce(void) {}
 
 void mce_setup(struct mce *m);
 void mce_log(struct mce *m);
-DECLARE_PER_CPU(struct device, mce_device);
+extern struct device *mce_device[CONFIG_NR_CPUS];
 
 /*
  * Maximum banks number.
index c4a348f7bd43de3932f88d0f8ddb5b66a7526863..d962e5652a7352498bd9654315f7819fb7d9d86b 100644 (file)
@@ -15,6 +15,7 @@
 
 #include <linux/sched.h>
 #include <linux/err.h>
+#include <asm/asm-offsets.h>   /* For NR_syscalls */
 
 extern const unsigned long sys_call_table[];
 
index 2a58ed3e51d8265627e185d5d8be3783c5ea9bf4..21f77b89e47a6aeea5ab00c01c28db2d54f0520b 100644 (file)
@@ -1,13 +1,60 @@
+#ifndef _ASM_X86_UNISTD_H
+#define _ASM_X86_UNISTD_H 1
+
 #ifdef __KERNEL__
 # ifdef CONFIG_X86_32
-#  include "unistd_32.h"
+
+#  include <asm/unistd_32.h>
+#  define __ARCH_WANT_IPC_PARSE_VERSION
+#  define __ARCH_WANT_STAT64
+#  define __ARCH_WANT_SYS_IPC
+#  define __ARCH_WANT_SYS_OLD_MMAP
+#  define __ARCH_WANT_SYS_OLD_SELECT
+
 # else
-#  include "unistd_64.h"
+
+#  include <asm/unistd_64.h>
+#  define __ARCH_WANT_COMPAT_SYS_TIME
+
 # endif
+
+# define __ARCH_WANT_OLD_READDIR
+# define __ARCH_WANT_OLD_STAT
+# define __ARCH_WANT_SYS_ALARM
+# define __ARCH_WANT_SYS_FADVISE64
+# define __ARCH_WANT_SYS_GETHOSTNAME
+# define __ARCH_WANT_SYS_GETPGRP
+# define __ARCH_WANT_SYS_LLSEEK
+# define __ARCH_WANT_SYS_NICE
+# define __ARCH_WANT_SYS_OLDUMOUNT
+# define __ARCH_WANT_SYS_OLD_GETRLIMIT
+# define __ARCH_WANT_SYS_OLD_UNAME
+# define __ARCH_WANT_SYS_PAUSE
+# define __ARCH_WANT_SYS_RT_SIGACTION
+# define __ARCH_WANT_SYS_RT_SIGSUSPEND
+# define __ARCH_WANT_SYS_SGETMASK
+# define __ARCH_WANT_SYS_SIGNAL
+# define __ARCH_WANT_SYS_SIGPENDING
+# define __ARCH_WANT_SYS_SIGPROCMASK
+# define __ARCH_WANT_SYS_SOCKETCALL
+# define __ARCH_WANT_SYS_TIME
+# define __ARCH_WANT_SYS_UTIME
+# define __ARCH_WANT_SYS_WAITPID
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+# define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+
 #else
 # ifdef __i386__
-#  include "unistd_32.h"
+#  include <asm/unistd_32.h>
 # else
-#  include "unistd_64.h"
+#  include <asm/unistd_64.h>
 # endif
 #endif
+
+#endif /* _ASM_X86_UNISTD_H */
diff --git a/arch/x86/include/asm/unistd_32.h b/arch/x86/include/asm/unistd_32.h
deleted file mode 100644 (file)
index 599c77d..0000000
+++ /dev/null
@@ -1,401 +0,0 @@
-#ifndef _ASM_X86_UNISTD_32_H
-#define _ASM_X86_UNISTD_32_H
-
-/*
- * This file contains the system call numbers.
- */
-
-#define __NR_restart_syscall      0
-#define __NR_exit                1
-#define __NR_fork                2
-#define __NR_read                3
-#define __NR_write               4
-#define __NR_open                5
-#define __NR_close               6
-#define __NR_waitpid             7
-#define __NR_creat               8
-#define __NR_link                9
-#define __NR_unlink             10
-#define __NR_execve             11
-#define __NR_chdir              12
-#define __NR_time               13
-#define __NR_mknod              14
-#define __NR_chmod              15
-#define __NR_lchown             16
-#define __NR_break              17
-#define __NR_oldstat            18
-#define __NR_lseek              19
-#define __NR_getpid             20
-#define __NR_mount              21
-#define __NR_umount             22
-#define __NR_setuid             23
-#define __NR_getuid             24
-#define __NR_stime              25
-#define __NR_ptrace             26
-#define __NR_alarm              27
-#define __NR_oldfstat           28
-#define __NR_pause              29
-#define __NR_utime              30
-#define __NR_stty               31
-#define __NR_gtty               32
-#define __NR_access             33
-#define __NR_nice               34
-#define __NR_ftime              35
-#define __NR_sync               36
-#define __NR_kill               37
-#define __NR_rename             38
-#define __NR_mkdir              39
-#define __NR_rmdir              40
-#define __NR_dup                41
-#define __NR_pipe               42
-#define __NR_times              43
-#define __NR_prof               44
-#define __NR_brk                45
-#define __NR_setgid             46
-#define __NR_getgid             47
-#define __NR_signal             48
-#define __NR_geteuid            49
-#define __NR_getegid            50
-#define __NR_acct               51
-#define __NR_umount2            52
-#define __NR_lock               53
-#define __NR_ioctl              54
-#define __NR_fcntl              55
-#define __NR_mpx                56
-#define __NR_setpgid            57
-#define __NR_ulimit             58
-#define __NR_oldolduname        59
-#define __NR_umask              60
-#define __NR_chroot             61
-#define __NR_ustat              62
-#define __NR_dup2               63
-#define __NR_getppid            64
-#define __NR_getpgrp            65
-#define __NR_setsid             66
-#define __NR_sigaction          67
-#define __NR_sgetmask           68
-#define __NR_ssetmask           69
-#define __NR_setreuid           70
-#define __NR_setregid           71
-#define __NR_sigsuspend                 72
-#define __NR_sigpending                 73
-#define __NR_sethostname        74
-#define __NR_setrlimit          75
-#define __NR_getrlimit          76   /* Back compatible 2Gig limited rlimit */
-#define __NR_getrusage          77
-#define __NR_gettimeofday       78
-#define __NR_settimeofday       79
-#define __NR_getgroups          80
-#define __NR_setgroups          81
-#define __NR_select             82
-#define __NR_symlink            83
-#define __NR_oldlstat           84
-#define __NR_readlink           85
-#define __NR_uselib             86
-#define __NR_swapon             87
-#define __NR_reboot             88
-#define __NR_readdir            89
-#define __NR_mmap               90
-#define __NR_munmap             91
-#define __NR_truncate           92
-#define __NR_ftruncate          93
-#define __NR_fchmod             94
-#define __NR_fchown             95
-#define __NR_getpriority        96
-#define __NR_setpriority        97
-#define __NR_profil             98
-#define __NR_statfs             99
-#define __NR_fstatfs           100
-#define __NR_ioperm            101
-#define __NR_socketcall                102
-#define __NR_syslog            103
-#define __NR_setitimer         104
-#define __NR_getitimer         105
-#define __NR_stat              106
-#define __NR_lstat             107
-#define __NR_fstat             108
-#define __NR_olduname          109
-#define __NR_iopl              110
-#define __NR_vhangup           111
-#define __NR_idle              112
-#define __NR_vm86old           113
-#define __NR_wait4             114
-#define __NR_swapoff           115
-#define __NR_sysinfo           116
-#define __NR_ipc               117
-#define __NR_fsync             118
-#define __NR_sigreturn         119
-#define __NR_clone             120
-#define __NR_setdomainname     121
-#define __NR_uname             122
-#define __NR_modify_ldt                123
-#define __NR_adjtimex          124
-#define __NR_mprotect          125
-#define __NR_sigprocmask       126
-#define __NR_create_module     127
-#define __NR_init_module       128
-#define __NR_delete_module     129
-#define __NR_get_kernel_syms   130
-#define __NR_quotactl          131
-#define __NR_getpgid           132
-#define __NR_fchdir            133
-#define __NR_bdflush           134
-#define __NR_sysfs             135
-#define __NR_personality       136
-#define __NR_afs_syscall       137 /* Syscall for Andrew File System */
-#define __NR_setfsuid          138
-#define __NR_setfsgid          139
-#define __NR__llseek           140
-#define __NR_getdents          141
-#define __NR__newselect                142
-#define __NR_flock             143
-#define __NR_msync             144
-#define __NR_readv             145
-#define __NR_writev            146
-#define __NR_getsid            147
-#define __NR_fdatasync         148
-#define __NR__sysctl           149
-#define __NR_mlock             150
-#define __NR_munlock           151
-#define __NR_mlockall          152
-#define __NR_munlockall                153
-#define __NR_sched_setparam            154
-#define __NR_sched_getparam            155
-#define __NR_sched_setscheduler                156
-#define __NR_sched_getscheduler                157
-#define __NR_sched_yield               158
-#define __NR_sched_get_priority_max    159
-#define __NR_sched_get_priority_min    160
-#define __NR_sched_rr_get_interval     161
-#define __NR_nanosleep         162
-#define __NR_mremap            163
-#define __NR_setresuid         164
-#define __NR_getresuid         165
-#define __NR_vm86              166
-#define __NR_query_module      167
-#define __NR_poll              168
-#define __NR_nfsservctl                169
-#define __NR_setresgid         170
-#define __NR_getresgid         171
-#define __NR_prctl              172
-#define __NR_rt_sigreturn      173
-#define __NR_rt_sigaction      174
-#define __NR_rt_sigprocmask    175
-#define __NR_rt_sigpending     176
-#define __NR_rt_sigtimedwait   177
-#define __NR_rt_sigqueueinfo   178
-#define __NR_rt_sigsuspend     179
-#define __NR_pread64           180
-#define __NR_pwrite64          181
-#define __NR_chown             182
-#define __NR_getcwd            183
-#define __NR_capget            184
-#define __NR_capset            185
-#define __NR_sigaltstack       186
-#define __NR_sendfile          187
-#define __NR_getpmsg           188     /* some people actually want streams */
-#define __NR_putpmsg           189     /* some people actually want streams */
-#define __NR_vfork             190
-#define __NR_ugetrlimit                191     /* SuS compliant getrlimit */
-#define __NR_mmap2             192
-#define __NR_truncate64                193
-#define __NR_ftruncate64       194
-#define __NR_stat64            195
-#define __NR_lstat64           196
-#define __NR_fstat64           197
-#define __NR_lchown32          198
-#define __NR_getuid32          199
-#define __NR_getgid32          200
-#define __NR_geteuid32         201
-#define __NR_getegid32         202
-#define __NR_setreuid32                203
-#define __NR_setregid32                204
-#define __NR_getgroups32       205
-#define __NR_setgroups32       206
-#define __NR_fchown32          207
-#define __NR_setresuid32       208
-#define __NR_getresuid32       209
-#define __NR_setresgid32       210
-#define __NR_getresgid32       211
-#define __NR_chown32           212
-#define __NR_setuid32          213
-#define __NR_setgid32          214
-#define __NR_setfsuid32                215
-#define __NR_setfsgid32                216
-#define __NR_pivot_root                217
-#define __NR_mincore           218
-#define __NR_madvise           219
-#define __NR_madvise1          219     /* delete when C lib stub is removed */
-#define __NR_getdents64                220
-#define __NR_fcntl64           221
-/* 223 is unused */
-#define __NR_gettid            224
-#define __NR_readahead         225
-#define __NR_setxattr          226
-#define __NR_lsetxattr         227
-#define __NR_fsetxattr         228
-#define __NR_getxattr          229
-#define __NR_lgetxattr         230
-#define __NR_fgetxattr         231
-#define __NR_listxattr         232
-#define __NR_llistxattr                233
-#define __NR_flistxattr                234
-#define __NR_removexattr       235
-#define __NR_lremovexattr      236
-#define __NR_fremovexattr      237
-#define __NR_tkill             238
-#define __NR_sendfile64                239
-#define __NR_futex             240
-#define __NR_sched_setaffinity 241
-#define __NR_sched_getaffinity 242
-#define __NR_set_thread_area   243
-#define __NR_get_thread_area   244
-#define __NR_io_setup          245
-#define __NR_io_destroy                246
-#define __NR_io_getevents      247
-#define __NR_io_submit         248
-#define __NR_io_cancel         249
-#define __NR_fadvise64         250
-/* 251 is available for reuse (was briefly sys_set_zone_reclaim) */
-#define __NR_exit_group                252
-#define __NR_lookup_dcookie    253
-#define __NR_epoll_create      254
-#define __NR_epoll_ctl         255
-#define __NR_epoll_wait                256
-#define __NR_remap_file_pages  257
-#define __NR_set_tid_address   258
-#define __NR_timer_create      259
-#define __NR_timer_settime     (__NR_timer_create+1)
-#define __NR_timer_gettime     (__NR_timer_create+2)
-#define __NR_timer_getoverrun  (__NR_timer_create+3)
-#define __NR_timer_delete      (__NR_timer_create+4)
-#define __NR_clock_settime     (__NR_timer_create+5)
-#define __NR_clock_gettime     (__NR_timer_create+6)
-#define __NR_clock_getres      (__NR_timer_create+7)
-#define __NR_clock_nanosleep   (__NR_timer_create+8)
-#define __NR_statfs64          268
-#define __NR_fstatfs64         269
-#define __NR_tgkill            270
-#define __NR_utimes            271
-#define __NR_fadvise64_64      272
-#define __NR_vserver           273
-#define __NR_mbind             274
-#define __NR_get_mempolicy     275
-#define __NR_set_mempolicy     276
-#define __NR_mq_open           277
-#define __NR_mq_unlink         (__NR_mq_open+1)
-#define __NR_mq_timedsend      (__NR_mq_open+2)
-#define __NR_mq_timedreceive   (__NR_mq_open+3)
-#define __NR_mq_notify         (__NR_mq_open+4)
-#define __NR_mq_getsetattr     (__NR_mq_open+5)
-#define __NR_kexec_load                283
-#define __NR_waitid            284
-/* #define __NR_sys_setaltroot 285 */
-#define __NR_add_key           286
-#define __NR_request_key       287
-#define __NR_keyctl            288
-#define __NR_ioprio_set                289
-#define __NR_ioprio_get                290
-#define __NR_inotify_init      291
-#define __NR_inotify_add_watch 292
-#define __NR_inotify_rm_watch  293
-#define __NR_migrate_pages     294
-#define __NR_openat            295
-#define __NR_mkdirat           296
-#define __NR_mknodat           297
-#define __NR_fchownat          298
-#define __NR_futimesat         299
-#define __NR_fstatat64         300
-#define __NR_unlinkat          301
-#define __NR_renameat          302
-#define __NR_linkat            303
-#define __NR_symlinkat         304
-#define __NR_readlinkat                305
-#define __NR_fchmodat          306
-#define __NR_faccessat         307
-#define __NR_pselect6          308
-#define __NR_ppoll             309
-#define __NR_unshare           310
-#define __NR_set_robust_list   311
-#define __NR_get_robust_list   312
-#define __NR_splice            313
-#define __NR_sync_file_range   314
-#define __NR_tee               315
-#define __NR_vmsplice          316
-#define __NR_move_pages                317
-#define __NR_getcpu            318
-#define __NR_epoll_pwait       319
-#define __NR_utimensat         320
-#define __NR_signalfd          321
-#define __NR_timerfd_create    322
-#define __NR_eventfd           323
-#define __NR_fallocate         324
-#define __NR_timerfd_settime   325
-#define __NR_timerfd_gettime   326
-#define __NR_signalfd4         327
-#define __NR_eventfd2          328
-#define __NR_epoll_create1     329
-#define __NR_dup3              330
-#define __NR_pipe2             331
-#define __NR_inotify_init1     332
-#define __NR_preadv            333
-#define __NR_pwritev           334
-#define __NR_rt_tgsigqueueinfo 335
-#define __NR_perf_event_open   336
-#define __NR_recvmmsg          337
-#define __NR_fanotify_init     338
-#define __NR_fanotify_mark     339
-#define __NR_prlimit64         340
-#define __NR_name_to_handle_at 341
-#define __NR_open_by_handle_at  342
-#define __NR_clock_adjtime     343
-#define __NR_syncfs             344
-#define __NR_sendmmsg          345
-#define __NR_setns             346
-#define __NR_process_vm_readv  347
-#define __NR_process_vm_writev 348
-
-#ifdef __KERNEL__
-
-#define NR_syscalls 349
-
-#define __ARCH_WANT_IPC_PARSE_VERSION
-#define __ARCH_WANT_OLD_READDIR
-#define __ARCH_WANT_OLD_STAT
-#define __ARCH_WANT_STAT64
-#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_IPC
-#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
-#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_WAITPID
-#define __ARCH_WANT_SYS_SOCKETCALL
-#define __ARCH_WANT_SYS_FADVISE64
-#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
-#define __ARCH_WANT_SYS_OLD_UNAME
-#define __ARCH_WANT_SYS_OLD_MMAP
-#define __ARCH_WANT_SYS_OLD_SELECT
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#ifndef cond_syscall
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-#endif
-
-#endif /* __KERNEL__ */
-#endif /* _ASM_X86_UNISTD_32_H */
diff --git a/arch/x86/include/asm/unistd_64.h b/arch/x86/include/asm/unistd_64.h
deleted file mode 100644 (file)
index 0431f19..0000000
+++ /dev/null
@@ -1,732 +0,0 @@
-#ifndef _ASM_X86_UNISTD_64_H
-#define _ASM_X86_UNISTD_64_H
-
-#ifndef __SYSCALL
-#define __SYSCALL(a, b)
-#endif
-
-/*
- * This file contains the system call numbers.
- *
- * Note: holes are not allowed.
- */
-
-/* at least 8 syscall per cacheline */
-#define __NR_read                              0
-__SYSCALL(__NR_read, sys_read)
-#define __NR_write                             1
-__SYSCALL(__NR_write, sys_write)
-#define __NR_open                              2
-__SYSCALL(__NR_open, sys_open)
-#define __NR_close                             3
-__SYSCALL(__NR_close, sys_close)
-#define __NR_stat                              4
-__SYSCALL(__NR_stat, sys_newstat)
-#define __NR_fstat                             5
-__SYSCALL(__NR_fstat, sys_newfstat)
-#define __NR_lstat                             6
-__SYSCALL(__NR_lstat, sys_newlstat)
-#define __NR_poll                              7
-__SYSCALL(__NR_poll, sys_poll)
-
-#define __NR_lseek                             8
-__SYSCALL(__NR_lseek, sys_lseek)
-#define __NR_mmap                              9
-__SYSCALL(__NR_mmap, sys_mmap)
-#define __NR_mprotect                          10
-__SYSCALL(__NR_mprotect, sys_mprotect)
-#define __NR_munmap                            11
-__SYSCALL(__NR_munmap, sys_munmap)
-#define __NR_brk                               12
-__SYSCALL(__NR_brk, sys_brk)
-#define __NR_rt_sigaction                      13
-__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction)
-#define __NR_rt_sigprocmask                    14
-__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
-#define __NR_rt_sigreturn                      15
-__SYSCALL(__NR_rt_sigreturn, stub_rt_sigreturn)
-
-#define __NR_ioctl                             16
-__SYSCALL(__NR_ioctl, sys_ioctl)
-#define __NR_pread64                           17
-__SYSCALL(__NR_pread64, sys_pread64)
-#define __NR_pwrite64                          18
-__SYSCALL(__NR_pwrite64, sys_pwrite64)
-#define __NR_readv                             19
-__SYSCALL(__NR_readv, sys_readv)
-#define __NR_writev                            20
-__SYSCALL(__NR_writev, sys_writev)
-#define __NR_access                            21
-__SYSCALL(__NR_access, sys_access)
-#define __NR_pipe                              22
-__SYSCALL(__NR_pipe, sys_pipe)
-#define __NR_select                            23
-__SYSCALL(__NR_select, sys_select)
-
-#define __NR_sched_yield                       24
-__SYSCALL(__NR_sched_yield, sys_sched_yield)
-#define __NR_mremap                            25
-__SYSCALL(__NR_mremap, sys_mremap)
-#define __NR_msync                             26
-__SYSCALL(__NR_msync, sys_msync)
-#define __NR_mincore                           27
-__SYSCALL(__NR_mincore, sys_mincore)
-#define __NR_madvise                           28
-__SYSCALL(__NR_madvise, sys_madvise)
-#define __NR_shmget                            29
-__SYSCALL(__NR_shmget, sys_shmget)
-#define __NR_shmat                             30
-__SYSCALL(__NR_shmat, sys_shmat)
-#define __NR_shmctl                            31
-__SYSCALL(__NR_shmctl, sys_shmctl)
-
-#define __NR_dup                               32
-__SYSCALL(__NR_dup, sys_dup)
-#define __NR_dup2                              33
-__SYSCALL(__NR_dup2, sys_dup2)
-#define __NR_pause                             34
-__SYSCALL(__NR_pause, sys_pause)
-#define __NR_nanosleep                         35
-__SYSCALL(__NR_nanosleep, sys_nanosleep)
-#define __NR_getitimer                         36
-__SYSCALL(__NR_getitimer, sys_getitimer)
-#define __NR_alarm                             37
-__SYSCALL(__NR_alarm, sys_alarm)
-#define __NR_setitimer                         38
-__SYSCALL(__NR_setitimer, sys_setitimer)
-#define __NR_getpid                            39
-__SYSCALL(__NR_getpid, sys_getpid)
-
-#define __NR_sendfile                          40
-__SYSCALL(__NR_sendfile, sys_sendfile64)
-#define __NR_socket                            41
-__SYSCALL(__NR_socket, sys_socket)
-#define __NR_connect                           42
-__SYSCALL(__NR_connect, sys_connect)
-#define __NR_accept                            43
-__SYSCALL(__NR_accept, sys_accept)
-#define __NR_sendto                            44
-__SYSCALL(__NR_sendto, sys_sendto)
-#define __NR_recvfrom                          45
-__SYSCALL(__NR_recvfrom, sys_recvfrom)
-#define __NR_sendmsg                           46
-__SYSCALL(__NR_sendmsg, sys_sendmsg)
-#define __NR_recvmsg                           47
-__SYSCALL(__NR_recvmsg, sys_recvmsg)
-
-#define __NR_shutdown                          48
-__SYSCALL(__NR_shutdown, sys_shutdown)
-#define __NR_bind                              49
-__SYSCALL(__NR_bind, sys_bind)
-#define __NR_listen                            50
-__SYSCALL(__NR_listen, sys_listen)
-#define __NR_getsockname                       51
-__SYSCALL(__NR_getsockname, sys_getsockname)
-#define __NR_getpeername                       52
-__SYSCALL(__NR_getpeername, sys_getpeername)
-#define __NR_socketpair                                53
-__SYSCALL(__NR_socketpair, sys_socketpair)
-#define __NR_setsockopt                                54
-__SYSCALL(__NR_setsockopt, sys_setsockopt)
-#define __NR_getsockopt                                55
-__SYSCALL(__NR_getsockopt, sys_getsockopt)
-
-#define __NR_clone                             56
-__SYSCALL(__NR_clone, stub_clone)
-#define __NR_fork                              57
-__SYSCALL(__NR_fork, stub_fork)
-#define __NR_vfork                             58
-__SYSCALL(__NR_vfork, stub_vfork)
-#define __NR_execve                            59
-__SYSCALL(__NR_execve, stub_execve)
-#define __NR_exit                              60
-__SYSCALL(__NR_exit, sys_exit)
-#define __NR_wait4                             61
-__SYSCALL(__NR_wait4, sys_wait4)
-#define __NR_kill                              62
-__SYSCALL(__NR_kill, sys_kill)
-#define __NR_uname                             63
-__SYSCALL(__NR_uname, sys_newuname)
-
-#define __NR_semget                            64
-__SYSCALL(__NR_semget, sys_semget)
-#define __NR_semop                             65
-__SYSCALL(__NR_semop, sys_semop)
-#define __NR_semctl                            66
-__SYSCALL(__NR_semctl, sys_semctl)
-#define __NR_shmdt                             67
-__SYSCALL(__NR_shmdt, sys_shmdt)
-#define __NR_msgget                            68
-__SYSCALL(__NR_msgget, sys_msgget)
-#define __NR_msgsnd                            69
-__SYSCALL(__NR_msgsnd, sys_msgsnd)
-#define __NR_msgrcv                            70
-__SYSCALL(__NR_msgrcv, sys_msgrcv)
-#define __NR_msgctl                            71
-__SYSCALL(__NR_msgctl, sys_msgctl)
-
-#define __NR_fcntl                             72
-__SYSCALL(__NR_fcntl, sys_fcntl)
-#define __NR_flock                             73
-__SYSCALL(__NR_flock, sys_flock)
-#define __NR_fsync                             74
-__SYSCALL(__NR_fsync, sys_fsync)
-#define __NR_fdatasync                         75
-__SYSCALL(__NR_fdatasync, sys_fdatasync)
-#define __NR_truncate                          76
-__SYSCALL(__NR_truncate, sys_truncate)
-#define __NR_ftruncate                         77
-__SYSCALL(__NR_ftruncate, sys_ftruncate)
-#define __NR_getdents                          78
-__SYSCALL(__NR_getdents, sys_getdents)
-#define __NR_getcwd                            79
-__SYSCALL(__NR_getcwd, sys_getcwd)
-
-#define __NR_chdir                             80
-__SYSCALL(__NR_chdir, sys_chdir)
-#define __NR_fchdir                            81
-__SYSCALL(__NR_fchdir, sys_fchdir)
-#define __NR_rename                            82
-__SYSCALL(__NR_rename, sys_rename)
-#define __NR_mkdir                             83
-__SYSCALL(__NR_mkdir, sys_mkdir)
-#define __NR_rmdir                             84
-__SYSCALL(__NR_rmdir, sys_rmdir)
-#define __NR_creat                             85
-__SYSCALL(__NR_creat, sys_creat)
-#define __NR_link                              86
-__SYSCALL(__NR_link, sys_link)
-#define __NR_unlink                            87
-__SYSCALL(__NR_unlink, sys_unlink)
-
-#define __NR_symlink                           88
-__SYSCALL(__NR_symlink, sys_symlink)
-#define __NR_readlink                          89
-__SYSCALL(__NR_readlink, sys_readlink)
-#define __NR_chmod                             90
-__SYSCALL(__NR_chmod, sys_chmod)
-#define __NR_fchmod                            91
-__SYSCALL(__NR_fchmod, sys_fchmod)
-#define __NR_chown                             92
-__SYSCALL(__NR_chown, sys_chown)
-#define __NR_fchown                            93
-__SYSCALL(__NR_fchown, sys_fchown)
-#define __NR_lchown                            94
-__SYSCALL(__NR_lchown, sys_lchown)
-#define __NR_umask                             95
-__SYSCALL(__NR_umask, sys_umask)
-
-#define __NR_gettimeofday                      96
-__SYSCALL(__NR_gettimeofday, sys_gettimeofday)
-#define __NR_getrlimit                         97
-__SYSCALL(__NR_getrlimit, sys_getrlimit)
-#define __NR_getrusage                         98
-__SYSCALL(__NR_getrusage, sys_getrusage)
-#define __NR_sysinfo                           99
-__SYSCALL(__NR_sysinfo, sys_sysinfo)
-#define __NR_times                             100
-__SYSCALL(__NR_times, sys_times)
-#define __NR_ptrace                            101
-__SYSCALL(__NR_ptrace, sys_ptrace)
-#define __NR_getuid                            102
-__SYSCALL(__NR_getuid, sys_getuid)
-#define __NR_syslog                            103
-__SYSCALL(__NR_syslog, sys_syslog)
-
-/* at the very end the stuff that never runs during the benchmarks */
-#define __NR_getgid                            104
-__SYSCALL(__NR_getgid, sys_getgid)
-#define __NR_setuid                            105
-__SYSCALL(__NR_setuid, sys_setuid)
-#define __NR_setgid                            106
-__SYSCALL(__NR_setgid, sys_setgid)
-#define __NR_geteuid                           107
-__SYSCALL(__NR_geteuid, sys_geteuid)
-#define __NR_getegid                           108
-__SYSCALL(__NR_getegid, sys_getegid)
-#define __NR_setpgid                           109
-__SYSCALL(__NR_setpgid, sys_setpgid)
-#define __NR_getppid                           110
-__SYSCALL(__NR_getppid, sys_getppid)
-#define __NR_getpgrp                           111
-__SYSCALL(__NR_getpgrp, sys_getpgrp)
-
-#define __NR_setsid                            112
-__SYSCALL(__NR_setsid, sys_setsid)
-#define __NR_setreuid                          113
-__SYSCALL(__NR_setreuid, sys_setreuid)
-#define __NR_setregid                          114
-__SYSCALL(__NR_setregid, sys_setregid)
-#define __NR_getgroups                         115
-__SYSCALL(__NR_getgroups, sys_getgroups)
-#define __NR_setgroups                         116
-__SYSCALL(__NR_setgroups, sys_setgroups)
-#define __NR_setresuid                         117
-__SYSCALL(__NR_setresuid, sys_setresuid)
-#define __NR_getresuid                         118
-__SYSCALL(__NR_getresuid, sys_getresuid)
-#define __NR_setresgid                         119
-__SYSCALL(__NR_setresgid, sys_setresgid)
-
-#define __NR_getresgid                         120
-__SYSCALL(__NR_getresgid, sys_getresgid)
-#define __NR_getpgid                           121
-__SYSCALL(__NR_getpgid, sys_getpgid)
-#define __NR_setfsuid                          122
-__SYSCALL(__NR_setfsuid, sys_setfsuid)
-#define __NR_setfsgid                          123
-__SYSCALL(__NR_setfsgid, sys_setfsgid)
-#define __NR_getsid                            124
-__SYSCALL(__NR_getsid, sys_getsid)
-#define __NR_capget                            125
-__SYSCALL(__NR_capget, sys_capget)
-#define __NR_capset                            126
-__SYSCALL(__NR_capset, sys_capset)
-
-#define __NR_rt_sigpending                     127
-__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
-#define __NR_rt_sigtimedwait                   128
-__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
-#define __NR_rt_sigqueueinfo                   129
-__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
-#define __NR_rt_sigsuspend                     130
-__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend)
-#define __NR_sigaltstack                       131
-__SYSCALL(__NR_sigaltstack, stub_sigaltstack)
-#define __NR_utime                             132
-__SYSCALL(__NR_utime, sys_utime)
-#define __NR_mknod                             133
-__SYSCALL(__NR_mknod, sys_mknod)
-
-/* Only needed for a.out */
-#define __NR_uselib                            134
-__SYSCALL(__NR_uselib, sys_ni_syscall)
-#define __NR_personality                       135
-__SYSCALL(__NR_personality, sys_personality)
-
-#define __NR_ustat                             136
-__SYSCALL(__NR_ustat, sys_ustat)
-#define __NR_statfs                            137
-__SYSCALL(__NR_statfs, sys_statfs)
-#define __NR_fstatfs                           138
-__SYSCALL(__NR_fstatfs, sys_fstatfs)
-#define __NR_sysfs                             139
-__SYSCALL(__NR_sysfs, sys_sysfs)
-
-#define __NR_getpriority                       140
-__SYSCALL(__NR_getpriority, sys_getpriority)
-#define __NR_setpriority                       141
-__SYSCALL(__NR_setpriority, sys_setpriority)
-#define __NR_sched_setparam                    142
-__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
-#define __NR_sched_getparam                    143
-__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
-#define __NR_sched_setscheduler                        144
-__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
-#define __NR_sched_getscheduler                        145
-__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
-#define __NR_sched_get_priority_max            146
-__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
-#define __NR_sched_get_priority_min            147
-__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
-#define __NR_sched_rr_get_interval             148
-__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval)
-
-#define __NR_mlock                             149
-__SYSCALL(__NR_mlock, sys_mlock)
-#define __NR_munlock                           150
-__SYSCALL(__NR_munlock, sys_munlock)
-#define __NR_mlockall                          151
-__SYSCALL(__NR_mlockall, sys_mlockall)
-#define __NR_munlockall                                152
-__SYSCALL(__NR_munlockall, sys_munlockall)
-
-#define __NR_vhangup                           153
-__SYSCALL(__NR_vhangup, sys_vhangup)
-
-#define __NR_modify_ldt                                154
-__SYSCALL(__NR_modify_ldt, sys_modify_ldt)
-
-#define __NR_pivot_root                                155
-__SYSCALL(__NR_pivot_root, sys_pivot_root)
-
-#define __NR__sysctl                           156
-__SYSCALL(__NR__sysctl, sys_sysctl)
-
-#define __NR_prctl                             157
-__SYSCALL(__NR_prctl, sys_prctl)
-#define __NR_arch_prctl                                158
-__SYSCALL(__NR_arch_prctl, sys_arch_prctl)
-
-#define __NR_adjtimex                          159
-__SYSCALL(__NR_adjtimex, sys_adjtimex)
-
-#define __NR_setrlimit                         160
-__SYSCALL(__NR_setrlimit, sys_setrlimit)
-
-#define __NR_chroot                            161
-__SYSCALL(__NR_chroot, sys_chroot)
-
-#define __NR_sync                              162
-__SYSCALL(__NR_sync, sys_sync)
-
-#define __NR_acct                              163
-__SYSCALL(__NR_acct, sys_acct)
-
-#define __NR_settimeofday                      164
-__SYSCALL(__NR_settimeofday, sys_settimeofday)
-
-#define __NR_mount                             165
-__SYSCALL(__NR_mount, sys_mount)
-#define __NR_umount2                           166
-__SYSCALL(__NR_umount2, sys_umount)
-
-#define __NR_swapon                            167
-__SYSCALL(__NR_swapon, sys_swapon)
-#define __NR_swapoff                           168
-__SYSCALL(__NR_swapoff, sys_swapoff)
-
-#define __NR_reboot                            169
-__SYSCALL(__NR_reboot, sys_reboot)
-
-#define __NR_sethostname                       170
-__SYSCALL(__NR_sethostname, sys_sethostname)
-#define __NR_setdomainname                     171
-__SYSCALL(__NR_setdomainname, sys_setdomainname)
-
-#define __NR_iopl                              172
-__SYSCALL(__NR_iopl, stub_iopl)
-#define __NR_ioperm                            173
-__SYSCALL(__NR_ioperm, sys_ioperm)
-
-#define __NR_create_module                     174
-__SYSCALL(__NR_create_module, sys_ni_syscall)
-#define __NR_init_module                       175
-__SYSCALL(__NR_init_module, sys_init_module)
-#define __NR_delete_module                     176
-__SYSCALL(__NR_delete_module, sys_delete_module)
-#define __NR_get_kernel_syms                   177
-__SYSCALL(__NR_get_kernel_syms, sys_ni_syscall)
-#define __NR_query_module                      178
-__SYSCALL(__NR_query_module, sys_ni_syscall)
-
-#define __NR_quotactl                          179
-__SYSCALL(__NR_quotactl, sys_quotactl)
-
-#define __NR_nfsservctl                                180
-__SYSCALL(__NR_nfsservctl, sys_ni_syscall)
-
-/* reserved for LiS/STREAMS */
-#define __NR_getpmsg                           181
-__SYSCALL(__NR_getpmsg, sys_ni_syscall)
-#define __NR_putpmsg                           182
-__SYSCALL(__NR_putpmsg, sys_ni_syscall)
-
-/* reserved for AFS */
-#define __NR_afs_syscall                       183
-__SYSCALL(__NR_afs_syscall, sys_ni_syscall)
-
-/* reserved for tux */
-#define __NR_tuxcall                           184
-__SYSCALL(__NR_tuxcall, sys_ni_syscall)
-
-#define __NR_security                          185
-__SYSCALL(__NR_security, sys_ni_syscall)
-
-#define __NR_gettid                            186
-__SYSCALL(__NR_gettid, sys_gettid)
-
-#define __NR_readahead                         187
-__SYSCALL(__NR_readahead, sys_readahead)
-#define __NR_setxattr                          188
-__SYSCALL(__NR_setxattr, sys_setxattr)
-#define __NR_lsetxattr                         189
-__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
-#define __NR_fsetxattr                         190
-__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
-#define __NR_getxattr                          191
-__SYSCALL(__NR_getxattr, sys_getxattr)
-#define __NR_lgetxattr                         192
-__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
-#define __NR_fgetxattr                         193
-__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
-#define __NR_listxattr                         194
-__SYSCALL(__NR_listxattr, sys_listxattr)
-#define __NR_llistxattr                                195
-__SYSCALL(__NR_llistxattr, sys_llistxattr)
-#define __NR_flistxattr                                196
-__SYSCALL(__NR_flistxattr, sys_flistxattr)
-#define __NR_removexattr                       197
-__SYSCALL(__NR_removexattr, sys_removexattr)
-#define __NR_lremovexattr                      198
-__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
-#define __NR_fremovexattr                      199
-__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
-#define __NR_tkill                             200
-__SYSCALL(__NR_tkill, sys_tkill)
-#define __NR_time                              201
-__SYSCALL(__NR_time, sys_time)
-#define __NR_futex                             202
-__SYSCALL(__NR_futex, sys_futex)
-#define __NR_sched_setaffinity                 203
-__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity)
-#define __NR_sched_getaffinity                 204
-__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
-#define __NR_set_thread_area                   205
-__SYSCALL(__NR_set_thread_area, sys_ni_syscall)        /* use arch_prctl */
-#define __NR_io_setup                          206
-__SYSCALL(__NR_io_setup, sys_io_setup)
-#define __NR_io_destroy                                207
-__SYSCALL(__NR_io_destroy, sys_io_destroy)
-#define __NR_io_getevents                      208
-__SYSCALL(__NR_io_getevents, sys_io_getevents)
-#define __NR_io_submit                         209
-__SYSCALL(__NR_io_submit, sys_io_submit)
-#define __NR_io_cancel                         210
-__SYSCALL(__NR_io_cancel, sys_io_cancel)
-#define __NR_get_thread_area                   211
-__SYSCALL(__NR_get_thread_area, sys_ni_syscall)        /* use arch_prctl */
-#define __NR_lookup_dcookie                    212
-__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
-#define __NR_epoll_create                      213
-__SYSCALL(__NR_epoll_create, sys_epoll_create)
-#define __NR_epoll_ctl_old                     214
-__SYSCALL(__NR_epoll_ctl_old, sys_ni_syscall)
-#define __NR_epoll_wait_old                    215
-__SYSCALL(__NR_epoll_wait_old, sys_ni_syscall)
-#define __NR_remap_file_pages                  216
-__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
-#define __NR_getdents64                                217
-__SYSCALL(__NR_getdents64, sys_getdents64)
-#define __NR_set_tid_address                   218
-__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
-#define __NR_restart_syscall                   219
-__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
-#define __NR_semtimedop                                220
-__SYSCALL(__NR_semtimedop, sys_semtimedop)
-#define __NR_fadvise64                         221
-__SYSCALL(__NR_fadvise64, sys_fadvise64)
-#define __NR_timer_create                      222
-__SYSCALL(__NR_timer_create, sys_timer_create)
-#define __NR_timer_settime                     223
-__SYSCALL(__NR_timer_settime, sys_timer_settime)
-#define __NR_timer_gettime                     224
-__SYSCALL(__NR_timer_gettime, sys_timer_gettime)
-#define __NR_timer_getoverrun                  225
-__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
-#define __NR_timer_delete                      226
-__SYSCALL(__NR_timer_delete, sys_timer_delete)
-#define __NR_clock_settime                     227
-__SYSCALL(__NR_clock_settime, sys_clock_settime)
-#define __NR_clock_gettime                     228
-__SYSCALL(__NR_clock_gettime, sys_clock_gettime)
-#define __NR_clock_getres                      229
-__SYSCALL(__NR_clock_getres, sys_clock_getres)
-#define __NR_clock_nanosleep                   230
-__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep)
-#define __NR_exit_group                                231
-__SYSCALL(__NR_exit_group, sys_exit_group)
-#define __NR_epoll_wait                                232
-__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
-#define __NR_epoll_ctl                         233
-__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
-#define __NR_tgkill                            234
-__SYSCALL(__NR_tgkill, sys_tgkill)
-#define __NR_utimes                            235
-__SYSCALL(__NR_utimes, sys_utimes)
-#define __NR_vserver                           236
-__SYSCALL(__NR_vserver, sys_ni_syscall)
-#define __NR_mbind                             237
-__SYSCALL(__NR_mbind, sys_mbind)
-#define __NR_set_mempolicy                     238
-__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
-#define __NR_get_mempolicy                     239
-__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
-#define __NR_mq_open                           240
-__SYSCALL(__NR_mq_open, sys_mq_open)
-#define __NR_mq_unlink                         241
-__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
-#define __NR_mq_timedsend                      242
-__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend)
-#define __NR_mq_timedreceive                   243
-__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive)
-#define __NR_mq_notify                         244
-__SYSCALL(__NR_mq_notify, sys_mq_notify)
-#define __NR_mq_getsetattr                     245
-__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
-#define __NR_kexec_load                                246
-__SYSCALL(__NR_kexec_load, sys_kexec_load)
-#define __NR_waitid                            247
-__SYSCALL(__NR_waitid, sys_waitid)
-#define __NR_add_key                           248
-__SYSCALL(__NR_add_key, sys_add_key)
-#define __NR_request_key                       249
-__SYSCALL(__NR_request_key, sys_request_key)
-#define __NR_keyctl                            250
-__SYSCALL(__NR_keyctl, sys_keyctl)
-#define __NR_ioprio_set                                251
-__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
-#define __NR_ioprio_get                                252
-__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
-#define __NR_inotify_init                      253
-__SYSCALL(__NR_inotify_init, sys_inotify_init)
-#define __NR_inotify_add_watch                 254
-__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
-#define __NR_inotify_rm_watch                  255
-__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
-#define __NR_migrate_pages                     256
-__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
-#define __NR_openat                            257
-__SYSCALL(__NR_openat, sys_openat)
-#define __NR_mkdirat                           258
-__SYSCALL(__NR_mkdirat, sys_mkdirat)
-#define __NR_mknodat                           259
-__SYSCALL(__NR_mknodat, sys_mknodat)
-#define __NR_fchownat                          260
-__SYSCALL(__NR_fchownat, sys_fchownat)
-#define __NR_futimesat                         261
-__SYSCALL(__NR_futimesat, sys_futimesat)
-#define __NR_newfstatat                                262
-__SYSCALL(__NR_newfstatat, sys_newfstatat)
-#define __NR_unlinkat                          263
-__SYSCALL(__NR_unlinkat, sys_unlinkat)
-#define __NR_renameat                          264
-__SYSCALL(__NR_renameat, sys_renameat)
-#define __NR_linkat                            265
-__SYSCALL(__NR_linkat, sys_linkat)
-#define __NR_symlinkat                         266
-__SYSCALL(__NR_symlinkat, sys_symlinkat)
-#define __NR_readlinkat                                267
-__SYSCALL(__NR_readlinkat, sys_readlinkat)
-#define __NR_fchmodat                          268
-__SYSCALL(__NR_fchmodat, sys_fchmodat)
-#define __NR_faccessat                         269
-__SYSCALL(__NR_faccessat, sys_faccessat)
-#define __NR_pselect6                          270
-__SYSCALL(__NR_pselect6, sys_pselect6)
-#define __NR_ppoll                             271
-__SYSCALL(__NR_ppoll,  sys_ppoll)
-#define __NR_unshare                           272
-__SYSCALL(__NR_unshare,        sys_unshare)
-#define __NR_set_robust_list                   273
-__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
-#define __NR_get_robust_list                   274
-__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
-#define __NR_splice                            275
-__SYSCALL(__NR_splice, sys_splice)
-#define __NR_tee                               276
-__SYSCALL(__NR_tee, sys_tee)
-#define __NR_sync_file_range                   277
-__SYSCALL(__NR_sync_file_range, sys_sync_file_range)
-#define __NR_vmsplice                          278
-__SYSCALL(__NR_vmsplice, sys_vmsplice)
-#define __NR_move_pages                                279
-__SYSCALL(__NR_move_pages, sys_move_pages)
-#define __NR_utimensat                         280
-__SYSCALL(__NR_utimensat, sys_utimensat)
-#define __NR_epoll_pwait                       281
-__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
-#define __NR_signalfd                          282
-__SYSCALL(__NR_signalfd, sys_signalfd)
-#define __NR_timerfd_create                    283
-__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
-#define __NR_eventfd                           284
-__SYSCALL(__NR_eventfd, sys_eventfd)
-#define __NR_fallocate                         285
-__SYSCALL(__NR_fallocate, sys_fallocate)
-#define __NR_timerfd_settime                   286
-__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
-#define __NR_timerfd_gettime                   287
-__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
-#define __NR_accept4                           288
-__SYSCALL(__NR_accept4, sys_accept4)
-#define __NR_signalfd4                         289
-__SYSCALL(__NR_signalfd4, sys_signalfd4)
-#define __NR_eventfd2                          290
-__SYSCALL(__NR_eventfd2, sys_eventfd2)
-#define __NR_epoll_create1                     291
-__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
-#define __NR_dup3                              292
-__SYSCALL(__NR_dup3, sys_dup3)
-#define __NR_pipe2                             293
-__SYSCALL(__NR_pipe2, sys_pipe2)
-#define __NR_inotify_init1                     294
-__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
-#define __NR_preadv                            295
-__SYSCALL(__NR_preadv, sys_preadv)
-#define __NR_pwritev                           296
-__SYSCALL(__NR_pwritev, sys_pwritev)
-#define __NR_rt_tgsigqueueinfo                 297
-__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo)
-#define __NR_perf_event_open                   298
-__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
-#define __NR_recvmmsg                          299
-__SYSCALL(__NR_recvmmsg, sys_recvmmsg)
-#define __NR_fanotify_init                     300
-__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
-#define __NR_fanotify_mark                     301
-__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
-#define __NR_prlimit64                         302
-__SYSCALL(__NR_prlimit64, sys_prlimit64)
-#define __NR_name_to_handle_at                 303
-__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
-#define __NR_open_by_handle_at                 304
-__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at)
-#define __NR_clock_adjtime                     305
-__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime)
-#define __NR_syncfs                             306
-__SYSCALL(__NR_syncfs, sys_syncfs)
-#define __NR_sendmmsg                          307
-__SYSCALL(__NR_sendmmsg, sys_sendmmsg)
-#define __NR_setns                             308
-__SYSCALL(__NR_setns, sys_setns)
-#define __NR_getcpu                            309
-__SYSCALL(__NR_getcpu, sys_getcpu)
-#define __NR_process_vm_readv                  310
-__SYSCALL(__NR_process_vm_readv, sys_process_vm_readv)
-#define __NR_process_vm_writev                 311
-__SYSCALL(__NR_process_vm_writev, sys_process_vm_writev)
-
-#ifndef __NO_STUBS
-#define __ARCH_WANT_OLD_READDIR
-#define __ARCH_WANT_OLD_STAT
-#define __ARCH_WANT_SYS_ALARM
-#define __ARCH_WANT_SYS_GETHOSTNAME
-#define __ARCH_WANT_SYS_PAUSE
-#define __ARCH_WANT_SYS_SGETMASK
-#define __ARCH_WANT_SYS_SIGNAL
-#define __ARCH_WANT_SYS_UTIME
-#define __ARCH_WANT_SYS_WAITPID
-#define __ARCH_WANT_SYS_SOCKETCALL
-#define __ARCH_WANT_SYS_FADVISE64
-#define __ARCH_WANT_SYS_GETPGRP
-#define __ARCH_WANT_SYS_LLSEEK
-#define __ARCH_WANT_SYS_NICE
-#define __ARCH_WANT_SYS_OLD_GETRLIMIT
-#define __ARCH_WANT_SYS_OLD_UNAME
-#define __ARCH_WANT_SYS_OLDUMOUNT
-#define __ARCH_WANT_SYS_SIGPENDING
-#define __ARCH_WANT_SYS_SIGPROCMASK
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-#define __ARCH_WANT_SYS_TIME
-#define __ARCH_WANT_COMPAT_SYS_TIME
-#endif /* __NO_STUBS */
-
-#ifdef __KERNEL__
-
-#ifndef COMPILE_OFFSETS
-#include <asm/asm-offsets.h>
-#define NR_syscalls (__NR_syscall_max + 1)
-#endif
-
-/*
- * "Conditional" syscalls
- *
- * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
- * but it doesn't work on all toolchains, so we just do it by hand
- */
-#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_X86_UNISTD_64_H */
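
A note on the cond_syscall() macro in the block removed above: it emits inline assembly that declares a syscall symbol weak and binds it to sys_ni_syscall, so a syscall whose implementation is configured out still resolves at link time. A minimal illustration of what one invocation expands to (sys_quotactl is only an example here, not taken from this commit):

        cond_syscall(sys_quotactl);

        /* preprocesses to: */
        asm(".weak\tsys_quotactl\n\t.set\tsys_quotactl,sys_ni_syscall");

        /* If the quota code is not built, the weak binding resolves to
         * sys_ni_syscall() (which returns -ENOSYS); a real sys_quotactl
         * definition overrides the weak one at link time. */
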
index 8e862aaf0d905555e23265df00e139346d9b6649..becf47b81735ef6731e0271d0de7c0d939a389a9 100644 (file)
@@ -65,7 +65,7 @@
  * UV2: Bit 19 selects between
  *  (0): 10 microsecond timebase and
  *  (1): 80 microseconds
- *  we're using 655us, similar to UV1: 65 units of 10us
+ *  we're using 560us, similar to UV1: 65 units of 10us
  */
 #define UV1_INTD_SOFT_ACK_TIMEOUT_PERIOD (9UL)
 #define UV2_INTD_SOFT_ACK_TIMEOUT_PERIOD (15UL)
 #define FLUSH_RETRY_TIMEOUT            2
 #define FLUSH_GIVEUP                   3
 #define FLUSH_COMPLETE                 4
+#define FLUSH_RETRY_BUSYBUG            5
 
 /*
  * tuning the action when the numalink network is extremely delayed
@@ -235,10 +236,10 @@ struct bau_msg_payload {
 
 
 /*
- * Message header:  16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+ * UV1 Message header:  16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
  * see table 4.2.3.0.1 in broacast_assist spec.
  */
-struct bau_msg_header {
+struct uv1_bau_msg_header {
        unsigned int    dest_subnodeid:6;       /* must be 0x10, for the LB */
        /* bits 5:0 */
        unsigned int    base_dest_nasid:15;     /* nasid of the first bit */
@@ -317,20 +318,88 @@ struct bau_msg_header {
        /* bits 127:107 */
 };
 
+/*
+ * UV2 Message header:  16 bytes (128 bits) (bytes 0x30-0x3f of descriptor)
+ * see figure 9-2 of harp_sys.pdf
+ */
+struct uv2_bau_msg_header {
+       unsigned int    base_dest_nasid:15;     /* nasid of the first bit */
+       /* bits 14:0 */                         /* in uvhub map */
+       unsigned int    dest_subnodeid:5;       /* must be 0x10, for the LB */
+       /* bits 19:15 */
+       unsigned int    rsvd_1:1;               /* must be zero */
+       /* bit 20 */
+       /* Address bits 59:21 */
+       /* bits 25:2 of address (44:21) are payload */
+       /* these next 24 bits become bytes 12-14 of msg */
+       /* bits 28:21 land in byte 12 */
+       unsigned int    replied_to:1;           /* sent as 0 by the source to
+                                                  byte 12 */
+       /* bit 21 */
+       unsigned int    msg_type:3;             /* software type of the
+                                                  message */
+       /* bits 24:22 */
+       unsigned int    canceled:1;             /* message canceled, resource
+                                                  is to be freed*/
+       /* bit 25 */
+       unsigned int    payload_1:3;            /* not currently used */
+       /* bits 28:26 */
+
+       /* bits 36:29 land in byte 13 */
+       unsigned int    payload_2a:3;           /* not currently used */
+       unsigned int    payload_2b:5;           /* not currently used */
+       /* bits 36:29 */
+
+       /* bits 44:37 land in byte 14 */
+       unsigned int    payload_3:8;            /* not currently used */
+       /* bits 44:37 */
+
+       unsigned int    rsvd_2:7;               /* reserved */
+       /* bits 51:45 */
+       unsigned int    swack_flag:1;           /* software acknowledge flag */
+       /* bit 52 */
+       unsigned int    rsvd_3a:3;              /* must be zero */
+       unsigned int    rsvd_3b:8;              /* must be zero */
+       unsigned int    rsvd_3c:8;              /* must be zero */
+       unsigned int    rsvd_3d:3;              /* must be zero */
+       /* bits 74:53 */
+       unsigned int    fairness:3;             /* usually zero */
+       /* bits 77:75 */
+
+       unsigned int    sequence:16;            /* message sequence number */
+       /* bits 93:78  Suppl_A  */
+       unsigned int    chaining:1;             /* next descriptor is part of
+                                                  this activation*/
+       /* bit 94 */
+       unsigned int    multilevel:1;           /* multi-level multicast
+                                                  format */
+       /* bit 95 */
+       unsigned int    rsvd_4:24;              /* ordered / source node /
+                                                  source subnode / aging
+                                                  must be zero */
+       /* bits 119:96 */
+       unsigned int    command:8;              /* message type */
+       /* bits 127:120 */
+};
+
 /*
  * The activation descriptor:
  * The format of the message to send, plus all accompanying control
  * Should be 64 bytes
  */
 struct bau_desc {
-       struct pnmask                   distribution;
+       struct pnmask                           distribution;
        /*
         * message template, consisting of header and payload:
         */
-       struct bau_msg_header           header;
-       struct bau_msg_payload          payload;
+       union bau_msg_header {
+               struct uv1_bau_msg_header       uv1_hdr;
+               struct uv2_bau_msg_header       uv2_hdr;
+       } header;
+
+       struct bau_msg_payload                  payload;
 };
-/*
+/* UV1:
  *   -payload--    ---------header------
  *   bytes 0-11    bits 41-56  bits 58-81
  *       A           B  (2)      C (3)
@@ -340,6 +409,16 @@ struct bau_desc {
  *   bytes 0-11  bytes 12-14  bytes 16-17  (byte 15 filled in by hw as vector)
  *   ------------payload queue-----------
  */
+/* UV2:
+ *   -payload--    ---------header------
+ *   bytes 0-11    bits 70-78  bits 21-44
+ *       A           B  (2)      C (3)
+ *
+ *            A/B/C are moved to:
+ *       A            C          B
+ *   bytes 0-11  bytes 12-14  bytes 16-17  (byte 15 filled in by hw as vector)
+ *   ------------payload queue-----------
+ */
 
 /*
  * The payload queue on the destination side is an array of these.
@@ -385,7 +464,6 @@ struct bau_pq_entry {
 struct msg_desc {
        struct bau_pq_entry     *msg;
        int                     msg_slot;
-       int                     swack_slot;
        struct bau_pq_entry     *queue_first;
        struct bau_pq_entry     *queue_last;
 };
@@ -405,6 +483,7 @@ struct ptc_stats {
                                                   requests */
        unsigned long   s_stimeout;             /* source side timeouts */
        unsigned long   s_dtimeout;             /* destination side timeouts */
+       unsigned long   s_strongnacks;          /* number of strong nack's */
        unsigned long   s_time;                 /* time spent in sending side */
        unsigned long   s_retriesok;            /* successful retries */
        unsigned long   s_ntargcpu;             /* total number of cpu's
@@ -439,6 +518,9 @@ struct ptc_stats {
        unsigned long   s_retry_messages;       /* retry broadcasts */
        unsigned long   s_bau_reenabled;        /* for bau enable/disable */
        unsigned long   s_bau_disabled;         /* for bau enable/disable */
+       unsigned long   s_uv2_wars;             /* uv2 workaround, perm. busy */
+       unsigned long   s_uv2_wars_hw;          /* uv2 workaround, hiwater */
+       unsigned long   s_uv2_war_waits;        /* uv2 workaround, long waits */
        /* destination statistics */
        unsigned long   d_alltlb;               /* times all tlb's on this
                                                   cpu were flushed */
@@ -511,9 +593,12 @@ struct bau_control {
        short                   osnode;
        short                   uvhub_cpu;
        short                   uvhub;
+       short                   uvhub_version;
        short                   cpus_in_socket;
        short                   cpus_in_uvhub;
        short                   partition_base_pnode;
+       short                   using_desc; /* an index, like uvhub_cpu */
+       unsigned int            inuse_map;
        unsigned short          message_number;
        unsigned short          uvhub_quiesce;
        short                   socket_acknowledge_count[DEST_Q_SIZE];
@@ -531,6 +616,7 @@ struct bau_control {
        int                     cong_response_us;
        int                     cong_reps;
        int                     cong_period;
+       unsigned long           clocks_per_100_usec;
        cycles_t                period_time;
        long                    period_requests;
        struct hub_and_pnode    *thp;
@@ -591,6 +677,11 @@ static inline void write_mmr_sw_ack(unsigned long mr)
        uv_write_local_mmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
 }
 
+static inline void write_gmmr_sw_ack(int pnode, unsigned long mr)
+{
+       write_gmmr(pnode, UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE_ALIAS, mr);
+}
+
 static inline unsigned long read_mmr_sw_ack(void)
 {
        return read_lmmr(UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE);
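
The union introduced above (uv1_hdr/uv2_hdr inside struct bau_desc) pairs with the new uvhub_version field added to struct bau_control further down. A minimal sketch of how a sender could pick the matching layout; set_msg_type() is a hypothetical helper, and the msg_type field is assumed to exist in the UV1 variant just as it does in the UV2 struct shown above:

        static void set_msg_type(struct bau_desc *bd, struct bau_control *bcp,
                                 int type)
        {
                if (bcp->uvhub_version == 1)
                        bd->header.uv1_hdr.msg_type = type;     /* UV1 bit layout */
                else
                        bd->header.uv2_hdr.msg_type = type;     /* UV2 bit layout */
        }
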
index 54a13aaebc4006c6a55cdce0fc414e2bd563a06a..21f7385badb8f9eb4249aef142487e27b264b32d 100644 (file)
@@ -318,13 +318,13 @@ uv_gpa_in_mmr_space(unsigned long gpa)
 /* UV global physical address --> socket phys RAM */
 static inline unsigned long uv_gpa_to_soc_phys_ram(unsigned long gpa)
 {
-       unsigned long paddr = gpa & uv_hub_info->gpa_mask;
+       unsigned long paddr;
        unsigned long remap_base = uv_hub_info->lowmem_remap_base;
        unsigned long remap_top =  uv_hub_info->lowmem_remap_top;
 
        gpa = ((gpa << uv_hub_info->m_shift) >> uv_hub_info->m_shift) |
                ((gpa >> uv_hub_info->n_lshift) << uv_hub_info->m_val);
-       gpa = gpa & uv_hub_info->gpa_mask;
+       paddr = gpa & uv_hub_info->gpa_mask;
        if (paddr >= remap_base && paddr < remap_base + remap_top)
                paddr -= remap_base;
        return paddr;
index 02b2f05b371e9a8a8c84fb2f0bd5742b1d8d6963..5369059c07a907fac7e6a755aa24e7ae280bd6a0 100644 (file)
@@ -25,7 +25,8 @@ obj-$(CONFIG_IRQ_WORK)  += irq_work.o
 obj-y                  += probe_roms.o
 obj-$(CONFIG_X86_32)   += sys_i386_32.o i386_ksyms_32.o
 obj-$(CONFIG_X86_64)   += sys_x86_64.o x8664_ksyms_64.o
-obj-$(CONFIG_X86_64)   += syscall_64.o vsyscall_64.o
+obj-y                  += syscall_$(BITS).o
+obj-$(CONFIG_X86_64)   += vsyscall_64.o
 obj-$(CONFIG_X86_64)   += vsyscall_emu_64.o
 obj-y                  += bootflag.o e820.o
 obj-y                  += pci-dma.o quirks.o topology.o kdebugfs.o
index 395a10e68067625e783b0ed1f8546231727ac230..85d98ab15cdcfd019c1d8bc6ece2f049ace07b65 100644 (file)
@@ -3,6 +3,11 @@
 #include <linux/lguest.h>
 #include "../../../drivers/lguest/lg.h"
 
+#define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
+static char syscalls[] = {
+#include <asm/syscalls_32.h>
+};
+
 /* workaround for a warning with -Wmissing-prototypes */
 void foo(void);
 
@@ -76,4 +81,7 @@ void foo(void)
        OFFSET(LGUEST_PAGES_regs_errcode, lguest_pages, regs.errcode);
        OFFSET(LGUEST_PAGES_regs, lguest_pages, regs);
 #endif
+       BLANK();
+       DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
+       DEFINE(NR_syscalls, sizeof(syscalls));
 }
index e72a1194af22a76707a9f44865b8211b4b1e1e7c..834e897b1e25b66e864d010266e5df29fecfb892 100644 (file)
@@ -1,11 +1,12 @@
 #include <asm/ia32.h>
 
-#define __NO_STUBS 1
-#undef __SYSCALL
-#undef _ASM_X86_UNISTD_64_H
-#define __SYSCALL(nr, sym) [nr] = 1,
-static char syscalls[] = {
-#include <asm/unistd.h>
+#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
+static char syscalls_64[] = {
+#include <asm/syscalls_64.h>
+};
+#define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
+static char syscalls_ia32[] = {
+#include <asm/syscalls_32.h>
 };
 
 int main(void)
@@ -72,7 +73,11 @@ int main(void)
        OFFSET(TSS_ist, tss_struct, x86_tss.ist);
        BLANK();
 
-       DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
+       DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
+       DEFINE(NR_syscalls, sizeof(syscalls_64));
+
+       DEFINE(__NR_ia32_syscall_max, sizeof(syscalls_ia32) - 1);
+       DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32));
 
        return 0;
 }
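
Both asm-offsets hunks above count syscalls at compile time with the same trick: the table macro is redefined to emit "[nr] = 1," into a char array, the generated header is included, and sizeof() then yields the highest syscall number plus one. A self-contained analogue of the pattern (the header contents here are invented for illustration):

        /* syscalls_demo.h -- stands in for the generated asm/syscalls_*.h */
        __SYSCALL_64(0, sys_read, sys_read)
        __SYSCALL_64(1, sys_write, sys_write)
        __SYSCALL_64(5, sys_fstat, sys_fstat)

        /* demo.c */
        #include <stdio.h>

        #define __SYSCALL_64(nr, sym, compat) [nr] = 1,
        static char syscalls[] = {
        #include "syscalls_demo.h"
        };
        #undef __SYSCALL_64

        int main(void)
        {
                /* the array is sized by the largest designated index, so
                 * sizeof(syscalls) - 1 is the highest syscall number (5 here) */
                printf("__NR_syscall_max = %zu\n", sizeof(syscalls) - 1);
                return 0;
        }
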
index 29ba3297e48006bea5987678bb4806d3c534fc2c..5a11ae2e9e917a07eae73246c7a73ace07edda39 100644 (file)
@@ -1859,7 +1859,7 @@ static struct bus_type mce_subsys = {
        .dev_name       = "machinecheck",
 };
 
-DEFINE_PER_CPU(struct device, mce_device);
+struct device *mce_device[CONFIG_NR_CPUS];
 
 __cpuinitdata
 void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
@@ -2001,19 +2001,27 @@ static struct device_attribute *mce_device_attrs[] = {
 
 static cpumask_var_t mce_device_initialized;
 
+static void mce_device_release(struct device *dev)
+{
+       kfree(dev);
+}
+
 /* Per cpu device init. All of the cpus still share the same ctrl bank: */
 static __cpuinit int mce_device_create(unsigned int cpu)
 {
-       struct device *dev = &per_cpu(mce_device, cpu);
+       struct device *dev;
        int err;
        int i, j;
 
        if (!mce_available(&boot_cpu_data))
                return -EIO;
 
-       memset(dev, 0, sizeof(struct device));
+       dev = kzalloc(sizeof *dev, GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
        dev->id  = cpu;
        dev->bus = &mce_subsys;
+       dev->release = &mce_device_release;
 
        err = device_register(dev);
        if (err)
@@ -2030,6 +2038,7 @@ static __cpuinit int mce_device_create(unsigned int cpu)
                        goto error2;
        }
        cpumask_set_cpu(cpu, mce_device_initialized);
+       mce_device[cpu] = dev;
 
        return 0;
 error2:
@@ -2046,7 +2055,7 @@ error:
 
 static __cpuinit void mce_device_remove(unsigned int cpu)
 {
-       struct device *dev = &per_cpu(mce_device, cpu);
+       struct device *dev = mce_device[cpu];
        int i;
 
        if (!cpumask_test_cpu(cpu, mce_device_initialized))
@@ -2060,6 +2069,7 @@ static __cpuinit void mce_device_remove(unsigned int cpu)
 
        device_unregister(dev);
        cpumask_clear_cpu(cpu, mce_device_initialized);
+       mce_device[cpu] = NULL;
 }
 
 /* Make sure there are no machine checks on offlined CPUs. */
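
The mce.c hunk above replaces the statically allocated per-cpu struct device with one obtained from kzalloc() and given a release() callback, which is the pattern the driver core expects: a registered device is refcounted and must know how to free itself once the last reference is dropped. A generic sketch of that pattern, not specific to this commit (demo_create()/demo_release() are made-up names):

        static void demo_release(struct device *dev)
        {
                kfree(dev);                     /* dev was kzalloc'ed in demo_create() */
        }

        static int demo_create(struct bus_type *bus, int id)
        {
                struct device *dev;
                int err;

                dev = kzalloc(sizeof(*dev), GFP_KERNEL);
                if (!dev)
                        return -ENOMEM;
                dev->id = id;
                dev->bus = bus;
                dev->release = demo_release;

                err = device_register(dev);
                if (err)
                        put_device(dev);        /* drops the ref; demo_release() frees */
                return err;
        }
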
index ba0b94a7e2040fcbd09de26a8729628758fd3370..786e76a86322c99ffd67a6fd9ebf03f85b1d36a3 100644 (file)
@@ -523,6 +523,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
        int i, err = 0;
        struct threshold_bank *b = NULL;
+       struct device *dev = mce_device[cpu];
        char name[32];
 
        sprintf(name, "threshold_bank%i", bank);
@@ -543,8 +544,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                if (!b)
                        goto out;
 
-               err = sysfs_create_link(&per_cpu(mce_device, cpu).kobj,
-                                       b->kobj, name);
+               err = sysfs_create_link(&dev->kobj, b->kobj, name);
                if (err)
                        goto out;
 
@@ -565,7 +565,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                goto out;
        }
 
-       b->kobj = kobject_create_and_add(name, &per_cpu(mce_device, cpu).kobj);
+       b->kobj = kobject_create_and_add(name, &dev->kobj);
        if (!b->kobj)
                goto out_free;
 
@@ -585,8 +585,9 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
                if (i == cpu)
                        continue;
 
-               err = sysfs_create_link(&per_cpu(mce_device, i).kobj,
-                                       b->kobj, name);
+               dev = mce_device[i];
+               if (dev)
+                       err = sysfs_create_link(&dev->kobj,b->kobj, name);
                if (err)
                        goto out;
 
@@ -649,6 +650,7 @@ static void deallocate_threshold_block(unsigned int cpu,
 static void threshold_remove_bank(unsigned int cpu, int bank)
 {
        struct threshold_bank *b;
+       struct device *dev;
        char name[32];
        int i = 0;
 
@@ -663,7 +665,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #ifdef CONFIG_SMP
        /* sibling symlink */
        if (shared_bank[bank] && b->blocks->cpu != cpu) {
-               sysfs_remove_link(&per_cpu(mce_device, cpu).kobj, name);
+               sysfs_remove_link(&mce_device[cpu]->kobj, name);
                per_cpu(threshold_banks, cpu)[bank] = NULL;
 
                return;
@@ -675,7 +677,9 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
                if (i == cpu)
                        continue;
 
-               sysfs_remove_link(&per_cpu(mce_device, i).kobj, name);
+               dev = mce_device[i];
+               if (dev)
+                       sysfs_remove_link(&dev->kobj, name);
                per_cpu(threshold_banks, i)[bank] = NULL;
        }
 
index 1aae78f775fc18e4f01b2ed568a1934e2de6dd31..4025fe4f928f6f4cb2ddcfc0233197e9e0b3ad44 100644 (file)
@@ -252,7 +252,8 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
        unsigned short ss;
        unsigned long sp;
 #endif
-       printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
+       printk(KERN_DEFAULT
+              "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
 #ifdef CONFIG_PREEMPT
        printk("PREEMPT ");
 #endif
index 6d728d9284bd0e3b94213206bd93af5afbfb7912..17107bd6e1f0af06b5e59b1c48d452305a8b3214 100644 (file)
@@ -129,7 +129,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
        if (!stack) {
                if (regs)
                        stack = (unsigned long *)regs->sp;
-               else if (task && task != current)
+               else if (task != current)
                        stack = (unsigned long *)task->thread.sp;
                else
                        stack = &dummy;
@@ -269,11 +269,11 @@ void show_registers(struct pt_regs *regs)
                unsigned char c;
                u8 *ip;
 
-               printk(KERN_EMERG "Stack:\n");
+               printk(KERN_DEFAULT "Stack:\n");
                show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
-                                  0, KERN_EMERG);
+                                  0, KERN_DEFAULT);
 
-               printk(KERN_EMERG "Code: ");
+               printk(KERN_DEFAULT "Code: ");
 
                ip = (u8 *)regs->ip - code_prologue;
                if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
index 174d938d576b347365f4fdab2de8e7b85fd36f53..62d61e9976eb0a83bca3aae5c979bb63505c0bbb 100644 (file)
@@ -703,7 +703,7 @@ void __init e820_mark_nosave_regions(unsigned long limit_pfn)
 }
 #endif
 
-#ifdef CONFIG_HIBERNATION
+#ifdef CONFIG_ACPI
 /**
  * Mark ACPI NVS memory region, so that we can save/restore it during
  * hibernation and the subsequent resume.
@@ -716,7 +716,7 @@ static int __init e820_mark_nvs_memory(void)
                struct e820entry *ei = &e820.map[i];
 
                if (ei->type == E820_NVS)
-                       suspend_nvs_register(ei->addr, ei->size);
+                       acpi_nvs_register(ei->addr, ei->size);
        }
 
        return 0;
index 22d0e21b4dd793fa509e52541bf9b57e7778d1d7..79d97e68f04238ac13993e89c016e67a7b647af6 100644 (file)
@@ -42,6 +42,7 @@
  */
 
 #include <linux/linkage.h>
+#include <linux/err.h>
 #include <asm/thread_info.h>
 #include <asm/irqflags.h>
 #include <asm/errno.h>
@@ -81,8 +82,6 @@
  * enough to patch inline, increasing performance.
  */
 
-#define nr_syscalls ((syscall_table_size)/4)
-
 #ifdef CONFIG_PREEMPT
 #define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
 #else
@@ -423,7 +422,7 @@ sysenter_past_esp:
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
        jnz sysenter_audit
 sysenter_do_call:
-       cmpl $(nr_syscalls), %eax
+       cmpl $(NR_syscalls), %eax
        jae syscall_badsys
        call *sys_call_table(,%eax,4)
        movl %eax,PT_EAX(%esp)
@@ -455,7 +454,7 @@ sysenter_audit:
        movl %ebx,%ecx                  /* 3rd arg: 1st syscall arg */
        movl %eax,%edx                  /* 2nd arg: syscall number */
        movl $AUDIT_ARCH_I386,%eax      /* 1st arg: audit arch */
-       call audit_syscall_entry
+       call __audit_syscall_entry
        pushl_cfi %ebx
        movl PT_EAX(%esp),%eax          /* reload syscall number */
        jmp sysenter_do_call
@@ -466,11 +465,10 @@ sysexit_audit:
        TRACE_IRQS_ON
        ENABLE_INTERRUPTS(CLBR_ANY)
        movl %eax,%edx          /* second arg, syscall return value */
-       cmpl $0,%eax            /* is it < 0? */
-       setl %al                /* 1 if so, 0 if not */
+       cmpl $-MAX_ERRNO,%eax   /* is it an error ? */
+       setbe %al               /* 1 if so, 0 if not */
        movzbl %al,%eax         /* zero-extend that */
-       inc %eax /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
-       call audit_syscall_exit
+       call __audit_syscall_exit
        DISABLE_INTERRUPTS(CLBR_ANY)
        TRACE_IRQS_OFF
        movl TI_flags(%ebp), %ecx
@@ -504,7 +502,7 @@ ENTRY(system_call)
                                        # system call tracing in operation / emulation
        testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
        jnz syscall_trace_entry
-       cmpl $(nr_syscalls), %eax
+       cmpl $(NR_syscalls), %eax
        jae syscall_badsys
 syscall_call:
        call *sys_call_table(,%eax,4)
@@ -654,7 +652,7 @@ syscall_trace_entry:
        movl %esp, %eax
        call syscall_trace_enter
        /* What it returned is what we'll actually use.  */
-       cmpl $(nr_syscalls), %eax
+       cmpl $(NR_syscalls), %eax
        jnae syscall_call
        jmp syscall_exit
 END(syscall_trace_entry)
@@ -694,29 +692,28 @@ END(syscall_badsys)
  * System calls that need a pt_regs pointer.
  */
 #define PTREGSCALL0(name) \
-       ALIGN; \
-ptregs_##name: \
+ENTRY(ptregs_##name) ;  \
        leal 4(%esp),%eax; \
-       jmp sys_##name;
+       jmp sys_##name; \
+ENDPROC(ptregs_##name)
 
 #define PTREGSCALL1(name) \
-       ALIGN; \
-ptregs_##name: \
+ENTRY(ptregs_##name) ; \
        leal 4(%esp),%edx; \
        movl (PT_EBX+4)(%esp),%eax; \
-       jmp sys_##name;
+       jmp sys_##name; \
+ENDPROC(ptregs_##name)
 
 #define PTREGSCALL2(name) \
-       ALIGN; \
-ptregs_##name: \
+ENTRY(ptregs_##name) ; \
        leal 4(%esp),%ecx; \
        movl (PT_ECX+4)(%esp),%edx; \
        movl (PT_EBX+4)(%esp),%eax; \
-       jmp sys_##name;
+       jmp sys_##name; \
+ENDPROC(ptregs_##name)
 
 #define PTREGSCALL3(name) \
-       ALIGN; \
-ptregs_##name: \
+ENTRY(ptregs_##name) ; \
        CFI_STARTPROC; \
        leal 4(%esp),%eax; \
        pushl_cfi %eax; \
@@ -741,8 +738,7 @@ PTREGSCALL2(vm86)
 PTREGSCALL1(vm86old)
 
 /* Clone is an oddball.  The 4th arg is in %edi */
-       ALIGN;
-ptregs_clone:
+ENTRY(ptregs_clone)
        CFI_STARTPROC
        leal 4(%esp),%eax
        pushl_cfi %eax
@@ -1213,11 +1209,6 @@ return_to_handler:
        jmp *%ecx
 #endif
 
-.section .rodata,"a"
-#include "syscall_table_32.S"
-
-syscall_table_size=(.-sys_call_table)
-
 /*
  * Some functions should be protected against kprobes
  */
index 940ba711fc286510ddc20c441fd882fe7a632b62..3fe8239fd8fbd8ef692f57517d0d1dbd37f284ae 100644 (file)
@@ -55,6 +55,7 @@
 #include <asm/paravirt.h>
 #include <asm/ftrace.h>
 #include <asm/percpu.h>
+#include <linux/err.h>
 
 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
 #include <linux/elf-em.h>
@@ -548,7 +549,7 @@ badsys:
 #ifdef CONFIG_AUDITSYSCALL
        /*
         * Fast path for syscall audit without full syscall trace.
-        * We just call audit_syscall_entry() directly, and then
+        * We just call __audit_syscall_entry() directly, and then
         * jump back to the normal fast path.
         */
 auditsys:
@@ -558,22 +559,21 @@ auditsys:
        movq %rdi,%rdx                  /* 3rd arg: 1st syscall arg */
        movq %rax,%rsi                  /* 2nd arg: syscall number */
        movl $AUDIT_ARCH_X86_64,%edi    /* 1st arg: audit arch */
-       call audit_syscall_entry
+       call __audit_syscall_entry
        LOAD_ARGS 0             /* reload call-clobbered registers */
        jmp system_call_fastpath
 
        /*
-        * Return fast path for syscall audit.  Call audit_syscall_exit()
+        * Return fast path for syscall audit.  Call __audit_syscall_exit()
         * directly and then jump back to the fast path with TIF_SYSCALL_AUDIT
         * masked off.
         */
 sysret_audit:
        movq RAX-ARGOFFSET(%rsp),%rsi   /* second arg, syscall return value */
-       cmpq $0,%rsi            /* is it < 0? */
-       setl %al                /* 1 if so, 0 if not */
+       cmpq $-MAX_ERRNO,%rsi   /* is it < -MAX_ERRNO? */
+       setbe %al               /* 1 if so, 0 if not */
        movzbl %al,%edi         /* zero-extend that into %edi */
-       inc %edi /* first arg, 0->1(AUDITSC_SUCCESS), 1->2(AUDITSC_FAILURE) */
-       call audit_syscall_exit
+       call __audit_syscall_exit
        movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
        jmp sysret_check
 #endif /* CONFIG_AUDITSYSCALL */
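
In both the entry_32.S and entry_64.S audit paths above, the success flag passed to __audit_syscall_exit() is no longer "return value >= 0" but an unsigned comparison against -MAX_ERRNO, since some successful syscalls (mmap(), for instance) legitimately return values that look negative when treated as signed. In C terms the new test is roughly the inverse of the kernel's IS_ERR_VALUE() check (sketch only, not part of the commit):

        #include <linux/err.h>                  /* MAX_ERRNO, IS_ERR_VALUE() */

        static inline int syscall_succeeded(unsigned long ret)
        {
                /* error returns occupy the top MAX_ERRNO values of the unsigned
                 * range, i.e. -MAX_ERRNO..-1 when viewed as signed */
                return !IS_ERR_VALUE(ret);
        }
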
index fe86493f3ed1cc37912290e7f5461a2471785fc7..ac0417be9131a8d59cd834a61b629dd36ddec5b5 100644 (file)
@@ -311,13 +311,33 @@ out:
        return state;
 }
 
+/*
+ * AMD microcode firmware naming convention, up to family 15h they are in
+ * the legacy file:
+ *
+ *    amd-ucode/microcode_amd.bin
+ *
+ * This legacy file is always smaller than 2K in size.
+ *
+ * Starting at family 15h they are in family specific firmware files:
+ *
+ *    amd-ucode/microcode_amd_fam15h.bin
+ *    amd-ucode/microcode_amd_fam16h.bin
+ *    ...
+ *
+ * These might be larger than 2K.
+ */
 static enum ucode_state request_microcode_amd(int cpu, struct device *device)
 {
-       const char *fw_name = "amd-ucode/microcode_amd.bin";
+       char fw_name[36] = "amd-ucode/microcode_amd.bin";
        const struct firmware *fw;
        enum ucode_state ret = UCODE_NFOUND;
+       struct cpuinfo_x86 *c = &cpu_data(cpu);
+
+       if (c->x86 >= 0x15)
+               snprintf(fw_name, sizeof(fw_name), "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);
 
-       if (request_firmware(&fw, fw_name, device)) {
+       if (request_firmware(&fw, (const char *)fw_name, device)) {
                pr_err("failed to load file %s\n", fw_name);
                goto out;
        }
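
A quick illustration of the filename the new code requests for a family 15h part; this is a standalone sketch of the snprintf() formatting above, not kernel code:

        #include <stdio.h>

        int main(void)
        {
                char fw_name[36] = "amd-ucode/microcode_amd.bin";
                unsigned int family = 0x15;     /* c->x86 on Bulldozer-class CPUs */

                if (family >= 0x15)
                        snprintf(fw_name, sizeof(fw_name),
                                 "amd-ucode/microcode_amd_fam%.2xh.bin", family);
                printf("%s\n", fw_name);        /* amd-ucode/microcode_amd_fam15h.bin */
                return 0;
        }
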
index 89a04c7b5bb6f600ff764bf3c91900880e47db4e..50267386b7668d7761f14b2e98a6a032815a792f 100644 (file)
@@ -1392,20 +1392,18 @@ long syscall_trace_enter(struct pt_regs *regs)
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_enter(regs, regs->orig_ax);
 
-       if (unlikely(current->audit_context)) {
-               if (IS_IA32)
-                       audit_syscall_entry(AUDIT_ARCH_I386,
-                                           regs->orig_ax,
-                                           regs->bx, regs->cx,
-                                           regs->dx, regs->si);
+       if (IS_IA32)
+               audit_syscall_entry(AUDIT_ARCH_I386,
+                                   regs->orig_ax,
+                                   regs->bx, regs->cx,
+                                   regs->dx, regs->si);
 #ifdef CONFIG_X86_64
-               else
-                       audit_syscall_entry(AUDIT_ARCH_X86_64,
-                                           regs->orig_ax,
-                                           regs->di, regs->si,
-                                           regs->dx, regs->r10);
+       else
+               audit_syscall_entry(AUDIT_ARCH_X86_64,
+                                   regs->orig_ax,
+                                   regs->di, regs->si,
+                                   regs->dx, regs->r10);
 #endif
-       }
 
        return ret ?: regs->orig_ax;
 }
@@ -1414,8 +1412,7 @@ void syscall_trace_leave(struct pt_regs *regs)
 {
        bool step;
 
-       if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
+       audit_syscall_exit(regs);
 
        if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
                trace_sys_exit(regs, regs->ax);
index 37a458b521a6020598b69305c782a0f28bd75b27..d840e69a853c0ed3fb48d1bd9f6abc2367275efa 100644 (file)
@@ -39,6 +39,14 @@ static int reboot_mode;
 enum reboot_type reboot_type = BOOT_ACPI;
 int reboot_force;
 
+/* This variable is used privately to keep track of whether or not
+ * reboot_type is still set to its default value (i.e., reboot= hasn't
+ * been set on the command line).  This is needed so that we can
+ * suppress DMI scanning for reboot quirks.  Without it, it's
+ * impossible to override a faulty reboot quirk without recompiling.
+ */
+static int reboot_default = 1;
+
 #if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
 static int reboot_cpu = -1;
 #endif
@@ -67,6 +75,12 @@ bool port_cf9_safe = false;
 static int __init reboot_setup(char *str)
 {
        for (;;) {
+               /* Having anything passed on the command line via
+                * reboot= will cause us to disable DMI checking
+                * below.
+                */
+               reboot_default = 0;
+
                switch (*str) {
                case 'w':
                        reboot_mode = 0x1234;
@@ -295,14 +309,6 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "P4S800"),
                },
        },
-       {       /* Handle problems with rebooting on VersaLogic Menlow boards */
-               .callback = set_bios_reboot,
-               .ident = "VersaLogic Menlow based board",
-               .matches = {
-                       DMI_MATCH(DMI_BOARD_VENDOR, "VersaLogic Corporation"),
-                       DMI_MATCH(DMI_BOARD_NAME, "VersaLogic Menlow board"),
-               },
-       },
        { /* Handle reboot issue on Acer Aspire one */
                .callback = set_kbd_reboot,
                .ident = "Acer Aspire One A110",
@@ -316,7 +322,12 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = {
 
 static int __init reboot_init(void)
 {
-       dmi_check_system(reboot_dmi_table);
+       /* Only do the DMI check if reboot_type hasn't been overridden
+        * on the command line
+        */
+       if (reboot_default) {
+               dmi_check_system(reboot_dmi_table);
+       }
        return 0;
 }
 core_initcall(reboot_init);
@@ -465,7 +476,12 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = {
 
 static int __init pci_reboot_init(void)
 {
-       dmi_check_system(pci_reboot_dmi_table);
+       /* Only do the DMI check if reboot_type hasn't been overridden
+        * on the command line
+        */
+       if (reboot_default) {
+               dmi_check_system(pci_reboot_dmi_table);
+       }
        return 0;
 }
 core_initcall(pci_reboot_init);
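
With the two hunks above, any explicit reboot= option on the kernel command line now also disables the DMI quirk tables, so a faulty quirk entry can be overridden without recompiling. Illustrative values, not taken from this commit:

        /*
         * reboot=bios  - force the BIOS reboot method and skip dmi_check_system()
         * reboot=pci   - force the 0xcf9 PCI reset method, likewise skipping quirks
         */
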
diff --git a/arch/x86/kernel/syscall_32.c b/arch/x86/kernel/syscall_32.c
new file mode 100644 (file)
index 0000000..147fcd4
--- /dev/null
@@ -0,0 +1,25 @@
+/* System call table for i386. */
+
+#include <linux/linkage.h>
+#include <linux/sys.h>
+#include <linux/cache.h>
+#include <asm/asm-offsets.h>
+
+#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void sym(void) ;
+#include <asm/syscalls_32.h>
+#undef __SYSCALL_I386
+
+#define __SYSCALL_I386(nr, sym, compat) [nr] = sym,
+
+typedef asmlinkage void (*sys_call_ptr_t)(void);
+
+extern asmlinkage void sys_ni_syscall(void);
+
+const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
+       /*
+        * Smells like a compiler bug -- it doesn't work
+        * when the & below is removed.
+        */
+       [0 ... __NR_syscall_max] = &sys_ni_syscall,
+#include <asm/syscalls_32.h>
+};
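
The new table (and the syscall_64.c version in the next hunk) relies on two GCC extensions: a range designated initializer that pre-fills every slot with sys_ni_syscall, and later designated initializers -- produced by re-including the generated header with __SYSCALL_I386 redefined -- that override individual slots. A self-contained analogue of the pattern (all names here are invented):

        #include <stdio.h>

        typedef void (*call_ptr_t)(void);

        static void demo_ni(void)    { puts("ni_syscall"); }
        static void demo_read(void)  { puts("read"); }
        static void demo_write(void) { puts("write"); }

        #define DEMO_MAX 5

        static const call_ptr_t demo_table[DEMO_MAX + 1] = {
                /* GCC range initializer: every slot starts as the stub... */
                [0 ... DEMO_MAX] = &demo_ni,
                /* ...then individual designated initializers override slots,
                 * which is what the re-included __SYSCALL_* lines amount to */
                [0] = demo_read,
                [1] = demo_write,
        };

        int main(void)
        {
                demo_table[1]();        /* prints "write" */
                demo_table[3]();        /* prints "ni_syscall" */
                return 0;
        }
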
index de87d6008295c5459aa1cbc9f41037829e1c82c8..7ac7943be02cb0bb69577daa0403b08372d82da4 100644 (file)
@@ -5,15 +5,11 @@
 #include <linux/cache.h>
 #include <asm/asm-offsets.h>
 
-#define __NO_STUBS
+#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
+#include <asm/syscalls_64.h>
+#undef __SYSCALL_64
 
-#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
-#undef _ASM_X86_UNISTD_64_H
-#include <asm/unistd_64.h>
-
-#undef __SYSCALL
-#define __SYSCALL(nr, sym) [nr] = sym,
-#undef _ASM_X86_UNISTD_64_H
+#define __SYSCALL_64(nr, sym, compat) [nr] = sym,
 
 typedef void (*sys_call_ptr_t)(void);
 
@@ -21,9 +17,9 @@ extern void sys_ni_syscall(void);
 
 const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
        /*
-       *Smells like a like a compiler bug -- it doesn't work
-       *when the & below is removed.
-       */
+        * Smells like a compiler bug -- it doesn't work
+        * when the & below is removed.
+        */
        [0 ... __NR_syscall_max] = &sys_ni_syscall,
-#include <asm/unistd_64.h>
+#include <asm/syscalls_64.h>
 };
diff --git a/arch/x86/kernel/syscall_table_32.S b/arch/x86/kernel/syscall_table_32.S
deleted file mode 100644 (file)
index 9a0e312..0000000
+++ /dev/null
@@ -1,350 +0,0 @@
-ENTRY(sys_call_table)
-       .long sys_restart_syscall       /* 0 - old "setup()" system call, used for restarting */
-       .long sys_exit
-       .long ptregs_fork
-       .long sys_read
-       .long sys_write
-       .long sys_open          /* 5 */
-       .long sys_close
-       .long sys_waitpid
-       .long sys_creat
-       .long sys_link
-       .long sys_unlink        /* 10 */
-       .long ptregs_execve
-       .long sys_chdir
-       .long sys_time
-       .long sys_mknod
-       .long sys_chmod         /* 15 */
-       .long sys_lchown16
-       .long sys_ni_syscall    /* old break syscall holder */
-       .long sys_stat
-       .long sys_lseek
-       .long sys_getpid        /* 20 */
-       .long sys_mount
-       .long sys_oldumount
-       .long sys_setuid16
-       .long sys_getuid16
-       .long sys_stime         /* 25 */
-       .long sys_ptrace
-       .long sys_alarm
-       .long sys_fstat
-       .long sys_pause
-       .long sys_utime         /* 30 */
-       .long sys_ni_syscall    /* old stty syscall holder */
-       .long sys_ni_syscall    /* old gtty syscall holder */
-       .long sys_access
-       .long sys_nice
-       .long sys_ni_syscall    /* 35 - old ftime syscall holder */
-       .long sys_sync
-       .long sys_kill
-       .long sys_rename
-       .long sys_mkdir
-       .long sys_rmdir         /* 40 */
-       .long sys_dup
-       .long sys_pipe
-       .long sys_times
-       .long sys_ni_syscall    /* old prof syscall holder */
-       .long sys_brk           /* 45 */
-       .long sys_setgid16
-       .long sys_getgid16
-       .long sys_signal
-       .long sys_geteuid16
-       .long sys_getegid16     /* 50 */
-       .long sys_acct
-       .long sys_umount        /* recycled never used phys() */
-       .long sys_ni_syscall    /* old lock syscall holder */
-       .long sys_ioctl
-       .long sys_fcntl         /* 55 */
-       .long sys_ni_syscall    /* old mpx syscall holder */
-       .long sys_setpgid
-       .long sys_ni_syscall    /* old ulimit syscall holder */
-       .long sys_olduname
-       .long sys_umask         /* 60 */
-       .long sys_chroot
-       .long sys_ustat
-       .long sys_dup2
-       .long sys_getppid
-       .long sys_getpgrp       /* 65 */
-       .long sys_setsid
-       .long sys_sigaction
-       .long sys_sgetmask
-       .long sys_ssetmask
-       .long sys_setreuid16    /* 70 */
-       .long sys_setregid16
-       .long sys_sigsuspend
-       .long sys_sigpending
-       .long sys_sethostname
-       .long sys_setrlimit     /* 75 */
-       .long sys_old_getrlimit
-       .long sys_getrusage
-       .long sys_gettimeofday
-       .long sys_settimeofday
-       .long sys_getgroups16   /* 80 */
-       .long sys_setgroups16
-       .long sys_old_select
-       .long sys_symlink
-       .long sys_lstat
-       .long sys_readlink      /* 85 */
-       .long sys_uselib
-       .long sys_swapon
-       .long sys_reboot
-       .long sys_old_readdir
-       .long sys_old_mmap      /* 90 */
-       .long sys_munmap
-       .long sys_truncate
-       .long sys_ftruncate
-       .long sys_fchmod
-       .long sys_fchown16      /* 95 */
-       .long sys_getpriority
-       .long sys_setpriority
-       .long sys_ni_syscall    /* old profil syscall holder */
-       .long sys_statfs
-       .long sys_fstatfs       /* 100 */
-       .long sys_ioperm
-       .long sys_socketcall
-       .long sys_syslog
-       .long sys_setitimer
-       .long sys_getitimer     /* 105 */
-       .long sys_newstat
-       .long sys_newlstat
-       .long sys_newfstat
-       .long sys_uname
-       .long ptregs_iopl       /* 110 */
-       .long sys_vhangup
-       .long sys_ni_syscall    /* old "idle" system call */
-       .long ptregs_vm86old
-       .long sys_wait4
-       .long sys_swapoff       /* 115 */
-       .long sys_sysinfo
-       .long sys_ipc
-       .long sys_fsync
-       .long ptregs_sigreturn
-       .long ptregs_clone      /* 120 */
-       .long sys_setdomainname
-       .long sys_newuname
-       .long sys_modify_ldt
-       .long sys_adjtimex
-       .long sys_mprotect      /* 125 */
-       .long sys_sigprocmask
-       .long sys_ni_syscall    /* old "create_module" */
-       .long sys_init_module
-       .long sys_delete_module
-       .long sys_ni_syscall    /* 130: old "get_kernel_syms" */
-       .long sys_quotactl
-       .long sys_getpgid
-       .long sys_fchdir
-       .long sys_bdflush
-       .long sys_sysfs         /* 135 */
-       .long sys_personality
-       .long sys_ni_syscall    /* reserved for afs_syscall */
-       .long sys_setfsuid16
-       .long sys_setfsgid16
-       .long sys_llseek        /* 140 */
-       .long sys_getdents
-       .long sys_select
-       .long sys_flock
-       .long sys_msync
-       .long sys_readv         /* 145 */
-       .long sys_writev
-       .long sys_getsid
-       .long sys_fdatasync
-       .long sys_sysctl
-       .long sys_mlock         /* 150 */
-       .long sys_munlock
-       .long sys_mlockall
-       .long sys_munlockall
-       .long sys_sched_setparam
-       .long sys_sched_getparam   /* 155 */
-       .long sys_sched_setscheduler
-       .long sys_sched_getscheduler
-       .long sys_sched_yield
-       .long sys_sched_get_priority_max
-       .long sys_sched_get_priority_min  /* 160 */
-       .long sys_sched_rr_get_interval
-       .long sys_nanosleep
-       .long sys_mremap
-       .long sys_setresuid16
-       .long sys_getresuid16   /* 165 */
-       .long ptregs_vm86
-       .long sys_ni_syscall    /* Old sys_query_module */
-       .long sys_poll
-       .long sys_ni_syscall    /* Old nfsservctl */
-       .long sys_setresgid16   /* 170 */
-       .long sys_getresgid16
-       .long sys_prctl
-       .long ptregs_rt_sigreturn
-       .long sys_rt_sigaction
-       .long sys_rt_sigprocmask        /* 175 */
-       .long sys_rt_sigpending
-       .long sys_rt_sigtimedwait
-       .long sys_rt_sigqueueinfo
-       .long sys_rt_sigsuspend
-       .long sys_pread64       /* 180 */
-       .long sys_pwrite64
-       .long sys_chown16
-       .long sys_getcwd
-       .long sys_capget
-       .long sys_capset        /* 185 */
-       .long ptregs_sigaltstack
-       .long sys_sendfile
-       .long sys_ni_syscall    /* reserved for streams1 */
-       .long sys_ni_syscall    /* reserved for streams2 */
-       .long ptregs_vfork      /* 190 */
-       .long sys_getrlimit
-       .long sys_mmap_pgoff
-       .long sys_truncate64
-       .long sys_ftruncate64
-       .long sys_stat64        /* 195 */
-       .long sys_lstat64
-       .long sys_fstat64
-       .long sys_lchown
-       .long sys_getuid
-       .long sys_getgid        /* 200 */
-       .long sys_geteuid
-       .long sys_getegid
-       .long sys_setreuid
-       .long sys_setregid
-       .long sys_getgroups     /* 205 */
-       .long sys_setgroups
-       .long sys_fchown
-       .long sys_setresuid
-       .long sys_getresuid
-       .long sys_setresgid     /* 210 */
-       .long sys_getresgid
-       .long sys_chown
-       .long sys_setuid
-       .long sys_setgid
-       .long sys_setfsuid      /* 215 */
-       .long sys_setfsgid
-       .long sys_pivot_root
-       .long sys_mincore
-       .long sys_madvise
-       .long sys_getdents64    /* 220 */
-       .long sys_fcntl64
-       .long sys_ni_syscall    /* reserved for TUX */
-       .long sys_ni_syscall
-       .long sys_gettid
-       .long sys_readahead     /* 225 */
-       .long sys_setxattr
-       .long sys_lsetxattr
-       .long sys_fsetxattr
-       .long sys_getxattr
-       .long sys_lgetxattr     /* 230 */
-       .long sys_fgetxattr
-       .long sys_listxattr
-       .long sys_llistxattr
-       .long sys_flistxattr
-       .long sys_removexattr   /* 235 */
-       .long sys_lremovexattr
-       .long sys_fremovexattr
-       .long sys_tkill
-       .long sys_sendfile64
-       .long sys_futex         /* 240 */
-       .long sys_sched_setaffinity
-       .long sys_sched_getaffinity
-       .long sys_set_thread_area
-       .long sys_get_thread_area
-       .long sys_io_setup      /* 245 */
-       .long sys_io_destroy
-       .long sys_io_getevents
-       .long sys_io_submit
-       .long sys_io_cancel
-       .long sys_fadvise64     /* 250 */
-       .long sys_ni_syscall
-       .long sys_exit_group
-       .long sys_lookup_dcookie
-       .long sys_epoll_create
-       .long sys_epoll_ctl     /* 255 */
-       .long sys_epoll_wait
-       .long sys_remap_file_pages
-       .long sys_set_tid_address
-       .long sys_timer_create
-       .long sys_timer_settime         /* 260 */
-       .long sys_timer_gettime
-       .long sys_timer_getoverrun
-       .long sys_timer_delete
-       .long sys_clock_settime
-       .long sys_clock_gettime         /* 265 */
-       .long sys_clock_getres
-       .long sys_clock_nanosleep
-       .long sys_statfs64
-       .long sys_fstatfs64
-       .long sys_tgkill        /* 270 */
-       .long sys_utimes
-       .long sys_fadvise64_64
-       .long sys_ni_syscall    /* sys_vserver */
-       .long sys_mbind
-       .long sys_get_mempolicy
-       .long sys_set_mempolicy
-       .long sys_mq_open
-       .long sys_mq_unlink
-       .long sys_mq_timedsend
-       .long sys_mq_timedreceive       /* 280 */
-       .long sys_mq_notify
-       .long sys_mq_getsetattr
-       .long sys_kexec_load
-       .long sys_waitid
-       .long sys_ni_syscall            /* 285 */ /* available */
-       .long sys_add_key
-       .long sys_request_key
-       .long sys_keyctl
-       .long sys_ioprio_set
-       .long sys_ioprio_get            /* 290 */
-       .long sys_inotify_init
-       .long sys_inotify_add_watch
-       .long sys_inotify_rm_watch
-       .long sys_migrate_pages
-       .long sys_openat                /* 295 */
-       .long sys_mkdirat
-       .long sys_mknodat
-       .long sys_fchownat
-       .long sys_futimesat
-       .long sys_fstatat64             /* 300 */
-       .long sys_unlinkat
-       .long sys_renameat
-       .long sys_linkat
-       .long sys_symlinkat
-       .long sys_readlinkat            /* 305 */
-       .long sys_fchmodat
-       .long sys_faccessat
-       .long sys_pselect6
-       .long sys_ppoll
-       .long sys_unshare               /* 310 */
-       .long sys_set_robust_list
-       .long sys_get_robust_list
-       .long sys_splice
-       .long sys_sync_file_range
-       .long sys_tee                   /* 315 */
-       .long sys_vmsplice
-       .long sys_move_pages
-       .long sys_getcpu
-       .long sys_epoll_pwait
-       .long sys_utimensat             /* 320 */
-       .long sys_signalfd
-       .long sys_timerfd_create
-       .long sys_eventfd
-       .long sys_fallocate
-       .long sys_timerfd_settime       /* 325 */
-       .long sys_timerfd_gettime
-       .long sys_signalfd4
-       .long sys_eventfd2
-       .long sys_epoll_create1
-       .long sys_dup3                  /* 330 */
-       .long sys_pipe2
-       .long sys_inotify_init1
-       .long sys_preadv
-       .long sys_pwritev
-       .long sys_rt_tgsigqueueinfo     /* 335 */
-       .long sys_perf_event_open
-       .long sys_recvmmsg
-       .long sys_fanotify_init
-       .long sys_fanotify_mark
-       .long sys_prlimit64             /* 340 */
-       .long sys_name_to_handle_at
-       .long sys_open_by_handle_at
-       .long sys_clock_adjtime
-       .long sys_syncfs
-       .long sys_sendmmsg              /* 345 */
-       .long sys_setns
-       .long sys_process_vm_readv
-       .long sys_process_vm_writev
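
The hand-maintained assembly table deleted above is not lost: the same number-to-entry-point mapping now lives in the new arch/x86/syscalls/syscall_32.tbl added later in this diff, from which the build generates the real table. For example, the first entry maps as:

	.long sys_restart_syscall       /* 0 - old "setup()" system call */
	  becomes the table line
	0      i386    restart_syscall         sys_restart_syscall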
index c0dd5b603749057db73420caacc321d5d714338f..a62c201c97eccf31fa90d05dec5e5cf5102ead74 100644 (file)
@@ -290,14 +290,15 @@ static inline int pit_verify_msb(unsigned char val)
 static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *deltap)
 {
        int count;
-       u64 tsc = 0;
+       u64 tsc = 0, prev_tsc = 0;
 
        for (count = 0; count < 50000; count++) {
                if (!pit_verify_msb(val))
                        break;
+               prev_tsc = tsc;
                tsc = get_cycles();
        }
-       *deltap = get_cycles() - tsc;
+       *deltap = get_cycles() - prev_tsc;
        *tscp = tsc;
 
        /*
@@ -311,9 +312,9 @@ static inline int pit_expect_msb(unsigned char val, u64 *tscp, unsigned long *de
  * How many MSB values do we want to see? We aim for
  * a maximum error rate of 500ppm (in practice the
  * real error is much smaller), but refuse to spend
- * more than 25ms on it.
+ * more than 50ms on it.
  */
-#define MAX_QUICK_PIT_MS 25
+#define MAX_QUICK_PIT_MS 50
 #define MAX_QUICK_PIT_ITERATIONS (MAX_QUICK_PIT_MS * PIT_TICK_RATE / 1000 / 256)
 
 static unsigned long quick_pit_calibrate(void)
@@ -383,15 +384,12 @@ success:
         *
         * As a result, we can depend on there not being
         * any odd delays anywhere, and the TSC reads are
-        * reliable (within the error). We also adjust the
-        * delta to the middle of the error bars, just
-        * because it looks nicer.
+        * reliable (within the error).
         *
         * kHz = ticks / time-in-seconds / 1000;
         * kHz = (t2 - t1) / (I * 256 / PIT_TICK_RATE) / 1000
         * kHz = ((t2 - t1) * PIT_TICK_RATE) / (I * 256 * 1000)
         */
-       delta += (long)(d2 - d1)/2;
        delta *= PIT_TICK_RATE;
        do_div(delta, i*256*1000);
        printk("Fast TSC calibration using PIT\n");
index 863f8753ab0ae696f8981ef30d9ee031dd0e310b..b466cab5ba15d171cb4b1fd61a1590bd0cca9956 100644 (file)
@@ -335,9 +335,11 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
        if (info->flags & VM86_SCREEN_BITMAP)
                mark_screen_rdonly(tsk->mm);
 
-       /*call audit_syscall_exit since we do not exit via the normal paths */
+       /*call __audit_syscall_exit since we do not exit via the normal paths */
+#ifdef CONFIG_AUDITSYSCALL
        if (unlikely(current->audit_context))
-               audit_syscall_exit(AUDITSC_RESULT(0), 0);
+               __audit_syscall_exit(1, 0);
+#endif
 
        __asm__ __volatile__(
                "movl %0,%%esp\n\t"
index 05a562b850252b3c480ce67dd4e1e4c6c47f6055..0982507b962a736f3e643d454d4b38c2b1cd1c29 100644 (file)
@@ -1891,6 +1891,51 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
        ss->p = 1;
 }
 
+static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
+{
+       struct x86_emulate_ops *ops = ctxt->ops;
+       u32 eax, ebx, ecx, edx;
+
+       /*
+        * syscall should always be enabled in longmode - so only become
+        * vendor specific (cpuid) if other modes are active...
+        */
+       if (ctxt->mode == X86EMUL_MODE_PROT64)
+               return true;
+
+       eax = 0x00000000;
+       ecx = 0x00000000;
+       if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
+               /*
+                * Intel ("GenuineIntel")
+                * remark: Intel CPUs only support "syscall" in 64bit
+                * longmode. Also, a 64bit guest with a
+                * 32bit compat-app running will #UD! While this
+                * behaviour can be fixed (by emulating the AMD
+                * response), AMD CPUs can't behave like Intel.
+                */
+               if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
+                   ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
+                   edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
+                       return false;
+
+               /* AMD ("AuthenticAMD") */
+               if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
+                   ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
+                   edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
+                       return true;
+
+               /* AMD ("AMDisbetter!") */
+               if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
+                   ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
+                   edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
+                       return true;
+       }
+
+       /* default: (not Intel, not AMD), apply Intel's stricter rules... */
+       return false;
+}
+
 static int em_syscall(struct x86_emulate_ctxt *ctxt)
 {
        struct x86_emulate_ops *ops = ctxt->ops;
@@ -1904,9 +1949,15 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
            ctxt->mode == X86EMUL_MODE_VM86)
                return emulate_ud(ctxt);
 
+       if (!(em_syscall_is_enabled(ctxt)))
+               return emulate_ud(ctxt);
+
        ops->get_msr(ctxt, MSR_EFER, &efer);
        setup_syscalls_segments(ctxt, &cs, &ss);
 
+       if (!(efer & EFER_SCE))
+               return emulate_ud(ctxt);
+
        ops->get_msr(ctxt, MSR_STAR, &msr_data);
        msr_data >>= 32;
        cs_sel = (u16)(msr_data & 0xfffc);
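
The vendor test in em_syscall_is_enabled() compares the CPUID leaf 0 vendor-string registers. For reference, the conventional vendor strings encode as the values below, which the X86EMUL_CPUID_VENDOR_* constants presumably carry (they are not shown in this hunk):

	/* "GenuineIntel": ebx = 0x756e6547, edx = 0x49656e69, ecx = 0x6c65746e */
	/* "AuthenticAMD": ebx = 0x68747541, edx = 0x69746e65, ecx = 0x444d4163 */

A guest reporting the Intel vendor string therefore gets the stricter Intel behaviour (SYSCALL only in long mode), while the AMD vendor strings keep it enabled.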
index 14d6cadc4ba63b9ba3adf5b5e64fff461483de69..9cbfc06981186d96c42d3383122310018fea0d7d 100644 (file)
@@ -1495,6 +1495,8 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
 
 int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 {
+       bool pr = false;
+
        switch (msr) {
        case MSR_EFER:
                return set_efer(vcpu, data);
@@ -1635,6 +1637,18 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                pr_unimpl(vcpu, "unimplemented perfctr wrmsr: "
                        "0x%x data 0x%llx\n", msr, data);
                break;
+       case MSR_P6_PERFCTR0:
+       case MSR_P6_PERFCTR1:
+               pr = true;
+       case MSR_P6_EVNTSEL0:
+       case MSR_P6_EVNTSEL1:
+               if (kvm_pmu_msr(vcpu, msr))
+                       return kvm_pmu_set_msr(vcpu, msr, data);
+
+               if (pr || data != 0)
+                       pr_unimpl(vcpu, "disabled perfctr wrmsr: "
+                               "0x%x data 0x%llx\n", msr, data);
+               break;
        case MSR_K7_CLK_CTL:
                /*
                 * Ignore all writes to this no longer documented MSR.
@@ -1835,6 +1849,14 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
        case MSR_FAM10H_MMIO_CONF_BASE:
                data = 0;
                break;
+       case MSR_P6_PERFCTR0:
+       case MSR_P6_PERFCTR1:
+       case MSR_P6_EVNTSEL0:
+       case MSR_P6_EVNTSEL1:
+               if (kvm_pmu_msr(vcpu, msr))
+                       return kvm_pmu_get_msr(vcpu, msr, pdata);
+               data = 0;
+               break;
        case MSR_IA32_UCODE_REV:
                data = 0x100000000ULL;
                break;
@@ -4180,6 +4202,28 @@ static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
        return kvm_x86_ops->check_intercept(emul_to_vcpu(ctxt), info, stage);
 }
 
+static bool emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
+                              u32 *eax, u32 *ebx, u32 *ecx, u32 *edx)
+{
+       struct kvm_cpuid_entry2 *cpuid = NULL;
+
+       if (eax && ecx)
+               cpuid = kvm_find_cpuid_entry(emul_to_vcpu(ctxt),
+                                           *eax, *ecx);
+
+       if (cpuid) {
+               *eax = cpuid->eax;
+               *ecx = cpuid->ecx;
+               if (ebx)
+                       *ebx = cpuid->ebx;
+               if (edx)
+                       *edx = cpuid->edx;
+               return true;
+       }
+
+       return false;
+}
+
 static struct x86_emulate_ops emulate_ops = {
        .read_std            = kvm_read_guest_virt_system,
        .write_std           = kvm_write_guest_virt_system,
@@ -4211,6 +4255,7 @@ static struct x86_emulate_ops emulate_ops = {
        .get_fpu             = emulator_get_fpu,
        .put_fpu             = emulator_put_fpu,
        .intercept           = emulator_intercept,
+       .get_cpuid           = emulator_get_cpuid,
 };
 
 static void cache_all_regs(struct kvm_vcpu *vcpu)
index 5b83c51c12e02e8146aba31c0236ff1e7d42d4b0..819137904428a4f82444cf5a9d5055bb59dc6d80 100644 (file)
@@ -219,7 +219,9 @@ ab: STOS/W/D/Q Yv,rAX
 ac: LODS/B AL,Xb
 ad: LODS/W/D/Q rAX,Xv
 ae: SCAS/B AL,Yb
-af: SCAS/W/D/Q rAX,Xv
+# Note: The May 2011 Intel manual shows Xv for the second parameter of the
+# next instruction but Yv is correct
+af: SCAS/W/D/Q rAX,Yv
 # 0xb0 - 0xbf
 b0: MOV AL/R8L,Ib
 b1: MOV CL/R9L,Ib
@@ -729,8 +731,8 @@ de: VAESDEC Vdq,Hdq,Wdq (66),(v1)
 df: VAESDECLAST Vdq,Hdq,Wdq (66),(v1)
 f0: MOVBE Gy,My | MOVBE Gw,Mw (66) | CRC32 Gd,Eb (F2)
 f1: MOVBE My,Gy | MOVBE Mw,Gw (66) | CRC32 Gd,Ey (F2)
-f3: ANDN Gy,By,Ey (v)
-f4: Grp17 (1A)
+f2: ANDN Gy,By,Ey (v)
+f3: Grp17 (1A)
 f5: BZHI Gy,Ey,By (v) | PEXT Gy,By,Ey (F3),(v) | PDEP Gy,By,Ey (F2),(v)
 f6: MULX By,Gy,rDX,Ey (F2),(v)
 f7: BEXTR Gy,Ey,By (v) | SHLX Gy,Ey,By (66),(v) | SARX Gy,Ey,By (F3),(v) | SHRX Gy,Ey,By (F2),(v)
index 9d74824a708dcdd1cd3b0e6ce4798e79fbbf1c01..f0b4caf85c1a8687496b863745f11838a4d430b8 100644 (file)
@@ -673,7 +673,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
 
        stackend = end_of_stack(tsk);
        if (tsk != &init_task && *stackend != STACK_END_MAGIC)
-               printk(KERN_ALERT "Thread overran stack, or stack corrupted\n");
+               printk(KERN_EMERG "Thread overran stack, or stack corrupted\n");
 
        tsk->thread.cr2         = address;
        tsk->thread.trap_no     = 14;
@@ -684,7 +684,7 @@ no_context(struct pt_regs *regs, unsigned long error_code,
                sig = 0;
 
        /* Executive summary in case the body of the oops scrolled away */
-       printk(KERN_EMERG "CR2: %016lx\n", address);
+       printk(KERN_DEFAULT "CR2: %016lx\n", address);
 
        oops_end(flags, regs, sig);
 }
index fd61b3fb73411f89b5a5d1b2805e133bcbd8be94..1c1c4f46a7c15c19c54923c815556d8c253b847c 100644 (file)
@@ -109,6 +109,8 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
        if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
                return;
        pxm = pa->proximity_domain_lo;
+       if (acpi_srat_revision >= 2)
+               pxm |= *((unsigned int*)pa->proximity_domain_hi) << 8;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
@@ -160,6 +162,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
        start = ma->base_address;
        end = start + ma->length;
        pxm = ma->proximity_domain;
+       if (acpi_srat_revision <= 1)
+               pxm &= 0xff;
        node = setup_node(pxm);
        if (node < 0) {
                printk(KERN_ERR "SRAT: Too many proximity domains.\n");
index 7b65f752c5f8fd79af2c6b4afb342988bdd8d56c..7c1b765ecc59e3324299dfe34ba04d127fed4878 100644 (file)
@@ -151,17 +151,18 @@ void bpf_jit_compile(struct sk_filter *fp)
        cleanup_addr = proglen; /* epilogue address */
 
        for (pass = 0; pass < 10; pass++) {
+               u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
                /* no prologue/epilogue for trivial filters (RET something) */
                proglen = 0;
                prog = temp;
 
-               if (seen) {
+               if (seen_or_pass0) {
                        EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
                        EMIT4(0x48, 0x83, 0xec, 96);    /* subq  $96,%rsp       */
                        /* note : must save %rbx in case bpf_error is hit */
-                       if (seen & (SEEN_XREG | SEEN_DATAREF))
+                       if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
                                EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
-                       if (seen & SEEN_XREG)
+                       if (seen_or_pass0 & SEEN_XREG)
                                CLEAR_X(); /* make sure we don't leak kernel memory */
 
                        /*
@@ -170,7 +171,7 @@ void bpf_jit_compile(struct sk_filter *fp)
                         *  r9 = skb->len - skb->data_len
                         *  r8 = skb->data
                         */
-                       if (seen & SEEN_DATAREF) {
+                       if (seen_or_pass0 & SEEN_DATAREF) {
                                if (offsetof(struct sk_buff, len) <= 127)
                                        /* mov    off8(%rdi),%r9d */
                                        EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
@@ -260,9 +261,14 @@ void bpf_jit_compile(struct sk_filter *fp)
                        case BPF_S_ALU_DIV_X: /* A /= X; */
                                seen |= SEEN_XREG;
                                EMIT2(0x85, 0xdb);      /* test %ebx,%ebx */
-                               if (pc_ret0 != -1)
-                                       EMIT_COND_JMP(X86_JE, addrs[pc_ret0] - (addrs[i] - 4));
-                               else {
+                               if (pc_ret0 > 0) {
+                                       /* addrs[pc_ret0 - 1] is start address of target
+                                        * (addrs[i] - 4) is the address following this jmp
+                                        * ("xor %edx,%edx; div %ebx" being 4 bytes long)
+                                        */
+                                       EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
+                                                               (addrs[i] - 4));
+                               } else {
                                        EMIT_COND_JMP(X86_JNE, 2 + 5);
                                        CLEAR_A();
                                        EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
@@ -335,12 +341,12 @@ void bpf_jit_compile(struct sk_filter *fp)
                                }
                                /* fallinto */
                        case BPF_S_RET_A:
-                               if (seen) {
+                               if (seen_or_pass0) {
                                        if (i != flen - 1) {
                                                EMIT_JMP(cleanup_addr - addrs[i]);
                                                break;
                                        }
-                                       if (seen & SEEN_XREG)
+                                       if (seen_or_pass0 & SEEN_XREG)
                                                EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov  -8(%rbp),%rbx */
                                        EMIT1(0xc9);            /* leaveq */
                                }
@@ -483,8 +489,9 @@ common_load:                        seen |= SEEN_DATAREF;
                                goto common_load;
                        case BPF_S_LDX_B_MSH:
                                if ((int)K < 0) {
-                                       if (pc_ret0 != -1) {
-                                               EMIT_JMP(addrs[pc_ret0] - addrs[i]);
+                                       if (pc_ret0 > 0) {
+                                               /* addrs[pc_ret0 - 1] is the start address */
+                                               EMIT_JMP(addrs[pc_ret0 - 1] - addrs[i]);
                                                break;
                                        }
                                        CLEAR_A();
@@ -599,13 +606,14 @@ cond_branch:                      f_offset = addrs[i + filter[i].jf] - addrs[i];
                 * use it to give the cleanup instruction(s) addr
                 */
                cleanup_addr = proglen - 1; /* ret */
-               if (seen)
+               if (seen_or_pass0)
                        cleanup_addr -= 1; /* leaveq */
-               if (seen & SEEN_XREG)
+               if (seen_or_pass0 & SEEN_XREG)
                        cleanup_addr -= 4; /* mov  -8(%rbp),%rbx */
 
                if (image) {
-                       WARN_ON(proglen != oldproglen);
+                       if (proglen != oldproglen)
+                               pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
                        break;
                }
                if (proglen == oldproglen) {
index 5b552198f774eb1e6c6ab3b9ef42276706d74328..3ae0e61abd23acf7b1de580b8c234873b1870209 100644 (file)
@@ -157,13 +157,14 @@ static int __init uvhub_to_first_apicid(int uvhub)
  * clear of the Timeout bit (as well) will free the resource. No reply will
  * be sent (the hardware will only do one reply per message).
  */
-static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp)
+static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp,
+                                               int do_acknowledge)
 {
        unsigned long dw;
        struct bau_pq_entry *msg;
 
        msg = mdp->msg;
-       if (!msg->canceled) {
+       if (!msg->canceled && do_acknowledge) {
                dw = (msg->swack_vec << UV_SW_ACK_NPENDING) | msg->swack_vec;
                write_mmr_sw_ack(dw);
        }
@@ -212,8 +213,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
                        if (mmr & (msg_res << UV_SW_ACK_NPENDING)) {
                                unsigned long mr;
                                /*
-                                * is the resource timed out?
-                                * make everyone ignore the cancelled message.
+                                * Is the resource timed out?
+                                * Make everyone ignore the cancelled message.
                                 */
                                msg2->canceled = 1;
                                stat->d_canceled++;
@@ -231,8 +232,8 @@ static void bau_process_retry_msg(struct msg_desc *mdp,
  * Do all the things a cpu should do for a TLB shootdown message.
  * Other cpu's may come here at the same time for this message.
  */
-static void bau_process_message(struct msg_desc *mdp,
-                                       struct bau_control *bcp)
+static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp,
+                                               int do_acknowledge)
 {
        short socket_ack_count = 0;
        short *sp;
@@ -284,8 +285,9 @@ static void bau_process_message(struct msg_desc *mdp,
                if (msg_ack_count == bcp->cpus_in_uvhub) {
                        /*
                         * All cpus in uvhub saw it; reply
+                        * (unless we are in the UV2 workaround)
                         */
-                       reply_to_message(mdp, bcp);
+                       reply_to_message(mdp, bcp, do_acknowledge);
                }
        }
 
@@ -491,27 +493,138 @@ static int uv1_wait_completion(struct bau_desc *bau_desc,
 /*
  * UV2 has an extra bit of status in the ACTIVATION_STATUS_2 register.
  */
-static unsigned long uv2_read_status(unsigned long offset, int rshft, int cpu)
+static unsigned long uv2_read_status(unsigned long offset, int rshft, int desc)
 {
        unsigned long descriptor_status;
        unsigned long descriptor_status2;
 
        descriptor_status = ((read_lmmr(offset) >> rshft) & UV_ACT_STATUS_MASK);
-       descriptor_status2 = (read_mmr_uv2_status() >> cpu) & 0x1UL;
+       descriptor_status2 = (read_mmr_uv2_status() >> desc) & 0x1UL;
        descriptor_status = (descriptor_status << 1) | descriptor_status2;
        return descriptor_status;
 }
 
+/*
+ * Return whether the status of the descriptor that is normally used for this
+ * cpu (the one indexed by its hub-relative cpu number) is busy.
+ * The status of the original 32 descriptors is always reflected in the 64
+ * bits of UVH_LB_BAU_SB_ACTIVATION_STATUS_0.
+ * The bit provided by the activation_status_2 register is irrelevant to
+ * the status if it is only being tested for busy or not busy.
+ */
+int normal_busy(struct bau_control *bcp)
+{
+       int cpu = bcp->uvhub_cpu;
+       int mmr_offset;
+       int right_shift;
+
+       mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
+       right_shift = cpu * UV_ACT_STATUS_SIZE;
+       return (((((read_lmmr(mmr_offset) >> right_shift) &
+                               UV_ACT_STATUS_MASK)) << 1) == UV2H_DESC_BUSY);
+}
+
+/*
+ * Entered when a bau descriptor has gone into a permanent busy wait because
+ * of a hardware bug.
+ * Workaround the bug.
+ */
+int handle_uv2_busy(struct bau_control *bcp)
+{
+       int busy_one = bcp->using_desc;
+       int normal = bcp->uvhub_cpu;
+       int selected = -1;
+       int i;
+       unsigned long descriptor_status;
+       unsigned long status;
+       int mmr_offset;
+       struct bau_desc *bau_desc_old;
+       struct bau_desc *bau_desc_new;
+       struct bau_control *hmaster = bcp->uvhub_master;
+       struct ptc_stats *stat = bcp->statp;
+       cycles_t ttm;
+
+       stat->s_uv2_wars++;
+       spin_lock(&hmaster->uvhub_lock);
+       /* try for the original first */
+       if (busy_one != normal) {
+               if (!normal_busy(bcp))
+                       selected = normal;
+       }
+       if (selected < 0) {
+               /* can't use the normal, select an alternate */
+               mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
+               descriptor_status = read_lmmr(mmr_offset);
+
+               /* scan available descriptors 32-63 */
+               for (i = 0; i < UV_CPUS_PER_AS; i++) {
+                       if ((hmaster->inuse_map & (1 << i)) == 0) {
+                               status = ((descriptor_status >>
+                                               (i * UV_ACT_STATUS_SIZE)) &
+                                               UV_ACT_STATUS_MASK) << 1;
+                               if (status != UV2H_DESC_BUSY) {
+                                       selected = i + UV_CPUS_PER_AS;
+                                       break;
+                               }
+                       }
+               }
+       }
+
+       if (busy_one != normal)
+               /* mark the busy alternate as not in-use */
+               hmaster->inuse_map &= ~(1 << (busy_one - UV_CPUS_PER_AS));
+
+       if (selected >= 0) {
+               /* switch to the selected descriptor */
+               if (selected != normal) {
+                       /* set the selected alternate as in-use */
+                       hmaster->inuse_map |=
+                                       (1 << (selected - UV_CPUS_PER_AS));
+                       if (selected > stat->s_uv2_wars_hw)
+                               stat->s_uv2_wars_hw = selected;
+               }
+               bau_desc_old = bcp->descriptor_base;
+               bau_desc_old += (ITEMS_PER_DESC * busy_one);
+               bcp->using_desc = selected;
+               bau_desc_new = bcp->descriptor_base;
+               bau_desc_new += (ITEMS_PER_DESC * selected);
+               *bau_desc_new = *bau_desc_old;
+       } else {
+               /*
+                * All are busy. Wait for the normal one for this cpu to
+                * free up.
+                */
+               stat->s_uv2_war_waits++;
+               spin_unlock(&hmaster->uvhub_lock);
+               ttm = get_cycles();
+               do {
+                       cpu_relax();
+               } while (normal_busy(bcp));
+               spin_lock(&hmaster->uvhub_lock);
+               /* switch to the original descriptor */
+               bcp->using_desc = normal;
+               bau_desc_old = bcp->descriptor_base;
+               bau_desc_old += (ITEMS_PER_DESC * bcp->using_desc);
+               bcp->using_desc = (ITEMS_PER_DESC * normal);
+               bau_desc_new = bcp->descriptor_base;
+               bau_desc_new += (ITEMS_PER_DESC * normal);
+               *bau_desc_new = *bau_desc_old; /* copy the entire descriptor */
+       }
+       spin_unlock(&hmaster->uvhub_lock);
+       return FLUSH_RETRY_BUSYBUG;
+}
+
 static int uv2_wait_completion(struct bau_desc *bau_desc,
                                unsigned long mmr_offset, int right_shift,
                                struct bau_control *bcp, long try)
 {
        unsigned long descriptor_stat;
        cycles_t ttm;
-       int cpu = bcp->uvhub_cpu;
+       int desc = bcp->using_desc;
+       long busy_reps = 0;
        struct ptc_stats *stat = bcp->statp;
 
-       descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+       descriptor_stat = uv2_read_status(mmr_offset, right_shift, desc);
 
        /* spin on the status MMR, waiting for it to go idle */
        while (descriptor_stat != UV2H_DESC_IDLE) {
@@ -522,32 +635,35 @@ static int uv2_wait_completion(struct bau_desc *bau_desc,
                 * our message and its state will stay IDLE.
                 */
                if ((descriptor_stat == UV2H_DESC_SOURCE_TIMEOUT) ||
-                   (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) ||
                    (descriptor_stat == UV2H_DESC_DEST_PUT_ERR)) {
                        stat->s_stimeout++;
                        return FLUSH_GIVEUP;
+               } else if (descriptor_stat == UV2H_DESC_DEST_STRONG_NACK) {
+                       stat->s_strongnacks++;
+                       bcp->conseccompletes = 0;
+                       return FLUSH_GIVEUP;
                } else if (descriptor_stat == UV2H_DESC_DEST_TIMEOUT) {
                        stat->s_dtimeout++;
-                       ttm = get_cycles();
-                       /*
-                        * Our retries may be blocked by all destination
-                        * swack resources being consumed, and a timeout
-                        * pending.  In that case hardware returns the
-                        * ERROR that looks like a destination timeout.
-                        */
-                       if (cycles_2_us(ttm - bcp->send_message) < timeout_us) {
-                               bcp->conseccompletes = 0;
-                               return FLUSH_RETRY_PLUGGED;
-                       }
                        bcp->conseccompletes = 0;
                        return FLUSH_RETRY_TIMEOUT;
                } else {
+                       busy_reps++;
+                       if (busy_reps > 1000000) {
+                               /* not to hammer on the clock */
+                               busy_reps = 0;
+                               ttm = get_cycles();
+                               if ((ttm - bcp->send_message) >
+                                       (bcp->clocks_per_100_usec)) {
+                                       return handle_uv2_busy(bcp);
+                               }
+                       }
                        /*
                         * descriptor_stat is still BUSY
                         */
                        cpu_relax();
                }
-               descriptor_stat = uv2_read_status(mmr_offset, right_shift, cpu);
+               descriptor_stat = uv2_read_status(mmr_offset, right_shift,
+                                                                       desc);
        }
        bcp->conseccompletes++;
        return FLUSH_COMPLETE;
@@ -563,17 +679,17 @@ static int wait_completion(struct bau_desc *bau_desc,
 {
        int right_shift;
        unsigned long mmr_offset;
-       int cpu = bcp->uvhub_cpu;
+       int desc = bcp->using_desc;
 
-       if (cpu < UV_CPUS_PER_AS) {
+       if (desc < UV_CPUS_PER_AS) {
                mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_0;
-               right_shift = cpu * UV_ACT_STATUS_SIZE;
+               right_shift = desc * UV_ACT_STATUS_SIZE;
        } else {
                mmr_offset = UVH_LB_BAU_SB_ACTIVATION_STATUS_1;
-               right_shift = ((cpu - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
+               right_shift = ((desc - UV_CPUS_PER_AS) * UV_ACT_STATUS_SIZE);
        }
 
-       if (is_uv1_hub())
+       if (bcp->uvhub_version == 1)
                return uv1_wait_completion(bau_desc, mmr_offset, right_shift,
                                                                bcp, try);
        else
@@ -752,19 +868,22 @@ static void handle_cmplt(int completion_status, struct bau_desc *bau_desc,
  * Returns 1 if it gives up entirely and the original cpu mask is to be
  * returned to the kernel.
  */
-int uv_flush_send_and_wait(struct bau_desc *bau_desc,
-                       struct cpumask *flush_mask, struct bau_control *bcp)
+int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp)
 {
        int seq_number = 0;
        int completion_stat = 0;
+       int uv1 = 0;
        long try = 0;
        unsigned long index;
        cycles_t time1;
        cycles_t time2;
        struct ptc_stats *stat = bcp->statp;
        struct bau_control *hmaster = bcp->uvhub_master;
+       struct uv1_bau_msg_header *uv1_hdr = NULL;
+       struct uv2_bau_msg_header *uv2_hdr = NULL;
+       struct bau_desc *bau_desc;
 
-       if (is_uv1_hub())
+       if (bcp->uvhub_version == 1)
                uv1_throttle(hmaster, stat);
 
        while (hmaster->uvhub_quiesce)
@@ -772,22 +891,39 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
 
        time1 = get_cycles();
        do {
-               if (try == 0) {
-                       bau_desc->header.msg_type = MSG_REGULAR;
+               bau_desc = bcp->descriptor_base;
+               bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
+               if (bcp->uvhub_version == 1) {
+                       uv1 = 1;
+                       uv1_hdr = &bau_desc->header.uv1_hdr;
+               } else
+                       uv2_hdr = &bau_desc->header.uv2_hdr;
+               if ((try == 0) || (completion_stat == FLUSH_RETRY_BUSYBUG)) {
+                       if (uv1)
+                               uv1_hdr->msg_type = MSG_REGULAR;
+                       else
+                               uv2_hdr->msg_type = MSG_REGULAR;
                        seq_number = bcp->message_number++;
                } else {
-                       bau_desc->header.msg_type = MSG_RETRY;
+                       if (uv1)
+                               uv1_hdr->msg_type = MSG_RETRY;
+                       else
+                               uv2_hdr->msg_type = MSG_RETRY;
                        stat->s_retry_messages++;
                }
 
-               bau_desc->header.sequence = seq_number;
-               index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu;
+               if (uv1)
+                       uv1_hdr->sequence = seq_number;
+               else
+                       uv2_hdr->sequence = seq_number;
+               index = (1UL << AS_PUSH_SHIFT) | bcp->using_desc;
                bcp->send_message = get_cycles();
 
                write_mmr_activation(index);
 
                try++;
                completion_stat = wait_completion(bau_desc, bcp, try);
+               /* UV2: wait_completion() may change the bcp->using_desc */
 
                handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat);
 
@@ -798,6 +934,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
                }
                cpu_relax();
        } while ((completion_stat == FLUSH_RETRY_PLUGGED) ||
+                (completion_stat == FLUSH_RETRY_BUSYBUG) ||
                 (completion_stat == FLUSH_RETRY_TIMEOUT));
 
        time2 = get_cycles();
@@ -812,6 +949,7 @@ int uv_flush_send_and_wait(struct bau_desc *bau_desc,
        record_send_stats(time1, time2, bcp, stat, completion_stat, try);
 
        if (completion_stat == FLUSH_GIVEUP)
+               /* FLUSH_GIVEUP will fall back to using IPI's for tlb flush */
                return 1;
        return 0;
 }
@@ -967,7 +1105,7 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
                stat->s_ntargself++;
 
        bau_desc = bcp->descriptor_base;
-       bau_desc += ITEMS_PER_DESC * bcp->uvhub_cpu;
+       bau_desc += (ITEMS_PER_DESC * bcp->using_desc);
        bau_uvhubs_clear(&bau_desc->distribution, UV_DISTRIBUTION_SIZE);
        if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes))
                return NULL;
@@ -980,12 +1118,85 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
         * uv_flush_send_and_wait returns 0 if all cpu's were messaged,
         * or 1 if it gave up and the original cpumask should be returned.
         */
-       if (!uv_flush_send_and_wait(bau_desc, flush_mask, bcp))
+       if (!uv_flush_send_and_wait(flush_mask, bcp))
                return NULL;
        else
                return cpumask;
 }
 
+/*
+ * Search the message queue for any 'other' message with the same software
+ * acknowledge resource bit vector.
+ */
+struct bau_pq_entry *find_another_by_swack(struct bau_pq_entry *msg,
+                       struct bau_control *bcp, unsigned char swack_vec)
+{
+       struct bau_pq_entry *msg_next = msg + 1;
+
+       if (msg_next > bcp->queue_last)
+               msg_next = bcp->queue_first;
+       while ((msg_next->swack_vec != 0) && (msg_next != msg)) {
+               if (msg_next->swack_vec == swack_vec)
+                       return msg_next;
+               msg_next++;
+               if (msg_next > bcp->queue_last)
+                       msg_next = bcp->queue_first;
+       }
+       return NULL;
+}
+
+/*
+ * UV2 needs to work around a bug in which an arriving message has not
+ * set a bit in the UVH_LB_BAU_INTD_SOFTWARE_ACKNOWLEDGE register.
+ * Such a message must be ignored.
+ */
+void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp)
+{
+       unsigned long mmr_image;
+       unsigned char swack_vec;
+       struct bau_pq_entry *msg = mdp->msg;
+       struct bau_pq_entry *other_msg;
+
+       mmr_image = read_mmr_sw_ack();
+       swack_vec = msg->swack_vec;
+
+       if ((swack_vec & mmr_image) == 0) {
+               /*
+                * This message was assigned a swack resource, but no
+                * reserved acknowledgment is pending.
+                * The bug has prevented this message from setting the MMR.
+                * And no other message has used the same sw_ack resource.
+                * Do the requested shootdown but do not reply to the msg.
+                * (the 0 means make no acknowledge)
+                */
+               bau_process_message(mdp, bcp, 0);
+               return;
+       }
+
+       /*
+        * Some message has set the MMR 'pending' bit; it might have been
+        * another message.  Look for that message.
+        */
+       other_msg = find_another_by_swack(msg, bcp, msg->swack_vec);
+       if (other_msg) {
+               /* There is another.  Do not ack the current one. */
+               bau_process_message(mdp, bcp, 0);
+               /*
+                * Let the natural processing of that message acknowledge
+                * it. Don't get the processing of sw_ack's out of order.
+                */
+               return;
+       }
+
+       /*
+        * There is no other message using this sw_ack, so it is safe to
+        * acknowledge it.
+        */
+       bau_process_message(mdp, bcp, 1);
+
+       return;
+}
+
 /*
  * The BAU message interrupt comes here. (registered by set_intr_gate)
  * See entry_64.S
@@ -1009,6 +1220,7 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
        struct ptc_stats *stat;
        struct msg_desc msgdesc;
 
+       ack_APIC_irq();
        time_start = get_cycles();
 
        bcp = &per_cpu(bau_control, smp_processor_id());
@@ -1022,9 +1234,11 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
                count++;
 
                msgdesc.msg_slot = msg - msgdesc.queue_first;
-               msgdesc.swack_slot = ffs(msg->swack_vec) - 1;
                msgdesc.msg = msg;
-               bau_process_message(&msgdesc, bcp);
+               if (bcp->uvhub_version == 2)
+                       process_uv2_message(&msgdesc, bcp);
+               else
+                       bau_process_message(&msgdesc, bcp, 1);
 
                msg++;
                if (msg > msgdesc.queue_last)
@@ -1036,8 +1250,6 @@ void uv_bau_message_interrupt(struct pt_regs *regs)
                stat->d_nomsg++;
        else if (count > 1)
                stat->d_multmsg++;
-
-       ack_APIC_irq();
 }
 
 /*
@@ -1083,7 +1295,7 @@ static void __init enable_timeouts(void)
                 */
                mmr_image |= (1L << SOFTACK_MSHIFT);
                if (is_uv2_hub()) {
-                       mmr_image |= (1L << UV2_LEG_SHFT);
+                       mmr_image &= ~(1L << UV2_LEG_SHFT);
                        mmr_image |= (1L << UV2_EXT_SHFT);
                }
                write_mmr_misc_control(pnode, mmr_image);
@@ -1136,13 +1348,13 @@ static int ptc_seq_show(struct seq_file *file, void *data)
                seq_printf(file,
                        "remotehub numuvhubs numuvhubs16 numuvhubs8 ");
                seq_printf(file,
-                       "numuvhubs4 numuvhubs2 numuvhubs1 dto retries rok ");
+                   "numuvhubs4 numuvhubs2 numuvhubs1 dto snacks retries rok ");
                seq_printf(file,
                        "resetp resett giveup sto bz throt swack recv rtime ");
                seq_printf(file,
                        "all one mult none retry canc nocan reset rcan ");
                seq_printf(file,
-                       "disable enable\n");
+                       "disable enable wars warshw warwaits\n");
        }
        if (cpu < num_possible_cpus() && cpu_online(cpu)) {
                stat = &per_cpu(ptcstats, cpu);
@@ -1154,10 +1366,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
                           stat->s_ntargremotes, stat->s_ntargcpu,
                           stat->s_ntarglocaluvhub, stat->s_ntargremoteuvhub,
                           stat->s_ntarguvhub, stat->s_ntarguvhub16);
-               seq_printf(file, "%ld %ld %ld %ld %ld ",
+               seq_printf(file, "%ld %ld %ld %ld %ld %ld ",
                           stat->s_ntarguvhub8, stat->s_ntarguvhub4,
                           stat->s_ntarguvhub2, stat->s_ntarguvhub1,
-                          stat->s_dtimeout);
+                          stat->s_dtimeout, stat->s_strongnacks);
                seq_printf(file, "%ld %ld %ld %ld %ld %ld %ld %ld ",
                           stat->s_retry_messages, stat->s_retriesok,
                           stat->s_resets_plug, stat->s_resets_timeout,
@@ -1173,8 +1385,10 @@ static int ptc_seq_show(struct seq_file *file, void *data)
                           stat->d_nomsg, stat->d_retries, stat->d_canceled,
                           stat->d_nocanceled, stat->d_resets,
                           stat->d_rcanceled);
-               seq_printf(file, "%ld %ld\n",
-                       stat->s_bau_disabled, stat->s_bau_reenabled);
+               seq_printf(file, "%ld %ld %ld %ld %ld\n",
+                       stat->s_bau_disabled, stat->s_bau_reenabled,
+                       stat->s_uv2_wars, stat->s_uv2_wars_hw,
+                       stat->s_uv2_war_waits);
        }
        return 0;
 }
@@ -1432,12 +1646,15 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
 {
        int i;
        int cpu;
+       int uv1 = 0;
        unsigned long gpa;
        unsigned long m;
        unsigned long n;
        size_t dsize;
        struct bau_desc *bau_desc;
        struct bau_desc *bd2;
+       struct uv1_bau_msg_header *uv1_hdr;
+       struct uv2_bau_msg_header *uv2_hdr;
        struct bau_control *bcp;
 
        /*
@@ -1451,6 +1668,8 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
        gpa = uv_gpa(bau_desc);
        n = uv_gpa_to_gnode(gpa);
        m = uv_gpa_to_offset(gpa);
+       if (is_uv1_hub())
+               uv1 = 1;
 
        /* the 14-bit pnode */
        write_mmr_descriptor_base(pnode, (n << UV_DESC_PSHIFT | m));
@@ -1461,21 +1680,33 @@ static void activation_descriptor_init(int node, int pnode, int base_pnode)
         */
        for (i = 0, bd2 = bau_desc; i < (ADP_SZ * ITEMS_PER_DESC); i++, bd2++) {
                memset(bd2, 0, sizeof(struct bau_desc));
-               bd2->header.swack_flag =        1;
-               /*
-                * The base_dest_nasid set in the message header is the nasid
-                * of the first uvhub in the partition. The bit map will
-                * indicate destination pnode numbers relative to that base.
-                * They may not be consecutive if nasid striding is being used.
-                */
-               bd2->header.base_dest_nasid =   UV_PNODE_TO_NASID(base_pnode);
-               bd2->header.dest_subnodeid =    UV_LB_SUBNODEID;
-               bd2->header.command =           UV_NET_ENDPOINT_INTD;
-               bd2->header.int_both =          1;
-               /*
-                * all others need to be set to zero:
-                *   fairness chaining multilevel count replied_to
-                */
+               if (uv1) {
+                       uv1_hdr = &bd2->header.uv1_hdr;
+                       uv1_hdr->swack_flag =   1;
+                       /*
+                        * The base_dest_nasid set in the message header
+                        * is the nasid of the first uvhub in the partition.
+                        * The bit map will indicate destination pnode numbers
+                        * relative to that base. They may not be consecutive
+                        * if nasid striding is being used.
+                        */
+                       uv1_hdr->base_dest_nasid =
+                                               UV_PNODE_TO_NASID(base_pnode);
+                       uv1_hdr->dest_subnodeid =       UV_LB_SUBNODEID;
+                       uv1_hdr->command =              UV_NET_ENDPOINT_INTD;
+                       uv1_hdr->int_both =             1;
+                       /*
+                        * all others need to be set to zero:
+                        *   fairness chaining multilevel count replied_to
+                        */
+               } else {
+                       uv2_hdr = &bd2->header.uv2_hdr;
+                       uv2_hdr->swack_flag =   1;
+                       uv2_hdr->base_dest_nasid =
+                                               UV_PNODE_TO_NASID(base_pnode);
+                       uv2_hdr->dest_subnodeid =       UV_LB_SUBNODEID;
+                       uv2_hdr->command =              UV_NET_ENDPOINT_INTD;
+               }
        }
        for_each_present_cpu(cpu) {
                if (pnode != uv_blade_to_pnode(uv_cpu_to_blade_id(cpu)))
@@ -1531,6 +1762,7 @@ static void pq_init(int node, int pnode)
        write_mmr_payload_first(pnode, pn_first);
        write_mmr_payload_tail(pnode, first);
        write_mmr_payload_last(pnode, last);
+       write_gmmr_sw_ack(pnode, 0xffffUL);
 
        /* in effect, all msg_type's are set to MSG_NOOP */
        memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE);
@@ -1584,14 +1816,14 @@ static int calculate_destination_timeout(void)
                ts_ns = base * mult1 * mult2;
                ret = ts_ns / 1000;
        } else {
-               /* 4 bits  0/1 for 10/80us, 3 bits of multiplier */
-               mmr_image = uv_read_local_mmr(UVH_AGING_PRESCALE_SEL);
+               /* 4 bits  0/1 for 10/80us base, 3 bits of multiplier */
+               mmr_image = uv_read_local_mmr(UVH_LB_BAU_MISC_CONTROL);
                mmr_image = (mmr_image & UV_SA_MASK) >> UV_SA_SHFT;
                if (mmr_image & (1L << UV2_ACK_UNITS_SHFT))
-                       mult1 = 80;
+                       base = 80;
                else
-                       mult1 = 10;
-               base = mmr_image & UV2_ACK_MASK;
+                       base = 10;
+               mult1 = mmr_image & UV2_ACK_MASK;
                ret = mult1 * base;
        }
        return ret;
@@ -1618,6 +1850,9 @@ static void __init init_per_cpu_tunables(void)
                bcp->cong_response_us           = congested_respns_us;
                bcp->cong_reps                  = congested_reps;
                bcp->cong_period                = congested_period;
+               bcp->clocks_per_100_usec =      usec_2_cycles(100);
+               spin_lock_init(&bcp->queue_lock);
+               spin_lock_init(&bcp->uvhub_lock);
        }
 }
 
@@ -1728,8 +1963,17 @@ static int scan_sock(struct socket_desc *sdp, struct uvhub_desc *bdp,
                bcp->cpus_in_socket = sdp->num_cpus;
                bcp->socket_master = *smasterp;
                bcp->uvhub = bdp->uvhub;
+               if (is_uv1_hub())
+                       bcp->uvhub_version = 1;
+               else if (is_uv2_hub())
+                       bcp->uvhub_version = 2;
+               else {
+                       printk(KERN_EMERG "uvhub version not 1 or 2\n");
+                       return 1;
+               }
                bcp->uvhub_master = *hmasterp;
                bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id;
+               bcp->using_desc = bcp->uvhub_cpu;
                if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) {
                        printk(KERN_EMERG "%d cpus per uvhub invalid\n",
                                bcp->uvhub_cpu);
@@ -1845,6 +2089,8 @@ static int __init uv_bau_init(void)
                        uv_base_pnode = uv_blade_to_pnode(uvhub);
        }
 
+       enable_timeouts();
+
        if (init_per_cpu(nuvhubs, uv_base_pnode)) {
                nobau = 1;
                return 0;
@@ -1855,7 +2101,6 @@ static int __init uv_bau_init(void)
                if (uv_blade_nr_possible_cpus(uvhub))
                        init_uvhub(uvhub, vector, uv_base_pnode);
 
-       enable_timeouts();
        alloc_intr_gate(vector, uv_bau_message_intr1);
 
        for_each_possible_blade(uvhub) {
@@ -1867,7 +2112,8 @@ static int __init uv_bau_init(void)
                        val = 1L << 63;
                        write_gmmr_activation(pnode, val);
                        mmr = 1; /* should be 1 to broadcast to both sockets */
-                       write_mmr_data_broadcast(pnode, mmr);
+                       if (!is_uv1_hub())
+                               write_mmr_data_broadcast(pnode, mmr);
                }
        }
 
index 374a05d8ad22156b9a82e3ae0a643d9de785599f..f25c2765a5c9b48bbac24551dc2e82fc9e2d2ce2 100644 (file)
@@ -25,7 +25,7 @@ struct uv_irq_2_mmr_pnode{
        int                     irq;
 };
 
-static spinlock_t              uv_irq_lock;
+static DEFINE_SPINLOCK(uv_irq_lock);
 static struct rb_root          uv_irq_root;
 
 static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);
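
For context: DEFINE_SPINLOCK(x) statically initializes the lock, expanding to roughly spinlock_t x = __SPIN_LOCK_UNLOCKED(x);. The bare spinlock_t declaration it replaces was only zero-initialized and never passed through spin_lock_init(), which is not a validly initialized lock once spinlock debugging or lockdep is enabled.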
diff --git a/arch/x86/syscalls/Makefile b/arch/x86/syscalls/Makefile
new file mode 100644 (file)
index 0000000..564b247
--- /dev/null
@@ -0,0 +1,43 @@
+out := $(obj)/../include/generated/asm
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)')
+
+syscall32 := $(srctree)/$(src)/syscall_32.tbl
+syscall64 := $(srctree)/$(src)/syscall_64.tbl
+
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR  $@
+      cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' $< $@ \
+                  $(syshdr_abi_$(basetarget)) $(syshdr_pfx_$(basetarget))
+quiet_cmd_systbl = SYSTBL  $@
+      cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@
+
+syshdr_abi_unistd_32 := i386
+$(out)/unistd_32.h: $(syscall32) $(syshdr)
+       $(call if_changed,syshdr)
+
+syshdr_abi_unistd_32_ia32 := i386
+syshdr_pfx_unistd_32_ia32 := ia32_
+$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr)
+       $(call if_changed,syshdr)
+
+syshdr_abi_unistd_64 := 64
+$(out)/unistd_64.h: $(syscall64) $(syshdr)
+       $(call if_changed,syshdr)
+
+$(out)/syscalls_32.h: $(syscall32) $(systbl)
+       $(call if_changed,systbl)
+$(out)/syscalls_64.h: $(syscall64) $(systbl)
+       $(call if_changed,systbl)
+
+syshdr-y                       += unistd_32.h unistd_64.h
+syshdr-y                       += syscalls_32.h
+syshdr-$(CONFIG_X86_64)                += unistd_32_ia32.h
+syshdr-$(CONFIG_X86_64)                += syscalls_64.h
+
+targets        += $(syshdr-y)
+
+all: $(addprefix $(out)/,$(targets))
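
For orientation (an annotation, not part of the patch): the two syshdr rules above are fed the same syscall_32.tbl and differ only in the ia32_ prefix variable, so the second header simply renames the macros. A hedged sketch of the kind of output syscallhdr.sh produces for the first few rows — guard lines omitted, and only illustrative; the real generated files land under arch/x86/include/generated/asm/:

	/* unistd_32.h -- no prefix */
	#define __NR_restart_syscall 0
	#define __NR_exit 1
	#define __NR_fork 2

	/* unistd_32_ia32.h -- built with syshdr_pfx_unistd_32_ia32 := ia32_ */
	#define __NR_ia32_restart_syscall 0
	#define __NR_ia32_exit 1
	#define __NR_ia32_fork 2
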
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
new file mode 100644 (file)
index 0000000..ce98e28
--- /dev/null
@@ -0,0 +1,357 @@
+#
+# 32-bit system call numbers and entry vectors
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The abi is always "i386" for this file.
+#
+0      i386    restart_syscall         sys_restart_syscall
+1      i386    exit                    sys_exit
+2      i386    fork                    ptregs_fork                     stub32_fork
+3      i386    read                    sys_read
+4      i386    write                   sys_write
+5      i386    open                    sys_open                        compat_sys_open
+6      i386    close                   sys_close
+7      i386    waitpid                 sys_waitpid                     sys32_waitpid
+8      i386    creat                   sys_creat
+9      i386    link                    sys_link
+10     i386    unlink                  sys_unlink
+11     i386    execve                  ptregs_execve                   stub32_execve
+12     i386    chdir                   sys_chdir
+13     i386    time                    sys_time                        compat_sys_time
+14     i386    mknod                   sys_mknod
+15     i386    chmod                   sys_chmod
+16     i386    lchown                  sys_lchown16
+17     i386    break
+18     i386    oldstat                 sys_stat
+19     i386    lseek                   sys_lseek                       sys32_lseek
+20     i386    getpid                  sys_getpid
+21     i386    mount                   sys_mount                       compat_sys_mount
+22     i386    umount                  sys_oldumount
+23     i386    setuid                  sys_setuid16
+24     i386    getuid                  sys_getuid16
+25     i386    stime                   sys_stime                       compat_sys_stime
+26     i386    ptrace                  sys_ptrace                      compat_sys_ptrace
+27     i386    alarm                   sys_alarm
+28     i386    oldfstat                sys_fstat
+29     i386    pause                   sys_pause
+30     i386    utime                   sys_utime                       compat_sys_utime
+31     i386    stty
+32     i386    gtty
+33     i386    access                  sys_access
+34     i386    nice                    sys_nice
+35     i386    ftime
+36     i386    sync                    sys_sync
+37     i386    kill                    sys_kill                        sys32_kill
+38     i386    rename                  sys_rename
+39     i386    mkdir                   sys_mkdir
+40     i386    rmdir                   sys_rmdir
+41     i386    dup                     sys_dup
+42     i386    pipe                    sys_pipe
+43     i386    times                   sys_times                       compat_sys_times
+44     i386    prof
+45     i386    brk                     sys_brk
+46     i386    setgid                  sys_setgid16
+47     i386    getgid                  sys_getgid16
+48     i386    signal                  sys_signal
+49     i386    geteuid                 sys_geteuid16
+50     i386    getegid                 sys_getegid16
+51     i386    acct                    sys_acct
+52     i386    umount2                 sys_umount
+53     i386    lock
+54     i386    ioctl                   sys_ioctl                       compat_sys_ioctl
+55     i386    fcntl                   sys_fcntl                       compat_sys_fcntl64
+56     i386    mpx
+57     i386    setpgid                 sys_setpgid
+58     i386    ulimit
+59     i386    oldolduname             sys_olduname
+60     i386    umask                   sys_umask
+61     i386    chroot                  sys_chroot
+62     i386    ustat                   sys_ustat                       compat_sys_ustat
+63     i386    dup2                    sys_dup2
+64     i386    getppid                 sys_getppid
+65     i386    getpgrp                 sys_getpgrp
+66     i386    setsid                  sys_setsid
+67     i386    sigaction               sys_sigaction                   sys32_sigaction
+68     i386    sgetmask                sys_sgetmask
+69     i386    ssetmask                sys_ssetmask
+70     i386    setreuid                sys_setreuid16
+71     i386    setregid                sys_setregid16
+72     i386    sigsuspend              sys_sigsuspend                  sys32_sigsuspend
+73     i386    sigpending              sys_sigpending                  compat_sys_sigpending
+74     i386    sethostname             sys_sethostname
+75     i386    setrlimit               sys_setrlimit                   compat_sys_setrlimit
+76     i386    getrlimit               sys_old_getrlimit               compat_sys_old_getrlimit
+77     i386    getrusage               sys_getrusage                   compat_sys_getrusage
+78     i386    gettimeofday            sys_gettimeofday                compat_sys_gettimeofday
+79     i386    settimeofday            sys_settimeofday                compat_sys_settimeofday
+80     i386    getgroups               sys_getgroups16
+81     i386    setgroups               sys_setgroups16
+82     i386    select                  sys_old_select                  compat_sys_old_select
+83     i386    symlink                 sys_symlink
+84     i386    oldlstat                sys_lstat
+85     i386    readlink                sys_readlink
+86     i386    uselib                  sys_uselib
+87     i386    swapon                  sys_swapon
+88     i386    reboot                  sys_reboot
+89     i386    readdir                 sys_old_readdir                 compat_sys_old_readdir
+90     i386    mmap                    sys_old_mmap                    sys32_mmap
+91     i386    munmap                  sys_munmap
+92     i386    truncate                sys_truncate
+93     i386    ftruncate               sys_ftruncate
+94     i386    fchmod                  sys_fchmod
+95     i386    fchown                  sys_fchown16
+96     i386    getpriority             sys_getpriority
+97     i386    setpriority             sys_setpriority
+98     i386    profil
+99     i386    statfs                  sys_statfs                      compat_sys_statfs
+100    i386    fstatfs                 sys_fstatfs                     compat_sys_fstatfs
+101    i386    ioperm                  sys_ioperm
+102    i386    socketcall              sys_socketcall                  compat_sys_socketcall
+103    i386    syslog                  sys_syslog
+104    i386    setitimer               sys_setitimer                   compat_sys_setitimer
+105    i386    getitimer               sys_getitimer                   compat_sys_getitimer
+106    i386    stat                    sys_newstat                     compat_sys_newstat
+107    i386    lstat                   sys_newlstat                    compat_sys_newlstat
+108    i386    fstat                   sys_newfstat                    compat_sys_newfstat
+109    i386    olduname                sys_uname
+110    i386    iopl                    ptregs_iopl                     stub32_iopl
+111    i386    vhangup                 sys_vhangup
+112    i386    idle
+113    i386    vm86old                 ptregs_vm86old                  sys32_vm86_warning
+114    i386    wait4                   sys_wait4                       compat_sys_wait4
+115    i386    swapoff                 sys_swapoff
+116    i386    sysinfo                 sys_sysinfo                     compat_sys_sysinfo
+117    i386    ipc                     sys_ipc                         sys32_ipc
+118    i386    fsync                   sys_fsync
+119    i386    sigreturn               ptregs_sigreturn                stub32_sigreturn
+120    i386    clone                   ptregs_clone                    stub32_clone
+121    i386    setdomainname           sys_setdomainname
+122    i386    uname                   sys_newuname
+123    i386    modify_ldt              sys_modify_ldt
+124    i386    adjtimex                sys_adjtimex                    compat_sys_adjtimex
+125    i386    mprotect                sys_mprotect                    sys32_mprotect
+126    i386    sigprocmask             sys_sigprocmask                 compat_sys_sigprocmask
+127    i386    create_module
+128    i386    init_module             sys_init_module
+129    i386    delete_module           sys_delete_module
+130    i386    get_kernel_syms
+131    i386    quotactl                sys_quotactl                    sys32_quotactl
+132    i386    getpgid                 sys_getpgid
+133    i386    fchdir                  sys_fchdir
+134    i386    bdflush                 sys_bdflush
+135    i386    sysfs                   sys_sysfs
+136    i386    personality             sys_personality
+137    i386    afs_syscall
+138    i386    setfsuid                sys_setfsuid16
+139    i386    setfsgid                sys_setfsgid16
+140    i386    _llseek                 sys_llseek
+141    i386    getdents                sys_getdents                    compat_sys_getdents
+142    i386    _newselect              sys_select                      compat_sys_select
+143    i386    flock                   sys_flock
+144    i386    msync                   sys_msync
+145    i386    readv                   sys_readv                       compat_sys_readv
+146    i386    writev                  sys_writev                      compat_sys_writev
+147    i386    getsid                  sys_getsid
+148    i386    fdatasync               sys_fdatasync
+149    i386    _sysctl                 sys_sysctl                      compat_sys_sysctl
+150    i386    mlock                   sys_mlock
+151    i386    munlock                 sys_munlock
+152    i386    mlockall                sys_mlockall
+153    i386    munlockall              sys_munlockall
+154    i386    sched_setparam          sys_sched_setparam
+155    i386    sched_getparam          sys_sched_getparam
+156    i386    sched_setscheduler      sys_sched_setscheduler
+157    i386    sched_getscheduler      sys_sched_getscheduler
+158    i386    sched_yield             sys_sched_yield
+159    i386    sched_get_priority_max  sys_sched_get_priority_max
+160    i386    sched_get_priority_min  sys_sched_get_priority_min
+161    i386    sched_rr_get_interval   sys_sched_rr_get_interval       sys32_sched_rr_get_interval
+162    i386    nanosleep               sys_nanosleep                   compat_sys_nanosleep
+163    i386    mremap                  sys_mremap
+164    i386    setresuid               sys_setresuid16
+165    i386    getresuid               sys_getresuid16
+166    i386    vm86                    ptregs_vm86                     sys32_vm86_warning
+167    i386    query_module
+168    i386    poll                    sys_poll
+169    i386    nfsservctl
+170    i386    setresgid               sys_setresgid16
+171    i386    getresgid               sys_getresgid16
+172    i386    prctl                   sys_prctl
+173    i386    rt_sigreturn            ptregs_rt_sigreturn             stub32_rt_sigreturn
+174    i386    rt_sigaction            sys_rt_sigaction                sys32_rt_sigaction
+175    i386    rt_sigprocmask          sys_rt_sigprocmask              sys32_rt_sigprocmask
+176    i386    rt_sigpending           sys_rt_sigpending               sys32_rt_sigpending
+177    i386    rt_sigtimedwait         sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait
+178    i386    rt_sigqueueinfo         sys_rt_sigqueueinfo             sys32_rt_sigqueueinfo
+179    i386    rt_sigsuspend           sys_rt_sigsuspend
+180    i386    pread64                 sys_pread64                     sys32_pread
+181    i386    pwrite64                sys_pwrite64                    sys32_pwrite
+182    i386    chown                   sys_chown16
+183    i386    getcwd                  sys_getcwd
+184    i386    capget                  sys_capget
+185    i386    capset                  sys_capset
+186    i386    sigaltstack             ptregs_sigaltstack              stub32_sigaltstack
+187    i386    sendfile                sys_sendfile                    sys32_sendfile
+188    i386    getpmsg
+189    i386    putpmsg
+190    i386    vfork                   ptregs_vfork                    stub32_vfork
+191    i386    ugetrlimit              sys_getrlimit                   compat_sys_getrlimit
+192    i386    mmap2                   sys_mmap_pgoff
+193    i386    truncate64              sys_truncate64                  sys32_truncate64
+194    i386    ftruncate64             sys_ftruncate64                 sys32_ftruncate64
+195    i386    stat64                  sys_stat64                      sys32_stat64
+196    i386    lstat64                 sys_lstat64                     sys32_lstat64
+197    i386    fstat64                 sys_fstat64                     sys32_fstat64
+198    i386    lchown32                sys_lchown
+199    i386    getuid32                sys_getuid
+200    i386    getgid32                sys_getgid
+201    i386    geteuid32               sys_geteuid
+202    i386    getegid32               sys_getegid
+203    i386    setreuid32              sys_setreuid
+204    i386    setregid32              sys_setregid
+205    i386    getgroups32             sys_getgroups
+206    i386    setgroups32             sys_setgroups
+207    i386    fchown32                sys_fchown
+208    i386    setresuid32             sys_setresuid
+209    i386    getresuid32             sys_getresuid
+210    i386    setresgid32             sys_setresgid
+211    i386    getresgid32             sys_getresgid
+212    i386    chown32                 sys_chown
+213    i386    setuid32                sys_setuid
+214    i386    setgid32                sys_setgid
+215    i386    setfsuid32              sys_setfsuid
+216    i386    setfsgid32              sys_setfsgid
+217    i386    pivot_root              sys_pivot_root
+218    i386    mincore                 sys_mincore
+219    i386    madvise                 sys_madvise
+220    i386    getdents64              sys_getdents64                  compat_sys_getdents64
+221    i386    fcntl64                 sys_fcntl64                     compat_sys_fcntl64
+# 222 is unused
+# 223 is unused
+224    i386    gettid                  sys_gettid
+225    i386    readahead               sys_readahead                   sys32_readahead
+226    i386    setxattr                sys_setxattr
+227    i386    lsetxattr               sys_lsetxattr
+228    i386    fsetxattr               sys_fsetxattr
+229    i386    getxattr                sys_getxattr
+230    i386    lgetxattr               sys_lgetxattr
+231    i386    fgetxattr               sys_fgetxattr
+232    i386    listxattr               sys_listxattr
+233    i386    llistxattr              sys_llistxattr
+234    i386    flistxattr              sys_flistxattr
+235    i386    removexattr             sys_removexattr
+236    i386    lremovexattr            sys_lremovexattr
+237    i386    fremovexattr            sys_fremovexattr
+238    i386    tkill                   sys_tkill
+239    i386    sendfile64              sys_sendfile64
+240    i386    futex                   sys_futex                       compat_sys_futex
+241    i386    sched_setaffinity       sys_sched_setaffinity           compat_sys_sched_setaffinity
+242    i386    sched_getaffinity       sys_sched_getaffinity           compat_sys_sched_getaffinity
+243    i386    set_thread_area         sys_set_thread_area
+244    i386    get_thread_area         sys_get_thread_area
+245    i386    io_setup                sys_io_setup                    compat_sys_io_setup
+246    i386    io_destroy              sys_io_destroy
+247    i386    io_getevents            sys_io_getevents                compat_sys_io_getevents
+248    i386    io_submit               sys_io_submit                   compat_sys_io_submit
+249    i386    io_cancel               sys_io_cancel
+250    i386    fadvise64               sys_fadvise64                   sys32_fadvise64
+# 251 is available for reuse (was briefly sys_set_zone_reclaim)
+252    i386    exit_group              sys_exit_group
+253    i386    lookup_dcookie          sys_lookup_dcookie              sys32_lookup_dcookie
+254    i386    epoll_create            sys_epoll_create
+255    i386    epoll_ctl               sys_epoll_ctl
+256    i386    epoll_wait              sys_epoll_wait
+257    i386    remap_file_pages        sys_remap_file_pages
+258    i386    set_tid_address         sys_set_tid_address
+259    i386    timer_create            sys_timer_create                compat_sys_timer_create
+260    i386    timer_settime           sys_timer_settime               compat_sys_timer_settime
+261    i386    timer_gettime           sys_timer_gettime               compat_sys_timer_gettime
+262    i386    timer_getoverrun        sys_timer_getoverrun
+263    i386    timer_delete            sys_timer_delete
+264    i386    clock_settime           sys_clock_settime               compat_sys_clock_settime
+265    i386    clock_gettime           sys_clock_gettime               compat_sys_clock_gettime
+266    i386    clock_getres            sys_clock_getres                compat_sys_clock_getres
+267    i386    clock_nanosleep         sys_clock_nanosleep             compat_sys_clock_nanosleep
+268    i386    statfs64                sys_statfs64                    compat_sys_statfs64
+269    i386    fstatfs64               sys_fstatfs64                   compat_sys_fstatfs64
+270    i386    tgkill                  sys_tgkill
+271    i386    utimes                  sys_utimes                      compat_sys_utimes
+272    i386    fadvise64_64            sys_fadvise64_64                sys32_fadvise64_64
+273    i386    vserver
+274    i386    mbind                   sys_mbind
+275    i386    get_mempolicy           sys_get_mempolicy               compat_sys_get_mempolicy
+276    i386    set_mempolicy           sys_set_mempolicy
+277    i386    mq_open                 sys_mq_open                     compat_sys_mq_open
+278    i386    mq_unlink               sys_mq_unlink
+279    i386    mq_timedsend            sys_mq_timedsend                compat_sys_mq_timedsend
+280    i386    mq_timedreceive         sys_mq_timedreceive             compat_sys_mq_timedreceive
+281    i386    mq_notify               sys_mq_notify                   compat_sys_mq_notify
+282    i386    mq_getsetattr           sys_mq_getsetattr               compat_sys_mq_getsetattr
+283    i386    kexec_load              sys_kexec_load                  compat_sys_kexec_load
+284    i386    waitid                  sys_waitid                      compat_sys_waitid
+# 285 sys_setaltroot
+286    i386    add_key                 sys_add_key
+287    i386    request_key             sys_request_key
+288    i386    keyctl                  sys_keyctl
+289    i386    ioprio_set              sys_ioprio_set
+290    i386    ioprio_get              sys_ioprio_get
+291    i386    inotify_init            sys_inotify_init
+292    i386    inotify_add_watch       sys_inotify_add_watch
+293    i386    inotify_rm_watch        sys_inotify_rm_watch
+294    i386    migrate_pages           sys_migrate_pages
+295    i386    openat                  sys_openat                      compat_sys_openat
+296    i386    mkdirat                 sys_mkdirat
+297    i386    mknodat                 sys_mknodat
+298    i386    fchownat                sys_fchownat
+299    i386    futimesat               sys_futimesat                   compat_sys_futimesat
+300    i386    fstatat64               sys_fstatat64                   sys32_fstatat
+301    i386    unlinkat                sys_unlinkat
+302    i386    renameat                sys_renameat
+303    i386    linkat                  sys_linkat
+304    i386    symlinkat               sys_symlinkat
+305    i386    readlinkat              sys_readlinkat
+306    i386    fchmodat                sys_fchmodat
+307    i386    faccessat               sys_faccessat
+308    i386    pselect6                sys_pselect6                    compat_sys_pselect6
+309    i386    ppoll                   sys_ppoll                       compat_sys_ppoll
+310    i386    unshare                 sys_unshare
+311    i386    set_robust_list         sys_set_robust_list             compat_sys_set_robust_list
+312    i386    get_robust_list         sys_get_robust_list             compat_sys_get_robust_list
+313    i386    splice                  sys_splice
+314    i386    sync_file_range         sys_sync_file_range             sys32_sync_file_range
+315    i386    tee                     sys_tee
+316    i386    vmsplice                sys_vmsplice                    compat_sys_vmsplice
+317    i386    move_pages              sys_move_pages                  compat_sys_move_pages
+318    i386    getcpu                  sys_getcpu
+319    i386    epoll_pwait             sys_epoll_pwait
+320    i386    utimensat               sys_utimensat                   compat_sys_utimensat
+321    i386    signalfd                sys_signalfd                    compat_sys_signalfd
+322    i386    timerfd_create          sys_timerfd_create
+323    i386    eventfd                 sys_eventfd
+324    i386    fallocate               sys_fallocate                   sys32_fallocate
+325    i386    timerfd_settime         sys_timerfd_settime             compat_sys_timerfd_settime
+326    i386    timerfd_gettime         sys_timerfd_gettime             compat_sys_timerfd_gettime
+327    i386    signalfd4               sys_signalfd4                   compat_sys_signalfd4
+328    i386    eventfd2                sys_eventfd2
+329    i386    epoll_create1           sys_epoll_create1
+330    i386    dup3                    sys_dup3
+331    i386    pipe2                   sys_pipe2
+332    i386    inotify_init1           sys_inotify_init1
+333    i386    preadv                  sys_preadv                      compat_sys_preadv
+334    i386    pwritev                 sys_pwritev                     compat_sys_pwritev
+335    i386    rt_tgsigqueueinfo       sys_rt_tgsigqueueinfo           compat_sys_rt_tgsigqueueinfo
+336    i386    perf_event_open         sys_perf_event_open
+337    i386    recvmmsg                sys_recvmmsg                    compat_sys_recvmmsg
+338    i386    fanotify_init           sys_fanotify_init
+339    i386    fanotify_mark           sys_fanotify_mark               sys32_fanotify_mark
+340    i386    prlimit64               sys_prlimit64
+341    i386    name_to_handle_at       sys_name_to_handle_at
+342    i386    open_by_handle_at       sys_open_by_handle_at           compat_sys_open_by_handle_at
+343    i386    clock_adjtime           sys_clock_adjtime               compat_sys_clock_adjtime
+344    i386    syncfs                  sys_syncfs
+345    i386    sendmmsg                sys_sendmmsg                    compat_sys_sendmmsg
+346    i386    setns                   sys_setns
+347    i386    process_vm_readv        sys_process_vm_readv            compat_sys_process_vm_readv
+348    i386    process_vm_writev       sys_process_vm_writev           compat_sys_process_vm_writev
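
A note on the rows above that list only a number and a name (17 break, 31 stty, 44 prof, 127 create_module, and similar): they reserve the slot but name no entry point, so syscalltbl.sh further down in this patch emits nothing for them, and those slots keep the sys_ni_syscall filler supplied by the table initializer. A minimal, illustrative sketch of the generated syscalls_32.h fragment around such a hole:

	__SYSCALL_I386(16, sys_lchown16, sys_lchown16)
	/* no line is generated for "17  i386  break" -- slot 17 stays sys_ni_syscall */
	__SYSCALL_I386(18, sys_stat, sys_stat)
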
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
new file mode 100644 (file)
index 0000000..b440a8f
--- /dev/null
@@ -0,0 +1,320 @@
+#
+# 64-bit system call numbers and entry vectors
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The abi is always "64" for this file (for now.)
+#
+0      64      read                    sys_read
+1      64      write                   sys_write
+2      64      open                    sys_open
+3      64      close                   sys_close
+4      64      stat                    sys_newstat
+5      64      fstat                   sys_newfstat
+6      64      lstat                   sys_newlstat
+7      64      poll                    sys_poll
+8      64      lseek                   sys_lseek
+9      64      mmap                    sys_mmap
+10     64      mprotect                sys_mprotect
+11     64      munmap                  sys_munmap
+12     64      brk                     sys_brk
+13     64      rt_sigaction            sys_rt_sigaction
+14     64      rt_sigprocmask          sys_rt_sigprocmask
+15     64      rt_sigreturn            stub_rt_sigreturn
+16     64      ioctl                   sys_ioctl
+17     64      pread64                 sys_pread64
+18     64      pwrite64                sys_pwrite64
+19     64      readv                   sys_readv
+20     64      writev                  sys_writev
+21     64      access                  sys_access
+22     64      pipe                    sys_pipe
+23     64      select                  sys_select
+24     64      sched_yield             sys_sched_yield
+25     64      mremap                  sys_mremap
+26     64      msync                   sys_msync
+27     64      mincore                 sys_mincore
+28     64      madvise                 sys_madvise
+29     64      shmget                  sys_shmget
+30     64      shmat                   sys_shmat
+31     64      shmctl                  sys_shmctl
+32     64      dup                     sys_dup
+33     64      dup2                    sys_dup2
+34     64      pause                   sys_pause
+35     64      nanosleep               sys_nanosleep
+36     64      getitimer               sys_getitimer
+37     64      alarm                   sys_alarm
+38     64      setitimer               sys_setitimer
+39     64      getpid                  sys_getpid
+40     64      sendfile                sys_sendfile64
+41     64      socket                  sys_socket
+42     64      connect                 sys_connect
+43     64      accept                  sys_accept
+44     64      sendto                  sys_sendto
+45     64      recvfrom                sys_recvfrom
+46     64      sendmsg                 sys_sendmsg
+47     64      recvmsg                 sys_recvmsg
+48     64      shutdown                sys_shutdown
+49     64      bind                    sys_bind
+50     64      listen                  sys_listen
+51     64      getsockname             sys_getsockname
+52     64      getpeername             sys_getpeername
+53     64      socketpair              sys_socketpair
+54     64      setsockopt              sys_setsockopt
+55     64      getsockopt              sys_getsockopt
+56     64      clone                   stub_clone
+57     64      fork                    stub_fork
+58     64      vfork                   stub_vfork
+59     64      execve                  stub_execve
+60     64      exit                    sys_exit
+61     64      wait4                   sys_wait4
+62     64      kill                    sys_kill
+63     64      uname                   sys_newuname
+64     64      semget                  sys_semget
+65     64      semop                   sys_semop
+66     64      semctl                  sys_semctl
+67     64      shmdt                   sys_shmdt
+68     64      msgget                  sys_msgget
+69     64      msgsnd                  sys_msgsnd
+70     64      msgrcv                  sys_msgrcv
+71     64      msgctl                  sys_msgctl
+72     64      fcntl                   sys_fcntl
+73     64      flock                   sys_flock
+74     64      fsync                   sys_fsync
+75     64      fdatasync               sys_fdatasync
+76     64      truncate                sys_truncate
+77     64      ftruncate               sys_ftruncate
+78     64      getdents                sys_getdents
+79     64      getcwd                  sys_getcwd
+80     64      chdir                   sys_chdir
+81     64      fchdir                  sys_fchdir
+82     64      rename                  sys_rename
+83     64      mkdir                   sys_mkdir
+84     64      rmdir                   sys_rmdir
+85     64      creat                   sys_creat
+86     64      link                    sys_link
+87     64      unlink                  sys_unlink
+88     64      symlink                 sys_symlink
+89     64      readlink                sys_readlink
+90     64      chmod                   sys_chmod
+91     64      fchmod                  sys_fchmod
+92     64      chown                   sys_chown
+93     64      fchown                  sys_fchown
+94     64      lchown                  sys_lchown
+95     64      umask                   sys_umask
+96     64      gettimeofday            sys_gettimeofday
+97     64      getrlimit               sys_getrlimit
+98     64      getrusage               sys_getrusage
+99     64      sysinfo                 sys_sysinfo
+100    64      times                   sys_times
+101    64      ptrace                  sys_ptrace
+102    64      getuid                  sys_getuid
+103    64      syslog                  sys_syslog
+104    64      getgid                  sys_getgid
+105    64      setuid                  sys_setuid
+106    64      setgid                  sys_setgid
+107    64      geteuid                 sys_geteuid
+108    64      getegid                 sys_getegid
+109    64      setpgid                 sys_setpgid
+110    64      getppid                 sys_getppid
+111    64      getpgrp                 sys_getpgrp
+112    64      setsid                  sys_setsid
+113    64      setreuid                sys_setreuid
+114    64      setregid                sys_setregid
+115    64      getgroups               sys_getgroups
+116    64      setgroups               sys_setgroups
+117    64      setresuid               sys_setresuid
+118    64      getresuid               sys_getresuid
+119    64      setresgid               sys_setresgid
+120    64      getresgid               sys_getresgid
+121    64      getpgid                 sys_getpgid
+122    64      setfsuid                sys_setfsuid
+123    64      setfsgid                sys_setfsgid
+124    64      getsid                  sys_getsid
+125    64      capget                  sys_capget
+126    64      capset                  sys_capset
+127    64      rt_sigpending           sys_rt_sigpending
+128    64      rt_sigtimedwait         sys_rt_sigtimedwait
+129    64      rt_sigqueueinfo         sys_rt_sigqueueinfo
+130    64      rt_sigsuspend           sys_rt_sigsuspend
+131    64      sigaltstack             stub_sigaltstack
+132    64      utime                   sys_utime
+133    64      mknod                   sys_mknod
+134    64      uselib
+135    64      personality             sys_personality
+136    64      ustat                   sys_ustat
+137    64      statfs                  sys_statfs
+138    64      fstatfs                 sys_fstatfs
+139    64      sysfs                   sys_sysfs
+140    64      getpriority             sys_getpriority
+141    64      setpriority             sys_setpriority
+142    64      sched_setparam          sys_sched_setparam
+143    64      sched_getparam          sys_sched_getparam
+144    64      sched_setscheduler      sys_sched_setscheduler
+145    64      sched_getscheduler      sys_sched_getscheduler
+146    64      sched_get_priority_max  sys_sched_get_priority_max
+147    64      sched_get_priority_min  sys_sched_get_priority_min
+148    64      sched_rr_get_interval   sys_sched_rr_get_interval
+149    64      mlock                   sys_mlock
+150    64      munlock                 sys_munlock
+151    64      mlockall                sys_mlockall
+152    64      munlockall              sys_munlockall
+153    64      vhangup                 sys_vhangup
+154    64      modify_ldt              sys_modify_ldt
+155    64      pivot_root              sys_pivot_root
+156    64      _sysctl                 sys_sysctl
+157    64      prctl                   sys_prctl
+158    64      arch_prctl              sys_arch_prctl
+159    64      adjtimex                sys_adjtimex
+160    64      setrlimit               sys_setrlimit
+161    64      chroot                  sys_chroot
+162    64      sync                    sys_sync
+163    64      acct                    sys_acct
+164    64      settimeofday            sys_settimeofday
+165    64      mount                   sys_mount
+166    64      umount2                 sys_umount
+167    64      swapon                  sys_swapon
+168    64      swapoff                 sys_swapoff
+169    64      reboot                  sys_reboot
+170    64      sethostname             sys_sethostname
+171    64      setdomainname           sys_setdomainname
+172    64      iopl                    stub_iopl
+173    64      ioperm                  sys_ioperm
+174    64      create_module
+175    64      init_module             sys_init_module
+176    64      delete_module           sys_delete_module
+177    64      get_kernel_syms
+178    64      query_module
+179    64      quotactl                sys_quotactl
+180    64      nfsservctl
+181    64      getpmsg
+182    64      putpmsg
+183    64      afs_syscall
+184    64      tuxcall
+185    64      security
+186    64      gettid                  sys_gettid
+187    64      readahead               sys_readahead
+188    64      setxattr                sys_setxattr
+189    64      lsetxattr               sys_lsetxattr
+190    64      fsetxattr               sys_fsetxattr
+191    64      getxattr                sys_getxattr
+192    64      lgetxattr               sys_lgetxattr
+193    64      fgetxattr               sys_fgetxattr
+194    64      listxattr               sys_listxattr
+195    64      llistxattr              sys_llistxattr
+196    64      flistxattr              sys_flistxattr
+197    64      removexattr             sys_removexattr
+198    64      lremovexattr            sys_lremovexattr
+199    64      fremovexattr            sys_fremovexattr
+200    64      tkill                   sys_tkill
+201    64      time                    sys_time
+202    64      futex                   sys_futex
+203    64      sched_setaffinity       sys_sched_setaffinity
+204    64      sched_getaffinity       sys_sched_getaffinity
+205    64      set_thread_area
+206    64      io_setup                sys_io_setup
+207    64      io_destroy              sys_io_destroy
+208    64      io_getevents            sys_io_getevents
+209    64      io_submit               sys_io_submit
+210    64      io_cancel               sys_io_cancel
+211    64      get_thread_area
+212    64      lookup_dcookie          sys_lookup_dcookie
+213    64      epoll_create            sys_epoll_create
+214    64      epoll_ctl_old
+215    64      epoll_wait_old
+216    64      remap_file_pages        sys_remap_file_pages
+217    64      getdents64              sys_getdents64
+218    64      set_tid_address         sys_set_tid_address
+219    64      restart_syscall         sys_restart_syscall
+220    64      semtimedop              sys_semtimedop
+221    64      fadvise64               sys_fadvise64
+222    64      timer_create            sys_timer_create
+223    64      timer_settime           sys_timer_settime
+224    64      timer_gettime           sys_timer_gettime
+225    64      timer_getoverrun        sys_timer_getoverrun
+226    64      timer_delete            sys_timer_delete
+227    64      clock_settime           sys_clock_settime
+228    64      clock_gettime           sys_clock_gettime
+229    64      clock_getres            sys_clock_getres
+230    64      clock_nanosleep         sys_clock_nanosleep
+231    64      exit_group              sys_exit_group
+232    64      epoll_wait              sys_epoll_wait
+233    64      epoll_ctl               sys_epoll_ctl
+234    64      tgkill                  sys_tgkill
+235    64      utimes                  sys_utimes
+236    64      vserver
+237    64      mbind                   sys_mbind
+238    64      set_mempolicy           sys_set_mempolicy
+239    64      get_mempolicy           sys_get_mempolicy
+240    64      mq_open                 sys_mq_open
+241    64      mq_unlink               sys_mq_unlink
+242    64      mq_timedsend            sys_mq_timedsend
+243    64      mq_timedreceive         sys_mq_timedreceive
+244    64      mq_notify               sys_mq_notify
+245    64      mq_getsetattr           sys_mq_getsetattr
+246    64      kexec_load              sys_kexec_load
+247    64      waitid                  sys_waitid
+248    64      add_key                 sys_add_key
+249    64      request_key             sys_request_key
+250    64      keyctl                  sys_keyctl
+251    64      ioprio_set              sys_ioprio_set
+252    64      ioprio_get              sys_ioprio_get
+253    64      inotify_init            sys_inotify_init
+254    64      inotify_add_watch       sys_inotify_add_watch
+255    64      inotify_rm_watch        sys_inotify_rm_watch
+256    64      migrate_pages           sys_migrate_pages
+257    64      openat                  sys_openat
+258    64      mkdirat                 sys_mkdirat
+259    64      mknodat                 sys_mknodat
+260    64      fchownat                sys_fchownat
+261    64      futimesat               sys_futimesat
+262    64      newfstatat              sys_newfstatat
+263    64      unlinkat                sys_unlinkat
+264    64      renameat                sys_renameat
+265    64      linkat                  sys_linkat
+266    64      symlinkat               sys_symlinkat
+267    64      readlinkat              sys_readlinkat
+268    64      fchmodat                sys_fchmodat
+269    64      faccessat               sys_faccessat
+270    64      pselect6                sys_pselect6
+271    64      ppoll                   sys_ppoll
+272    64      unshare                 sys_unshare
+273    64      set_robust_list         sys_set_robust_list
+274    64      get_robust_list         sys_get_robust_list
+275    64      splice                  sys_splice
+276    64      tee                     sys_tee
+277    64      sync_file_range         sys_sync_file_range
+278    64      vmsplice                sys_vmsplice
+279    64      move_pages              sys_move_pages
+280    64      utimensat               sys_utimensat
+281    64      epoll_pwait             sys_epoll_pwait
+282    64      signalfd                sys_signalfd
+283    64      timerfd_create          sys_timerfd_create
+284    64      eventfd                 sys_eventfd
+285    64      fallocate               sys_fallocate
+286    64      timerfd_settime         sys_timerfd_settime
+287    64      timerfd_gettime         sys_timerfd_gettime
+288    64      accept4                 sys_accept4
+289    64      signalfd4               sys_signalfd4
+290    64      eventfd2                sys_eventfd2
+291    64      epoll_create1           sys_epoll_create1
+292    64      dup3                    sys_dup3
+293    64      pipe2                   sys_pipe2
+294    64      inotify_init1           sys_inotify_init1
+295    64      preadv                  sys_preadv
+296    64      pwritev                 sys_pwritev
+297    64      rt_tgsigqueueinfo       sys_rt_tgsigqueueinfo
+298    64      perf_event_open         sys_perf_event_open
+299    64      recvmmsg                sys_recvmmsg
+300    64      fanotify_init           sys_fanotify_init
+301    64      fanotify_mark           sys_fanotify_mark
+302    64      prlimit64               sys_prlimit64
+303    64      name_to_handle_at       sys_name_to_handle_at
+304    64      open_by_handle_at       sys_open_by_handle_at
+305    64      clock_adjtime           sys_clock_adjtime
+306    64      syncfs                  sys_syncfs
+307    64      sendmmsg                sys_sendmmsg
+308    64      setns                   sys_setns
+309    64      getcpu                  sys_getcpu
+310    64      process_vm_readv        sys_process_vm_readv
+311    64      process_vm_writev       sys_process_vm_writev
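
Unlike the 32-bit table, the rows above carry no compat column; per syscalltbl.sh below, the entry point is simply repeated as the third macro argument, and the 64-bit consumers in this patch ignore that argument. A one-line illustrative sketch of the generated form:

	__SYSCALL_64(0, sys_read, sys_read)
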
diff --git a/arch/x86/syscalls/syscallhdr.sh b/arch/x86/syscalls/syscallhdr.sh
new file mode 100644 (file)
index 0000000..31fd5f1
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_ASM_X86_`basename "$out" | sed \
+    -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+    -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+    echo "#ifndef ${fileguard}"
+    echo "#define ${fileguard} 1"
+    echo ""
+
+    while read nr abi name entry ; do
+       if [ -z "$offset" ]; then
+           echo "#define __NR_${prefix}${name} $nr"
+       else
+           echo "#define __NR_${prefix}${name} ($offset + $nr)"
+        fi
+    done
+
+    echo ""
+    echo "#endif /* ${fileguard} */"
+) > "$out"
diff --git a/arch/x86/syscalls/syscalltbl.sh b/arch/x86/syscalls/syscalltbl.sh
new file mode 100644 (file)
index 0000000..0e7f8ec
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+in="$1"
+out="$2"
+
+grep '^[0-9]' "$in" | sort -n | (
+    while read nr abi name entry compat; do
+       abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
+       if [ -n "$compat" ]; then
+           echo "__SYSCALL_${abi}($nr, $entry, $compat)"
+       elif [ -n "$entry" ]; then
+           echo "__SYSCALL_${abi}($nr, $entry, $entry)"
+       fi
+    done
+) > "$out"
index 8fb58400e4155b32cb8281f6bd0a53d4b6a8d253..5d065b2222d370e13dd161a6b66e96590b747739 100644 (file)
@@ -37,7 +37,8 @@ subarch-$(CONFIG_MODULES) += ../kernel/module.o
 USER_OBJS := bugs_$(BITS).o ptrace_user.o fault.o
 
 extra-y += user-offsets.s
-$(obj)/user-offsets.s: c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS)
+$(obj)/user-offsets.s: c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) \
+       -Iarch/x86/include/generated
 
 UNPROFILE_OBJS := stub_segv.o
 CFLAGS_stub_segv.o := $(CFLAGS_NO_HARDENING)
index 711b1621747f2e45e8497a13c7c81b9bc260916f..2bbe1ec2d96ab1a4d8ae8ab540eae7f9360c2db4 100644 (file)
@@ -1,5 +1,15 @@
+#ifndef __SYSDEP_X86_PTRACE_H
+#define __SYSDEP_X86_PTRACE_H
+
 #ifdef __i386__
 #include "ptrace_32.h"
 #else
 #include "ptrace_64.h"
 #endif
+
+static inline long regs_return_value(struct uml_pt_regs *regs)
+{
+       return UPT_SYSCALL_RET(regs);
+}
+
+#endif /* __SYSDEP_X86_PTRACE_H */
diff --git a/arch/x86/um/sys_call_table_32.S b/arch/x86/um/sys_call_table_32.S
deleted file mode 100644 (file)
index a7ca80d..0000000
+++ /dev/null
@@ -1,26 +0,0 @@
-#include <linux/linkage.h>
-/* Steal i386 syscall table for our purposes, but with some slight changes.*/
-
-#define sys_iopl sys_ni_syscall
-#define sys_ioperm sys_ni_syscall
-
-#define sys_vm86old sys_ni_syscall
-#define sys_vm86 sys_ni_syscall
-
-#define old_mmap sys_old_mmap
-
-#define ptregs_fork sys_fork
-#define ptregs_execve sys_execve
-#define ptregs_iopl sys_iopl
-#define ptregs_vm86old sys_vm86old
-#define ptregs_clone sys_clone
-#define ptregs_vm86 sys_vm86
-#define ptregs_sigaltstack sys_sigaltstack
-#define ptregs_vfork sys_vfork
-
-.section .rodata,"a"
-
-#include "../kernel/syscall_table_32.S"
-
-ENTRY(syscall_table_size)
-.long .-sys_call_table
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
new file mode 100644 (file)
index 0000000..416bd40
--- /dev/null
@@ -0,0 +1,55 @@
+/*
+ * System call table for UML/i386, copied from arch/x86/kernel/syscall_*.c
+ * with some changes for UML.
+ */
+
+#include <linux/linkage.h>
+#include <linux/sys.h>
+#include <linux/cache.h>
+#include <generated/user_constants.h>
+
+#define __NO_STUBS
+
+/*
+ * Below you can see, in terms of #define's, the differences between the i386
+ * and the UML syscall table.
+ */
+
+/* Not going to be implemented by UML, since we have no hardware. */
+#define sys_iopl sys_ni_syscall
+#define sys_ioperm sys_ni_syscall
+
+#define sys_vm86old sys_ni_syscall
+#define sys_vm86 sys_ni_syscall
+
+#define old_mmap sys_old_mmap
+
+#define ptregs_fork sys_fork
+#define ptregs_execve sys_execve
+#define ptregs_iopl sys_iopl
+#define ptregs_vm86old sys_vm86old
+#define ptregs_clone sys_clone
+#define ptregs_vm86 sys_vm86
+#define ptregs_sigaltstack sys_sigaltstack
+#define ptregs_vfork sys_vfork
+
+#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void sym(void) ;
+#include <asm/syscalls_32.h>
+
+#undef __SYSCALL_I386
+#define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym,
+
+typedef void (*sys_call_ptr_t)(void);
+
+extern void sys_ni_syscall(void);
+
+const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+       /*
+        * Smells like a compiler bug -- it doesn't work
+        * when the & below is removed.
+        */
+       [0 ... __NR_syscall_max] = &sys_ni_syscall,
+#include <asm/syscalls_32.h>
+};
+
+int syscall_table_size = sizeof(sys_call_table);
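
The double include of <asm/syscalls_32.h> above is the central trick: the first pass, with __SYSCALL_I386() expanding to an extern declaration, declares every entry point, and the second pass, after the #undef, expands each line into a designated initializer, so the generated file serves as both prototype list and table body. A hedged sketch of what the compiler effectively sees for one entry, with the [0 ... __NR_syscall_max] range initializer covering any slot the table leaves unset:

	/* first inclusion */
	extern asmlinkage void sys_open(void);
	/* second inclusion, inside the sys_call_table[] initializer */
	[5] = sys_open,
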
index 99522f78b16251e8cc222fcb96e1b56de153bba1..fe626c3ba01b280a6ddc9b2bc3fb894b7c504c3e 100644 (file)
@@ -1,11 +1,12 @@
 /*
- * System call table for UML/x86-64, copied from arch/x86_64/kernel/syscall.c
+ * System call table for UML/x86-64, copied from arch/x86/kernel/syscall_*.c
  * with some changes for UML.
  */
 
 #include <linux/linkage.h>
 #include <linux/sys.h>
 #include <linux/cache.h>
+#include <generated/user_constants.h>
 
 #define __NO_STUBS
 
 #define stub_sigaltstack sys_sigaltstack
 #define stub_rt_sigreturn sys_rt_sigreturn
 
-#define __SYSCALL(nr, sym) extern asmlinkage void sym(void) ;
-#undef _ASM_X86_UNISTD_64_H
-#include "../../x86/include/asm/unistd_64.h"
+#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
+#include <asm/syscalls_64.h>
 
-#undef __SYSCALL
-#define __SYSCALL(nr, sym) [ nr ] = sym,
-#undef _ASM_X86_UNISTD_64_H
+#undef __SYSCALL_64
+#define __SYSCALL_64(nr, sym, compat) [ nr ] = sym,
 
 typedef void (*sys_call_ptr_t)(void);
 
 extern void sys_ni_syscall(void);
 
-/*
- * We used to have a trick here which made sure that holes in the
- * x86_64 table were filled in with sys_ni_syscall, but a comment in
- * unistd_64.h says that holes aren't allowed, so the trick was
- * removed.
- * The trick looked like this
- *     [0 ... UM_NR_syscall_max] = &sys_ni_syscall
- * before including unistd_64.h - the later initializations overwrote
- * the sys_ni_syscall filler.
- */
-
-sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
-#include <asm/unistd_64.h>
+const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+       /*
+        * Smells like a compiler bug -- it doesn't work
+        * when the & below is removed.
+        */
+       [0 ... __NR_syscall_max] = &sys_ni_syscall,
+#include <asm/syscalls_64.h>
 };
 
 int syscall_table_size = sizeof(sys_call_table);
index ca49be8ddd0c7160821a4d4db06f18346f52b195..5edf4f4bbf531f2e3912a1068bf78f65de8cbcf8 100644 (file)
@@ -8,6 +8,18 @@
 #include <asm/ptrace.h>
 #include <asm/types.h>
 
+#ifdef __i386__
+#define __SYSCALL_I386(nr, sym, compat) [nr] = 1,
+static char syscalls[] = {
+#include <asm/syscalls_32.h>
+};
+#else
+#define __SYSCALL_64(nr, sym, compat) [nr] = 1,
+static char syscalls[] = {
+#include <asm/syscalls_64.h>
+};
+#endif
+
 #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
 
@@ -77,4 +89,7 @@ void foo(void)
        DEFINE(UM_PROT_READ, PROT_READ);
        DEFINE(UM_PROT_WRITE, PROT_WRITE);
        DEFINE(UM_PROT_EXEC, PROT_EXEC);
+
+       DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
+       DEFINE(NR_syscalls, sizeof(syscalls));
 }
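
The syscalls[] array added at the top of this hunk is a compile-time counting device: every generated table line sets one element to 1 through a designated initializer, so the array's size becomes the highest syscall number plus one, which the two DEFINE() lines then export as NR_syscalls and __NR_syscall_max for the UML build. A minimal standalone sketch of the idea (names here are invented for illustration, not the kernel's):

	#define ENTRY(nr) [nr] = 1,
	static char table_map[] = {
		ENTRY(0)
		ENTRY(5)
		ENTRY(11)            /* the highest index determines the size */
	};
	/* sizeof(table_map) == 12, i.e. highest nr + 1; max nr == sizeof(table_map) - 1 */
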
index cc9b1e182fcfad86bc67b56a8e172fb73d9e9ecf..d69cc6c3f8080aab34f14e0e0b42b1f6ec8e73f1 100644 (file)
@@ -116,9 +116,26 @@ static inline void spin_time_accum_blocked(u64 start)
 }
 #endif  /* CONFIG_XEN_DEBUG_FS */
 
+/*
+ * Size struct xen_spinlock so it's the same as arch_spinlock_t.
+ */
+#if NR_CPUS < 256
+typedef u8 xen_spinners_t;
+# define inc_spinners(xl) \
+       asm(LOCK_PREFIX " incb %0" : "+m" ((xl)->spinners) : : "memory");
+# define dec_spinners(xl) \
+       asm(LOCK_PREFIX " decb %0" : "+m" ((xl)->spinners) : : "memory");
+#else
+typedef u16 xen_spinners_t;
+# define inc_spinners(xl) \
+       asm(LOCK_PREFIX " incw %0" : "+m" ((xl)->spinners) : : "memory");
+# define dec_spinners(xl) \
+       asm(LOCK_PREFIX " decw %0" : "+m" ((xl)->spinners) : : "memory");
+#endif
+
 struct xen_spinlock {
        unsigned char lock;             /* 0 -> free; 1 -> locked */
-       unsigned short spinners;        /* count of waiting cpus */
+       xen_spinners_t spinners;        /* count of waiting cpus */
 };
 
 static int xen_spin_is_locked(struct arch_spinlock *lock)
@@ -164,8 +181,7 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
 
        wmb();                  /* set lock of interest before count */
 
-       asm(LOCK_PREFIX " incw %0"
-           : "+m" (xl->spinners) : : "memory");
+       inc_spinners(xl);
 
        return prev;
 }
@@ -176,8 +192,7 @@ static inline struct xen_spinlock *spinning_lock(struct xen_spinlock *xl)
  */
 static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock *prev)
 {
-       asm(LOCK_PREFIX " decw %0"
-           : "+m" (xl->spinners) : : "memory");
+       dec_spinners(xl);
        wmb();                  /* decrement count before restoring lock */
        __this_cpu_write(lock_spinners, prev);
 }
@@ -373,6 +388,8 @@ void xen_uninit_lock_cpu(int cpu)
 
 void __init xen_init_spinlocks(void)
 {
+       BUILD_BUG_ON(sizeof(struct xen_spinlock) > sizeof(arch_spinlock_t));
+
        pv_lock_ops.spin_is_locked = xen_spin_is_locked;
        pv_lock_ops.spin_is_contended = xen_spin_is_contended;
        pv_lock_ops.spin_lock = xen_spin_lock;
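
The point of the xen_spinners_t/inc_spinners()/dec_spinners() changes above is to let the waiter count track NR_CPUS the same way the underlying lock type does, so struct xen_spinlock never outgrows arch_spinlock_t; the BUILD_BUG_ON() added to xen_init_spinlocks() asserts that size relationship at compile time. A hedged, standalone sketch of the same pattern outside the kernel (all type names invented for illustration):

	#include <stdint.h>

	#define SMALL_CPU_COUNT 1                     /* stand-in for NR_CPUS < 256 */

	#if SMALL_CPU_COUNT
	typedef uint8_t  spinners_t;
	#else
	typedef uint16_t spinners_t;
	#endif

	struct toy_lock {
		unsigned char lock;                   /* 0 -> free; 1 -> locked */
		spinners_t    spinners;               /* count of waiting cpus */
	};

	struct host_lock { uint16_t head, tail; };    /* stand-in for arch_spinlock_t */

	/* compile-time size check in the spirit of BUILD_BUG_ON() */
	typedef char toy_lock_fits[(sizeof(struct toy_lock) <= sizeof(struct host_lock)) ? 1 : -1];
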
index 5fb8c27cbef58dd98cb3847e2bc1b286fdad1cd3..405a8c49ff2c991b132ea31539f128069fbb03eb 100644 (file)
@@ -118,7 +118,4 @@ extern void *memmove(void *__dest, __const__ void *__src, size_t __n);
 /* Don't build bcopy at all ...  */
 #define __HAVE_ARCH_BCOPY
 
-#define __HAVE_ARCH_MEMSCAN
-#define memscan memchr
-
 #endif /* _XTENSA_STRING_H */
index a0d042aa296755e441fe4266a7103b315a581a34..2dff698ab02e5ce1642248afa0f582b8aa0f112f 100644 (file)
@@ -334,8 +334,7 @@ void do_syscall_trace_enter(struct pt_regs *regs)
                do_syscall_trace();
 
 #if 0
-       if (unlikely(current->audit_context))
-               audit_syscall_entry(current, AUDIT_ARCH_XTENSA..);
+       audit_syscall_entry(current, AUDIT_ARCH_XTENSA..);
 #endif
 }
 
index 163263ddd3814e3f087eb1a7e1461242fabbd702..ee55019066a19500c6df54addf90d91d2ace60fa 100644 (file)
@@ -3117,18 +3117,17 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
  */
 static void cfq_preempt_queue(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 {
-       struct cfq_queue *old_cfqq = cfqd->active_queue;
-
        cfq_log_cfqq(cfqd, cfqq, "preempt");
-       cfq_slice_expired(cfqd, 1);
 
        /*
         * workload type is changed, don't save slice, otherwise preempt
         * doesn't happen
         */
-       if (cfqq_type(old_cfqq) != cfqq_type(cfqq))
+       if (cfqq_type(cfqd->active_queue) != cfqq_type(cfqq))
                cfqq->cfqg->saved_workload_slice = 0;
 
+       cfq_slice_expired(cfqd, 1);
+
        /*
         * Put the new queue at the front of the current list,
         * so we know that it will be selected next.
index 9ed9f60316e545b6bc0ad68f9ef8c50413450685..88f160b77b1fec95ed6315c65566a30f611f5642 100644 (file)
@@ -21,8 +21,6 @@
 #include <linux/percpu.h>
 #include <asm/byteorder.h>
 
-static DEFINE_PER_CPU(u64[80], msg_schedule);
-
 static inline u64 Ch(u64 x, u64 y, u64 z)
 {
         return z ^ (x & (y ^ z));
@@ -80,7 +78,7 @@ static inline void LOAD_OP(int I, u64 *W, const u8 *input)
 
 static inline void BLEND_OP(int I, u64 *W)
 {
-       W[I] = s1(W[I-2]) + W[I-7] + s0(W[I-15]) + W[I-16];
+       W[I % 16] += s1(W[(I-2) % 16]) + W[(I-7) % 16] + s0(W[(I-15) % 16]);
 }
 
 static void
@@ -89,38 +87,48 @@ sha512_transform(u64 *state, const u8 *input)
        u64 a, b, c, d, e, f, g, h, t1, t2;
 
        int i;
-       u64 *W = get_cpu_var(msg_schedule);
+       u64 W[16];
 
        /* load the input */
         for (i = 0; i < 16; i++)
                 LOAD_OP(i, W, input);
 
-        for (i = 16; i < 80; i++) {
-                BLEND_OP(i, W);
-        }
-
        /* load the state into our registers */
        a=state[0];   b=state[1];   c=state[2];   d=state[3];
        e=state[4];   f=state[5];   g=state[6];   h=state[7];
 
-       /* now iterate */
-       for (i=0; i<80; i+=8) {
-               t1 = h + e1(e) + Ch(e,f,g) + sha512_K[i  ] + W[i  ];
-               t2 = e0(a) + Maj(a,b,c);    d+=t1;    h=t1+t2;
-               t1 = g + e1(d) + Ch(d,e,f) + sha512_K[i+1] + W[i+1];
-               t2 = e0(h) + Maj(h,a,b);    c+=t1;    g=t1+t2;
-               t1 = f + e1(c) + Ch(c,d,e) + sha512_K[i+2] + W[i+2];
-               t2 = e0(g) + Maj(g,h,a);    b+=t1;    f=t1+t2;
-               t1 = e + e1(b) + Ch(b,c,d) + sha512_K[i+3] + W[i+3];
-               t2 = e0(f) + Maj(f,g,h);    a+=t1;    e=t1+t2;
-               t1 = d + e1(a) + Ch(a,b,c) + sha512_K[i+4] + W[i+4];
-               t2 = e0(e) + Maj(e,f,g);    h+=t1;    d=t1+t2;
-               t1 = c + e1(h) + Ch(h,a,b) + sha512_K[i+5] + W[i+5];
-               t2 = e0(d) + Maj(d,e,f);    g+=t1;    c=t1+t2;
-               t1 = b + e1(g) + Ch(g,h,a) + sha512_K[i+6] + W[i+6];
-               t2 = e0(c) + Maj(c,d,e);    f+=t1;    b=t1+t2;
-               t1 = a + e1(f) + Ch(f,g,h) + sha512_K[i+7] + W[i+7];
-               t2 = e0(b) + Maj(b,c,d);    e+=t1;    a=t1+t2;
+#define SHA512_0_15(i, a, b, c, d, e, f, g, h)                 \
+       t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[i];      \
+       t2 = e0(a) + Maj(a, b, c);                              \
+       d += t1;                                                \
+       h = t1 + t2
+
+#define SHA512_16_79(i, a, b, c, d, e, f, g, h)                        \
+       BLEND_OP(i, W);                                         \
+       t1 = h + e1(e) + Ch(e, f, g) + sha512_K[i] + W[(i)%16]; \
+       t2 = e0(a) + Maj(a, b, c);                              \
+       d += t1;                                                \
+       h = t1 + t2
+
+       for (i = 0; i < 16; i += 8) {
+               SHA512_0_15(i, a, b, c, d, e, f, g, h);
+               SHA512_0_15(i + 1, h, a, b, c, d, e, f, g);
+               SHA512_0_15(i + 2, g, h, a, b, c, d, e, f);
+               SHA512_0_15(i + 3, f, g, h, a, b, c, d, e);
+               SHA512_0_15(i + 4, e, f, g, h, a, b, c, d);
+               SHA512_0_15(i + 5, d, e, f, g, h, a, b, c);
+               SHA512_0_15(i + 6, c, d, e, f, g, h, a, b);
+               SHA512_0_15(i + 7, b, c, d, e, f, g, h, a);
+       }
+       for (i = 16; i < 80; i += 8) {
+               SHA512_16_79(i, a, b, c, d, e, f, g, h);
+               SHA512_16_79(i + 1, h, a, b, c, d, e, f, g);
+               SHA512_16_79(i + 2, g, h, a, b, c, d, e, f);
+               SHA512_16_79(i + 3, f, g, h, a, b, c, d, e);
+               SHA512_16_79(i + 4, e, f, g, h, a, b, c, d);
+               SHA512_16_79(i + 5, d, e, f, g, h, a, b, c);
+               SHA512_16_79(i + 6, c, d, e, f, g, h, a, b);
+               SHA512_16_79(i + 7, b, c, d, e, f, g, h, a);
        }
 
        state[0] += a; state[1] += b; state[2] += c; state[3] += d;
@@ -128,8 +136,6 @@ sha512_transform(u64 *state, const u8 *input)
 
        /* erase our data */
        a = b = c = d = e = f = g = h = t1 = t2 = 0;
-       memset(W, 0, sizeof(__get_cpu_var(msg_schedule)));
-       put_cpu_var(msg_schedule);
 }
 
 static int
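
The rework above replaces the 80-entry per-cpu message schedule with a 16-entry array used as a ring: BLEND_OP() now adds the three new terms onto the slot that still holds W[i-16], which by the SHA-512 recurrence W[i] = s1(W[i-2]) + W[i-7] + s0(W[i-15]) + W[i-16] yields W[i] in place. A small self-contained check of that equivalence, using dummy s0/s1 stand-ins rather than the real sigma functions:

	#include <assert.h>
	#include <stdint.h>

	static uint64_t s0(uint64_t x) { return x; }        /* stand-in only */
	static uint64_t s1(uint64_t x) { return x ^ 1; }    /* stand-in only */

	int main(void)
	{
		uint64_t full[80], ring[16];
		int i;

		for (i = 0; i < 16; i++)
			full[i] = ring[i] = i * 0x9e3779b97f4a7c15ULL;

		for (i = 16; i < 80; i++) {
			/* the old 80-entry schedule */
			full[i] = s1(full[i-2]) + full[i-7] + s0(full[i-15]) + full[i-16];
			/* the new ring: the slot holding W[i-16] is updated to W[i] */
			ring[i % 16] += s1(ring[(i-2) % 16]) + ring[(i-7) % 16] +
					s0(ring[(i-15) % 16]);
			assert(ring[i % 16] == full[i]);
		}
		return 0;
	}
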
index ecb26b4f29a0581d697a26c69fa9b52532e0f327..1567028d2038ecbe853bfebc7b4e4debd03be8fe 100644 (file)
@@ -19,12 +19,12 @@ obj-y                               += acpi.o \
 
 # All the builtin files are in the "acpi." module_param namespace.
 acpi-y                         += osl.o utils.o reboot.o
-acpi-y                         += atomicio.o
+acpi-y                         += nvs.o
 
 # sleep related files
 acpi-y                         += wakeup.o
 acpi-y                         += sleep.o
-acpi-$(CONFIG_ACPI_SLEEP)      += proc.o nvs.o
+acpi-$(CONFIG_ACPI_SLEEP)      += proc.o
 
 
 #
index 301bd2d388ad1dc7a6a00268ebbec4266470150a..0ca208b6dcf099e4b012ed29efa0d564227f20c6 100644 (file)
@@ -8,41 +8,151 @@ ccflags-$(CONFIG_ACPI_DEBUG) += -DACPI_DEBUG_OUTPUT
 # use acpi.o to put all files here into acpi.o modparam namespace
 obj-y  += acpi.o
 
-acpi-y := dsfield.o   dsmthdat.o  dsopcode.o  dswexec.o  dswscope.o \
-        dsmethod.o  dsobject.o  dsutils.o   dswload.o  dswstate.o \
-        dsinit.o dsargs.o dscontrol.o dswload2.o
+acpi-y :=              \
+       dsargs.o        \
+       dscontrol.o     \
+       dsfield.o       \
+       dsinit.o        \
+       dsmethod.o      \
+       dsmthdat.o      \
+       dsobject.o      \
+       dsopcode.o      \
+       dsutils.o       \
+       dswexec.o       \
+       dswload.o       \
+       dswload2.o      \
+       dswscope.o      \
+       dswstate.o
 
-acpi-y += evevent.o  evregion.o  evsci.o    evxfevnt.o \
-        evmisc.o   evrgnini.o  evxface.o  evxfregn.o \
-        evgpe.o    evgpeblk.o evgpeinit.o  evgpeutil.o evxfgpe.o evglock.o
+acpi-y +=              \
+       evevent.o       \
+       evgpe.o         \
+       evgpeblk.o      \
+       evgpeinit.o     \
+       evgpeutil.o     \
+       evglock.o       \
+       evmisc.o        \
+       evregion.o      \
+       evrgnini.o      \
+       evsci.o         \
+       evxface.o       \
+       evxfevnt.o      \
+       evxfgpe.o       \
+       evxfregn.o
 
-acpi-y += exconfig.o  exfield.o  exnames.o   exoparg6.o  exresolv.o  exstorob.o\
-        exconvrt.o  exfldio.o  exoparg1.o  exprep.o    exresop.o   exsystem.o\
-        excreate.o  exmisc.o   exoparg2.o  exregion.o  exstore.o   exutils.o \
-        exdump.o    exmutex.o  exoparg3.o  exresnte.o  exstoren.o  exdebug.o
+acpi-y +=              \
+       exconfig.o      \
+       exconvrt.o      \
+       excreate.o      \
+       exdebug.o       \
+       exdump.o        \
+       exfield.o       \
+       exfldio.o       \
+       exmutex.o       \
+       exnames.o       \
+       exoparg1.o      \
+       exoparg2.o      \
+       exoparg3.o      \
+       exoparg6.o      \
+       exprep.o        \
+       exmisc.o        \
+       exregion.o      \
+       exresnte.o      \
+       exresolv.o      \
+       exresop.o       \
+       exstore.o       \
+       exstoren.o      \
+       exstorob.o      \
+       exsystem.o      \
+       exutils.o
 
-acpi-y += hwacpi.o  hwgpe.o  hwregs.o  hwsleep.o hwxface.o hwvalid.o hwpci.o
+acpi-y +=              \
+       hwacpi.o        \
+       hwgpe.o         \
+       hwpci.o         \
+       hwregs.o        \
+       hwsleep.o       \
+       hwvalid.o       \
+       hwxface.o
 
 acpi-$(ACPI_FUTURE_USAGE) += hwtimer.o
 
-acpi-y += nsaccess.o  nsload.o    nssearch.o  nsxfeval.o \
-        nsalloc.o   nseval.o    nsnames.o   nsutils.o   nsxfname.o \
-        nsdump.o    nsinit.o    nsobject.o  nswalk.o    nsxfobj.o  \
-        nsparse.o   nspredef.o  nsrepair.o  nsrepair2.o
+acpi-y +=              \
+       nsaccess.o      \
+       nsalloc.o       \
+       nsdump.o        \
+       nseval.o        \
+       nsinit.o        \
+       nsload.o        \
+       nsnames.o       \
+       nsobject.o      \
+       nsparse.o       \
+       nspredef.o      \
+       nsrepair.o      \
+       nsrepair2.o     \
+       nssearch.o      \
+       nsutils.o       \
+       nswalk.o        \
+       nsxfeval.o      \
+       nsxfname.o      \
+       nsxfobj.o
 
 acpi-$(ACPI_FUTURE_USAGE) += nsdumpdv.o
 
-acpi-y += psargs.o    psparse.o  psloop.o pstree.o   pswalk.o  \
-        psopcode.o  psscope.o  psutils.o  psxface.o
+acpi-y +=              \
+       psargs.o        \
+       psloop.o        \
+       psopcode.o      \
+       psparse.o       \
+       psscope.o       \
+       pstree.o        \
+       psutils.o       \
+       pswalk.o        \
+       psxface.o
 
-acpi-y += rsaddr.o rscreate.o rsinfo.o rsio.o rslist.o rsmisc.o rsxface.o \
-        rscalc.o  rsirq.o  rsmemory.o  rsutils.o
+acpi-y +=              \
+       rsaddr.o        \
+       rscalc.o        \
+       rscreate.o      \
+       rsinfo.o        \
+       rsio.o          \
+       rsirq.o         \
+       rslist.o        \
+       rsmemory.o      \
+       rsmisc.o        \
+       rsserial.o      \
+       rsutils.o       \
+       rsxface.o
 
 acpi-$(ACPI_FUTURE_USAGE) += rsdump.o
 
-acpi-y += tbxface.o tbinstal.o tbutils.o tbfind.o tbfadt.o tbxfroot.o
+acpi-y +=              \
+       tbfadt.o        \
+       tbfind.o        \
+       tbinstal.o      \
+       tbutils.o       \
+       tbxface.o       \
+       tbxfroot.o
 
-acpi-y += utalloc.o utdebug.o uteval.o utinit.o utmisc.o utxface.o \
-               utcopy.o utdelete.o utglobal.o utmath.o utobject.o \
-               utstate.o utmutex.o utobject.o utresrc.o utlock.o utids.o \
-               utosi.o utxferror.o utdecode.o
+acpi-y +=              \
+       utaddress.o     \
+       utalloc.o       \
+       utcopy.o        \
+       utdebug.o       \
+       utdecode.o      \
+       utdelete.o      \
+       uteval.o        \
+       utglobal.o      \
+       utids.o         \
+       utinit.o        \
+       utlock.o        \
+       utmath.o        \
+       utmisc.o        \
+       utmutex.o       \
+       utobject.o      \
+       utosi.o         \
+       utresrc.o       \
+       utstate.o       \
+       utxface.o       \
+       utxferror.o     \
+       utxfmutex.o
index e0ba17f0a7c89acc590ab487e5c51c1360697579..a44bd424f9f4137b58a722e313d3454b0fbc2243 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f895a244ca7ea187588356475e6ca4d700804b52..1f30af613e87619d4ab7cfeec3d45dad1d15d386 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 
 #define ACPI_MAX_SLEEP                  2000   /* Two seconds */
 
+/* Address Range lists are per-space_id (Memory and I/O only) */
+
+#define ACPI_ADDRESS_RANGE_MAX          2
+
 /******************************************************************************
  *
  * ACPI Specification constants (Do not change unless the specification changes)
 #define ACPI_RSDP_CHECKSUM_LENGTH       20
 #define ACPI_RSDP_XCHECKSUM_LENGTH      36
 
-/* SMBus and IPMI bidirectional buffer size */
+/* SMBus, GSBus and IPMI bidirectional buffer size */
 
 #define ACPI_SMBUS_BUFFER_SIZE          34
+#define ACPI_GSBUS_BUFFER_SIZE          34
 #define ACPI_IPMI_BUFFER_SIZE           66
 
 /* _sx_d and _sx_w control methods */
index eb0b1f8dee6dca011b447504b7654ca5243b3f63..deaa8197956133850767828e05dd248ea41378cf 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2d1b7ffa377a030d9e30490bcdcfef225bdb1d89..5935ba6707e2614cddcd39d3bb0220a3bc9a2bd1 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index bea3b4899183bae0fce0ecfa44238160d438a9d3..c53caa521a30e90b46f3d6de1d70db4c7d4ea12b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -162,6 +162,7 @@ acpi_status acpi_ev_initialize_op_regions(void);
 
 acpi_status
 acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+                              union acpi_operand_object *field_obj,
                               u32 function,
                               u32 region_offset, u32 bit_width, u64 *value);
 
index e6652d716e45727cf992838b4420902b80436ce7..2853f7673f3bdf05235384b21a614bd1ac331084 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -140,8 +140,19 @@ u32 acpi_gbl_trace_flags;
 acpi_name acpi_gbl_trace_method_name;
 u8 acpi_gbl_system_awake_and_running;
 
+/*
+ * ACPI 5.0 introduces the concept of a "reduced hardware platform", meaning
+ * that the ACPI hardware is no longer required. A flag in the FADT indicates
+ * a reduced HW machine, and that flag is duplicated here for convenience.
+ */
+u8 acpi_gbl_reduced_hardware;
+
 #endif
 
+/* Do not disassemble buffers to resource descriptors */
+
+ACPI_EXTERN u8 ACPI_INIT_GLOBAL(acpi_gbl_no_resource_disassembly, FALSE);
+
 /*****************************************************************************
  *
  * Debug support
@@ -207,7 +218,7 @@ ACPI_EXTERN struct acpi_rw_lock acpi_gbl_namespace_rw_lock;
 
 /*****************************************************************************
  *
- * Mutual exlusion within ACPICA subsystem
+ * Mutual exclusion within ACPICA subsystem
  *
  ****************************************************************************/
 
@@ -295,6 +306,8 @@ ACPI_EXTERN u8 acpi_gbl_acpi_hardware_present;
 ACPI_EXTERN u8 acpi_gbl_events_initialized;
 ACPI_EXTERN u8 acpi_gbl_osi_data;
 ACPI_EXTERN struct acpi_interface_info *acpi_gbl_supported_interfaces;
+ACPI_EXTERN struct acpi_address_range
+    *acpi_gbl_address_range_list[ACPI_ADDRESS_RANGE_MAX];
 
 #ifndef DEFINE_ACPI_GLOBALS
 
index e7213beaafc7914322f72520d40bcfd437b4ea5b..677793e938f5d83e31241206724b58fec3e01630 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3731e1c34b83b5d75ab25f08d0eefb706d7f85bc..eb308635da7247f0e1ae0554f322d082869b3eb9 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -468,6 +468,8 @@ void acpi_ex_eisa_id_to_string(char *dest, u64 compressed_id);
 
 void acpi_ex_integer_to_string(char *dest, u64 value);
 
+u8 acpi_is_valid_space_id(u8 space_id);
+
 /*
  * exregion - default op_region handlers
  */
index 5552125d8340ef11782f4aec94595e7444cb1774..3f24068837d5ad8e80c7603ac864fa86673bf7b3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -53,7 +53,7 @@ typedef u32 acpi_mutex_handle;
 
 /* Total number of aml opcodes defined */
 
-#define AML_NUM_OPCODES                 0x7F
+#define AML_NUM_OPCODES                 0x81
 
 /* Forward declarations */
 
@@ -249,12 +249,16 @@ struct acpi_create_field_info {
        struct acpi_namespace_node *field_node;
        struct acpi_namespace_node *register_node;
        struct acpi_namespace_node *data_register_node;
+       struct acpi_namespace_node *connection_node;
+       u8 *resource_buffer;
        u32 bank_value;
        u32 field_bit_position;
        u32 field_bit_length;
+       u16 resource_length;
        u8 field_flags;
        u8 attribute;
        u8 field_type;
+       u8 access_length;
 };
 
 typedef
@@ -315,7 +319,8 @@ struct acpi_name_info {
 
 /*
  * Used for ACPI_PTYPE1_FIXED, ACPI_PTYPE1_VAR, ACPI_PTYPE2,
- * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT
+ * ACPI_PTYPE2_MIN, ACPI_PTYPE2_PKG_COUNT, ACPI_PTYPE2_COUNT,
+ * ACPI_PTYPE2_FIX_VAR
  */
 struct acpi_package_info {
        u8 type;
@@ -625,6 +630,15 @@ union acpi_generic_state {
 
 typedef acpi_status(*ACPI_EXECUTE_OP) (struct acpi_walk_state * walk_state);
 
+/* Address Range info block */
+
+struct acpi_address_range {
+       struct acpi_address_range *next;
+       struct acpi_namespace_node *region_node;
+       acpi_physical_address start_address;
+       acpi_physical_address end_address;
+};
+
 /*****************************************************************************
  *
  * Parser typedefs and structs
@@ -951,7 +965,7 @@ struct acpi_port_info {
 #define ACPI_RESOURCE_NAME_END_DEPENDENT        0x38
 #define ACPI_RESOURCE_NAME_IO                   0x40
 #define ACPI_RESOURCE_NAME_FIXED_IO             0x48
-#define ACPI_RESOURCE_NAME_RESERVED_S1          0x50
+#define ACPI_RESOURCE_NAME_FIXED_DMA            0x50
 #define ACPI_RESOURCE_NAME_RESERVED_S2          0x58
 #define ACPI_RESOURCE_NAME_RESERVED_S3          0x60
 #define ACPI_RESOURCE_NAME_RESERVED_S4          0x68
@@ -973,7 +987,9 @@ struct acpi_port_info {
 #define ACPI_RESOURCE_NAME_EXTENDED_IRQ         0x89
 #define ACPI_RESOURCE_NAME_ADDRESS64            0x8A
 #define ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64   0x8B
-#define ACPI_RESOURCE_NAME_LARGE_MAX            0x8B
+#define ACPI_RESOURCE_NAME_GPIO                 0x8C
+#define ACPI_RESOURCE_NAME_SERIAL_BUS           0x8E
+#define ACPI_RESOURCE_NAME_LARGE_MAX            0x8E
 
 /*****************************************************************************
  *
index b7491ee1fba642447242705c5a7ba48c677cce94..ef338a96f5b28761b77840851d73fedd8f25da8f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 79a598c67fe3ad30ae599aacaefa2f776b6d0303..2c9e0f049523120f4c03a094ab8b16e0563aea08 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1055769f2f01ae97f4f69780fd93b1015512d870..c065078ca83bbbb0f3a09a22a609121d0d7785d7 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -254,6 +254,7 @@ ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_NOTIFY_INFO};
        u32                             base_byte_offset;   /* Byte offset within containing object */\
        u32                             value;              /* Value to store into the Bank or Index register */\
        u8                              start_field_bit_offset;/* Bit offset within first field datum (0-63) */\
+       u8                              access_length;  /* For serial regions/fields */
 
 
 struct acpi_object_field_common {      /* COMMON FIELD (for BUFFER, REGION, BANK, and INDEX fields) */
@@ -261,7 +262,9 @@ struct acpi_object_field_common {   /* COMMON FIELD (for BUFFER, REGION, BANK, and
 };
 
 struct acpi_object_region_field {
-       ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO union acpi_operand_object *region_obj; /* Containing op_region object */
+       ACPI_OBJECT_COMMON_HEADER ACPI_COMMON_FIELD_INFO u16 resource_length;
+       union acpi_operand_object *region_obj;  /* Containing op_region object */
+       u8 *resource_buffer;    /* resource_template for serial regions/fields */
 };
 
 struct acpi_object_bank_field {
@@ -358,6 +361,7 @@ typedef enum {
  */
 struct acpi_object_extra {
        ACPI_OBJECT_COMMON_HEADER struct acpi_namespace_node *method_REG;       /* _REG method for this region (if any) */
+       struct acpi_namespace_node *scope_node;
        void *region_context;   /* Region-specific data */
        u8 *aml_start;
        u32 aml_length;
index bb2ccfad7376548569f99ed46653c4ee710f958e..9440d053fbb3f83e6f60217bb4fc6e08cafe4a98 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -93,6 +93,7 @@
 #define ARGP_CONCAT_OP                  ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
 #define ARGP_CONCAT_RES_OP              ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
 #define ARGP_COND_REF_OF_OP             ARGP_LIST2 (ARGP_SUPERNAME,  ARGP_SUPERNAME)
+#define ARGP_CONNECTFIELD_OP            ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_CONTINUE_OP                ARG_NONE
 #define ARGP_COPY_OP                    ARGP_LIST2 (ARGP_TERMARG,    ARGP_SIMPLENAME)
 #define ARGP_CREATE_BIT_FIELD_OP        ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_NAME)
 #define ARGP_RETURN_OP                  ARGP_LIST1 (ARGP_TERMARG)
 #define ARGP_REVISION_OP                ARG_NONE
 #define ARGP_SCOPE_OP                   ARGP_LIST3 (ARGP_PKGLENGTH,  ARGP_NAME,          ARGP_TERMLIST)
+#define ARGP_SERIALFIELD_OP             ARGP_LIST1 (ARGP_NAMESTRING)
 #define ARGP_SHIFT_LEFT_OP              ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
 #define ARGP_SHIFT_RIGHT_OP             ARGP_LIST3 (ARGP_TERMARG,    ARGP_TERMARG,       ARGP_TARGET)
 #define ARGP_SIGNAL_OP                  ARGP_LIST1 (ARGP_SUPERNAME)
 #define ARGI_CONCAT_OP                  ARGI_LIST3 (ARGI_COMPUTEDATA,ARGI_COMPUTEDATA,   ARGI_TARGETREF)
 #define ARGI_CONCAT_RES_OP              ARGI_LIST3 (ARGI_BUFFER,     ARGI_BUFFER,        ARGI_TARGETREF)
 #define ARGI_COND_REF_OF_OP             ARGI_LIST2 (ARGI_OBJECT_REF, ARGI_TARGETREF)
+#define ARGI_CONNECTFIELD_OP            ARGI_INVALID_OPCODE
 #define ARGI_CONTINUE_OP                ARGI_INVALID_OPCODE
 #define ARGI_COPY_OP                    ARGI_LIST2 (ARGI_ANYTYPE,    ARGI_SIMPLE_TARGET)
 #define ARGI_CREATE_BIT_FIELD_OP        ARGI_LIST3 (ARGI_BUFFER,     ARGI_INTEGER,       ARGI_REFERENCE)
 #define ARGI_RETURN_OP                  ARGI_INVALID_OPCODE
 #define ARGI_REVISION_OP                ARG_NONE
 #define ARGI_SCOPE_OP                   ARGI_INVALID_OPCODE
+#define ARGI_SERIALFIELD_OP             ARGI_INVALID_OPCODE
 #define ARGI_SHIFT_LEFT_OP              ARGI_LIST3 (ARGI_INTEGER,    ARGI_INTEGER,       ARGI_TARGETREF)
 #define ARGI_SHIFT_RIGHT_OP             ARGI_LIST3 (ARGI_INTEGER,    ARGI_INTEGER,       ARGI_TARGETREF)
 #define ARGI_SIGNAL_OP                  ARGI_LIST1 (ARGI_EVENT)
index 5ea1e06afa20637b652f87cbe9ee60b91fb62023..b725d780d34dafec0ecd1d2166a30dd8c078d1f0 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c445cca490ea4b1489983ba4866b0fb746c3de21..bbb34c9be4e84de95f0d0922fe72ba5558f1b3e0 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * ACPI_PTYPE2_REV_FIXED: Revision at start, each subpackage is Fixed-length
  *      (Used for _ART, _FPS)
  *
+ * ACPI_PTYPE2_FIX_VAR: Each subpackage consists of some fixed-length elements
+ *      followed by an optional element
+ *      object type
+ *      count
+ *      object type
+ *      count = 0 (optional)
+ *      (Used for _DLM)
+ *
  *****************************************************************************/
 
 enum acpi_return_package_types {
@@ -105,7 +113,8 @@ enum acpi_return_package_types {
        ACPI_PTYPE2_PKG_COUNT = 6,
        ACPI_PTYPE2_FIXED = 7,
        ACPI_PTYPE2_MIN = 8,
-       ACPI_PTYPE2_REV_FIXED = 9
+       ACPI_PTYPE2_REV_FIXED = 9,
+       ACPI_PTYPE2_FIX_VAR = 10
 };
 
 #ifdef ACPI_CREATE_PREDEFINED_TABLE
@@ -154,6 +163,7 @@ static const union acpi_predefined_info predefined_names[] =
        {{"_AC8", 0, ACPI_RTYPE_INTEGER}},
        {{"_AC9", 0, ACPI_RTYPE_INTEGER}},
        {{"_ADR", 0, ACPI_RTYPE_INTEGER}},
+       {{"_AEI", 0, ACPI_RTYPE_BUFFER}},
        {{"_AL0", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
                          {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
 
@@ -229,6 +239,13 @@ static const union acpi_predefined_info predefined_names[] =
        {{"_CID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING | ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints/Strs) */
                          {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING, 0,0}, 0,0}},
 
+       {{"_CLS", 0, ACPI_RTYPE_PACKAGE}},      /* Fixed-length (3 Int) */
+       {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
+
+       {{"_CPC", 0, ACPI_RTYPE_PACKAGE}},      /* Variable-length (Ints/Bufs) */
+       {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER, 0, 0}, 0,
+         0}},
+
        {{"_CRS", 0, ACPI_RTYPE_BUFFER}},
        {{"_CRT", 0, ACPI_RTYPE_INTEGER}},
        {{"_CSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n-1 Int) */
@@ -237,12 +254,21 @@ static const union acpi_predefined_info predefined_names[] =
        {{"_CST", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (1 Int(n), n Pkg (1 Buf/3 Int) */
                          {{{ACPI_PTYPE2_PKG_COUNT,ACPI_RTYPE_BUFFER, 1, ACPI_RTYPE_INTEGER}, 3,0}},
 
+       {{"_CWS", 1, ACPI_RTYPE_INTEGER}},
        {{"_DCK", 1, ACPI_RTYPE_INTEGER}},
        {{"_DCS", 0, ACPI_RTYPE_INTEGER}},
        {{"_DDC", 1, ACPI_RTYPE_INTEGER | ACPI_RTYPE_BUFFER}},
        {{"_DDN", 0, ACPI_RTYPE_STRING}},
+       {{"_DEP", 0, ACPI_RTYPE_PACKAGE}},      /* Variable-length (Refs) */
+       {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
        {{"_DGS", 0, ACPI_RTYPE_INTEGER}},
        {{"_DIS", 0, 0}},
+
+       {{"_DLM", 0, ACPI_RTYPE_PACKAGE}},      /* Variable-length (Pkgs) each (1 Ref, 0/1 Optional Buf/Ref) */
+       {{{ACPI_PTYPE2_FIX_VAR, ACPI_RTYPE_REFERENCE, 1,
+          ACPI_RTYPE_REFERENCE | ACPI_RTYPE_BUFFER}, 0, 0}},
+
        {{"_DMA", 0, ACPI_RTYPE_BUFFER}},
        {{"_DOD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Ints) */
                          {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_INTEGER, 0,0}, 0,0}},
@@ -262,6 +288,7 @@ static const union acpi_predefined_info predefined_names[] =
        {{"_EJ3", 1, 0}},
        {{"_EJ4", 1, 0}},
        {{"_EJD", 0, ACPI_RTYPE_STRING}},
+       {{"_EVT", 1, 0}},
        {{"_FDE", 0, ACPI_RTYPE_BUFFER}},
        {{"_FDI", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (16 Int) */
                          {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16,0}, 0,0}},
@@ -281,14 +308,17 @@ static const union acpi_predefined_info predefined_names[] =
        {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 3, 0}, 0, 0}},
 
        {{"_GAI", 0, ACPI_RTYPE_INTEGER}},
+       {{"_GCP", 0, ACPI_RTYPE_INTEGER}},
        {{"_GHL", 0, ACPI_RTYPE_INTEGER}},
        {{"_GLK", 0, ACPI_RTYPE_INTEGER}},
        {{"_GPD", 0, ACPI_RTYPE_INTEGER}},
        {{"_GPE", 0, ACPI_RTYPE_INTEGER}}, /* _GPE method, not _GPE scope */
+       {{"_GRT", 0, ACPI_RTYPE_BUFFER}},
        {{"_GSB", 0, ACPI_RTYPE_INTEGER}},
        {{"_GTF", 0, ACPI_RTYPE_BUFFER}},
        {{"_GTM", 0, ACPI_RTYPE_BUFFER}},
        {{"_GTS", 1, 0}},
+       {{"_GWS", 1, ACPI_RTYPE_INTEGER}},
        {{"_HID", 0, ACPI_RTYPE_INTEGER | ACPI_RTYPE_STRING}},
        {{"_HOT", 0, ACPI_RTYPE_INTEGER}},
        {{"_HPP", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (4 Int) */
@@ -303,6 +333,7 @@ static const union acpi_predefined_info predefined_names[] =
        {{"_HPX", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (var Ints) */
                          {{{ACPI_PTYPE2_MIN, ACPI_RTYPE_INTEGER, 5,0}, 0,0}},
 
+       {{"_HRV", 0, ACPI_RTYPE_INTEGER}},
        {{"_IFT", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
        {{"_INI", 0, 0}},
        {{"_IRC", 0, 0}},
@@ -361,6 +392,9 @@ static const union acpi_predefined_info predefined_names[] =
        {{"_PR3", 0, ACPI_RTYPE_PACKAGE}},      /* Variable-length (Refs) */
        {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
 
+       {{"_PRE", 0, ACPI_RTYPE_PACKAGE}},      /* Variable-length (Refs) */
+       {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
+
        {{"_PRL", 0, ACPI_RTYPE_PACKAGE}},      /* Variable-length (Refs) */
        {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0, 0}, 0, 0}},
 
@@ -391,6 +425,7 @@ static const union acpi_predefined_info predefined_names[] =
        {{"_PSD", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Pkgs) each (5 Int) with count */
                          {{{ACPI_PTYPE2_COUNT, ACPI_RTYPE_INTEGER,0,0}, 0,0}},
 
+       {{"_PSE", 1, 0}},
        {{"_PSL", 0, ACPI_RTYPE_PACKAGE}}, /* Variable-length (Refs) */
                          {{{ACPI_PTYPE1_VAR, ACPI_RTYPE_REFERENCE, 0,0}, 0,0}},
 
@@ -457,6 +492,7 @@ static const union acpi_predefined_info predefined_names[] =
        {{"_SLI", 0, ACPI_RTYPE_BUFFER}},
        {{"_SPD", 1, ACPI_RTYPE_INTEGER}},
        {{"_SRS", 1, 0}},
+       {{"_SRT", 1, ACPI_RTYPE_INTEGER}},
        {{"_SRV", 0, ACPI_RTYPE_INTEGER}}, /* See IPMI spec */
        {{"_SST", 1, 0}},
        {{"_STA", 0, ACPI_RTYPE_INTEGER}},
@@ -464,6 +500,7 @@ static const union acpi_predefined_info predefined_names[] =
        {{"_STP", 2, ACPI_RTYPE_INTEGER}},
        {{"_STR", 0, ACPI_RTYPE_BUFFER}},
        {{"_STV", 2, ACPI_RTYPE_INTEGER}},
+       {{"_SUB", 0, ACPI_RTYPE_STRING}},
        {{"_SUN", 0, ACPI_RTYPE_INTEGER}},
        {{"_SWS", 0, ACPI_RTYPE_INTEGER}},
        {{"_TC1", 0, ACPI_RTYPE_INTEGER}},
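The new ACPI_PTYPE2_FIX_VAR package type introduced at the top of this file describes sub-packages made of fixed-length elements followed by one optional trailing element; the _DLM entry above is annotated as "1 Ref, 0/1 Optional Buf/Ref". A schematic check of that shape, using stand-in object types rather than ACPICA's operand structures:

enum obj_type { OBJ_REFERENCE, OBJ_BUFFER, OBJ_OTHER };

struct obj {
	enum obj_type type;
};

/* Validate one _DLM sub-package: a mandatory Reference, optionally
 * followed by a single Buffer or Reference.  Returns 1 if the shape is
 * acceptable, 0 otherwise. */
static int dlm_subpackage_ok(const struct obj *elems, int count)
{
	if (count < 1 || count > 2)
		return 0;
	if (elems[0].type != OBJ_REFERENCE)
		return 0;
	if (count == 2 &&
	    elems[1].type != OBJ_BUFFER && elems[1].type != OBJ_REFERENCE)
		return 0;
	return 1;
}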
index f08b55b7f3a08f5dff5209ad52268e78b59cacc6..0347d099349708a6f21035467ac96e2eb5578105 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -73,28 +73,40 @@ typedef const struct acpi_rsconvert_info {
 
 /* Resource conversion opcodes */
 
-#define ACPI_RSC_INITGET                0
-#define ACPI_RSC_INITSET                1
-#define ACPI_RSC_FLAGINIT               2
-#define ACPI_RSC_1BITFLAG               3
-#define ACPI_RSC_2BITFLAG               4
-#define ACPI_RSC_COUNT                  5
-#define ACPI_RSC_COUNT16                6
-#define ACPI_RSC_LENGTH                 7
-#define ACPI_RSC_MOVE8                  8
-#define ACPI_RSC_MOVE16                 9
-#define ACPI_RSC_MOVE32                 10
-#define ACPI_RSC_MOVE64                 11
-#define ACPI_RSC_SET8                   12
-#define ACPI_RSC_DATA8                  13
-#define ACPI_RSC_ADDRESS                14
-#define ACPI_RSC_SOURCE                 15
-#define ACPI_RSC_SOURCEX                16
-#define ACPI_RSC_BITMASK                17
-#define ACPI_RSC_BITMASK16              18
-#define ACPI_RSC_EXIT_NE                19
-#define ACPI_RSC_EXIT_LE                20
-#define ACPI_RSC_EXIT_EQ                21
+typedef enum {
+       ACPI_RSC_INITGET = 0,
+       ACPI_RSC_INITSET,
+       ACPI_RSC_FLAGINIT,
+       ACPI_RSC_1BITFLAG,
+       ACPI_RSC_2BITFLAG,
+       ACPI_RSC_3BITFLAG,
+       ACPI_RSC_ADDRESS,
+       ACPI_RSC_BITMASK,
+       ACPI_RSC_BITMASK16,
+       ACPI_RSC_COUNT,
+       ACPI_RSC_COUNT16,
+       ACPI_RSC_COUNT_GPIO_PIN,
+       ACPI_RSC_COUNT_GPIO_RES,
+       ACPI_RSC_COUNT_GPIO_VEN,
+       ACPI_RSC_COUNT_SERIAL_RES,
+       ACPI_RSC_COUNT_SERIAL_VEN,
+       ACPI_RSC_DATA8,
+       ACPI_RSC_EXIT_EQ,
+       ACPI_RSC_EXIT_LE,
+       ACPI_RSC_EXIT_NE,
+       ACPI_RSC_LENGTH,
+       ACPI_RSC_MOVE_GPIO_PIN,
+       ACPI_RSC_MOVE_GPIO_RES,
+       ACPI_RSC_MOVE_SERIAL_RES,
+       ACPI_RSC_MOVE_SERIAL_VEN,
+       ACPI_RSC_MOVE8,
+       ACPI_RSC_MOVE16,
+       ACPI_RSC_MOVE32,
+       ACPI_RSC_MOVE64,
+       ACPI_RSC_SET8,
+       ACPI_RSC_SOURCE,
+       ACPI_RSC_SOURCEX
+} ACPI_RSCONVERT_OPCODES;
 
 /* Resource Conversion sub-opcodes */
 
@@ -106,6 +118,9 @@ typedef const struct acpi_rsconvert_info {
 #define ACPI_RS_OFFSET(f)               (u8) ACPI_OFFSET (struct acpi_resource,f)
 #define AML_OFFSET(f)                   (u8) ACPI_OFFSET (union aml_resource,f)
 
+/*
+ * Individual entry for the resource dump tables
+ */
 typedef const struct acpi_rsdump_info {
        u8 opcode;
        u8 offset;
@@ -116,20 +131,25 @@ typedef const struct acpi_rsdump_info {
 
 /* Values for the Opcode field above */
 
-#define ACPI_RSD_TITLE                  0
-#define ACPI_RSD_LITERAL                1
-#define ACPI_RSD_STRING                 2
-#define ACPI_RSD_UINT8                  3
-#define ACPI_RSD_UINT16                 4
-#define ACPI_RSD_UINT32                 5
-#define ACPI_RSD_UINT64                 6
-#define ACPI_RSD_1BITFLAG               7
-#define ACPI_RSD_2BITFLAG               8
-#define ACPI_RSD_SHORTLIST              9
-#define ACPI_RSD_LONGLIST               10
-#define ACPI_RSD_DWORDLIST              11
-#define ACPI_RSD_ADDRESS                12
-#define ACPI_RSD_SOURCE                 13
+typedef enum {
+       ACPI_RSD_TITLE = 0,
+       ACPI_RSD_1BITFLAG,
+       ACPI_RSD_2BITFLAG,
+       ACPI_RSD_3BITFLAG,
+       ACPI_RSD_ADDRESS,
+       ACPI_RSD_DWORDLIST,
+       ACPI_RSD_LITERAL,
+       ACPI_RSD_LONGLIST,
+       ACPI_RSD_SHORTLIST,
+       ACPI_RSD_SHORTLISTX,
+       ACPI_RSD_SOURCE,
+       ACPI_RSD_STRING,
+       ACPI_RSD_UINT8,
+       ACPI_RSD_UINT16,
+       ACPI_RSD_UINT32,
+       ACPI_RSD_UINT64,
+       ACPI_RSD_WORDLIST
+} ACPI_RSDUMP_OPCODES;
 
 /* restore default alignment */
 
@@ -138,13 +158,18 @@ typedef const struct acpi_rsdump_info {
 /* Resource tables indexed by internal resource type */
 
 extern const u8 acpi_gbl_aml_resource_sizes[];
+extern const u8 acpi_gbl_aml_resource_serial_bus_sizes[];
 extern struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[];
 
 /* Resource tables indexed by raw AML resource descriptor type */
 
 extern const u8 acpi_gbl_resource_struct_sizes[];
+extern const u8 acpi_gbl_resource_struct_serial_bus_sizes[];
 extern struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[];
 
+extern struct acpi_rsconvert_info
+    *acpi_gbl_convert_resource_serial_bus_dispatch[];
+
 struct acpi_vendor_walk_info {
        struct acpi_vendor_uuid *uuid;
        struct acpi_buffer *buffer;
@@ -190,6 +215,10 @@ acpi_status
 acpi_rs_set_srs_method_data(struct acpi_namespace_node *node,
                            struct acpi_buffer *ret_buffer);
 
+acpi_status
+acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
+                           struct acpi_buffer *ret_buffer);
+
 /*
  * rscalc
  */
@@ -293,6 +322,11 @@ extern struct acpi_rsconvert_info acpi_rs_convert_address16[];
 extern struct acpi_rsconvert_info acpi_rs_convert_ext_irq[];
 extern struct acpi_rsconvert_info acpi_rs_convert_address64[];
 extern struct acpi_rsconvert_info acpi_rs_convert_ext_address64[];
+extern struct acpi_rsconvert_info acpi_rs_convert_gpio[];
+extern struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[];
+extern struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[];
+extern struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[];
+extern struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[];
 
 /* These resources require separate get/set tables */
 
@@ -310,6 +344,7 @@ extern struct acpi_rsconvert_info acpi_rs_set_vendor[];
  * rsinfo
  */
 extern struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[];
+extern struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[];
 
 /*
  * rsdump
@@ -331,6 +366,12 @@ extern struct acpi_rsdump_info acpi_rs_dump_address64[];
 extern struct acpi_rsdump_info acpi_rs_dump_ext_address64[];
 extern struct acpi_rsdump_info acpi_rs_dump_ext_irq[];
 extern struct acpi_rsdump_info acpi_rs_dump_generic_reg[];
+extern struct acpi_rsdump_info acpi_rs_dump_gpio[];
+extern struct acpi_rsdump_info acpi_rs_dump_fixed_dma[];
+extern struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[];
+extern struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[];
 #endif
 
 #endif                         /* __ACRESRC_H__ */
index 1623b245dde23b226ae70d1c8233e89425b507d2..0404df605bc187940b0a82c51b5905bb83a4acc9 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 967f08124eba46ae53684bfd0347ca5da14f740f..d5bec304c823387bfc652cf7140b5e47dfb0516f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 99c140d8e348b38c364c94352af16c86de7b164d..925ccf22101b45adcb66817c736195adbcbf975f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -45,6 +45,7 @@
 #define _ACUTILS_H
 
 extern const u8 acpi_gbl_resource_aml_sizes[];
+extern const u8 acpi_gbl_resource_aml_serial_bus_sizes[];
 
 /* Strings used by the disassembler and debugger resource dump routines */
 
@@ -578,6 +579,24 @@ acpi_ut_create_list(char *list_name,
 
 #endif                         /* ACPI_DBG_TRACK_ALLOCATIONS */
 
+/*
+ * utaddress - address range check
+ */
+acpi_status
+acpi_ut_add_address_range(acpi_adr_space_type space_id,
+                         acpi_physical_address address,
+                         u32 length, struct acpi_namespace_node *region_node);
+
+void
+acpi_ut_remove_address_range(acpi_adr_space_type space_id,
+                            struct acpi_namespace_node *region_node);
+
+u32
+acpi_ut_check_address_range(acpi_adr_space_type space_id,
+                           acpi_physical_address address, u32 length, u8 warn);
+
+void acpi_ut_delete_address_lists(void);
+
 /*
  * utxferror - various error/warning output functions
  */
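The utaddress prototypes above describe a per-space_id registry of operation-region address ranges (the acpi_gbl_address_range_list array and struct acpi_address_range introduced earlier in this diff), so that later region declarations can be checked for overlap with ranges already registered. A minimal sketch of how such a singly linked range list can be maintained; the helper names echo the prototypes, but the bodies are illustrative, not the ACPICA implementation:

#include <stdlib.h>
#include <stdint.h>

typedef uint64_t phys_addr;

struct address_range {
	struct address_range *next;
	phys_addr start;
	phys_addr end;
};

/* One list head per address space of interest (Memory and I/O). */
static struct address_range *range_list[2];

static int add_address_range(int space_id, phys_addr start, uint32_t length)
{
	struct address_range *r = malloc(sizeof(*r));

	if (!r)
		return -1;
	r->start = start;
	r->end = start + length - 1;
	r->next = range_list[space_id];		/* push onto the per-space list */
	range_list[space_id] = r;
	return 0;
}

/* Count how many registered ranges overlap [address, address + length). */
static uint32_t check_address_range(int space_id, phys_addr address,
				    uint32_t length)
{
	struct address_range *r;
	phys_addr end = address + length - 1;
	uint32_t overlaps = 0;

	for (r = range_list[space_id]; r; r = r->next)
		if (address <= r->end && end >= r->start)
			overlaps++;
	return overlaps;
}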
index 1077f17859ed3d3fb74d2280966b2a847025e424..905280fec0fa0a132049318fa36953b347a0c324 100644 (file)
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define AML_LLESSEQUAL_OP           (u16) 0x9294
 #define AML_LNOTEQUAL_OP            (u16) 0x9293
 
+/*
+ * Opcodes for "Field" operators
+ */
+#define AML_FIELD_OFFSET_OP         (u8) 0x00
+#define AML_FIELD_ACCESS_OP         (u8) 0x01
+#define AML_FIELD_CONNECTION_OP     (u8) 0x02  /* ACPI 5.0 */
+#define AML_FIELD_EXT_ACCESS_OP     (u8) 0x03  /* ACPI 5.0 */
+
 /*
  * Internal opcodes
  * Use only "Unknown" AML opcodes, don't attempt to use
 #define AML_INT_METHODCALL_OP       (u16) 0x0035
 #define AML_INT_RETURN_VALUE_OP     (u16) 0x0036
 #define AML_INT_EVAL_SUBTREE_OP     (u16) 0x0037
+#define AML_INT_CONNECTION_OP       (u16) 0x0038
+#define AML_INT_EXTACCESSFIELD_OP   (u16) 0x0039
 
 #define ARG_NONE                    0x0
 
@@ -456,13 +466,16 @@ typedef enum {
  * access_as keyword
  */
 typedef enum {
-       AML_FIELD_ATTRIB_SMB_QUICK = 0x02,
-       AML_FIELD_ATTRIB_SMB_SEND_RCV = 0x04,
-       AML_FIELD_ATTRIB_SMB_BYTE = 0x06,
-       AML_FIELD_ATTRIB_SMB_WORD = 0x08,
-       AML_FIELD_ATTRIB_SMB_BLOCK = 0x0A,
-       AML_FIELD_ATTRIB_SMB_WORD_CALL = 0x0C,
-       AML_FIELD_ATTRIB_SMB_BLOCK_CALL = 0x0D
+       AML_FIELD_ATTRIB_QUICK = 0x02,
+       AML_FIELD_ATTRIB_SEND_RCV = 0x04,
+       AML_FIELD_ATTRIB_BYTE = 0x06,
+       AML_FIELD_ATTRIB_WORD = 0x08,
+       AML_FIELD_ATTRIB_BLOCK = 0x0A,
+       AML_FIELD_ATTRIB_MULTIBYTE = 0x0B,
+       AML_FIELD_ATTRIB_WORD_CALL = 0x0C,
+       AML_FIELD_ATTRIB_BLOCK_CALL = 0x0D,
+       AML_FIELD_ATTRIB_RAW_BYTES = 0x0E,
+       AML_FIELD_ATTRIB_RAW_PROCESS = 0x0F
 } AML_ACCESS_ATTRIBUTE;
 
 /* Bit fields in the AML method_flags byte */
index 59122cde247c2042e1121e6f33926eba1ec61cc1..7b2128f274e71a10f2331ca8d42abd78af166677 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define ACPI_RESTAG_TYPESPECIFICATTRIBUTES      "_ATT"
 #define ACPI_RESTAG_BASEADDRESS                 "_BAS"
 #define ACPI_RESTAG_BUSMASTER                   "_BM_" /* Master(1), Slave(0) */
+#define ACPI_RESTAG_DEBOUNCETIME                "_DBT"
 #define ACPI_RESTAG_DECODE                      "_DEC"
+#define ACPI_RESTAG_DEVICEPOLARITY              "_DPL"
 #define ACPI_RESTAG_DMA                         "_DMA"
 #define ACPI_RESTAG_DMATYPE                     "_TYP" /* Compatible(0), A(1), B(2), F(3) */
+#define ACPI_RESTAG_DRIVESTRENGTH               "_DRS"
+#define ACPI_RESTAG_ENDIANNESS                  "_END"
+#define ACPI_RESTAG_FLOWCONTROL                 "_FLC"
 #define ACPI_RESTAG_GRANULARITY                 "_GRA"
 #define ACPI_RESTAG_INTERRUPT                   "_INT"
 #define ACPI_RESTAG_INTERRUPTLEVEL              "_LL_" /* active_lo(1), active_hi(0) */
 #define ACPI_RESTAG_INTERRUPTSHARE              "_SHR" /* Shareable(1), no_share(0) */
 #define ACPI_RESTAG_INTERRUPTTYPE               "_HE_" /* Edge(1), Level(0) */
+#define ACPI_RESTAG_IORESTRICTION               "_IOR"
 #define ACPI_RESTAG_LENGTH                      "_LEN"
+#define ACPI_RESTAG_LINE                        "_LIN"
 #define ACPI_RESTAG_MEMATTRIBUTES               "_MTP" /* Memory(0), Reserved(1), ACPI(2), NVS(3) */
 #define ACPI_RESTAG_MEMTYPE                     "_MEM" /* non_cache(0), Cacheable(1) Cache+combine(2), Cache+prefetch(3) */
 #define ACPI_RESTAG_MAXADDR                     "_MAX"
 #define ACPI_RESTAG_MINADDR                     "_MIN"
 #define ACPI_RESTAG_MAXTYPE                     "_MAF"
 #define ACPI_RESTAG_MINTYPE                     "_MIF"
+#define ACPI_RESTAG_MODE                        "_MOD"
+#define ACPI_RESTAG_PARITY                      "_PAR"
+#define ACPI_RESTAG_PHASE                       "_PHA"
+#define ACPI_RESTAG_PIN                         "_PIN"
+#define ACPI_RESTAG_PINCONFIG                   "_PPI"
+#define ACPI_RESTAG_POLARITY                    "_POL"
 #define ACPI_RESTAG_REGISTERBITOFFSET           "_RBO"
 #define ACPI_RESTAG_REGISTERBITWIDTH            "_RBW"
 #define ACPI_RESTAG_RANGETYPE                   "_RNG"
 #define ACPI_RESTAG_READWRITETYPE               "_RW_" /* read_only(0), Writeable (1) */
+#define ACPI_RESTAG_LENGTH_RX                   "_RXL"
+#define ACPI_RESTAG_LENGTH_TX                   "_TXL"
+#define ACPI_RESTAG_SLAVEMODE                   "_SLV"
+#define ACPI_RESTAG_SPEED                       "_SPE"
+#define ACPI_RESTAG_STOPBITS                    "_STB"
 #define ACPI_RESTAG_TRANSLATION                 "_TRA"
 #define ACPI_RESTAG_TRANSTYPE                   "_TRS" /* Sparse(1), Dense(0) */
 #define ACPI_RESTAG_TYPE                        "_TTP" /* Translation(1), Static (0) */
 #define ACPI_RESTAG_XFERTYPE                    "_SIZ" /* 8(0), 8_and16(1), 16(2) */
+#define ACPI_RESTAG_VENDORDATA                  "_VEN"
 
 /* Default sizes for "small" resource descriptors */
 
 #define ASL_RDESC_END_DEPEND_SIZE               0x00
 #define ASL_RDESC_IO_SIZE                       0x07
 #define ASL_RDESC_FIXED_IO_SIZE                 0x03
+#define ASL_RDESC_FIXED_DMA_SIZE                0x05
 #define ASL_RDESC_END_TAG_SIZE                  0x01
 
 struct asl_resource_node {
@@ -164,6 +184,12 @@ struct aml_resource_end_tag {
        AML_RESOURCE_SMALL_HEADER_COMMON u8 checksum;
 };
 
+struct aml_resource_fixed_dma {
+       AML_RESOURCE_SMALL_HEADER_COMMON u16 request_lines;
+       u16 channels;
+       u8 width;
+};
+
 /*
  * LARGE descriptors
  */
@@ -263,6 +289,110 @@ struct aml_resource_generic_register {
        u64 address;
 };
 
+/* Common descriptor for gpio_int and gpio_io (ACPI 5.0) */
+
+struct aml_resource_gpio {
+       AML_RESOURCE_LARGE_HEADER_COMMON u8 revision_id;
+       u8 connection_type;
+       u16 flags;
+       u16 int_flags;
+       u8 pin_config;
+       u16 drive_strength;
+       u16 debounce_timeout;
+       u16 pin_table_offset;
+       u8 res_source_index;
+       u16 res_source_offset;
+       u16 vendor_offset;
+       u16 vendor_length;
+       /*
+        * Optional fields follow immediately:
+        * 1) PIN list (Words)
+        * 2) Resource Source String
+        * 3) Vendor Data bytes
+        */
+};
+
+#define AML_RESOURCE_GPIO_REVISION              1      /* ACPI 5.0 */
+
+/* Values for connection_type above */
+
+#define AML_RESOURCE_GPIO_TYPE_INT              0
+#define AML_RESOURCE_GPIO_TYPE_IO               1
+#define AML_RESOURCE_MAX_GPIOTYPE               1
+
+/* Common preamble for all serial descriptors (ACPI 5.0) */
+
+#define AML_RESOURCE_SERIAL_COMMON \
+       u8                              revision_id; \
+       u8                              res_source_index; \
+       u8                              type; \
+       u8                              flags; \
+       u16                             type_specific_flags; \
+       u8                              type_revision_id; \
+       u16                             type_data_length; \
+
+/* Values for the type field above */
+
+#define AML_RESOURCE_I2C_SERIALBUSTYPE          1
+#define AML_RESOURCE_SPI_SERIALBUSTYPE          2
+#define AML_RESOURCE_UART_SERIALBUSTYPE         3
+#define AML_RESOURCE_MAX_SERIALBUSTYPE          3
+#define AML_RESOURCE_VENDOR_SERIALBUSTYPE       192    /* Vendor defined is 0xC0-0xFF (NOT SUPPORTED) */
+
+struct aml_resource_common_serialbus {
+AML_RESOURCE_LARGE_HEADER_COMMON AML_RESOURCE_SERIAL_COMMON};
+
+struct aml_resource_i2c_serialbus {
+       AML_RESOURCE_LARGE_HEADER_COMMON
+           AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
+       u16 slave_address;
+       /*
+        * Optional fields follow immediately:
+        * 1) Vendor Data bytes
+        * 2) Resource Source String
+        */
+};
+
+#define AML_RESOURCE_I2C_REVISION               1      /* ACPI 5.0 */
+#define AML_RESOURCE_I2C_TYPE_REVISION          1      /* ACPI 5.0 */
+#define AML_RESOURCE_I2C_MIN_DATA_LEN           6
+
+struct aml_resource_spi_serialbus {
+       AML_RESOURCE_LARGE_HEADER_COMMON
+           AML_RESOURCE_SERIAL_COMMON u32 connection_speed;
+       u8 data_bit_length;
+       u8 clock_phase;
+       u8 clock_polarity;
+       u16 device_selection;
+       /*
+        * Optional fields follow immediately:
+        * 1) Vendor Data bytes
+        * 2) Resource Source String
+        */
+};
+
+#define AML_RESOURCE_SPI_REVISION               1      /* ACPI 5.0 */
+#define AML_RESOURCE_SPI_TYPE_REVISION          1      /* ACPI 5.0 */
+#define AML_RESOURCE_SPI_MIN_DATA_LEN           9
+
+struct aml_resource_uart_serialbus {
+       AML_RESOURCE_LARGE_HEADER_COMMON
+           AML_RESOURCE_SERIAL_COMMON u32 default_baud_rate;
+       u16 rx_fifo_size;
+       u16 tx_fifo_size;
+       u8 parity;
+       u8 lines_enabled;
+       /*
+        * Optional fields follow immediately:
+        * 1) Vendor Data bytes
+        * 2) Resource Source String
+        */
+};
+
+#define AML_RESOURCE_UART_REVISION              1      /* ACPI 5.0 */
+#define AML_RESOURCE_UART_TYPE_REVISION         1      /* ACPI 5.0 */
+#define AML_RESOURCE_UART_MIN_DATA_LEN          10
+
 /* restore default alignment */
 
 #pragma pack()
@@ -284,6 +414,7 @@ union aml_resource {
        struct aml_resource_end_dependent end_dpf;
        struct aml_resource_io io;
        struct aml_resource_fixed_io fixed_io;
+       struct aml_resource_fixed_dma fixed_dma;
        struct aml_resource_vendor_small vendor_small;
        struct aml_resource_end_tag end_tag;
 
@@ -299,6 +430,11 @@ union aml_resource {
        struct aml_resource_address64 address64;
        struct aml_resource_extended_address64 ext_address64;
        struct aml_resource_extended_irq extended_irq;
+       struct aml_resource_gpio gpio;
+       struct aml_resource_i2c_serialbus i2c_serial_bus;
+       struct aml_resource_spi_serialbus spi_serial_bus;
+       struct aml_resource_uart_serialbus uart_serial_bus;
+       struct aml_resource_common_serialbus common_serial_bus;
 
        /* Utility overlays */
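The GPIO and serial-bus descriptors added in this file all end with variable-length tails ("Optional fields follow immediately"), located through offsets carried in the fixed part. A sketch of how a consumer might find the pin table of a GpioInt/GpioIo descriptor from those offsets; the field layout mirrors struct aml_resource_gpio above, and the assumptions that the offsets are relative to the start of the descriptor and that the pin table runs up to the resource-source offset follow the ACPI 5.0 descriptor definition, not anything quoted from this patch:

#include <stdint.h>
#include <stddef.h>

/* Stand-in for the packed AML descriptor layout shown above
 * (large-resource header followed by the GPIO-specific fields). */
struct gpio_desc {
	uint8_t  tag;			/* 0x8C, ACPI_RESOURCE_NAME_GPIO */
	uint16_t length;		/* bytes following the 3-byte header */
	uint8_t  revision_id;
	uint8_t  connection_type;	/* 0 = GpioInt, 1 = GpioIo */
	uint16_t flags;
	uint16_t int_flags;
	uint8_t  pin_config;
	uint16_t drive_strength;
	uint16_t debounce_timeout;
	uint16_t pin_table_offset;	/* from start of descriptor (assumed) */
	uint8_t  res_source_index;
	uint16_t res_source_offset;	/* from start of descriptor (assumed) */
	uint16_t vendor_offset;
	uint16_t vendor_length;
} __attribute__((packed));

/* Return a pointer to the 16-bit pin-number table and its element count.
 * The table may be unaligned in real AML; production code would memcpy. */
static const uint16_t *gpio_pin_table(const uint8_t *desc, size_t *count)
{
	const struct gpio_desc *h = (const struct gpio_desc *)desc;

	*count = (h->res_source_offset - h->pin_table_offset) / 2;
	return (const uint16_t *)(desc + h->pin_table_offset);
}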
 
index 8c7b99728aa23163e77178a6748ddd5e699c8e45..80eb1900297f549f8191b8d3deb4b90ad80863b8 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -250,6 +250,13 @@ acpi_ds_get_bank_field_arguments(union acpi_operand_object *obj_desc)
        status = acpi_ds_execute_arguments(node, node->parent,
                                           extra_desc->extra.aml_length,
                                           extra_desc->extra.aml_start);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       status = acpi_ut_add_address_range(obj_desc->region.space_id,
+                                          obj_desc->region.address,
+                                          obj_desc->region.length, node);
        return_ACPI_STATUS(status);
 }
 
@@ -384,8 +391,15 @@ acpi_status acpi_ds_get_region_arguments(union acpi_operand_object *obj_desc)
 
        /* Execute the argument AML */
 
-       status = acpi_ds_execute_arguments(node, node->parent,
+       status = acpi_ds_execute_arguments(node, extra_desc->extra.scope_node,
                                           extra_desc->extra.aml_length,
                                           extra_desc->extra.aml_start);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       status = acpi_ut_add_address_range(obj_desc->region.space_id,
+                                          obj_desc->region.address,
+                                          obj_desc->region.length, node);
        return_ACPI_STATUS(status);
 }
index 26c49fff58da0fecbe83c2d70bfaa825eb2c90b2..effe4ca1133fd7cd93ebb1ab397d8be0ffe68e72 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 34be60c0e4484b880f6bf2da883e7ba1e7311ecd..cd243cf2cab2373f25c40f076b2d211b8f5641a2 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -221,6 +221,7 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
 {
        acpi_status status;
        u64 position;
+       union acpi_parse_object *child;
 
        ACPI_FUNCTION_TRACE_PTR(ds_get_field_names, info);
 
@@ -232,10 +233,11 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
 
        while (arg) {
                /*
-                * Three types of field elements are handled:
-                * 1) Offset - specifies a bit offset
-                * 2) access_as - changes the access mode
-                * 3) Name - Enters a new named field into the namespace
+                * Four types of field elements are handled:
+                * 1) Name - Enters a new named field into the namespace
+                * 2) Offset - specifies a bit offset
+                * 3) access_as - changes the access mode/attributes
+                * 4) Connection - Associate a resource template with the field
                 */
                switch (arg->common.aml_opcode) {
                case AML_INT_RESERVEDFIELD_OP:
@@ -253,21 +255,70 @@ acpi_ds_get_field_names(struct acpi_create_field_info *info,
                        break;
 
                case AML_INT_ACCESSFIELD_OP:
-
+               case AML_INT_EXTACCESSFIELD_OP:
                        /*
-                        * Get a new access_type and access_attribute -- to be used for all
-                        * field units that follow, until field end or another access_as
-                        * keyword.
+                        * Get new access_type, access_attribute, and access_length fields
+                        * -- to be used for all field units that follow, until the
+                        * end-of-field or another access_as keyword is encountered.
+                        * NOTE. These three bytes are encoded in the integer value
+                        * of the parseop for convenience.
                         *
                         * In field_flags, preserve the flag bits other than the
-                        * ACCESS_TYPE bits
+                        * ACCESS_TYPE bits.
                         */
+
+                       /* access_type (byte_acc, word_acc, etc.) */
+
                        info->field_flags = (u8)
                            ((info->
                              field_flags & ~(AML_FIELD_ACCESS_TYPE_MASK)) |
-                            ((u8) ((u32) arg->common.value.integer >> 8)));
+                            ((u8)((u32)(arg->common.value.integer & 0x07))));
+
+                       /* access_attribute (attrib_quick, attrib_byte, etc.) */
+
+                       info->attribute =
+                           (u8)((arg->common.value.integer >> 8) & 0xFF);
+
+                       /* access_length (for serial/buffer protocols) */
+
+                       info->access_length =
+                           (u8)((arg->common.value.integer >> 16) & 0xFF);
+                       break;
+
+               case AML_INT_CONNECTION_OP:
+                       /*
+                        * Clear any previous connection. New connection is used for all
+                        * fields that follow, similar to access_as
+                        */
+                       info->resource_buffer = NULL;
+                       info->connection_node = NULL;
 
-                       info->attribute = (u8) (arg->common.value.integer);
+                       /*
+                        * A Connection() is either an actual resource descriptor (buffer)
+                        * or a named reference to a resource template
+                        */
+                       child = arg->common.value.arg;
+                       if (child->common.aml_opcode == AML_INT_BYTELIST_OP) {
+                               info->resource_buffer = child->named.data;
+                               info->resource_length =
+                                   (u16)child->named.value.integer;
+                       } else {
+                               /* Lookup the Connection() namepath, it should already exist */
+
+                               status = acpi_ns_lookup(walk_state->scope_info,
+                                                       child->common.value.
+                                                       name, ACPI_TYPE_ANY,
+                                                       ACPI_IMODE_EXECUTE,
+                                                       ACPI_NS_DONT_OPEN_SCOPE,
+                                                       walk_state,
+                                                       &info->connection_node);
+                               if (ACPI_FAILURE(status)) {
+                                       ACPI_ERROR_NAMESPACE(child->common.
+                                                            value.name,
+                                                            status);
+                                       return_ACPI_STATUS(status);
+                               }
+                       }
                        break;
 
                case AML_INT_NAMEDFIELD_OP:
@@ -374,6 +425,8 @@ acpi_ds_create_field(union acpi_parse_object *op,
                }
        }
 
+       ACPI_MEMSET(&info, 0, sizeof(struct acpi_create_field_info));
+
        /* Second arg is the field flags */
 
        arg = arg->common.next;
@@ -386,7 +439,6 @@ acpi_ds_create_field(union acpi_parse_object *op,
        info.region_node = region_node;
 
        status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-
        return_ACPI_STATUS(status);
 }
 
@@ -474,8 +526,8 @@ acpi_ds_init_field_objects(union acpi_parse_object *op,
         */
        while (arg) {
                /*
-                * Ignore OFFSET and ACCESSAS terms here; we are only interested in the
-                * field names in order to enter them into the namespace.
+                * Ignore OFFSET/ACCESSAS/CONNECTION terms here; we are only interested
+                * in the field names in order to enter them into the namespace.
                 */
                if (arg->common.aml_opcode == AML_INT_NAMEDFIELD_OP) {
                        status = acpi_ns_lookup(walk_state->scope_info,
@@ -651,6 +703,5 @@ acpi_ds_create_index_field(union acpi_parse_object *op,
        info.region_node = region_node;
 
        status = acpi_ds_get_field_names(&info, walk_state, arg->common.next);
-
        return_ACPI_STATUS(status);
 }
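In the dsfield.c hunk above, AccessAs()/extended-access information arrives packed into the parse op's integer value ("These three bytes are encoded in the integer value of the parseop"), and acpi_ds_get_field_names() unpacks it with shifts and masks. A small stand-alone illustration of that packing; the layout (access type in bits 0-2, attribute in bits 8-15, access length in bits 16-23) is read off the unpacking code in the hunk, not taken from a specification:

#include <stdint.h>

/* Pack the three access bytes the way the field parser expects to find
 * them in arg->common.value.integer. */
static uint32_t pack_access(uint8_t type, uint8_t attribute, uint8_t length)
{
	return (uint32_t)type | ((uint32_t)attribute << 8) |
	       ((uint32_t)length << 16);
}

/* Unpack them again, mirroring the masks used in the hunk above. */
static void unpack_access(uint32_t value, uint8_t *type, uint8_t *attribute,
			  uint8_t *length)
{
	*type      = value & 0x07;		/* byte_acc, word_acc, ... */
	*attribute = (value >> 8) & 0xFF;	/* attrib_quick, attrib_bytes, ... */
	*length    = (value >> 16) & 0xFF;	/* AccessLength for serial buses */
}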
index a7718bf2b9a18a24e0bdee7554736caa07976c2b..9e5ac7f780a7e1f006bd7bd1dd256a9367859e32 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5d797751e205b363a48e43b6484e2a9cc37fc317..00f5dab5bcc0e7414cb4ea910daf37a6536bbf5c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 905ce29a92e1784c82430c64e527d536e1c7f579..b40bd507be5dc33993e96667b5e2c50ad6395cff 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f42e17e5c252cc98331da46b45270090b093195c..d7045ca3e32a59269e036bac0e05893e0a5d4795 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c627a288e027644d5ef537eb1776098085811ed4..e5eff758510266c0c68a1b772c8004598d1fc2a1 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2c477ce172fad15d44218da1cc66e3377822590e..1abcda31037f20e6988fc073cdc00764f86d7357 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fe40e4c6554f2554310e055cfd96a289c1bf0d89..642f3c053e878e95d1a55fc9b149de42dce19f79 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 324acec1179abaef80d5bb6fdbbd50e2f4106469..552aa3a50c84882ce9c9aa152d1b231bf7f6cd20 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 976318138c56aa983f74f3786f4016040354d44c..ae71477247631074fcc627b1345e307e77b334a9 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 76a661fc1e0933cfa84aba0a6d90a449ac1e5da1..9e9490a9cbf0e22ab16bfa9c32972afbc1971962 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a6c374ef9914e8bb57ad092498ba55feef512145..c9c2ac13e7cc926d977ad8cb3052af532416badc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d458b041e6510aec0c91cd4b20c0080147975524..6729ebe2f1e669b78f7a30e86efd6445720113e5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -71,6 +71,12 @@ acpi_status acpi_ev_initialize_events(void)
 
        ACPI_FUNCTION_TRACE(ev_initialize_events);
 
+       /* If Hardware Reduced flag is set, there are no fixed events */
+
+       if (acpi_gbl_reduced_hardware) {
+               return_ACPI_STATUS(AE_OK);
+       }
+
        /*
         * Initialize the Fixed and General Purpose Events. This is done prior to
         * enabling SCIs to prevent interrupts from occurring before the handlers
@@ -111,6 +117,12 @@ acpi_status acpi_ev_install_xrupt_handlers(void)
 
        ACPI_FUNCTION_TRACE(ev_install_xrupt_handlers);
 
+       /* If Hardware Reduced flag is set, there is no ACPI h/w */
+
+       if (acpi_gbl_reduced_hardware) {
+               return_ACPI_STATUS(AE_OK);
+       }
+
        /* Install the SCI handler */
 
        status = acpi_ev_install_sci_handler();
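
The two guards added above make fixed-event and SCI setup a no-op on hardware-reduced platforms (the evglock.c hunk below does the same for the global lock); acpi_gbl_reduced_hardware is typically derived from the FADT's hardware-reduced flag. As a rough, self-contained model of the early-return pattern only -- the names below are illustrative stand-ins, not ACPICA symbols:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for acpi_gbl_reduced_hardware. */
static bool reduced_hardware = true;

static int init_fixed_events(void)
{
        /* No fixed events, SCI or global lock exist on hardware-reduced
         * platforms, so report success without touching ACPI hardware. */
        if (reduced_hardware)
                return 0;

        /* ... program PM1/GPE enable registers here ... */
        return 0;
}

int main(void)
{
        printf("init_fixed_events: %d\n", init_fixed_events());
        return 0;
}
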
index 56a562a1e5d7bde44f5d15b40dc250843b2de41f..5e5683cb1f0d9445e3d0e0330eaebe3335866cf2 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,12 @@ acpi_status acpi_ev_init_global_lock_handler(void)
 
        ACPI_FUNCTION_TRACE(ev_init_global_lock_handler);
 
+       /* If Hardware Reduced flag is set, there is no global lock */
+
+       if (acpi_gbl_reduced_hardware) {
+               return_ACPI_STATUS(AE_OK);
+       }
+
        /* Attempt installation of the global lock handler */
 
        status = acpi_install_fixed_event_handler(ACPI_EVENT_GLOBAL,
index 65c79add3b1982ae3432a830c4c94f37be60f39d..9e88cb6fb25ea6b05a0befc926b3986a66ac9fd3 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ca2c41a5331177707e46bf7b0d4f9407e8a24478..be75339cd5dd2c31731187a791d94b1829b97945 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ce9aa9f9a9724e818d5d6e6fe568e35a3bf9d39c..adf7494da9dbdcfb03e1843ea559adbbf945a889 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 80a81d0c4a80d36049aceac84b78ead5dffe0481..25073932aa10d373d2e838d8517f7209fe0b4425 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d0b3318444273977a382afb74ffcc5a0ed2ddb65..84966f4164638573082ce23373460df4297b6eb2 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f0edf5c43c035f0da64345ffe30894099beecc2d..1b0180a1b798bb0184241ac12d75937b55d74902 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -329,6 +329,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
  * FUNCTION:    acpi_ev_address_space_dispatch
  *
  * PARAMETERS:  region_obj          - Internal region object
+ *              field_obj           - Corresponding field. Can be NULL.
  *              Function            - Read or Write operation
  *              region_offset       - Where in the region to read or write
  *              bit_width           - Field width in bits (8, 16, 32, or 64)
@@ -344,6 +345,7 @@ acpi_ev_execute_reg_method(union acpi_operand_object *region_obj, u32 function)
 
 acpi_status
 acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
+                              union acpi_operand_object *field_obj,
                               u32 function,
                               u32 region_offset, u32 bit_width, u64 *value)
 {
@@ -353,6 +355,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
        union acpi_operand_object *handler_desc;
        union acpi_operand_object *region_obj2;
        void *region_context = NULL;
+       struct acpi_connection_info *context;
 
        ACPI_FUNCTION_TRACE(ev_address_space_dispatch);
 
@@ -375,6 +378,8 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                return_ACPI_STATUS(AE_NOT_EXIST);
        }
 
+       context = handler_desc->address_space.context;
+
        /*
         * It may be the case that the region has never been initialized.
         * Some types of regions require special init code
@@ -404,8 +409,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                acpi_ex_exit_interpreter();
 
                status = region_setup(region_obj, ACPI_REGION_ACTIVATE,
-                                     handler_desc->address_space.context,
-                                     &region_context);
+                                     context, &region_context);
 
                /* Re-enter the interpreter */
 
@@ -455,6 +459,25 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
                          acpi_ut_get_region_name(region_obj->region.
                                                  space_id)));
 
+       /*
+        * Special handling for generic_serial_bus and general_purpose_io:
+        * There are three extra parameters that must be passed to the
+        * handler via the context:
+        *   1) Connection buffer, a resource template from Connection() op.
+        *   2) Length of the above buffer.
+        *   3) Actual access length from the access_as() op.
+        */
+       if (((region_obj->region.space_id == ACPI_ADR_SPACE_GSBUS) ||
+            (region_obj->region.space_id == ACPI_ADR_SPACE_GPIO)) &&
+           context && field_obj) {
+
+               /* Get the Connection (resource_template) buffer */
+
+               context->connection = field_obj->field.resource_buffer;
+               context->length = field_obj->field.resource_length;
+               context->access_length = field_obj->field.access_length;
+       }
+
        if (!(handler_desc->address_space.handler_flags &
              ACPI_ADDR_HANDLER_DEFAULT_INSTALLED)) {
                /*
@@ -469,7 +492,7 @@ acpi_ev_address_space_dispatch(union acpi_operand_object *region_obj,
 
        status = handler(function,
                         (region_obj->region.address + region_offset),
-                        bit_width, value, handler_desc->address_space.context,
+                        bit_width, value, context,
                         region_obj2->extra.region_context);
 
        if (ACPI_FAILURE(status)) {
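
With the new field_obj parameter, the dispatcher above can hand the Connection() resource buffer, its length, and the AccessAs() length to GenericSerialBus/GPIO handlers through the handler context. A minimal sketch of such a handler, assuming the struct acpi_connection_info members used in this hunk (connection, length, access_length) and the standard ACPICA handler signature; example_gsbus_handler is a made-up name and the code is only meant to compile in a kernel/ACPICA context:

/* Sketch only: a GenericSerialBus region handler reading Connection() data
 * from its context, as set up by acpi_ev_address_space_dispatch above. */
#include <acpi/acpi.h>

static acpi_status example_gsbus_handler(u32 function,
                                         acpi_physical_address address,
                                         u32 bit_width, u64 *value,
                                         void *handler_context,
                                         void *region_context)
{
        struct acpi_connection_info *info = handler_context;
        u8 attribute = (u8)(function >> 16);    /* protocol from upper bits */

        if (!info || !info->connection)
                return AE_NO_HANDLER;

        /* info->connection is the raw resource template from Connection(),
         * info->length its size, info->access_length the AccessAs() length. */
        (void)attribute;
        (void)address;
        (void)bit_width;
        (void)value;
        (void)region_context;
        return AE_OK;
}

A driver would presumably attach such a handler with acpi_install_address_space_handler() for ACPI_ADR_SPACE_GSBUS, passing its acpi_connection_info as the handler context.
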
index 55a5d35ef34a0d2f85e86020b96d305dd25b356f..819c17f5897ab664b67f9656803986ae6912ad58 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2ebd40e1a3ef947ff7672afde21c6f211d33ae00..26065c612e7673d2c376741da9dbcef3dceefb2b 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f4f523bf59390108e5095a303038da3ce872f345..61944e89565a31aa7d82dcc291da7296e28a6c93 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 20516e599476c6b59cbb100d7dfdd01357ef0273..1768bbec10023613fdce37d84f400907a69c8f8b 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f06a3ee356ba72048b8a2fac77faeaf0324ec5e0..33388fd69df448155c51154bfe1f5eb332580600 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index aee887e3ca5c6ca57ac76de607e622b581f60e03..6019208cd4b6f522227b70cb5577f13406b5b221 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 745a42b401f5030bc88b11b1b89aa2b21a455b14..c86d44e41bc85bf8935a56c4a2f5c4e348bd0dc7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -297,9 +297,9 @@ acpi_ex_region_read(union acpi_operand_object *obj_desc, u32 length, u8 *buffer)
        /* Bytewise reads */
 
        for (i = 0; i < length; i++) {
-               status = acpi_ev_address_space_dispatch(obj_desc, ACPI_READ,
-                                                       region_offset, 8,
-                                                       &value);
+               status =
+                   acpi_ev_address_space_dispatch(obj_desc, NULL, ACPI_READ,
+                                                  region_offset, 8, &value);
                if (ACPI_FAILURE(status)) {
                        return status;
                }
index 74162a11817dc9b6bff8e30ba2d039a7c1ec47a9..e385436bd42422ef81ad64ca2decfa9a14eaeae7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 110711afada8be5c0b769c693c6fd3b624251ef4..3f5bc998c1cb15fc7f9e212faa97552b0e87465e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -267,7 +267,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
  *
  * PARAMETERS:  aml_start           - Pointer to the region declaration AML
  *              aml_length          - Max length of the declaration AML
- *              region_space        - space_iD for the region
+ *              space_id            - Address space ID for the region
  *              walk_state          - Current state
  *
  * RETURN:      Status
@@ -279,7 +279,7 @@ acpi_status acpi_ex_create_mutex(struct acpi_walk_state *walk_state)
 acpi_status
 acpi_ex_create_region(u8 * aml_start,
                      u32 aml_length,
-                     u8 region_space, struct acpi_walk_state *walk_state)
+                     u8 space_id, struct acpi_walk_state *walk_state)
 {
        acpi_status status;
        union acpi_operand_object *obj_desc;
@@ -304,16 +304,19 @@ acpi_ex_create_region(u8 * aml_start,
         * Space ID must be one of the predefined IDs, or in the user-defined
         * range
         */
-       if ((region_space >= ACPI_NUM_PREDEFINED_REGIONS) &&
-           (region_space < ACPI_USER_REGION_BEGIN) &&
-           (region_space != ACPI_ADR_SPACE_DATA_TABLE)) {
-               ACPI_ERROR((AE_INFO, "Invalid AddressSpace type 0x%X",
-                           region_space));
-               return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+       if (!acpi_is_valid_space_id(space_id)) {
+               /*
+                * Print an error message, but continue. We don't want to abort
+                * a table load for this exception. Instead, if the region is
+                * actually used at runtime, abort the executing method.
+                */
+               ACPI_ERROR((AE_INFO,
+                           "Invalid/unknown Address Space ID: 0x%2.2X",
+                           space_id));
        }
 
        ACPI_DEBUG_PRINT((ACPI_DB_LOAD, "Region Type - %s (0x%X)\n",
-                         acpi_ut_get_region_name(region_space), region_space));
+                         acpi_ut_get_region_name(space_id), space_id));
 
        /* Create the region descriptor */
 
@@ -330,10 +333,16 @@ acpi_ex_create_region(u8 * aml_start,
        region_obj2 = obj_desc->common.next_object;
        region_obj2->extra.aml_start = aml_start;
        region_obj2->extra.aml_length = aml_length;
+       if (walk_state->scope_info) {
+               region_obj2->extra.scope_node =
+                   walk_state->scope_info->scope.node;
+       } else {
+               region_obj2->extra.scope_node = node;
+       }
 
        /* Init the region from the operands */
 
-       obj_desc->region.space_id = region_space;
+       obj_desc->region.space_id = space_id;
        obj_desc->region.address = 0;
        obj_desc->region.length = 0;
        obj_desc->region.node = node;
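
Note that the space-ID check in acpi_ex_create_region now only logs: an unknown ID no longer aborts the table load, and the failure is deferred until the acpi_ex_setup_region hunk further down rejects the region at first use. A toy, stand-alone model of that declare-softly/fail-on-use split, with made-up names and purely illustrative ID ranges:

#include <stdbool.h>
#include <stdio.h>

static bool space_id_is_known(unsigned int id)
{
        return id < 0x0B || id >= 0x80;         /* illustrative ranges only */
}

/* Declaration time: warn but keep loading the table. */
static void declare_region(unsigned int space_id)
{
        if (!space_id_is_known(space_id))
                fprintf(stderr, "unknown space id 0x%02X (continuing)\n",
                        space_id);
}

/* First access: this is where the executing method actually fails. */
static int access_region(unsigned int space_id)
{
        return space_id_is_known(space_id) ? 0 : -1;
}

int main(void)
{
        declare_region(0x42);
        printf("access: %d\n", access_region(0x42));
        return 0;
}
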
index c7a2f1edd28276389d0a61bc5674ea6ee7e89b5d..e211e9c192159b10af4c9a68bc17dfba742b57fd 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 61b8c0e8b74d1b5bc115addeb1d04da535401b03..2a6ac0a3bc1e661b25876a1247fbe763279ef91d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -192,10 +192,13 @@ static struct acpi_exdump_info acpi_ex_dump_buffer_field[3] = {
         "Buffer Object"}
 };
 
-static struct acpi_exdump_info acpi_ex_dump_region_field[3] = {
+static struct acpi_exdump_info acpi_ex_dump_region_field[5] = {
        {ACPI_EXD_INIT, ACPI_EXD_TABLE_SIZE(acpi_ex_dump_region_field), NULL},
        {ACPI_EXD_FIELD, 0, NULL},
-       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"}
+       {ACPI_EXD_UINT8, ACPI_EXD_OFFSET(field.access_length), "AccessLength"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.region_obj), "Region Object"},
+       {ACPI_EXD_POINTER, ACPI_EXD_OFFSET(field.resource_buffer),
+        "ResourceBuffer"}
 };
 
 static struct acpi_exdump_info acpi_ex_dump_bank_field[5] = {
index 0bde2230c028ed0f0cc28689e98eacccd7a9bd8b..dc092f5b35d6b8ed0198535a39be122280d7828e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -100,18 +100,25 @@ acpi_ex_read_data_from_field(struct acpi_walk_state *walk_state,
                   (obj_desc->field.region_obj->region.space_id ==
                    ACPI_ADR_SPACE_SMBUS
                    || obj_desc->field.region_obj->region.space_id ==
+                   ACPI_ADR_SPACE_GSBUS
+                   || obj_desc->field.region_obj->region.space_id ==
                    ACPI_ADR_SPACE_IPMI)) {
                /*
-                * This is an SMBus or IPMI read. We must create a buffer to hold
+                * This is an SMBus, GSBus or IPMI read. We must create a buffer to hold
                 * the data and then directly access the region handler.
                 *
-                * Note: Smbus protocol value is passed in upper 16-bits of Function
+                * Note: SMBus and GSBus protocol value is passed in upper 16-bits of Function
                 */
                if (obj_desc->field.region_obj->region.space_id ==
                    ACPI_ADR_SPACE_SMBUS) {
                        length = ACPI_SMBUS_BUFFER_SIZE;
                        function =
                            ACPI_READ | (obj_desc->field.attribute << 16);
+               } else if (obj_desc->field.region_obj->region.space_id ==
+                          ACPI_ADR_SPACE_GSBUS) {
+                       length = ACPI_GSBUS_BUFFER_SIZE;
+                       function =
+                           ACPI_READ | (obj_desc->field.attribute << 16);
                } else {        /* IPMI */
 
                        length = ACPI_IPMI_BUFFER_SIZE;
@@ -248,21 +255,23 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
                   (obj_desc->field.region_obj->region.space_id ==
                    ACPI_ADR_SPACE_SMBUS
                    || obj_desc->field.region_obj->region.space_id ==
+                   ACPI_ADR_SPACE_GSBUS
+                   || obj_desc->field.region_obj->region.space_id ==
                    ACPI_ADR_SPACE_IPMI)) {
                /*
-                * This is an SMBus or IPMI write. We will bypass the entire field
+                * This is an SMBus, GSBus or IPMI write. We will bypass the entire field
                 * mechanism and handoff the buffer directly to the handler. For
                 * these address spaces, the buffer is bi-directional; on a write,
                 * return data is returned in the same buffer.
                 *
                 * Source must be a buffer of sufficient size:
-                * ACPI_SMBUS_BUFFER_SIZE or ACPI_IPMI_BUFFER_SIZE.
+                * ACPI_SMBUS_BUFFER_SIZE, ACPI_GSBUS_BUFFER_SIZE, or ACPI_IPMI_BUFFER_SIZE.
                 *
-                * Note: SMBus protocol type is passed in upper 16-bits of Function
+                * Note: SMBus and GSBus protocol type is passed in upper 16-bits of Function
                 */
                if (source_desc->common.type != ACPI_TYPE_BUFFER) {
                        ACPI_ERROR((AE_INFO,
-                                   "SMBus or IPMI write requires Buffer, found type %s",
+                                   "SMBus/IPMI/GenericSerialBus write requires Buffer, found type %s",
                                    acpi_ut_get_object_type_name(source_desc)));
 
                        return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
@@ -273,6 +282,11 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
                        length = ACPI_SMBUS_BUFFER_SIZE;
                        function =
                            ACPI_WRITE | (obj_desc->field.attribute << 16);
+               } else if (obj_desc->field.region_obj->region.space_id ==
+                          ACPI_ADR_SPACE_GSBUS) {
+                       length = ACPI_GSBUS_BUFFER_SIZE;
+                       function =
+                           ACPI_WRITE | (obj_desc->field.attribute << 16);
                } else {        /* IPMI */
 
                        length = ACPI_IPMI_BUFFER_SIZE;
@@ -281,7 +295,7 @@ acpi_ex_write_data_to_field(union acpi_operand_object *source_desc,
 
                if (source_desc->buffer.length < length) {
                        ACPI_ERROR((AE_INFO,
-                                   "SMBus or IPMI write requires Buffer of length %u, found length %u",
+                                   "SMBus/IPMI/GenericSerialBus write requires Buffer of length %u, found length %u",
                                    length, source_desc->buffer.length));
 
                        return_ACPI_STATUS(AE_AML_BUFFER_LIMIT);
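
For these bidirectional buffer protocols the field's access attribute is folded into the upper 16 bits of Function before the region handler is invoked, as the hunks above do for SMBus, GSBus and IPMI. A small stand-alone sketch of that encoding and the matching decode (constants here are illustrative, not the ACPI_*_BUFFER_SIZE values):

#include <stdint.h>
#include <stdio.h>

#define OP_READ   0u
#define OP_WRITE  1u

/* Pack the read/write operation and the field attribute the same way
 * "function" is built for SMBus/GSBus/IPMI fields above. */
static uint32_t pack_function(uint32_t op, uint8_t attribute)
{
        return op | ((uint32_t)attribute << 16);
}

int main(void)
{
        uint32_t function = pack_function(OP_WRITE, 0x0B);

        printf("op=%u attribute=0x%02X\n",
               (unsigned)(function & 0xFFFFu),
               (unsigned)(function >> 16));
        return 0;
}
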
index f915a7f3f921ea6fd43d195425cb9783946cae60..149de45fdaddc659e134903e96689ce64289f31c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -86,6 +86,7 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
 {
        acpi_status status = AE_OK;
        union acpi_operand_object *rgn_desc;
+       u8 space_id;
 
        ACPI_FUNCTION_TRACE_U32(ex_setup_region, field_datum_byte_offset);
 
@@ -101,6 +102,17 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
                return_ACPI_STATUS(AE_AML_OPERAND_TYPE);
        }
 
+       space_id = rgn_desc->region.space_id;
+
+       /* Validate the Space ID */
+
+       if (!acpi_is_valid_space_id(space_id)) {
+               ACPI_ERROR((AE_INFO,
+                           "Invalid/unknown Address Space ID: 0x%2.2X",
+                           space_id));
+               return_ACPI_STATUS(AE_AML_INVALID_SPACE_ID);
+       }
+
        /*
         * If the Region Address and Length have not been previously evaluated,
         * evaluate them now and save the results.
@@ -119,11 +131,12 @@ acpi_ex_setup_region(union acpi_operand_object *obj_desc,
        }
 
        /*
-        * Exit now for SMBus or IPMI address space, it has a non-linear
+        * Exit now for SMBus, GSBus or IPMI address space; these have a non-linear
         * address space and the request cannot be directly validated
         */
-       if (rgn_desc->region.space_id == ACPI_ADR_SPACE_SMBUS ||
-           rgn_desc->region.space_id == ACPI_ADR_SPACE_IPMI) {
+       if (space_id == ACPI_ADR_SPACE_SMBUS ||
+           space_id == ACPI_ADR_SPACE_GSBUS ||
+           space_id == ACPI_ADR_SPACE_IPMI) {
 
                /* SMBus or IPMI has a non-linear address space */
 
@@ -271,11 +284,12 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
 
        /* Invoke the appropriate address_space/op_region handler */
 
-       status =
-           acpi_ev_address_space_dispatch(rgn_desc, function, region_offset,
-                                          ACPI_MUL_8(obj_desc->common_field.
-                                                     access_byte_width),
-                                          value);
+       status = acpi_ev_address_space_dispatch(rgn_desc, obj_desc,
+                                               function, region_offset,
+                                               ACPI_MUL_8(obj_desc->
+                                                          common_field.
+                                                          access_byte_width),
+                                               value);
 
        if (ACPI_FAILURE(status)) {
                if (status == AE_NOT_IMPLEMENTED) {
@@ -316,6 +330,7 @@ acpi_ex_access_region(union acpi_operand_object *obj_desc,
 static u8
 acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
 {
+       ACPI_FUNCTION_NAME(ex_register_overflow);
 
        if (obj_desc->common_field.bit_length >= ACPI_INTEGER_BIT_SIZE) {
                /*
@@ -330,6 +345,11 @@ acpi_ex_register_overflow(union acpi_operand_object *obj_desc, u64 value)
                 * The Value is larger than the maximum value that can fit into
                 * the register.
                 */
+               ACPI_ERROR((AE_INFO,
+                           "Index value 0x%8.8X%8.8X overflows field width 0x%X",
+                           ACPI_FORMAT_UINT64(value),
+                           obj_desc->common_field.bit_length));
+
                return (TRUE);
        }
 
index 703d88ed0b3df53dbcfede8511ee95e83d7658d4..0a0893310348e6ad35a8cf6cf1f56d9f1ba18961 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index be1c56ead6535e68b5b73bb22a3a9451506f840d..60933e9dc3c0a6cf07f207cdb8d86894ce03c0da 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 49ec049c157e6b4e86e0d9b1de72f60fe9452b40..fcc75fa27d323d4aa870f4c78c06473830653550 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 236ead14b7f7a2c84f4b381bad84b468f2119456..9ba8c73cea16c2018d2bd1b6baf15f847328add6 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2571b4a310f4bd31119f506a6441877886c1390b..879e8a277b9485ccb7e8deb0cba6742357744db9 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1b48d9d28c9ae055b92e53a06302ae66a079b08f..71fcc65c9ffae718495cb4862d3517e4b4581b62 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f4a2787e8e92b1c178dbbf85b97e2c7218f2054f..0786b86590610e5d64d8a7b8d75344733cbaac2d 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index cc95e2000406c3a7a6ff6a9bf63b27752ce008fa..30157f5a12d7d774386ab834c6ee819232f23b2e 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -47,6 +47,7 @@
 #include "acinterp.h"
 #include "amlcode.h"
 #include "acnamesp.h"
+#include "acdispat.h"
 
 #define _COMPONENT          ACPI_EXECUTER
 ACPI_MODULE_NAME("exprep")
@@ -455,6 +456,30 @@ acpi_status acpi_ex_prep_field_value(struct acpi_create_field_info *info)
                obj_desc->field.region_obj =
                    acpi_ns_get_attached_object(info->region_node);
 
+               /* Fields specific to generic_serial_bus fields */
+
+               obj_desc->field.access_length = info->access_length;
+
+               if (info->connection_node) {
+                       second_desc = info->connection_node->object;
+                       if (!(second_desc->common.flags & AOPOBJ_DATA_VALID)) {
+                               status =
+                                   acpi_ds_get_buffer_arguments(second_desc);
+                               if (ACPI_FAILURE(status)) {
+                                       acpi_ut_delete_object_desc(obj_desc);
+                                       return_ACPI_STATUS(status);
+                               }
+                       }
+
+                       obj_desc->field.resource_buffer =
+                           second_desc->buffer.pointer;
+                       obj_desc->field.resource_length =
+                           (u16)second_desc->buffer.length;
+               } else if (info->resource_buffer) {
+                       obj_desc->field.resource_buffer = info->resource_buffer;
+                       obj_desc->field.resource_length = info->resource_length;
+               }
+
                /* Allow full data read from EC address space */
 
                if ((obj_desc->field.region_obj->region.space_id ==
index f0d5e14f1f2c0040ff84c18a9a0dbaa6e73bcbed..12d51df6d3bf6ad354dc1dd9a3c095d725d61839 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 55997e46948bfed2843f4a603f3871223b8abb4a..fa50e77e64a8cedff0d23c49f9ed46ff11340156 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index db502cd7d9349c2e1b6617319023d3d9f1ed3a5a..6e335dc345285a1bd0523b82c1a3cb3406261b1c 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e3bb00ccdff5982338d5e422e3c4aa643523de1c..a67b1d925dddca807fe3525dd5e2dee4826a53ac 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c0c8842dd344b088649c3bc041d2df787f645ba5..c6cf843cc4c990492441b61fad0839bd6e89efd6 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a979017d56b8e5cf23e7672574ed66f83c7b4a79..b35bed52e0616ec34ef94715d21f2e941631f3ee 100644 (file)
@@ -7,7 +7,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index dc665cc554de762c455f801f57d1e03673c85e9d..65a45d8335c8418882c8dff45b69966406270c06 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index df66e7b686bec836ae4fb2e13e64ac0f21756473..191a129452263e858e35e27551f291156ebda12a 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8ad93146dd3282a984918a1a9b6a4b57b4e5ed41..eb6798ba8b59b092f88019d96d46a04b8637c27f 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -435,4 +435,29 @@ void acpi_ex_integer_to_string(char *out_string, u64 value)
        }
 }
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_is_valid_space_id
+ *
+ * PARAMETERS:  space_id            - ID to be validated
+ *
+ * RETURN:      TRUE if valid/supported ID.
+ *
+ * DESCRIPTION: Validate an operation region space_id.
+ *
+ ******************************************************************************/
+
+u8 acpi_is_valid_space_id(u8 space_id)
+{
+
+       if ((space_id >= ACPI_NUM_PREDEFINED_REGIONS) &&
+           (space_id < ACPI_USER_REGION_BEGIN) &&
+           (space_id != ACPI_ADR_SPACE_DATA_TABLE) &&
+           (space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) {
+               return (FALSE);
+       }
+
+       return (TRUE);
+}
+
 #endif
index fc380d3d45ab0f01ca413b7c6795532d65e45ca3..d21ec5f0b3a9a3138da2cd21058b5ecdb7cc9259 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f610d88a66be86b823f6ceed6abd7e2876c3a81e..1a6894afef7972052a08abf756c52f5e8ccad828 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 050fd227951b6835d3be95bfa828fbae7951a3d5..1455ddcdc32ceebcfcdf309c8c1d6627d7d47559 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index cc70f3fdcdd1a0f1d4222f6f7e6a84ca9ff7ebf5..4ea4eeb51bfdf588dcbc14e5b8830060e7a42378 100644 (file)
@@ -7,7 +7,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d52da307365006f36a067a3f033cad82fd93ff42..3c4a922a9fc2810b607e47ffd3d799277ffcea5e 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 50d21c40b5c1bdd9767aeee49851cd754f0c917b..d4973d9da9f1ce3ed876941609fda18a83928678 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5f1605874655a2f425aa0c0abd79befcb9684a3c..6e5c43a60bb723a54d7cc15543a11a89b8bcd056 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -134,6 +134,8 @@ acpi_hw_validate_io_request(acpi_io_address address, u32 bit_width)
        /* Supported widths are 8/16/32 */
 
        if ((bit_width != 8) && (bit_width != 16) && (bit_width != 32)) {
+               ACPI_ERROR((AE_INFO,
+                           "Bad BitWidth parameter: %8.8X", bit_width));
                return AE_BAD_PARAMETER;
        }
 
index d707756228c2eaddec8d4c99cd00ce3d19b92da8..9d38eb6c0d0b9107866f355c4aa9419937ebb883 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d93172fd15a8cfb38f7102cb799527a0f5a4b2ff..61623f3f6826ccdb6286de6e61e4611be4f6675a 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1d0ef15d158ff72779f32c814238130f13954425..7c3d3ceb98b37798dedaaf059ff50ffedb6a4c8b 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b683cc2ff9d3cc15d0acd687f11a8d70bbcf17b3..b7f2b3be79ac110182f9c17a3d4cecee4462ebf4 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 2ed294b7a4db9c401d094071c6c104d0c7b974a4..30ea5bc53a78bf75572169b282752f1d70ebf37c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c1bd02b1a058746f820988639baecb4ed73fc2b8..f375cb82e321b7958932be9e4c5bc00e65e3e30c 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index fd7c6380e2941b9cf61500ace2d3a6021b80affe..9d84ec2f0211bde65d30d10b6a79b5e0fd313fd0 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 5f7dc691c1838e0ea65d8d78ba5417e1a056c5ad..5cbf15ffe7d8ed38a38782b26f10114374b109cb 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index d5fa520c3de5cb026ec5a4b4c09ff7a410da5552..b20e7c8c3ffbcc7414689f28c242ec8c266a1a21 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3bb8bf105ea2abb151f4e862db5d00339a9972a0..dd77a3ce6e508eb6bb6fa74d3be1a45e268d59f1 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index b3234fa795b8591f62db87c30051c144ec250ee2..ec7ba2d3463ccd00a7f3986c8979bec197ff3e57 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c845c8089f39c4190f4896889d308ef546195549..bbe46a447d34d2c9597f0894060e6f05af1645de 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -620,6 +620,7 @@ acpi_ns_check_package(struct acpi_predefined_data *data,
        case ACPI_PTYPE2_FIXED:
        case ACPI_PTYPE2_MIN:
        case ACPI_PTYPE2_COUNT:
+       case ACPI_PTYPE2_FIX_VAR:
 
                /*
                 * These types all return a single Package that consists of a
@@ -759,6 +760,34 @@ acpi_ns_check_package_list(struct acpi_predefined_data *data,
                        }
                        break;
 
+               case ACPI_PTYPE2_FIX_VAR:
+                       /*
+                        * Each subpackage has a fixed number of elements and an
+                        * optional element
+                        */
+                       expected_count =
+                           package->ret_info.count1 + package->ret_info.count2;
+                       if (sub_package->package.count < expected_count) {
+                               goto package_too_small;
+                       }
+
+                       status =
+                           acpi_ns_check_package_elements(data, sub_elements,
+                                                          package->ret_info.
+                                                          object_type1,
+                                                          package->ret_info.
+                                                          count1,
+                                                          package->ret_info.
+                                                          object_type2,
+                                                          sub_package->package.
+                                                          count -
+                                                          package->ret_info.
+                                                          count1, 0);
+                       if (ACPI_FAILURE(status)) {
+                               return (status);
+                       }
+                       break;
+
                case ACPI_PTYPE2_FIXED:
 
                        /* Each sub-package has a fixed length */
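
The new ACPI_PTYPE2_FIX_VAR case handles return packages whose subpackages carry count1 elements of one type followed by a variable tail of another. A compact stand-alone model of that length/type check -- the element types and counts here are illustrative only, not the acpi_ns_check_package_elements() machinery:

#include <stdio.h>

enum obj_type { OBJ_INT, OBJ_STR };

/* The first "fixed" elements must be integers, any remaining elements
 * must be strings; too-short subpackages are rejected. */
static int check_fix_var(const enum obj_type *elems, unsigned int count,
                         unsigned int fixed)
{
        unsigned int i;

        if (count < fixed)
                return -1;              /* package too small */

        for (i = 0; i < count; i++) {
                enum obj_type want = (i < fixed) ? OBJ_INT : OBJ_STR;

                if (elems[i] != want)
                        return -1;
        }
        return 0;
}

int main(void)
{
        enum obj_type pkg[] = { OBJ_INT, OBJ_INT, OBJ_STR };

        printf("check: %d\n", check_fix_var(pkg, 3, 2));
        return 0;
}
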
index ac7b854b0bd740fb16ee73c6f5df978d79952900..9c35d20eb52b14fc9c4a5e238017852d934aee7c 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -634,6 +634,7 @@ acpi_ns_remove_null_elements(struct acpi_predefined_data *data,
        case ACPI_PTYPE2_FIXED:
        case ACPI_PTYPE2_MIN:
        case ACPI_PTYPE2_REV_FIXED:
+       case ACPI_PTYPE2_FIX_VAR:
                break;
 
        default:
index 024c4f263f872fc550567e56a5d4a32c369071de..726bc8e687f7a68bca895b881ce2bea09823fd87 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -467,11 +467,12 @@ acpi_ns_repair_HID(struct acpi_predefined_data *data,
        }
 
        /*
-        * Copy and uppercase the string. From the ACPI specification:
+        * Copy and uppercase the string. From the ACPI 5.0 specification:
         *
         * A valid PNP ID must be of the form "AAA####" where A is an uppercase
         * letter and # is a hex digit. A valid ACPI ID must be of the form
-        * "ACPI####" where # is a hex digit.
+        * "NNNN####" where N is an uppercase letter or decimal digit, and
+        * # is a hex digit.
         */
        for (dest = new_string->string.pointer; *source; dest++, source++) {
                *dest = (char)ACPI_TOUPPER(*source);
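
The updated comment spells out the two hardware-ID shapes the _HID repair accepts after uppercasing: 7-character PNP IDs and 8-character ACPI IDs. A small stand-alone checker for those shapes, purely for illustration (the sample strings are arbitrary examples, not values from this commit):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* "AAA####": three uppercase letters then four hex digits. */
static int is_pnp_id(const char *s)
{
        int i;

        if (strlen(s) != 7)
                return 0;
        for (i = 0; i < 3; i++)
                if (!isupper((unsigned char)s[i]))
                        return 0;
        for (i = 3; i < 7; i++)
                if (!isxdigit((unsigned char)s[i]))
                        return 0;
        return 1;
}

/* "NNNN####": four uppercase letters or digits then four hex digits. */
static int is_acpi_id(const char *s)
{
        int i;

        if (strlen(s) != 8)
                return 0;
        for (i = 0; i < 4; i++)
                if (!isupper((unsigned char)s[i]) &&
                    !isdigit((unsigned char)s[i]))
                        return 0;
        for (i = 4; i < 8; i++)
                if (!isxdigit((unsigned char)s[i]))
                        return 0;
        return 1;
}

int main(void)
{
        printf("PNP0C0A: %d, INT33A0: %d\n",
               is_pnp_id("PNP0C0A"), is_acpi_id("INT33A0"));
        return 0;
}
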
index 28b0d7a62b9976c5cf04e1043f5293f81f8189f0..507043d6611428e939182515449eb288fde51395 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index cb1b104a69a20211804e8c5124f07c8d6c9ea9b9..a535b7afda5cc3daf2cb80a28fd03fa18c22a6df 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 345f0c3c6ad2a336a1e8982a318ec2ca7742ce7a..f69895a548957761476e6ee4f5f29805729b6187 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e7f016d1b226edcd295f5a60adc73a0632d8b3ac..71d15f61807ba312e9f60b617c215a23ce643f9e 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 83bf930243034608eeda0b5467883c1782c7c3c8..af401c9c4dfc04f51c5d2091b8aac799873fbd10 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 57e6d825ed8411c8d0bd16c1de6208686a920244..880a605cee20e24b59d8173c0010c9ed456b11c2 100644 (file)
@@ -6,7 +6,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index e1fad0ee0136760806f8e3dcf1e1cb76b1509e7a..5ac36aba507c348192485115399f9cd8b02944fc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -484,34 +484,54 @@ acpi_ps_get_next_simple_arg(struct acpi_parse_state *parser_state,
 static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
                                                       *parser_state)
 {
-       u32 aml_offset = (u32)
-           ACPI_PTR_DIFF(parser_state->aml,
-                         parser_state->aml_start);
+       u32 aml_offset;
        union acpi_parse_object *field;
+       union acpi_parse_object *arg = NULL;
        u16 opcode;
        u32 name;
+       u8 access_type;
+       u8 access_attribute;
+       u8 access_length;
+       u32 pkg_length;
+       u8 *pkg_end;
+       u32 buffer_length;
 
        ACPI_FUNCTION_TRACE(ps_get_next_field);
 
+       aml_offset =
+           (u32)ACPI_PTR_DIFF(parser_state->aml, parser_state->aml_start);
+
        /* Determine field type */
 
        switch (ACPI_GET8(parser_state->aml)) {
-       default:
+       case AML_FIELD_OFFSET_OP:
 
-               opcode = AML_INT_NAMEDFIELD_OP;
+               opcode = AML_INT_RESERVEDFIELD_OP;
+               parser_state->aml++;
                break;
 
-       case 0x00:
+       case AML_FIELD_ACCESS_OP:
 
-               opcode = AML_INT_RESERVEDFIELD_OP;
+               opcode = AML_INT_ACCESSFIELD_OP;
                parser_state->aml++;
                break;
 
-       case 0x01:
+       case AML_FIELD_CONNECTION_OP:
 
-               opcode = AML_INT_ACCESSFIELD_OP;
+               opcode = AML_INT_CONNECTION_OP;
+               parser_state->aml++;
+               break;
+
+       case AML_FIELD_EXT_ACCESS_OP:
+
+               opcode = AML_INT_EXTACCESSFIELD_OP;
                parser_state->aml++;
                break;
+
+       default:
+
+               opcode = AML_INT_NAMEDFIELD_OP;
+               break;
        }
 
        /* Allocate a new field op */
@@ -549,16 +569,111 @@ static union acpi_parse_object *acpi_ps_get_next_field(struct acpi_parse_state
                break;
 
        case AML_INT_ACCESSFIELD_OP:
+       case AML_INT_EXTACCESSFIELD_OP:
 
                /*
                 * Get access_type and access_attrib and merge into the field Op
-                * access_type is first operand, access_attribute is second
+                * access_type is first operand, access_attribute is second. Stuff
+                * these bytes into the node integer value for convenience.
                 */
-               field->common.value.integer =
-                   (((u32) ACPI_GET8(parser_state->aml) << 8));
+
+               /* Get the two bytes (Type/Attribute) */
+
+               access_type = ACPI_GET8(parser_state->aml);
                parser_state->aml++;
-               field->common.value.integer |= ACPI_GET8(parser_state->aml);
+               access_attribute = ACPI_GET8(parser_state->aml);
                parser_state->aml++;
+
+               field->common.value.integer = (u8)access_type;
+               field->common.value.integer |= (u16)(access_attribute << 8);
+
+               /* This opcode has a third byte, access_length */
+
+               if (opcode == AML_INT_EXTACCESSFIELD_OP) {
+                       access_length = ACPI_GET8(parser_state->aml);
+                       parser_state->aml++;
+
+                       field->common.value.integer |=
+                           (u32)(access_length << 16);
+               }
+               break;
+
+       case AML_INT_CONNECTION_OP:
+
+               /*
+                * Argument for Connection operator can be either a Buffer
+                * (resource descriptor), or a name_string.
+                */
+               if (ACPI_GET8(parser_state->aml) == AML_BUFFER_OP) {
+                       parser_state->aml++;
+
+                       pkg_end = parser_state->aml;
+                       pkg_length =
+                           acpi_ps_get_next_package_length(parser_state);
+                       pkg_end += pkg_length;
+
+                       if (parser_state->aml < pkg_end) {
+
+                               /* Non-empty list */
+
+                               arg = acpi_ps_alloc_op(AML_INT_BYTELIST_OP);
+                               if (!arg) {
+                                       return_PTR(NULL);
+                               }
+
+                               /* Get the actual buffer length argument */
+
+                               opcode = ACPI_GET8(parser_state->aml);
+                               parser_state->aml++;
+
+                               switch (opcode) {
+                               case AML_BYTE_OP:       /* AML_BYTEDATA_ARG */
+                                       buffer_length =
+                                           ACPI_GET8(parser_state->aml);
+                                       parser_state->aml += 1;
+                                       break;
+
+                               case AML_WORD_OP:       /* AML_WORDDATA_ARG */
+                                       buffer_length =
+                                           ACPI_GET16(parser_state->aml);
+                                       parser_state->aml += 2;
+                                       break;
+
+                               case AML_DWORD_OP:      /* AML_DWORDDATA_ARG */
+                                       buffer_length =
+                                           ACPI_GET32(parser_state->aml);
+                                       parser_state->aml += 4;
+                                       break;
+
+                               default:
+                                       buffer_length = 0;
+                                       break;
+                               }
+
+                               /* Fill in bytelist data */
+
+                               arg->named.value.size = buffer_length;
+                               arg->named.data = parser_state->aml;
+                       }
+
+                       /* Skip to End of byte data */
+
+                       parser_state->aml = pkg_end;
+               } else {
+                       arg = acpi_ps_alloc_op(AML_INT_NAMEPATH_OP);
+                       if (!arg) {
+                               return_PTR(NULL);
+                       }
+
+                       /* Get the Namestring argument */
+
+                       arg->common.value.name =
+                           acpi_ps_get_next_namestring(parser_state);
+               }
+
+               /* Link the buffer/namestring to parent (CONNECTION_OP) */
+
+               acpi_ps_append_arg(field, arg);
                break;
 
        default:
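
A minimal sketch (not part of this commit) of the byte layout produced by the
AccessField/ExtAccessField handling above; the helper name is invented for
illustration. Bits 0-7 hold access_type, bits 8-15 access_attribute, and bits
16-23 access_length (the last byte exists only for ExtAccessField):

        static u32 pack_field_access_info(u8 access_type, u8 access_attribute,
                                          u8 access_length)
        {
                /* Mirrors the three OR operations in the hunk above */
                return (u32)access_type |
                       ((u32)access_attribute << 8) |
                       ((u32)access_length << 16);
        }
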
index 01dd70d1de514eb8e8a03dfbc94995e35a6f90e3..9547ad8a620bfc1e801e30a6bc8dfb1891288fb5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index bed08de7528c20e1762a8c989e89fece2a368bdf..a0226fdcf75c0406e7f8bc0a9388000985d44637 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -638,7 +638,16 @@ const struct acpi_opcode_info acpi_gbl_aml_op_info[AML_NUM_OPCODES] = {
 
 /* 7E */ ACPI_OP("Timer", ARGP_TIMER_OP, ARGI_TIMER_OP, ACPI_TYPE_ANY,
                 AML_CLASS_EXECUTE, AML_TYPE_EXEC_0A_0T_1R,
-                AML_FLAGS_EXEC_0A_0T_1R)
+                        AML_FLAGS_EXEC_0A_0T_1R),
+
+/* ACPI 5.0 opcodes */
+
+/* 7F */ ACPI_OP("-ConnectField-", ARGP_CONNECTFIELD_OP,
+                        ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
+                        AML_CLASS_INTERNAL, AML_TYPE_BOGUS, AML_HAS_ARGS),
+/* 80 */ ACPI_OP("-ExtAccessField-", ARGP_CONNECTFIELD_OP,
+                        ARGI_CONNECTFIELD_OP, ACPI_TYPE_ANY,
+                        AML_CLASS_INTERNAL, AML_TYPE_BOGUS, 0)
 
 /*! [End] no source code translation !*/
 };
@@ -657,7 +666,7 @@ static const u8 acpi_gbl_short_op_index[256] = {
 /* 0x20 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
 /* 0x28 */ _UNK, _UNK, _UNK, _UNK, _UNK, 0x63, _PFX, _PFX,
 /* 0x30 */ 0x67, 0x66, 0x68, 0x65, 0x69, 0x64, 0x6A, 0x7D,
-/* 0x38 */ _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
+/* 0x38 */ 0x7F, 0x80, _UNK, _UNK, _UNK, _UNK, _UNK, _UNK,
 /* 0x40 */ _UNK, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
 /* 0x48 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
 /* 0x50 */ _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC, _ASC,
index 9bb0cbd37b5e678d462702d909d7a165396f2342..2ff9c35a19686be19b16a1b203936a6ca45982bc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a5faa1323a0290f77676419a07ccc05eed2313e5..c872aa4b926ec52bc1ddbec6921871529b97edcc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index f1464c03aa427865f4601cd01d58712ce7024b39..2b03cdbbe1c0dc49436b6bc089b5a7bd771904db 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -74,6 +74,12 @@ union acpi_parse_object *acpi_ps_get_arg(union acpi_parse_object *op, u32 argn)
 
        ACPI_FUNCTION_ENTRY();
 
+/*
+       if (Op->Common.aml_opcode == AML_INT_CONNECTION_OP)
+       {
+               return (Op->Common.Value.Arg);
+       }
+*/
        /* Get the info structure for this opcode */
 
        op_info = acpi_ps_get_opcode_info(op->common.aml_opcode);
index 7eda78503422b7d651b046c2b05a76ac895d1fb0..13bb131ae12517040401a7cbfa02b8c3c0735422 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3312d6368bf15bba6e193b14e2d3c47cf01dd804..ab96cf47896d33e9a28b7854306c5011dbfff970 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8086805d44946ba73f0f2819271d2077d3755721..9d98c5ff66a5f8c08a0704a658d962b1f2d82ff7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 9e66f90784269243cb5e30f815b50f299c16ccf9..a0305652394f1f03fbebc050e55ce5ad28bd1856 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 3a8a89ec2ca4195d14a90b620918f6f4aa9cd881..3c6df4b7eb2ddc200575ea8849b905519efe6f87 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -313,6 +313,38 @@ acpi_rs_get_aml_length(struct acpi_resource * resource, acpi_size * size_needed)
                                                          resource_source));
                        break;
 
+               case ACPI_RESOURCE_TYPE_GPIO:
+
+                       total_size =
+                           (acpi_rs_length) (total_size +
+                                             (resource->data.gpio.
+                                              pin_table_length * 2) +
+                                             resource->data.gpio.
+                                             resource_source.string_length +
+                                             resource->data.gpio.
+                                             vendor_length);
+
+                       break;
+
+               case ACPI_RESOURCE_TYPE_SERIAL_BUS:
+
+                       total_size =
+                           acpi_gbl_aml_resource_serial_bus_sizes[resource->
+                                                                  data.
+                                                                  common_serial_bus.
+                                                                  type];
+
+                       total_size = (acpi_rs_length) (total_size +
+                                                      resource->data.
+                                                      i2c_serial_bus.
+                                                      resource_source.
+                                                      string_length +
+                                                      resource->data.
+                                                      i2c_serial_bus.
+                                                      vendor_length);
+
+                       break;
+
                default:
                        break;
                }
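
A worked example for the GPIO sizing above, with illustrative values only: a
descriptor carrying 4 pins, an 8-byte ResourceSource string, and no vendor
data adds 4 * 2 + 8 + 0 = 16 bytes on top of the fixed descriptor size
computed earlier in the function.

        /* Illustrative values only */
        pin_table_bytes = 4 * 2;                 /* four u16 pin numbers      */
        extra_bytes = pin_table_bytes + 8 + 0;   /* + source string + vendor
                                                    data = 16 bytes           */
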
@@ -362,10 +394,11 @@ acpi_rs_get_list_length(u8 * aml_buffer,
        u32 extra_struct_bytes;
        u8 resource_index;
        u8 minimum_aml_resource_length;
+       union aml_resource *aml_resource;
 
        ACPI_FUNCTION_TRACE(rs_get_list_length);
 
-       *size_needed = 0;
+       *size_needed = ACPI_RS_SIZE_MIN;        /* Minimum size is one end_tag */
        end_aml = aml_buffer + aml_buffer_length;
 
        /* Walk the list of AML resource descriptors */
@@ -376,9 +409,15 @@ acpi_rs_get_list_length(u8 * aml_buffer,
 
                status = acpi_ut_validate_resource(aml_buffer, &resource_index);
                if (ACPI_FAILURE(status)) {
+                       /*
+                        * Exit on failure. Cannot continue because the descriptor length
+                        * may also be bogus.
+                        */
                        return_ACPI_STATUS(status);
                }
 
+               aml_resource = (void *)aml_buffer;
+
                /* Get the resource length and base (minimum) AML size */
 
                resource_length = acpi_ut_get_resource_length(aml_buffer);
@@ -422,10 +461,8 @@ acpi_rs_get_list_length(u8 * aml_buffer,
 
                case ACPI_RESOURCE_NAME_END_TAG:
                        /*
-                        * End Tag:
-                        * This is the normal exit, add size of end_tag
+                        * End Tag: This is the normal exit
                         */
-                       *size_needed += ACPI_RS_SIZE_MIN;
                        return_ACPI_STATUS(AE_OK);
 
                case ACPI_RESOURCE_NAME_ADDRESS32:
@@ -457,6 +494,33 @@ acpi_rs_get_list_length(u8 * aml_buffer,
                                                         minimum_aml_resource_length);
                        break;
 
+               case ACPI_RESOURCE_NAME_GPIO:
+
+                       /* Vendor data is optional */
+
+                       if (aml_resource->gpio.vendor_length) {
+                               extra_struct_bytes +=
+                                   aml_resource->gpio.vendor_offset -
+                                   aml_resource->gpio.pin_table_offset +
+                                   aml_resource->gpio.vendor_length;
+                       } else {
+                               extra_struct_bytes +=
+                                   aml_resource->large_header.resource_length +
+                                   sizeof(struct aml_resource_large_header) -
+                                   aml_resource->gpio.pin_table_offset;
+                       }
+                       break;
+
+               case ACPI_RESOURCE_NAME_SERIAL_BUS:
+
+                       minimum_aml_resource_length =
+                           acpi_gbl_resource_aml_serial_bus_sizes
+                           [aml_resource->common_serial_bus.type];
+                       extra_struct_bytes +=
+                           aml_resource->common_serial_bus.resource_length -
+                           minimum_aml_resource_length;
+                       break;
+
                default:
                        break;
                }
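
The GPIO branch above folds the variable-length tail of the AML descriptor
into extra_struct_bytes. A stand-alone restatement, with the helper name and
explicit parameters invented for illustration; total_descriptor_length stands
for large_header.resource_length plus sizeof(struct aml_resource_large_header):

        static u32 gpio_extra_struct_bytes(u16 pin_table_offset, u16 vendor_offset,
                                           u16 vendor_length,
                                           u32 total_descriptor_length)
        {
                if (vendor_length) {
                        /* Pin table + ResourceSource string + vendor data */
                        return ((u32)vendor_offset - pin_table_offset) +
                               vendor_length;
                }

                /* No vendor data: everything from the pin table to the end */
                return total_descriptor_length - pin_table_offset;
        }
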
@@ -467,9 +531,18 @@ acpi_rs_get_list_length(u8 * aml_buffer,
                 * Important: Round the size up for the appropriate alignment. This
                 * is a requirement on IA64.
                 */
-               buffer_size = acpi_gbl_resource_struct_sizes[resource_index] +
-                   extra_struct_bytes;
-               buffer_size = (u32) ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
+               if (acpi_ut_get_resource_type(aml_buffer) ==
+                   ACPI_RESOURCE_NAME_SERIAL_BUS) {
+                       buffer_size =
+                           acpi_gbl_resource_struct_serial_bus_sizes
+                           [aml_resource->common_serial_bus.type] +
+                           extra_struct_bytes;
+               } else {
+                       buffer_size =
+                           acpi_gbl_resource_struct_sizes[resource_index] +
+                           extra_struct_bytes;
+               }
+               buffer_size = (u32)ACPI_ROUND_UP_TO_NATIVE_WORD(buffer_size);
 
                *size_needed += buffer_size;
 
index 4ce6e1147e807993cccc251af32632d0eabca6e8..46d6eb38ae66f5598faba40c70ffaf4466e0459b 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
 #define _COMPONENT          ACPI_RESOURCES
 ACPI_MODULE_NAME("rscreate")
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_buffer_to_resource
+ *
+ * PARAMETERS:  aml_buffer          - Pointer to the resource byte stream
+ *              aml_buffer_length   - Length of the aml_buffer
+ *              resource_ptr        - Where the converted resource is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Convert a raw AML buffer to a resource list
+ *
+ ******************************************************************************/
+acpi_status
+acpi_buffer_to_resource(u8 *aml_buffer,
+                       u16 aml_buffer_length,
+                       struct acpi_resource **resource_ptr)
+{
+       acpi_status status;
+       acpi_size list_size_needed;
+       void *resource;
+       void *current_resource_ptr;
+
+       /*
+        * Note: we allow AE_AML_NO_RESOURCE_END_TAG, since an end tag
+        * is not required here.
+        */
+
+       /* Get the required length for the converted resource */
+
+       status = acpi_rs_get_list_length(aml_buffer, aml_buffer_length,
+                                        &list_size_needed);
+       if (status == AE_AML_NO_RESOURCE_END_TAG) {
+               status = AE_OK;
+       }
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       /* Allocate a buffer for the converted resource */
+
+       resource = ACPI_ALLOCATE_ZEROED(list_size_needed);
+       current_resource_ptr = resource;
+       if (!resource) {
+               return (AE_NO_MEMORY);
+       }
+
+       /* Perform the AML-to-Resource conversion */
+
+       status = acpi_ut_walk_aml_resources(aml_buffer, aml_buffer_length,
+                                           acpi_rs_convert_aml_to_resources,
+                                           &current_resource_ptr);
+       if (status == AE_AML_NO_RESOURCE_END_TAG) {
+               status = AE_OK;
+       }
+       if (ACPI_FAILURE(status)) {
+               ACPI_FREE(resource);
+       } else {
+               *resource_ptr = resource;
+       }
+
+       return (status);
+}
+
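
A usage sketch for the new acpi_buffer_to_resource() interface above; the
caller, buffer contents, and cleanup are illustrative only (ACPI_FREE pairs
with the ACPI_ALLOCATE_ZEROED allocation made inside the function):

        static acpi_status example_buffer_to_resource(u8 *aml, u16 aml_length)
        {
                struct acpi_resource *resources;
                acpi_status status;

                status = acpi_buffer_to_resource(aml, aml_length, &resources);
                if (ACPI_FAILURE(status)) {
                        return (status);
                }

                /* ... walk the acpi_resource list up to the END_TAG entry ... */

                ACPI_FREE(resources);
                return (AE_OK);
        }
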
 /*******************************************************************************
  *
  * FUNCTION:    acpi_rs_create_resource_list
@@ -66,9 +130,10 @@ ACPI_MODULE_NAME("rscreate")
  *              of device resources.
  *
  ******************************************************************************/
+
 acpi_status
 acpi_rs_create_resource_list(union acpi_operand_object *aml_buffer,
-                            struct acpi_buffer *output_buffer)
+                            struct acpi_buffer * output_buffer)
 {
 
        acpi_status status;
index 33db7520c74be0c2937b837b31512dcec49fde58..b4c5811323932324becf8a803a85fc542dc20579 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -61,11 +61,13 @@ static void acpi_rs_out_integer64(char *title, u64 value);
 
 static void acpi_rs_out_title(char *title);
 
-static void acpi_rs_dump_byte_list(u16 length, u8 * data);
+static void acpi_rs_dump_byte_list(u16 length, u8 *data);
 
-static void acpi_rs_dump_dword_list(u8 length, u32 * data);
+static void acpi_rs_dump_word_list(u16 length, u16 *data);
 
-static void acpi_rs_dump_short_byte_list(u8 length, u8 * data);
+static void acpi_rs_dump_dword_list(u8 length, u32 *data);
+
+static void acpi_rs_dump_short_byte_list(u8 length, u8 *data);
 
 static void
 acpi_rs_dump_resource_source(struct acpi_resource_source *resource_source);
@@ -309,6 +311,125 @@ struct acpi_rsdump_info acpi_rs_dump_generic_reg[6] = {
        {ACPI_RSD_UINT64, ACPI_RSD_OFFSET(generic_reg.address), "Address", NULL}
 };
 
+struct acpi_rsdump_info acpi_rs_dump_gpio[16] = {
+       {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_gpio), "GPIO", NULL},
+       {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.revision_id), "RevisionId", NULL},
+       {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.connection_type),
+        "ConnectionType", acpi_gbl_ct_decode},
+       {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.producer_consumer),
+        "ProducerConsumer", acpi_gbl_consume_decode},
+       {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(gpio.pin_config), "PinConfig",
+        acpi_gbl_ppc_decode},
+       {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.sharable), "Sharable",
+        acpi_gbl_shr_decode},
+       {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.io_restriction),
+        "IoRestriction", acpi_gbl_ior_decode},
+       {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(gpio.triggering), "Triggering",
+        acpi_gbl_he_decode},
+       {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(gpio.polarity), "Polarity",
+        acpi_gbl_ll_decode},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.drive_strength), "DriveStrength",
+        NULL},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.debounce_timeout),
+        "DebounceTimeout", NULL},
+       {ACPI_RSD_SOURCE, ACPI_RSD_OFFSET(gpio.resource_source),
+        "ResourceSource", NULL},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.pin_table_length),
+        "PinTableLength", NULL},
+       {ACPI_RSD_WORDLIST, ACPI_RSD_OFFSET(gpio.pin_table), "PinTable", NULL},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(gpio.vendor_length), "VendorLength",
+        NULL},
+       {ACPI_RSD_SHORTLISTX, ACPI_RSD_OFFSET(gpio.vendor_data), "VendorData",
+        NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_fixed_dma[4] = {
+       {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_fixed_dma),
+        "FixedDma", NULL},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.request_lines),
+        "RequestLines", NULL},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(fixed_dma.channels), "Channels",
+        NULL},
+       {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(fixed_dma.width), "TransferWidth",
+        acpi_gbl_dts_decode},
+};
+
+#define ACPI_RS_DUMP_COMMON_SERIAL_BUS \
+       {ACPI_RSD_UINT8,    ACPI_RSD_OFFSET (common_serial_bus.revision_id),    "RevisionId",               NULL}, \
+       {ACPI_RSD_UINT8,    ACPI_RSD_OFFSET (common_serial_bus.type),           "Type",                     acpi_gbl_sbt_decode}, \
+       {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.producer_consumer), "ProducerConsumer",      acpi_gbl_consume_decode}, \
+       {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET (common_serial_bus.slave_mode),     "SlaveMode",                acpi_gbl_sm_decode}, \
+       {ACPI_RSD_UINT8,    ACPI_RSD_OFFSET (common_serial_bus.type_revision_id), "TypeRevisionId",         NULL}, \
+       {ACPI_RSD_UINT16,   ACPI_RSD_OFFSET (common_serial_bus.type_data_length), "TypeDataLength",         NULL}, \
+       {ACPI_RSD_SOURCE,   ACPI_RSD_OFFSET (common_serial_bus.resource_source), "ResourceSource",          NULL}, \
+       {ACPI_RSD_UINT16,   ACPI_RSD_OFFSET (common_serial_bus.vendor_length),  "VendorLength",             NULL}, \
+       {ACPI_RSD_SHORTLISTX,ACPI_RSD_OFFSET (common_serial_bus.vendor_data),   "VendorData",               NULL},
+
+struct acpi_rsdump_info acpi_rs_dump_common_serial_bus[10] = {
+       {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_common_serial_bus),
+        "Common Serial Bus", NULL},
+       ACPI_RS_DUMP_COMMON_SERIAL_BUS
+};
+
+struct acpi_rsdump_info acpi_rs_dump_i2c_serial_bus[13] = {
+       {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_i2c_serial_bus),
+        "I2C Serial Bus", NULL},
+       ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+                                       ACPI_RSD_OFFSET(i2c_serial_bus.
+                                                       access_mode),
+                                       "AccessMode", acpi_gbl_am_decode},
+       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(i2c_serial_bus.connection_speed),
+        "ConnectionSpeed", NULL},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(i2c_serial_bus.slave_address),
+        "SlaveAddress", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_spi_serial_bus[17] = {
+       {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_spi_serial_bus),
+        "Spi Serial Bus", NULL},
+       ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_1BITFLAG,
+                                       ACPI_RSD_OFFSET(spi_serial_bus.
+                                                       wire_mode), "WireMode",
+                                       acpi_gbl_wm_decode},
+       {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(spi_serial_bus.device_polarity),
+        "DevicePolarity", acpi_gbl_dp_decode},
+       {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.data_bit_length),
+        "DataBitLength", NULL},
+       {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_phase),
+        "ClockPhase", acpi_gbl_cph_decode},
+       {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(spi_serial_bus.clock_polarity),
+        "ClockPolarity", acpi_gbl_cpo_decode},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(spi_serial_bus.device_selection),
+        "DeviceSelection", NULL},
+       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(spi_serial_bus.connection_speed),
+        "ConnectionSpeed", NULL},
+};
+
+struct acpi_rsdump_info acpi_rs_dump_uart_serial_bus[19] = {
+       {ACPI_RSD_TITLE, ACPI_RSD_TABLE_SIZE(acpi_rs_dump_uart_serial_bus),
+        "Uart Serial Bus", NULL},
+       ACPI_RS_DUMP_COMMON_SERIAL_BUS {ACPI_RSD_2BITFLAG,
+                                       ACPI_RSD_OFFSET(uart_serial_bus.
+                                                       flow_control),
+                                       "FlowControl", acpi_gbl_fc_decode},
+       {ACPI_RSD_2BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.stop_bits),
+        "StopBits", acpi_gbl_sb_decode},
+       {ACPI_RSD_3BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.data_bits),
+        "DataBits", acpi_gbl_bpb_decode},
+       {ACPI_RSD_1BITFLAG, ACPI_RSD_OFFSET(uart_serial_bus.endian), "Endian",
+        acpi_gbl_ed_decode},
+       {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.parity), "Parity",
+        acpi_gbl_pt_decode},
+       {ACPI_RSD_UINT8, ACPI_RSD_OFFSET(uart_serial_bus.lines_enabled),
+        "LinesEnabled", NULL},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.rx_fifo_size),
+        "RxFifoSize", NULL},
+       {ACPI_RSD_UINT16, ACPI_RSD_OFFSET(uart_serial_bus.tx_fifo_size),
+        "TxFifoSize", NULL},
+       {ACPI_RSD_UINT32, ACPI_RSD_OFFSET(uart_serial_bus.default_baud_rate),
+        "ConnectionSpeed", NULL},
+};
+
 /*
  * Tables used for common address descriptor flag fields
  */
@@ -413,7 +534,14 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
                        /* Data items, 8/16/32/64 bit */
 
                case ACPI_RSD_UINT8:
-                       acpi_rs_out_integer8(name, ACPI_GET8(target));
+                       if (table->pointer) {
+                               acpi_rs_out_string(name, ACPI_CAST_PTR(char,
+                                                                      table->
+                                                                      pointer
+                                                                      [*target]));
+                       } else {
+                               acpi_rs_out_integer8(name, ACPI_GET8(target));
+                       }
                        break;
 
                case ACPI_RSD_UINT16:
@@ -444,6 +572,13 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
                                                                       0x03]));
                        break;
 
+               case ACPI_RSD_3BITFLAG:
+                       acpi_rs_out_string(name, ACPI_CAST_PTR(char,
+                                                              table->
+                                                              pointer[*target &
+                                                                      0x07]));
+                       break;
+
                case ACPI_RSD_SHORTLIST:
                        /*
                         * Short byte list (single line output) for DMA and IRQ resources
@@ -456,6 +591,20 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
                        }
                        break;
 
+               case ACPI_RSD_SHORTLISTX:
+                       /*
+                        * Short byte list (single line output) for GPIO vendor data
+                        * Note: The list length is obtained from the previous table entry
+                        */
+                       if (previous_target) {
+                               acpi_rs_out_title(name);
+                               acpi_rs_dump_short_byte_list(*previous_target,
+                                                            *
+                                                            (ACPI_CAST_INDIRECT_PTR
+                                                             (u8, target)));
+                       }
+                       break;
+
                case ACPI_RSD_LONGLIST:
                        /*
                         * Long byte list for Vendor resource data
@@ -480,6 +629,18 @@ acpi_rs_dump_descriptor(void *resource, struct acpi_rsdump_info *table)
                        }
                        break;
 
+               case ACPI_RSD_WORDLIST:
+                       /*
+                        * Word list for GPIO Pin Table
+                        * Note: The list length is obtained from the previous table entry
+                        */
+                       if (previous_target) {
+                               acpi_rs_dump_word_list(*previous_target,
+                                                      *(ACPI_CAST_INDIRECT_PTR
+                                                        (u16, target)));
+                       }
+                       break;
+
                case ACPI_RSD_ADDRESS:
                        /*
                         * Common flags for all Address resources
@@ -627,14 +788,20 @@ void acpi_rs_dump_resource_list(struct acpi_resource *resource_list)
 
                /* Dump the resource descriptor */
 
-               acpi_rs_dump_descriptor(&resource_list->data,
-                                       acpi_gbl_dump_resource_dispatch[type]);
+               if (type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+                       acpi_rs_dump_descriptor(&resource_list->data,
+                                               acpi_gbl_dump_serial_bus_dispatch
+                                               [resource_list->data.
+                                                common_serial_bus.type]);
+               } else {
+                       acpi_rs_dump_descriptor(&resource_list->data,
+                                               acpi_gbl_dump_resource_dispatch
+                                               [type]);
+               }
 
                /* Point to the next resource structure */
 
-               resource_list =
-                   ACPI_ADD_PTR(struct acpi_resource, resource_list,
-                                resource_list->length);
+               resource_list = ACPI_NEXT_RESOURCE(resource_list);
 
                /* Exit when END_TAG descriptor is reached */
 
@@ -768,4 +935,13 @@ static void acpi_rs_dump_dword_list(u8 length, u32 * data)
        }
 }
 
+static void acpi_rs_dump_word_list(u16 length, u16 *data)
+{
+       u16 i;
+
+       for (i = 0; i < length; i++) {
+               acpi_os_printf("%25s%2.2X : %4.4X\n", "Word", i, data[i]);
+       }
+}
+
 #endif
index f9ea60872aa4ddad8cf1cd77c20a61a0ec7c0b41..a9fa5158200b30712155cefbfc9987501cc4142e 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -76,7 +76,10 @@ struct acpi_rsconvert_info *acpi_gbl_set_resource_dispatch[] = {
        acpi_rs_convert_address64,      /* 0x0D, ACPI_RESOURCE_TYPE_ADDRESS64 */
        acpi_rs_convert_ext_address64,  /* 0x0E, ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
        acpi_rs_convert_ext_irq,        /* 0x0F, ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
-       acpi_rs_convert_generic_reg     /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+       acpi_rs_convert_generic_reg,    /* 0x10, ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+       acpi_rs_convert_gpio,   /* 0x11, ACPI_RESOURCE_TYPE_GPIO */
+       acpi_rs_convert_fixed_dma,      /* 0x12, ACPI_RESOURCE_TYPE_FIXED_DMA */
+       NULL,                   /* 0x13, ACPI_RESOURCE_TYPE_SERIAL_BUS - Use subtype table below */
 };
 
 /* Dispatch tables for AML-to-resource (Get Resource) conversion functions */
@@ -94,7 +97,7 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
        acpi_rs_convert_end_dpf,        /* 0x07, ACPI_RESOURCE_NAME_END_DEPENDENT */
        acpi_rs_convert_io,     /* 0x08, ACPI_RESOURCE_NAME_IO */
        acpi_rs_convert_fixed_io,       /* 0x09, ACPI_RESOURCE_NAME_FIXED_IO */
-       NULL,                   /* 0x0A, Reserved */
+       acpi_rs_convert_fixed_dma,      /* 0x0A, ACPI_RESOURCE_NAME_FIXED_DMA */
        NULL,                   /* 0x0B, Reserved */
        NULL,                   /* 0x0C, Reserved */
        NULL,                   /* 0x0D, Reserved */
@@ -114,7 +117,19 @@ struct acpi_rsconvert_info *acpi_gbl_get_resource_dispatch[] = {
        acpi_rs_convert_address16,      /* 0x08, ACPI_RESOURCE_NAME_ADDRESS16 */
        acpi_rs_convert_ext_irq,        /* 0x09, ACPI_RESOURCE_NAME_EXTENDED_IRQ */
        acpi_rs_convert_address64,      /* 0x0A, ACPI_RESOURCE_NAME_ADDRESS64 */
-       acpi_rs_convert_ext_address64   /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
+       acpi_rs_convert_ext_address64,  /* 0x0B, ACPI_RESOURCE_NAME_EXTENDED_ADDRESS64 */
+       acpi_rs_convert_gpio,   /* 0x0C, ACPI_RESOURCE_NAME_GPIO */
+       NULL,                   /* 0x0D, Reserved */
+       NULL,                   /* 0x0E, ACPI_RESOURCE_NAME_SERIAL_BUS - Use subtype table below */
+};
+
+/* Subtype table for serial_bus -- I2C, SPI, and UART */
+
+struct acpi_rsconvert_info *acpi_gbl_convert_resource_serial_bus_dispatch[] = {
+       NULL,
+       acpi_rs_convert_i2c_serial_bus,
+       acpi_rs_convert_spi_serial_bus,
+       acpi_rs_convert_uart_serial_bus,
 };
 
 #ifdef ACPI_FUTURE_USAGE
@@ -140,6 +155,16 @@ struct acpi_rsdump_info *acpi_gbl_dump_resource_dispatch[] = {
        acpi_rs_dump_ext_address64,     /* ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
        acpi_rs_dump_ext_irq,   /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
        acpi_rs_dump_generic_reg,       /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+       acpi_rs_dump_gpio,      /* ACPI_RESOURCE_TYPE_GPIO */
+       acpi_rs_dump_fixed_dma, /* ACPI_RESOURCE_TYPE_FIXED_DMA */
+       NULL,                   /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
+};
+
+struct acpi_rsdump_info *acpi_gbl_dump_serial_bus_dispatch[] = {
+       NULL,
+       acpi_rs_dump_i2c_serial_bus,    /* AML_RESOURCE_I2C_BUS_TYPE */
+       acpi_rs_dump_spi_serial_bus,    /* AML_RESOURCE_SPI_BUS_TYPE */
+       acpi_rs_dump_uart_serial_bus,   /* AML_RESOURCE_UART_BUS_TYPE */
 };
 #endif
 
@@ -166,7 +191,10 @@ const u8 acpi_gbl_aml_resource_sizes[] = {
        sizeof(struct aml_resource_address64),  /* ACPI_RESOURCE_TYPE_ADDRESS64 */
        sizeof(struct aml_resource_extended_address64), /*ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64 */
        sizeof(struct aml_resource_extended_irq),       /* ACPI_RESOURCE_TYPE_EXTENDED_IRQ */
-       sizeof(struct aml_resource_generic_register)    /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+       sizeof(struct aml_resource_generic_register),   /* ACPI_RESOURCE_TYPE_GENERIC_REGISTER */
+       sizeof(struct aml_resource_gpio),       /* ACPI_RESOURCE_TYPE_GPIO */
+       sizeof(struct aml_resource_fixed_dma),  /* ACPI_RESOURCE_TYPE_FIXED_DMA */
+       sizeof(struct aml_resource_common_serialbus),   /* ACPI_RESOURCE_TYPE_SERIAL_BUS */
 };
 
 const u8 acpi_gbl_resource_struct_sizes[] = {
@@ -182,7 +210,7 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
        ACPI_RS_SIZE_MIN,
        ACPI_RS_SIZE(struct acpi_resource_io),
        ACPI_RS_SIZE(struct acpi_resource_fixed_io),
-       0,
+       ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
        0,
        0,
        0,
@@ -202,5 +230,21 @@ const u8 acpi_gbl_resource_struct_sizes[] = {
        ACPI_RS_SIZE(struct acpi_resource_address16),
        ACPI_RS_SIZE(struct acpi_resource_extended_irq),
        ACPI_RS_SIZE(struct acpi_resource_address64),
-       ACPI_RS_SIZE(struct acpi_resource_extended_address64)
+       ACPI_RS_SIZE(struct acpi_resource_extended_address64),
+       ACPI_RS_SIZE(struct acpi_resource_gpio),
+       ACPI_RS_SIZE(struct acpi_resource_common_serialbus)
+};
+
+const u8 acpi_gbl_aml_resource_serial_bus_sizes[] = {
+       0,
+       sizeof(struct aml_resource_i2c_serialbus),
+       sizeof(struct aml_resource_spi_serialbus),
+       sizeof(struct aml_resource_uart_serialbus),
+};
+
+const u8 acpi_gbl_resource_struct_serial_bus_sizes[] = {
+       0,
+       ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
+       ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
+       ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
 };
index 0c7efef008bed3c2bcd652fae16bf9d0ae80ca29..f6a081057a22d24ca6e06a40db2df63890eedd45 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 50b8ad21116736185832c802d63529bbfa9532cd..e23a9ec248cbc7c8b0e5bfbcc90f1b4d552865b2 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -264,3 +264,34 @@ struct acpi_rsconvert_info acpi_rs_convert_dma[6] = {
         AML_OFFSET(dma.dma_channel_mask),
         ACPI_RS_OFFSET(data.dma.channel_count)}
 };
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_fixed_dma
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_fixed_dma[4] = {
+       {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_DMA,
+        ACPI_RS_SIZE(struct acpi_resource_fixed_dma),
+        ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_dma)},
+
+       {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_DMA,
+        sizeof(struct aml_resource_fixed_dma),
+        0},
+
+       /*
+        * These fields are contiguous in both the source and destination:
+        * request_lines
+        * Channels
+        */
+
+       {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.fixed_dma.request_lines),
+        AML_OFFSET(fixed_dma.request_lines),
+        2},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.fixed_dma.width),
+        AML_OFFSET(fixed_dma.width),
+        1},
+
+};
index 1bfcef736c5079695dd871d4490ae34bb612600b..9be129f5d6f4ec431469a4bebf76a6f0b900df9a 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -70,6 +70,8 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
        struct acpi_resource **resource_ptr =
            ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context);
        struct acpi_resource *resource;
+       union aml_resource *aml_resource;
+       struct acpi_rsconvert_info *conversion_table;
        acpi_status status;
 
        ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources);
@@ -84,14 +86,37 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
                              "Misaligned resource pointer %p", resource));
        }
 
+       /* Get the appropriate conversion info table */
+
+       aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
+       if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+               if (aml_resource->common_serial_bus.type >
+                   AML_RESOURCE_MAX_SERIALBUSTYPE) {
+                       conversion_table = NULL;
+               } else {
+                       /* This is an I2C, SPI, or UART serial_bus descriptor */
+
+                       conversion_table =
+                           acpi_gbl_convert_resource_serial_bus_dispatch
+                           [aml_resource->common_serial_bus.type];
+               }
+       } else {
+               conversion_table =
+                   acpi_gbl_get_resource_dispatch[resource_index];
+       }
+
+       if (!conversion_table) {
+               ACPI_ERROR((AE_INFO,
+                           "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+                           resource_index));
+               return (AE_AML_INVALID_RESOURCE_TYPE);
+       }
+
        /* Convert the AML byte stream resource to a local resource struct */
 
        status =
-           acpi_rs_convert_aml_to_resource(resource,
-                                           ACPI_CAST_PTR(union aml_resource,
-                                                         aml),
-                                           acpi_gbl_get_resource_dispatch
-                                           [resource_index]);
+           acpi_rs_convert_aml_to_resource(resource, aml_resource,
+                                           conversion_table);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "Could not convert AML resource (Type 0x%X)",
@@ -106,7 +131,7 @@ acpi_rs_convert_aml_to_resources(u8 * aml,
 
        /* Point to the next structure in the output buffer */
 
-       *resource_ptr = ACPI_ADD_PTR(void, resource, resource->length);
+       *resource_ptr = ACPI_NEXT_RESOURCE(resource);
        return_ACPI_STATUS(AE_OK);
 }
 
@@ -135,6 +160,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
 {
        u8 *aml = output_buffer;
        u8 *end_aml = output_buffer + aml_size_needed;
+       struct acpi_rsconvert_info *conversion_table;
        acpi_status status;
 
        ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml);
@@ -154,11 +180,34 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
 
                /* Perform the conversion */
 
-               status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union
-                                                                                aml_resource,
-                                                                                aml),
-                                                        acpi_gbl_set_resource_dispatch
-                                                        [resource->type]);
+               if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+                       if (resource->data.common_serial_bus.type >
+                           AML_RESOURCE_MAX_SERIALBUSTYPE) {
+                               conversion_table = NULL;
+                       } else {
+                               /* This is an I2C, SPI, or UART serial_bus descriptor */
+
+                               conversion_table =
+                                   acpi_gbl_convert_resource_serial_bus_dispatch
+                                   [resource->data.common_serial_bus.type];
+                       }
+               } else {
+                       conversion_table =
+                           acpi_gbl_set_resource_dispatch[resource->type];
+               }
+
+               if (!conversion_table) {
+                       ACPI_ERROR((AE_INFO,
+                                   "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+                                   resource->type));
+                       return (AE_AML_INVALID_RESOURCE_TYPE);
+               }
+
+               status = acpi_rs_convert_resource_to_aml(resource,
+                                                        ACPI_CAST_PTR(union
+                                                                      aml_resource,
+                                                                      aml),
+                                                        conversion_table);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "Could not convert resource (type 0x%X) to AML",
@@ -192,9 +241,7 @@ acpi_rs_convert_resources_to_aml(struct acpi_resource *resource,
 
                /* Point to the next input resource descriptor */
 
-               resource =
-                   ACPI_ADD_PTR(struct acpi_resource, resource,
-                                resource->length);
+               resource = ACPI_NEXT_RESOURCE(resource);
        }
 
        /* Completed buffer, but did not find an end_tag resource descriptor */
index 7cc6d8625f1e66d488b3e8c0da0c885e2483db42..4fd611ad02b48d9d706cfe886d347c345f2bbdda 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 410264b22a296011108454e353b21dbaaf646b2c..8073b371cc7cd32b44a4cab6dba88e3ae19b149f 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -83,6 +83,10 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
 
        ACPI_FUNCTION_TRACE(rs_convert_aml_to_resource);
 
+       if (!info) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
        if (((acpi_size) resource) & 0x3) {
 
                /* Each internal resource struct is expected to be 32-bit aligned */
@@ -101,7 +105,6 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
         * table length (# of table entries)
         */
        count = INIT_TABLE_LENGTH(info);
-
        while (count) {
                /*
                 * Source is the external AML byte stream buffer,
@@ -145,6 +148,14 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
                            ((ACPI_GET8(source) >> info->value) & 0x03);
                        break;
 
+               case ACPI_RSC_3BITFLAG:
+                       /*
+                        * Mask and shift the flag bits
+                        */
+                       ACPI_SET8(destination) = (u8)
+                           ((ACPI_GET8(source) >> info->value) & 0x07);
+                       break;
+
                case ACPI_RSC_COUNT:
 
                        item_count = ACPI_GET8(source);
@@ -163,6 +174,69 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
                            (info->value * (item_count - 1));
                        break;
 
+               case ACPI_RSC_COUNT_GPIO_PIN:
+
+                       target = ACPI_ADD_PTR(void, aml, info->value);
+                       item_count = ACPI_GET16(target) - ACPI_GET16(source);
+
+                       resource->length = resource->length + item_count;
+                       item_count = item_count / 2;
+                       ACPI_SET16(destination) = item_count;
+                       break;
+
+               case ACPI_RSC_COUNT_GPIO_VEN:
+
+                       item_count = ACPI_GET8(source);
+                       ACPI_SET8(destination) = (u8)item_count;
+
+                       resource->length = resource->length +
+                           (info->value * item_count);
+                       break;
+
+               case ACPI_RSC_COUNT_GPIO_RES:
+
+                       /*
+                        * Vendor data is optional (length/offset may both be zero)
+                        * Examine vendor data length field first
+                        */
+                       target = ACPI_ADD_PTR(void, aml, (info->value + 2));
+                       if (ACPI_GET16(target)) {
+
+                               /* Use vendor offset to get resource source length */
+
+                               target = ACPI_ADD_PTR(void, aml, info->value);
+                               item_count =
+                                   ACPI_GET16(target) - ACPI_GET16(source);
+                       } else {
+                               /* No vendor data to worry about */
+
+                               item_count = aml->large_header.resource_length +
+                                   sizeof(struct aml_resource_large_header) -
+                                   ACPI_GET16(source);
+                       }
+
+                       resource->length = resource->length + item_count;
+                       ACPI_SET16(destination) = item_count;
+                       break;
+
+               case ACPI_RSC_COUNT_SERIAL_VEN:
+
+                       item_count = ACPI_GET16(source) - info->value;
+
+                       resource->length = resource->length + item_count;
+                       ACPI_SET16(destination) = item_count;
+                       break;
+
+               case ACPI_RSC_COUNT_SERIAL_RES:
+
+                       item_count = (aml_resource_length +
+                                     sizeof(struct aml_resource_large_header))
+                           - ACPI_GET16(source) - info->value;
+
+                       resource->length = resource->length + item_count;
+                       ACPI_SET16(destination) = item_count;
+                       break;
+
                case ACPI_RSC_LENGTH:
 
                        resource->length = resource->length + info->value;
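
A worked example for the ACPI_RSC_COUNT_GPIO_PIN case above, assuming (as the
GPIO conversion tables imply) that 'source' addresses the pin-table offset
field and info->value addresses the following offset field. With illustrative
values pin_table_offset = 23 and res_source_offset = 31, the pin table spans
31 - 23 = 8 bytes, which are added to the internal resource length, and the
pin count stored in the resource structure is 8 / 2 = 4 pins:

        /* Illustrative values only */
        item_count = 31 - 23;           /* 8 bytes of pin table        */
        pin_count  = item_count / 2;    /* 4 pins, one u16 per pin     */
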
@@ -183,6 +257,72 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
                                          info->opcode);
                        break;
 
+               case ACPI_RSC_MOVE_GPIO_PIN:
+
+                       /* Generate and set the PIN data pointer */
+
+                       target = (char *)ACPI_ADD_PTR(void, resource,
+                                                     (resource->length -
+                                                      item_count * 2));
+                       *(u16 **)destination = ACPI_CAST_PTR(u16, target);
+
+                       /* Copy the PIN data */
+
+                       source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
+                       acpi_rs_move_data(target, source, item_count,
+                                         info->opcode);
+                       break;
+
+               case ACPI_RSC_MOVE_GPIO_RES:
+
+                       /* Generate and set the resource_source string pointer */
+
+                       target = (char *)ACPI_ADD_PTR(void, resource,
+                                                     (resource->length -
+                                                      item_count));
+                       *(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+                       /* Copy the resource_source string */
+
+                       source = ACPI_ADD_PTR(void, aml, ACPI_GET16(source));
+                       acpi_rs_move_data(target, source, item_count,
+                                         info->opcode);
+                       break;
+
+               case ACPI_RSC_MOVE_SERIAL_VEN:
+
+                       /* Generate and set the Vendor Data pointer */
+
+                       target = (char *)ACPI_ADD_PTR(void, resource,
+                                                     (resource->length -
+                                                      item_count));
+                       *(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+                       /* Copy the Vendor Data */
+
+                       source = ACPI_ADD_PTR(void, aml, info->value);
+                       acpi_rs_move_data(target, source, item_count,
+                                         info->opcode);
+                       break;
+
+               case ACPI_RSC_MOVE_SERIAL_RES:
+
+                       /* Generate and set the resource_source string pointer */
+
+                       target = (char *)ACPI_ADD_PTR(void, resource,
+                                                     (resource->length -
+                                                      item_count));
+                       *(u8 **)destination = ACPI_CAST_PTR(u8, target);
+
+                       /* Copy the resource_source string */
+
+                       source =
+                           ACPI_ADD_PTR(void, aml,
+                                        (ACPI_GET16(source) + info->value));
+                       acpi_rs_move_data(target, source, item_count,
+                                         info->opcode);
+                       break;
+
                case ACPI_RSC_SET8:
 
                        ACPI_MEMSET(destination, info->aml_offset, info->value);
@@ -219,13 +359,18 @@ acpi_rs_convert_aml_to_resource(struct acpi_resource *resource,
                         * Optional resource_source (Index and String). This is the more
                         * complicated case used by the Interrupt() macro
                         */
-                       target =
-                           ACPI_ADD_PTR(char, resource,
-                                        info->aml_offset + (item_count * 4));
+                       target = ACPI_ADD_PTR(char, resource,
+                                             info->aml_offset +
+                                             (item_count * 4));
 
                        resource->length +=
                            acpi_rs_get_resource_source(aml_resource_length,
-                                                       (acpi_rs_length) (((item_count - 1) * sizeof(u32)) + info->value), destination, aml, target);
+                                                       (acpi_rs_length)
+                                                       (((item_count -
+                                                          1) * sizeof(u32)) +
+                                                        info->value),
+                                                       destination, aml,
+                                                       target);
                        break;
 
                case ACPI_RSC_BITMASK:
@@ -327,6 +472,7 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
 {
        void *source = NULL;
        void *destination;
+       char *target;
        acpi_rsdesc_size aml_length = 0;
        u8 count;
        u16 temp16 = 0;
@@ -334,6 +480,10 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
 
        ACPI_FUNCTION_TRACE(rs_convert_resource_to_aml);
 
+       if (!info) {
+               return_ACPI_STATUS(AE_BAD_PARAMETER);
+       }
+
        /*
         * First table entry must be ACPI_RSC_INITxxx and must contain the
         * table length (# of table entries)
@@ -383,6 +533,14 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
                            ((ACPI_GET8(source) & 0x03) << info->value);
                        break;
 
+               case ACPI_RSC_3BITFLAG:
+                       /*
+                        * Mask and shift the flag bits
+                        */
+                       ACPI_SET8(destination) |= (u8)
+                           ((ACPI_GET8(source) & 0x07) << info->value);
+                       break;
+
                case ACPI_RSC_COUNT:
 
                        item_count = ACPI_GET8(source);
@@ -400,6 +558,63 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
                        acpi_rs_set_resource_length(aml_length, aml);
                        break;
 
+               case ACPI_RSC_COUNT_GPIO_PIN:
+
+                       item_count = ACPI_GET16(source);
+                       ACPI_SET16(destination) = (u16)aml_length;
+
+                       aml_length = (u16)(aml_length + item_count * 2);
+                       target = ACPI_ADD_PTR(void, aml, info->value);
+                       ACPI_SET16(target) = (u16)aml_length;
+                       acpi_rs_set_resource_length(aml_length, aml);
+                       break;
+
+               case ACPI_RSC_COUNT_GPIO_VEN:
+
+                       item_count = ACPI_GET16(source);
+                       ACPI_SET16(destination) = (u16)item_count;
+
+                       aml_length =
+                           (u16)(aml_length + (info->value * item_count));
+                       acpi_rs_set_resource_length(aml_length, aml);
+                       break;
+
+               case ACPI_RSC_COUNT_GPIO_RES:
+
+                       /* Set resource source string length */
+
+                       item_count = ACPI_GET16(source);
+                       ACPI_SET16(destination) = (u16)aml_length;
+
+                       /* Compute offset for the Vendor Data */
+
+                       aml_length = (u16)(aml_length + item_count);
+                       target = ACPI_ADD_PTR(void, aml, info->value);
+
+                       /* Set vendor offset only if there is vendor data */
+
+                       if (resource->data.gpio.vendor_length) {
+                               ACPI_SET16(target) = (u16)aml_length;
+                       }
+
+                       acpi_rs_set_resource_length(aml_length, aml);
+                       break;
+
+               case ACPI_RSC_COUNT_SERIAL_VEN:
+
+                       item_count = ACPI_GET16(source);
+                       ACPI_SET16(destination) = item_count + info->value;
+                       aml_length = (u16)(aml_length + item_count);
+                       acpi_rs_set_resource_length(aml_length, aml);
+                       break;
+
+               case ACPI_RSC_COUNT_SERIAL_RES:
+
+                       item_count = ACPI_GET16(source);
+                       aml_length = (u16)(aml_length + item_count);
+                       acpi_rs_set_resource_length(aml_length, aml);
+                       break;
+
                case ACPI_RSC_LENGTH:
 
                        acpi_rs_set_resource_length(info->value, aml);
@@ -417,6 +632,48 @@ acpi_rs_convert_resource_to_aml(struct acpi_resource *resource,
                                          info->opcode);
                        break;
 
+               case ACPI_RSC_MOVE_GPIO_PIN:
+
+                       destination = (char *)ACPI_ADD_PTR(void, aml,
+                                                          ACPI_GET16
+                                                          (destination));
+                       source = *(u16 **)source;
+                       acpi_rs_move_data(destination, source, item_count,
+                                         info->opcode);
+                       break;
+
+               case ACPI_RSC_MOVE_GPIO_RES:
+
+                       /* Used for both resource_source string and vendor_data */
+
+                       destination = (char *)ACPI_ADD_PTR(void, aml,
+                                                          ACPI_GET16
+                                                          (destination));
+                       source = *(u8 **)source;
+                       acpi_rs_move_data(destination, source, item_count,
+                                         info->opcode);
+                       break;
+
+               case ACPI_RSC_MOVE_SERIAL_VEN:
+
+                       destination = (char *)ACPI_ADD_PTR(void, aml,
+                                                          (aml_length -
+                                                           item_count));
+                       source = *(u8 **)source;
+                       acpi_rs_move_data(destination, source, item_count,
+                                         info->opcode);
+                       break;
+
+               case ACPI_RSC_MOVE_SERIAL_RES:
+
+                       destination = (char *)ACPI_ADD_PTR(void, aml,
+                                                          (aml_length -
+                                                           item_count));
+                       source = *(u8 **)source;
+                       acpi_rs_move_data(destination, source, item_count,
+                                         info->opcode);
+                       break;
+
                case ACPI_RSC_ADDRESS:
 
                        /* Set the Resource Type, General Flags, and Type-Specific Flags */
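The ACPI_RSC_MOVE_SERIAL_VEN/RES cases place their payload at the current tail of the descriptor, i.e. at aml + (aml_length - item_count), because the matching COUNT case has already grown aml_length by item_count. A small sketch of that append-at-the-tail pattern (buffer sizes assumed, not taken from the patch):

#include <stdint.h>
#include <string.h>

static void append_tail(uint8_t *aml, uint16_t aml_length,
                        const uint8_t *payload, uint16_t item_count)
{
        /* aml_length already accounts for item_count, so the payload
         * starts item_count bytes before the current end of the AML. */
        memcpy(aml + (aml_length - item_count), payload, item_count);
}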
diff --git a/drivers/acpi/acpica/rsserial.c b/drivers/acpi/acpica/rsserial.c
new file mode 100644 (file)
index 0000000..9aa5e68
--- /dev/null
@@ -0,0 +1,441 @@
+/*******************************************************************************
+ *
+ * Module Name: rsserial - GPIO/serial_bus resource descriptors
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acresrc.h"
+
+#define _COMPONENT          ACPI_RESOURCES
+ACPI_MODULE_NAME("rsserial")
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_gpio
+ *
+ ******************************************************************************/
+struct acpi_rsconvert_info acpi_rs_convert_gpio[17] = {
+       {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_GPIO,
+        ACPI_RS_SIZE(struct acpi_resource_gpio),
+        ACPI_RSC_TABLE_SIZE(acpi_rs_convert_gpio)},
+
+       {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_GPIO,
+        sizeof(struct aml_resource_gpio),
+        0},
+
+       /*
+        * These fields are contiguous in both the source and destination:
+        * revision_id
+        * connection_type
+        */
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.revision_id),
+        AML_OFFSET(gpio.revision_id),
+        2},
+
+       {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.producer_consumer),
+        AML_OFFSET(gpio.flags),
+        0},
+
+       {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.sharable),
+        AML_OFFSET(gpio.int_flags),
+        3},
+
+       {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.io_restriction),
+        AML_OFFSET(gpio.int_flags),
+        0},
+
+       {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.gpio.triggering),
+        AML_OFFSET(gpio.int_flags),
+        0},
+
+       {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.gpio.polarity),
+        AML_OFFSET(gpio.int_flags),
+        1},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.pin_config),
+        AML_OFFSET(gpio.pin_config),
+        1},
+
+       /*
+        * These fields are contiguous in both the source and destination:
+        * drive_strength
+        * debounce_timeout
+        */
+       {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.gpio.drive_strength),
+        AML_OFFSET(gpio.drive_strength),
+        2},
+
+       /* Pin Table */
+
+       {ACPI_RSC_COUNT_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table_length),
+        AML_OFFSET(gpio.pin_table_offset),
+        AML_OFFSET(gpio.res_source_offset)},
+
+       {ACPI_RSC_MOVE_GPIO_PIN, ACPI_RS_OFFSET(data.gpio.pin_table),
+        AML_OFFSET(gpio.pin_table_offset),
+        0},
+
+       /* Resource Source */
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.gpio.resource_source.index),
+        AML_OFFSET(gpio.res_source_index),
+        1},
+
+       {ACPI_RSC_COUNT_GPIO_RES,
+        ACPI_RS_OFFSET(data.gpio.resource_source.string_length),
+        AML_OFFSET(gpio.res_source_offset),
+        AML_OFFSET(gpio.vendor_offset)},
+
+       {ACPI_RSC_MOVE_GPIO_RES,
+        ACPI_RS_OFFSET(data.gpio.resource_source.string_ptr),
+        AML_OFFSET(gpio.res_source_offset),
+        0},
+
+       /* Vendor Data */
+
+       {ACPI_RSC_COUNT_GPIO_VEN, ACPI_RS_OFFSET(data.gpio.vendor_length),
+        AML_OFFSET(gpio.vendor_length),
+        1},
+
+       {ACPI_RSC_MOVE_GPIO_RES, ACPI_RS_OFFSET(data.gpio.vendor_data),
+        AML_OFFSET(gpio.vendor_offset),
+        0},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_i2c_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_i2c_serial_bus[16] = {
+       {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+        ACPI_RS_SIZE(struct acpi_resource_i2c_serialbus),
+        ACPI_RSC_TABLE_SIZE(acpi_rs_convert_i2c_serial_bus)},
+
+       {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+        sizeof(struct aml_resource_i2c_serialbus),
+        0},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+        AML_OFFSET(common_serial_bus.revision_id),
+        1},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+        AML_OFFSET(common_serial_bus.type),
+        1},
+
+       {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+        AML_OFFSET(common_serial_bus.flags),
+        0},
+
+       {ACPI_RSC_1BITFLAG,
+        ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+        AML_OFFSET(common_serial_bus.flags),
+        1},
+
+       {ACPI_RSC_MOVE8,
+        ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+        AML_OFFSET(common_serial_bus.type_revision_id),
+        1},
+
+       {ACPI_RSC_MOVE16,
+        ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+        AML_OFFSET(common_serial_bus.type_data_length),
+        1},
+
+       /* Vendor data */
+
+       {ACPI_RSC_COUNT_SERIAL_VEN,
+        ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+        AML_OFFSET(common_serial_bus.type_data_length),
+        AML_RESOURCE_I2C_MIN_DATA_LEN},
+
+       {ACPI_RSC_MOVE_SERIAL_VEN,
+        ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+        0,
+        sizeof(struct aml_resource_i2c_serialbus)},
+
+       /* Resource Source */
+
+       {ACPI_RSC_MOVE8,
+        ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+        AML_OFFSET(common_serial_bus.res_source_index),
+        1},
+
+       {ACPI_RSC_COUNT_SERIAL_RES,
+        ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+        AML_OFFSET(common_serial_bus.type_data_length),
+        sizeof(struct aml_resource_common_serialbus)},
+
+       {ACPI_RSC_MOVE_SERIAL_RES,
+        ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+        AML_OFFSET(common_serial_bus.type_data_length),
+        sizeof(struct aml_resource_common_serialbus)},
+
+       /* I2C bus type specific */
+
+       {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.i2c_serial_bus.access_mode),
+        AML_OFFSET(i2c_serial_bus.type_specific_flags),
+        0},
+
+       {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.i2c_serial_bus.connection_speed),
+        AML_OFFSET(i2c_serial_bus.connection_speed),
+        1},
+
+       {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.i2c_serial_bus.slave_address),
+        AML_OFFSET(i2c_serial_bus.slave_address),
+        1},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_spi_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_spi_serial_bus[20] = {
+       {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+        ACPI_RS_SIZE(struct acpi_resource_spi_serialbus),
+        ACPI_RSC_TABLE_SIZE(acpi_rs_convert_spi_serial_bus)},
+
+       {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+        sizeof(struct aml_resource_spi_serialbus),
+        0},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+        AML_OFFSET(common_serial_bus.revision_id),
+        1},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+        AML_OFFSET(common_serial_bus.type),
+        1},
+
+       {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+        AML_OFFSET(common_serial_bus.flags),
+        0},
+
+       {ACPI_RSC_1BITFLAG,
+        ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+        AML_OFFSET(common_serial_bus.flags),
+        1},
+
+       {ACPI_RSC_MOVE8,
+        ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+        AML_OFFSET(common_serial_bus.type_revision_id),
+        1},
+
+       {ACPI_RSC_MOVE16,
+        ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+        AML_OFFSET(common_serial_bus.type_data_length),
+        1},
+
+       /* Vendor data */
+
+       {ACPI_RSC_COUNT_SERIAL_VEN,
+        ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+        AML_OFFSET(common_serial_bus.type_data_length),
+        AML_RESOURCE_SPI_MIN_DATA_LEN},
+
+       {ACPI_RSC_MOVE_SERIAL_VEN,
+        ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+        0,
+        sizeof(struct aml_resource_spi_serialbus)},
+
+       /* Resource Source */
+
+       {ACPI_RSC_MOVE8,
+        ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+        AML_OFFSET(common_serial_bus.res_source_index),
+        1},
+
+       {ACPI_RSC_COUNT_SERIAL_RES,
+        ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+        AML_OFFSET(common_serial_bus.type_data_length),
+        sizeof(struct aml_resource_common_serialbus)},
+
+       {ACPI_RSC_MOVE_SERIAL_RES,
+        ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+        AML_OFFSET(common_serial_bus.type_data_length),
+        sizeof(struct aml_resource_common_serialbus)},
+
+       /* Spi bus type specific  */
+
+       {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.wire_mode),
+        AML_OFFSET(spi_serial_bus.type_specific_flags),
+        0},
+
+       {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.spi_serial_bus.device_polarity),
+        AML_OFFSET(spi_serial_bus.type_specific_flags),
+        1},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.data_bit_length),
+        AML_OFFSET(spi_serial_bus.data_bit_length),
+        1},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_phase),
+        AML_OFFSET(spi_serial_bus.clock_phase),
+        1},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.spi_serial_bus.clock_polarity),
+        AML_OFFSET(spi_serial_bus.clock_polarity),
+        1},
+
+       {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.spi_serial_bus.device_selection),
+        AML_OFFSET(spi_serial_bus.device_selection),
+        1},
+
+       {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.spi_serial_bus.connection_speed),
+        AML_OFFSET(spi_serial_bus.connection_speed),
+        1},
+};
+
+/*******************************************************************************
+ *
+ * acpi_rs_convert_uart_serial_bus
+ *
+ ******************************************************************************/
+
+struct acpi_rsconvert_info acpi_rs_convert_uart_serial_bus[22] = {
+       {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_SERIAL_BUS,
+        ACPI_RS_SIZE(struct acpi_resource_uart_serialbus),
+        ACPI_RSC_TABLE_SIZE(acpi_rs_convert_uart_serial_bus)},
+
+       {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_SERIAL_BUS,
+        sizeof(struct aml_resource_uart_serialbus),
+        0},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.revision_id),
+        AML_OFFSET(common_serial_bus.revision_id),
+        1},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.common_serial_bus.type),
+        AML_OFFSET(common_serial_bus.type),
+        1},
+
+       {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.common_serial_bus.slave_mode),
+        AML_OFFSET(common_serial_bus.flags),
+        0},
+
+       {ACPI_RSC_1BITFLAG,
+        ACPI_RS_OFFSET(data.common_serial_bus.producer_consumer),
+        AML_OFFSET(common_serial_bus.flags),
+        1},
+
+       {ACPI_RSC_MOVE8,
+        ACPI_RS_OFFSET(data.common_serial_bus.type_revision_id),
+        AML_OFFSET(common_serial_bus.type_revision_id),
+        1},
+
+       {ACPI_RSC_MOVE16,
+        ACPI_RS_OFFSET(data.common_serial_bus.type_data_length),
+        AML_OFFSET(common_serial_bus.type_data_length),
+        1},
+
+       /* Vendor data */
+
+       {ACPI_RSC_COUNT_SERIAL_VEN,
+        ACPI_RS_OFFSET(data.common_serial_bus.vendor_length),
+        AML_OFFSET(common_serial_bus.type_data_length),
+        AML_RESOURCE_UART_MIN_DATA_LEN},
+
+       {ACPI_RSC_MOVE_SERIAL_VEN,
+        ACPI_RS_OFFSET(data.common_serial_bus.vendor_data),
+        0,
+        sizeof(struct aml_resource_uart_serialbus)},
+
+       /* Resource Source */
+
+       {ACPI_RSC_MOVE8,
+        ACPI_RS_OFFSET(data.common_serial_bus.resource_source.index),
+        AML_OFFSET(common_serial_bus.res_source_index),
+        1},
+
+       {ACPI_RSC_COUNT_SERIAL_RES,
+        ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_length),
+        AML_OFFSET(common_serial_bus.type_data_length),
+        sizeof(struct aml_resource_common_serialbus)},
+
+       {ACPI_RSC_MOVE_SERIAL_RES,
+        ACPI_RS_OFFSET(data.common_serial_bus.resource_source.string_ptr),
+        AML_OFFSET(common_serial_bus.type_data_length),
+        sizeof(struct aml_resource_common_serialbus)},
+
+       /* Uart bus type specific  */
+
+       {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.flow_control),
+        AML_OFFSET(uart_serial_bus.type_specific_flags),
+        0},
+
+       {ACPI_RSC_2BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.stop_bits),
+        AML_OFFSET(uart_serial_bus.type_specific_flags),
+        2},
+
+       {ACPI_RSC_3BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.data_bits),
+        AML_OFFSET(uart_serial_bus.type_specific_flags),
+        4},
+
+       {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.uart_serial_bus.endian),
+        AML_OFFSET(uart_serial_bus.type_specific_flags),
+        7},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.parity),
+        AML_OFFSET(uart_serial_bus.parity),
+        1},
+
+       {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.uart_serial_bus.lines_enabled),
+        AML_OFFSET(uart_serial_bus.lines_enabled),
+        1},
+
+       {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.rx_fifo_size),
+        AML_OFFSET(uart_serial_bus.rx_fifo_size),
+        1},
+
+       {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.uart_serial_bus.tx_fifo_size),
+        AML_OFFSET(uart_serial_bus.tx_fifo_size),
+        1},
+
+       {ACPI_RSC_MOVE32,
+        ACPI_RS_OFFSET(data.uart_serial_bus.default_baud_rate),
+        AML_OFFSET(uart_serial_bus.default_baud_rate),
+        1},
+};
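Each table above is an array of four-field opcodes that drive the generic resource/AML converter. A simplified model of what one entry carries (field names are assumed for illustration; the real definition lives in acresrc.h):

#include <stdint.h>

struct rsconvert_entry {
        uint8_t  opcode;           /* e.g. MOVE8, 1BITFLAG, COUNT_SERIAL_VEN */
        uint16_t resource_offset;  /* offset into struct acpi_resource */
        uint16_t aml_offset;       /* offset into the raw AML descriptor */
        uint16_t value;            /* bit position, item count, or minimum
                                    * data length, depending on the opcode */
};

/* The first entry carries the resource type and table size (INITGET) and
 * the second the AML descriptor name and size (INITSET); every remaining
 * entry converts one field, which is why acpi_rs_convert_gpio is sized
 * [17] for its fifteen field operations. */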
index 231811e569399c5b75d2b7ba55be22bf82edbdbb..433a375deb9350e9635b103e3c593d6e7c3838da 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -144,6 +144,9 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
                         * since there are no alignment or endian issues
                         */
                case ACPI_RSC_MOVE8:
+               case ACPI_RSC_MOVE_GPIO_RES:
+               case ACPI_RSC_MOVE_SERIAL_VEN:
+               case ACPI_RSC_MOVE_SERIAL_RES:
                        ACPI_MEMCPY(destination, source, item_count);
                        return;
 
@@ -153,6 +156,7 @@ acpi_rs_move_data(void *destination, void *source, u16 item_count, u8 move_type)
                         * misaligned memory transfers
                         */
                case ACPI_RSC_MOVE16:
+               case ACPI_RSC_MOVE_GPIO_PIN:
                        ACPI_MOVE_16_TO_16(&ACPI_CAST_PTR(u16, destination)[i],
                                           &ACPI_CAST_PTR(u16, source)[i]);
                        break;
@@ -588,6 +592,56 @@ acpi_rs_get_prs_method_data(struct acpi_namespace_node *node,
 }
 #endif                         /*  ACPI_FUTURE_USAGE  */
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_rs_get_aei_method_data
+ *
+ * PARAMETERS:  Node            - Device node
+ *              ret_buffer      - Pointer to a buffer structure for the
+ *                                results
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to get the _AEI resources of the
+ *              device specified by the node passed in.
+ *
+ *              If the function fails, an appropriate status will be returned
+ *              and the contents of the caller's buffer are undefined.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_rs_get_aei_method_data(struct acpi_namespace_node *node,
+                           struct acpi_buffer *ret_buffer)
+{
+       union acpi_operand_object *obj_desc;
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(rs_get_aei_method_data);
+
+       /* Parameters guaranteed valid by caller */
+
+       /* Execute the method, no parameters */
+
+       status = acpi_ut_evaluate_object(node, METHOD_NAME__AEI,
+                                        ACPI_BTYPE_BUFFER, &obj_desc);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       /*
+        * Make the call to create a resource linked list from the
+        * byte stream buffer that comes back from the _AEI method
+        * execution.
+        */
+       status = acpi_rs_create_resource_list(obj_desc, ret_buffer);
+
+       /* On exit, we must delete the object returned by evaluate_object */
+
+       acpi_ut_remove_reference(obj_desc);
+       return_ACPI_STATUS(status);
+}
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_rs_get_method_data
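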
index fe86b37b16ce657bd719b68c656ce252581a0452..f58c098c7aeb3fd5f5e35b532fbbd82da6eab42a 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -307,6 +307,46 @@ acpi_set_current_resources(acpi_handle device_handle,
 
 ACPI_EXPORT_SYMBOL(acpi_set_current_resources)
 
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_get_event_resources
+ *
+ * PARAMETERS:  device_handle   - Handle to the device object for the
+ *                                device whose resources we are getting
+ *              ret_buffer      - Pointer to a buffer that will receive the
+ *                                event resources for the device
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: This function is called to get the event resources for a
+ *              specific device. The caller must first acquire a handle for
+ *              the desired device. The resource data is returned in the
+ *              buffer pointed to by ret_buffer. This function uses the
+ *              _AEI method.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_get_event_resources(acpi_handle device_handle,
+                        struct acpi_buffer *ret_buffer)
+{
+       acpi_status status;
+       struct acpi_namespace_node *node;
+
+       ACPI_FUNCTION_TRACE(acpi_get_event_resources);
+
+       /* Validate parameters then dispatch to internal routine */
+
+       status = acpi_rs_validate_parameters(device_handle, ret_buffer, &node);
+       if (ACPI_FAILURE(status)) {
+               return_ACPI_STATUS(status);
+       }
+
+       status = acpi_rs_get_aei_method_data(node, ret_buffer);
+       return_ACPI_STATUS(status);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_get_event_resources)
+
 /******************************************************************************
  *
  * FUNCTION:    acpi_resource_to_address64
@@ -486,8 +526,9 @@ acpi_rs_match_vendor_resource(struct acpi_resource *resource, void *context)
  *
  * PARAMETERS:  device_handle   - Handle to the device object for the
  *                                device we are querying
- *              Name            - Method name of the resources we want
- *                                (METHOD_NAME__CRS or METHOD_NAME__PRS)
+ *              Name            - Method name of the resources we want.
+ *                                (METHOD_NAME__CRS, METHOD_NAME__PRS, or
+ *                                METHOD_NAME__AEI)
  *              user_function   - Called for each resource
  *              Context         - Passed to user_function
  *
@@ -514,11 +555,12 @@ acpi_walk_resources(acpi_handle device_handle,
 
        if (!device_handle || !user_function || !name ||
            (!ACPI_COMPARE_NAME(name, METHOD_NAME__CRS) &&
-            !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS))) {
+            !ACPI_COMPARE_NAME(name, METHOD_NAME__PRS) &&
+            !ACPI_COMPARE_NAME(name, METHOD_NAME__AEI))) {
                return_ACPI_STATUS(AE_BAD_PARAMETER);
        }
 
-       /* Get the _CRS or _PRS resource list */
+       /* Get the _CRS/_PRS/_AEI resource list */
 
        buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
        status = acpi_rs_get_method_data(device_handle, name, &buffer);
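With _AEI accepted by acpi_walk_resources() and exported through acpi_get_event_resources(), a caller can retrieve a device's GPIO event resources much like its _CRS settings. A hedged usage sketch (error handling minimal, dump_aei is a hypothetical helper, not part of the patch):

#include <acpi/acpi.h>

static acpi_status dump_aei(acpi_handle handle)
{
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        acpi_status status;

        status = acpi_get_event_resources(handle, &buffer);
        if (ACPI_FAILURE(status))
                return status;

        /* buffer.pointer now holds struct acpi_resource entries,
         * terminated by an ACPI_RESOURCE_TYPE_END_TAG entry. */
        ACPI_FREE(buffer.pointer);
        return AE_OK;
}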
index 6f5588e62c0ac24d396288f661fa3a959b52f734..c5d870406f4126adf7ce80ffa0635d175c1a772d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -63,14 +63,15 @@ static void acpi_tb_setup_fadt_registers(void);
 
 typedef struct acpi_fadt_info {
        char *name;
-       u8 address64;
-       u8 address32;
-       u8 length;
+       u16 address64;
+       u16 address32;
+       u16 length;
        u8 default_length;
        u8 type;
 
 } acpi_fadt_info;
 
+#define ACPI_FADT_OPTIONAL          0
 #define ACPI_FADT_REQUIRED          1
 #define ACPI_FADT_SEPARATE_LENGTH   2
 
@@ -87,7 +88,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
         ACPI_FADT_OFFSET(pm1b_event_block),
         ACPI_FADT_OFFSET(pm1_event_length),
         ACPI_PM1_REGISTER_WIDTH * 2,   /* Enable + Status register */
-        0},
+        ACPI_FADT_OPTIONAL},
 
        {"Pm1aControlBlock",
         ACPI_FADT_OFFSET(xpm1a_control_block),
@@ -101,7 +102,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
         ACPI_FADT_OFFSET(pm1b_control_block),
         ACPI_FADT_OFFSET(pm1_control_length),
         ACPI_PM1_REGISTER_WIDTH,
-        0},
+        ACPI_FADT_OPTIONAL},
 
        {"Pm2ControlBlock",
         ACPI_FADT_OFFSET(xpm2_control_block),
@@ -139,7 +140,7 @@ static struct acpi_fadt_info fadt_info_table[] = {
 
 typedef struct acpi_fadt_pm_info {
        struct acpi_generic_address *target;
-       u8 source;
+       u16 source;
        u8 register_num;
 
 } acpi_fadt_pm_info;
@@ -253,8 +254,13 @@ void acpi_tb_parse_fadt(u32 table_index)
        acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xdsdt,
                              ACPI_SIG_DSDT, ACPI_TABLE_INDEX_DSDT);
 
-       acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.Xfacs,
-                             ACPI_SIG_FACS, ACPI_TABLE_INDEX_FACS);
+       /* If Hardware Reduced flag is set, there is no FACS */
+
+       if (!acpi_gbl_reduced_hardware) {
+               acpi_tb_install_table((acpi_physical_address) acpi_gbl_FADT.
+                                     Xfacs, ACPI_SIG_FACS,
+                                     ACPI_TABLE_INDEX_FACS);
+       }
 }
 
 /*******************************************************************************
@@ -277,12 +283,12 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
 {
        /*
         * Check if the FADT is larger than the largest table that we expect
-        * (the ACPI 2.0/3.0 version). If so, truncate the table, and issue
+        * (the ACPI 5.0 version). If so, truncate the table, and issue
         * a warning.
         */
        if (length > sizeof(struct acpi_table_fadt)) {
                ACPI_WARNING((AE_INFO,
-                             "FADT (revision %u) is longer than ACPI 2.0 version, "
+                             "FADT (revision %u) is longer than ACPI 5.0 version, "
                              "truncating length %u to %u",
                              table->revision, length,
                              (u32)sizeof(struct acpi_table_fadt)));
@@ -297,6 +303,13 @@ void acpi_tb_create_local_fadt(struct acpi_table_header *table, u32 length)
        ACPI_MEMCPY(&acpi_gbl_FADT, table,
                    ACPI_MIN(length, sizeof(struct acpi_table_fadt)));
 
+       /* Take a copy of the Hardware Reduced flag */
+
+       acpi_gbl_reduced_hardware = FALSE;
+       if (acpi_gbl_FADT.flags & ACPI_FADT_HW_REDUCED) {
+               acpi_gbl_reduced_hardware = TRUE;
+       }
+
        /* Convert the local copy of the FADT to the common internal format */
 
        acpi_tb_convert_fadt();
@@ -502,6 +515,12 @@ static void acpi_tb_validate_fadt(void)
                acpi_gbl_FADT.Xdsdt = (u64) acpi_gbl_FADT.dsdt;
        }
 
+       /* If Hardware Reduced flag is set, we are all done */
+
+       if (acpi_gbl_reduced_hardware) {
+               return;
+       }
+
        /* Examine all of the 64-bit extended address fields (X fields) */
 
        for (i = 0; i < ACPI_FADT_INFO_ENTRIES; i++) {
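The hardware-reduced handling above keys off a single bit in the FADT Flags field, cached in acpi_gbl_reduced_hardware after the local FADT copy is made. A minimal sketch of the test, using the flag name from the hunk above (ACPI 5.0 defines HW_REDUCED_ACPI as bit 20 of the Flags field):

#include <acpi/acpi.h>

static u8 fadt_is_hw_reduced(const struct acpi_table_fadt *fadt)
{
        /* TRUE when the platform has no FACS and no fixed hardware */
        return (fadt->flags & ACPI_FADT_HW_REDUCED) ? TRUE : FALSE;
}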
index a55cb2bb5abbfdee7b448ced5baf23bb37c68a4a..4903e36ea75a3f4f24930c43b497626b930d8f0e 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 62365f6075dda417e7204d55105707ef1a64eaf9..1aecf7baa4e0c2f2a0f35c7ebc4ecd8bbbe161c7 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 0f2d395feabadb20e8ce47d88f53ff8d786e2427..09ca39e143373c7dfeb11efc63f01540e62d3edb 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -135,6 +135,13 @@ acpi_status acpi_tb_initialize_facs(void)
 {
        acpi_status status;
 
+       /* If Hardware Reduced flag is set, there is no FACS */
+
+       if (acpi_gbl_reduced_hardware) {
+               acpi_gbl_FACS = NULL;
+               return (AE_OK);
+       }
+
        status = acpi_get_table_by_index(ACPI_TABLE_INDEX_FACS,
                                         ACPI_CAST_INDIRECT_PTR(struct
                                                                acpi_table_header,
index e7d13f5d3f2da0f77d7b04a71e1118cb2793f7cd..abcc6412c24492129b8bb003963a7f7e826f80fc 100644 (file)
@@ -6,7 +6,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7eb6c6cc1edf76000d8842edc6a1a36bf261a43b..4258f647ca3d63cb20cf9ce2d0b01701f6d00e11 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utaddress.c b/drivers/acpi/acpica/utaddress.c
new file mode 100644 (file)
index 0000000..67932ae
--- /dev/null
@@ -0,0 +1,294 @@
+/******************************************************************************
+ *
+ * Module Name: utaddress - op_region address range check
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utaddress")
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_add_address_range
+ *
+ * PARAMETERS:  space_id            - Address space ID
+ *              Address             - op_region start address
+ *              Length              - op_region length
+ *              region_node         - op_region namespace node
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Add the Operation Region address range to the global list.
+ *              The only supported Space IDs are Memory and I/O. Called when
+ *              the op_region address/length operands are fully evaluated.
+ *
+ * MUTEX:       Locks the namespace
+ *
+ * NOTE: Because this interface is only called when an op_region argument
+ * list is evaluated, there cannot be any duplicate region_nodes.
+ * Duplicate Address/Length values are allowed, however, so that multiple
+ * address conflicts can be detected.
+ *
+ ******************************************************************************/
+acpi_status
+acpi_ut_add_address_range(acpi_adr_space_type space_id,
+                         acpi_physical_address address,
+                         u32 length, struct acpi_namespace_node *region_node)
+{
+       struct acpi_address_range *range_info;
+       acpi_status status;
+
+       ACPI_FUNCTION_TRACE(ut_add_address_range);
+
+       if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+           (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+               return_ACPI_STATUS(AE_OK);
+       }
+
+       /* Allocate/init a new info block, add it to the appropriate list */
+
+       range_info = ACPI_ALLOCATE(sizeof(struct acpi_address_range));
+       if (!range_info) {
+               return_ACPI_STATUS(AE_NO_MEMORY);
+       }
+
+       range_info->start_address = address;
+       range_info->end_address = (address + length - 1);
+       range_info->region_node = region_node;
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+       if (ACPI_FAILURE(status)) {
+               ACPI_FREE(range_info);
+               return_ACPI_STATUS(status);
+       }
+
+       range_info->next = acpi_gbl_address_range_list[space_id];
+       acpi_gbl_address_range_list[space_id] = range_info;
+
+       ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+                         "\nAdded [%4.4s] address range: 0x%p-0x%p\n",
+                         acpi_ut_get_node_name(range_info->region_node),
+                         ACPI_CAST_PTR(void, address),
+                         ACPI_CAST_PTR(void, range_info->end_address)));
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+       return_ACPI_STATUS(AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_remove_address_range
+ *
+ * PARAMETERS:  space_id            - Address space ID
+ *              region_node         - op_region namespace node
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Remove the Operation Region from the global list. The only
+ *              supported Space IDs are Memory and I/O. Called when an
+ *              op_region is deleted.
+ *
+ * MUTEX:       Assumes the namespace is locked
+ *
+ ******************************************************************************/
+
+void
+acpi_ut_remove_address_range(acpi_adr_space_type space_id,
+                            struct acpi_namespace_node *region_node)
+{
+       struct acpi_address_range *range_info;
+       struct acpi_address_range *prev;
+
+       ACPI_FUNCTION_TRACE(ut_remove_address_range);
+
+       if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+           (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+               return_VOID;
+       }
+
+       /* Get the appropriate list head and check the list */
+
+       range_info = prev = acpi_gbl_address_range_list[space_id];
+       while (range_info) {
+               if (range_info->region_node == region_node) {
+                       if (range_info == prev) {       /* Found at list head */
+                               acpi_gbl_address_range_list[space_id] =
+                                   range_info->next;
+                       } else {
+                               prev->next = range_info->next;
+                       }
+
+                       ACPI_DEBUG_PRINT((ACPI_DB_NAMES,
+                                         "\nRemoved [%4.4s] address range: 0x%p-0x%p\n",
+                                         acpi_ut_get_node_name(range_info->
+                                                               region_node),
+                                         ACPI_CAST_PTR(void,
+                                                       range_info->
+                                                       start_address),
+                                         ACPI_CAST_PTR(void,
+                                                       range_info->
+                                                       end_address)));
+
+                       ACPI_FREE(range_info);
+                       return_VOID;
+               }
+
+               prev = range_info;
+               range_info = range_info->next;
+       }
+
+       return_VOID;
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_check_address_range
+ *
+ * PARAMETERS:  space_id            - Address space ID
+ *              Address             - Start address
+ *              Length              - Length of address range
+ *              Warn                - TRUE if warning on overlap desired
+ *
+ * RETURN:      Count of the number of conflicts detected. Zero is always
+ *              returned for Space IDs other than Memory or I/O.
+ *
+ * DESCRIPTION: Check if the input address range overlaps any of the
+ *              ASL operation region address ranges. The only supported
+ *              Space IDs are Memory and I/O.
+ *
+ * MUTEX:       Assumes the namespace is locked.
+ *
+ ******************************************************************************/
+
+u32
+acpi_ut_check_address_range(acpi_adr_space_type space_id,
+                           acpi_physical_address address, u32 length, u8 warn)
+{
+       struct acpi_address_range *range_info;
+       acpi_physical_address end_address;
+       char *pathname;
+       u32 overlap_count = 0;
+
+       ACPI_FUNCTION_TRACE(ut_check_address_range);
+
+       if ((space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY) &&
+           (space_id != ACPI_ADR_SPACE_SYSTEM_IO)) {
+               return_UINT32(0);
+       }
+
+       range_info = acpi_gbl_address_range_list[space_id];
+       end_address = address + length - 1;
+
+       /* Check entire list for all possible conflicts */
+
+       while (range_info) {
+               /*
+                * Check if the requested Address/Length overlaps this address_range.
+                * Four cases to consider:
+                *
+                * 1) Input address/length is contained completely in the address range
+                * 2) Input address/length overlaps range at the range start
+                * 3) Input address/length overlaps range at the range end
+                * 4) Input address/length completely encompasses the range
+                */
+               if ((address <= range_info->end_address) &&
+                   (end_address >= range_info->start_address)) {
+
+                       /* Found an address range overlap */
+
+                       overlap_count++;
+                       if (warn) {     /* Optional warning message */
+                               pathname =
+                                   acpi_ns_get_external_pathname(range_info->
+                                                                 region_node);
+
+                               ACPI_WARNING((AE_INFO,
+                                             "0x%p-0x%p %s conflicts with Region %s %d",
+                                             ACPI_CAST_PTR(void, address),
+                                             ACPI_CAST_PTR(void, end_address),
+                                             acpi_ut_get_region_name(space_id),
+                                             pathname, overlap_count));
+                               ACPI_FREE(pathname);
+                       }
+               }
+
+               range_info = range_info->next;
+       }
+
+       return_UINT32(overlap_count);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_delete_address_lists
+ *
+ * PARAMETERS:  None
+ *
+ * RETURN:      None
+ *
+ * DESCRIPTION: Delete all global address range lists (called during
+ *              subsystem shutdown).
+ *
+ ******************************************************************************/
+
+void acpi_ut_delete_address_lists(void)
+{
+       struct acpi_address_range *next;
+       struct acpi_address_range *range_info;
+       int i;
+
+       /* Delete all elements in all address range lists */
+
+       for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+               next = acpi_gbl_address_range_list[i];
+
+               while (next) {
+                       range_info = next;
+                       next = range_info->next;
+                       ACPI_FREE(range_info);
+               }
+
+               acpi_gbl_address_range_list[i] = NULL;
+       }
+}
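The four overlap cases enumerated in acpi_ut_check_address_range() reduce to a single comparison of two inclusive ranges, which is exactly what the code tests. A self-contained model with illustrative addresses:

#include <stdint.h>
#include <assert.h>

static int ranges_overlap(uint64_t a_start, uint64_t a_end,
                          uint64_t b_start, uint64_t b_end)
{
        /* Two inclusive ranges overlap iff each starts before the other ends */
        return (a_start <= b_end) && (a_end >= b_start);
}

int main(void)
{
        /* op_region at 0x1000-0x10FF, request at 0x10F0 with length 0x20 */
        assert(ranges_overlap(0x10F0, 0x10F0 + 0x20 - 1, 0x1000, 0x10FF));
        /* adjacent but non-overlapping ranges do not conflict */
        assert(!ranges_overlap(0x1100, 0x11FF, 0x1000, 0x10FF));
        return 0;
}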
index 0a697351cf6974474852370b843880ee3872ab7d..9982d2ea66fbbc382a4b6daaf64d1bdcf647b678 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index aded299a2fa842210a7ee9c616ff321573f8c5c4..3317c0a406ee539381e5f80f93983c6653f8697f 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index a1f8d7509e664f256bb03153319699915fc0bfef..a0998a886318dba8f286f93e393fc2994848d2b5 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 8b087e2d64f4d9e331be57092e3c36395b58db8c..d42ede5260c77cac6f9b947ec625c252ebe027bc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -171,7 +171,9 @@ const char *acpi_gbl_region_types[ACPI_NUM_PREDEFINED_REGIONS] = {
        "SMBus",
        "SystemCMOS",
        "PCIBARTarget",
-       "IPMI"
+       "IPMI",
+       "GeneralPurposeIo",
+       "GenericSerialBus"
 };
 
 char *acpi_ut_get_region_name(u8 space_id)
index 31f5a7832ef135e94659179fbec6a927098b5fa3..2a6c3e183697ac68a6c6bca589116c20f8bf4520 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -215,11 +215,14 @@ static void acpi_ut_delete_internal_obj(union acpi_operand_object *object)
                ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS,
                                  "***** Region %p\n", object));
 
-               /* Invalidate the region address/length via the host OS */
-
-               acpi_os_invalidate_address(object->region.space_id,
-                                         object->region.address,
-                                         (acpi_size) object->region.length);
+               /*
+                * Update address_range list. However, only permanent regions
+                * are installed in this list. (Not created within a method)
+                */
+               if (!(object->region.node->flags & ANOBJ_TEMPORARY)) {
+                       acpi_ut_remove_address_range(object->region.space_id,
+                                                    object->region.node);
+               }
 
                second_desc = acpi_ns_get_secondary_object(object);
                if (second_desc) {
index 18f73c9d10bce26a71a469569a868c72bc75fabd..479f32b33415722742c10c7b125cebd682b4b054 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ffba0a39c3e8598d1aeceecd1a007ddf974213cd..4153584cf526a81bb3ab99bd3f5f59a37fb19f42 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -264,6 +264,12 @@ acpi_status acpi_ut_init_globals(void)
                return_ACPI_STATUS(status);
        }
 
+       /* Address Range lists */
+
+       for (i = 0; i < ACPI_ADDRESS_RANGE_MAX; i++) {
+               acpi_gbl_address_range_list[i] = NULL;
+       }
+
        /* Mutex locked flags */
 
        for (i = 0; i < ACPI_NUM_MUTEX; i++) {
index b679ea693545db6fd16703268c9cbd6354cecfcd..c92eb1d937859d673ff1c08293ef4ad94f861328 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 191b6828cce99d300a20fd01d559b21628a0189c..8359c0c5dc9830456ec2605f2bcc9114cd4a25cc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -92,6 +92,7 @@ static void acpi_ut_terminate(void)
                gpe_xrupt_info = next_gpe_xrupt_info;
        }
 
+       acpi_ut_delete_address_lists();
        return_VOID;
 }
 
index f6bb75c6faf5ea4a653569e9653e3094a07b7056..155fd786d0f2a45b6174f210b621fee6b6b7436d 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index ce481da9bb451858da374fdee0e5f271196ab335..2491a552b0e69eeba847364f3ab643505f5afb09 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index c33a852d4f42a5d9b9113840bcd918680a3836b8..86f19db74e0549949c9685e391166b282523b949 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 7d797e2baecd11996ae49525bc99e79f1fbbd5e9..43174df3312100f0a5e9d4b58dfde1679c9a1fd3 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -293,14 +293,10 @@ acpi_status acpi_ut_acquire_mutex(acpi_mutex_handle mutex_id)
 
 acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
 {
-       acpi_thread_id this_thread_id;
-
        ACPI_FUNCTION_NAME(ut_release_mutex);
 
-       this_thread_id = acpi_os_get_thread_id();
-
        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Thread %u releasing Mutex [%s]\n",
-                         (u32)this_thread_id,
+                         (u32)acpi_os_get_thread_id(),
                          acpi_ut_get_mutex_name(mutex_id)));
 
        if (mutex_id > ACPI_MAX_MUTEX) {
@@ -329,7 +325,8 @@ acpi_status acpi_ut_release_mutex(acpi_mutex_handle mutex_id)
                 * the ACPI subsystem code.
                 */
                for (i = mutex_id; i < ACPI_NUM_MUTEX; i++) {
-                       if (acpi_gbl_mutex_info[i].thread_id == this_thread_id) {
+                       if (acpi_gbl_mutex_info[i].thread_id ==
+                           acpi_os_get_thread_id()) {
                                if (i == mutex_id) {
                                        continue;
                                }
index 188340a017b4036422e900ccac69d44f44754780..b112744fc9ae3018dd591b5904f94b8059ea6b10 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 1fb10cb8f11dfdaa660c0bf4ee9095a79986bd55..2360cf70c18ccdcb810ccc08c2c520d3bcaf3948 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 6ffd3a8bdaa5dadbdac41ede3dd0f6c95db8c0f4..9d441ea703052545c2b9f98f122de42a6acfb26e 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -43,7 +43,7 @@
 
 #include <acpi/acpi.h>
 #include "accommon.h"
-#include "amlresrc.h"
+#include "acresrc.h"
 
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utresrc")
@@ -154,6 +154,138 @@ const char *acpi_gbl_typ_decode[] = {
        "TypeF"
 };
 
+const char *acpi_gbl_ppc_decode[] = {
+       "PullDefault",
+       "PullUp",
+       "PullDown",
+       "PullNone"
+};
+
+const char *acpi_gbl_ior_decode[] = {
+       "IoRestrictionNone",
+       "IoRestrictionInputOnly",
+       "IoRestrictionOutputOnly",
+       "IoRestrictionNoneAndPreserve"
+};
+
+const char *acpi_gbl_dts_decode[] = {
+       "Width8bit",
+       "Width16bit",
+       "Width32bit",
+       "Width64bit",
+       "Width128bit",
+       "Width256bit",
+};
+
+/* GPIO connection type */
+
+const char *acpi_gbl_ct_decode[] = {
+       "Interrupt",
+       "I/O"
+};
+
+/* Serial bus type */
+
+const char *acpi_gbl_sbt_decode[] = {
+       "/* UNKNOWN serial bus type */",
+       "I2C",
+       "SPI",
+       "UART"
+};
+
+/* I2C serial bus access mode */
+
+const char *acpi_gbl_am_decode[] = {
+       "AddressingMode7Bit",
+       "AddressingMode10Bit"
+};
+
+/* I2C serial bus slave mode */
+
+const char *acpi_gbl_sm_decode[] = {
+       "ControllerInitiated",
+       "DeviceInitiated"
+};
+
+/* SPI serial bus wire mode */
+
+const char *acpi_gbl_wm_decode[] = {
+       "FourWireMode",
+       "ThreeWireMode"
+};
+
+/* SPI serial clock phase */
+
+const char *acpi_gbl_cph_decode[] = {
+       "ClockPhaseFirst",
+       "ClockPhaseSecond"
+};
+
+/* SPI serial bus clock polarity */
+
+const char *acpi_gbl_cpo_decode[] = {
+       "ClockPolarityLow",
+       "ClockPolarityHigh"
+};
+
+/* SPI serial bus device polarity */
+
+const char *acpi_gbl_dp_decode[] = {
+       "PolarityLow",
+       "PolarityHigh"
+};
+
+/* UART serial bus endian */
+
+const char *acpi_gbl_ed_decode[] = {
+       "LittleEndian",
+       "BigEndian"
+};
+
+/* UART serial bus bits per byte */
+
+const char *acpi_gbl_bpb_decode[] = {
+       "DataBitsFive",
+       "DataBitsSix",
+       "DataBitsSeven",
+       "DataBitsEight",
+       "DataBitsNine",
+       "/* UNKNOWN Bits per byte */",
+       "/* UNKNOWN Bits per byte */",
+       "/* UNKNOWN Bits per byte */"
+};
+
+/* UART serial bus stop bits */
+
+const char *acpi_gbl_sb_decode[] = {
+       "StopBitsNone",
+       "StopBitsOne",
+       "StopBitsOnePlusHalf",
+       "StopBitsTwo"
+};
+
+/* UART serial bus flow control */
+
+const char *acpi_gbl_fc_decode[] = {
+       "FlowControlNone",
+       "FlowControlHardware",
+       "FlowControlXON",
+       "/* UNKNOWN flow control keyword */"
+};
+
+/* UART serial bus parity type */
+
+const char *acpi_gbl_pt_decode[] = {
+       "ParityTypeNone",
+       "ParityTypeEven",
+       "ParityTypeOdd",
+       "ParityTypeMark",
+       "ParityTypeSpace",
+       "/* UNKNOWN parity keyword */",
+       "/* UNKNOWN parity keyword */",
+       "/* UNKNOWN parity keyword */"
+};
+
 #endif
 
 /*
@@ -173,7 +305,7 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
        ACPI_AML_SIZE_SMALL(struct aml_resource_end_dependent),
        ACPI_AML_SIZE_SMALL(struct aml_resource_io),
        ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_io),
-       0,
+       ACPI_AML_SIZE_SMALL(struct aml_resource_fixed_dma),
        0,
        0,
        0,
@@ -193,7 +325,17 @@ const u8 acpi_gbl_resource_aml_sizes[] = {
        ACPI_AML_SIZE_LARGE(struct aml_resource_address16),
        ACPI_AML_SIZE_LARGE(struct aml_resource_extended_irq),
        ACPI_AML_SIZE_LARGE(struct aml_resource_address64),
-       ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64)
+       ACPI_AML_SIZE_LARGE(struct aml_resource_extended_address64),
+       ACPI_AML_SIZE_LARGE(struct aml_resource_gpio),
+       0,
+       ACPI_AML_SIZE_LARGE(struct aml_resource_common_serialbus),
+};
+
+const u8 acpi_gbl_resource_aml_serial_bus_sizes[] = {
+       0,
+       ACPI_AML_SIZE_LARGE(struct aml_resource_i2c_serialbus),
+       ACPI_AML_SIZE_LARGE(struct aml_resource_spi_serialbus),
+       ACPI_AML_SIZE_LARGE(struct aml_resource_uart_serialbus),
 };
 
 /*
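acpi_gbl_resource_aml_serial_bus_sizes is indexed by the serial bus Type byte of the common serial-bus descriptor (1 = I2C, 2 = SPI, 3 = UART; 0 is reserved), in parallel with the acpi_gbl_sbt_decode strings above. A sketch of the lookup with assumed, illustrative sizes:

#include <stdio.h>

int main(void)
{
        /* Sizes are placeholders; the real values come from
         * ACPI_AML_SIZE_LARGE() over the aml_resource_*_serialbus structs. */
        static const unsigned char serial_bus_sizes[] = { 0, 16, 21, 26 };
        unsigned bus_type = 3;      /* 1 = I2C, 2 = SPI, 3 = UART */

        printf("fixed AML size for bus type %u: %u bytes\n",
               bus_type, (unsigned)serial_bus_sizes[bus_type]);
        return 0;
}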
@@ -209,35 +351,49 @@ static const u8 acpi_gbl_resource_types[] = {
        0,
        0,
        0,
-       ACPI_SMALL_VARIABLE_LENGTH,
-       ACPI_FIXED_LENGTH,
-       ACPI_SMALL_VARIABLE_LENGTH,
-       ACPI_FIXED_LENGTH,
-       ACPI_FIXED_LENGTH,
-       ACPI_FIXED_LENGTH,
-       0,
+       ACPI_SMALL_VARIABLE_LENGTH,     /* 04 IRQ */
+       ACPI_FIXED_LENGTH,      /* 05 DMA */
+       ACPI_SMALL_VARIABLE_LENGTH,     /* 06 start_dependent_functions */
+       ACPI_FIXED_LENGTH,      /* 07 end_dependent_functions */
+       ACPI_FIXED_LENGTH,      /* 08 IO */
+       ACPI_FIXED_LENGTH,      /* 09 fixed_iO */
+       ACPI_FIXED_LENGTH,      /* 0_a fixed_dMA */
        0,
        0,
        0,
-       ACPI_VARIABLE_LENGTH,
-       ACPI_FIXED_LENGTH,
+       ACPI_VARIABLE_LENGTH,   /* 0_e vendor_short */
+       ACPI_FIXED_LENGTH,      /* 0_f end_tag */
 
        /* Large descriptors */
 
        0,
-       ACPI_FIXED_LENGTH,
-       ACPI_FIXED_LENGTH,
+       ACPI_FIXED_LENGTH,      /* 01 Memory24 */
+       ACPI_FIXED_LENGTH,      /* 02 generic_register */
        0,
-       ACPI_VARIABLE_LENGTH,
-       ACPI_FIXED_LENGTH,
-       ACPI_FIXED_LENGTH,
-       ACPI_VARIABLE_LENGTH,
-       ACPI_VARIABLE_LENGTH,
-       ACPI_VARIABLE_LENGTH,
-       ACPI_VARIABLE_LENGTH,
-       ACPI_FIXED_LENGTH
+       ACPI_VARIABLE_LENGTH,   /* 04 vendor_long */
+       ACPI_FIXED_LENGTH,      /* 05 Memory32 */
+       ACPI_FIXED_LENGTH,      /* 06 memory32_fixed */
+       ACPI_VARIABLE_LENGTH,   /* 07 Dword* address */
+       ACPI_VARIABLE_LENGTH,   /* 08 Word* address */
+       ACPI_VARIABLE_LENGTH,   /* 09 extended_iRQ */
+       ACPI_VARIABLE_LENGTH,   /* 0_a Qword* address */
+       ACPI_FIXED_LENGTH,      /* 0_b Extended* address */
+       ACPI_VARIABLE_LENGTH,   /* 0_c Gpio* */
+       0,
+       ACPI_VARIABLE_LENGTH    /* 0_e *serial_bus */
 };
 
+/*
+ * For the i_aSL compiler/disassembler, we don't want any error messages
+ * because the disassembler uses the resource validation code to determine
+ * if Buffer objects are actually Resource Templates.
+ */
+#ifdef ACPI_ASL_COMPILER
+#define ACPI_RESOURCE_ERROR(plist)
+#else
+#define ACPI_RESOURCE_ERROR(plist)  ACPI_ERROR(plist)
+#endif
+
 /*******************************************************************************
  *
  * FUNCTION:    acpi_ut_walk_aml_resources
@@ -265,6 +421,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
        u8 resource_index;
        u32 length;
        u32 offset = 0;
+       u8 end_tag[2] = { 0x79, 0x00 };
 
        ACPI_FUNCTION_TRACE(ut_walk_aml_resources);
 
@@ -286,6 +443,10 @@ acpi_ut_walk_aml_resources(u8 * aml,
 
                status = acpi_ut_validate_resource(aml, &resource_index);
                if (ACPI_FAILURE(status)) {
+                       /*
+                        * Exit on failure. Cannot continue because the descriptor length
+                        * may be bogus also.
+                        */
                        return_ACPI_STATUS(status);
                }
 
@@ -300,7 +461,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
                            user_function(aml, length, offset, resource_index,
                                          context);
                        if (ACPI_FAILURE(status)) {
-                               return (status);
+                               return_ACPI_STATUS(status);
                        }
                }
 
@@ -333,7 +494,19 @@ acpi_ut_walk_aml_resources(u8 * aml,
 
        /* Did not find an end_tag descriptor */
 
-       return (AE_AML_NO_RESOURCE_END_TAG);
+       if (user_function) {
+
+               /* Insert an end_tag anyway. acpi_rs_get_list_length always leaves room */
+
+               (void)acpi_ut_validate_resource(end_tag, &resource_index);
+               status =
+                   user_function(end_tag, 2, offset, resource_index, context);
+               if (ACPI_FAILURE(status)) {
+                       return_ACPI_STATUS(status);
+               }
+       }
+
+       return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG);
 }
 
 /*******************************************************************************
@@ -354,6 +527,7 @@ acpi_ut_walk_aml_resources(u8 * aml,
 
 acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
 {
+       union aml_resource *aml_resource;
        u8 resource_type;
        u8 resource_index;
        acpi_rs_length resource_length;
@@ -375,7 +549,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
                /* Verify the large resource type (name) against the max */
 
                if (resource_type > ACPI_RESOURCE_NAME_LARGE_MAX) {
-                       return (AE_AML_INVALID_RESOURCE_TYPE);
+                       goto invalid_resource;
                }
 
                /*
@@ -392,15 +566,17 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
                    ((resource_type & ACPI_RESOURCE_NAME_SMALL_MASK) >> 3);
        }
 
-       /* Check validity of the resource type, zero indicates name is invalid */
-
+       /*
+        * Check validity of the resource type, via acpi_gbl_resource_types. Zero
+        * indicates an invalid resource.
+        */
        if (!acpi_gbl_resource_types[resource_index]) {
-               return (AE_AML_INVALID_RESOURCE_TYPE);
+               goto invalid_resource;
        }
 
        /*
-        * 2) Validate the resource_length field. This ensures that the length
-        *    is at least reasonable, and guarantees that it is non-zero.
+        * Validate the resource_length field. This ensures that the length
+        * is at least reasonable, and guarantees that it is non-zero.
         */
        resource_length = acpi_ut_get_resource_length(aml);
        minimum_resource_length = acpi_gbl_resource_aml_sizes[resource_index];
@@ -413,7 +589,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
                /* Fixed length resource, length must match exactly */
 
                if (resource_length != minimum_resource_length) {
-                       return (AE_AML_BAD_RESOURCE_LENGTH);
+                       goto bad_resource_length;
                }
                break;
 
@@ -422,7 +598,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
                /* Variable length resource, length must be at least the minimum */
 
                if (resource_length < minimum_resource_length) {
-                       return (AE_AML_BAD_RESOURCE_LENGTH);
+                       goto bad_resource_length;
                }
                break;
 
@@ -432,7 +608,7 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
 
                if ((resource_length > minimum_resource_length) ||
                    (resource_length < (minimum_resource_length - 1))) {
-                       return (AE_AML_BAD_RESOURCE_LENGTH);
+                       goto bad_resource_length;
                }
                break;
 
@@ -440,7 +616,23 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
 
                /* Shouldn't happen (because of validation earlier), but be sure */
 
-               return (AE_AML_INVALID_RESOURCE_TYPE);
+               goto invalid_resource;
+       }
+
+       aml_resource = ACPI_CAST_PTR(union aml_resource, aml);
+       if (resource_type == ACPI_RESOURCE_NAME_SERIAL_BUS) {
+
+               /* Validate the bus_type field */
+
+               if ((aml_resource->common_serial_bus.type == 0) ||
+                   (aml_resource->common_serial_bus.type >
+                    AML_RESOURCE_MAX_SERIALBUSTYPE)) {
+                       ACPI_RESOURCE_ERROR((AE_INFO,
+                                            "Invalid/unsupported SerialBus resource descriptor: BusType 0x%2.2X",
+                                            aml_resource->common_serial_bus.
+                                            type));
+                       return (AE_AML_INVALID_RESOURCE_TYPE);
+               }
        }
 
        /* Optionally return the resource table index */
@@ -450,6 +642,22 @@ acpi_status acpi_ut_validate_resource(void *aml, u8 * return_index)
        }
 
        return (AE_OK);
+
+      invalid_resource:
+
+       ACPI_RESOURCE_ERROR((AE_INFO,
+                            "Invalid/unsupported resource descriptor: Type 0x%2.2X",
+                            resource_type));
+       return (AE_AML_INVALID_RESOURCE_TYPE);
+
+      bad_resource_length:
+
+       ACPI_RESOURCE_ERROR((AE_INFO,
+                            "Invalid resource descriptor length: Type "
+                            "0x%2.2X, Length 0x%4.4X, MinLength 0x%4.4X",
+                            resource_type, resource_length,
+                            minimum_resource_length));
+       return (AE_AML_BAD_RESOURCE_LENGTH);
 }
 
 /*******************************************************************************
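The two-byte end_tag buffer added in acpi_ut_walk_aml_resources above is the AML EndTag descriptor: a small descriptor with name 0x0F and length 1 (first byte 0x79), followed by a zero checksum byte. A worked example of how the small-descriptor table index is derived for it, using the 0x78 name mask from the ACPI small-descriptor layout (shown for illustration only):

    u8 end_tag_byte = 0x79;                          /* SmallType 0x0F, length 1 */
    u8 resource_index = (end_tag_byte & 0x78) >> 3;  /* = 0x0F, the end_tag entry */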
index 30c21e1a9360c79adad9516dbd12787f9e892bd8..4267477c2797b2558becff9b23683d24a3cfadd1 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
index 420ebfe08c7218b091e1050217524547b15284aa..644e8c8ebc4b30c6ea3ca607708e3478c806c4bc 100644 (file)
@@ -5,7 +5,7 @@
  *****************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -48,6 +48,7 @@
 #include "acnamesp.h"
 #include "acdebug.h"
 #include "actables.h"
+#include "acinterp.h"
 
 #define _COMPONENT          ACPI_UTILITIES
 ACPI_MODULE_NAME("utxface")
@@ -640,4 +641,41 @@ acpi_status acpi_install_interface_handler(acpi_interface_handler handler)
 }
 
 ACPI_EXPORT_SYMBOL(acpi_install_interface_handler)
+
+/*****************************************************************************
+ *
+ * FUNCTION:    acpi_check_address_range
+ *
+ * PARAMETERS:  space_id            - Address space ID
+ *              Address             - Start address
+ *              Length              - Length
+ *              Warn                - TRUE if warning on overlap desired
+ *
+ * RETURN:      Count of the number of conflicts detected.
+ *
+ * DESCRIPTION: Check if the input address range overlaps any of the
+ *              ASL operation region address ranges.
+ *
+ ****************************************************************************/
+u32
+acpi_check_address_range(acpi_adr_space_type space_id,
+                        acpi_physical_address address,
+                        acpi_size length, u8 warn)
+{
+       u32 overlaps;
+       acpi_status status;
+
+       status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE);
+       if (ACPI_FAILURE(status)) {
+               return (0);
+       }
+
+       overlaps = acpi_ut_check_address_range(space_id, address,
+                                              (u32)length, warn);
+
+       (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE);
+       return (overlaps);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_check_address_range)
 #endif                         /* !ACPI_ASL_COMPILER */
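A hedged usage sketch for the acpi_check_address_range() interface exported above; the address space, range, and the reaction to a conflict are purely illustrative:

    u32 conflicts;

    conflicts = acpi_check_address_range(ACPI_ADR_SPACE_SYSTEM_IO,
                                         0x60,    /* illustrative start address */
                                         0x02,    /* illustrative length */
                                         1);      /* warn on overlap */
    if (conflicts)
            pr_warn("range overlaps %u ASL OperationRegion(s)\n", conflicts);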
index 8d0245ec431525bbd62a030d82e1eea2bfcdb9bd..52b568af18199570446431a81638814b3cc6c99a 100644 (file)
@@ -5,7 +5,7 @@
  ******************************************************************************/
 
 /*
- * Copyright (C) 2000 - 2011, Intel Corp.
+ * Copyright (C) 2000 - 2012, Intel Corp.
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
diff --git a/drivers/acpi/acpica/utxfmutex.c b/drivers/acpi/acpica/utxfmutex.c
new file mode 100644 (file)
index 0000000..1427d19
--- /dev/null
@@ -0,0 +1,187 @@
+/*******************************************************************************
+ *
+ * Module Name: utxfmutex - external AML mutex access functions
+ *
+ ******************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2012, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#include <acpi/acpi.h>
+#include "accommon.h"
+#include "acnamesp.h"
+
+#define _COMPONENT          ACPI_UTILITIES
+ACPI_MODULE_NAME("utxfmutex")
+
+/* Local prototypes */
+static acpi_status
+acpi_ut_get_mutex_object(acpi_handle handle,
+                        acpi_string pathname,
+                        union acpi_operand_object **ret_obj);
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_ut_get_mutex_object
+ *
+ * PARAMETERS:  Handle              - Mutex or prefix handle (optional)
+ *              Pathname            - Mutex pathname (optional)
+ *              ret_obj             - Where the mutex object is returned
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Get an AML mutex object. The mutex node is pointed to by
+ *              Handle:Pathname. Either Handle or Pathname can be NULL, but
+ *              not both.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_ut_get_mutex_object(acpi_handle handle,
+                        acpi_string pathname,
+                        union acpi_operand_object **ret_obj)
+{
+       struct acpi_namespace_node *mutex_node;
+       union acpi_operand_object *mutex_obj;
+       acpi_status status;
+
+       /* Parameter validation */
+
+       if (!ret_obj || (!handle && !pathname)) {
+               return (AE_BAD_PARAMETER);
+       }
+
+       /* Get the namespace node for the mutex */
+
+       mutex_node = handle;
+       if (pathname != NULL) {
+               status = acpi_get_handle(handle, pathname,
+                                        ACPI_CAST_PTR(acpi_handle,
+                                                      &mutex_node));
+               if (ACPI_FAILURE(status)) {
+                       return (status);
+               }
+       }
+
+       /* Ensure that we actually have a Mutex object */
+
+       if (!mutex_node || (mutex_node->type != ACPI_TYPE_MUTEX)) {
+               return (AE_TYPE);
+       }
+
+       /* Get the low-level mutex object */
+
+       mutex_obj = acpi_ns_get_attached_object(mutex_node);
+       if (!mutex_obj) {
+               return (AE_NULL_OBJECT);
+       }
+
+       *ret_obj = mutex_obj;
+       return (AE_OK);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_acquire_mutex
+ *
+ * PARAMETERS:  Handle              - Mutex or prefix handle (optional)
+ *              Pathname            - Mutex pathname (optional)
+ *              Timeout             - Max time to wait for the lock (millisec)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Acquire an AML mutex. This is a device driver interface to
+ *              AML mutex objects, and allows for transaction locking between
+ *              drivers and AML code. The mutex node is pointed to by
+ *              Handle:Pathname. Either Handle or Pathname can be NULL, but
+ *              not both.
+ *
+ ******************************************************************************/
+
+acpi_status
+acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout)
+{
+       acpi_status status;
+       union acpi_operand_object *mutex_obj;
+
+       /* Get the low-level mutex associated with Handle:Pathname */
+
+       status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       /* Acquire the OS mutex */
+
+       status = acpi_os_acquire_mutex(mutex_obj->mutex.os_mutex, timeout);
+       return (status);
+}
+
+/*******************************************************************************
+ *
+ * FUNCTION:    acpi_release_mutex
+ *
+ * PARAMETERS:  Handle              - Mutex or prefix handle (optional)
+ *              Pathname            - Mutex pathname (optional)
+ *
+ * RETURN:      Status
+ *
+ * DESCRIPTION: Release an AML mutex. This is a device driver interface to
+ *              AML mutex objects, and allows for transaction locking between
+ *              drivers and AML code. The mutex node is pointed to by
+ *              Handle:Pathname. Either Handle or Pathname can be NULL, but
+ *              not both.
+ *
+ ******************************************************************************/
+
+acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname)
+{
+       acpi_status status;
+       union acpi_operand_object *mutex_obj;
+
+       /* Get the low-level mutex associated with Handle:Pathname */
+
+       status = acpi_ut_get_mutex_object(handle, pathname, &mutex_obj);
+       if (ACPI_FAILURE(status)) {
+               return (status);
+       }
+
+       /* Release the OS mutex */
+
+       acpi_os_release_mutex(mutex_obj->mutex.os_mutex);
+       return (AE_OK);
+}
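A usage sketch for the AML mutex interface added by this new file; the mutex pathname is illustrative and 0xFFFF is the conventional "wait forever" timeout:

    acpi_status status;

    status = acpi_acquire_mutex(NULL, "\\_SB.PCI0.MUT0", 0xFFFF);
    if (ACPI_SUCCESS(status)) {
            /* ... access the resource shared with AML here ... */
            acpi_release_mutex(NULL, "\\_SB.PCI0.MUT0");
    }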
index 61540360d5ce815f67e67250874ad357f3e328e6..e5d53b7ddc7e0eb024f7cdc6863619bd2c3dc0a2 100644 (file)
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/kref.h>
 #include <linux/rculist.h>
 #include <linux/interrupt.h>
 #include <linux/debugfs.h>
-#include <acpi/atomicio.h>
 
 #include "apei-internal.h"
 
@@ -70,7 +70,7 @@ int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val)
 {
        int rc;
 
-       rc = acpi_atomic_read(val, &entry->register_region);
+       rc = apei_read(val, &entry->register_region);
        if (rc)
                return rc;
        *val >>= entry->register_region.bit_offset;
@@ -116,13 +116,13 @@ int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val)
        val <<= entry->register_region.bit_offset;
        if (entry->flags & APEI_EXEC_PRESERVE_REGISTER) {
                u64 valr = 0;
-               rc = acpi_atomic_read(&valr, &entry->register_region);
+               rc = apei_read(&valr, &entry->register_region);
                if (rc)
                        return rc;
                valr &= ~(entry->mask << entry->register_region.bit_offset);
                val |= valr;
        }
-       rc = acpi_atomic_write(val, &entry->register_region);
+       rc = apei_write(val, &entry->register_region);
 
        return rc;
 }
@@ -243,7 +243,7 @@ static int pre_map_gar_callback(struct apei_exec_context *ctx,
        u8 ins = entry->instruction;
 
        if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-               return acpi_pre_map_gar(&entry->register_region);
+               return acpi_os_map_generic_address(&entry->register_region);
 
        return 0;
 }
@@ -276,7 +276,7 @@ static int post_unmap_gar_callback(struct apei_exec_context *ctx,
        u8 ins = entry->instruction;
 
        if (ctx->ins_table[ins].flags & APEI_EXEC_INS_ACCESS_REGISTER)
-               acpi_post_unmap_gar(&entry->register_region);
+               acpi_os_unmap_generic_address(&entry->register_region);
 
        return 0;
 }
@@ -421,6 +421,17 @@ static int apei_resources_merge(struct apei_resources *resources1,
        return 0;
 }
 
+int apei_resources_add(struct apei_resources *resources,
+                      unsigned long start, unsigned long size,
+                      bool iomem)
+{
+       if (iomem)
+               return apei_res_add(&resources->iomem, start, size);
+       else
+               return apei_res_add(&resources->ioport, start, size);
+}
+EXPORT_SYMBOL_GPL(apei_resources_add);
+
 /*
  * EINJ has two groups of GARs (EINJ table entry and trigger table
  * entry), so common resources are subtracted from the trigger table
@@ -438,8 +449,19 @@ int apei_resources_sub(struct apei_resources *resources1,
 }
 EXPORT_SYMBOL_GPL(apei_resources_sub);
 
+static int apei_get_nvs_callback(__u64 start, __u64 size, void *data)
+{
+       struct apei_resources *resources = data;
+       return apei_res_add(&resources->iomem, start, size);
+}
+
+static int apei_get_nvs_resources(struct apei_resources *resources)
+{
+       return acpi_nvs_for_each_region(apei_get_nvs_callback, resources);
+}
+
 /*
- * IO memory/port rersource management mechanism is used to check
+ * IO memory/port resource management mechanism is used to check
  * whether memory/port area used by GARs conflicts with normal memory
  * or IO memory/port of devices.
  */
@@ -448,21 +470,35 @@ int apei_resources_request(struct apei_resources *resources,
 {
        struct apei_res *res, *res_bak = NULL;
        struct resource *r;
+       struct apei_resources nvs_resources;
        int rc;
 
        rc = apei_resources_sub(resources, &apei_resources_all);
        if (rc)
                return rc;
 
+       /*
+        * Some firmware uses the ACPI NVS region, which has already been
+        * marked as busy, so exclude it from the APEI resources to avoid a
+        * false conflict.
+        */
+       apei_resources_init(&nvs_resources);
+       rc = apei_get_nvs_resources(&nvs_resources);
+       if (rc)
+               goto res_fini;
+       rc = apei_resources_sub(resources, &nvs_resources);
+       if (rc)
+               goto res_fini;
+
        rc = -EINVAL;
        list_for_each_entry(res, &resources->iomem, list) {
                r = request_mem_region(res->start, res->end - res->start,
                                       desc);
                if (!r) {
                        pr_err(APEI_PFX
-               "Can not request iomem region <%016llx-%016llx> for GARs.\n",
+               "Can not request [mem %#010llx-%#010llx] for %s registers\n",
                               (unsigned long long)res->start,
-                              (unsigned long long)res->end);
+                              (unsigned long long)res->end - 1, desc);
                        res_bak = res;
                        goto err_unmap_iomem;
                }
@@ -472,9 +508,9 @@ int apei_resources_request(struct apei_resources *resources,
                r = request_region(res->start, res->end - res->start, desc);
                if (!r) {
                        pr_err(APEI_PFX
-               "Can not request ioport region <%016llx-%016llx> for GARs.\n",
+               "Can not request [io  %#06llx-%#06llx] for %s registers\n",
                               (unsigned long long)res->start,
-                              (unsigned long long)res->end);
+                              (unsigned long long)res->end - 1, desc);
                        res_bak = res;
                        goto err_unmap_ioport;
                }
@@ -500,6 +536,8 @@ err_unmap_iomem:
                        break;
                release_mem_region(res->start, res->end - res->start);
        }
+res_fini:
+       apei_resources_fini(&nvs_resources);
        return rc;
 }
 EXPORT_SYMBOL_GPL(apei_resources_request);
@@ -553,6 +591,69 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr)
        return 0;
 }
 
+/* read GAR in interrupt (including NMI) or process context */
+int apei_read(u64 *val, struct acpi_generic_address *reg)
+{
+       int rc;
+       u64 address;
+       acpi_status status;
+
+       rc = apei_check_gar(reg, &address);
+       if (rc)
+               return rc;
+
+       *val = 0;
+       switch(reg->space_id) {
+       case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+               status = acpi_os_read_memory64((acpi_physical_address)
+                                            address, val, reg->bit_width);
+               if (ACPI_FAILURE(status))
+                       return -EIO;
+               break;
+       case ACPI_ADR_SPACE_SYSTEM_IO:
+               status = acpi_os_read_port(address, (u32 *)val, reg->bit_width);
+               if (ACPI_FAILURE(status))
+                       return -EIO;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(apei_read);
+
+/* write GAR in interrupt (including NMI) or process context */
+int apei_write(u64 val, struct acpi_generic_address *reg)
+{
+       int rc;
+       u64 address;
+       acpi_status status;
+
+       rc = apei_check_gar(reg, &address);
+       if (rc)
+               return rc;
+
+       switch (reg->space_id) {
+       case ACPI_ADR_SPACE_SYSTEM_MEMORY:
+               status = acpi_os_write_memory64((acpi_physical_address)
+                                             address, val, reg->bit_width);
+               if (ACPI_FAILURE(status))
+                       return -EIO;
+               break;
+       case ACPI_ADR_SPACE_SYSTEM_IO:
+               status = acpi_os_write_port(address, val, reg->bit_width);
+               if (ACPI_FAILURE(status))
+                       return -EIO;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(apei_write);
+
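A short sketch of how the new GAR accessors might be combined for a read-modify-write; the function name is hypothetical and the GAR pointer is assumed to come from an APEI table entry:

    /* Illustrative only: set bits in a register described by a GAR */
    static int example_rmw_gar(struct acpi_generic_address *reg, u64 set_bits)
    {
            u64 val;
            int rc;

            rc = apei_read(&val, reg);
            if (rc)
                    return rc;              /* -EIO or -EINVAL, as above */
            return apei_write(val | set_bits, reg);
    }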
 static int collect_res_callback(struct apei_exec_context *ctx,
                                struct acpi_whea_header *entry,
                                void *data)
index f57050e7a5e756ceb1f8885ce01030d0ac593ea4..cca240a33038fe148ca5e9324214ba988c62c6c2 100644 (file)
@@ -68,6 +68,9 @@ static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 actio
 /* IP has been set in instruction function */
 #define APEI_EXEC_SET_IP       1
 
+int apei_read(u64 *val, struct acpi_generic_address *reg);
+int apei_write(u64 val, struct acpi_generic_address *reg);
+
 int __apei_exec_read_register(struct acpi_whea_header *entry, u64 *val);
 int __apei_exec_write_register(struct acpi_whea_header *entry, u64 val);
 int apei_exec_read_register(struct apei_exec_context *ctx,
@@ -95,6 +98,9 @@ static inline void apei_resources_init(struct apei_resources *resources)
 }
 
 void apei_resources_fini(struct apei_resources *resources);
+int apei_resources_add(struct apei_resources *resources,
+                      unsigned long start, unsigned long size,
+                      bool iomem);
 int apei_resources_sub(struct apei_resources *resources1,
                       struct apei_resources *resources2);
 int apei_resources_request(struct apei_resources *resources,
index 589b96c38704db0bf803842aaf3c6b22b890cd06..4ca087dd5f4fceb797e1272912b06f082af7549d 100644 (file)
 /* Firmware should respond within 1 millisecond */
 #define FIRMWARE_TIMEOUT       (1 * NSEC_PER_MSEC)
 
+/*
+ * ACPI version 5 provides a SET_ERROR_TYPE_WITH_ADDRESS action.
+ */
+static int acpi5;
+
+struct set_error_type_with_address {
+       u32     type;
+       u32     vendor_extension;
+       u32     flags;
+       u32     apicid;
+       u64     memory_address;
+       u64     memory_address_range;
+       u32     pcie_sbdf;
+};
+enum {
+       SETWA_FLAGS_APICID = 1,
+       SETWA_FLAGS_MEM = 2,
+       SETWA_FLAGS_PCIE_SBDF = 4,
+};
+
+/*
+ * Vendor extensions for platform specific operations
+ */
+struct vendor_error_type_extension {
+       u32     length;
+       u32     pcie_sbdf;
+       u16     vendor_id;
+       u16     device_id;
+       u8      rev_id;
+       u8      reserved[3];
+};
+
+static u32 vendor_flags;
+static struct debugfs_blob_wrapper vendor_blob;
+static char vendor_dev[64];
+
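A sketch of how the ACPI 5.0 parameter block defined above would be filled for an uncorrectable memory error; the address and range values are illustrative, and the real assignments are made in __einj_error_inject() later in this patch:

    struct set_error_type_with_address *v5param = einj_param;

    v5param->type = ACPI_EINJ_MEMORY_UNCORRECTABLE;
    v5param->memory_address = 0x123456000ULL;      /* illustrative address */
    v5param->memory_address_range = 0xfffULL;      /* illustrative range/mask */
    v5param->flags = SETWA_FLAGS_MEM;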
 /*
  * Some BIOSes allow parameters to the SET_ERROR_TYPE entries in the
  * EINJ table through an unpublished extension. Use with caution as
@@ -103,15 +139,7 @@ static struct apei_exec_ins_type einj_ins_type[] = {
  */
 static DEFINE_MUTEX(einj_mutex);
 
-static struct einj_parameter *einj_param;
-
-#ifndef writeq
-static inline void writeq(__u64 val, volatile void __iomem *addr)
-{
-       writel(val, addr);
-       writel(val >> 32, addr+4);
-}
-#endif
+static void *einj_param;
 
 static void einj_exec_ctx_init(struct apei_exec_context *ctx)
 {
@@ -158,10 +186,30 @@ static int einj_timedout(u64 *t)
        return 0;
 }
 
-static u64 einj_get_parameter_address(void)
+static void check_vendor_extension(u64 paddr,
+                                  struct set_error_type_with_address *v5param)
+{
+       int     offset = v5param->vendor_extension;
+       struct  vendor_error_type_extension *v;
+       u32     sbdf;
+
+       if (!offset)
+               return;
+       v = acpi_os_map_memory(paddr + offset, sizeof(*v));
+       if (!v)
+               return;
+       sbdf = v->pcie_sbdf;
+       sprintf(vendor_dev, "%x:%x:%x.%x vendor_id=%x device_id=%x rev_id=%x\n",
+               sbdf >> 24, (sbdf >> 16) & 0xff,
+               (sbdf >> 11) & 0x1f, (sbdf >> 8) & 0x7,
+                v->vendor_id, v->device_id, v->rev_id);
+       acpi_os_unmap_memory(v, sizeof(*v));
+}
+
+static void *einj_get_parameter_address(void)
 {
        int i;
-       u64 paddr = 0;
+       u64 paddrv4 = 0, paddrv5 = 0;
        struct acpi_whea_header *entry;
 
        entry = EINJ_TAB_ENTRY(einj_tab);
@@ -170,12 +218,40 @@ static u64 einj_get_parameter_address(void)
                    entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
                    entry->register_region.space_id ==
                    ACPI_ADR_SPACE_SYSTEM_MEMORY)
-                       memcpy(&paddr, &entry->register_region.address,
-                              sizeof(paddr));
+                       memcpy(&paddrv4, &entry->register_region.address,
+                              sizeof(paddrv4));
+               if (entry->action == ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS &&
+                   entry->instruction == ACPI_EINJ_WRITE_REGISTER &&
+                   entry->register_region.space_id ==
+                   ACPI_ADR_SPACE_SYSTEM_MEMORY)
+                       memcpy(&paddrv5, &entry->register_region.address,
+                              sizeof(paddrv5));
                entry++;
        }
+       if (paddrv5) {
+               struct set_error_type_with_address *v5param;
+
+               v5param = acpi_os_map_memory(paddrv5, sizeof(*v5param));
+               if (v5param) {
+                       acpi5 = 1;
+                       check_vendor_extension(paddrv5, v5param);
+                       return v5param;
+               }
+       }
+       if (paddrv4) {
+               struct einj_parameter *v4param;
+
+               v4param = acpi_os_map_memory(paddrv4, sizeof(*v4param));
+               if (!v4param)
+                       return NULL;
+               if (v4param->reserved1 || v4param->reserved2) {
+                       acpi_os_unmap_memory(v4param, sizeof(*v4param));
+                       return NULL;
+               }
+               return v4param;
+       }
 
-       return paddr;
+       return NULL;
 }
 
 /* do sanity check to trigger table */
@@ -184,7 +260,7 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
        if (trigger_tab->header_size != sizeof(struct acpi_einj_trigger))
                return -EINVAL;
        if (trigger_tab->table_size > PAGE_SIZE ||
-           trigger_tab->table_size <= trigger_tab->header_size)
+           trigger_tab->table_size < trigger_tab->header_size)
                return -EINVAL;
        if (trigger_tab->entry_count !=
            (trigger_tab->table_size - trigger_tab->header_size) /
@@ -194,8 +270,29 @@ static int einj_check_trigger_header(struct acpi_einj_trigger *trigger_tab)
        return 0;
 }
 
+static struct acpi_generic_address *einj_get_trigger_parameter_region(
+       struct acpi_einj_trigger *trigger_tab, u64 param1, u64 param2)
+{
+       int i;
+       struct acpi_whea_header *entry;
+
+       entry = (struct acpi_whea_header *)
+               ((char *)trigger_tab + sizeof(struct acpi_einj_trigger));
+       for (i = 0; i < trigger_tab->entry_count; i++) {
+               if (entry->action == ACPI_EINJ_TRIGGER_ERROR &&
+               entry->instruction == ACPI_EINJ_WRITE_REGISTER_VALUE &&
+               entry->register_region.space_id ==
+                       ACPI_ADR_SPACE_SYSTEM_MEMORY &&
+               (entry->register_region.address & param2) == (param1 & param2))
+                       return &entry->register_region;
+               entry++;
+       }
+
+       return NULL;
+}
 /* Execute instructions in trigger error action table */
-static int __einj_error_trigger(u64 trigger_paddr)
+static int __einj_error_trigger(u64 trigger_paddr, u32 type,
+                               u64 param1, u64 param2)
 {
        struct acpi_einj_trigger *trigger_tab = NULL;
        struct apei_exec_context trigger_ctx;
@@ -204,14 +301,16 @@ static int __einj_error_trigger(u64 trigger_paddr)
        struct resource *r;
        u32 table_size;
        int rc = -EIO;
+       struct acpi_generic_address *trigger_param_region = NULL;
 
        r = request_mem_region(trigger_paddr, sizeof(*trigger_tab),
                               "APEI EINJ Trigger Table");
        if (!r) {
                pr_err(EINJ_PFX
-       "Can not request iomem region <%016llx-%016llx> for Trigger table.\n",
+       "Can not request [mem %#010llx-%#010llx] for Trigger table\n",
                       (unsigned long long)trigger_paddr,
-                      (unsigned long long)trigger_paddr+sizeof(*trigger_tab));
+                      (unsigned long long)trigger_paddr +
+                           sizeof(*trigger_tab) - 1);
                goto out;
        }
        trigger_tab = ioremap_cache(trigger_paddr, sizeof(*trigger_tab));
@@ -225,6 +324,11 @@ static int __einj_error_trigger(u64 trigger_paddr)
                           "The trigger error action table is invalid\n");
                goto out_rel_header;
        }
+
+       /* No action structures in the TRIGGER_ERROR table, nothing to do */
+       if (!trigger_tab->entry_count)
+               goto out_rel_header;
+
        rc = -EIO;
        table_size = trigger_tab->table_size;
        r = request_mem_region(trigger_paddr + sizeof(*trigger_tab),
@@ -232,9 +336,9 @@ static int __einj_error_trigger(u64 trigger_paddr)
                               "APEI EINJ Trigger Table");
        if (!r) {
                pr_err(EINJ_PFX
-"Can not request iomem region <%016llx-%016llx> for Trigger Table Entry.\n",
-                      (unsigned long long)trigger_paddr+sizeof(*trigger_tab),
-                      (unsigned long long)trigger_paddr + table_size);
+"Can not request [mem %#010llx-%#010llx] for Trigger Table Entry\n",
+                      (unsigned long long)trigger_paddr + sizeof(*trigger_tab),
+                      (unsigned long long)trigger_paddr + table_size - 1);
                goto out_rel_header;
        }
        iounmap(trigger_tab);
@@ -255,6 +359,30 @@ static int __einj_error_trigger(u64 trigger_paddr)
        rc = apei_resources_sub(&trigger_resources, &einj_resources);
        if (rc)
                goto out_fini;
+       /*
+        * Some firmware will access the target address specified in
+        * param1 to trigger the error when injecting a memory error.
+        * This causes a resource conflict with regular memory, so
+        * remove it from the trigger table resources.
+        */
+       if (param_extension && (type & 0x0038) && param2) {
+               struct apei_resources addr_resources;
+               apei_resources_init(&addr_resources);
+               trigger_param_region = einj_get_trigger_parameter_region(
+                       trigger_tab, param1, param2);
+               if (trigger_param_region) {
+                       rc = apei_resources_add(&addr_resources,
+                               trigger_param_region->address,
+                               trigger_param_region->bit_width/8, true);
+                       if (rc)
+                               goto out_fini;
+                       rc = apei_resources_sub(&trigger_resources,
+                                       &addr_resources);
+               }
+               apei_resources_fini(&addr_resources);
+               if (rc)
+                       goto out_fini;
+       }
        rc = apei_resources_request(&trigger_resources, "APEI EINJ Trigger");
        if (rc)
                goto out_fini;
@@ -293,12 +421,56 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
        if (rc)
                return rc;
        apei_exec_ctx_set_input(&ctx, type);
-       rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
-       if (rc)
-               return rc;
-       if (einj_param) {
-               writeq(param1, &einj_param->param1);
-               writeq(param2, &einj_param->param2);
+       if (acpi5) {
+               struct set_error_type_with_address *v5param = einj_param;
+
+               v5param->type = type;
+               if (type & 0x80000000) {
+                       switch (vendor_flags) {
+                       case SETWA_FLAGS_APICID:
+                               v5param->apicid = param1;
+                               break;
+                       case SETWA_FLAGS_MEM:
+                               v5param->memory_address = param1;
+                               v5param->memory_address_range = param2;
+                               break;
+                       case SETWA_FLAGS_PCIE_SBDF:
+                               v5param->pcie_sbdf = param1;
+                               break;
+                       }
+                       v5param->flags = vendor_flags;
+               } else {
+                       switch (type) {
+                       case ACPI_EINJ_PROCESSOR_CORRECTABLE:
+                       case ACPI_EINJ_PROCESSOR_UNCORRECTABLE:
+                       case ACPI_EINJ_PROCESSOR_FATAL:
+                               v5param->apicid = param1;
+                               v5param->flags = SETWA_FLAGS_APICID;
+                               break;
+                       case ACPI_EINJ_MEMORY_CORRECTABLE:
+                       case ACPI_EINJ_MEMORY_UNCORRECTABLE:
+                       case ACPI_EINJ_MEMORY_FATAL:
+                               v5param->memory_address = param1;
+                               v5param->memory_address_range = param2;
+                               v5param->flags = SETWA_FLAGS_MEM;
+                               break;
+                       case ACPI_EINJ_PCIX_CORRECTABLE:
+                       case ACPI_EINJ_PCIX_UNCORRECTABLE:
+                       case ACPI_EINJ_PCIX_FATAL:
+                               v5param->pcie_sbdf = param1;
+                               v5param->flags = SETWA_FLAGS_PCIE_SBDF;
+                               break;
+                       }
+               }
+       } else {
+               rc = apei_exec_run(&ctx, ACPI_EINJ_SET_ERROR_TYPE);
+               if (rc)
+                       return rc;
+               if (einj_param) {
+                       struct einj_parameter *v4param = einj_param;
+                       v4param->param1 = param1;
+                       v4param->param2 = param2;
+               }
        }
        rc = apei_exec_run(&ctx, ACPI_EINJ_EXECUTE_OPERATION);
        if (rc)
@@ -324,7 +496,7 @@ static int __einj_error_inject(u32 type, u64 param1, u64 param2)
        if (rc)
                return rc;
        trigger_paddr = apei_exec_ctx_get_output(&ctx);
-       rc = __einj_error_trigger(trigger_paddr);
+       rc = __einj_error_trigger(trigger_paddr, type, param1, param2);
        if (rc)
                return rc;
        rc = apei_exec_run_optional(&ctx, ACPI_EINJ_END_OPERATION);
@@ -408,15 +580,25 @@ static int error_type_set(void *data, u64 val)
 {
        int rc;
        u32 available_error_type = 0;
+       u32 tval, vendor;
+
+       /*
+        * Vendor-defined types have the 0x80000000 bit set and are
+        * not enumerated by ACPI_EINJ_GET_ERROR_TYPE.
+        */
+       vendor = val & 0x80000000;
+       tval = val & 0x7fffffff;
 
        /* Only one error type can be specified */
-       if (val & (val - 1))
-               return -EINVAL;
-       rc = einj_get_available_error_type(&available_error_type);
-       if (rc)
-               return rc;
-       if (!(val & available_error_type))
+       if (tval & (tval - 1))
                return -EINVAL;
+       if (!vendor) {
+               rc = einj_get_available_error_type(&available_error_type);
+               if (rc)
+                       return rc;
+               if (!(val & available_error_type))
+                       return -EINVAL;
+       }
        error_type = val;
 
        return 0;
@@ -455,7 +637,6 @@ static int einj_check_table(struct acpi_table_einj *einj_tab)
 static int __init einj_init(void)
 {
        int rc;
-       u64 param_paddr;
        acpi_status status;
        struct dentry *fentry;
        struct apei_exec_context ctx;
@@ -465,10 +646,9 @@ static int __init einj_init(void)
 
        status = acpi_get_table(ACPI_SIG_EINJ, 0,
                                (struct acpi_table_header **)&einj_tab);
-       if (status == AE_NOT_FOUND) {
-               pr_info(EINJ_PFX "Table is not found!\n");
+       if (status == AE_NOT_FOUND)
                return -ENODEV;
-       else if (ACPI_FAILURE(status)) {
+       else if (ACPI_FAILURE(status)) {
                const char *msg = acpi_format_exception(status);
                pr_err(EINJ_PFX "Failed to get table, %s\n", msg);
                return -EINVAL;
@@ -509,23 +689,30 @@ static int __init einj_init(void)
        rc = apei_exec_pre_map_gars(&ctx);
        if (rc)
                goto err_release;
-       if (param_extension) {
-               param_paddr = einj_get_parameter_address();
-               if (param_paddr) {
-                       einj_param = ioremap(param_paddr, sizeof(*einj_param));
-                       rc = -ENOMEM;
-                       if (!einj_param)
-                               goto err_unmap;
-                       fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
-                                                   einj_debug_dir, &error_param1);
-                       if (!fentry)
-                               goto err_unmap;
-                       fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
-                                                   einj_debug_dir, &error_param2);
-                       if (!fentry)
-                               goto err_unmap;
-               } else
-                       pr_warn(EINJ_PFX "Parameter extension is not supported.\n");
+
+       einj_param = einj_get_parameter_address();
+       if ((param_extension || acpi5) && einj_param) {
+               fentry = debugfs_create_x64("param1", S_IRUSR | S_IWUSR,
+                                           einj_debug_dir, &error_param1);
+               if (!fentry)
+                       goto err_unmap;
+               fentry = debugfs_create_x64("param2", S_IRUSR | S_IWUSR,
+                                           einj_debug_dir, &error_param2);
+               if (!fentry)
+                       goto err_unmap;
+       }
+
+       if (vendor_dev[0]) {
+               vendor_blob.data = vendor_dev;
+               vendor_blob.size = strlen(vendor_dev);
+               fentry = debugfs_create_blob("vendor", S_IRUSR,
+                                            einj_debug_dir, &vendor_blob);
+               if (!fentry)
+                       goto err_unmap;
+               fentry = debugfs_create_x32("vendor_flags", S_IRUSR | S_IWUSR,
+                                           einj_debug_dir, &vendor_flags);
+               if (!fentry)
+                       goto err_unmap;
        }
 
        pr_info(EINJ_PFX "Error INJection is initialized.\n");
@@ -533,8 +720,13 @@ static int __init einj_init(void)
        return 0;
 
 err_unmap:
-       if (einj_param)
-               iounmap(einj_param);
+       if (einj_param) {
+               acpi_size size = (acpi5) ?
+                       sizeof(struct set_error_type_with_address) :
+                       sizeof(struct einj_parameter);
+
+               acpi_os_unmap_memory(einj_param, size);
+       }
        apei_exec_post_unmap_gars(&ctx);
 err_release:
        apei_resources_release(&einj_resources);
@@ -550,8 +742,13 @@ static void __exit einj_exit(void)
 {
        struct apei_exec_context ctx;
 
-       if (einj_param)
-               iounmap(einj_param);
+       if (einj_param) {
+               acpi_size size = (acpi5) ?
+                       sizeof(struct set_error_type_with_address) :
+                       sizeof(struct einj_parameter);
+
+               acpi_os_unmap_memory(einj_param, size);
+       }
        einj_exec_ctx_init(&ctx);
        apei_exec_post_unmap_gars(&ctx);
        apei_resources_release(&einj_resources);
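For reference, a worked example of the address-mask match used by einj_get_trigger_parameter_region() above; all values are illustrative:

    u64 param1 = 0x12345000ULL;     /* target address written via debugfs */
    u64 param2 = 0xfffff000ULL;     /* address mask written via debugfs   */
    u64 reg    = 0x12345678ULL;     /* a trigger entry's register address */

    /* Matches, because the masked register address equals the masked target */
    bool match = (reg & param2) == (param1 & param2);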
index 6a9e3bad13f418d93572faff72d18a4df124f8e8..eb9fab5b96e4d3dca071e11e8dbdec2405b23508 100644 (file)
@@ -1127,10 +1127,9 @@ static int __init erst_init(void)
 
        status = acpi_get_table(ACPI_SIG_ERST, 0,
                                (struct acpi_table_header **)&erst_tab);
-       if (status == AE_NOT_FOUND) {
-               pr_info(ERST_PFX "Table is not found!\n");
+       if (status == AE_NOT_FOUND)
                goto err;
-       else if (ACPI_FAILURE(status)) {
+       else if (ACPI_FAILURE(status)) {
                const char *msg = acpi_format_exception(status);
                pr_err(ERST_PFX "Failed to get table, %s\n", msg);
                rc = -EINVAL;
index ebaf037a787b5479dcc87100e883a3440b8ef25f..9b3cac0abecc33c672c884ece470dfb494d1836c 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/acpi.h>
+#include <linux/acpi_io.h>
 #include <linux/io.h>
 #include <linux/interrupt.h>
 #include <linux/timer.h>
@@ -45,8 +46,9 @@
 #include <linux/irq_work.h>
 #include <linux/llist.h>
 #include <linux/genalloc.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
 #include <acpi/apei.h>
-#include <acpi/atomicio.h>
 #include <acpi/hed.h>
 #include <asm/mce.h>
 #include <asm/tlbflush.h>
@@ -299,7 +301,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
        if (!ghes)
                return ERR_PTR(-ENOMEM);
        ghes->generic = generic;
-       rc = acpi_pre_map_gar(&generic->error_status_address);
+       rc = acpi_os_map_generic_address(&generic->error_status_address);
        if (rc)
                goto err_free;
        error_block_length = generic->error_block_length;
@@ -319,7 +321,7 @@ static struct ghes *ghes_new(struct acpi_hest_generic *generic)
        return ghes;
 
 err_unmap:
-       acpi_post_unmap_gar(&generic->error_status_address);
+       acpi_os_unmap_generic_address(&generic->error_status_address);
 err_free:
        kfree(ghes);
        return ERR_PTR(rc);
@@ -328,7 +330,7 @@ err_free:
 static void ghes_fini(struct ghes *ghes)
 {
        kfree(ghes->estatus);
-       acpi_post_unmap_gar(&ghes->generic->error_status_address);
+       acpi_os_unmap_generic_address(&ghes->generic->error_status_address);
 }
 
 enum {
@@ -399,7 +401,7 @@ static int ghes_read_estatus(struct ghes *ghes, int silent)
        u32 len;
        int rc;
 
-       rc = acpi_atomic_read(&buf_paddr, &g->error_status_address);
+       rc = apei_read(&buf_paddr, &g->error_status_address);
        if (rc) {
                if (!silent && printk_ratelimit())
                        pr_warning(FW_WARN GHES_PFX
@@ -476,6 +478,27 @@ static void ghes_do_proc(const struct acpi_hest_generic_status *estatus)
                        }
 #endif
                }
+#ifdef CONFIG_ACPI_APEI_PCIEAER
+               else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
+                                     CPER_SEC_PCIE)) {
+                       struct cper_sec_pcie *pcie_err;
+                       pcie_err = (struct cper_sec_pcie *)(gdata+1);
+                       if (sev == GHES_SEV_RECOVERABLE &&
+                           sec_sev == GHES_SEV_RECOVERABLE &&
+                           pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
+                           pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
+                               unsigned int devfn;
+                               int aer_severity;
+                               devfn = PCI_DEVFN(pcie_err->device_id.device,
+                                                 pcie_err->device_id.function);
+                               aer_severity = cper_severity_to_aer(sev);
+                               aer_recover_queue(pcie_err->device_id.segment,
+                                                 pcie_err->device_id.bus,
+                                                 devfn, aer_severity);
+                       }
+
+               }
+#endif
        }
 }
 
@@ -483,16 +506,22 @@ static void __ghes_print_estatus(const char *pfx,
                                 const struct acpi_hest_generic *generic,
                                 const struct acpi_hest_generic_status *estatus)
 {
+       static atomic_t seqno;
+       unsigned int curr_seqno;
+       char pfx_seq[64];
+
        if (pfx == NULL) {
                if (ghes_severity(estatus->error_severity) <=
                    GHES_SEV_CORRECTED)
-                       pfx = KERN_WARNING HW_ERR;
+                       pfx = KERN_WARNING;
                else
-                       pfx = KERN_ERR HW_ERR;
+                       pfx = KERN_ERR;
        }
+       curr_seqno = atomic_inc_return(&seqno);
+       snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
        printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
-              pfx, generic->header.source_id);
-       apei_estatus_print(pfx, estatus);
+              pfx_seq, generic->header.source_id);
+       apei_estatus_print(pfx_seq, estatus);
 }
 
 static int ghes_print_estatus(const char *pfx,
@@ -711,26 +740,34 @@ static int ghes_notify_sci(struct notifier_block *this,
        return ret;
 }
 
+static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
+{
+       struct llist_node *next, *tail = NULL;
+
+       while (llnode) {
+               next = llnode->next;
+               llnode->next = tail;
+               tail = llnode;
+               llnode = next;
+       }
+
+       return tail;
+}
+
 static void ghes_proc_in_irq(struct irq_work *irq_work)
 {
-       struct llist_node *llnode, *next, *tail = NULL;
+       struct llist_node *llnode, *next;
        struct ghes_estatus_node *estatus_node;
        struct acpi_hest_generic *generic;
        struct acpi_hest_generic_status *estatus;
        u32 len, node_len;
 
+       llnode = llist_del_all(&ghes_estatus_llist);
        /*
         * Because the time order of estatus in list is reversed,
         * revert it back to proper order.
         */
-       llnode = llist_del_all(&ghes_estatus_llist);
-       while (llnode) {
-               next = llnode->next;
-               llnode->next = tail;
-               tail = llnode;
-               llnode = next;
-       }
-       llnode = tail;
+       llnode = llist_nodes_reverse(llnode);
        while (llnode) {
                next = llnode->next;
                estatus_node = llist_entry(llnode, struct ghes_estatus_node,
@@ -750,6 +787,32 @@ static void ghes_proc_in_irq(struct irq_work *irq_work)
        }
 }
 
+static void ghes_print_queued_estatus(void)
+{
+       struct llist_node *llnode;
+       struct ghes_estatus_node *estatus_node;
+       struct acpi_hest_generic *generic;
+       struct acpi_hest_generic_status *estatus;
+       u32 len, node_len;
+
+       llnode = llist_del_all(&ghes_estatus_llist);
+       /*
+        * Because the time order of estatus in list is reversed,
+        * revert it back to proper order.
+        */
+       llnode = llist_nodes_reverse(llnode);
+       while (llnode) {
+               estatus_node = llist_entry(llnode, struct ghes_estatus_node,
+                                          llnode);
+               estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
+               len = apei_estatus_len(estatus);
+               node_len = GHES_ESTATUS_NODE_LEN(len);
+               generic = estatus_node->generic;
+               ghes_print_estatus(NULL, generic, estatus);
+               llnode = llnode->next;
+       }
+}
+
 static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 {
        struct ghes *ghes, *ghes_global = NULL;
@@ -775,7 +838,8 @@ static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
 
        if (sev_global >= GHES_SEV_PANIC) {
                oops_begin();
-               __ghes_print_estatus(KERN_EMERG HW_ERR, ghes_global->generic,
+               ghes_print_queued_estatus();
+               __ghes_print_estatus(KERN_EMERG, ghes_global->generic,
                                     ghes_global->estatus);
                /* reboot to log the error! */
                if (panic_timeout == 0)
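A sketch of the consumption pattern the new llist_nodes_reverse() helper enables, mirroring ghes_proc_in_irq() and ghes_print_queued_estatus() above; the consume() callback is purely illustrative:

    struct llist_node *n, *next;

    n = llist_del_all(&ghes_estatus_llist);    /* entries arrive newest first */
    n = llist_nodes_reverse(n);                /* restore submission order */
    for (; n; n = next) {
            next = n->next;
            consume(llist_entry(n, struct ghes_estatus_node, llnode));
    }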
index ee7fddc4665c9788d1502f0f28304493cede903b..7f00cf38098f21bdbe0d6a69ba624d18b00f7cb3 100644 (file)
@@ -221,10 +221,9 @@ void __init acpi_hest_init(void)
 
        status = acpi_get_table(ACPI_SIG_HEST, 0,
                                (struct acpi_table_header **)&hest_tab);
-       if (status == AE_NOT_FOUND) {
-               pr_info(HEST_PFX "Table not found.\n");
+       if (status == AE_NOT_FOUND)
                goto err;
-       else if (ACPI_FAILURE(status)) {
+       else if (ACPI_FAILURE(status)) {
                const char *msg = acpi_format_exception(status);
                pr_err(HEST_PFX "Failed to get table, %s\n", msg);
                rc = -EINVAL;
diff --git a/drivers/acpi/atomicio.c b/drivers/acpi/atomicio.c
deleted file mode 100644 (file)
index cfc0cc1..0000000
+++ /dev/null
@@ -1,365 +0,0 @@
-/*
- * atomicio.c - ACPI IO memory pre-mapping/post-unmapping, then
- * accessing in atomic context.
- *
- * This is used for NMI handler to access IO memory area, because
- * ioremap/iounmap can not be used in NMI handler. The IO memory area
- * is pre-mapped in process context and accessed in NMI handler.
- *
- * Copyright (C) 2009-2010, Intel Corp.
- *     Author: Huang Ying <ying.huang@intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- */
-
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-#include <linux/io.h>
-#include <linux/kref.h>
-#include <linux/rculist.h>
-#include <linux/interrupt.h>
-#include <linux/slab.h>
-#include <acpi/atomicio.h>
-
-#define ACPI_PFX "ACPI: "
-
-static LIST_HEAD(acpi_iomaps);
-/*
- * Used for mutual exclusion between writers of acpi_iomaps list, for
- * synchronization between readers and writer, RCU is used.
- */
-static DEFINE_SPINLOCK(acpi_iomaps_lock);
-
-struct acpi_iomap {
-       struct list_head list;
-       void __iomem *vaddr;
-       unsigned long size;
-       phys_addr_t paddr;
-       struct kref ref;
-};
-
-/* acpi_iomaps_lock or RCU read lock must be held before calling */
-static struct acpi_iomap *__acpi_find_iomap(phys_addr_t paddr,
-                                           unsigned long size)
-{
-       struct acpi_iomap *map;
-
-       list_for_each_entry_rcu(map, &acpi_iomaps, list) {
-               if (map->paddr + map->size >= paddr + size &&
-                   map->paddr <= paddr)
-                       return map;
-       }
-       return NULL;
-}
-
-/*
- * Atomic "ioremap" used by NMI handler, if the specified IO memory
- * area is not pre-mapped, NULL will be returned.
- *
- * acpi_iomaps_lock or RCU read lock must be held before calling
- */
-static void __iomem *__acpi_ioremap_fast(phys_addr_t paddr,
-                                        unsigned long size)
-{
-       struct acpi_iomap *map;
-
-       map = __acpi_find_iomap(paddr, size/8);
-       if (map)
-               return map->vaddr + (paddr - map->paddr);
-       else
-               return NULL;
-}
-
-/* acpi_iomaps_lock must be held before calling */
-static void __iomem *__acpi_try_ioremap(phys_addr_t paddr,
-                                       unsigned long size)
-{
-       struct acpi_iomap *map;
-
-       map = __acpi_find_iomap(paddr, size);
-       if (map) {
-               kref_get(&map->ref);
-               return map->vaddr + (paddr - map->paddr);
-       } else
-               return NULL;
-}
-
-/*
- * Used to pre-map the specified IO memory area. First try to find
- * whether the area is already pre-mapped, if it is, increase the
- * reference count (in __acpi_try_ioremap) and return; otherwise, do
- * the real ioremap, and add the mapping into acpi_iomaps list.
- */
-static void __iomem *acpi_pre_map(phys_addr_t paddr,
-                                 unsigned long size)
-{
-       void __iomem *vaddr;
-       struct acpi_iomap *map;
-       unsigned long pg_sz, flags;
-       phys_addr_t pg_off;
-
-       spin_lock_irqsave(&acpi_iomaps_lock, flags);
-       vaddr = __acpi_try_ioremap(paddr, size);
-       spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-       if (vaddr)
-               return vaddr;
-
-       pg_off = paddr & PAGE_MASK;
-       pg_sz = ((paddr + size + PAGE_SIZE - 1) & PAGE_MASK) - pg_off;
-       vaddr = ioremap(pg_off, pg_sz);
-       if (!vaddr)
-               return NULL;
-       map = kmalloc(sizeof(*map), GFP_KERNEL);
-       if (!map)
-               goto err_unmap;
-       INIT_LIST_HEAD(&map->list);
-       map->paddr = pg_off;
-       map->size = pg_sz;
-       map->vaddr = vaddr;
-       kref_init(&map->ref);
-
-       spin_lock_irqsave(&acpi_iomaps_lock, flags);
-       vaddr = __acpi_try_ioremap(paddr, size);
-       if (vaddr) {
-               spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-               iounmap(map->vaddr);
-               kfree(map);
-               return vaddr;
-       }
-       list_add_tail_rcu(&map->list, &acpi_iomaps);
-       spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-
-       return map->vaddr + (paddr - map->paddr);
-err_unmap:
-       iounmap(vaddr);
-       return NULL;
-}
-
-/* acpi_iomaps_lock must be held before calling */
-static void __acpi_kref_del_iomap(struct kref *ref)
-{
-       struct acpi_iomap *map;
-
-       map = container_of(ref, struct acpi_iomap, ref);
-       list_del_rcu(&map->list);
-}
-
-/*
- * Used to post-unmap the specified IO memory area. The iounmap is
- * done only if the reference count goes zero.
- */
-static void acpi_post_unmap(phys_addr_t paddr, unsigned long size)
-{
-       struct acpi_iomap *map;
-       unsigned long flags;
-       int del;
-
-       spin_lock_irqsave(&acpi_iomaps_lock, flags);
-       map = __acpi_find_iomap(paddr, size);
-       BUG_ON(!map);
-       del = kref_put(&map->ref, __acpi_kref_del_iomap);
-       spin_unlock_irqrestore(&acpi_iomaps_lock, flags);
-
-       if (!del)
-               return;
-
-       synchronize_rcu();
-       iounmap(map->vaddr);
-       kfree(map);
-}
-
-/* In NMI handler, should set silent = 1 */
-static int acpi_check_gar(struct acpi_generic_address *reg,
-                         u64 *paddr, int silent)
-{
-       u32 width, space_id;
-
-       width = reg->bit_width;
-       space_id = reg->space_id;
-       /* Handle possible alignment issues */
-       memcpy(paddr, &reg->address, sizeof(*paddr));
-       if (!*paddr) {
-               if (!silent)
-                       pr_warning(FW_BUG ACPI_PFX
-                       "Invalid physical address in GAR [0x%llx/%u/%u]\n",
-                                  *paddr, width, space_id);
-               return -EINVAL;
-       }
-
-       if ((width != 8) && (width != 16) && (width != 32) && (width != 64)) {
-               if (!silent)
-                       pr_warning(FW_BUG ACPI_PFX
-                                  "Invalid bit width in GAR [0x%llx/%u/%u]\n",
-                                  *paddr, width, space_id);
-               return -EINVAL;
-       }
-
-       if (space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY &&
-           space_id != ACPI_ADR_SPACE_SYSTEM_IO) {
-               if (!silent)
-                       pr_warning(FW_BUG ACPI_PFX
-                       "Invalid address space type in GAR [0x%llx/%u/%u]\n",
-                                  *paddr, width, space_id);
-               return -EINVAL;
-       }
-
-       return 0;
-}
-
-/* Pre-map, working on GAR */
-int acpi_pre_map_gar(struct acpi_generic_address *reg)
-{
-       u64 paddr;
-       void __iomem *vaddr;
-       int rc;
-
-       if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
-               return 0;
-
-       rc = acpi_check_gar(reg, &paddr, 0);
-       if (rc)
-               return rc;
-
-       vaddr = acpi_pre_map(paddr, reg->bit_width / 8);
-       if (!vaddr)
-               return -EIO;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_pre_map_gar);
-
-/* Post-unmap, working on GAR */
-int acpi_post_unmap_gar(struct acpi_generic_address *reg)
-{
-       u64 paddr;
-       int rc;
-
-       if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
-               return 0;
-
-       rc = acpi_check_gar(reg, &paddr, 0);
-       if (rc)
-               return rc;
-
-       acpi_post_unmap(paddr, reg->bit_width / 8);
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(acpi_post_unmap_gar);
-
-/*
- * Can be used in atomic (including NMI) or process context. RCU read
- * lock can only be released after the IO memory area accessing.
- */
-static int acpi_atomic_read_mem(u64 paddr, u64 *val, u32 width)
-{
-       void __iomem *addr;
-
-       rcu_read_lock();
-       addr = __acpi_ioremap_fast(paddr, width);
-       switch (width) {
-       case 8:
-               *val = readb(addr);
-               break;
-       case 16:
-               *val = readw(addr);
-               break;
-       case 32:
-               *val = readl(addr);
-               break;
-#ifdef readq
-       case 64:
-               *val = readq(addr);
-               break;
-#endif
-       default:
-               return -EINVAL;
-       }
-       rcu_read_unlock();
-
-       return 0;
-}
-
-static int acpi_atomic_write_mem(u64 paddr, u64 val, u32 width)
-{
-       void __iomem *addr;
-
-       rcu_read_lock();
-       addr = __acpi_ioremap_fast(paddr, width);
-       switch (width) {
-       case 8:
-               writeb(val, addr);
-               break;
-       case 16:
-               writew(val, addr);
-               break;
-       case 32:
-               writel(val, addr);
-               break;
-#ifdef writeq
-       case 64:
-               writeq(val, addr);
-               break;
-#endif
-       default:
-               return -EINVAL;
-       }
-       rcu_read_unlock();
-
-       return 0;
-}
-
-/* GAR accessing in atomic (including NMI) or process context */
-int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg)
-{
-       u64 paddr;
-       int rc;
-
-       rc = acpi_check_gar(reg, &paddr, 1);
-       if (rc)
-               return rc;
-
-       *val = 0;
-       switch (reg->space_id) {
-       case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-               return acpi_atomic_read_mem(paddr, val, reg->bit_width);
-       case ACPI_ADR_SPACE_SYSTEM_IO:
-               return acpi_os_read_port(paddr, (u32 *)val, reg->bit_width);
-       default:
-               return -EINVAL;
-       }
-}
-EXPORT_SYMBOL_GPL(acpi_atomic_read);
-
-int acpi_atomic_write(u64 val, struct acpi_generic_address *reg)
-{
-       u64 paddr;
-       int rc;
-
-       rc = acpi_check_gar(reg, &paddr, 1);
-       if (rc)
-               return rc;
-
-       switch (reg->space_id) {
-       case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-               return acpi_atomic_write_mem(paddr, val, reg->bit_width);
-       case ACPI_ADR_SPACE_SYSTEM_IO:
-               return acpi_os_write_port(paddr, val, reg->bit_width);
-       default:
-               return -EINVAL;
-       }
-}
-EXPORT_SYMBOL_GPL(acpi_atomic_write);
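The deleted atomicio.c existed because ioremap()/iounmap() cannot be called from NMI context: IO memory regions were mapped once in process context, published on an RCU-protected list, and only looked up from the NMI handler. Later hunks in this merge move that job into osl.c (acpi_os_map_generic_address() plus the new 64-bit accessors). A minimal sketch of the pattern, with hypothetical names (premap_region, premap_lookup) and the usual <linux/list.h>, <linux/slab.h>, <linux/io.h> headers assumed:

static LIST_HEAD(premap_list);			/* hypothetical mapping list */
static DEFINE_SPINLOCK(premap_lock);

struct premap {					/* hypothetical */
	struct list_head list;
	phys_addr_t paddr;
	unsigned long size;
	void __iomem *vaddr;
};

/* Process context: map once and publish under RCU. */
static int premap_region(phys_addr_t paddr, unsigned long size)
{
	struct premap *m = kmalloc(sizeof(*m), GFP_KERNEL);

	if (!m)
		return -ENOMEM;
	m->vaddr = ioremap(paddr, size);
	if (!m->vaddr) {
		kfree(m);
		return -EIO;
	}
	m->paddr = paddr;
	m->size = size;
	spin_lock(&premap_lock);
	list_add_tail_rcu(&m->list, &premap_list);
	spin_unlock(&premap_lock);
	return 0;
}

/* NMI context: lookup only; no ioremap()/iounmap() allowed here. */
static void __iomem *premap_lookup(phys_addr_t paddr, unsigned long size)
{
	struct premap *m;

	list_for_each_entry_rcu(m, &premap_list, list)
		if (m->paddr <= paddr && paddr + size <= m->paddr + m->size)
			return m->vaddr + (paddr - m->paddr);
	return NULL;
}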
index 3b5c3189fd995e4cc200c532b2d05ba6b274fac7..e56f3be7b07d36fceb32dce25ede0fa35308f0e7 100644 (file)
@@ -45,6 +45,8 @@ static int pxm_to_node_map[MAX_PXM_DOMAINS]
 static int node_to_pxm_map[MAX_NUMNODES]
                        = { [0 ... MAX_NUMNODES - 1] = PXM_INVAL };
 
+unsigned char acpi_srat_revision __initdata;
+
 int pxm_to_node(int pxm)
 {
        if (pxm < 0)
@@ -255,9 +257,13 @@ acpi_parse_memory_affinity(struct acpi_subtable_header * header,
 
 static int __init acpi_parse_srat(struct acpi_table_header *table)
 {
+       struct acpi_table_srat *srat;
        if (!table)
                return -EINVAL;
 
+       srat = (struct acpi_table_srat *)table;
+       acpi_srat_revision = srat->header.revision;
+
        /* Real work done in acpi_table_parse_srat below. */
 
        return 0;
index 096787b43c960a71afd971ed8f312ba39e7787a9..7a2035fa8c713d660a697d0d49e0a430476ed9fa 100644 (file)
 #include <linux/acpi_io.h>
 #include <acpi/acpiosxf.h>
 
+/* ACPI NVS regions, APEI may use it */
+
+struct nvs_region {
+       __u64 phys_start;
+       __u64 size;
+       struct list_head node;
+};
+
+static LIST_HEAD(nvs_region_list);
+
+#ifdef CONFIG_ACPI_SLEEP
+static int suspend_nvs_register(unsigned long start, unsigned long size);
+#else
+static inline int suspend_nvs_register(unsigned long a, unsigned long b)
+{
+       return 0;
+}
+#endif
+
+int acpi_nvs_register(__u64 start, __u64 size)
+{
+       struct nvs_region *region;
+
+       region = kmalloc(sizeof(*region), GFP_KERNEL);
+       if (!region)
+               return -ENOMEM;
+       region->phys_start = start;
+       region->size = size;
+       list_add_tail(&region->node, &nvs_region_list);
+
+       return suspend_nvs_register(start, size);
+}
+
+int acpi_nvs_for_each_region(int (*func)(__u64 start, __u64 size, void *data),
+                            void *data)
+{
+       int rc;
+       struct nvs_region *region;
+
+       list_for_each_entry(region, &nvs_region_list, node) {
+               rc = func(region->phys_start, region->size, data);
+               if (rc)
+                       return rc;
+       }
+
+       return 0;
+}
+
+
+#ifdef CONFIG_ACPI_SLEEP
 /*
  * Platforms, like ACPI, may want us to save some memory used by them during
  * suspend and to restore the contents of this memory during the subsequent
@@ -41,7 +91,7 @@ static LIST_HEAD(nvs_list);
  *     things so that the data from page-aligned addresses in this region will
  *     be copied into separate RAM pages.
  */
-int suspend_nvs_register(unsigned long start, unsigned long size)
+static int suspend_nvs_register(unsigned long start, unsigned long size)
 {
        struct nvs_page *entry, *next;
 
@@ -159,3 +209,4 @@ void suspend_nvs_restore(void)
                if (entry->data)
                        memcpy(entry->kaddr, entry->data, entry->size);
 }
+#endif
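The hunk above adds two small helpers: acpi_nvs_register() records an ACPI NVS region (and, under CONFIG_ACPI_SLEEP, also feeds it to the suspend NVS machinery), while acpi_nvs_for_each_region() walks the recorded regions and stops at the first non-zero return from the callback. A hedged usage sketch; the dump_nvs_region()/dump_all_nvs_regions() names are hypothetical:

/* Hypothetical caller: report every registered NVS region. */
static int dump_nvs_region(__u64 start, __u64 size, void *data)
{
	int *count = data;

	pr_info("NVS region: 0x%llx, %llu bytes\n",
		(unsigned long long)start, (unsigned long long)size);
	(*count)++;
	return 0;	/* a non-zero return would stop the walk early */
}

static void dump_all_nvs_regions(void)
{
	int count = 0;

	acpi_nvs_for_each_region(dump_nvs_region, &count);
	pr_info("%d NVS region(s) registered\n", count);
}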
index f31c5c5f1b7e083351e013649f1e1d73fdd74fa6..412a1e04a9226a84970654c433597455cb53f0ca 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/highmem.h>
 #include <linux/pci.h>
 #include <linux/interrupt.h>
 #include <linux/kmod.h>
@@ -83,19 +84,6 @@ static struct workqueue_struct *kacpi_notify_wq;
 struct workqueue_struct *kacpi_hotplug_wq;
 EXPORT_SYMBOL(kacpi_hotplug_wq);
 
-struct acpi_res_list {
-       resource_size_t start;
-       resource_size_t end;
-       acpi_adr_space_type resource_type; /* IO port, System memory, ...*/
-       char name[5];   /* only can have a length of 4 chars, make use of this
-                          one instead of res->name, no need to kalloc then */
-       struct list_head resource_list;
-       int count;
-};
-
-static LIST_HEAD(resource_list_head);
-static DEFINE_SPINLOCK(acpi_res_lock);
-
 /*
  * This list of permanent mappings is for memory that may be accessed from
  * interrupt context, where we can't do the ioremap().
@@ -166,17 +154,21 @@ static u32 acpi_osi_handler(acpi_string interface, u32 supported)
        return supported;
 }
 
-static void __init acpi_request_region (struct acpi_generic_address *addr,
+static void __init acpi_request_region (struct acpi_generic_address *gas,
        unsigned int length, char *desc)
 {
-       if (!addr->address || !length)
+       u64 addr;
+
+       /* Handle possible alignment issues */
+       memcpy(&addr, &gas->address, sizeof(addr));
+       if (!addr || !length)
                return;
 
        /* Resources are never freed */
-       if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
-               request_region(addr->address, length, desc);
-       else if (addr->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
-               request_mem_region(addr->address, length, desc);
+       if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
+               request_region(addr, length, desc);
+       else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
+               request_mem_region(addr, length, desc);
 }
 
 static int __init acpi_reserve_resources(void)
@@ -330,6 +322,37 @@ acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
        return NULL;
 }
 
+#ifndef CONFIG_IA64
+#define should_use_kmap(pfn)   page_is_ram(pfn)
+#else
+/* ioremap will take care of cache attributes */
+#define should_use_kmap(pfn)   0
+#endif
+
+static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
+{
+       unsigned long pfn;
+
+       pfn = pg_off >> PAGE_SHIFT;
+       if (should_use_kmap(pfn)) {
+               if (pg_sz > PAGE_SIZE)
+                       return NULL;
+               return (void __iomem __force *)kmap(pfn_to_page(pfn));
+       } else
+               return acpi_os_ioremap(pg_off, pg_sz);
+}
+
+static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
+{
+       unsigned long pfn;
+
+       pfn = pg_off >> PAGE_SHIFT;
+       if (page_is_ram(pfn))
+               kunmap(pfn_to_page(pfn));
+       else
+               iounmap(vaddr);
+}
+
 void __iomem *__init_refok
 acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 {
@@ -362,7 +385,7 @@ acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
 
        pg_off = round_down(phys, PAGE_SIZE);
        pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
-       virt = acpi_os_ioremap(pg_off, pg_sz);
+       virt = acpi_map(pg_off, pg_sz);
        if (!virt) {
                mutex_unlock(&acpi_ioremap_lock);
                kfree(map);
@@ -393,7 +416,7 @@ static void acpi_os_map_cleanup(struct acpi_ioremap *map)
 {
        if (!map->refcount) {
                synchronize_rcu();
-               iounmap(map->virt);
+               acpi_unmap(map->phys, map->virt);
                kfree(map);
        }
 }
@@ -427,35 +450,42 @@ void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
                __acpi_unmap_table(virt, size);
 }
 
-static int acpi_os_map_generic_address(struct acpi_generic_address *addr)
+int acpi_os_map_generic_address(struct acpi_generic_address *gas)
 {
+       u64 addr;
        void __iomem *virt;
 
-       if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+       if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return 0;
 
-       if (!addr->address || !addr->bit_width)
+       /* Handle possible alignment issues */
+       memcpy(&addr, &gas->address, sizeof(addr));
+       if (!addr || !gas->bit_width)
                return -EINVAL;
 
-       virt = acpi_os_map_memory(addr->address, addr->bit_width / 8);
+       virt = acpi_os_map_memory(addr, gas->bit_width / 8);
        if (!virt)
                return -EIO;
 
        return 0;
 }
+EXPORT_SYMBOL(acpi_os_map_generic_address);
 
-static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
+void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
 {
+       u64 addr;
        struct acpi_ioremap *map;
 
-       if (addr->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+       if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return;
 
-       if (!addr->address || !addr->bit_width)
+       /* Handle possible alignment issues */
+       memcpy(&addr, &gas->address, sizeof(addr));
+       if (!addr || !gas->bit_width)
                return;
 
        mutex_lock(&acpi_ioremap_lock);
-       map = acpi_map_lookup(addr->address, addr->bit_width / 8);
+       map = acpi_map_lookup(addr, gas->bit_width / 8);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                return;
@@ -465,6 +495,7 @@ static void acpi_os_unmap_generic_address(struct acpi_generic_address *addr)
 
        acpi_os_map_cleanup(map);
 }
+EXPORT_SYMBOL(acpi_os_unmap_generic_address);
 
 #ifdef ACPI_FUTURE_USAGE
 acpi_status
@@ -711,6 +742,67 @@ acpi_os_read_memory(acpi_physical_address phys_addr, u32 * value, u32 width)
        return AE_OK;
 }
 
+#ifdef readq
+static inline u64 read64(const volatile void __iomem *addr)
+{
+       return readq(addr);
+}
+#else
+static inline u64 read64(const volatile void __iomem *addr)
+{
+       u64 l, h;
+       l = readl(addr);
+       h = readl(addr+4);
+       return l | (h << 32);
+}
+#endif
+
+acpi_status
+acpi_os_read_memory64(acpi_physical_address phys_addr, u64 *value, u32 width)
+{
+       void __iomem *virt_addr;
+       unsigned int size = width / 8;
+       bool unmap = false;
+       u64 dummy;
+
+       rcu_read_lock();
+       virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+       if (!virt_addr) {
+               rcu_read_unlock();
+               virt_addr = acpi_os_ioremap(phys_addr, size);
+               if (!virt_addr)
+                       return AE_BAD_ADDRESS;
+               unmap = true;
+       }
+
+       if (!value)
+               value = &dummy;
+
+       switch (width) {
+       case 8:
+               *(u8 *) value = readb(virt_addr);
+               break;
+       case 16:
+               *(u16 *) value = readw(virt_addr);
+               break;
+       case 32:
+               *(u32 *) value = readl(virt_addr);
+               break;
+       case 64:
+               *(u64 *) value = read64(virt_addr);
+               break;
+       default:
+               BUG();
+       }
+
+       if (unmap)
+               iounmap(virt_addr);
+       else
+               rcu_read_unlock();
+
+       return AE_OK;
+}
+
 acpi_status
 acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
 {
@@ -750,6 +842,61 @@ acpi_os_write_memory(acpi_physical_address phys_addr, u32 value, u32 width)
        return AE_OK;
 }
 
+#ifdef writeq
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+       writeq(val, addr);
+}
+#else
+static inline void write64(u64 val, volatile void __iomem *addr)
+{
+       writel(val, addr);
+       writel(val>>32, addr+4);
+}
+#endif
+
+acpi_status
+acpi_os_write_memory64(acpi_physical_address phys_addr, u64 value, u32 width)
+{
+       void __iomem *virt_addr;
+       unsigned int size = width / 8;
+       bool unmap = false;
+
+       rcu_read_lock();
+       virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
+       if (!virt_addr) {
+               rcu_read_unlock();
+               virt_addr = acpi_os_ioremap(phys_addr, size);
+               if (!virt_addr)
+                       return AE_BAD_ADDRESS;
+               unmap = true;
+       }
+
+       switch (width) {
+       case 8:
+               writeb(value, virt_addr);
+               break;
+       case 16:
+               writew(value, virt_addr);
+               break;
+       case 32:
+               writel(value, virt_addr);
+               break;
+       case 64:
+               write64(value, virt_addr);
+               break;
+       default:
+               BUG();
+       }
+
+       if (unmap)
+               iounmap(virt_addr);
+       else
+               rcu_read_unlock();
+
+       return AE_OK;
+}
+
 acpi_status
 acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                               u64 *value, u32 width)
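acpi_os_read_memory64() and acpi_os_write_memory64() above reuse the lookup-or-ioremap pattern of the existing 32-bit helpers; the read64()/write64() wrappers fall back to two 32-bit accesses (low word at addr, high word at addr+4) on kernels without readq()/writeq(), so the 64-bit access is not atomic there. A hedged caller sketch; read_gas64() is a hypothetical helper, and the width of 64 would normally come from a GAS bit_width field:

static int read_gas64(acpi_physical_address reg_phys, u64 *val)
{
	acpi_status status;

	/* reads a pre-mapped or temporarily ioremap()ed 64-bit register */
	status = acpi_os_read_memory64(reg_phys, val, 64);
	return ACPI_FAILURE(status) ? -EIO : 0;
}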
@@ -1278,44 +1425,28 @@ __setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
  * drivers */
 int acpi_check_resource_conflict(const struct resource *res)
 {
-       struct acpi_res_list *res_list_elem;
-       int ioport = 0, clash = 0;
+       acpi_adr_space_type space_id;
+       acpi_size length;
+       u8 warn = 0;
+       int clash = 0;
 
        if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
                return 0;
        if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
                return 0;
 
-       ioport = res->flags & IORESOURCE_IO;
-
-       spin_lock(&acpi_res_lock);
-       list_for_each_entry(res_list_elem, &resource_list_head,
-                           resource_list) {
-               if (ioport && (res_list_elem->resource_type
-                              != ACPI_ADR_SPACE_SYSTEM_IO))
-                       continue;
-               if (!ioport && (res_list_elem->resource_type
-                               != ACPI_ADR_SPACE_SYSTEM_MEMORY))
-                       continue;
+       if (res->flags & IORESOURCE_IO)
+               space_id = ACPI_ADR_SPACE_SYSTEM_IO;
+       else
+               space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
 
-               if (res->end < res_list_elem->start
-                   || res_list_elem->end < res->start)
-                       continue;
-               clash = 1;
-               break;
-       }
-       spin_unlock(&acpi_res_lock);
+       length = res->end - res->start + 1;
+       if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
+               warn = 1;
+       clash = acpi_check_address_range(space_id, res->start, length, warn);
 
        if (clash) {
                if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
-                       printk(KERN_WARNING "ACPI: resource %s %pR"
-                              " conflicts with ACPI region %s "
-                              "[%s 0x%zx-0x%zx]\n",
-                              res->name, res, res_list_elem->name,
-                              (res_list_elem->resource_type ==
-                               ACPI_ADR_SPACE_SYSTEM_IO) ? "io" : "mem",
-                              (size_t) res_list_elem->start,
-                              (size_t) res_list_elem->end);
                        if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
                                printk(KERN_NOTICE "ACPI: This conflict may"
                                       " cause random problems and system"
@@ -1467,155 +1598,6 @@ acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
        kmem_cache_free(cache, object);
        return (AE_OK);
 }
-
-static inline int acpi_res_list_add(struct acpi_res_list *res)
-{
-       struct acpi_res_list *res_list_elem;
-
-       list_for_each_entry(res_list_elem, &resource_list_head,
-                           resource_list) {
-
-               if (res->resource_type == res_list_elem->resource_type &&
-                   res->start == res_list_elem->start &&
-                   res->end == res_list_elem->end) {
-
-                       /*
-                        * The Region(addr,len) already exist in the list,
-                        * just increase the count
-                        */
-
-                       res_list_elem->count++;
-                       return 0;
-               }
-       }
-
-       res->count = 1;
-       list_add(&res->resource_list, &resource_list_head);
-       return 1;
-}
-
-static inline void acpi_res_list_del(struct acpi_res_list *res)
-{
-       struct acpi_res_list *res_list_elem;
-
-       list_for_each_entry(res_list_elem, &resource_list_head,
-                           resource_list) {
-
-               if (res->resource_type == res_list_elem->resource_type &&
-                   res->start == res_list_elem->start &&
-                   res->end == res_list_elem->end) {
-
-                       /*
-                        * If the res count is decreased to 0,
-                        * remove and free it
-                        */
-
-                       if (--res_list_elem->count == 0) {
-                               list_del(&res_list_elem->resource_list);
-                               kfree(res_list_elem);
-                       }
-                       return;
-               }
-       }
-}
-
-acpi_status
-acpi_os_invalidate_address(
-    u8                   space_id,
-    acpi_physical_address   address,
-    acpi_size               length)
-{
-       struct acpi_res_list res;
-
-       switch (space_id) {
-       case ACPI_ADR_SPACE_SYSTEM_IO:
-       case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-               /* Only interference checks against SystemIO and SystemMemory
-                  are needed */
-               res.start = address;
-               res.end = address + length - 1;
-               res.resource_type = space_id;
-               spin_lock(&acpi_res_lock);
-               acpi_res_list_del(&res);
-               spin_unlock(&acpi_res_lock);
-               break;
-       case ACPI_ADR_SPACE_PCI_CONFIG:
-       case ACPI_ADR_SPACE_EC:
-       case ACPI_ADR_SPACE_SMBUS:
-       case ACPI_ADR_SPACE_CMOS:
-       case ACPI_ADR_SPACE_PCI_BAR_TARGET:
-       case ACPI_ADR_SPACE_DATA_TABLE:
-       case ACPI_ADR_SPACE_FIXED_HARDWARE:
-               break;
-       }
-       return AE_OK;
-}
-
-/******************************************************************************
- *
- * FUNCTION:    acpi_os_validate_address
- *
- * PARAMETERS:  space_id             - ACPI space ID
- *              address             - Physical address
- *              length              - Address length
- *
- * RETURN:      AE_OK if address/length is valid for the space_id. Otherwise,
- *              should return AE_AML_ILLEGAL_ADDRESS.
- *
- * DESCRIPTION: Validate a system address via the host OS. Used to validate
- *              the addresses accessed by AML operation regions.
- *
- *****************************************************************************/
-
-acpi_status
-acpi_os_validate_address (
-    u8                   space_id,
-    acpi_physical_address   address,
-    acpi_size               length,
-    char *name)
-{
-       struct acpi_res_list *res;
-       int added;
-       if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
-               return AE_OK;
-
-       switch (space_id) {
-       case ACPI_ADR_SPACE_SYSTEM_IO:
-       case ACPI_ADR_SPACE_SYSTEM_MEMORY:
-               /* Only interference checks against SystemIO and SystemMemory
-                  are needed */
-               res = kzalloc(sizeof(struct acpi_res_list), GFP_KERNEL);
-               if (!res)
-                       return AE_OK;
-               /* ACPI names are fixed to 4 bytes, still better use strlcpy */
-               strlcpy(res->name, name, 5);
-               res->start = address;
-               res->end = address + length - 1;
-               res->resource_type = space_id;
-               spin_lock(&acpi_res_lock);
-               added = acpi_res_list_add(res);
-               spin_unlock(&acpi_res_lock);
-               pr_debug("%s %s resource: start: 0x%llx, end: 0x%llx, "
-                        "name: %s\n", added ? "Added" : "Already exist",
-                        (space_id == ACPI_ADR_SPACE_SYSTEM_IO)
-                        ? "SystemIO" : "System Memory",
-                        (unsigned long long)res->start,
-                        (unsigned long long)res->end,
-                        res->name);
-               if (!added)
-                       kfree(res);
-               break;
-       case ACPI_ADR_SPACE_PCI_CONFIG:
-       case ACPI_ADR_SPACE_EC:
-       case ACPI_ADR_SPACE_SMBUS:
-       case ACPI_ADR_SPACE_CMOS:
-       case ACPI_ADR_SPACE_PCI_BAR_TARGET:
-       case ACPI_ADR_SPACE_DATA_TABLE:
-       case ACPI_ADR_SPACE_FIXED_HARDWARE:
-               break;
-       }
-       return AE_OK;
-}
 #endif
 
 acpi_status __init acpi_os_initialize(void)
index 3a0428e8435c0fcb0221540ded7f20d731d764ae..c850de4c9a146883a7d91f16c8cd8ddc3b694940 100644 (file)
@@ -173,8 +173,30 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
        apic_id = map_mat_entry(handle, type, acpi_id);
        if (apic_id == -1)
                apic_id = map_madt_entry(type, acpi_id);
-       if (apic_id == -1)
-               return apic_id;
+       if (apic_id == -1) {
+               /*
+                * On UP processor, there is no _MAT or MADT table.
+                * So above apic_id is always set to -1.
+                *
+                * BIOS may define multiple CPU handles even for UP processor.
+                * For example,
+                *
+                * Scope (_PR)
+                * {
+                *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
+                *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
+                *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
+                *     Processor (CPU3, 0x03, 0x00000410, 0x06) {}
+                * }
+                *
+                * Ignore apic_id and always return 0 for CPU0's handle.
+                * Return -1 for other CPUs' handles.
+                */
+               if (acpi_id == 0)
+                       return acpi_id;
+               else
+                       return apic_id;
+       }
 
 #ifdef CONFIG_SMP
        for_each_possible_cpu(i) {
index 20a68ca386de033dac092e74541f66f78cb21466..8ae05ce18500092baadd8938f56ef5822d6603c8 100644 (file)
@@ -82,9 +82,9 @@ MODULE_LICENSE("GPL");
 static int acpi_processor_add(struct acpi_device *device);
 static int acpi_processor_remove(struct acpi_device *device, int type);
 static void acpi_processor_notify(struct acpi_device *device, u32 event);
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu);
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr);
 static int acpi_processor_handle_eject(struct acpi_processor *pr);
-
+static int acpi_processor_start(struct acpi_processor *pr);
 
 static const struct acpi_device_id processor_device_ids[] = {
        {ACPI_PROCESSOR_OBJECT_HID, 0},
@@ -324,10 +324,8 @@ static int acpi_processor_get_info(struct acpi_device *device)
         *  they are physically not present.
         */
        if (pr->id == -1) {
-               if (ACPI_FAILURE
-                   (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
+               if (ACPI_FAILURE(acpi_processor_hotadd_init(pr)))
                        return -ENODEV;
-               }
        }
        /*
         * On some boxes several processors use the same processor bus id.
@@ -425,10 +423,29 @@ static int acpi_cpu_soft_notify(struct notifier_block *nfb,
        struct acpi_processor *pr = per_cpu(processors, cpu);
 
        if (action == CPU_ONLINE && pr) {
-               acpi_processor_ppc_has_changed(pr, 0);
-               acpi_processor_hotplug(pr);
-               acpi_processor_reevaluate_tstate(pr, action);
-               acpi_processor_tstate_has_changed(pr);
+               /* CPU got physically hotplugged and onlined the first time:
+                * Initialize missing things
+                */
+               if (pr->flags.need_hotplug_init) {
+                       struct cpuidle_driver *idle_driver =
+                               cpuidle_get_driver();
+
+                       printk(KERN_INFO "Will online and init hotplugged "
+                              "CPU: %d\n", pr->id);
+                       WARN(acpi_processor_start(pr), "Failed to start CPU:"
+                               " %d\n", pr->id);
+                       pr->flags.need_hotplug_init = 0;
+                       if (idle_driver && !strcmp(idle_driver->name,
+                                                  "intel_idle")) {
+                               intel_idle_cpu_init(pr->id);
+                       }
+               /* Normal CPU soft online event */
+               } else {
+                       acpi_processor_ppc_has_changed(pr, 0);
+                       acpi_processor_cst_has_changed(pr);
+                       acpi_processor_reevaluate_tstate(pr, action);
+                       acpi_processor_tstate_has_changed(pr);
+               }
        }
        if (action == CPU_DEAD && pr) {
                /* invalidate the flag.throttling after one CPU is offline */
@@ -442,6 +459,71 @@ static struct notifier_block acpi_cpu_notifier =
            .notifier_call = acpi_cpu_soft_notify,
 };
 
+/*
+ * acpi_processor_start() is called by the cpu_hotplug_notifier func:
+ * acpi_cpu_soft_notify(). Getting it __cpuinit{data} is difficult; the
+ * root cause seems to be that acpi_processor_uninstall_hotplug_notify()
+ * is in the module_exit (__exit) func. Keeping acpi_processor_start()
+ * out of the __cpuinit section, while calling it from __cpuinit funcs
+ * via __ref, looks like the right thing to do here.
+ */
+static __ref int acpi_processor_start(struct acpi_processor *pr)
+{
+       struct acpi_device *device = per_cpu(processor_device_array, pr->id);
+       int result = 0;
+
+#ifdef CONFIG_CPU_FREQ
+       acpi_processor_ppc_has_changed(pr, 0);
+#endif
+       acpi_processor_get_throttling_info(pr);
+       acpi_processor_get_limit_info(pr);
+
+       if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
+               acpi_processor_power_init(pr, device);
+
+       pr->cdev = thermal_cooling_device_register("Processor", device,
+                                                  &processor_cooling_ops);
+       if (IS_ERR(pr->cdev)) {
+               result = PTR_ERR(pr->cdev);
+               goto err_power_exit;
+       }
+
+       dev_dbg(&device->dev, "registered as cooling_device%d\n",
+               pr->cdev->id);
+
+       result = sysfs_create_link(&device->dev.kobj,
+                                  &pr->cdev->device.kobj,
+                                  "thermal_cooling");
+       if (result) {
+               printk(KERN_ERR PREFIX "Create sysfs link\n");
+               goto err_thermal_unregister;
+       }
+       result = sysfs_create_link(&pr->cdev->device.kobj,
+                                  &device->dev.kobj,
+                                  "device");
+       if (result) {
+               printk(KERN_ERR PREFIX "Create sysfs link\n");
+               goto err_remove_sysfs_thermal;
+       }
+
+       return 0;
+
+err_remove_sysfs_thermal:
+       sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
+err_thermal_unregister:
+       thermal_cooling_device_unregister(pr->cdev);
+err_power_exit:
+       acpi_processor_power_exit(pr, device);
+
+       return result;
+}
+
+/*
+ * Do not put anything in here which needs the core to be online.
+ * For example MSR access or setting up things which check for cpuinfo_x86
+ * (cpu_data(cpu)) values, like CPU feature flags, family, model, etc.
+ * Such things have to be put in and set up above in acpi_processor_start()
+ */
 static int __cpuinit acpi_processor_add(struct acpi_device *device)
 {
        struct acpi_processor *pr = NULL;
@@ -497,48 +579,21 @@ static int __cpuinit acpi_processor_add(struct acpi_device *device)
                goto err_free_cpumask;
        }
 
-#ifdef CONFIG_CPU_FREQ
-       acpi_processor_ppc_has_changed(pr, 0);
-#endif
-       acpi_processor_get_throttling_info(pr);
-       acpi_processor_get_limit_info(pr);
-
-       if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
-               acpi_processor_power_init(pr, device);
-
-       pr->cdev = thermal_cooling_device_register("Processor", device,
-                                               &processor_cooling_ops);
-       if (IS_ERR(pr->cdev)) {
-               result = PTR_ERR(pr->cdev);
-               goto err_power_exit;
-       }
-
-       dev_dbg(&device->dev, "registered as cooling_device%d\n",
-                pr->cdev->id);
+       /*
+        * Do not start hotplugged CPUs now, but when they
+        * are onlined the first time
+        */
+       if (pr->flags.need_hotplug_init)
+               return 0;
 
-       result = sysfs_create_link(&device->dev.kobj,
-                                  &pr->cdev->device.kobj,
-                                  "thermal_cooling");
-       if (result) {
-               printk(KERN_ERR PREFIX "Create sysfs link\n");
-               goto err_thermal_unregister;
-       }
-       result = sysfs_create_link(&pr->cdev->device.kobj,
-                                  &device->dev.kobj,
-                                  "device");
-       if (result) {
-               printk(KERN_ERR PREFIX "Create sysfs link\n");
+       result = acpi_processor_start(pr);
+       if (result)
                goto err_remove_sysfs;
-       }
 
        return 0;
 
 err_remove_sysfs:
-       sysfs_remove_link(&device->dev.kobj, "thermal_cooling");
-err_thermal_unregister:
-       thermal_cooling_device_unregister(pr->cdev);
-err_power_exit:
-       acpi_processor_power_exit(pr, device);
+       sysfs_remove_link(&device->dev.kobj, "sysdev");
 err_free_cpumask:
        free_cpumask_var(pr->throttling.shared_cpu_map);
 
@@ -720,21 +775,33 @@ processor_walk_namespace_cb(acpi_handle handle,
        return (AE_OK);
 }
 
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
+       acpi_handle handle = pr->handle;
 
        if (!is_processor_present(handle)) {
                return AE_ERROR;
        }
 
-       if (acpi_map_lsapic(handle, p_cpu))
+       if (acpi_map_lsapic(handle, &pr->id))
                return AE_ERROR;
 
-       if (arch_register_cpu(*p_cpu)) {
-               acpi_unmap_lsapic(*p_cpu);
+       if (arch_register_cpu(pr->id)) {
+               acpi_unmap_lsapic(pr->id);
                return AE_ERROR;
        }
 
+       /* CPU got hot-plugged, but cpu_data is not initialized yet
+        * Set flag to delay cpu_idle/throttling initialization
+        * in:
+        * acpi_processor_add()
+        *   acpi_processor_get_info()
+        * and do it when the CPU gets online the first time
+        * TBD: Clean up the above functions and try to do this more elegantly.
+        */
+       printk(KERN_INFO "CPU %d got hotplugged\n", pr->id);
+       pr->flags.need_hotplug_init = 1;
+
        return AE_OK;
 }
 
@@ -748,7 +815,7 @@ static int acpi_processor_handle_eject(struct acpi_processor *pr)
        return (0);
 }
 #else
-static acpi_status acpi_processor_hotadd_init(acpi_handle handle, int *p_cpu)
+static acpi_status acpi_processor_hotadd_init(struct acpi_processor *pr)
 {
        return AE_ERROR;
 }
@@ -827,8 +894,6 @@ static void __exit acpi_processor_exit(void)
 
        acpi_bus_unregister_driver(&acpi_processor_driver);
 
-       cpuidle_unregister_driver(&acpi_idle_driver);
-
        return;
 }
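Taken together, the processor_driver.c changes split CPU bring-up in two: acpi_processor_hotadd_init() now only maps and registers the hot-added CPU and sets pr->flags.need_hotplug_init, while throttling, cpuidle and the thermal cooling device are set up later by acpi_processor_start(), run from the CPU notifier the first time the CPU comes online and cpu_data is valid. A simplified restatement of that flow, not the notifier code itself:

/* Sketch of the post-patch hot-add path (simplified, hypothetical helper). */
static int on_cpu_online(struct acpi_processor *pr)
{
	if (pr->flags.need_hotplug_init) {
		/* first online after physical hot-add: finish driver init */
		pr->flags.need_hotplug_init = 0;
		return acpi_processor_start(pr);
	}
	/* ordinary soft online: just re-evaluate P/C/T states */
	acpi_processor_ppc_has_changed(pr, 0);
	acpi_processor_cst_has_changed(pr);
	return 0;
}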
 
index 0a7ed69546ba47db0c322542864c168068baa7f4..ca191ff978444c2fedf09aa950f8b26d327befbd 100644 (file)
@@ -438,6 +438,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
        },
        {
        .callback = init_nvs_nosave,
+       .ident = "Sony Vaio VPCCW29FX",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "VPCCW29FX"),
+               },
+       },
+       {
+       .callback = init_nvs_nosave,
        .ident = "Averatec AV1020-ED2",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "AVERATEC"),
index 69ac373c72abfc41e53304c0a8e2c7bb4fbf4e5c..fdf27b9fce43a882b706bf5148cd51a6a67f4f83 100644 (file)
@@ -1116,6 +1116,13 @@ static int piix_broken_suspend(void)
                                DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE U205"),
                        },
                },
+               {
+                       .ident = "Satellite Pro A120",
+                       .matches = {
+                               DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+                               DMI_MATCH(DMI_PRODUCT_NAME, "Satellite Pro A120"),
+                       },
+               },
                {
                        .ident = "Portege M500",
                        .matches = {
index 11c9aea4f4f7ea93a50fdc4b3547323d3728738d..c06e0ec11556d7696b620fa0b32e5af508863052 100644 (file)
@@ -4125,6 +4125,8 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
         * device and controller are SATA.
         */
        { "PIONEER DVD-RW  DVRTD08",    NULL,   ATA_HORKAGE_NOSETXFER },
+       { "PIONEER DVD-RW  DVRTD08A",   NULL,   ATA_HORKAGE_NOSETXFER },
+       { "PIONEER DVD-RW  DVR-215",    NULL,   ATA_HORKAGE_NOSETXFER },
        { "PIONEER DVD-RW  DVR-212D",   NULL,   ATA_HORKAGE_NOSETXFER },
        { "PIONEER DVD-RW  DVR-216D",   NULL,   ATA_HORKAGE_NOSETXFER },
 
index 9a7f0ea565df6c6066d7e64465c8385b81014a01..74aaee30e264ce1959c3807cb6a9e4c8bad27c95 100644 (file)
@@ -291,6 +291,7 @@ int ata_tport_add(struct device *parent,
                goto tport_err;
        }
 
+       device_enable_async_suspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
 
index d6a4677fdf711801e4d09e74d6cc122db9a50904..1e65842e2ca719a0916bc371c676e68f27c096e2 100644 (file)
@@ -251,6 +251,8 @@ static const u32 udma_tenvmin = 20;
 static const u32 udma_tackmin = 20;
 static const u32 udma_tssmin = 50;
 
+#define BFIN_MAX_SG_SEGMENTS 4
+
 /**
  *
  *     Function:       num_clocks_min
@@ -829,79 +831,61 @@ static void bfin_set_devctl(struct ata_port *ap, u8 ctl)
 
 static void bfin_bmdma_setup(struct ata_queued_cmd *qc)
 {
-       unsigned short config = WDSIZE_16;
+       struct ata_port *ap = qc->ap;
+       struct dma_desc_array *dma_desc_cpu = (struct dma_desc_array *)ap->bmdma_prd;
+       void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+       unsigned short config = DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_16 | DMAEN;
        struct scatterlist *sg;
        unsigned int si;
+       unsigned int channel;
+       unsigned int dir;
+       unsigned int size = 0;
 
        dev_dbg(qc->ap->dev, "in atapi dma setup\n");
        /* Program the ATA_CTRL register with dir */
        if (qc->tf.flags & ATA_TFLAG_WRITE) {
-               /* fill the ATAPI DMA controller */
-               set_dma_config(CH_ATAPI_TX, config);
-               set_dma_x_modify(CH_ATAPI_TX, 2);
-               for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                       set_dma_start_addr(CH_ATAPI_TX, sg_dma_address(sg));
-                       set_dma_x_count(CH_ATAPI_TX, sg_dma_len(sg) >> 1);
-               }
+               channel = CH_ATAPI_TX;
+               dir = DMA_TO_DEVICE;
        } else {
+               channel = CH_ATAPI_RX;
+               dir = DMA_FROM_DEVICE;
                config |= WNR;
-               /* fill the ATAPI DMA controller */
-               set_dma_config(CH_ATAPI_RX, config);
-               set_dma_x_modify(CH_ATAPI_RX, 2);
-               for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                       set_dma_start_addr(CH_ATAPI_RX, sg_dma_address(sg));
-                       set_dma_x_count(CH_ATAPI_RX, sg_dma_len(sg) >> 1);
-               }
        }
-}
 
-/**
- *     bfin_bmdma_start - Start an IDE DMA transaction
- *     @qc: Info associated with this ATA transaction.
- *
- *     Note: Original code is ata_bmdma_start().
- */
+       dma_map_sg(ap->dev, qc->sg, qc->n_elem, dir);
 
-static void bfin_bmdma_start(struct ata_queued_cmd *qc)
-{
-       struct ata_port *ap = qc->ap;
-       void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
-       struct scatterlist *sg;
-       unsigned int si;
+       /* fill the ATAPI DMA controller */
+       for_each_sg(qc->sg, sg, qc->n_elem, si) {
+               dma_desc_cpu[si].start_addr = sg_dma_address(sg);
+               dma_desc_cpu[si].cfg = config;
+               dma_desc_cpu[si].x_count = sg_dma_len(sg) >> 1;
+               dma_desc_cpu[si].x_modify = 2;
+               size += sg_dma_len(sg);
+       }
 
-       dev_dbg(qc->ap->dev, "in atapi dma start\n");
-       if (!(ap->udma_mask || ap->mwdma_mask))
-               return;
+       /* Set the last descriptor to stop mode */
+       dma_desc_cpu[qc->n_elem - 1].cfg &= ~(DMAFLOW | NDSIZE);
 
-       /* start ATAPI DMA controller*/
-       if (qc->tf.flags & ATA_TFLAG_WRITE) {
-               /*
-                * On blackfin arch, uncacheable memory is not
-                * allocated with flag GFP_DMA. DMA buffer from
-                * common kenel code should be flushed if WB
-                * data cache is enabled. Otherwise, this loop
-                * is an empty loop and optimized out.
-                */
-               for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                       flush_dcache_range(sg_dma_address(sg),
-                               sg_dma_address(sg) + sg_dma_len(sg));
-               }
-               enable_dma(CH_ATAPI_TX);
-               dev_dbg(qc->ap->dev, "enable udma write\n");
+       flush_dcache_range((unsigned int)dma_desc_cpu,
+               (unsigned int)dma_desc_cpu +
+                       qc->n_elem * sizeof(struct dma_desc_array));
 
-               /* Send ATA DMA write command */
-               bfin_exec_command(ap, &qc->tf);
+       /* Enable ATA DMA operation*/
+       set_dma_curr_desc_addr(channel, (unsigned long *)ap->bmdma_prd_dma);
+       set_dma_x_count(channel, 0);
+       set_dma_x_modify(channel, 0);
+       set_dma_config(channel, config);
+
+       SSYNC();
+
+       /* Send ATA DMA command */
+       bfin_exec_command(ap, &qc->tf);
 
+       if (qc->tf.flags & ATA_TFLAG_WRITE) {
                /* set ATA DMA write direction */
                ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
                        | XFER_DIR));
        } else {
-               enable_dma(CH_ATAPI_RX);
-               dev_dbg(qc->ap->dev, "enable udma read\n");
-
-               /* Send ATA DMA read command */
-               bfin_exec_command(ap, &qc->tf);
-
                /* set ATA DMA read direction */
                ATAPI_SET_CONTROL(base, (ATAPI_GET_CONTROL(base)
                        & ~XFER_DIR));
@@ -913,12 +897,28 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
        /* Set ATAPI state machine contorl in terminate sequence */
        ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base) | END_ON_TERM);
 
-       /* Set transfer length to buffer len */
-       for_each_sg(qc->sg, sg, qc->n_elem, si) {
-               ATAPI_SET_XFER_LEN(base, (sg_dma_len(sg) >> 1));
-       }
+       /* Set transfer length to the total size of sg buffers */
+       ATAPI_SET_XFER_LEN(base, size >> 1);
+}
 
-       /* Enable ATA DMA operation*/
+/**
+ *     bfin_bmdma_start - Start an IDE DMA transaction
+ *     @qc: Info associated with this ATA transaction.
+ *
+ *     Note: Original code is ata_bmdma_start().
+ */
+
+static void bfin_bmdma_start(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       void __iomem *base = (void __iomem *)ap->ioaddr.ctl_addr;
+
+       dev_dbg(qc->ap->dev, "in atapi dma start\n");
+
+       if (!(ap->udma_mask || ap->mwdma_mask))
+               return;
+
+       /* start ATAPI transfer*/
        if (ap->udma_mask)
                ATAPI_SET_CONTROL(base, ATAPI_GET_CONTROL(base)
                        | ULTRA_START);
@@ -935,34 +935,23 @@ static void bfin_bmdma_start(struct ata_queued_cmd *qc)
 static void bfin_bmdma_stop(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
-       struct scatterlist *sg;
-       unsigned int si;
+       unsigned int dir;
 
        dev_dbg(qc->ap->dev, "in atapi dma stop\n");
+
        if (!(ap->udma_mask || ap->mwdma_mask))
                return;
 
        /* stop ATAPI DMA controller*/
-       if (qc->tf.flags & ATA_TFLAG_WRITE)
+       if (qc->tf.flags & ATA_TFLAG_WRITE) {
+               dir = DMA_TO_DEVICE;
                disable_dma(CH_ATAPI_TX);
-       else {
+       } else {
+               dir = DMA_FROM_DEVICE;
                disable_dma(CH_ATAPI_RX);
-               if (ap->hsm_task_state & HSM_ST_LAST) {
-                       /*
-                        * On blackfin arch, uncacheable memory is not
-                        * allocated with flag GFP_DMA. DMA buffer from
-                        * common kenel code should be invalidated if
-                        * data cache is enabled. Otherwise, this loop
-                        * is an empty loop and optimized out.
-                        */
-                       for_each_sg(qc->sg, sg, qc->n_elem, si) {
-                               invalidate_dcache_range(
-                                       sg_dma_address(sg),
-                                       sg_dma_address(sg)
-                                       + sg_dma_len(sg));
-                       }
-               }
        }
+
+       dma_unmap_sg(ap->dev, qc->sg, qc->n_elem, dir);
 }
 
 /**
@@ -1260,6 +1249,11 @@ static void bfin_port_stop(struct ata_port *ap)
 {
        dev_dbg(ap->dev, "in atapi port stop\n");
        if (ap->udma_mask != 0 || ap->mwdma_mask != 0) {
+               dma_free_coherent(ap->dev,
+                       BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+                       ap->bmdma_prd,
+                       ap->bmdma_prd_dma);
+
                free_dma(CH_ATAPI_RX);
                free_dma(CH_ATAPI_TX);
        }
@@ -1271,14 +1265,29 @@ static int bfin_port_start(struct ata_port *ap)
        if (!(ap->udma_mask || ap->mwdma_mask))
                return 0;
 
+       ap->bmdma_prd = dma_alloc_coherent(ap->dev,
+                               BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+                               &ap->bmdma_prd_dma,
+                               GFP_KERNEL);
+
+       if (ap->bmdma_prd == NULL) {
+               dev_info(ap->dev, "Unable to allocate DMA descriptor array.\n");
+               goto out;
+       }
+
        if (request_dma(CH_ATAPI_RX, "BFIN ATAPI RX DMA") >= 0) {
                if (request_dma(CH_ATAPI_TX,
                        "BFIN ATAPI TX DMA") >= 0)
                        return 0;
 
                free_dma(CH_ATAPI_RX);
+               dma_free_coherent(ap->dev,
+                       BFIN_MAX_SG_SEGMENTS * sizeof(struct dma_desc_array),
+                       ap->bmdma_prd,
+                       ap->bmdma_prd_dma);
        }
 
+out:
        ap->udma_mask = 0;
        ap->mwdma_mask = 0;
        dev_err(ap->dev, "Unable to request ATAPI DMA!"
@@ -1400,7 +1409,7 @@ static irqreturn_t bfin_ata_interrupt(int irq, void *dev_instance)
 
 static struct scsi_host_template bfin_sht = {
        ATA_BASE_SHT(DRV_NAME),
-       .sg_tablesize           = SG_NONE,
+       .sg_tablesize           = BFIN_MAX_SG_SEGMENTS,
        .dma_boundary           = ATA_DMA_BOUNDARY,
 };
 
index 5a2c95ba050a28caacbb4e3a799a0cdd176fcf47..0120b0d1e9a5aa838cee9a7347992377012d6c8c 100644 (file)
@@ -140,6 +140,7 @@ enum {
         */
        HCONTROL_ONLINE_PHY_RST = (1 << 31),
        HCONTROL_FORCE_OFFLINE = (1 << 30),
+       HCONTROL_LEGACY = (1 << 28),
        HCONTROL_PARITY_PROT_MOD = (1 << 14),
        HCONTROL_DPATH_PARITY = (1 << 12),
        HCONTROL_SNOOP_ENABLE = (1 << 10),
@@ -1223,6 +1224,10 @@ static int sata_fsl_init_controller(struct ata_host *host)
         * part of the port_start() callback
         */
 
+       /* sata controller to operate in enterprise mode */
+       temp = ioread32(hcr_base + HCONTROL);
+       iowrite32(temp & ~HCONTROL_LEGACY, hcr_base + HCONTROL);
+
        /* ack. any pending IRQs for this controller/port */
        temp = ioread32(hcr_base + HSTATUS);
        if (temp & 0x3F)
@@ -1421,6 +1426,12 @@ static int sata_fsl_resume(struct platform_device *op)
        /* Recovery the CHBA register in host controller cmd register set */
        iowrite32(pp->cmdslot_paddr & 0xffffffff, hcr_base + CHBA);
 
+       iowrite32((ioread32(hcr_base + HCONTROL)
+                               | HCONTROL_ONLINE_PHY_RST
+                               | HCONTROL_SNOOP_ENABLE
+                               | HCONTROL_PMP_ATTACHED),
+                       hcr_base + HCONTROL);
+
        ata_host_resume(host);
        return 0;
 }
index 2c8272dd93c4690229e3ea2442488e99a6de22e9..610f9997a4039c6df17d33d1931624c410120053 100644 (file)
@@ -1,6 +1,6 @@
 # Makefile for the Linux device tree
 
-obj-y                  := core.o sys.o bus.o dd.o syscore.o \
+obj-y                  := core.o bus.o dd.o syscore.o \
                           driver.o class.o platform.o \
                           cpu.o firmware.o init.o map.o devres.o \
                           attribute_container.o transport_class.o \
index 99dc5921e1dd28be8e6a910964dde8ca14900238..40fb12288ce258263cd4a995273f0bcc882df2ab 100644 (file)
@@ -915,9 +915,10 @@ static BUS_ATTR(uevent, S_IWUSR, NULL, bus_uevent_store);
 
 /**
  * __bus_register - register a driver-core subsystem
- * @bus: bus.
+ * @bus: bus to register
+ * @key: lockdep class key
  *
- * Once we have that, we registered the bus with the kobject
+ * Once we have that, we register the bus with the kobject
  * infrastructure, then register the children subsystems it has:
  * the devices and drivers that belong to the subsystem.
  */
@@ -1220,8 +1221,8 @@ static void system_root_device_release(struct device *dev)
 }
 /**
  * subsys_system_register - register a subsystem at /sys/devices/system/
- * @subsys - system subsystem
- * @groups - default attributes for the root device
+ * @subsys: system subsystem
+ * @groups: default attributes for the root device
  *
  * All 'system' subsystems have a /sys/devices/system/<name> root device
  * with the name of the subsystem. The root device can carry subsystem-
index 4a67cc0c8b37aaeeec97e27eef04752a52504d1a..74dda4f697f92d772355f37c0a82553e7827119d 100644 (file)
@@ -632,6 +632,11 @@ static void klist_children_put(struct klist_node *n)
  * may be used for reference counting of @dev after calling this
  * function.
  *
+ * All fields in @dev must be initialized by the caller to 0, except
+ * for those explicitly set to some other value.  The simplest
+ * approach is to use kzalloc() to allocate the structure containing
+ * @dev.
+ *
  * NOTE: Use put_device() to give up your reference instead of freeing
  * @dev directly once you have called this function.
  */
@@ -930,6 +935,13 @@ int device_private_init(struct device *dev)
  * to the global and sibling lists for the device, then
  * adds it to the other relevant subsystems of the driver model.
  *
+ * Do not call this routine or device_register() more than once for
+ * any device structure.  The driver model core is not designed to work
+ * with devices that get unregistered and then spring back to life.
+ * (Among other things, it's very hard to guarantee that all references
+ * to the previous incarnation of @dev have been dropped.)  Allocate
+ * and register a fresh new struct device instead.
+ *
  * NOTE: _Never_ directly free @dev after calling this function, even
  * if it returned an error! Always use put_device() to give up your
  * reference instead.
@@ -1022,7 +1034,7 @@ int device_add(struct device *dev)
        device_pm_add(dev);
 
        /* Notify clients of device addition.  This call must come
-        * after dpm_sysf_add() and before kobject_uevent().
+        * after dpm_sysfs_add() and before kobject_uevent().
         */
        if (dev->bus)
                blocking_notifier_call_chain(&dev->bus->p->bus_notifier,
@@ -1090,6 +1102,9 @@ name_error:
  * have a clearly defined need to use and refcount the device
  * before it is added to the hierarchy.
  *
+ * For more information, see the kerneldoc for device_initialize()
+ * and device_add().
+ *
  * NOTE: _Never_ directly free @dev after calling this function, even
  * if it returned an error! Always use put_device() to give up the
  * reference initialized in this function instead.
index 26ab358dac62daf5cac1531eeb0cf24546c42efd..6c9387d646ecccc0d9c8fa62f6f8ce54593894a9 100644 (file)
@@ -525,8 +525,7 @@ static int _request_firmware(const struct firmware **firmware_p,
        if (!firmware) {
                dev_err(device, "%s: kmalloc(struct firmware) failed\n",
                        __func__);
-               retval = -ENOMEM;
-               goto out;
+               return -ENOMEM;
        }
 
        if (fw_get_builtin_firmware(firmware, name)) {
index 92e6a9048065eb567f3d9c857e689c2b4667dd15..978bbf7ac6af03bb26eed17329d3a24f6bf5337e 100644 (file)
@@ -1429,6 +1429,8 @@ static int pm_genpd_default_restore_state(struct device *dev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+
 /**
  * pm_genpd_default_suspend - Default "device suspend" for PM domains.
  * @dev: Device to handle.
@@ -1517,6 +1519,19 @@ static int pm_genpd_default_thaw(struct device *dev)
        return cb ? cb(dev) : pm_generic_thaw(dev);
 }
 
+#else /* !CONFIG_PM_SLEEP */
+
+#define pm_genpd_default_suspend       NULL
+#define pm_genpd_default_suspend_late  NULL
+#define pm_genpd_default_resume_early  NULL
+#define pm_genpd_default_resume                NULL
+#define pm_genpd_default_freeze                NULL
+#define pm_genpd_default_freeze_late   NULL
+#define pm_genpd_default_thaw_early    NULL
+#define pm_genpd_default_thaw          NULL
+
+#endif /* !CONFIG_PM_SLEEP */
+
 /**
  * pm_genpd_init - Initialize a generic I/O PM domain object.
  * @genpd: PM domain object to initialize.
index 51527ee92d101018680cd5b633265105a2952df1..66a265bf5867d1b516a03f34e8e50946db4c1e3e 100644 (file)
@@ -12,6 +12,8 @@
 #include <linux/pm_qos.h>
 #include <linux/hrtimer.h>
 
+#ifdef CONFIG_PM_RUNTIME
+
 /**
  * default_stop_ok - Default PM domain governor routine for stopping devices.
  * @dev: Device to check.
@@ -137,16 +139,28 @@ static bool default_power_down_ok(struct dev_pm_domain *pd)
        return true;
 }
 
-struct dev_power_governor simple_qos_governor = {
-       .stop_ok = default_stop_ok,
-       .power_down_ok = default_power_down_ok,
-};
-
 static bool always_on_power_down_ok(struct dev_pm_domain *domain)
 {
        return false;
 }
 
+#else /* !CONFIG_PM_RUNTIME */
+
+bool default_stop_ok(struct device *dev)
+{
+       return false;
+}
+
+#define default_power_down_ok  NULL
+#define always_on_power_down_ok        NULL
+
+#endif /* !CONFIG_PM_RUNTIME */
+
+struct dev_power_governor simple_qos_governor = {
+       .stop_ok = default_stop_ok,
+       .power_down_ok = default_power_down_ok,
+};
+
 /**
  * pm_genpd_gov_always_on - A governor implementing an always-on policy
  */
index be10a4ff660915625454375ce9fb30a97d5b7ef9..65558034318f3f295abde2fe7a599c4971e7d7ec 100644 (file)
@@ -284,6 +284,9 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
        map->precious_reg = config->precious_reg;
        map->cache_type = config->cache_type;
 
+       map->cache_bypass = false;
+       map->cache_only = false;
+
        ret = regcache_init(map, config);
 
        mutex_unlock(&map->lock);
diff --git a/drivers/base/sys.c b/drivers/base/sys.c
deleted file mode 100644 (file)
index 409f5ce..0000000
+++ /dev/null
@@ -1,383 +0,0 @@
-/*
- * sys.c - pseudo-bus for system 'devices' (cpus, PICs, timers, etc)
- *
- * Copyright (c) 2002-3 Patrick Mochel
- *               2002-3 Open Source Development Lab
- *
- * This file is released under the GPLv2
- *
- * This exports a 'system' bus type.
- * By default, a 'sys' bus gets added to the root of the system. There will
- * always be core system devices. Devices can use sysdev_register() to
- * add themselves as children of the system bus.
- */
-
-#include <linux/sysdev.h>
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/pm.h>
-#include <linux/device.h>
-#include <linux/mutex.h>
-#include <linux/interrupt.h>
-
-#include "base.h"
-
-#define to_sysdev(k) container_of(k, struct sys_device, kobj)
-#define to_sysdev_attr(a) container_of(a, struct sysdev_attribute, attr)
-
-
-static ssize_t
-sysdev_show(struct kobject *kobj, struct attribute *attr, char *buffer)
-{
-       struct sys_device *sysdev = to_sysdev(kobj);
-       struct sysdev_attribute *sysdev_attr = to_sysdev_attr(attr);
-
-       if (sysdev_attr->show)
-               return sysdev_attr->show(sysdev, sysdev_attr, buffer);
-       return -EIO;
-}
-
-
-static ssize_t
-sysdev_store(struct kobject *kobj, struct attribute *attr,
-            const char *buffer, size_t count)
-{
-       struct sys_device *sysdev = to_sysdev(kobj);
-       struct sysdev_attribute *sysdev_attr = to_sysdev_attr(attr);
-
-       if (sysdev_attr->store)
-               return sysdev_attr->store(sysdev, sysdev_attr, buffer, count);
-       return -EIO;
-}
-
-static const struct sysfs_ops sysfs_ops = {
-       .show   = sysdev_show,
-       .store  = sysdev_store,
-};
-
-static struct kobj_type ktype_sysdev = {
-       .sysfs_ops      = &sysfs_ops,
-};
-
-
-int sysdev_create_file(struct sys_device *s, struct sysdev_attribute *a)
-{
-       return sysfs_create_file(&s->kobj, &a->attr);
-}
-
-
-void sysdev_remove_file(struct sys_device *s, struct sysdev_attribute *a)
-{
-       sysfs_remove_file(&s->kobj, &a->attr);
-}
-
-EXPORT_SYMBOL_GPL(sysdev_create_file);
-EXPORT_SYMBOL_GPL(sysdev_remove_file);
-
-#define to_sysdev_class(k) container_of(k, struct sysdev_class, kset.kobj)
-#define to_sysdev_class_attr(a) container_of(a, \
-       struct sysdev_class_attribute, attr)
-
-static ssize_t sysdev_class_show(struct kobject *kobj, struct attribute *attr,
-                                char *buffer)
-{
-       struct sysdev_class *class = to_sysdev_class(kobj);
-       struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr);
-
-       if (class_attr->show)
-               return class_attr->show(class, class_attr, buffer);
-       return -EIO;
-}
-
-static ssize_t sysdev_class_store(struct kobject *kobj, struct attribute *attr,
-                                 const char *buffer, size_t count)
-{
-       struct sysdev_class *class = to_sysdev_class(kobj);
-       struct sysdev_class_attribute *class_attr = to_sysdev_class_attr(attr);
-
-       if (class_attr->store)
-               return class_attr->store(class, class_attr, buffer, count);
-       return -EIO;
-}
-
-static const struct sysfs_ops sysfs_class_ops = {
-       .show   = sysdev_class_show,
-       .store  = sysdev_class_store,
-};
-
-static struct kobj_type ktype_sysdev_class = {
-       .sysfs_ops      = &sysfs_class_ops,
-};
-
-int sysdev_class_create_file(struct sysdev_class *c,
-                            struct sysdev_class_attribute *a)
-{
-       return sysfs_create_file(&c->kset.kobj, &a->attr);
-}
-EXPORT_SYMBOL_GPL(sysdev_class_create_file);
-
-void sysdev_class_remove_file(struct sysdev_class *c,
-                             struct sysdev_class_attribute *a)
-{
-       sysfs_remove_file(&c->kset.kobj, &a->attr);
-}
-EXPORT_SYMBOL_GPL(sysdev_class_remove_file);
-
-extern struct kset *system_kset;
-
-int sysdev_class_register(struct sysdev_class *cls)
-{
-       int retval;
-
-       pr_debug("Registering sysdev class '%s'\n", cls->name);
-
-       INIT_LIST_HEAD(&cls->drivers);
-       memset(&cls->kset.kobj, 0x00, sizeof(struct kobject));
-       cls->kset.kobj.parent = &system_kset->kobj;
-       cls->kset.kobj.ktype = &ktype_sysdev_class;
-       cls->kset.kobj.kset = system_kset;
-
-       retval = kobject_set_name(&cls->kset.kobj, "%s", cls->name);
-       if (retval)
-               return retval;
-
-       retval = kset_register(&cls->kset);
-       if (!retval && cls->attrs)
-               retval = sysfs_create_files(&cls->kset.kobj,
-                                           (const struct attribute **)cls->attrs);
-       return retval;
-}
-
-void sysdev_class_unregister(struct sysdev_class *cls)
-{
-       pr_debug("Unregistering sysdev class '%s'\n",
-                kobject_name(&cls->kset.kobj));
-       if (cls->attrs)
-               sysfs_remove_files(&cls->kset.kobj,
-                                  (const struct attribute **)cls->attrs);
-       kset_unregister(&cls->kset);
-}
-
-EXPORT_SYMBOL_GPL(sysdev_class_register);
-EXPORT_SYMBOL_GPL(sysdev_class_unregister);
-
-static DEFINE_MUTEX(sysdev_drivers_lock);
-
-/*
- * @dev != NULL means that we're unwinding because some drv->add()
- * failed for some reason. You need to grab sysdev_drivers_lock before
- * calling this.
- */
-static void __sysdev_driver_remove(struct sysdev_class *cls,
-                                  struct sysdev_driver *drv,
-                                  struct sys_device *from_dev)
-{
-       struct sys_device *dev = from_dev;
-
-       list_del_init(&drv->entry);
-       if (!cls)
-               return;
-
-       if (!drv->remove)
-               goto kset_put;
-
-       if (dev)
-               list_for_each_entry_continue_reverse(dev, &cls->kset.list,
-                                                    kobj.entry)
-                       drv->remove(dev);
-       else
-               list_for_each_entry(dev, &cls->kset.list, kobj.entry)
-                       drv->remove(dev);
-
-kset_put:
-       kset_put(&cls->kset);
-}
-
-/**
- *     sysdev_driver_register - Register auxiliary driver
- *     @cls:   Device class driver belongs to.
- *     @drv:   Driver.
- *
- *     @drv is inserted into @cls->drivers to be
- *     called on each operation on devices of that class. The refcount
- *     of @cls is incremented.
- */
-int sysdev_driver_register(struct sysdev_class *cls, struct sysdev_driver *drv)
-{
-       struct sys_device *dev = NULL;
-       int err = 0;
-
-       if (!cls) {
-               WARN(1, KERN_WARNING "sysdev: invalid class passed to %s!\n",
-                       __func__);
-               return -EINVAL;
-       }
-
-       /* Check whether this driver has already been added to a class. */
-       if (drv->entry.next && !list_empty(&drv->entry))
-               WARN(1, KERN_WARNING "sysdev: class %s: driver (%p) has already"
-                       " been registered to a class, something is wrong, but "
-                       "will forge on!\n", cls->name, drv);
-
-       mutex_lock(&sysdev_drivers_lock);
-       if (cls && kset_get(&cls->kset)) {
-               list_add_tail(&drv->entry, &cls->drivers);
-
-               /* If devices of this class already exist, tell the driver */
-               if (drv->add) {
-                       list_for_each_entry(dev, &cls->kset.list, kobj.entry) {
-                               err = drv->add(dev);
-                               if (err)
-                                       goto unwind;
-                       }
-               }
-       } else {
-               err = -EINVAL;
-               WARN(1, KERN_ERR "%s: invalid device class\n", __func__);
-       }
-
-       goto unlock;
-
-unwind:
-       __sysdev_driver_remove(cls, drv, dev);
-
-unlock:
-       mutex_unlock(&sysdev_drivers_lock);
-       return err;
-}
-
-/**
- *     sysdev_driver_unregister - Remove an auxiliary driver.
- *     @cls:   Class driver belongs to.
- *     @drv:   Driver.
- */
-void sysdev_driver_unregister(struct sysdev_class *cls,
-                             struct sysdev_driver *drv)
-{
-       mutex_lock(&sysdev_drivers_lock);
-       __sysdev_driver_remove(cls, drv, NULL);
-       mutex_unlock(&sysdev_drivers_lock);
-}
-EXPORT_SYMBOL_GPL(sysdev_driver_register);
-EXPORT_SYMBOL_GPL(sysdev_driver_unregister);
-
-/**
- *     sysdev_register - add a system device to the tree
- *     @sysdev:        device in question
- *
- */
-int sysdev_register(struct sys_device *sysdev)
-{
-       int error;
-       struct sysdev_class *cls = sysdev->cls;
-
-       if (!cls)
-               return -EINVAL;
-
-       pr_debug("Registering sys device of class '%s'\n",
-                kobject_name(&cls->kset.kobj));
-
-       /* initialize the kobject to 0, in case it had previously been used */
-       memset(&sysdev->kobj, 0x00, sizeof(struct kobject));
-
-       /* Make sure the kset is set */
-       sysdev->kobj.kset = &cls->kset;
-
-       /* Register the object */
-       error = kobject_init_and_add(&sysdev->kobj, &ktype_sysdev, NULL,
-                                    "%s%d", kobject_name(&cls->kset.kobj),
-                                    sysdev->id);
-
-       if (!error) {
-               struct sysdev_driver *drv;
-
-               pr_debug("Registering sys device '%s'\n",
-                        kobject_name(&sysdev->kobj));
-
-               mutex_lock(&sysdev_drivers_lock);
-               /* Generic notification is implicit, because it's that
-                * code that should have called us.
-                */
-
-               /* Notify class auxiliary drivers */
-               list_for_each_entry(drv, &cls->drivers, entry) {
-                       if (drv->add)
-                               drv->add(sysdev);
-               }
-               mutex_unlock(&sysdev_drivers_lock);
-               kobject_uevent(&sysdev->kobj, KOBJ_ADD);
-       }
-
-       return error;
-}
-
-void sysdev_unregister(struct sys_device *sysdev)
-{
-       struct sysdev_driver *drv;
-
-       mutex_lock(&sysdev_drivers_lock);
-       list_for_each_entry(drv, &sysdev->cls->drivers, entry) {
-               if (drv->remove)
-                       drv->remove(sysdev);
-       }
-       mutex_unlock(&sysdev_drivers_lock);
-
-       kobject_put(&sysdev->kobj);
-}
-
-EXPORT_SYMBOL_GPL(sysdev_register);
-EXPORT_SYMBOL_GPL(sysdev_unregister);
-
-#define to_ext_attr(x) container_of(x, struct sysdev_ext_attribute, attr)
-
-ssize_t sysdev_store_ulong(struct sys_device *sysdev,
-                          struct sysdev_attribute *attr,
-                          const char *buf, size_t size)
-{
-       struct sysdev_ext_attribute *ea = to_ext_attr(attr);
-       char *end;
-       unsigned long new = simple_strtoul(buf, &end, 0);
-       if (end == buf)
-               return -EINVAL;
-       *(unsigned long *)(ea->var) = new;
-       /* Always return full write size even if we didn't consume all */
-       return size;
-}
-EXPORT_SYMBOL_GPL(sysdev_store_ulong);
-
-ssize_t sysdev_show_ulong(struct sys_device *sysdev,
-                         struct sysdev_attribute *attr,
-                         char *buf)
-{
-       struct sysdev_ext_attribute *ea = to_ext_attr(attr);
-       return snprintf(buf, PAGE_SIZE, "%lx\n", *(unsigned long *)(ea->var));
-}
-EXPORT_SYMBOL_GPL(sysdev_show_ulong);
-
-ssize_t sysdev_store_int(struct sys_device *sysdev,
-                          struct sysdev_attribute *attr,
-                          const char *buf, size_t size)
-{
-       struct sysdev_ext_attribute *ea = to_ext_attr(attr);
-       char *end;
-       long new = simple_strtol(buf, &end, 0);
-       if (end == buf || new > INT_MAX || new < INT_MIN)
-               return -EINVAL;
-       *(int *)(ea->var) = new;
-       /* Always return full write size even if we didn't consume all */
-       return size;
-}
-EXPORT_SYMBOL_GPL(sysdev_store_int);
-
-ssize_t sysdev_show_int(struct sys_device *sysdev,
-                         struct sysdev_attribute *attr,
-                         char *buf)
-{
-       struct sysdev_ext_attribute *ea = to_ext_attr(attr);
-       return snprintf(buf, PAGE_SIZE, "%d\n", *(int *)(ea->var));
-}
-EXPORT_SYMBOL_GPL(sysdev_show_int);
-
index fda56bde36b836cc7fa4a8bee2c1bc92b0e5a43d..0def898a1d159c12d2543df6ffd6264932a7affc 100644 (file)
@@ -19,6 +19,7 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
                                   struct bcma_device *core_cc,
                                   struct bcma_device *core_mips);
 #ifdef CONFIG_PM
+int bcma_bus_suspend(struct bcma_bus *bus);
 int bcma_bus_resume(struct bcma_bus *bus);
 #endif
 
index 443b83a2fd7aa012c1c6835137e2642a79499b41..f59244e3397137ca9b6a6d311612dd53db721646 100644 (file)
@@ -235,38 +235,32 @@ static void bcma_host_pci_remove(struct pci_dev *dev)
 }
 
 #ifdef CONFIG_PM
-static int bcma_host_pci_suspend(struct pci_dev *dev, pm_message_t state)
+static int bcma_host_pci_suspend(struct device *dev)
 {
-       /* Host specific */
-       pci_save_state(dev);
-       pci_disable_device(dev);
-       pci_set_power_state(dev, pci_choose_state(dev, state));
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct bcma_bus *bus = pci_get_drvdata(pdev);
 
-       return 0;
+       bus->mapped_core = NULL;
+
+       return bcma_bus_suspend(bus);
 }
 
-static int bcma_host_pci_resume(struct pci_dev *dev)
+static int bcma_host_pci_resume(struct device *dev)
 {
-       struct bcma_bus *bus = pci_get_drvdata(dev);
-       int err;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct bcma_bus *bus = pci_get_drvdata(pdev);
 
-       /* Host specific */
-       pci_set_power_state(dev, 0);
-       err = pci_enable_device(dev);
-       if (err)
-               return err;
-       pci_restore_state(dev);
+       return bcma_bus_resume(bus);
+}
 
-       /* Bus specific */
-       err = bcma_bus_resume(bus);
-       if (err)
-               return err;
+static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
+                        bcma_host_pci_resume);
+#define BCMA_PM_OPS    (&bcma_pm_ops)
 
-       return 0;
-}
 #else /* CONFIG_PM */
-# define bcma_host_pci_suspend NULL
-# define bcma_host_pci_resume  NULL
+
+#define BCMA_PM_OPS     NULL
+
 #endif /* CONFIG_PM */
 
 static DEFINE_PCI_DEVICE_TABLE(bcma_pci_bridge_tbl) = {
@@ -284,8 +278,7 @@ static struct pci_driver bcma_pci_bridge_driver = {
        .id_table = bcma_pci_bridge_tbl,
        .probe = bcma_host_pci_probe,
        .remove = bcma_host_pci_remove,
-       .suspend = bcma_host_pci_suspend,
-       .resume = bcma_host_pci_resume,
+       .driver.pm = BCMA_PM_OPS,
 };
 
 int __init bcma_host_pci_init(void)
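The host-PCI conversion above follows the generic dev_pm_ops pattern: the hand-rolled pci_save_state()/pci_set_power_state() calls are dropped because the PCI core performs that work for drivers that supply driver.pm, and only the bus-specific suspend/resume remains in the callbacks. A compressed sketch of that shape (hypothetical foo driver; it assumes only that SIMPLE_DEV_PM_OPS and struct pci_driver behave as used in this patch):

	#ifdef CONFIG_PM
	static int foo_suspend(struct device *dev) { /* quiesce device state */ return 0; }
	static int foo_resume(struct device *dev)  { /* restore device state */ return 0; }
	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
	#define FOO_PM_OPS	(&foo_pm_ops)
	#else
	#define FOO_PM_OPS	NULL
	#endif

	static struct pci_driver foo_driver = {
		.name		= "foo",
		/* .id_table, .probe, .remove as usual */
		.driver.pm	= FOO_PM_OPS,
	};
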
index 10f92b371e582bf6d371f425b17dd1dfe5629afd..febbc0a1222ae1444acea8312bab18b7163fbcdb 100644 (file)
@@ -241,6 +241,21 @@ int __init bcma_bus_early_register(struct bcma_bus *bus,
 }
 
 #ifdef CONFIG_PM
+int bcma_bus_suspend(struct bcma_bus *bus)
+{
+       struct bcma_device *core;
+
+       list_for_each_entry(core, &bus->cores, list) {
+               struct device_driver *drv = core->dev.driver;
+               if (drv) {
+                       struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+                       if (adrv->suspend)
+                               adrv->suspend(core);
+               }
+       }
+       return 0;
+}
+
 int bcma_bus_resume(struct bcma_bus *bus)
 {
        struct bcma_device *core;
@@ -252,6 +267,15 @@ int bcma_bus_resume(struct bcma_bus *bus)
                bcma_core_chipcommon_init(&bus->drv_cc);
        }
 
+       list_for_each_entry(core, &bus->cores, list) {
+               struct device_driver *drv = core->dev.driver;
+               if (drv) {
+                       struct bcma_driver *adrv = container_of(drv, struct bcma_driver, drv);
+                       if (adrv->resume)
+                               adrv->resume(core);
+               }
+       }
+
        return 0;
 }
 #endif
index a30aa103f95b33a4567b519250331725dc5f9539..4e4c8a4a5fd3fb4412a19fdbd3f62647b285fd6a 100644 (file)
@@ -317,6 +317,17 @@ config BLK_DEV_NBD
 
          If unsure, say N.
 
+config BLK_DEV_NVME
+       tristate "NVM Express block device"
+       depends on PCI
+       ---help---
+         The NVM Express driver is for solid state drives directly
+         connected to the PCI or PCI Express bus.  If you know you
+         don't have one of these, it is safe to answer N.
+
+         To compile this driver as a module, choose M here: the
+         module will be called nvme.
+
 config BLK_DEV_OSD
        tristate "OSD object-as-blkdev support"
        depends on SCSI_OSD_ULD
index ad7b74a44ef3dd3e1e1c39f7e44855e45e0bfd56..5b795059f8fb76107b5f2950dd88c5c9696ad4ee 100644 (file)
@@ -23,6 +23,7 @@ obj-$(CONFIG_XILINX_SYSACE)   += xsysace.o
 obj-$(CONFIG_CDROM_PKTCDVD)    += pktcdvd.o
 obj-$(CONFIG_MG_DISK)          += mg_disk.o
 obj-$(CONFIG_SUNVDC)           += sunvdc.o
+obj-$(CONFIG_BLK_DEV_NVME)     += nvme.o
 obj-$(CONFIG_BLK_DEV_OSD)      += osdblk.o
 
 obj-$(CONFIG_BLK_DEV_UMEM)     += umem.o
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
new file mode 100644 (file)
index 0000000..c1dc4d8
--- /dev/null
@@ -0,0 +1,1739 @@
+/*
+ * NVM Express device driver
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#include <linux/nvme.h>
+#include <linux/bio.h>
+#include <linux/bitops.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/fs.h>
+#include <linux/genhd.h>
+#include <linux/idr.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kdev_t.h>
+#include <linux/kthread.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pci.h>
+#include <linux/poison.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#define NVME_Q_DEPTH 1024
+#define SQ_SIZE(depth)         (depth * sizeof(struct nvme_command))
+#define CQ_SIZE(depth)         (depth * sizeof(struct nvme_completion))
+#define NVME_MINORS 64
+#define NVME_IO_TIMEOUT        (5 * HZ)
+#define ADMIN_TIMEOUT  (60 * HZ)
+
+static int nvme_major;
+module_param(nvme_major, int, 0);
+
+static int use_threaded_interrupts;
+module_param(use_threaded_interrupts, int, 0);
+
+static DEFINE_SPINLOCK(dev_list_lock);
+static LIST_HEAD(dev_list);
+static struct task_struct *nvme_thread;
+
+/*
+ * Represents an NVM Express device.  Each nvme_dev is a PCI function.
+ */
+struct nvme_dev {
+       struct list_head node;
+       struct nvme_queue **queues;
+       u32 __iomem *dbs;
+       struct pci_dev *pci_dev;
+       struct dma_pool *prp_page_pool;
+       struct dma_pool *prp_small_pool;
+       int instance;
+       int queue_count;
+       int db_stride;
+       u32 ctrl_config;
+       struct msix_entry *entry;
+       struct nvme_bar __iomem *bar;
+       struct list_head namespaces;
+       char serial[20];
+       char model[40];
+       char firmware_rev[8];
+};
+
+/*
+ * An NVM Express namespace is equivalent to a SCSI LUN
+ */
+struct nvme_ns {
+       struct list_head list;
+
+       struct nvme_dev *dev;
+       struct request_queue *queue;
+       struct gendisk *disk;
+
+       int ns_id;
+       int lba_shift;
+};
+
+/*
+ * An NVM Express queue.  Each device has at least two (one for admin
+ * commands and one for I/O commands).
+ */
+struct nvme_queue {
+       struct device *q_dmadev;
+       struct nvme_dev *dev;
+       spinlock_t q_lock;
+       struct nvme_command *sq_cmds;
+       volatile struct nvme_completion *cqes;
+       dma_addr_t sq_dma_addr;
+       dma_addr_t cq_dma_addr;
+       wait_queue_head_t sq_full;
+       wait_queue_t sq_cong_wait;
+       struct bio_list sq_cong;
+       u32 __iomem *q_db;
+       u16 q_depth;
+       u16 cq_vector;
+       u16 sq_head;
+       u16 sq_tail;
+       u16 cq_head;
+       u16 cq_phase;
+       unsigned long cmdid_data[];
+};
+
+/*
+ * Check we didn't inadvertently grow the command struct
+ */
+static inline void _nvme_check_size(void)
+{
+       BUILD_BUG_ON(sizeof(struct nvme_rw_command) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_create_cq) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_create_sq) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
+       BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
+       BUILD_BUG_ON(sizeof(struct nvme_lba_range_type) != 64);
+}
+
+typedef void (*nvme_completion_fn)(struct nvme_dev *, void *,
+                                               struct nvme_completion *);
+
+struct nvme_cmd_info {
+       nvme_completion_fn fn;
+       void *ctx;
+       unsigned long timeout;
+};
+
+static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
+{
+       return (void *)&nvmeq->cmdid_data[BITS_TO_LONGS(nvmeq->q_depth)];
+}
+
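+/*
+ * Layout note: cmdid_data begins with a bitmap of q_depth bits (one bit
+ * per outstanding command); the per-command struct nvme_cmd_info array
+ * follows immediately after that bitmap, which is the offset that
+ * nvme_cmd_info() above skips past.
+ */
+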
+/**
+ * alloc_cmdid() - Allocate a Command ID
+ * @nvmeq: The queue that will be used for this command
+ * @ctx: A pointer that will be passed to the handler
+ * @handler: The function to call on completion
+ *
+ * Allocate a Command ID for a queue.  The data passed in will
+ * be passed to the completion handler.  This is implemented by using
+ * the bottom two bits of the ctx pointer to store the handler ID.
+ * Passing in a pointer that's not 4-byte aligned will cause a BUG.
+ * We can change this if it becomes a problem.
+ *
+ * May be called with local interrupts disabled and the q_lock held,
+ * or with interrupts enabled and no locks held.
+ */
+static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
+                               nvme_completion_fn handler, unsigned timeout)
+{
+       int depth = nvmeq->q_depth - 1;
+       struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+       int cmdid;
+
+       do {
+               cmdid = find_first_zero_bit(nvmeq->cmdid_data, depth);
+               if (cmdid >= depth)
+                       return -EBUSY;
+       } while (test_and_set_bit(cmdid, nvmeq->cmdid_data));
+
+       info[cmdid].fn = handler;
+       info[cmdid].ctx = ctx;
+       info[cmdid].timeout = jiffies + timeout;
+       return cmdid;
+}
+
+static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
+                               nvme_completion_fn handler, unsigned timeout)
+{
+       int cmdid;
+       wait_event_killable(nvmeq->sq_full,
+               (cmdid = alloc_cmdid(nvmeq, ctx, handler, timeout)) >= 0);
+       return (cmdid < 0) ? -EINTR : cmdid;
+}
+
+/* Special values must be less than 0x1000 */
+#define CMD_CTX_BASE           ((void *)POISON_POINTER_DELTA)
+#define CMD_CTX_CANCELLED      (0x30C + CMD_CTX_BASE)
+#define CMD_CTX_COMPLETED      (0x310 + CMD_CTX_BASE)
+#define CMD_CTX_INVALID                (0x314 + CMD_CTX_BASE)
+#define CMD_CTX_FLUSH          (0x318 + CMD_CTX_BASE)
+
+static void special_completion(struct nvme_dev *dev, void *ctx,
+                                               struct nvme_completion *cqe)
+{
+       if (ctx == CMD_CTX_CANCELLED)
+               return;
+       if (ctx == CMD_CTX_FLUSH)
+               return;
+       if (ctx == CMD_CTX_COMPLETED) {
+               dev_warn(&dev->pci_dev->dev,
+                               "completed id %d twice on queue %d\n",
+                               cqe->command_id, le16_to_cpup(&cqe->sq_id));
+               return;
+       }
+       if (ctx == CMD_CTX_INVALID) {
+               dev_warn(&dev->pci_dev->dev,
+                               "invalid id %d completed on queue %d\n",
+                               cqe->command_id, le16_to_cpup(&cqe->sq_id));
+               return;
+       }
+
+       dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held.  May not sleep.
+ */
+static void *free_cmdid(struct nvme_queue *nvmeq, int cmdid,
+                                               nvme_completion_fn *fn)
+{
+       void *ctx;
+       struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+       if (cmdid >= nvmeq->q_depth) {
+               *fn = special_completion;
+               return CMD_CTX_INVALID;
+       }
+       *fn = info[cmdid].fn;
+       ctx = info[cmdid].ctx;
+       info[cmdid].fn = special_completion;
+       info[cmdid].ctx = CMD_CTX_COMPLETED;
+       clear_bit(cmdid, nvmeq->cmdid_data);
+       wake_up(&nvmeq->sq_full);
+       return ctx;
+}
+
+static void *cancel_cmdid(struct nvme_queue *nvmeq, int cmdid,
+                                               nvme_completion_fn *fn)
+{
+       void *ctx;
+       struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+       if (fn)
+               *fn = info[cmdid].fn;
+       ctx = info[cmdid].ctx;
+       info[cmdid].fn = special_completion;
+       info[cmdid].ctx = CMD_CTX_CANCELLED;
+       return ctx;
+}
+
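+/*
+ * get_nvmeq() picks the submitting CPU's I/O queue (dev->queues[0] is
+ * the admin queue) and, via get_cpu(), keeps preemption disabled until
+ * put_nvmeq() releases it, so the caller stays on the CPU that owns
+ * that queue.
+ */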
+static struct nvme_queue *get_nvmeq(struct nvme_dev *dev)
+{
+       return dev->queues[get_cpu() + 1];
+}
+
+static void put_nvmeq(struct nvme_queue *nvmeq)
+{
+       put_cpu();
+}
+
+/**
+ * nvme_submit_cmd() - Copy a command into a queue and ring the doorbell
+ * @nvmeq: The queue to use
+ * @cmd: The command to send
+ *
+ * Safe to use from interrupt context
+ */
+static int nvme_submit_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd)
+{
+       unsigned long flags;
+       u16 tail;
+       spin_lock_irqsave(&nvmeq->q_lock, flags);
+       tail = nvmeq->sq_tail;
+       memcpy(&nvmeq->sq_cmds[tail], cmd, sizeof(*cmd));
+       if (++tail == nvmeq->q_depth)
+               tail = 0;
+       writel(tail, nvmeq->q_db);
+       nvmeq->sq_tail = tail;
+       spin_unlock_irqrestore(&nvmeq->q_lock, flags);
+
+       return 0;
+}
+
+/*
+ * The nvme_iod describes the data in an I/O, including the list of PRP
+ * entries.  You can't see it in this data structure because C doesn't let
+ * me express that.  Use nvme_alloc_iod to ensure there's enough space
+ * allocated to store the PRP list.
+ */
+struct nvme_iod {
+       void *private;          /* For the use of the submitter of the I/O */
+       int npages;             /* In the PRP list. 0 means small pool in use */
+       int offset;             /* Of PRP list */
+       int nents;              /* Used in scatterlist */
+       int length;             /* Of data, in bytes */
+       dma_addr_t first_dma;
+       struct scatterlist sg[0];
+};
+
+static __le64 **iod_list(struct nvme_iod *iod)
+{
+       return ((void *)iod) + iod->offset;
+}
+
+/*
+ * Will slightly overestimate the number of pages needed.  This is OK
+ * as it only leads to a small amount of wasted memory for the lifetime of
+ * the I/O.
+ */
+static int nvme_npages(unsigned size)
+{
+       unsigned nprps = DIV_ROUND_UP(size + PAGE_SIZE, PAGE_SIZE);
+       return DIV_ROUND_UP(8 * nprps, PAGE_SIZE - 8);
+}
+
+static struct nvme_iod *
+nvme_alloc_iod(unsigned nseg, unsigned nbytes, gfp_t gfp)
+{
+       struct nvme_iod *iod = kmalloc(sizeof(struct nvme_iod) +
+                               sizeof(__le64 *) * nvme_npages(nbytes) +
+                               sizeof(struct scatterlist) * nseg, gfp);
+
+       if (iod) {
+               iod->offset = offsetof(struct nvme_iod, sg[nseg]);
+               iod->npages = -1;
+               iod->length = nbytes;
+       }
+
+       return iod;
+}
+
+static void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod)
+{
+       const int last_prp = PAGE_SIZE / 8 - 1;
+       int i;
+       __le64 **list = iod_list(iod);
+       dma_addr_t prp_dma = iod->first_dma;
+
+       if (iod->npages == 0)
+               dma_pool_free(dev->prp_small_pool, list[0], prp_dma);
+       for (i = 0; i < iod->npages; i++) {
+               __le64 *prp_list = list[i];
+               dma_addr_t next_prp_dma = le64_to_cpu(prp_list[last_prp]);
+               dma_pool_free(dev->prp_page_pool, prp_list, prp_dma);
+               prp_dma = next_prp_dma;
+       }
+       kfree(iod);
+}
+
+static void requeue_bio(struct nvme_dev *dev, struct bio *bio)
+{
+       struct nvme_queue *nvmeq = get_nvmeq(dev);
+       if (bio_list_empty(&nvmeq->sq_cong))
+               add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+       bio_list_add(&nvmeq->sq_cong, bio);
+       put_nvmeq(nvmeq);
+       wake_up_process(nvme_thread);
+}
+
+static void bio_completion(struct nvme_dev *dev, void *ctx,
+                                               struct nvme_completion *cqe)
+{
+       struct nvme_iod *iod = ctx;
+       struct bio *bio = iod->private;
+       u16 status = le16_to_cpup(&cqe->status) >> 1;
+
+       dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+                       bio_data_dir(bio) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+       nvme_free_iod(dev, iod);
+       if (status) {
+               bio_endio(bio, -EIO);
+       } else if (bio->bi_vcnt > bio->bi_idx) {
+               requeue_bio(dev, bio);
+       } else {
+               bio_endio(bio, 0);
+       }
+}
+
+/* length is in bytes.  gfp flags indicate whether we may sleep. */
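+/*
+ * PRP list chaining, as implemented below: when a list page fills up,
+ * its final entry is replaced by the DMA address of a freshly allocated
+ * list page and the displaced entry becomes entry 0 of that new page,
+ * letting the controller follow the chain.
+ */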
+static int nvme_setup_prps(struct nvme_dev *dev,
+                       struct nvme_common_command *cmd, struct nvme_iod *iod,
+                       int total_len, gfp_t gfp)
+{
+       struct dma_pool *pool;
+       int length = total_len;
+       struct scatterlist *sg = iod->sg;
+       int dma_len = sg_dma_len(sg);
+       u64 dma_addr = sg_dma_address(sg);
+       int offset = offset_in_page(dma_addr);
+       __le64 *prp_list;
+       __le64 **list = iod_list(iod);
+       dma_addr_t prp_dma;
+       int nprps, i;
+
+       cmd->prp1 = cpu_to_le64(dma_addr);
+       length -= (PAGE_SIZE - offset);
+       if (length <= 0)
+               return total_len;
+
+       dma_len -= (PAGE_SIZE - offset);
+       if (dma_len) {
+               dma_addr += (PAGE_SIZE - offset);
+       } else {
+               sg = sg_next(sg);
+               dma_addr = sg_dma_address(sg);
+               dma_len = sg_dma_len(sg);
+       }
+
+       if (length <= PAGE_SIZE) {
+               cmd->prp2 = cpu_to_le64(dma_addr);
+               return total_len;
+       }
+
+       nprps = DIV_ROUND_UP(length, PAGE_SIZE);
+       if (nprps <= (256 / 8)) {
+               pool = dev->prp_small_pool;
+               iod->npages = 0;
+       } else {
+               pool = dev->prp_page_pool;
+               iod->npages = 1;
+       }
+
+       prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+       if (!prp_list) {
+               cmd->prp2 = cpu_to_le64(dma_addr);
+               iod->npages = -1;
+               return (total_len - length) + PAGE_SIZE;
+       }
+       list[0] = prp_list;
+       iod->first_dma = prp_dma;
+       cmd->prp2 = cpu_to_le64(prp_dma);
+       i = 0;
+       for (;;) {
+               if (i == PAGE_SIZE / 8) {
+                       __le64 *old_prp_list = prp_list;
+                       prp_list = dma_pool_alloc(pool, gfp, &prp_dma);
+                       if (!prp_list)
+                               return total_len - length;
+                       list[iod->npages++] = prp_list;
+                       prp_list[0] = old_prp_list[i - 1];
+                       old_prp_list[i - 1] = cpu_to_le64(prp_dma);
+                       i = 1;
+               }
+               prp_list[i++] = cpu_to_le64(dma_addr);
+               dma_len -= PAGE_SIZE;
+               dma_addr += PAGE_SIZE;
+               length -= PAGE_SIZE;
+               if (length <= 0)
+                       break;
+               if (dma_len > 0)
+                       continue;
+               BUG_ON(dma_len < 0);
+               sg = sg_next(sg);
+               dma_addr = sg_dma_address(sg);
+               dma_len = sg_dma_len(sg);
+       }
+
+       return total_len;
+}
+
+/* NVMe scatterlists require no holes in the virtual address */
+#define BIOVEC_NOT_VIRT_MERGEABLE(vec1, vec2)  ((vec2)->bv_offset || \
+                       (((vec1)->bv_offset + (vec1)->bv_len) % PAGE_SIZE))
+
+static int nvme_map_bio(struct device *dev, struct nvme_iod *iod,
+               struct bio *bio, enum dma_data_direction dma_dir, int psegs)
+{
+       struct bio_vec *bvec, *bvprv = NULL;
+       struct scatterlist *sg = NULL;
+       int i, old_idx, length = 0, nsegs = 0;
+
+       sg_init_table(iod->sg, psegs);
+       old_idx = bio->bi_idx;
+       bio_for_each_segment(bvec, bio, i) {
+               if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
+                       sg->length += bvec->bv_len;
+               } else {
+                       if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
+                               break;
+                       sg = sg ? sg + 1 : iod->sg;
+                       sg_set_page(sg, bvec->bv_page, bvec->bv_len,
+                                                       bvec->bv_offset);
+                       nsegs++;
+               }
+               length += bvec->bv_len;
+               bvprv = bvec;
+       }
+       bio->bi_idx = i;
+       iod->nents = nsegs;
+       sg_mark_end(sg);
+       if (dma_map_sg(dev, iod->sg, iod->nents, dma_dir) == 0) {
+               bio->bi_idx = old_idx;
+               return -ENOMEM;
+       }
+       return length;
+}
+
+static int nvme_submit_flush(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+                                                               int cmdid)
+{
+       struct nvme_command *cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+       memset(cmnd, 0, sizeof(*cmnd));
+       cmnd->common.opcode = nvme_cmd_flush;
+       cmnd->common.command_id = cmdid;
+       cmnd->common.nsid = cpu_to_le32(ns->ns_id);
+
+       if (++nvmeq->sq_tail == nvmeq->q_depth)
+               nvmeq->sq_tail = 0;
+       writel(nvmeq->sq_tail, nvmeq->q_db);
+
+       return 0;
+}
+
+static int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns)
+{
+       int cmdid = alloc_cmdid(nvmeq, (void *)CMD_CTX_FLUSH,
+                                       special_completion, NVME_IO_TIMEOUT);
+       if (unlikely(cmdid < 0))
+               return cmdid;
+
+       return nvme_submit_flush(nvmeq, ns, cmdid);
+}
+
+/*
+ * Called with local interrupts disabled and the q_lock held.  May not sleep.
+ */
+static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
+                                                               struct bio *bio)
+{
+       struct nvme_command *cmnd;
+       struct nvme_iod *iod;
+       enum dma_data_direction dma_dir;
+       int cmdid, length, result = -ENOMEM;
+       u16 control;
+       u32 dsmgmt;
+       int psegs = bio_phys_segments(ns->queue, bio);
+
+       if ((bio->bi_rw & REQ_FLUSH) && psegs) {
+               result = nvme_submit_flush_data(nvmeq, ns);
+               if (result)
+                       return result;
+       }
+
+       iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+       if (!iod)
+               goto nomem;
+       iod->private = bio;
+
+       result = -EBUSY;
+       cmdid = alloc_cmdid(nvmeq, iod, bio_completion, NVME_IO_TIMEOUT);
+       if (unlikely(cmdid < 0))
+               goto free_iod;
+
+       if ((bio->bi_rw & REQ_FLUSH) && !psegs)
+               return nvme_submit_flush(nvmeq, ns, cmdid);
+
+       control = 0;
+       if (bio->bi_rw & REQ_FUA)
+               control |= NVME_RW_FUA;
+       if (bio->bi_rw & (REQ_FAILFAST_DEV | REQ_RAHEAD))
+               control |= NVME_RW_LR;
+
+       dsmgmt = 0;
+       if (bio->bi_rw & REQ_RAHEAD)
+               dsmgmt |= NVME_RW_DSM_FREQ_PREFETCH;
+
+       cmnd = &nvmeq->sq_cmds[nvmeq->sq_tail];
+
+       memset(cmnd, 0, sizeof(*cmnd));
+       if (bio_data_dir(bio)) {
+               cmnd->rw.opcode = nvme_cmd_write;
+               dma_dir = DMA_TO_DEVICE;
+       } else {
+               cmnd->rw.opcode = nvme_cmd_read;
+               dma_dir = DMA_FROM_DEVICE;
+       }
+
+       result = nvme_map_bio(nvmeq->q_dmadev, iod, bio, dma_dir, psegs);
+       if (result < 0)
+               goto free_iod;
+       length = result;
+
+       cmnd->rw.command_id = cmdid;
+       cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
+       length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
+                                                               GFP_ATOMIC);
+       cmnd->rw.slba = cpu_to_le64(bio->bi_sector >> (ns->lba_shift - 9));
+       cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
+       cmnd->rw.control = cpu_to_le16(control);
+       cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
+
+       bio->bi_sector += length >> 9;
+
+       if (++nvmeq->sq_tail == nvmeq->q_depth)
+               nvmeq->sq_tail = 0;
+       writel(nvmeq->sq_tail, nvmeq->q_db);
+
+       return 0;
+
+ free_iod:
+       nvme_free_iod(nvmeq->dev, iod);
+ nomem:
+       return result;
+}
+
+static void nvme_make_request(struct request_queue *q, struct bio *bio)
+{
+       struct nvme_ns *ns = q->queuedata;
+       struct nvme_queue *nvmeq = get_nvmeq(ns->dev);
+       int result = -EBUSY;
+
+       spin_lock_irq(&nvmeq->q_lock);
+       if (bio_list_empty(&nvmeq->sq_cong))
+               result = nvme_submit_bio_queue(nvmeq, ns, bio);
+       if (unlikely(result)) {
+               if (bio_list_empty(&nvmeq->sq_cong))
+                       add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
+               bio_list_add(&nvmeq->sq_cong, bio);
+       }
+
+       spin_unlock_irq(&nvmeq->q_lock);
+       put_nvmeq(nvmeq);
+}
+
+static irqreturn_t nvme_process_cq(struct nvme_queue *nvmeq)
+{
+       u16 head, phase;
+
+       head = nvmeq->cq_head;
+       phase = nvmeq->cq_phase;
+
+       for (;;) {
+               void *ctx;
+               nvme_completion_fn fn;
+               struct nvme_completion cqe = nvmeq->cqes[head];
+               if ((le16_to_cpu(cqe.status) & 1) != phase)
+                       break;
+               nvmeq->sq_head = le16_to_cpu(cqe.sq_head);
+               if (++head == nvmeq->q_depth) {
+                       head = 0;
+                       phase = !phase;
+               }
+
+               ctx = free_cmdid(nvmeq, cqe.command_id, &fn);
+               fn(nvmeq->dev, ctx, &cqe);
+       }
+
+       /* If the controller ignores the cq head doorbell and continuously
+        * writes to the queue, it is theoretically possible to wrap around
+        * the queue twice and mistakenly return IRQ_NONE.  Linux only
+        * requires that 0.1% of your interrupts are handled, so this isn't
+        * a big problem.
+        */
+       if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
+               return IRQ_NONE;
+
+       writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+       nvmeq->cq_head = head;
+       nvmeq->cq_phase = phase;
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t nvme_irq(int irq, void *data)
+{
+       irqreturn_t result;
+       struct nvme_queue *nvmeq = data;
+       spin_lock(&nvmeq->q_lock);
+       result = nvme_process_cq(nvmeq);
+       spin_unlock(&nvmeq->q_lock);
+       return result;
+}
+
+static irqreturn_t nvme_irq_check(int irq, void *data)
+{
+       struct nvme_queue *nvmeq = data;
+       struct nvme_completion cqe = nvmeq->cqes[nvmeq->cq_head];
+       if ((le16_to_cpu(cqe.status) & 1) != nvmeq->cq_phase)
+               return IRQ_NONE;
+       return IRQ_WAKE_THREAD;
+}
+
+static void nvme_abort_command(struct nvme_queue *nvmeq, int cmdid)
+{
+       spin_lock_irq(&nvmeq->q_lock);
+       cancel_cmdid(nvmeq, cmdid, NULL);
+       spin_unlock_irq(&nvmeq->q_lock);
+}
+
+struct sync_cmd_info {
+       struct task_struct *task;
+       u32 result;
+       int status;
+};
+
+static void sync_completion(struct nvme_dev *dev, void *ctx,
+                                               struct nvme_completion *cqe)
+{
+       struct sync_cmd_info *cmdinfo = ctx;
+       cmdinfo->result = le32_to_cpup(&cqe->result);
+       cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+       wake_up_process(cmdinfo->task);
+}
+
+/*
+ * Returns 0 on success.  If the result is negative, it's a Linux error code;
+ * if the result is positive, it's an NVM Express status code
+ */
+static int nvme_submit_sync_cmd(struct nvme_queue *nvmeq,
+                       struct nvme_command *cmd, u32 *result, unsigned timeout)
+{
+       int cmdid;
+       struct sync_cmd_info cmdinfo;
+
+       cmdinfo.task = current;
+       cmdinfo.status = -EINTR;
+
+       cmdid = alloc_cmdid_killable(nvmeq, &cmdinfo, sync_completion,
+                                                               timeout);
+       if (cmdid < 0)
+               return cmdid;
+       cmd->common.command_id = cmdid;
+
+       set_current_state(TASK_KILLABLE);
+       nvme_submit_cmd(nvmeq, cmd);
+       schedule();
+
+       if (cmdinfo.status == -EINTR) {
+               nvme_abort_command(nvmeq, cmdid);
+               return -EINTR;
+       }
+
+       if (result)
+               *result = cmdinfo.result;
+
+       return cmdinfo.status;
+}
+
+static int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
+                                                               u32 *result)
+{
+       return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
+}
+
+static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
+{
+       int status;
+       struct nvme_command c;
+
+       memset(&c, 0, sizeof(c));
+       c.delete_queue.opcode = opcode;
+       c.delete_queue.qid = cpu_to_le16(id);
+
+       status = nvme_submit_admin_cmd(dev, &c, NULL);
+       if (status)
+               return -EIO;
+       return 0;
+}
+
+static int adapter_alloc_cq(struct nvme_dev *dev, u16 qid,
+                                               struct nvme_queue *nvmeq)
+{
+       int status;
+       struct nvme_command c;
+       int flags = NVME_QUEUE_PHYS_CONTIG | NVME_CQ_IRQ_ENABLED;
+
+       memset(&c, 0, sizeof(c));
+       c.create_cq.opcode = nvme_admin_create_cq;
+       c.create_cq.prp1 = cpu_to_le64(nvmeq->cq_dma_addr);
+       c.create_cq.cqid = cpu_to_le16(qid);
+       c.create_cq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+       c.create_cq.cq_flags = cpu_to_le16(flags);
+       c.create_cq.irq_vector = cpu_to_le16(nvmeq->cq_vector);
+
+       status = nvme_submit_admin_cmd(dev, &c, NULL);
+       if (status)
+               return -EIO;
+       return 0;
+}
+
+static int adapter_alloc_sq(struct nvme_dev *dev, u16 qid,
+                                               struct nvme_queue *nvmeq)
+{
+       int status;
+       struct nvme_command c;
+       int flags = NVME_QUEUE_PHYS_CONTIG | NVME_SQ_PRIO_MEDIUM;
+
+       memset(&c, 0, sizeof(c));
+       c.create_sq.opcode = nvme_admin_create_sq;
+       c.create_sq.prp1 = cpu_to_le64(nvmeq->sq_dma_addr);
+       c.create_sq.sqid = cpu_to_le16(qid);
+       c.create_sq.qsize = cpu_to_le16(nvmeq->q_depth - 1);
+       c.create_sq.sq_flags = cpu_to_le16(flags);
+       c.create_sq.cqid = cpu_to_le16(qid);
+
+       status = nvme_submit_admin_cmd(dev, &c, NULL);
+       if (status)
+               return -EIO;
+       return 0;
+}
+
+static int adapter_delete_cq(struct nvme_dev *dev, u16 cqid)
+{
+       return adapter_delete_queue(dev, nvme_admin_delete_cq, cqid);
+}
+
+static int adapter_delete_sq(struct nvme_dev *dev, u16 sqid)
+{
+       return adapter_delete_queue(dev, nvme_admin_delete_sq, sqid);
+}
+
+static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
+                                                       dma_addr_t dma_addr)
+{
+       struct nvme_command c;
+
+       memset(&c, 0, sizeof(c));
+       c.identify.opcode = nvme_admin_identify;
+       c.identify.nsid = cpu_to_le32(nsid);
+       c.identify.prp1 = cpu_to_le64(dma_addr);
+       c.identify.cns = cpu_to_le32(cns);
+
+       return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
+                               unsigned dword11, dma_addr_t dma_addr)
+{
+       struct nvme_command c;
+
+       memset(&c, 0, sizeof(c));
+       c.features.opcode = nvme_admin_get_features;
+       c.features.prp1 = cpu_to_le64(dma_addr);
+       c.features.fid = cpu_to_le32(fid);
+       c.features.dword11 = cpu_to_le32(dword11);
+
+       return nvme_submit_admin_cmd(dev, &c, NULL);
+}
+
+static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
+                       unsigned dword11, dma_addr_t dma_addr, u32 *result)
+{
+       struct nvme_command c;
+
+       memset(&c, 0, sizeof(c));
+       c.features.opcode = nvme_admin_set_features;
+       c.features.prp1 = cpu_to_le64(dma_addr);
+       c.features.fid = cpu_to_le32(fid);
+       c.features.dword11 = cpu_to_le32(dword11);
+
+       return nvme_submit_admin_cmd(dev, &c, result);
+}
+
+static void nvme_free_queue(struct nvme_dev *dev, int qid)
+{
+       struct nvme_queue *nvmeq = dev->queues[qid];
+       int vector = dev->entry[nvmeq->cq_vector].vector;
+
+       irq_set_affinity_hint(vector, NULL);
+       free_irq(vector, nvmeq);
+
+       /* Don't tell the adapter to delete the admin queue */
+       if (qid) {
+               adapter_delete_sq(dev, qid);
+               adapter_delete_cq(dev, qid);
+       }
+
+       dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+                               (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+       dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+                                       nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+       kfree(nvmeq);
+}
+
+static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
+                                                       int depth, int vector)
+{
+       struct device *dmadev = &dev->pci_dev->dev;
+       unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+       struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
+       if (!nvmeq)
+               return NULL;
+
+       nvmeq->cqes = dma_alloc_coherent(dmadev, CQ_SIZE(depth),
+                                       &nvmeq->cq_dma_addr, GFP_KERNEL);
+       if (!nvmeq->cqes)
+               goto free_nvmeq;
+       memset((void *)nvmeq->cqes, 0, CQ_SIZE(depth));
+
+       nvmeq->sq_cmds = dma_alloc_coherent(dmadev, SQ_SIZE(depth),
+                                       &nvmeq->sq_dma_addr, GFP_KERNEL);
+       if (!nvmeq->sq_cmds)
+               goto free_cqdma;
+
+       nvmeq->q_dmadev = dmadev;
+       nvmeq->dev = dev;
+       spin_lock_init(&nvmeq->q_lock);
+       nvmeq->cq_head = 0;
+       nvmeq->cq_phase = 1;
+       init_waitqueue_head(&nvmeq->sq_full);
+       init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
+       bio_list_init(&nvmeq->sq_cong);
+       nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+       nvmeq->q_depth = depth;
+       nvmeq->cq_vector = vector;
+
+       return nvmeq;
+
+ free_cqdma:
+       dma_free_coherent(dmadev, CQ_SIZE(nvmeq->q_depth), (void *)nvmeq->cqes,
+                                                       nvmeq->cq_dma_addr);
+ free_nvmeq:
+       kfree(nvmeq);
+       return NULL;
+}
+
+static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
+                                                       const char *name)
+{
+       if (use_threaded_interrupts)
+               return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
+                                       nvme_irq_check, nvme_irq,
+                                       IRQF_DISABLED | IRQF_SHARED,
+                                       name, nvmeq);
+       return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
+                               IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+}
+
+static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
+                                       int qid, int cq_size, int vector)
+{
+       int result;
+       struct nvme_queue *nvmeq = nvme_alloc_queue(dev, qid, cq_size, vector);
+
+       if (!nvmeq)
+               return ERR_PTR(-ENOMEM);
+
+       result = adapter_alloc_cq(dev, qid, nvmeq);
+       if (result < 0)
+               goto free_nvmeq;
+
+       result = adapter_alloc_sq(dev, qid, nvmeq);
+       if (result < 0)
+               goto release_cq;
+
+       result = queue_request_irq(dev, nvmeq, "nvme");
+       if (result < 0)
+               goto release_sq;
+
+       return nvmeq;
+
+ release_sq:
+       adapter_delete_sq(dev, qid);
+ release_cq:
+       adapter_delete_cq(dev, qid);
+ free_nvmeq:
+       dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+                               (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+       dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+                                       nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+       kfree(nvmeq);
+       return ERR_PTR(result);
+}
+
+static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
+{
+       int result;
+       u32 aqa;
+       u64 cap;
+       unsigned long timeout;
+       struct nvme_queue *nvmeq;
+
+       dev->dbs = ((void __iomem *)dev->bar) + 4096;
+
+       nvmeq = nvme_alloc_queue(dev, 0, 64, 0);
+       if (!nvmeq)
+               return -ENOMEM;
+
+       aqa = nvmeq->q_depth - 1;
+       aqa |= aqa << 16;
+
+       dev->ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
+       dev->ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
+       dev->ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
+       dev->ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
+
+       writel(0, &dev->bar->cc);
+       writel(aqa, &dev->bar->aqa);
+       writeq(nvmeq->sq_dma_addr, &dev->bar->asq);
+       writeq(nvmeq->cq_dma_addr, &dev->bar->acq);
+       writel(dev->ctrl_config, &dev->bar->cc);
+
+       cap = readq(&dev->bar->cap);
+       timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
+       dev->db_stride = NVME_CAP_STRIDE(cap);
+
+       while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
+               msleep(100);
+               if (fatal_signal_pending(current))
+                       return -EINTR;
+               if (time_after(jiffies, timeout)) {
+                       dev_err(&dev->pci_dev->dev,
+                               "Device not ready; aborting initialisation\n");
+                       return -ENODEV;
+               }
+       }
+
+       result = queue_request_irq(dev, nvmeq, "nvme admin");
+       dev->queues[0] = nvmeq;
+       return result;
+}
+
+static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
+                               unsigned long addr, unsigned length)
+{
+       int i, err, count, nents, offset;
+       struct scatterlist *sg;
+       struct page **pages;
+       struct nvme_iod *iod;
+
+       if (addr & 3)
+               return ERR_PTR(-EINVAL);
+       if (!length)
+               return ERR_PTR(-EINVAL);
+
+       offset = offset_in_page(addr);
+       count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
+       pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+
+       err = get_user_pages_fast(addr, count, 1, pages);
+       if (err < count) {
+               count = err;
+               err = -EFAULT;
+               goto put_pages;
+       }
+
+       iod = nvme_alloc_iod(count, length, GFP_KERNEL);
+       sg = iod->sg;
+       sg_init_table(sg, count);
+       for (i = 0; i < count; i++) {
+               sg_set_page(&sg[i], pages[i],
+                               min_t(int, length, PAGE_SIZE - offset), offset);
+               length -= (PAGE_SIZE - offset);
+               offset = 0;
+       }
+       sg_mark_end(&sg[i - 1]);
+       iod->nents = count;
+
+       err = -ENOMEM;
+       nents = dma_map_sg(&dev->pci_dev->dev, sg, count,
+                               write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+       if (!nents)
+               goto free_iod;
+
+       kfree(pages);
+       return iod;
+
+ free_iod:
+       kfree(iod);
+ put_pages:
+       for (i = 0; i < count; i++)
+               put_page(pages[i]);
+       kfree(pages);
+       return ERR_PTR(err);
+}
+
+static void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
+                       struct nvme_iod *iod)
+{
+       int i;
+
+       dma_unmap_sg(&dev->pci_dev->dev, iod->sg, iod->nents,
+                               write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+       for (i = 0; i < iod->nents; i++)
+               put_page(sg_page(&iod->sg[i]));
+}
+
+static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
+{
+       struct nvme_dev *dev = ns->dev;
+       struct nvme_queue *nvmeq;
+       struct nvme_user_io io;
+       struct nvme_command c;
+       unsigned length;
+       int status;
+       struct nvme_iod *iod;
+
+       if (copy_from_user(&io, uio, sizeof(io)))
+               return -EFAULT;
+       length = (io.nblocks + 1) << ns->lba_shift;
+
+       switch (io.opcode) {
+       case nvme_cmd_write:
+       case nvme_cmd_read:
+       case nvme_cmd_compare:
+               iod = nvme_map_user_pages(dev, io.opcode & 1, io.addr, length);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (IS_ERR(iod))
+               return PTR_ERR(iod);
+
+       memset(&c, 0, sizeof(c));
+       c.rw.opcode = io.opcode;
+       c.rw.flags = io.flags;
+       c.rw.nsid = cpu_to_le32(ns->ns_id);
+       c.rw.slba = cpu_to_le64(io.slba);
+       c.rw.length = cpu_to_le16(io.nblocks);
+       c.rw.control = cpu_to_le16(io.control);
+       c.rw.dsmgmt = cpu_to_le16(io.dsmgmt);
+       c.rw.reftag = io.reftag;
+       c.rw.apptag = io.apptag;
+       c.rw.appmask = io.appmask;
+       /* XXX: metadata */
+       length = nvme_setup_prps(dev, &c.common, iod, length, GFP_KERNEL);
+
+       nvmeq = get_nvmeq(dev);
+       /*
+        * Since nvme_submit_sync_cmd sleeps, we can't keep preemption
+        * disabled.  We may be preempted at any point, and be rescheduled
+        * to a different CPU.  That will cause cacheline bouncing, but no
+        * additional races since q_lock already protects against other CPUs.
+        */
+       put_nvmeq(nvmeq);
+       if (length != (io.nblocks + 1) << ns->lba_shift)
+               status = -ENOMEM;
+       else
+               status = nvme_submit_sync_cmd(nvmeq, &c, NULL, NVME_IO_TIMEOUT);
+
+       nvme_unmap_user_pages(dev, io.opcode & 1, iod);
+       nvme_free_iod(dev, iod);
+       return status;
+}
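
nvme_submit_io() is the handler behind NVME_IOCTL_SUBMIT_IO: it copies a struct nvme_user_io in from user space, pins the data buffer, translates the fields into a read, write or compare command, and runs it synchronously on one of the I/O queues (the low opcode bit selects the DMA direction). A user-space sketch of driving it; the header path and the exact struct nvme_user_io layout are assumptions here, only the field names used by the code above are relied on, so treat this as illustrative rather than an ABI reference:

/* Illustrative only: assumes <linux/nvme.h> from this patch exposes
 * struct nvme_user_io, the nvme_cmd_read opcode and NVME_IOCTL_SUBMIT_IO. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme.h>

int main(void)
{
        int fd = open("/dev/nvme0n1", O_RDONLY);
        void *buf;
        struct nvme_user_io io;

        if (fd < 0 || posix_memalign(&buf, 4096, 4096))
                return 1;

        memset(&io, 0, sizeof(io));
        io.opcode  = nvme_cmd_read;        /* opcode & 1 == 0 -> data flows to host */
        io.addr    = (unsigned long)buf;   /* must be at least dword aligned */
        io.slba    = 0;                    /* starting LBA */
        io.nblocks = 0;                    /* zero-based: 0 means one block */

        if (ioctl(fd, NVME_IOCTL_SUBMIT_IO, &io) < 0)
                perror("NVME_IOCTL_SUBMIT_IO");

        free(buf);
        close(fd);
        return 0;
}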
+
+static int nvme_user_admin_cmd(struct nvme_ns *ns,
+                                       struct nvme_admin_cmd __user *ucmd)
+{
+       struct nvme_dev *dev = ns->dev;
+       struct nvme_admin_cmd cmd;
+       struct nvme_command c;
+       int status, length;
+       struct nvme_iod *iod;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+       if (copy_from_user(&cmd, ucmd, sizeof(cmd)))
+               return -EFAULT;
+
+       memset(&c, 0, sizeof(c));
+       c.common.opcode = cmd.opcode;
+       c.common.flags = cmd.flags;
+       c.common.nsid = cpu_to_le32(cmd.nsid);
+       c.common.cdw2[0] = cpu_to_le32(cmd.cdw2);
+       c.common.cdw2[1] = cpu_to_le32(cmd.cdw3);
+       c.common.cdw10[0] = cpu_to_le32(cmd.cdw10);
+       c.common.cdw10[1] = cpu_to_le32(cmd.cdw11);
+       c.common.cdw10[2] = cpu_to_le32(cmd.cdw12);
+       c.common.cdw10[3] = cpu_to_le32(cmd.cdw13);
+       c.common.cdw10[4] = cpu_to_le32(cmd.cdw14);
+       c.common.cdw10[5] = cpu_to_le32(cmd.cdw15);
+
+       length = cmd.data_len;
+       if (cmd.data_len) {
+               iod = nvme_map_user_pages(dev, cmd.opcode & 1, cmd.addr,
+                                                               length);
+               if (IS_ERR(iod))
+                       return PTR_ERR(iod);
+               length = nvme_setup_prps(dev, &c.common, iod, length,
+                                                               GFP_KERNEL);
+       }
+
+       if (length != cmd.data_len)
+               status = -ENOMEM;
+       else
+               status = nvme_submit_admin_cmd(dev, &c, NULL);
+
+       if (cmd.data_len) {
+               nvme_unmap_user_pages(dev, cmd.opcode & 1, iod);
+               nvme_free_iod(dev, iod);
+       }
+       return status;
+}
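
nvme_user_admin_cmd() is the CAP_SYS_ADMIN-only pass-through behind NVME_IOCTL_ADMIN_CMD: an arbitrary admin command is copied in, its optional data buffer is mapped, and the command runs on the admin queue. A hedged sketch of issuing Identify Controller through it; the struct nvme_admin_cmd layout, the Identify opcode (0x06) and the CNS encoding are assumptions taken from the fields used above and the NVMe specification:

/* Illustrative only: assumes struct nvme_admin_cmd and NVME_IOCTL_ADMIN_CMD
 * from this patch's <linux/nvme.h>; Identify Controller is opcode 0x06, CNS 1. */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/nvme.h>

int main(void)
{
        int fd = open("/dev/nvme0n1", O_RDONLY);
        void *id;
        struct nvme_admin_cmd cmd;

        if (fd < 0 || posix_memalign(&id, 4096, 4096))
                return 1;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode   = 0x06;                 /* Identify */
        cmd.addr     = (unsigned long)id;
        cmd.data_len = 4096;
        cmd.cdw10    = 1;                    /* CNS = 1: identify the controller */

        if (ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd) < 0)
                perror("NVME_IOCTL_ADMIN_CMD");
        else
                printf("model: %.40s\n", (char *)id + 24);   /* MN at byte 24 */

        free(id);
        close(fd);
        return 0;
}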
+
+static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
+                                                       unsigned long arg)
+{
+       struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+       switch (cmd) {
+       case NVME_IOCTL_ID:
+               return ns->ns_id;
+       case NVME_IOCTL_ADMIN_CMD:
+               return nvme_user_admin_cmd(ns, (void __user *)arg);
+       case NVME_IOCTL_SUBMIT_IO:
+               return nvme_submit_io(ns, (void __user *)arg);
+       default:
+               return -ENOTTY;
+       }
+}
+
+static const struct block_device_operations nvme_fops = {
+       .owner          = THIS_MODULE,
+       .ioctl          = nvme_ioctl,
+       .compat_ioctl   = nvme_ioctl,
+};
+
+static void nvme_timeout_ios(struct nvme_queue *nvmeq)
+{
+       int depth = nvmeq->q_depth - 1;
+       struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+       unsigned long now = jiffies;
+       int cmdid;
+
+       for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+               void *ctx;
+               nvme_completion_fn fn;
+               static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
+
+               if (!time_after(now, info[cmdid].timeout))
+                       continue;
+               dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
+               ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+               fn(nvmeq->dev, ctx, &cqe);
+       }
+}
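
nvme_timeout_ios() walks the command-id bitmap and force-completes anything that has outlived its per-command timeout by handing its completion callback a synthetic entry whose status is Command Abort Requested. The shift by one reflects the completion-status layout: bit 0 of the 16-bit field is the phase tag, so the status code proper sits in bits 15:1. A minimal sketch of decoding a status field under that layout (the layout is an assumption drawn from the shift above and the NVMe specification):

#include <stdint.h>
#include <stdio.h>

/* bit 0: phase tag; bits 15:1: status (SC, SCT, More, DNR) */
static unsigned cqe_status(uint16_t status)
{
        return status >> 1;
}

int main(void)
{
        uint16_t aborted = 0x7 << 1;   /* NVME_SC_ABORT_REQ, shifted past the phase bit */

        printf("status 0x%x, phase %u\n", cqe_status(aborted), aborted & 1);
        return 0;
}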
+
+static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
+{
+       while (bio_list_peek(&nvmeq->sq_cong)) {
+               struct bio *bio = bio_list_pop(&nvmeq->sq_cong);
+               struct nvme_ns *ns = bio->bi_bdev->bd_disk->private_data;
+               if (nvme_submit_bio_queue(nvmeq, ns, bio)) {
+                       bio_list_add_head(&nvmeq->sq_cong, bio);
+                       break;
+               }
+               if (bio_list_empty(&nvmeq->sq_cong))
+                       remove_wait_queue(&nvmeq->sq_full,
+                                                       &nvmeq->sq_cong_wait);
+       }
+}
+
+static int nvme_kthread(void *data)
+{
+       struct nvme_dev *dev;
+
+       while (!kthread_should_stop()) {
+               __set_current_state(TASK_RUNNING);
+               spin_lock(&dev_list_lock);
+               list_for_each_entry(dev, &dev_list, node) {
+                       int i;
+                       for (i = 0; i < dev->queue_count; i++) {
+                               struct nvme_queue *nvmeq = dev->queues[i];
+                               if (!nvmeq)
+                                       continue;
+                               spin_lock_irq(&nvmeq->q_lock);
+                               if (nvme_process_cq(nvmeq))
+                                       printk("process_cq did something\n");
+                               nvme_timeout_ios(nvmeq);
+                               nvme_resubmit_bios(nvmeq);
+                               spin_unlock_irq(&nvmeq->q_lock);
+                       }
+               }
+               spin_unlock(&dev_list_lock);
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout(HZ);
+       }
+       return 0;
+}
+
+static DEFINE_IDA(nvme_index_ida);
+
+static int nvme_get_ns_idx(void)
+{
+       int index, error;
+
+       do {
+               if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
+                       return -1;
+
+               spin_lock(&dev_list_lock);
+               error = ida_get_new(&nvme_index_ida, &index);
+               spin_unlock(&dev_list_lock);
+       } while (error == -EAGAIN);
+
+       if (error)
+               index = -1;
+       return index;
+}
+
+static void nvme_put_ns_idx(int index)
+{
+       spin_lock(&dev_list_lock);
+       ida_remove(&nvme_index_ida, index);
+       spin_unlock(&dev_list_lock);
+}
+
+static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
+                       struct nvme_id_ns *id, struct nvme_lba_range_type *rt)
+{
+       struct nvme_ns *ns;
+       struct gendisk *disk;
+       int lbaf;
+
+       if (rt->attributes & NVME_LBART_ATTRIB_HIDE)
+               return NULL;
+
+       ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+       if (!ns)
+               return NULL;
+       ns->queue = blk_alloc_queue(GFP_KERNEL);
+       if (!ns->queue)
+               goto out_free_ns;
+       ns->queue->queue_flags = QUEUE_FLAG_DEFAULT;
+       queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
+       queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
+/*     queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); */
+       blk_queue_make_request(ns->queue, nvme_make_request);
+       ns->dev = dev;
+       ns->queue->queuedata = ns;
+
+       disk = alloc_disk(NVME_MINORS);
+       if (!disk)
+               goto out_free_queue;
+       ns->ns_id = nsid;
+       ns->disk = disk;
+       lbaf = id->flbas & 0xf;
+       ns->lba_shift = id->lbaf[lbaf].ds;
+
+       disk->major = nvme_major;
+       disk->minors = NVME_MINORS;
+       disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+       disk->fops = &nvme_fops;
+       disk->private_data = ns;
+       disk->queue = ns->queue;
+       disk->driverfs_dev = &dev->pci_dev->dev;
+       sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
+       set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
+
+       return ns;
+
+ out_free_queue:
+       blk_cleanup_queue(ns->queue);
+ out_free_ns:
+       kfree(ns);
+       return NULL;
+}
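
nvme_alloc_ns() ties a namespace to a request queue and gendisk: lba_shift comes from the currently formatted LBA size, disks are named nvme%dn%d, and the capacity handed to the block layer is converted from namespace blocks to 512-byte sectors. A worked sketch of that conversion, using the same shift:

#include <stdint.h>
#include <stdio.h>

/* block-layer capacity is counted in 512-byte sectors */
static uint64_t capacity_sectors(uint64_t nsze, unsigned lba_shift)
{
        return nsze << (lba_shift - 9);
}

int main(void)
{
        /* 1,000,000 blocks of 4 KiB (lbaf.ds = 12) -> 8,000,000 sectors */
        printf("%llu\n", (unsigned long long)capacity_sectors(1000000, 12));
        return 0;
}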
+
+static void nvme_ns_free(struct nvme_ns *ns)
+{
+       int index = ns->disk->first_minor / NVME_MINORS;
+       put_disk(ns->disk);
+       nvme_put_ns_idx(index);
+       blk_cleanup_queue(ns->queue);
+       kfree(ns);
+}
+
+static int set_queue_count(struct nvme_dev *dev, int count)
+{
+       int status;
+       u32 result;
+       u32 q_count = (count - 1) | ((count - 1) << 16);
+
+       status = nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0,
+                                                               &result);
+       if (status)
+               return -EIO;
+       return min(result & 0xffff, result >> 16) + 1;
+}
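
set_queue_count() drives the Number of Queues feature: both halves of the feature value carry the requested count minus one (submission queues in the lower 16 bits, completion queues in the upper), and the controller replies with the counts it actually granted in the same zero-based form, of which the driver keeps the smaller. A worked sketch of the encoding and of decoding a partial grant:

#include <stdint.h>
#include <stdio.h>

static uint32_t encode_queue_count(unsigned count)
{
        return (count - 1) | ((count - 1) << 16);
}

static unsigned usable_queues(uint32_t result)
{
        unsigned sq = (result & 0xffff) + 1;
        unsigned cq = (result >> 16) + 1;

        return sq < cq ? sq : cq;
}

int main(void)
{
        /* request 8 queues -> 0x00070007; controller grants 4 SQ / 8 CQ -> use 4 */
        printf("request 0x%08x, usable %u\n",
               (unsigned)encode_queue_count(8), usable_queues(0x00070003));
        return 0;
}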
+
+static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
+{
+       int result, cpu, i, nr_io_queues, db_bar_size;
+
+       nr_io_queues = num_online_cpus();
+       result = set_queue_count(dev, nr_io_queues);
+       if (result < 0)
+               return result;
+       if (result < nr_io_queues)
+               nr_io_queues = result;
+
+       /* Deregister the admin queue's interrupt */
+       free_irq(dev->entry[0].vector, dev->queues[0]);
+
+       db_bar_size = 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+       if (db_bar_size > 8192) {
+               iounmap(dev->bar);
+               dev->bar = ioremap(pci_resource_start(dev->pci_dev, 0),
+                                                               db_bar_size);
+               dev->dbs = ((void __iomem *)dev->bar) + 4096;
+               dev->queues[0]->q_db = dev->dbs;
+       }
+
+       for (i = 0; i < nr_io_queues; i++)
+               dev->entry[i].entry = i;
+       for (;;) {
+               result = pci_enable_msix(dev->pci_dev, dev->entry,
+                                                               nr_io_queues);
+               if (result == 0) {
+                       break;
+               } else if (result > 0) {
+                       nr_io_queues = result;
+                       continue;
+               } else {
+                       nr_io_queues = 1;
+                       break;
+               }
+       }
+
+       result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+       /* XXX: handle failure here */
+
+       cpu = cpumask_first(cpu_online_mask);
+       for (i = 0; i < nr_io_queues; i++) {
+               irq_set_affinity_hint(dev->entry[i].vector, get_cpu_mask(cpu));
+               cpu = cpumask_next(cpu, cpu_online_mask);
+       }
+
+       for (i = 0; i < nr_io_queues; i++) {
+               dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
+                                                       NVME_Q_DEPTH, i);
+               if (IS_ERR(dev->queues[i + 1]))
+                       return PTR_ERR(dev->queues[i + 1]);
+               dev->queue_count++;
+       }
+
+       for (; i < num_possible_cpus(); i++) {
+               int target = i % rounddown_pow_of_two(dev->queue_count - 1);
+               dev->queues[i + 1] = dev->queues[target + 1];
+       }
+
+       return 0;
+}
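
nvme_setup_io_queues() requests one queue per online CPU, remaps the doorbell window if the controller's doorbell stride pushes the registers past the initial 8 KiB mapping, retries pci_enable_msix() with whatever vector count is actually available, spreads IRQ affinity across CPUs, and finally points the remaining possible CPUs at existing queues. Each queue pair owns two doorbells spaced (4 << stride) bytes apart starting at offset 4096, which is where the size computation comes from; a worked sketch of that sizing under the same layout:

#include <stdio.h>

/* bytes of BAR 0 needed to reach the last doorbell: 4 KiB of registers plus
 * (I/O queues + admin) pairs of doorbells, each pair 2 * (4 << stride) bytes */
static unsigned long db_bar_size(unsigned nr_io_queues, unsigned db_stride)
{
        return 4096 + ((unsigned long)(nr_io_queues + 1) << (db_stride + 3));
}

int main(void)
{
        printf("%lu\n", db_bar_size(16, 0));   /* 4096 + 17*8    = 4232: 8 KiB map is enough */
        printf("%lu\n", db_bar_size(16, 4));   /* 4096 + 17*128  = 6272 */
        printf("%lu\n", db_bar_size(63, 7));   /* 4096 + 64*1024 = 69632: needs the remap */
        return 0;
}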
+
+static void nvme_free_queues(struct nvme_dev *dev)
+{
+       int i;
+
+       for (i = dev->queue_count - 1; i >= 0; i--)
+               nvme_free_queue(dev, i);
+}
+
+static int __devinit nvme_dev_add(struct nvme_dev *dev)
+{
+       int res, nn, i;
+       struct nvme_ns *ns, *next;
+       struct nvme_id_ctrl *ctrl;
+       struct nvme_id_ns *id_ns;
+       void *mem;
+       dma_addr_t dma_addr;
+
+       res = nvme_setup_io_queues(dev);
+       if (res)
+               return res;
+
+       mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
+                                                               GFP_KERNEL);
+
+       res = nvme_identify(dev, 0, 1, dma_addr);
+       if (res) {
+               res = -EIO;
+               goto out_free;
+       }
+
+       ctrl = mem;
+       nn = le32_to_cpup(&ctrl->nn);
+       memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
+       memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
+       memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
+
+       id_ns = mem;
+       for (i = 1; i <= nn; i++) {
+               res = nvme_identify(dev, i, 0, dma_addr);
+               if (res)
+                       continue;
+
+               if (id_ns->ncap == 0)
+                       continue;
+
+               res = nvme_get_features(dev, NVME_FEAT_LBA_RANGE, i,
+                                                       dma_addr + 4096);
+               if (res)
+                       continue;
+
+               ns = nvme_alloc_ns(dev, i, mem, mem + 4096);
+               if (ns)
+                       list_add_tail(&ns->list, &dev->namespaces);
+       }
+       list_for_each_entry(ns, &dev->namespaces, list)
+               add_disk(ns->disk);
+
+       goto out;
+
+ out_free:
+       list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+               list_del(&ns->list);
+               nvme_ns_free(ns);
+       }
+
+ out:
+       dma_free_coherent(&dev->pci_dev->dev, 8192, mem, dma_addr);
+       return res;
+}
+
+static int nvme_dev_remove(struct nvme_dev *dev)
+{
+       struct nvme_ns *ns, *next;
+
+       spin_lock(&dev_list_lock);
+       list_del(&dev->node);
+       spin_unlock(&dev_list_lock);
+
+       /* TODO: wait all I/O finished or cancel them */
+
+       list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+               list_del(&ns->list);
+               del_gendisk(ns->disk);
+               nvme_ns_free(ns);
+       }
+
+       nvme_free_queues(dev);
+
+       return 0;
+}
+
+static int nvme_setup_prp_pools(struct nvme_dev *dev)
+{
+       struct device *dmadev = &dev->pci_dev->dev;
+       dev->prp_page_pool = dma_pool_create("prp list page", dmadev,
+                                               PAGE_SIZE, PAGE_SIZE, 0);
+       if (!dev->prp_page_pool)
+               return -ENOMEM;
+
+       /* Optimisation for I/Os between 4k and 128k */
+       dev->prp_small_pool = dma_pool_create("prp list 256", dmadev,
+                                               256, 256, 0);
+       if (!dev->prp_small_pool) {
+               dma_pool_destroy(dev->prp_page_pool);
+               return -ENOMEM;
+       }
+       return 0;
+}
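
Two DMA pools back the PRP lists: whole pages for large transfers and a 256-byte pool for the common case. The sizing follows from PRP arithmetic: PRP1 covers the first 4 KiB page, every further page costs one 8-byte list entry, and a transfer of at most 128 KiB touches at most 32 further pages even when misaligned, which fits exactly in 256 bytes. A small sketch of that bound (the exact two-page case uses PRP2 directly, so this slightly overestimates it):

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* upper bound on PRP-list bytes: PRP1 covers the first page, each further
 * page needs one 8-byte list entry */
static unsigned long prp_list_bytes(unsigned long offset, unsigned long len)
{
        unsigned long pages = (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;

        return pages > 1 ? (pages - 1) * 8 : 0;
}

int main(void)
{
        printf("%lu\n", prp_list_bytes(0, 128 * 1024));     /* aligned 128 KiB: 248 bytes */
        printf("%lu\n", prp_list_bytes(512, 128 * 1024));   /* worst case: exactly 256    */
        return 0;
}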
+
+static void nvme_release_prp_pools(struct nvme_dev *dev)
+{
+       dma_pool_destroy(dev->prp_page_pool);
+       dma_pool_destroy(dev->prp_small_pool);
+}
+
+/* XXX: Use an ida or something to let remove / add work correctly */
+static void nvme_set_instance(struct nvme_dev *dev)
+{
+       static int instance;
+       dev->instance = instance++;
+}
+
+static void nvme_release_instance(struct nvme_dev *dev)
+{
+}
+
+static int __devinit nvme_probe(struct pci_dev *pdev,
+                                               const struct pci_device_id *id)
+{
+       int bars, result = -ENOMEM;
+       struct nvme_dev *dev;
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev)
+               return -ENOMEM;
+       dev->entry = kcalloc(num_possible_cpus(), sizeof(*dev->entry),
+                                                               GFP_KERNEL);
+       if (!dev->entry)
+               goto free;
+       dev->queues = kcalloc(num_possible_cpus() + 1, sizeof(void *),
+                                                               GFP_KERNEL);
+       if (!dev->queues)
+               goto free;
+
+       if (pci_enable_device_mem(pdev))
+               goto free;
+       pci_set_master(pdev);
+       bars = pci_select_bars(pdev, IORESOURCE_MEM);
+       if (pci_request_selected_regions(pdev, bars, "nvme"))
+               goto disable;
+
+       INIT_LIST_HEAD(&dev->namespaces);
+       dev->pci_dev = pdev;
+       pci_set_drvdata(pdev, dev);
+       dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+       dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+       nvme_set_instance(dev);
+       dev->entry[0].vector = pdev->irq;
+
+       result = nvme_setup_prp_pools(dev);
+       if (result)
+               goto disable_msix;
+
+       dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
+       if (!dev->bar) {
+               result = -ENOMEM;
+               goto disable_msix;
+       }
+
+       result = nvme_configure_admin_queue(dev);
+       if (result)
+               goto unmap;
+       dev->queue_count++;
+
+       spin_lock(&dev_list_lock);
+       list_add(&dev->node, &dev_list);
+       spin_unlock(&dev_list_lock);
+
+       result = nvme_dev_add(dev);
+       if (result)
+               goto delete;
+
+       return 0;
+
+ delete:
+       spin_lock(&dev_list_lock);
+       list_del(&dev->node);
+       spin_unlock(&dev_list_lock);
+
+       nvme_free_queues(dev);
+ unmap:
+       iounmap(dev->bar);
+ disable_msix:
+       pci_disable_msix(pdev);
+       nvme_release_instance(dev);
+       nvme_release_prp_pools(dev);
+ disable:
+       pci_disable_device(pdev);
+       pci_release_regions(pdev);
+ free:
+       kfree(dev->queues);
+       kfree(dev->entry);
+       kfree(dev);
+       return result;
+}
+
+static void __devexit nvme_remove(struct pci_dev *pdev)
+{
+       struct nvme_dev *dev = pci_get_drvdata(pdev);
+       nvme_dev_remove(dev);
+       pci_disable_msix(pdev);
+       iounmap(dev->bar);
+       nvme_release_instance(dev);
+       nvme_release_prp_pools(dev);
+       pci_disable_device(pdev);
+       pci_release_regions(pdev);
+       kfree(dev->queues);
+       kfree(dev->entry);
+       kfree(dev);
+}
+
+/* These functions are yet to be implemented */
+#define nvme_error_detected NULL
+#define nvme_dump_registers NULL
+#define nvme_link_reset NULL
+#define nvme_slot_reset NULL
+#define nvme_error_resume NULL
+#define nvme_suspend NULL
+#define nvme_resume NULL
+
+static struct pci_error_handlers nvme_err_handler = {
+       .error_detected = nvme_error_detected,
+       .mmio_enabled   = nvme_dump_registers,
+       .link_reset     = nvme_link_reset,
+       .slot_reset     = nvme_slot_reset,
+       .resume         = nvme_error_resume,
+};
+
+/* Move to pci_ids.h later */
+#define PCI_CLASS_STORAGE_EXPRESS      0x010802
+
+static DEFINE_PCI_DEVICE_TABLE(nvme_id_table) = {
+       { PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
+       { 0, }
+};
+MODULE_DEVICE_TABLE(pci, nvme_id_table);
+
+static struct pci_driver nvme_driver = {
+       .name           = "nvme",
+       .id_table       = nvme_id_table,
+       .probe          = nvme_probe,
+       .remove         = __devexit_p(nvme_remove),
+       .suspend        = nvme_suspend,
+       .resume         = nvme_resume,
+       .err_handler    = &nvme_err_handler,
+};
+
+static int __init nvme_init(void)
+{
+       int result = -EBUSY;
+
+       nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
+       if (IS_ERR(nvme_thread))
+               return PTR_ERR(nvme_thread);
+
+       nvme_major = register_blkdev(nvme_major, "nvme");
+       if (nvme_major <= 0)
+               goto kill_kthread;
+
+       result = pci_register_driver(&nvme_driver);
+       if (result)
+               goto unregister_blkdev;
+       return 0;
+
+ unregister_blkdev:
+       unregister_blkdev(nvme_major, "nvme");
+ kill_kthread:
+       kthread_stop(nvme_thread);
+       return result;
+}
+
+static void __exit nvme_exit(void)
+{
+       pci_unregister_driver(&nvme_driver);
+       unregister_blkdev(nvme_major, "nvme");
+       kthread_stop(nvme_thread);
+}
+
+MODULE_AUTHOR("Matthew Wilcox <willy@linux.intel.com>");
+MODULE_LICENSE("GPL");
+MODULE_VERSION("0.8");
+module_init(nvme_init);
+module_exit(nvme_exit);
index 3fd31dec8c9c1980fcdc8f7aa95b28d23addcb5b..a6278e7e61a00bfde01bcb6726524ffb3a06514e 100644 (file)
@@ -380,6 +380,7 @@ static int rbd_get_client(struct rbd_device *rbd_dev, const char *mon_addr,
        rbdc = __rbd_client_find(opt);
        if (rbdc) {
                ceph_destroy_options(opt);
+               kfree(rbd_opts);
 
                /* using an existing client */
                kref_get(&rbdc->kref);
@@ -406,15 +407,15 @@ done_err:
 
 /*
  * Destroy ceph client
+ *
+ * Caller must hold node_lock.
  */
 static void rbd_client_release(struct kref *kref)
 {
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
 
        dout("rbd_release_client %p\n", rbdc);
-       spin_lock(&node_lock);
        list_del(&rbdc->node);
-       spin_unlock(&node_lock);
 
        ceph_destroy_client(rbdc->client);
        kfree(rbdc->rbd_opts);
@@ -427,7 +428,9 @@ static void rbd_client_release(struct kref *kref)
  */
 static void rbd_put_client(struct rbd_device *rbd_dev)
 {
+       spin_lock(&node_lock);
        kref_put(&rbd_dev->rbd_client->kref, rbd_client_release);
+       spin_unlock(&node_lock);
        rbd_dev->rbd_client = NULL;
        rbd_dev->client = NULL;
 }
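
The rbd change narrows a race between looking up a cached client and dropping the last reference to it: list_del() stays inside the kref release function, but node_lock is now taken by the caller around kref_put(), so a concurrent __rbd_client_find() can no longer return an rbd_client whose refcount has already reached zero. This is the usual pattern of holding the lookup lock across kref_put() whenever the release function unlinks the object from the lookup structure; a user-space analogue of the idea, with hypothetical names:

/* User-space analogue of the pattern (build with -pthread); names hypothetical */
#include <pthread.h>
#include <stdlib.h>

struct client {
        int refs;
};

static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;
static struct client *cached;                   /* the lookup structure */

static struct client *client_get(void)         /* lookup + ref, all under the lock */
{
        struct client *c;

        pthread_mutex_lock(&cache_lock);
        c = cached;
        if (c)
                c->refs++;
        pthread_mutex_unlock(&cache_lock);
        return c;
}

static void client_put(struct client *c)
{
        int last;

        pthread_mutex_lock(&cache_lock);        /* same lock as the lookup ...         */
        last = (--c->refs == 0);
        if (last)
                cached = NULL;                  /* ... so the unlink is atomic with it */
        pthread_mutex_unlock(&cache_lock);
        if (last)
                free(c);
}

int main(void)
{
        struct client *c;

        cached = calloc(1, sizeof(*cached));
        cached->refs = 1;
        c = client_get();        /* refs = 2 */
        client_put(c);           /* refs = 1 */
        client_put(cached);      /* refs = 0: unlink and free */
        return 0;
}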
index 4b71647782d03d16c38dd1cb319ed396daa9844c..317c28ce8328bb310c892c8bab4aca60cbbbc4f5 100644 (file)
@@ -194,10 +194,10 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 
 err_out:
        if (bridge->driver->needs_scratch_page) {
-               void *va = page_address(bridge->scratch_page_page);
+               struct page *page = bridge->scratch_page_page;
 
-               bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
-               bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
+               bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
+               bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
        }
        if (got_gatt)
                bridge->driver->free_gatt_table(bridge);
@@ -221,10 +221,10 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
 
        if (bridge->driver->agp_destroy_page &&
            bridge->driver->needs_scratch_page) {
-               void *va = page_address(bridge->scratch_page_page);
+               struct page *page = bridge->scratch_page_page;
 
-               bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
-               bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
+               bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP);
+               bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE);
        }
 }
 
index 732215b805c1af45f391769702c0b515ea888cb1..54ca8b23cde3f1aa5550d2806087300227531070 100644 (file)
@@ -965,6 +965,7 @@ EXPORT_SYMBOL(get_random_bytes);
  */
 static void init_std_data(struct entropy_store *r)
 {
+       int i;
        ktime_t now;
        unsigned long flags;
 
@@ -974,6 +975,11 @@ static void init_std_data(struct entropy_store *r)
 
        now = ktime_get_real();
        mix_pool_bytes(r, &now, sizeof(now));
+       for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof flags) {
+               if (!arch_get_random_long(&flags))
+                       break;
+               mix_pool_bytes(r, &flags, sizeof(flags));
+       }
        mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
 }
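
init_std_data() now also stirs hardware random words into the pool at initialisation time: arch_get_random_long() (RDRAND-backed on recent x86, unavailable elsewhere) is called once per machine word of pool size and the loop stops at the first failure, so platforms without such an instruction simply keep the existing time-and-utsname seeding. A user-space sketch of the same best-effort pattern; the RDRAND builtin is a GCC extension unrelated to this patch, used here only as a stand-in:

/* Build with gcc -mrdrnd on x86-64. */
#include <stdio.h>

int main(void)
{
        unsigned long long word;
        int filled = 0, i;

        for (i = 0; i < 8; i++) {
                if (!__builtin_ia32_rdrand64_step(&word))
                        break;            /* no hardware entropy: stop, keep the fallback */
                filled++;                 /* a real pool would mix 'word' in here */
        }
        printf("mixed in %d hardware words\n", filled);
        return 0;
}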
 
index 6a8771f47a55c799866468fe80946a479a0440f8..32362cf35b8d1a42c4b1f3e4f67881687ecad0cc 100644 (file)
@@ -846,6 +846,15 @@ int tpm_do_selftest(struct tpm_chip *chip)
 
        do {
                rc = __tpm_pcr_read(chip, 0, digest);
+               if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
+                       dev_info(chip->dev,
+                                "TPM is disabled/deactivated (0x%X)\n", rc);
+                       /* TPM is disabled and/or deactivated; driver can
+                        * proceed and TPM does handle commands for
+                        * suspend/resume correctly
+                        */
+                       return 0;
+               }
                if (rc != TPM_WARN_DOING_SELFTEST)
                        return rc;
                msleep(delay_msec);
index 8c1df302fbb6ce17cd9580cca1aaeca84f785745..01054713828128ea59fa1c06b2adf035b8b3f281 100644 (file)
@@ -39,6 +39,9 @@ enum tpm_addr {
 };
 
 #define TPM_WARN_DOING_SELFTEST 0x802
+#define TPM_ERR_DEACTIVATED     0x6
+#define TPM_ERR_DISABLED        0x7
+
 #define TPM_HEADER_SIZE                10
 extern ssize_t tpm_show_pubek(struct device *, struct device_attribute *attr,
                                char *);
index 5a99bb3f255ae7c34fedc540949ab0163a15d5a5..f1a274994bb1fbdc1c8e0e54b26aebe58b161c2b 100644 (file)
@@ -124,7 +124,7 @@ config MV_XOR
 
 config MX3_IPU
        bool "MX3x Image Processing Unit support"
-       depends on SOC_IMX31 || SOC_IMX35
+       depends on ARCH_MXC
        select DMA_ENGINE
        default y
        help
@@ -187,6 +187,13 @@ config TIMB_DMA
        help
          Enable support for the Timberdale FPGA DMA engine.
 
+config SIRF_DMA
+       tristate "CSR SiRFprimaII DMA support"
+       depends on ARCH_PRIMA2
+       select DMA_ENGINE
+       help
+         Enable support for the CSR SiRFprimaII DMA engine.
+
 config ARCH_HAS_ASYNC_TX_FIND_CHANNEL
        bool
 
@@ -201,26 +208,26 @@ config PL330_DMA
          platform_data for a dma-pl330 device.
 
 config PCH_DMA
-       tristate "Intel EG20T PCH / OKI Semi IOH(ML7213/ML7223) DMA support"
+       tristate "Intel EG20T PCH / LAPIS Semicon IOH(ML7213/ML7223/ML7831) DMA"
        depends on PCI && X86
        select DMA_ENGINE
        help
          Enable support for Intel EG20T PCH DMA engine.
 
-         This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-         Output Hub), ML7213 and ML7223.
-         ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
-         for MP(Media Phone) use.
-         ML7213/ML7223 is companion chip for Intel Atom E6xx series.
-         ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+         This driver also can be used for LAPIS Semiconductor IOH(Input/
+         Output Hub), ML7213, ML7223 and ML7831.
+         ML7213 IOH is for IVI(In-Vehicle Infotainment) use, ML7223 IOH is
+         for MP(Media Phone) use and ML7831 IOH is for general purpose use.
+         ML7213/ML7223/ML7831 is companion chip for Intel Atom E6xx series.
+         ML7213/ML7223/ML7831 is completely compatible for Intel EG20T PCH.
 
 config IMX_SDMA
        tristate "i.MX SDMA support"
-       depends on ARCH_MX25 || SOC_IMX31 || SOC_IMX35 || ARCH_MX5
+       depends on ARCH_MXC
        select DMA_ENGINE
        help
          Support the i.MX SDMA engine. This engine is integrated into
-         Freescale i.MX25/31/35/51 chips.
+         Freescale i.MX25/31/35/51/53 chips.
 
 config IMX_DMA
        tristate "i.MX DMA support"
index 30cf3b1f0c5ca4ff633d9e50474feef0a49fc565..009a222e8283cba3929fd8763ae9abe9b4d6d694 100644 (file)
@@ -21,6 +21,7 @@ obj-$(CONFIG_IMX_SDMA) += imx-sdma.o
 obj-$(CONFIG_IMX_DMA) += imx-dma.o
 obj-$(CONFIG_MXS_DMA) += mxs-dma.o
 obj-$(CONFIG_TIMB_DMA) += timb_dma.o
+obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
 obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
 obj-$(CONFIG_PL330_DMA) += pl330.o
 obj-$(CONFIG_PCH_DMA) += pch_dma.o
index 0698695e8bf9508e0c0dac5cdad44f7f8f508c07..8a281584458b582bbb872137323ff82cdaac2eb1 100644 (file)
@@ -854,8 +854,10 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
        int ret;
 
        /* Check if we already have a channel */
-       if (plchan->phychan)
-               return 0;
+       if (plchan->phychan) {
+               ch = plchan->phychan;
+               goto got_channel;
+       }
 
        ch = pl08x_get_phy_channel(pl08x, plchan);
        if (!ch) {
@@ -880,21 +882,22 @@ static int prep_phy_channel(struct pl08x_dma_chan *plchan,
                        return -EBUSY;
                }
                ch->signal = ret;
-
-               /* Assign the flow control signal to this channel */
-               if (txd->direction == DMA_TO_DEVICE)
-                       txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
-               else if (txd->direction == DMA_FROM_DEVICE)
-                       txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
        }
 
+       plchan->phychan = ch;
        dev_dbg(&pl08x->adev->dev, "allocated physical channel %d and signal %d for xfer on %s\n",
                 ch->id,
                 ch->signal,
                 plchan->name);
 
+got_channel:
+       /* Assign the flow control signal to this channel */
+       if (txd->direction == DMA_MEM_TO_DEV)
+               txd->ccfg |= ch->signal << PL080_CONFIG_DST_SEL_SHIFT;
+       else if (txd->direction == DMA_DEV_TO_MEM)
+               txd->ccfg |= ch->signal << PL080_CONFIG_SRC_SEL_SHIFT;
+
        plchan->phychan_hold++;
-       plchan->phychan = ch;
 
        return 0;
 }
@@ -1102,10 +1105,10 @@ static int dma_set_runtime_config(struct dma_chan *chan,
 
        /* Transfer direction */
        plchan->runtime_direction = config->direction;
-       if (config->direction == DMA_TO_DEVICE) {
+       if (config->direction == DMA_MEM_TO_DEV) {
                addr_width = config->dst_addr_width;
                maxburst = config->dst_maxburst;
-       } else if (config->direction == DMA_FROM_DEVICE) {
+       } else if (config->direction == DMA_DEV_TO_MEM) {
                addr_width = config->src_addr_width;
                maxburst = config->src_maxburst;
        } else {
@@ -1136,7 +1139,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
        cctl |= burst << PL080_CONTROL_SB_SIZE_SHIFT;
        cctl |= burst << PL080_CONTROL_DB_SIZE_SHIFT;
 
-       if (plchan->runtime_direction == DMA_FROM_DEVICE) {
+       if (plchan->runtime_direction == DMA_DEV_TO_MEM) {
                plchan->src_addr = config->src_addr;
                plchan->src_cctl = pl08x_cctl(cctl) | PL080_CONTROL_DST_INCR |
                        pl08x_select_bus(plchan->cd->periph_buses,
@@ -1152,7 +1155,7 @@ static int dma_set_runtime_config(struct dma_chan *chan,
                "configured channel %s (%s) for %s, data width %d, "
                "maxburst %d words, LE, CCTL=0x%08x\n",
                dma_chan_name(chan), plchan->name,
-               (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+               (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
                addr_width,
                maxburst,
                cctl);
@@ -1322,7 +1325,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_dma_memcpy(
 
 static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags)
 {
        struct pl08x_dma_chan *plchan = to_pl08x_chan(chan);
@@ -1354,10 +1357,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
         */
        txd->direction = direction;
 
-       if (direction == DMA_TO_DEVICE) {
+       if (direction == DMA_MEM_TO_DEV) {
                txd->cctl = plchan->dst_cctl;
                slave_addr = plchan->dst_addr;
-       } else if (direction == DMA_FROM_DEVICE) {
+       } else if (direction == DMA_DEV_TO_MEM) {
                txd->cctl = plchan->src_cctl;
                slave_addr = plchan->src_addr;
        } else {
@@ -1368,10 +1371,10 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
        }
 
        if (plchan->cd->device_fc)
-               tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER_PER :
+               tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER_PER :
                        PL080_FLOW_PER2MEM_PER;
        else
-               tmp = (direction == DMA_TO_DEVICE) ? PL080_FLOW_MEM2PER :
+               tmp = (direction == DMA_MEM_TO_DEV) ? PL080_FLOW_MEM2PER :
                        PL080_FLOW_PER2MEM;
 
        txd->ccfg |= tmp << PL080_CONFIG_FLOW_CONTROL_SHIFT;
@@ -1387,7 +1390,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
                list_add_tail(&dsg->node, &txd->dsg_list);
 
                dsg->len = sg_dma_len(sg);
-               if (direction == DMA_TO_DEVICE) {
+               if (direction == DMA_MEM_TO_DEV) {
                        dsg->src_addr = sg_phys(sg);
                        dsg->dst_addr = slave_addr;
                } else {
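
The pl08x changes are part of a tree-wide dmaengine rename that recurs throughout this merge: slave and cyclic preparation callbacks now take enum dma_transfer_direction, whose DMA_MEM_TO_DEV / DMA_DEV_TO_MEM values replace the DMA_TO_DEVICE / DMA_FROM_DEVICE constants borrowed from the dma-mapping API, and pl08x additionally defers assigning the flow-control signal until a physical channel is actually held (the new got_channel label). A hedged client-side sketch of describing a memory-to-peripheral transfer after the rename; the channel, FIFO address and burst size are hypothetical, only the direction values come from this series:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int start_tx(struct dma_chan *chan, struct scatterlist *sgl, int nents,
                    dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,       /* was DMA_TO_DEVICE */
                .dst_addr       = fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .dst_maxburst   = 4,
        };
        struct dma_async_tx_descriptor *desc;

        dmaengine_slave_config(chan, &cfg);
        desc = chan->device->device_prep_slave_sg(chan, sgl, nents,
                                                  DMA_MEM_TO_DEV,
                                                  DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;
        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}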
index fcfa0a8b5c59956516060cd9ff5535f6524a819f..f4aed5fc2cb6c33d87d8932d8603f705d3d2e589 100644 (file)
@@ -23,6 +23,8 @@
 #include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
 
 #include "at_hdmac_regs.h"
 
@@ -660,7 +662,7 @@ err_desc_get:
  */
 static struct dma_async_tx_descriptor *
 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
@@ -678,7 +680,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
        dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
                        sg_len,
-                       direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+                       direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
                        flags);
 
        if (unlikely(!atslave || !sg_len)) {
@@ -692,7 +694,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        ctrlb = ATC_IEN;
 
        switch (direction) {
-       case DMA_TO_DEVICE:
+       case DMA_MEM_TO_DEV:
                ctrla |=  ATC_DST_WIDTH(reg_width);
                ctrlb |=  ATC_DST_ADDR_MODE_FIXED
                        | ATC_SRC_ADDR_MODE_INCR
@@ -725,7 +727,7 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        total_len += len;
                }
                break;
-       case DMA_FROM_DEVICE:
+       case DMA_DEV_TO_MEM:
                ctrla |=  ATC_SRC_WIDTH(reg_width);
                ctrlb |=  ATC_DST_ADDR_MODE_INCR
                        | ATC_SRC_ADDR_MODE_FIXED
@@ -787,7 +789,7 @@ err_desc_get:
  */
 static int
 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        if (period_len > (ATC_BTSIZE_MAX << reg_width))
                goto err_out;
@@ -795,7 +797,7 @@ atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
                goto err_out;
        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
                goto err_out;
-       if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+       if (unlikely(!(direction & (DMA_DEV_TO_MEM | DMA_MEM_TO_DEV))))
                goto err_out;
 
        return 0;
@@ -810,7 +812,7 @@ err_out:
 static int
 atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
                unsigned int period_index, dma_addr_t buf_addr,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        u32             ctrla;
        unsigned int    reg_width = atslave->reg_width;
@@ -822,7 +824,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
                | period_len >> reg_width;
 
        switch (direction) {
-       case DMA_TO_DEVICE:
+       case DMA_MEM_TO_DEV:
                desc->lli.saddr = buf_addr + (period_len * period_index);
                desc->lli.daddr = atslave->tx_reg;
                desc->lli.ctrla = ctrla;
@@ -833,7 +835,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
                                | ATC_DIF(AT_DMA_PER_IF);
                break;
 
-       case DMA_FROM_DEVICE:
+       case DMA_DEV_TO_MEM:
                desc->lli.saddr = atslave->rx_reg;
                desc->lli.daddr = buf_addr + (period_len * period_index);
                desc->lli.ctrla = ctrla;
@@ -861,7 +863,7 @@ atc_dma_cyclic_fill_desc(struct at_dma_slave *atslave, struct at_desc *desc,
  */
 static struct dma_async_tx_descriptor *
 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        struct at_dma_chan      *atchan = to_at_dma_chan(chan);
        struct at_dma_slave     *atslave = chan->private;
@@ -872,7 +874,7 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
        unsigned int            i;
 
        dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
-                       direction == DMA_TO_DEVICE ? "TO DEVICE" : "FROM DEVICE",
+                       direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
                        buf_addr,
                        periods, buf_len, period_len);
 
@@ -1175,6 +1177,56 @@ static void atc_free_chan_resources(struct dma_chan *chan)
 
 /*--  Module Management  -----------------------------------------------*/
 
+/* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
+static struct at_dma_platform_data at91sam9rl_config = {
+       .nr_channels = 2,
+};
+static struct at_dma_platform_data at91sam9g45_config = {
+       .nr_channels = 8,
+};
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_dma_dt_ids[] = {
+       {
+               .compatible = "atmel,at91sam9rl-dma",
+               .data = &at91sam9rl_config,
+       }, {
+               .compatible = "atmel,at91sam9g45-dma",
+               .data = &at91sam9g45_config,
+       }, {
+               /* sentinel */
+       }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
+#endif
+
+static const struct platform_device_id atdma_devtypes[] = {
+       {
+               .name = "at91sam9rl_dma",
+               .driver_data = (unsigned long) &at91sam9rl_config,
+       }, {
+               .name = "at91sam9g45_dma",
+               .driver_data = (unsigned long) &at91sam9g45_config,
+       }, {
+               /* sentinel */
+       }
+};
+
+static inline struct at_dma_platform_data * __init at_dma_get_driver_data(
+                                               struct platform_device *pdev)
+{
+       if (pdev->dev.of_node) {
+               const struct of_device_id *match;
+               match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
+               if (match == NULL)
+                       return NULL;
+               return match->data;
+       }
+       return (struct at_dma_platform_data *)
+                       platform_get_device_id(pdev)->driver_data;
+}
+
 /**
  * at_dma_off - disable DMA controller
  * @atdma: the Atmel HDAMC device
@@ -1193,18 +1245,23 @@ static void at_dma_off(struct at_dma *atdma)
 
 static int __init at_dma_probe(struct platform_device *pdev)
 {
-       struct at_dma_platform_data *pdata;
        struct resource         *io;
        struct at_dma           *atdma;
        size_t                  size;
        int                     irq;
        int                     err;
        int                     i;
+       struct at_dma_platform_data *plat_dat;
 
-       /* get DMA Controller parameters from platform */
-       pdata = pdev->dev.platform_data;
-       if (!pdata || pdata->nr_channels > AT_DMA_MAX_NR_CHANNELS)
-               return -EINVAL;
+       /* setup platform data for each SoC */
+       dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
+       dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
+       dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
+
+       /* get DMA parameters from controller type */
+       plat_dat = at_dma_get_driver_data(pdev);
+       if (!plat_dat)
+               return -ENODEV;
 
        io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!io)
@@ -1215,14 +1272,14 @@ static int __init at_dma_probe(struct platform_device *pdev)
                return irq;
 
        size = sizeof(struct at_dma);
-       size += pdata->nr_channels * sizeof(struct at_dma_chan);
+       size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
        atdma = kzalloc(size, GFP_KERNEL);
        if (!atdma)
                return -ENOMEM;
 
-       /* discover transaction capabilites from the platform data */
-       atdma->dma_common.cap_mask = pdata->cap_mask;
-       atdma->all_chan_mask = (1 << pdata->nr_channels) - 1;
+       /* discover transaction capabilities */
+       atdma->dma_common.cap_mask = plat_dat->cap_mask;
+       atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
 
        size = resource_size(io);
        if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
@@ -1268,7 +1325,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
        /* initialize channels related values */
        INIT_LIST_HEAD(&atdma->dma_common.channels);
-       for (i = 0; i < pdata->nr_channels; i++) {
+       for (i = 0; i < plat_dat->nr_channels; i++) {
                struct at_dma_chan      *atchan = &atdma->chan[i];
 
                atchan->chan_common.device = &atdma->dma_common;
@@ -1286,7 +1343,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 
                tasklet_init(&atchan->tasklet, atc_tasklet,
                                (unsigned long)atchan);
-               atc_enable_irq(atchan);
+               atc_enable_chan_irq(atdma, i);
        }
 
        /* set base routines */
@@ -1313,7 +1370,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
        dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s), %d channels\n",
          dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
          dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
-         pdata->nr_channels);
+         plat_dat->nr_channels);
 
        dma_async_device_register(&atdma->dma_common);
 
@@ -1353,7 +1410,7 @@ static int __exit at_dma_remove(struct platform_device *pdev)
                struct at_dma_chan      *atchan = to_at_dma_chan(chan);
 
                /* Disable interrupts */
-               atc_disable_irq(atchan);
+               atc_disable_chan_irq(atdma, chan->chan_id);
                tasklet_disable(&atchan->tasklet);
 
                tasklet_kill(&atchan->tasklet);
@@ -1495,9 +1552,11 @@ static const struct dev_pm_ops at_dma_dev_pm_ops = {
 static struct platform_driver at_dma_driver = {
        .remove         = __exit_p(at_dma_remove),
        .shutdown       = at_dma_shutdown,
+       .id_table       = atdma_devtypes,
        .driver = {
                .name   = "at_hdmac",
                .pm     = &at_dma_dev_pm_ops,
+               .of_match_table = of_match_ptr(atmel_dma_dt_ids),
        },
 };
 
index aa4c9aebab7cedec68a42aececeb218aaea0208b..a8d3277d60b5cdd238ea77959c7b40185edfc077 100644 (file)
@@ -251,6 +251,7 @@ static inline struct at_dma_chan *to_at_dma_chan(struct dma_chan *dchan)
 /**
  * struct at_dma - internal representation of an Atmel HDMA Controller
  * @chan_common: common dmaengine dma_device object members
+ * @atdma_devtype: identifier of DMA controller compatibility
  * @ch_regs: memory mapped register base
  * @clk: dma controller clock
  * @save_imr: interrupt mask register that is saved on suspend/resume cycle
@@ -326,28 +327,27 @@ static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
 }
 
 
-static void atc_setup_irq(struct at_dma_chan *atchan, int on)
+static void atc_setup_irq(struct at_dma *atdma, int chan_id, int on)
 {
-       struct at_dma   *atdma = to_at_dma(atchan->chan_common.device);
-       u32             ebci;
+       u32 ebci;
 
        /* enable interrupts on buffer transfer completion & error */
-       ebci =    AT_DMA_BTC(atchan->chan_common.chan_id)
-               | AT_DMA_ERR(atchan->chan_common.chan_id);
+       ebci =    AT_DMA_BTC(chan_id)
+               | AT_DMA_ERR(chan_id);
        if (on)
                dma_writel(atdma, EBCIER, ebci);
        else
                dma_writel(atdma, EBCIDR, ebci);
 }
 
-static inline void atc_enable_irq(struct at_dma_chan *atchan)
+static void atc_enable_chan_irq(struct at_dma *atdma, int chan_id)
 {
-       atc_setup_irq(atchan, 1);
+       atc_setup_irq(atdma, chan_id, 1);
 }
 
-static inline void atc_disable_irq(struct at_dma_chan *atchan)
+static void atc_disable_chan_irq(struct at_dma *atdma, int chan_id)
 {
-       atc_setup_irq(atchan, 0);
+       atc_setup_irq(atdma, chan_id, 0);
 }
 
 
index 4234f416ef115055cb425822a2c411a73dd41590..d65a718c0f9b1ae5819aef9af4c1c58200627a22 100644 (file)
@@ -39,7 +39,7 @@ struct coh901318_desc {
        struct scatterlist *sg;
        unsigned int sg_len;
        struct coh901318_lli *lli;
-       enum dma_data_direction dir;
+       enum dma_transfer_direction dir;
        unsigned long flags;
        u32 head_config;
        u32 head_ctrl;
@@ -1034,7 +1034,7 @@ coh901318_prep_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 static struct dma_async_tx_descriptor *
 coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-                       unsigned int sg_len, enum dma_data_direction direction,
+                       unsigned int sg_len, enum dma_transfer_direction direction,
                        unsigned long flags)
 {
        struct coh901318_chan *cohc = to_coh901318_chan(chan);
@@ -1077,7 +1077,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        ctrl_last |= cohc->runtime_ctrl;
        ctrl |= cohc->runtime_ctrl;
 
-       if (direction == DMA_TO_DEVICE) {
+       if (direction == DMA_MEM_TO_DEV) {
                u32 tx_flags = COH901318_CX_CTRL_PRDD_SOURCE |
                        COH901318_CX_CTRL_SRC_ADDR_INC_ENABLE;
 
@@ -1085,7 +1085,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                ctrl_chained |= tx_flags;
                ctrl_last |= tx_flags;
                ctrl |= tx_flags;
-       } else if (direction == DMA_FROM_DEVICE) {
+       } else if (direction == DMA_DEV_TO_MEM) {
                u32 rx_flags = COH901318_CX_CTRL_PRDD_DEST |
                        COH901318_CX_CTRL_DST_ADDR_INC_ENABLE;
 
@@ -1274,11 +1274,11 @@ static void coh901318_dma_set_runtimeconfig(struct dma_chan *chan,
        int i = 0;
 
        /* We only support mem to per or per to mem transfers */
-       if (config->direction == DMA_FROM_DEVICE) {
+       if (config->direction == DMA_DEV_TO_MEM) {
                addr = config->src_addr;
                addr_width = config->src_addr_width;
                maxburst = config->src_maxburst;
-       } else if (config->direction == DMA_TO_DEVICE) {
+       } else if (config->direction == DMA_MEM_TO_DEV) {
                addr = config->dst_addr;
                addr_width = config->dst_addr_width;
                maxburst = config->dst_maxburst;
index 9f7e0e6a7eea12e8487cef8fd1395c38cfbdc7a0..6c0e2d4c66827c7a179a55c8bc52375b932dfaa1 100644 (file)
@@ -7,11 +7,10 @@
  * Author: Per Friden <per.friden@stericsson.com>
  */
 
-#include <linux/dma-mapping.h>
 #include <linux/spinlock.h>
-#include <linux/dmapool.h>
 #include <linux/memory.h>
 #include <linux/gfp.h>
+#include <linux/dmapool.h>
 #include <mach/coh901318.h>
 
 #include "coh901318_lli.h"
@@ -177,18 +176,18 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
                          struct coh901318_lli *lli,
                          dma_addr_t buf, unsigned int size,
                          dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_eom,
-                         enum dma_data_direction dir)
+                         enum dma_transfer_direction dir)
 {
        int s = size;
        dma_addr_t src;
        dma_addr_t dst;
 
 
-       if (dir == DMA_TO_DEVICE) {
+       if (dir == DMA_MEM_TO_DEV) {
                src = buf;
                dst = dev_addr;
 
-       } else if (dir == DMA_FROM_DEVICE) {
+       } else if (dir == DMA_DEV_TO_MEM) {
 
                src = dev_addr;
                dst = buf;
@@ -215,9 +214,9 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
 
                lli = coh901318_lli_next(lli);
 
-               if (dir == DMA_TO_DEVICE)
+               if (dir == DMA_MEM_TO_DEV)
                        src += block_size;
-               else if (dir == DMA_FROM_DEVICE)
+               else if (dir == DMA_DEV_TO_MEM)
                        dst += block_size;
        }
 
@@ -234,7 +233,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
                      struct scatterlist *sgl, unsigned int nents,
                      dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl,
                      u32 ctrl_last,
-                     enum dma_data_direction dir, u32 ctrl_irq_mask)
+                     enum dma_transfer_direction dir, u32 ctrl_irq_mask)
 {
        int i;
        struct scatterlist *sg;
@@ -249,9 +248,9 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
 
        spin_lock(&pool->lock);
 
-       if (dir == DMA_TO_DEVICE)
+       if (dir == DMA_MEM_TO_DEV)
                dst = dev_addr;
-       else if (dir == DMA_FROM_DEVICE)
+       else if (dir == DMA_DEV_TO_MEM)
                src = dev_addr;
        else
                goto err;
@@ -269,7 +268,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
                        ctrl_sg = ctrl ? ctrl : ctrl_last;
 
 
-               if (dir == DMA_TO_DEVICE)
+               if (dir == DMA_MEM_TO_DEV)
                        /* increment source address */
                        src = sg_phys(sg);
                else
@@ -293,7 +292,7 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
                        lli->src_addr = src;
                        lli->dst_addr = dst;
 
-                       if (dir == DMA_FROM_DEVICE)
+                       if (dir == DMA_DEV_TO_MEM)
                                dst += elem_size;
                        else
                                src += elem_size;
index 7a5c80990e9ef795def59abeb4e8b310e1245759..abff3714fdda73ed508d588c3b1b3fc923d08e89 100644 (file)
@@ -97,7 +97,7 @@ coh901318_lli_fill_single(struct coh901318_pool *pool,
                          struct coh901318_lli *lli,
                          dma_addr_t buf, unsigned int size,
                          dma_addr_t dev_addr, u32 ctrl_chained, u32 ctrl_last,
-                         enum dma_data_direction dir);
+                         enum dma_transfer_direction dir);
 
 /**
  * coh901318_lli_fill_single() - Prepares the lli:s for dma scatter list transfer
@@ -119,6 +119,6 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
                      struct scatterlist *sg, unsigned int nents,
                      dma_addr_t dev_addr, u32 ctrl_chained,
                      u32 ctrl, u32 ctrl_last,
-                     enum dma_data_direction dir, u32 ctrl_irq_mask);
+                     enum dma_transfer_direction dir, u32 ctrl_irq_mask);
 
 #endif /* COH901318_LLI_H */
index b48967b499da0bad30529501492def4695dc1f25..a6c6051ec85811041277c64d675810f14d9da851 100644 (file)
@@ -693,12 +693,12 @@ int dma_async_device_register(struct dma_device *device)
                !device->device_prep_dma_interrupt);
        BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
                !device->device_prep_dma_sg);
-       BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-               !device->device_prep_slave_sg);
        BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
                !device->device_prep_dma_cyclic);
        BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
                !device->device_control);
+       BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
+               !device->device_prep_interleaved_dma);
 
        BUG_ON(!device->device_alloc_chan_resources);
        BUG_ON(!device->device_free_chan_resources);
index 2b8661b54eaf7ec159d563b7798e6f07ec9dc34e..24225f0fdcd85f93d5feccae6d935e1563a679fb 100644 (file)
@@ -599,7 +599,7 @@ static int dmatest_add_channel(struct dma_chan *chan)
        }
        if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
                cnt = dmatest_add_threads(dtc, DMA_PQ);
-               thread_count += cnt > 0 ?: 0;
+               thread_count += cnt > 0 ? cnt : 0;
        }
 
        pr_info("dmatest: Started %u threads using %s\n",
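
The dmatest change fixes a use of the GNU ?: extension: cnt > 0 ?: 0 evaluates to the condition itself, so it added 1 whenever cnt was positive instead of adding cnt, under-counting the PQ test threads. A two-line illustration of the difference:

#include <stdio.h>

int main(void)
{
        int cnt = 5;

        printf("%d\n", cnt > 0 ?: 0);       /* GNU extension: yields the condition, 1 */
        printf("%d\n", cnt > 0 ? cnt : 0);  /* intended: yields cnt, 5                */
        return 0;
}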
index 9bfd6d3607180930692140b43f31db630c25bee6..9b592b02b5f49a3023cdc883322af7c451448a0a 100644 (file)
@@ -166,6 +166,38 @@ dwc_assign_cookie(struct dw_dma_chan *dwc, struct dw_desc *desc)
        return cookie;
 }
 
+static void dwc_initialize(struct dw_dma_chan *dwc)
+{
+       struct dw_dma *dw = to_dw_dma(dwc->chan.device);
+       struct dw_dma_slave *dws = dwc->chan.private;
+       u32 cfghi = DWC_CFGH_FIFO_MODE;
+       u32 cfglo = DWC_CFGL_CH_PRIOR(dwc->priority);
+
+       if (dwc->initialized == true)
+               return;
+
+       if (dws) {
+               /*
+                * We need controller-specific data to set up slave
+                * transfers.
+                */
+               BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
+
+               cfghi = dws->cfg_hi;
+               cfglo |= dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
+       }
+
+       channel_writel(dwc, CFG_LO, cfglo);
+       channel_writel(dwc, CFG_HI, cfghi);
+
+       /* Enable interrupts */
+       channel_set_bit(dw, MASK.XFER, dwc->mask);
+       channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+       channel_set_bit(dw, MASK.ERROR, dwc->mask);
+
+       dwc->initialized = true;
+}
+
 /*----------------------------------------------------------------------*/
 
 /* Called with dwc->lock held and bh disabled */
@@ -189,6 +221,8 @@ static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
                return;
        }
 
+       dwc_initialize(dwc);
+
        channel_writel(dwc, LLP, first->txd.phys);
        channel_writel(dwc, CTL_LO,
                        DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
@@ -696,7 +730,7 @@ err_desc_get:
 
 static struct dma_async_tx_descriptor *
 dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags)
 {
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
@@ -720,7 +754,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        prev = first = NULL;
 
        switch (direction) {
-       case DMA_TO_DEVICE:
+       case DMA_MEM_TO_DEV:
                ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                                | DWC_CTLL_DST_WIDTH(reg_width)
                                | DWC_CTLL_DST_FIX
@@ -777,7 +811,7 @@ slave_sg_todev_fill_desc:
                                goto slave_sg_todev_fill_desc;
                }
                break;
-       case DMA_FROM_DEVICE:
+       case DMA_DEV_TO_MEM:
                ctllo = (DWC_DEFAULT_CTLLO(chan->private)
                                | DWC_CTLL_SRC_WIDTH(reg_width)
                                | DWC_CTLL_DST_INC
@@ -959,10 +993,7 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
        struct dw_dma_chan      *dwc = to_dw_dma_chan(chan);
        struct dw_dma           *dw = to_dw_dma(chan->device);
        struct dw_desc          *desc;
-       struct dw_dma_slave     *dws;
        int                     i;
-       u32                     cfghi;
-       u32                     cfglo;
        unsigned long           flags;
 
        dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
@@ -975,26 +1006,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
 
        dwc->completed = chan->cookie = 1;
 
-       cfghi = DWC_CFGH_FIFO_MODE;
-       cfglo = 0;
-
-       dws = chan->private;
-       if (dws) {
-               /*
-                * We need controller-specific data to set up slave
-                * transfers.
-                */
-               BUG_ON(!dws->dma_dev || dws->dma_dev != dw->dma.dev);
-
-               cfghi = dws->cfg_hi;
-               cfglo = dws->cfg_lo & ~DWC_CFGL_CH_PRIOR_MASK;
-       }
-
-       cfglo |= DWC_CFGL_CH_PRIOR(dwc->priority);
-
-       channel_writel(dwc, CFG_LO, cfglo);
-       channel_writel(dwc, CFG_HI, cfghi);
-
        /*
         * NOTE: some controllers may have additional features that we
         * need to initialize here, like "scatter-gather" (which
@@ -1026,11 +1037,6 @@ static int dwc_alloc_chan_resources(struct dma_chan *chan)
                i = ++dwc->descs_allocated;
        }
 
-       /* Enable interrupts */
-       channel_set_bit(dw, MASK.XFER, dwc->mask);
-       channel_set_bit(dw, MASK.BLOCK, dwc->mask);
-       channel_set_bit(dw, MASK.ERROR, dwc->mask);
-
        spin_unlock_irqrestore(&dwc->lock, flags);
 
        dev_dbg(chan2dev(chan),
@@ -1058,6 +1064,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
        spin_lock_irqsave(&dwc->lock, flags);
        list_splice_init(&dwc->free_list, &list);
        dwc->descs_allocated = 0;
+       dwc->initialized = false;
 
        /* Disable interrupts */
        channel_clear_bit(dw, MASK.XFER, dwc->mask);
@@ -1165,7 +1172,7 @@ EXPORT_SYMBOL(dw_dma_cyclic_stop);
  */
 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-               enum dma_data_direction direction)
+               enum dma_transfer_direction direction)
 {
        struct dw_dma_chan              *dwc = to_dw_dma_chan(chan);
        struct dw_cyclic_desc           *cdesc;
@@ -1206,7 +1213,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                goto out_err;
        if (unlikely(buf_addr & ((1 << reg_width) - 1)))
                goto out_err;
-       if (unlikely(!(direction & (DMA_TO_DEVICE | DMA_FROM_DEVICE))))
+       if (unlikely(!(direction & (DMA_MEM_TO_DEV | DMA_DEV_TO_MEM))))
                goto out_err;
 
        retval = ERR_PTR(-ENOMEM);
@@ -1228,7 +1235,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                        goto out_err_desc_get;
 
                switch (direction) {
-               case DMA_TO_DEVICE:
+               case DMA_MEM_TO_DEV:
                        desc->lli.dar = dws->tx_reg;
                        desc->lli.sar = buf_addr + (period_len * i);
                        desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1239,7 +1246,7 @@ struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                                        | DWC_CTLL_FC(dws->fc)
                                        | DWC_CTLL_INT_EN);
                        break;
-               case DMA_FROM_DEVICE:
+               case DMA_DEV_TO_MEM:
                        desc->lli.dar = buf_addr + (period_len * i);
                        desc->lli.sar = dws->rx_reg;
                        desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan->private)
@@ -1335,6 +1342,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free);
 
 static void dw_dma_off(struct dw_dma *dw)
 {
+       int i;
+
        dma_writel(dw, CFG, 0);
 
        channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
@@ -1345,6 +1354,9 @@ static void dw_dma_off(struct dw_dma *dw)
 
        while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
                cpu_relax();
+
+       for (i = 0; i < dw->dma.chancnt; i++)
+               dw->chan[i].initialized = false;
 }
 
 static int __init dw_probe(struct platform_device *pdev)
@@ -1533,6 +1545,7 @@ static int dw_suspend_noirq(struct device *dev)
 
        dw_dma_off(platform_get_drvdata(pdev));
        clk_disable(dw->clk);
+
        return 0;
 }
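
The dw_dmac hunks above defer the per-channel CFG_LO/CFG_HI programming and interrupt unmasking from dwc_alloc_chan_resources() to the first dwc_dostart(), guarded by a new per-channel initialized flag (added to struct dw_dma_chan in the header hunk just below) that is cleared again on channel release and in dw_dma_off(). A minimal sketch of that lazy-initialization pattern; the foo_* names are illustrative, not part of the driver:

#include <linux/types.h>

/* Illustrative lazy-init pattern, not the driver itself: program the
 * channel hardware on first use and re-arm the flag on teardown. */
struct foo_chan {
	bool initialized;	/* set once CFG/interrupt masks are written */
};

static void foo_chan_hw_init(struct foo_chan *fc)
{
	if (fc->initialized)
		return;

	/* write per-channel CFG registers and unmask its interrupts here */

	fc->initialized = true;
}

static void foo_chan_start(struct foo_chan *fc)
{
	foo_chan_hw_init(fc);	/* first transfer performs the real setup */
	/* ... start the descriptor chain ... */
}

static void foo_chan_teardown(struct foo_chan *fc)
{
	fc->initialized = false;	/* channel release or controller off */
}
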
 
diff --git a/drivers/dma/dw_dmac_regs.h b/drivers/dma/dw_dmac_regs.h
index c3419518d701dbe3a0671cbc906a88c7ee3823a3..5eef6946a36713bd7413ecd29c1751291c4cc41c 100644
@@ -140,6 +140,7 @@ struct dw_dma_chan {
        u8                      mask;
        u8                      priority;
        bool                    paused;
+       bool                    initialized;
 
        spinlock_t              lock;
 
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index b47e2b803fafdaedaf7c8a765858790ce869f2db..59e7a965772bfdff900aa1d82063d6f2510dd8c4 100644
@@ -246,6 +246,9 @@ static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
 static struct ep93xx_dma_desc *
 ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
 {
+       if (list_empty(&edmac->active))
+               return NULL;
+
        return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
 }
 
@@ -263,16 +266,22 @@ ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
  */
 static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
 {
+       struct ep93xx_dma_desc *desc;
+
        list_rotate_left(&edmac->active);
 
        if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
                return true;
 
+       desc = ep93xx_dma_get_active(edmac);
+       if (!desc)
+               return false;
+
        /*
         * If txd.cookie is set it means that we are back in the first
         * descriptor in the chain and hence done with it.
         */
-       return !ep93xx_dma_get_active(edmac)->txd.cookie;
+       return !desc->txd.cookie;
 }
 
 /*
@@ -327,10 +336,16 @@ static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
 
 static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
 {
-       struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+       struct ep93xx_dma_desc *desc;
        u32 bus_addr;
 
-       if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
+       desc = ep93xx_dma_get_active(edmac);
+       if (!desc) {
+               dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
+               return;
+       }
+
+       if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
                bus_addr = desc->src_addr;
        else
                bus_addr = desc->dst_addr;
@@ -443,7 +458,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
                control = (5 << M2M_CONTROL_PWSC_SHIFT);
                control |= M2M_CONTROL_NO_HDSK;
 
-               if (data->direction == DMA_TO_DEVICE) {
+               if (data->direction == DMA_MEM_TO_DEV) {
                        control |= M2M_CONTROL_DAH;
                        control |= M2M_CONTROL_TM_TX;
                        control |= M2M_CONTROL_RSS_SSPTX;
@@ -459,11 +474,7 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
                 * This IDE part is totally untested. Values below are taken
                 * from the EP93xx User's Guide and might not be correct.
                 */
-               control |= M2M_CONTROL_NO_HDSK;
-               control |= M2M_CONTROL_RSS_IDE;
-               control |= M2M_CONTROL_PW_16;
-
-               if (data->direction == DMA_TO_DEVICE) {
+               if (data->direction == DMA_MEM_TO_DEV) {
                        /* Worst case from the UG */
                        control = (3 << M2M_CONTROL_PWSC_SHIFT);
                        control |= M2M_CONTROL_DAH;
@@ -473,6 +484,10 @@ static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
                        control |= M2M_CONTROL_SAH;
                        control |= M2M_CONTROL_TM_RX;
                }
+
+               control |= M2M_CONTROL_NO_HDSK;
+               control |= M2M_CONTROL_RSS_IDE;
+               control |= M2M_CONTROL_PW_16;
                break;
 
        default:
@@ -491,7 +506,13 @@ static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
 
 static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
 {
-       struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
+       struct ep93xx_dma_desc *desc;
+
+       desc = ep93xx_dma_get_active(edmac);
+       if (!desc) {
+               dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
+               return;
+       }
 
        if (edmac->buffer == 0) {
                writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
@@ -669,24 +690,30 @@ static void ep93xx_dma_tasklet(unsigned long data)
 {
        struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
        struct ep93xx_dma_desc *desc, *d;
-       dma_async_tx_callback callback;
-       void *callback_param;
+       dma_async_tx_callback callback = NULL;
+       void *callback_param = NULL;
        LIST_HEAD(list);
 
        spin_lock_irq(&edmac->lock);
+       /*
+        * If dma_terminate_all() was called before we get to run, the active
+        * list has become empty. If that happens we aren't supposed to do
+        * anything more than call ep93xx_dma_advance_work().
+        */
        desc = ep93xx_dma_get_active(edmac);
-       if (desc->complete) {
-               edmac->last_completed = desc->txd.cookie;
-               list_splice_init(&edmac->active, &list);
+       if (desc) {
+               if (desc->complete) {
+                       edmac->last_completed = desc->txd.cookie;
+                       list_splice_init(&edmac->active, &list);
+               }
+               callback = desc->txd.callback;
+               callback_param = desc->txd.callback_param;
        }
        spin_unlock_irq(&edmac->lock);
 
        /* Pick up the next descriptor from the queue */
        ep93xx_dma_advance_work(edmac);
 
-       callback = desc->txd.callback;
-       callback_param = desc->txd.callback_param;
-
        /* Now we can release all the chained descriptors */
        list_for_each_entry_safe(desc, d, &list, node) {
                /*
@@ -706,13 +733,22 @@ static void ep93xx_dma_tasklet(unsigned long data)
 static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
 {
        struct ep93xx_dma_chan *edmac = dev_id;
+       struct ep93xx_dma_desc *desc;
        irqreturn_t ret = IRQ_HANDLED;
 
        spin_lock(&edmac->lock);
 
+       desc = ep93xx_dma_get_active(edmac);
+       if (!desc) {
+               dev_warn(chan2dev(edmac),
+                        "got interrupt while active list is empty\n");
+               spin_unlock(&edmac->lock);
+               return IRQ_NONE;
+       }
+
        switch (edmac->edma->hw_interrupt(edmac)) {
        case INTERRUPT_DONE:
-               ep93xx_dma_get_active(edmac)->complete = true;
+               desc->complete = true;
                tasklet_schedule(&edmac->tasklet);
                break;
 
@@ -803,8 +839,8 @@ static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
                        switch (data->port) {
                        case EP93XX_DMA_SSP:
                        case EP93XX_DMA_IDE:
-                               if (data->direction != DMA_TO_DEVICE &&
-                                   data->direction != DMA_FROM_DEVICE)
+                               if (data->direction != DMA_MEM_TO_DEV &&
+                                   data->direction != DMA_DEV_TO_MEM)
                                        return -EINVAL;
                                break;
                        default:
@@ -952,7 +988,7 @@ fail:
  */
 static struct dma_async_tx_descriptor *
 ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-                        unsigned int sg_len, enum dma_data_direction dir,
+                        unsigned int sg_len, enum dma_transfer_direction dir,
                         unsigned long flags)
 {
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
@@ -988,7 +1024,7 @@ ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        goto fail;
                }
 
-               if (dir == DMA_TO_DEVICE) {
+               if (dir == DMA_MEM_TO_DEV) {
                        desc->src_addr = sg_dma_address(sg);
                        desc->dst_addr = edmac->runtime_addr;
                } else {
@@ -1032,7 +1068,7 @@ fail:
 static struct dma_async_tx_descriptor *
 ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
                           size_t buf_len, size_t period_len,
-                          enum dma_data_direction dir)
+                          enum dma_transfer_direction dir)
 {
        struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
        struct ep93xx_dma_desc *desc, *first;
@@ -1065,7 +1101,7 @@ ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
                        goto fail;
                }
 
-               if (dir == DMA_TO_DEVICE) {
+               if (dir == DMA_MEM_TO_DEV) {
                        desc->src_addr = dma_addr + offset;
                        desc->dst_addr = edmac->runtime_addr;
                } else {
@@ -1133,12 +1169,12 @@ static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
                return -EINVAL;
 
        switch (config->direction) {
-       case DMA_FROM_DEVICE:
+       case DMA_DEV_TO_MEM:
                width = config->src_addr_width;
                addr = config->src_addr;
                break;
 
-       case DMA_TO_DEVICE:
+       case DMA_MEM_TO_DEV:
                width = config->dst_addr_width;
                addr = config->dst_addr;
                break;
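
ep93xx_dma_get_active() now returns NULL when the active list is empty (for instance after a DMA_TERMINATE_ALL races with the tasklet or the interrupt handler), and every caller checks for it instead of dereferencing the result blindly. A hedged sketch of that pattern with the standard list helpers; foo_* names are illustrative:

#include <linux/list.h>

/* Illustrative only: return the first queued descriptor, or NULL when the
 * list is empty, instead of calling list_first_entry() unconditionally. */
struct foo_desc {
	struct list_head node;
};

static struct foo_desc *foo_get_active(struct list_head *active)
{
	if (list_empty(active))
		return NULL;

	return list_first_entry(active, struct foo_desc, node);
}

Callers then warn and bail out on NULL, as the fill_desc, tasklet and interrupt paths above do, rather than oopsing on an empty list.
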
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 8a781540590cdf1e76e79c74d137a647c02642f6..b98070c33ca9d3b8aa42d1fb5085da9772957733 100644
@@ -772,7 +772,7 @@ fail:
  */
 static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
        struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
-       enum dma_data_direction direction, unsigned long flags)
+       enum dma_transfer_direction direction, unsigned long flags)
 {
        /*
         * This operation is not supported on the Freescale DMA controller
@@ -819,7 +819,7 @@ static int fsl_dma_device_control(struct dma_chan *dchan,
                        return -ENXIO;
 
                /* we set the controller burst size depending on direction */
-               if (config->direction == DMA_TO_DEVICE)
+               if (config->direction == DMA_MEM_TO_DEV)
                        size = config->dst_addr_width * config->dst_maxburst;
                else
                        size = config->src_addr_width * config->src_maxburst;
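
In the fsldma DMA_SLAVE_CONFIG handler the controller burst size is now derived from the slave configuration of the direction actually in use: address width in bytes multiplied by the maximum burst length. A small sketch under that assumption, using the standard struct dma_slave_config fields; foo_burst_bytes is illustrative:

#include <linux/dmaengine.h>

/* Sketch: derive the burst size in bytes from a dma_slave_config,
 * using the address width and maxburst of the active direction. */
static size_t foo_burst_bytes(const struct dma_slave_config *cfg)
{
	if (cfg->direction == DMA_MEM_TO_DEV)
		return cfg->dst_addr_width * cfg->dst_maxburst;

	return cfg->src_addr_width * cfg->src_maxburst;
}
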
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index 4be55f9bb6c19c6b6165a6de82eea9b2d977c18d..e4383ee2c9acd015b72675557ad03a06357f4eda 100644
@@ -107,7 +107,7 @@ static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                imx_dma_disable(imxdmac->imxdma_channel);
                return 0;
        case DMA_SLAVE_CONFIG:
-               if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+               if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
                        imxdmac->per_address = dmaengine_cfg->src_addr;
                        imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
                        imxdmac->word_size = dmaengine_cfg->src_addr_width;
@@ -224,7 +224,7 @@ static void imxdma_free_chan_resources(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags)
 {
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
@@ -241,7 +241,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
                dma_length += sg->length;
        }
 
-       if (direction == DMA_FROM_DEVICE)
+       if (direction == DMA_DEV_TO_MEM)
                dmamode = DMA_MODE_READ;
        else
                dmamode = DMA_MODE_WRITE;
@@ -271,7 +271,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
 
 static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
        struct imxdma_engine *imxdma = imxdmac->imxdma;
@@ -317,7 +317,7 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
        imxdmac->sg_list[periods].page_link =
                ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
 
-       if (direction == DMA_FROM_DEVICE)
+       if (direction == DMA_DEV_TO_MEM)
                dmamode = DMA_MODE_READ;
        else
                dmamode = DMA_MODE_WRITE;
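
The drivers in this series move from enum dma_data_direction to the dmaengine-specific enum dma_transfer_direction; as the hunks show, the constants map as DMA_TO_DEVICE to DMA_MEM_TO_DEV, DMA_FROM_DEVICE to DMA_DEV_TO_MEM, DMA_BIDIRECTIONAL to DMA_MEM_TO_MEM and DMA_NONE to DMA_TRANS_NONE. A short illustrative check using the new names; foo_check_dir is not part of any driver here:

#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Illustrative slave-direction check with the new enum values. */
static int foo_check_dir(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:	/* was DMA_TO_DEVICE   */
	case DMA_DEV_TO_MEM:	/* was DMA_FROM_DEVICE */
		return 0;
	default:
		/* DMA_MEM_TO_MEM (was DMA_BIDIRECTIONAL) and
		 * DMA_TRANS_NONE (was DMA_NONE) are not valid for slave SG */
		return -EINVAL;
	}
}
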
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index f993955a640c376342498d3336668348ecd78884..8bc5acf36ee5b2e1a633f2cd7f1f39bd5e41059b 100644
@@ -247,7 +247,7 @@ struct sdma_engine;
 struct sdma_channel {
        struct sdma_engine              *sdma;
        unsigned int                    channel;
-       enum dma_data_direction         direction;
+       enum dma_transfer_direction             direction;
        enum sdma_peripheral_type       peripheral_type;
        unsigned int                    event_id0;
        unsigned int                    event_id1;
@@ -268,6 +268,8 @@ struct sdma_channel {
        struct dma_async_tx_descriptor  desc;
        dma_cookie_t                    last_completed;
        enum dma_status                 status;
+       unsigned int                    chn_count;
+       unsigned int                    chn_real_count;
 };
 
 #define IMX_DMA_SG_LOOP                (1 << 0)
@@ -503,6 +505,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
        struct sdma_buffer_descriptor *bd;
        int i, error = 0;
 
+       sdmac->chn_real_count = 0;
        /*
         * non loop mode. Iterate over all descriptors, collect
         * errors and call callback function
@@ -512,6 +515,7 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
 
                 if (bd->mode.status & (BD_DONE | BD_RROR))
                        error = -EIO;
+                sdmac->chn_real_count += bd->mode.count;
        }
 
        if (error)
@@ -519,9 +523,9 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
        else
                sdmac->status = DMA_SUCCESS;
 
+       sdmac->last_completed = sdmac->desc.cookie;
        if (sdmac->desc.callback)
                sdmac->desc.callback(sdmac->desc.callback_param);
-       sdmac->last_completed = sdmac->desc.cookie;
 }
 
 static void mxc_sdma_handle_channel(struct sdma_channel *sdmac)
@@ -650,7 +654,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
        struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
        int ret;
 
-       if (sdmac->direction == DMA_FROM_DEVICE) {
+       if (sdmac->direction == DMA_DEV_TO_MEM) {
                load_address = sdmac->pc_from_device;
        } else {
                load_address = sdmac->pc_to_device;
@@ -832,17 +836,18 @@ static struct sdma_channel *to_sdma_chan(struct dma_chan *chan)
 
 static dma_cookie_t sdma_tx_submit(struct dma_async_tx_descriptor *tx)
 {
+       unsigned long flags;
        struct sdma_channel *sdmac = to_sdma_chan(tx->chan);
        struct sdma_engine *sdma = sdmac->sdma;
        dma_cookie_t cookie;
 
-       spin_lock_irq(&sdmac->lock);
+       spin_lock_irqsave(&sdmac->lock, flags);
 
        cookie = sdma_assign_cookie(sdmac);
 
        sdma_enable_channel(sdma, sdmac->channel);
 
-       spin_unlock_irq(&sdmac->lock);
+       spin_unlock_irqrestore(&sdmac->lock, flags);
 
        return cookie;
 }
@@ -911,7 +916,7 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
@@ -941,6 +946,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
                goto err_out;
        }
 
+       sdmac->chn_count = 0;
        for_each_sg(sgl, sg, sg_len, i) {
                struct sdma_buffer_descriptor *bd = &sdmac->bd[i];
                int param;
@@ -957,6 +963,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
                }
 
                bd->mode.count = count;
+               sdmac->chn_count += count;
 
                if (sdmac->word_size > DMA_SLAVE_BUSWIDTH_4_BYTES) {
                        ret =  -EINVAL;
@@ -1008,7 +1015,7 @@ err_out:
 
 static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
        struct sdma_engine *sdma = sdmac->sdma;
@@ -1093,15 +1100,18 @@ static int sdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                sdma_disable_channel(sdmac);
                return 0;
        case DMA_SLAVE_CONFIG:
-               if (dmaengine_cfg->direction == DMA_FROM_DEVICE) {
+               if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
                        sdmac->per_address = dmaengine_cfg->src_addr;
-                       sdmac->watermark_level = dmaengine_cfg->src_maxburst;
+                       sdmac->watermark_level = dmaengine_cfg->src_maxburst *
+                                               dmaengine_cfg->src_addr_width;
                        sdmac->word_size = dmaengine_cfg->src_addr_width;
                } else {
                        sdmac->per_address = dmaengine_cfg->dst_addr;
-                       sdmac->watermark_level = dmaengine_cfg->dst_maxburst;
+                       sdmac->watermark_level = dmaengine_cfg->dst_maxburst *
+                                               dmaengine_cfg->dst_addr_width;
                        sdmac->word_size = dmaengine_cfg->dst_addr_width;
                }
+               sdmac->direction = dmaengine_cfg->direction;
                return sdma_config_channel(sdmac);
        default:
                return -ENOSYS;
@@ -1119,7 +1129,8 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
 
        last_used = chan->cookie;
 
-       dma_set_tx_state(txstate, sdmac->last_completed, last_used, 0);
+       dma_set_tx_state(txstate, sdmac->last_completed, last_used,
+                       sdmac->chn_count - sdmac->chn_real_count);
 
        return sdmac->status;
 }
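
imx-sdma now accumulates the bytes queued per transfer (chn_count) and the bytes the hardware reports back per buffer descriptor (chn_real_count), so sdma_tx_status() can pass a real residue to dma_set_tx_state() instead of 0. A sketch of that accounting; the struct and helper names below are illustrative, not the driver's:

#include <linux/dmaengine.h>

/* Illustrative residue accounting: bytes queued minus bytes the hardware
 * reported as transferred. */
struct foo_dma_chan {
	unsigned int chn_count;		/* summed while building descriptors */
	unsigned int chn_real_count;	/* summed from hardware byte counts  */
	dma_cookie_t last_completed;
};

static void foo_fill_tx_state(struct foo_dma_chan *fc, dma_cookie_t last_used,
			      struct dma_tx_state *txstate)
{
	dma_set_tx_state(txstate, fc->last_completed, last_used,
			 fc->chn_count - fc->chn_real_count);
}
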
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index 19a0c64d45d3643a99e7b8972c8c3f3a16fc6e5f..74f70aadf9e47313cb23e3bee95781c8f24a2e02 100644
@@ -280,7 +280,8 @@ static void midc_dostart(struct intel_mid_dma_chan *midc,
  * callbacks but must be called with the lock held.
  */
 static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
-              struct intel_mid_dma_desc *desc)
+               struct intel_mid_dma_desc *desc)
+               __releases(&midc->lock) __acquires(&midc->lock)
 {
        struct dma_async_tx_descriptor  *txd = &desc->txd;
        dma_async_tx_callback callback_txd = NULL;
@@ -311,6 +312,7 @@ static void midc_descriptor_complete(struct intel_mid_dma_chan *midc,
                        pci_pool_free(desc->lli_pool, desc->lli,
                                                desc->lli_phys);
                        pci_pool_destroy(desc->lli_pool);
+                       desc->lli = NULL;
                }
                list_move(&desc->desc_node, &midc->free_list);
                midc->busy = false;
@@ -395,10 +397,10 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
                                                        midc->dma->block_size);
                /*Populate SAR and DAR values*/
                sg_phy_addr = sg_phys(sg);
-               if (desc->dirn ==  DMA_TO_DEVICE) {
+               if (desc->dirn ==  DMA_MEM_TO_DEV) {
                        lli_bloc_desc->sar  = sg_phy_addr;
                        lli_bloc_desc->dar  = mids->dma_slave.dst_addr;
-               } else if (desc->dirn ==  DMA_FROM_DEVICE) {
+               } else if (desc->dirn ==  DMA_DEV_TO_MEM) {
                        lli_bloc_desc->sar  = mids->dma_slave.src_addr;
                        lli_bloc_desc->dar  = sg_phy_addr;
                }
@@ -490,7 +492,9 @@ static enum dma_status intel_mid_dma_tx_status(struct dma_chan *chan,
 
        ret = dma_async_is_complete(cookie, last_complete, last_used);
        if (ret != DMA_SUCCESS) {
+               spin_lock_bh(&midc->lock);
                midc_scan_descriptors(to_middma_device(chan->device), midc);
+               spin_unlock_bh(&midc->lock);
 
                last_complete = midc->completed;
                last_used = chan->cookie;
@@ -566,6 +570,7 @@ static int intel_mid_dma_device_control(struct dma_chan *chan,
                        pci_pool_free(desc->lli_pool, desc->lli,
                                                desc->lli_phys);
                        pci_pool_destroy(desc->lli_pool);
+                       desc->lli = NULL;
                }
                list_move(&desc->desc_node, &midc->free_list);
        }
@@ -632,13 +637,13 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
                if (midc->dma->pimr_mask) {
                        cfg_hi.cfgx.protctl = 0x0; /*default value*/
                        cfg_hi.cfgx.fifo_mode = 1;
-                       if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+                       if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
                                cfg_hi.cfgx.src_per = 0;
                                if (mids->device_instance == 0)
                                        cfg_hi.cfgx.dst_per = 3;
                                if (mids->device_instance == 1)
                                        cfg_hi.cfgx.dst_per = 1;
-                       } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+                       } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
                                if (mids->device_instance == 0)
                                        cfg_hi.cfgx.src_per = 2;
                                if (mids->device_instance == 1)
@@ -682,11 +687,11 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_memcpy(
                ctl_lo.ctlx.sinc = 0;
                ctl_lo.ctlx.dinc = 0;
        } else {
-               if (mids->dma_slave.direction == DMA_TO_DEVICE) {
+               if (mids->dma_slave.direction == DMA_MEM_TO_DEV) {
                        ctl_lo.ctlx.sinc = 0;
                        ctl_lo.ctlx.dinc = 2;
                        ctl_lo.ctlx.tt_fc = 1;
-               } else if (mids->dma_slave.direction == DMA_FROM_DEVICE) {
+               } else if (mids->dma_slave.direction == DMA_DEV_TO_MEM) {
                        ctl_lo.ctlx.sinc = 2;
                        ctl_lo.ctlx.dinc = 0;
                        ctl_lo.ctlx.tt_fc = 2;
@@ -732,7 +737,7 @@ err_desc_get:
  */
 static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
                        struct dma_chan *chan, struct scatterlist *sgl,
-                       unsigned int sg_len, enum dma_data_direction direction,
+                       unsigned int sg_len, enum dma_transfer_direction direction,
                        unsigned long flags)
 {
        struct intel_mid_dma_chan *midc = NULL;
@@ -868,7 +873,7 @@ static int intel_mid_dma_alloc_chan_resources(struct dma_chan *chan)
        pm_runtime_get_sync(&mid->pdev->dev);
 
        if (mid->state == SUSPENDED) {
-               if (dma_resume(mid->pdev)) {
+               if (dma_resume(&mid->pdev->dev)) {
                        pr_err("ERR_MDMA: resume failed");
                        return -EFAULT;
                }
@@ -1099,7 +1104,8 @@ static int mid_setup_dma(struct pci_dev *pdev)
                                        LNW_PERIPHRAL_MASK_SIZE);
                if (dma->mask_reg == NULL) {
                        pr_err("ERR_MDMA:Can't map peripheral intr space !!\n");
-                       return -ENOMEM;
+                       err = -ENOMEM;
+                       goto err_ioremap;
                }
        } else
                dma->mask_reg = NULL;
@@ -1196,6 +1202,9 @@ static int mid_setup_dma(struct pci_dev *pdev)
 err_engine:
        free_irq(pdev->irq, dma);
 err_irq:
+       if (dma->mask_reg)
+               iounmap(dma->mask_reg);
+err_ioremap:
        pci_pool_destroy(dma->dma_pool);
 err_dma_pool:
        pr_err("ERR_MDMA:setup_dma failed: %d\n", err);
@@ -1337,8 +1346,9 @@ static void __devexit intel_mid_dma_remove(struct pci_dev *pdev)
 *
 * This function is called by OS when a power event occurs
 */
-int dma_suspend(struct pci_dev *pci, pm_message_t state)
+static int dma_suspend(struct device *dev)
 {
+       struct pci_dev *pci = to_pci_dev(dev);
        int i;
        struct middma_device *device = pci_get_drvdata(pci);
        pr_debug("MDMA: dma_suspend called\n");
@@ -1362,8 +1372,9 @@ int dma_suspend(struct pci_dev *pci, pm_message_t state)
 *
 * This function is called by OS when a power event occurs
 */
-int dma_resume(struct pci_dev *pci)
+int dma_resume(struct device *dev)
 {
+       struct pci_dev *pci = to_pci_dev(dev);
        int ret;
        struct middma_device *device = pci_get_drvdata(pci);
 
@@ -1429,6 +1440,8 @@ static const struct dev_pm_ops intel_mid_dma_pm = {
        .runtime_suspend = dma_runtime_suspend,
        .runtime_resume = dma_runtime_resume,
        .runtime_idle = dma_runtime_idle,
+       .suspend = dma_suspend,
+       .resume = dma_resume,
 };
 
 static struct pci_driver intel_mid_dma_pci_driver = {
@@ -1437,8 +1450,6 @@ static struct pci_driver intel_mid_dma_pci_driver = {
        .probe          =       intel_mid_dma_probe,
        .remove         =       __devexit_p(intel_mid_dma_remove),
 #ifdef CONFIG_PM
-       .suspend = dma_suspend,
-       .resume = dma_resume,
        .driver = {
                .pm = &intel_mid_dma_pm,
        },
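
intel_mid_dma stops wiring dma_suspend()/dma_resume() into the legacy pci_driver suspend/resume hooks and instead exposes them through the struct dev_pm_ops it already uses for runtime PM; the callbacks now take a struct device and recover the PCI device with to_pci_dev(). A minimal sketch of that shape; the foo_* names and empty bodies are placeholders for the controller-specific work:

#include <linux/pci.h>
#include <linux/pm.h>

/* Illustrative dev_pm_ops wiring for a PCI driver. */
static int foo_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* quiesce the controller owned by pdev here */
	(void)pdev;
	return 0;
}

static int foo_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);

	/* re-program the controller owned by pdev here */
	(void)pdev;
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	.suspend = foo_suspend,
	.resume  = foo_resume,
};

static struct pci_driver foo_pci_driver = {
	.name	= "foo",
	.driver	= {
		.pm = &foo_pm_ops,
	},
};
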
diff --git a/drivers/dma/intel_mid_dma_regs.h b/drivers/dma/intel_mid_dma_regs.h
index aea5ee88ce035a1d432eb836ef6b4b938f163ab4..c83d35b97bd8e38a91330854c3d8b10acbe4cedf 100644
@@ -262,7 +262,7 @@ struct intel_mid_dma_desc {
        unsigned int                    lli_length;
        unsigned int                    current_lli;
        dma_addr_t                      next;
-       enum dma_data_direction         dirn;
+       enum dma_transfer_direction             dirn;
        enum dma_status                 status;
        enum dma_slave_buswidth         width; /*width of DMA txn*/
        enum intel_mid_dma_mode         cfg_mode; /*mode configuration*/
@@ -296,6 +296,6 @@ static inline struct intel_mid_dma_slave *to_intel_mid_dma_slave
 }
 
 
-int dma_resume(struct pci_dev *pci);
+int dma_resume(struct device *dev);
 
 #endif /*__INTEL_MID_DMAC_REGS_H__*/
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index e03f811a83dd980e5aad9e2b25b0acdc291769f5..04be90b645b839e929512032a234089cfe2d082c 100644
@@ -1735,8 +1735,6 @@ static void iop_chan_start_null_xor(struct iop_adma_chan *iop_chan)
        spin_unlock_bh(&iop_chan->lock);
 }
 
-MODULE_ALIAS("platform:iop-adma");
-
 static struct platform_driver iop_adma_driver = {
        .probe          = iop_adma_probe,
        .remove         = __devexit_p(iop_adma_remove),
@@ -1746,19 +1744,9 @@ static struct platform_driver iop_adma_driver = {
        },
 };
 
-static int __init iop_adma_init (void)
-{
-       return platform_driver_register(&iop_adma_driver);
-}
-
-static void __exit iop_adma_exit (void)
-{
-       platform_driver_unregister(&iop_adma_driver);
-       return;
-}
-module_exit(iop_adma_exit);
-module_init(iop_adma_init);
+module_platform_driver(iop_adma_driver);
 
 MODULE_AUTHOR("Intel Corporation");
 MODULE_DESCRIPTION("IOP ADMA Engine Driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:iop-adma");
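
iop-adma (and mpc512x_dma further down) drop the open-coded module init/exit pair in favour of module_platform_driver(), which expands to exactly that registration boilerplate. A small sketch of a platform driver using the helper; the foo_* names are illustrative:

#include <linux/module.h>
#include <linux/platform_device.h>

/* Illustrative platform driver; probe/remove do nothing here. */
static int foo_probe(struct platform_device *pdev)
{
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.remove	= foo_remove,
	.driver	= {
		.name = "foo",
	},
};

/* Expands to the module_init()/module_exit() pair that registers and
 * unregisters foo_driver, replacing the hand-written versions. */
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");
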
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 0e5ef33f90a17ad75bebaa889a7ae7a46d75248c..6212b16e8cf21ea32ae05732ae1f3b1147b134b8 100644
@@ -312,7 +312,7 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
        case IPU_PIX_FMT_RGB565:
                params->ip.bpp  = 2;
                params->ip.pfs  = 4;
-               params->ip.npb  = 7;
+               params->ip.npb  = 15;
                params->ip.sat  = 2;            /* SAT = 32-bit access */
                params->ip.ofs0 = 0;            /* Red bit offset */
                params->ip.ofs1 = 5;            /* Green bit offset */
@@ -422,12 +422,6 @@ static void ipu_ch_param_set_size(union chan_param_mem *params,
        params->pp.nsb = 1;
 }
 
-static void ipu_ch_param_set_burst_size(union chan_param_mem *params,
-                                       uint16_t burst_pixels)
-{
-       params->pp.npb = burst_pixels - 1;
-}
-
 static void ipu_ch_param_set_buffer(union chan_param_mem *params,
                                    dma_addr_t buf0, dma_addr_t buf1)
 {
@@ -690,23 +684,6 @@ static int ipu_init_channel_buffer(struct idmac_channel *ichan,
        ipu_ch_param_set_size(&params, pixel_fmt, width, height, stride_bytes);
        ipu_ch_param_set_buffer(&params, phyaddr_0, phyaddr_1);
        ipu_ch_param_set_rotation(&params, rot_mode);
-       /* Some channels (rotation) have restriction on burst length */
-       switch (channel) {
-       case IDMAC_IC_7:        /* Hangs with burst 8, 16, other values
-                                  invalid - Table 44-30 */
-/*
-               ipu_ch_param_set_burst_size(&params, 8);
- */
-               break;
-       case IDMAC_SDC_0:
-       case IDMAC_SDC_1:
-               /* In original code only IPU_PIX_FMT_RGB565 was setting burst */
-               ipu_ch_param_set_burst_size(&params, 16);
-               break;
-       case IDMAC_IC_0:
-       default:
-               break;
-       }
 
        spin_lock_irqsave(&ipu->lock, flags);
 
@@ -1364,7 +1341,7 @@ static void ipu_gc_tasklet(unsigned long arg)
 /* Allocate and initialise a transfer descriptor. */
 static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan,
                struct scatterlist *sgl, unsigned int sg_len,
-               enum dma_data_direction direction, unsigned long tx_flags)
+               enum dma_transfer_direction direction, unsigned long tx_flags)
 {
        struct idmac_channel *ichan = to_idmac_chan(chan);
        struct idmac_tx_desc *desc = NULL;
@@ -1376,7 +1353,7 @@ static struct dma_async_tx_descriptor *idmac_prep_slave_sg(struct dma_chan *chan
            chan->chan_id != IDMAC_IC_7)
                return NULL;
 
-       if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE) {
+       if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV) {
                dev_err(chan->device->dev, "Invalid DMA direction %d!\n", direction);
                return NULL;
        }
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 8ba4edc6185e904ea65b98537a1c09a2b38549bb..4d6d4cf669496ff0afc65d5dee450452b143a2c6 100644
@@ -835,17 +835,7 @@ static struct platform_driver mpc_dma_driver = {
        },
 };
 
-static int __init mpc_dma_init(void)
-{
-       return platform_driver_register(&mpc_dma_driver);
-}
-module_init(mpc_dma_init);
-
-static void __exit mpc_dma_exit(void)
-{
-       platform_driver_unregister(&mpc_dma_driver);
-}
-module_exit(mpc_dma_exit);
+module_platform_driver(mpc_dma_driver);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index fc903c0ed234eae754d06911b94f73f463a3b7eb..b06cd4ca626fb4fd93d5ab3b7fedddbfd6ea6c25 100644
@@ -44,7 +44,6 @@
 #define HW_APBHX_CTRL0                         0x000
 #define BM_APBH_CTRL0_APB_BURST8_EN            (1 << 29)
 #define BM_APBH_CTRL0_APB_BURST_EN             (1 << 28)
-#define BP_APBH_CTRL0_CLKGATE_CHANNEL          8
 #define BP_APBH_CTRL0_RESET_CHANNEL            16
 #define HW_APBHX_CTRL1                         0x010
 #define HW_APBHX_CTRL2                         0x020
@@ -111,6 +110,7 @@ struct mxs_dma_chan {
        int                             chan_irq;
        struct mxs_dma_ccw              *ccw;
        dma_addr_t                      ccw_phys;
+       int                             desc_count;
        dma_cookie_t                    last_completed;
        enum dma_status                 status;
        unsigned int                    flags;
@@ -130,23 +130,6 @@ struct mxs_dma_engine {
        struct mxs_dma_chan             mxs_chans[MXS_DMA_CHANNELS];
 };
 
-static inline void mxs_dma_clkgate(struct mxs_dma_chan *mxs_chan, int enable)
-{
-       struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
-       int chan_id = mxs_chan->chan.chan_id;
-       int set_clr = enable ? MXS_CLR_ADDR : MXS_SET_ADDR;
-
-       /* enable apbh channel clock */
-       if (dma_is_apbh()) {
-               if (apbh_is_old())
-                       writel(1 << (chan_id + BP_APBH_CTRL0_CLKGATE_CHANNEL),
-                               mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
-               else
-                       writel(1 << chan_id,
-                               mxs_dma->base + HW_APBHX_CTRL0 + set_clr);
-       }
-}
-
 static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
 {
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -165,9 +148,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
        int chan_id = mxs_chan->chan.chan_id;
 
-       /* clkgate needs to be enabled before writing other registers */
-       mxs_dma_clkgate(mxs_chan, 1);
-
        /* set cmd_addr up */
        writel(mxs_chan->ccw_phys,
                mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id));
@@ -178,9 +158,6 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
 
 static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
 {
-       /* disable apbh channel clock */
-       mxs_dma_clkgate(mxs_chan, 0);
-
        mxs_chan->status = DMA_SUCCESS;
 }
 
@@ -268,7 +245,7 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
        /*
         * When both completion and error of termination bits set at the
         * same time, we do not take it as an error.  IOW, it only becomes
-        * an error we need to handler here in case of ether it's (1) an bus
+        * an error we need to handle here in case of either it's (1) a bus
         * error or (2) a termination error with no completion.
         */
        stat2 = ((stat2 >> MXS_DMA_CHANNELS) & stat2) | /* (1) */
@@ -338,10 +315,7 @@ static int mxs_dma_alloc_chan_resources(struct dma_chan *chan)
        if (ret)
                goto err_clk;
 
-       /* clkgate needs to be enabled for reset to finish */
-       mxs_dma_clkgate(mxs_chan, 1);
        mxs_dma_reset_chan(mxs_chan);
-       mxs_dma_clkgate(mxs_chan, 0);
 
        dma_async_tx_descriptor_init(&mxs_chan->desc, chan);
        mxs_chan->desc.tx_submit = mxs_dma_tx_submit;
@@ -377,7 +351,7 @@ static void mxs_dma_free_chan_resources(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long append)
 {
        struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
@@ -386,7 +360,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
        struct scatterlist *sg;
        int i, j;
        u32 *pio;
-       static int idx;
+       int idx = append ? mxs_chan->desc_count : 0;
 
        if (mxs_chan->status == DMA_IN_PROGRESS && !append)
                return NULL;
@@ -417,7 +391,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                idx = 0;
        }
 
-       if (direction == DMA_NONE) {
+       if (direction == DMA_TRANS_NONE) {
                ccw = &mxs_chan->ccw[idx++];
                pio = (u32 *) sgl;
 
@@ -450,7 +424,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                        ccw->bits |= CCW_CHAIN;
                        ccw->bits |= CCW_HALT_ON_TERM;
                        ccw->bits |= CCW_TERM_FLUSH;
-                       ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+                       ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
                                        MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ,
                                        COMMAND);
 
@@ -462,6 +436,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
                        }
                }
        }
+       mxs_chan->desc_count = idx;
 
        return &mxs_chan->desc;
 
@@ -472,7 +447,7 @@ err_out:
 
 static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
        struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
@@ -515,7 +490,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
                ccw->bits |= CCW_IRQ;
                ccw->bits |= CCW_HALT_ON_TERM;
                ccw->bits |= CCW_TERM_FLUSH;
-               ccw->bits |= BF_CCW(direction == DMA_FROM_DEVICE ?
+               ccw->bits |= BF_CCW(direction == DMA_DEV_TO_MEM ?
                                MXS_DMA_CMD_WRITE : MXS_DMA_CMD_READ, COMMAND);
 
                dma_addr += period_len;
@@ -523,6 +498,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_dma_cyclic(
 
                i++;
        }
+       mxs_chan->desc_count = i;
 
        return &mxs_chan->desc;
 
@@ -539,8 +515,8 @@ static int mxs_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 
        switch (cmd) {
        case DMA_TERMINATE_ALL:
-               mxs_dma_disable_chan(mxs_chan);
                mxs_dma_reset_chan(mxs_chan);
+               mxs_dma_disable_chan(mxs_chan);
                break;
        case DMA_PAUSE:
                mxs_dma_pause_chan(mxs_chan);
@@ -580,7 +556,7 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
 
        ret = clk_prepare_enable(mxs_dma->clk);
        if (ret)
-               goto err_out;
+               return ret;
 
        ret = mxs_reset_block(mxs_dma->base);
        if (ret)
@@ -604,11 +580,8 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
        writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
                mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR);
 
-       clk_disable_unprepare(mxs_dma->clk);
-
-       return 0;
-
 err_out:
+       clk_disable_unprepare(mxs_dma->clk);
        return ret;
 }
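
mxs_dma_prep_slave_sg() previously kept its descriptor index in a function-local static int shared by every channel and every call; it now lives in the channel as desc_count, so an appended transfer continues from that channel's own position. A sketch of the difference; the foo_* names are illustrative:

#include <linux/types.h>

/* Illustrative only: keep the descriptor write index in the channel rather
 * than in a function-local static shared by every channel. */
struct foo_sg_chan {
	int desc_count;		/* next free slot in this channel's ring */
};

static void foo_prep(struct foo_sg_chan *fc, bool append)
{
	int idx = append ? fc->desc_count : 0;

	/* ... fill descriptors, incrementing idx for each one ... */

	fc->desc_count = idx;	/* remember where this channel stopped */
}
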
 
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index a6d0e3dbed0748f1d666f5972b4109dfb043a3e2..823f58179f9d46a8a169dbbaf7551b1d91ec5f28 100644
@@ -1,7 +1,7 @@
 /*
  * Topcliff PCH DMA controller driver
  * Copyright (c) 2010 Intel Corporation
- * Copyright (C) 2011 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -99,7 +99,7 @@ struct pch_dma_desc {
 struct pch_dma_chan {
        struct dma_chan         chan;
        void __iomem *membase;
-       enum dma_data_direction dir;
+       enum dma_transfer_direction dir;
        struct tasklet_struct   tasklet;
        unsigned long           err_status;
 
@@ -224,7 +224,7 @@ static void pdc_set_dir(struct dma_chan *chan)
                mask_ctl = DMA_MASK_CTL0_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
                                       (DMA_CTL0_BITS_PER_CH * chan->chan_id));
                val &= mask_mode;
-               if (pd_chan->dir == DMA_TO_DEVICE)
+               if (pd_chan->dir == DMA_MEM_TO_DEV)
                        val |= 0x1 << (DMA_CTL0_BITS_PER_CH * chan->chan_id +
                                       DMA_CTL0_DIR_SHIFT_BITS);
                else
@@ -242,7 +242,7 @@ static void pdc_set_dir(struct dma_chan *chan)
                mask_ctl = DMA_MASK_CTL2_MODE & ~(DMA_CTL0_MODE_MASK_BITS <<
                                                 (DMA_CTL0_BITS_PER_CH * ch));
                val &= mask_mode;
-               if (pd_chan->dir == DMA_TO_DEVICE)
+               if (pd_chan->dir == DMA_MEM_TO_DEV)
                        val |= 0x1 << (DMA_CTL0_BITS_PER_CH * ch +
                                       DMA_CTL0_DIR_SHIFT_BITS);
                else
@@ -607,7 +607,7 @@ static void pd_issue_pending(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
                        struct scatterlist *sgl, unsigned int sg_len,
-                       enum dma_data_direction direction, unsigned long flags)
+                       enum dma_transfer_direction direction, unsigned long flags)
 {
        struct pch_dma_chan *pd_chan = to_pd_chan(chan);
        struct pch_dma_slave *pd_slave = chan->private;
@@ -623,9 +623,9 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
                return NULL;
        }
 
-       if (direction == DMA_FROM_DEVICE)
+       if (direction == DMA_DEV_TO_MEM)
                reg = pd_slave->rx_reg;
-       else if (direction == DMA_TO_DEVICE)
+       else if (direction == DMA_MEM_TO_DEV)
                reg = pd_slave->tx_reg;
        else
                return NULL;
@@ -1018,6 +1018,8 @@ static void __devexit pch_dma_remove(struct pci_dev *pdev)
 #define PCI_DEVICE_ID_ML7223_DMA2_4CH  0x800E
 #define PCI_DEVICE_ID_ML7223_DMA3_4CH  0x8017
 #define PCI_DEVICE_ID_ML7223_DMA4_4CH  0x803B
+#define PCI_DEVICE_ID_ML7831_DMA1_8CH  0x8810
+#define PCI_DEVICE_ID_ML7831_DMA2_4CH  0x8815
 
 DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
@@ -1030,6 +1032,8 @@ DEFINE_PCI_DEVICE_TABLE(pch_dma_id_table) = {
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA2_4CH), 4}, /* Video SPI */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA3_4CH), 4}, /* Security */
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_DMA4_4CH), 4}, /* FPGA */
+       { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA1_8CH), 8}, /* UART */
+       { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_DMA2_4CH), 4}, /* SPI */
        { 0, },
 };
 
@@ -1057,7 +1061,7 @@ static void __exit pch_dma_exit(void)
 module_init(pch_dma_init);
 module_exit(pch_dma_exit);
 
-MODULE_DESCRIPTION("Intel EG20T PCH / OKI SEMICONDUCTOR ML7213 IOH "
+MODULE_DESCRIPTION("Intel EG20T PCH / LAPIS Semicon ML7213/ML7223/ML7831 IOH "
                   "DMA controller driver");
 MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
 MODULE_LICENSE("GPL v2");
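
pch_dma picks up the LAPIS/ROHM ML7831 UART and SPI DMA functions by adding their PCI IDs to the device table, with the channel count carried in driver_data. A hedged sketch of such an entry; the vendor and device values below are placeholders, not real IDs:

#include <linux/module.h>
#include <linux/pci.h>

/* Illustrative table entry; driver_data carries the number of channels. */
#define FOO_DMA_VENDOR_ID	0x1234
#define FOO_DMA_DEVICE_8CH	0x5678

DEFINE_PCI_DEVICE_TABLE(foo_dma_id_table) = {
	{ PCI_DEVICE(FOO_DMA_VENDOR_ID, FOO_DMA_DEVICE_8CH), .driver_data = 8 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, foo_dma_id_table);
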
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 09adcfcd953e6841e840fb9caa663212c20c03f7..b8ec03ee8e22e495e633ff95bf9faf0d02756dac 100644
@@ -350,14 +350,14 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
        case DMA_SLAVE_CONFIG:
                slave_config = (struct dma_slave_config *)arg;
 
-               if (slave_config->direction == DMA_TO_DEVICE) {
+               if (slave_config->direction == DMA_MEM_TO_DEV) {
                        if (slave_config->dst_addr)
                                pch->fifo_addr = slave_config->dst_addr;
                        if (slave_config->dst_addr_width)
                                pch->burst_sz = __ffs(slave_config->dst_addr_width);
                        if (slave_config->dst_maxburst)
                                pch->burst_len = slave_config->dst_maxburst;
-               } else if (slave_config->direction == DMA_FROM_DEVICE) {
+               } else if (slave_config->direction == DMA_DEV_TO_MEM) {
                        if (slave_config->src_addr)
                                pch->fifo_addr = slave_config->src_addr;
                        if (slave_config->src_addr_width)
@@ -621,7 +621,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
 
 static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
                struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
-               size_t period_len, enum dma_data_direction direction)
+               size_t period_len, enum dma_transfer_direction direction)
 {
        struct dma_pl330_desc *desc;
        struct dma_pl330_chan *pch = to_pchan(chan);
@@ -636,14 +636,14 @@ static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
        }
 
        switch (direction) {
-       case DMA_TO_DEVICE:
+       case DMA_MEM_TO_DEV:
                desc->rqcfg.src_inc = 1;
                desc->rqcfg.dst_inc = 0;
                desc->req.rqtype = MEMTODEV;
                src = dma_addr;
                dst = pch->fifo_addr;
                break;
-       case DMA_FROM_DEVICE:
+       case DMA_DEV_TO_MEM:
                desc->rqcfg.src_inc = 0;
                desc->rqcfg.dst_inc = 1;
                desc->req.rqtype = DEVTOMEM;
@@ -710,7 +710,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 
 static struct dma_async_tx_descriptor *
 pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flg)
 {
        struct dma_pl330_desc *first, *desc = NULL;
@@ -759,7 +759,7 @@ pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                else
                        list_add_tail(&desc->node, &first->node);
 
-               if (direction == DMA_TO_DEVICE) {
+               if (direction == DMA_MEM_TO_DEV) {
                        desc->rqcfg.src_inc = 1;
                        desc->rqcfg.dst_inc = 0;
                        desc->req.rqtype = MEMTODEV;
@@ -834,17 +834,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 
        amba_set_drvdata(adev, pdmac);
 
-#ifdef CONFIG_PM_RUNTIME
-       /* to use the runtime PM helper functions */
-       pm_runtime_enable(&adev->dev);
-
-       /* enable the power domain */
-       if (pm_runtime_get_sync(&adev->dev)) {
-               dev_err(&adev->dev, "failed to get runtime pm\n");
-               ret = -ENODEV;
-               goto probe_err1;
-       }
-#else
+#ifndef CONFIG_PM_RUNTIME
        /* enable dma clk */
        clk_enable(pdmac->clk);
 #endif
@@ -977,10 +967,7 @@ static int __devexit pl330_remove(struct amba_device *adev)
        res = &adev->res;
        release_mem_region(res->start, resource_size(res));
 
-#ifdef CONFIG_PM_RUNTIME
-       pm_runtime_put(&adev->dev);
-       pm_runtime_disable(&adev->dev);
-#else
+#ifndef CONFIG_PM_RUNTIME
        clk_disable(pdmac->clk);
 #endif
 
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 81809c2b46abef271cb2ac90408914990d0376a7..812fd76e9c18e4b2b210f20125324560a873ddeb 100644
@@ -23,7 +23,6 @@
 #include <linux/interrupt.h>
 #include <linux/dmaengine.h>
 #include <linux/delay.h>
-#include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/sh_dma.h>
@@ -57,6 +56,15 @@ static LIST_HEAD(sh_dmae_devices);
 static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
 
 static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all);
+static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
+
+static void chclr_write(struct sh_dmae_chan *sh_dc, u32 data)
+{
+       struct sh_dmae_device *shdev = to_sh_dev(sh_dc);
+
+       __raw_writel(data, shdev->chan_reg +
+                    shdev->pdata->channel[sh_dc->id].chclr_offset);
+}
 
 static void sh_dmae_writel(struct sh_dmae_chan *sh_dc, u32 data, u32 reg)
 {
@@ -129,6 +137,15 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
 
        dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
 
+       if (shdev->pdata->chclr_present) {
+               int i;
+               for (i = 0; i < shdev->pdata->channel_num; i++) {
+                       struct sh_dmae_chan *sh_chan = shdev->chan[i];
+                       if (sh_chan)
+                               chclr_write(sh_chan, 0);
+               }
+       }
+
        dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
 
        dmaor = dmaor_read(shdev);
@@ -139,6 +156,10 @@ static int sh_dmae_rst(struct sh_dmae_device *shdev)
                dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
                return -EIO;
        }
+       if (shdev->pdata->dmaor_init & ~dmaor)
+               dev_warn(shdev->common.dev,
+                        "DMAOR=0x%x hasn't latched the initial value 0x%x.\n",
+                        dmaor, shdev->pdata->dmaor_init);
        return 0;
 }
 
@@ -259,8 +280,6 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
        return 0;
 }
 
-static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan);
-
 static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
 {
        struct sh_desc *desc = tx_to_sh_desc(tx), *chunk, *last = desc, *c;
@@ -340,6 +359,8 @@ static dma_cookie_t sh_dmae_tx_submit(struct dma_async_tx_descriptor *tx)
                                sh_chan_xfer_ld_queue(sh_chan);
                        sh_chan->pm_state = DMAE_PM_ESTABLISHED;
                }
+       } else {
+               sh_chan->pm_state = DMAE_PM_PENDING;
        }
 
        spin_unlock_irq(&sh_chan->desc_lock);
@@ -479,19 +500,19 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
  * @sh_chan:   DMA channel
  * @flags:     DMA transfer flags
  * @dest:      destination DMA address, incremented when direction equals
- *             DMA_FROM_DEVICE or DMA_BIDIRECTIONAL
+ *             DMA_DEV_TO_MEM
  * @src:       source DMA address, incremented when direction equals
- *             DMA_TO_DEVICE or DMA_BIDIRECTIONAL
+ *             DMA_MEM_TO_DEV
  * @len:       DMA transfer length
  * @first:     if NULL, set to the current descriptor and cookie set to -EBUSY
  * @direction: needed for slave DMA to decide which address to keep constant,
- *             equals DMA_BIDIRECTIONAL for MEMCPY
+ *             equals DMA_MEM_TO_MEM for MEMCPY
  * Returns 0 or an error
  * Locks: called with desc_lock held
  */
 static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
        unsigned long flags, dma_addr_t *dest, dma_addr_t *src, size_t *len,
-       struct sh_desc **first, enum dma_data_direction direction)
+       struct sh_desc **first, enum dma_transfer_direction direction)
 {
        struct sh_desc *new;
        size_t copy_size;
@@ -531,9 +552,9 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
        new->direction = direction;
 
        *len -= copy_size;
-       if (direction == DMA_BIDIRECTIONAL || direction == DMA_TO_DEVICE)
+       if (direction == DMA_MEM_TO_MEM || direction == DMA_MEM_TO_DEV)
                *src += copy_size;
-       if (direction == DMA_BIDIRECTIONAL || direction == DMA_FROM_DEVICE)
+       if (direction == DMA_MEM_TO_MEM || direction == DMA_DEV_TO_MEM)
                *dest += copy_size;
 
        return new;
@@ -546,12 +567,12 @@ static struct sh_desc *sh_dmae_add_desc(struct sh_dmae_chan *sh_chan,
  * converted to scatter-gather to guarantee consistent locking and a correct
  * list manipulation. For slave DMA direction carries the usual meaning, and,
  * logically, the SG list is RAM and the addr variable contains slave address,
- * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_BIDIRECTIONAL
+ * e.g., the FIFO I/O register. For MEMCPY direction equals DMA_MEM_TO_MEM
  * and the SG list contains only one element and points at the source buffer.
  */
 static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_chan,
        struct scatterlist *sgl, unsigned int sg_len, dma_addr_t *addr,
-       enum dma_data_direction direction, unsigned long flags)
+       enum dma_transfer_direction direction, unsigned long flags)
 {
        struct scatterlist *sg;
        struct sh_desc *first = NULL, *new = NULL /* compiler... */;
@@ -592,7 +613,7 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_sg(struct sh_dmae_chan *sh_c
                        dev_dbg(sh_chan->dev, "Add SG #%d@%p[%d], dma %llx\n",
                                i, sg, len, (unsigned long long)sg_addr);
 
-                       if (direction == DMA_FROM_DEVICE)
+                       if (direction == DMA_DEV_TO_MEM)
                                new = sh_dmae_add_desc(sh_chan, flags,
                                                &sg_addr, addr, &len, &first,
                                                direction);
@@ -646,13 +667,13 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
        sg_dma_address(&sg) = dma_src;
        sg_dma_len(&sg) = len;
 
-       return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_BIDIRECTIONAL,
+       return sh_dmae_prep_sg(sh_chan, &sg, 1, &dma_dest, DMA_MEM_TO_MEM,
                               flags);
 }
 
 static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
        struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
-       enum dma_data_direction direction, unsigned long flags)
+       enum dma_transfer_direction direction, unsigned long flags)
 {
        struct sh_dmae_slave *param;
        struct sh_dmae_chan *sh_chan;
@@ -996,7 +1017,7 @@ static void dmae_do_tasklet(unsigned long data)
        spin_lock_irq(&sh_chan->desc_lock);
        list_for_each_entry(desc, &sh_chan->ld_queue, node) {
                if (desc->mark == DESC_SUBMITTED &&
-                   ((desc->direction == DMA_FROM_DEVICE &&
+                   ((desc->direction == DMA_DEV_TO_MEM &&
                      (desc->hw.dar + desc->hw.tcr) == dar_buf) ||
                     (desc->hw.sar + desc->hw.tcr) == sar_buf)) {
                        dev_dbg(sh_chan->dev, "done #%d@%p dst %u\n",
@@ -1225,6 +1246,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, shdev);
 
+       shdev->common.dev = &pdev->dev;
+
        pm_runtime_enable(&pdev->dev);
        pm_runtime_get_sync(&pdev->dev);
 
@@ -1239,7 +1262,8 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 
        INIT_LIST_HEAD(&shdev->common.channels);
 
-       dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
+       if (!pdata->slave_only)
+               dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
        if (pdata->slave && pdata->slave_num)
                dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
 
@@ -1254,7 +1278,6 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
        shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
        shdev->common.device_control = sh_dmae_control;
 
-       shdev->common.dev = &pdev->dev;
        /* Default transfer size of 32 bytes requires 32-byte alignment */
        shdev->common.copy_align = LOG2_DEFAULT_XFER_SIZE;
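
copy_align is a log2 shift; assuming LOG2_DEFAULT_XFER_SIZE evaluates to 5 (2^5 = 32 bytes, matching the comment above), a client-side alignment check could look roughly like this sketch (names are illustrative, not from this patch):

/* Sketch only: honouring copy_align, which is a log2 shift (assumed 5 -> 32 B). */
#include <stdbool.h>
#include <stddef.h>

static bool memcpy_params_aligned(size_t src_off, size_t dst_off, size_t len,
				  unsigned int copy_align)
{
	size_t mask = ((size_t)1 << copy_align) - 1;

	return ((src_off | dst_off | len) & mask) == 0;
}
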
 
@@ -1435,22 +1458,17 @@ static int sh_dmae_runtime_resume(struct device *dev)
 #ifdef CONFIG_PM
 static int sh_dmae_suspend(struct device *dev)
 {
-       struct sh_dmae_device *shdev = dev_get_drvdata(dev);
-       int i;
-
-       for (i = 0; i < shdev->pdata->channel_num; i++) {
-               struct sh_dmae_chan *sh_chan = shdev->chan[i];
-               if (sh_chan->descs_allocated)
-                       sh_chan->pm_error = pm_runtime_put_sync(dev);
-       }
-
        return 0;
 }
 
 static int sh_dmae_resume(struct device *dev)
 {
        struct sh_dmae_device *shdev = dev_get_drvdata(dev);
-       int i;
+       int i, ret;
+
+       ret = sh_dmae_rst(shdev);
+       if (ret < 0)
+               dev_err(dev, "Failed to reset!\n");
 
        for (i = 0; i < shdev->pdata->channel_num; i++) {
                struct sh_dmae_chan *sh_chan = shdev->chan[i];
@@ -1459,9 +1477,6 @@ static int sh_dmae_resume(struct device *dev)
                if (!sh_chan->descs_allocated)
                        continue;
 
-               if (!sh_chan->pm_error)
-                       pm_runtime_get_sync(dev);
-
                if (param) {
                        const struct sh_dmae_slave_config *cfg = param->config;
                        dmae_set_dmars(sh_chan, cfg->mid_rid);
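
The system suspend handler above is reduced to a stub, while resume resets the controller and reprograms DMARS for channels with allocated descriptors. These callbacks are presumably referenced from a struct dev_pm_ops that is not visible in this excerpt; a hedged sketch of such wiring (sh_dmae_runtime_suspend is assumed to exist alongside the runtime resume callback shown above) would be:

/*
 * Hedged sketch: typical wiring for the callbacks above. The real
 * sh_dmae dev_pm_ops is not part of this excerpt, and
 * sh_dmae_runtime_suspend is assumed to exist in the driver.
 */
static const struct dev_pm_ops sh_dmae_pm_sketch = {
	.suspend	 = sh_dmae_suspend,
	.resume		 = sh_dmae_resume,
	.runtime_suspend = sh_dmae_runtime_suspend,
	.runtime_resume	 = sh_dmae_runtime_resume,
};
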
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
new file mode 100644 (file)
index 0000000..2333810
--- /dev/null
@@ -0,0 +1,707 @@
+/*
+ * DMA controller driver for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/sirfsoc_dma.h>
+
+#define SIRFSOC_DMA_DESCRIPTORS                 16
+#define SIRFSOC_DMA_CHANNELS                    16
+
+#define SIRFSOC_DMA_CH_ADDR                     0x00
+#define SIRFSOC_DMA_CH_XLEN                     0x04
+#define SIRFSOC_DMA_CH_YLEN                     0x08
+#define SIRFSOC_DMA_CH_CTRL                     0x0C
+
+#define SIRFSOC_DMA_WIDTH_0                     0x100
+#define SIRFSOC_DMA_CH_VALID                    0x140
+#define SIRFSOC_DMA_CH_INT                      0x144
+#define SIRFSOC_DMA_INT_EN                      0x148
+#define SIRFSOC_DMA_CH_LOOP_CTRL                0x150
+
+#define SIRFSOC_DMA_MODE_CTRL_BIT               4
+#define SIRFSOC_DMA_DIR_CTRL_BIT                5
+
+/* xlen and dma_width registers are expressed in 4-byte words */
+#define SIRFSOC_DMA_WORD_LEN                   4
+
+struct sirfsoc_dma_desc {
+       struct dma_async_tx_descriptor  desc;
+       struct list_head                node;
+
+       /* SiRFprimaII 2D-DMA parameters */
+
+       int             xlen;           /* DMA xlen */
+       int             ylen;           /* DMA ylen */
+       int             width;          /* DMA width */
+       int             dir;
+       bool            cyclic;         /* is loop DMA? */
+       u32             addr;           /* DMA buffer address */
+};
+
+struct sirfsoc_dma_chan {
+       struct dma_chan                 chan;
+       struct list_head                free;
+       struct list_head                prepared;
+       struct list_head                queued;
+       struct list_head                active;
+       struct list_head                completed;
+       dma_cookie_t                    completed_cookie;
+       unsigned long                   happened_cyclic;
+       unsigned long                   completed_cyclic;
+
+       /* Lock for this structure */
+       spinlock_t                      lock;
+
+       int                             mode;
+};
+
+struct sirfsoc_dma {
+       struct dma_device               dma;
+       struct tasklet_struct           tasklet;
+       struct sirfsoc_dma_chan         channels[SIRFSOC_DMA_CHANNELS];
+       void __iomem                    *base;
+       int                             irq;
+};
+
+#define DRV_NAME       "sirfsoc_dma"
+
+/* Convert struct dma_chan to struct sirfsoc_dma_chan */
+static inline
+struct sirfsoc_dma_chan *dma_chan_to_sirfsoc_dma_chan(struct dma_chan *c)
+{
+       return container_of(c, struct sirfsoc_dma_chan, chan);
+}
+
+/* Convert struct dma_chan to struct sirfsoc_dma */
+static inline struct sirfsoc_dma *dma_chan_to_sirfsoc_dma(struct dma_chan *c)
+{
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(c);
+       return container_of(schan, struct sirfsoc_dma, channels[c->chan_id]);
+}
+
+/* Execute all queued DMA descriptors */
+static void sirfsoc_dma_execute(struct sirfsoc_dma_chan *schan)
+{
+       struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+       int cid = schan->chan.chan_id;
+       struct sirfsoc_dma_desc *sdesc = NULL;
+
+       /*
+        * The lock is already held by our callers, so we do not take it
+        * again here.
+        */
+
+       sdesc = list_first_entry(&schan->queued, struct sirfsoc_dma_desc,
+               node);
+       /* Move the first queued descriptor to the active list */
+       list_move_tail(&sdesc->node, &schan->active);
+
+       /* Start the DMA transfer */
+       writel_relaxed(sdesc->width, sdma->base + SIRFSOC_DMA_WIDTH_0 +
+               cid * 4);
+       writel_relaxed(cid | (schan->mode << SIRFSOC_DMA_MODE_CTRL_BIT) |
+               (sdesc->dir << SIRFSOC_DMA_DIR_CTRL_BIT),
+               sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_CTRL);
+       writel_relaxed(sdesc->xlen, sdma->base + cid * 0x10 +
+               SIRFSOC_DMA_CH_XLEN);
+       writel_relaxed(sdesc->ylen, sdma->base + cid * 0x10 +
+               SIRFSOC_DMA_CH_YLEN);
+       writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) |
+               (1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+
+       /*
+        * writel has an implicit memory write barrier to make sure data is
+        * flushed into memory before starting the DMA
+        */
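+       /* The address is written as a 32-bit word index (byte address >> 2) */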
+       writel(sdesc->addr >> 2, sdma->base + cid * 0x10 + SIRFSOC_DMA_CH_ADDR);
+
+       if (sdesc->cyclic) {
+               writel((1 << cid) | 1 << (cid + 16) |
+                       readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL),
+                       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+               schan->happened_cyclic = schan->completed_cyclic = 0;
+       }
+}
+
+/* Interrupt handler */
+static irqreturn_t sirfsoc_dma_irq(int irq, void *data)
+{
+       struct sirfsoc_dma *sdma = data;
+       struct sirfsoc_dma_chan *schan;
+       struct sirfsoc_dma_desc *sdesc = NULL;
+       u32 is;
+       int ch;
+
+       is = readl(sdma->base + SIRFSOC_DMA_CH_INT);
+       while ((ch = fls(is) - 1) >= 0) {
+               is &= ~(1 << ch);
+               writel_relaxed(1 << ch, sdma->base + SIRFSOC_DMA_CH_INT);
+               schan = &sdma->channels[ch];
+
+               spin_lock(&schan->lock);
+
+               sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+                       node);
+               if (!sdesc->cyclic) {
+                       /* Execute queued descriptors */
+                       list_splice_tail_init(&schan->active, &schan->completed);
+                       if (!list_empty(&schan->queued))
+                               sirfsoc_dma_execute(schan);
+               } else
+                       schan->happened_cyclic++;
+
+               spin_unlock(&schan->lock);
+       }
+
+       /* Schedule tasklet */
+       tasklet_schedule(&sdma->tasklet);
+
+       return IRQ_HANDLED;
+}
+
+/* process completed descriptors */
+static void sirfsoc_dma_process_completed(struct sirfsoc_dma *sdma)
+{
+       dma_cookie_t last_cookie = 0;
+       struct sirfsoc_dma_chan *schan;
+       struct sirfsoc_dma_desc *sdesc;
+       struct dma_async_tx_descriptor *desc;
+       unsigned long flags;
+       unsigned long happened_cyclic;
+       LIST_HEAD(list);
+       int i;
+
+       for (i = 0; i < sdma->dma.chancnt; i++) {
+               schan = &sdma->channels[i];
+
+               /* Get all completed descriptors */
+               spin_lock_irqsave(&schan->lock, flags);
+               if (!list_empty(&schan->completed)) {
+                       list_splice_tail_init(&schan->completed, &list);
+                       spin_unlock_irqrestore(&schan->lock, flags);
+
+                       /* Execute callbacks and run dependencies */
+                       list_for_each_entry(sdesc, &list, node) {
+                               desc = &sdesc->desc;
+
+                               if (desc->callback)
+                                       desc->callback(desc->callback_param);
+
+                               last_cookie = desc->cookie;
+                               dma_run_dependencies(desc);
+                       }
+
+                       /* Free descriptors */
+                       spin_lock_irqsave(&schan->lock, flags);
+                       list_splice_tail_init(&list, &schan->free);
+                       schan->completed_cookie = last_cookie;
+                       spin_unlock_irqrestore(&schan->lock, flags);
+               } else {
+                       /* for cyclic channel, desc is always in active list */
+                       sdesc = list_first_entry(&schan->active, struct sirfsoc_dma_desc,
+                               node);
+
+                       if (!sdesc || !sdesc->cyclic) {
+                               /* without active cyclic DMA */
+                               spin_unlock_irqrestore(&schan->lock, flags);
+                               continue;
+                       }
+
+                       /* cyclic DMA */
+                       happened_cyclic = schan->happened_cyclic;
+                       spin_unlock_irqrestore(&schan->lock, flags);
+
+                       desc = &sdesc->desc;
+                       while (happened_cyclic != schan->completed_cyclic) {
+                               if (desc->callback)
+                                       desc->callback(desc->callback_param);
+                               schan->completed_cyclic++;
+                       }
+               }
+       }
+}
+
+/* DMA Tasklet */
+static void sirfsoc_dma_tasklet(unsigned long data)
+{
+       struct sirfsoc_dma *sdma = (void *)data;
+
+       sirfsoc_dma_process_completed(sdma);
+}
+
+/* Submit descriptor to hardware */
+static dma_cookie_t sirfsoc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
+{
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(txd->chan);
+       struct sirfsoc_dma_desc *sdesc;
+       unsigned long flags;
+       dma_cookie_t cookie;
+
+       sdesc = container_of(txd, struct sirfsoc_dma_desc, desc);
+
+       spin_lock_irqsave(&schan->lock, flags);
+
+       /* Move descriptor to queue */
+       list_move_tail(&sdesc->node, &schan->queued);
+
+       /* Assign the next cookie, skipping reserved non-positive values on wrap */
+       cookie = schan->chan.cookie + 1;
+       if (cookie <= 0)
+               cookie = 1;
+
+       schan->chan.cookie = cookie;
+       sdesc->desc.cookie = cookie;
+
+       spin_unlock_irqrestore(&schan->lock, flags);
+
+       return cookie;
+}
+
+static int sirfsoc_dma_slave_config(struct sirfsoc_dma_chan *schan,
+       struct dma_slave_config *config)
+{
+       unsigned long flags;
+
+       if ((config->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES) ||
+               (config->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES))
+               return -EINVAL;
+
+       spin_lock_irqsave(&schan->lock, flags);
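+       /* mode 1 is selected when the client requests 4-word bursts, otherwise mode 0 */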
+       schan->mode = (config->src_maxburst == 4 ? 1 : 0);
+       spin_unlock_irqrestore(&schan->lock, flags);
+
+       return 0;
+}
+
+static int sirfsoc_dma_terminate_all(struct sirfsoc_dma_chan *schan)
+{
+       struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(&schan->chan);
+       int cid = schan->chan.chan_id;
+       unsigned long flags;
+
+       writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_INT_EN) &
+               ~(1 << cid), sdma->base + SIRFSOC_DMA_INT_EN);
+       writel_relaxed(1 << cid, sdma->base + SIRFSOC_DMA_CH_VALID);
+
+       writel_relaxed(readl_relaxed(sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL)
+               & ~((1 << cid) | 1 << (cid + 16)),
+                       sdma->base + SIRFSOC_DMA_CH_LOOP_CTRL);
+
+       spin_lock_irqsave(&schan->lock, flags);
+       list_splice_tail_init(&schan->active, &schan->free);
+       list_splice_tail_init(&schan->queued, &schan->free);
+       spin_unlock_irqrestore(&schan->lock, flags);
+
+       return 0;
+}
+
+static int sirfsoc_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+       unsigned long arg)
+{
+       struct dma_slave_config *config;
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+
+       switch (cmd) {
+       case DMA_TERMINATE_ALL:
+               return sirfsoc_dma_terminate_all(schan);
+       case DMA_SLAVE_CONFIG:
+               config = (struct dma_slave_config *)arg;
+               return sirfsoc_dma_slave_config(schan, config);
+
+       default:
+               break;
+       }
+
+       return -ENOSYS;
+}
+
+/* Alloc channel resources */
+static int sirfsoc_dma_alloc_chan_resources(struct dma_chan *chan)
+{
+       struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       struct sirfsoc_dma_desc *sdesc;
+       unsigned long flags;
+       LIST_HEAD(descs);
+       int i;
+
+       /* Alloc descriptors for this channel */
+       for (i = 0; i < SIRFSOC_DMA_DESCRIPTORS; i++) {
+               sdesc = kzalloc(sizeof(*sdesc), GFP_KERNEL);
+               if (!sdesc) {
+                       dev_notice(sdma->dma.dev, "Memory allocation error. "
+                               "Allocated only %u descriptors\n", i);
+                       break;
+               }
+
+               dma_async_tx_descriptor_init(&sdesc->desc, chan);
+               sdesc->desc.flags = DMA_CTRL_ACK;
+               sdesc->desc.tx_submit = sirfsoc_dma_tx_submit;
+
+               list_add_tail(&sdesc->node, &descs);
+       }
+
+       /* Return error only if no descriptors were allocated */
+       if (i == 0)
+               return -ENOMEM;
+
+       spin_lock_irqsave(&schan->lock, flags);
+
+       list_splice_tail_init(&descs, &schan->free);
+       spin_unlock_irqrestore(&schan->lock, flags);
+
+       return i;
+}
+
+/* Free channel resources */
+static void sirfsoc_dma_free_chan_resources(struct dma_chan *chan)
+{
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       struct sirfsoc_dma_desc *sdesc, *tmp;
+       unsigned long flags;
+       LIST_HEAD(descs);
+
+       spin_lock_irqsave(&schan->lock, flags);
+
+       /* Channel must be idle */
+       BUG_ON(!list_empty(&schan->prepared));
+       BUG_ON(!list_empty(&schan->queued));
+       BUG_ON(!list_empty(&schan->active));
+       BUG_ON(!list_empty(&schan->completed));
+
+       /* Move data */
+       list_splice_tail_init(&schan->free, &descs);
+
+       spin_unlock_irqrestore(&schan->lock, flags);
+
+       /* Free descriptors */
+       list_for_each_entry_safe(sdesc, tmp, &descs, node)
+               kfree(sdesc);
+}
+
+/* Send pending descriptor to hardware */
+static void sirfsoc_dma_issue_pending(struct dma_chan *chan)
+{
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       unsigned long flags;
+
+       spin_lock_irqsave(&schan->lock, flags);
+
+       if (list_empty(&schan->active) && !list_empty(&schan->queued))
+               sirfsoc_dma_execute(schan);
+
+       spin_unlock_irqrestore(&schan->lock, flags);
+}
+
+/* Check request completion status */
+static enum dma_status
+sirfsoc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
+       struct dma_tx_state *txstate)
+{
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       unsigned long flags;
+       dma_cookie_t last_used;
+       dma_cookie_t last_complete;
+
+       spin_lock_irqsave(&schan->lock, flags);
+       last_used = schan->chan.cookie;
+       last_complete = schan->completed_cookie;
+       spin_unlock_irqrestore(&schan->lock, flags);
+
+       dma_set_tx_state(txstate, last_complete, last_used, 0);
+       return dma_async_is_complete(cookie, last_complete, last_used);
+}
+
+static struct dma_async_tx_descriptor *sirfsoc_dma_prep_interleaved(
+       struct dma_chan *chan, struct dma_interleaved_template *xt,
+       unsigned long flags)
+{
+       struct sirfsoc_dma *sdma = dma_chan_to_sirfsoc_dma(chan);
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       struct sirfsoc_dma_desc *sdesc = NULL;
+       unsigned long iflags;
+       int ret;
+
+       /* Only device<->memory interleaved transfers are supported */
+       if ((xt->dir != DMA_MEM_TO_DEV) && (xt->dir != DMA_DEV_TO_MEM)) {
+               ret = -EINVAL;
+               goto err_dir;
+       }
+
+       /* Get free descriptor */
+       spin_lock_irqsave(&schan->lock, iflags);
+       if (!list_empty(&schan->free)) {
+               sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+                       node);
+               list_del(&sdesc->node);
+       }
+       spin_unlock_irqrestore(&schan->lock, iflags);
+
+       if (!sdesc) {
+               /* try to free completed descriptors */
+               sirfsoc_dma_process_completed(sdma);
+               ret = 0;
+               goto no_desc;
+       }
+
+       /* Place descriptor in prepared list */
+       spin_lock_irqsave(&schan->lock, iflags);
+
+       /*
+        * The number of chunks in a frame can only be 1 for prima2,
+        * and ylen (number of frames - 1) must be at least 0
+        */
+       if ((xt->frame_size == 1) && (xt->numf > 0)) {
+               sdesc->cyclic = 0;
+               sdesc->xlen = xt->sgl[0].size / SIRFSOC_DMA_WORD_LEN;
+               sdesc->width = (xt->sgl[0].size + xt->sgl[0].icg) /
+                               SIRFSOC_DMA_WORD_LEN;
+               sdesc->ylen = xt->numf - 1;
+               if (xt->dir == DMA_MEM_TO_DEV) {
+                       sdesc->addr = xt->src_start;
+                       sdesc->dir = 1;
+               } else {
+                       sdesc->addr = xt->dst_start;
+                       sdesc->dir = 0;
+               }
+
+               list_add_tail(&sdesc->node, &schan->prepared);
+       } else {
+               pr_err("sirfsoc DMA Invalid xfer\n");
+               ret = -EINVAL;
+               goto err_xfer;
+       }
+       spin_unlock_irqrestore(&schan->lock, iflags);
+
+       return &sdesc->desc;
+err_xfer:
+       spin_unlock_irqrestore(&schan->lock, iflags);
+no_desc:
+err_dir:
+       return ERR_PTR(ret);
+}
+
+static struct dma_async_tx_descriptor *
+sirfsoc_dma_prep_cyclic(struct dma_chan *chan, dma_addr_t addr,
+       size_t buf_len, size_t period_len,
+       enum dma_transfer_direction direction)
+{
+       struct sirfsoc_dma_chan *schan = dma_chan_to_sirfsoc_dma_chan(chan);
+       struct sirfsoc_dma_desc *sdesc = NULL;
+       unsigned long iflags;
+
+       /*
+        * We only support cyclic transfers with two periods.
+        * If the X-length is set to 0, the controller runs in loop mode:
+        * the DMA address keeps increasing until it reaches the end of a
+        * loop area whose size is defined by (DMA_WIDTH x (Y_LENGTH + 1)),
+        * then wraps back to the beginning of that area.
+        * In loop mode the DMA data region is divided into two parts, BUFA
+        * and BUFB, and the controller raises two interrupts per loop: when
+        * the DMA address reaches the end of BUFA and the end of BUFB.
+        */
+       if (buf_len !=  2 * period_len)
+               return ERR_PTR(-EINVAL);
+
+       /* Get free descriptor */
+       spin_lock_irqsave(&schan->lock, iflags);
+       if (!list_empty(&schan->free)) {
+               sdesc = list_first_entry(&schan->free, struct sirfsoc_dma_desc,
+                       node);
+               list_del(&sdesc->node);
+       }
+       spin_unlock_irqrestore(&schan->lock, iflags);
+
+       if (!sdesc)
+               return NULL;
+
+       /* Place descriptor in prepared list */
+       spin_lock_irqsave(&schan->lock, iflags);
+       sdesc->addr = addr;
+       sdesc->cyclic = 1;
+       sdesc->xlen = 0;
+       sdesc->ylen = buf_len / SIRFSOC_DMA_WORD_LEN - 1;
+       sdesc->width = 1;
+       list_add_tail(&sdesc->node, &schan->prepared);
+       spin_unlock_irqrestore(&schan->lock, iflags);
+
+       return &sdesc->desc;
+}
+
+/*
+ * The DMA controller consists of 16 independent DMA channels.
+ * Each channel is allocated to a different function.
+ */
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id)
+{
+       unsigned int ch_nr = (unsigned int) chan_id;
+
+       if (ch_nr == chan->chan_id +
+               chan->device->dev_id * SIRFSOC_DMA_CHANNELS)
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL(sirfsoc_dma_filter_id);
+
+static int __devinit sirfsoc_dma_probe(struct platform_device *op)
+{
+       struct device_node *dn = op->dev.of_node;
+       struct device *dev = &op->dev;
+       struct dma_device *dma;
+       struct sirfsoc_dma *sdma;
+       struct sirfsoc_dma_chan *schan;
+       struct resource res;
+       ulong regs_start, regs_size;
+       u32 id;
+       int ret, i;
+
+       sdma = devm_kzalloc(dev, sizeof(*sdma), GFP_KERNEL);
+       if (!sdma) {
+               dev_err(dev, "Memory exhausted!\n");
+               return -ENOMEM;
+       }
+
+       if (of_property_read_u32(dn, "cell-index", &id)) {
+               dev_err(dev, "Failed to get DMAC index\n");
+               ret = -ENODEV;
+               goto free_mem;
+       }
+
+       sdma->irq = irq_of_parse_and_map(dn, 0);
+       if (sdma->irq == NO_IRQ) {
+               dev_err(dev, "Error mapping IRQ!\n");
+               ret = -EINVAL;
+               goto free_mem;
+       }
+
+       ret = of_address_to_resource(dn, 0, &res);
+       if (ret) {
+               dev_err(dev, "Error parsing memory region!\n");
+               goto free_mem;
+       }
+
+       regs_start = res.start;
+       regs_size = resource_size(&res);
+
+       sdma->base = devm_ioremap(dev, regs_start, regs_size);
+       if (!sdma->base) {
+               dev_err(dev, "Error mapping memory region!\n");
+               ret = -ENOMEM;
+               goto irq_dispose;
+       }
+
+       ret = devm_request_irq(dev, sdma->irq, &sirfsoc_dma_irq, 0, DRV_NAME,
+               sdma);
+       if (ret) {
+               dev_err(dev, "Error requesting IRQ!\n");
+               ret = -EINVAL;
+               goto unmap_mem;
+       }
+
+       dma = &sdma->dma;
+       dma->dev = dev;
+       dma->chancnt = SIRFSOC_DMA_CHANNELS;
+
+       dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
+       dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
+       dma->device_issue_pending = sirfsoc_dma_issue_pending;
+       dma->device_control = sirfsoc_dma_control;
+       dma->device_tx_status = sirfsoc_dma_tx_status;
+       dma->device_prep_interleaved_dma = sirfsoc_dma_prep_interleaved;
+       dma->device_prep_dma_cyclic = sirfsoc_dma_prep_cyclic;
+
+       INIT_LIST_HEAD(&dma->channels);
+       dma_cap_set(DMA_SLAVE, dma->cap_mask);
+       dma_cap_set(DMA_CYCLIC, dma->cap_mask);
+       dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
+       dma_cap_set(DMA_PRIVATE, dma->cap_mask);
+
+       for (i = 0; i < dma->chancnt; i++) {
+               schan = &sdma->channels[i];
+
+               schan->chan.device = dma;
+               schan->chan.cookie = 1;
+               schan->completed_cookie = schan->chan.cookie;
+
+               INIT_LIST_HEAD(&schan->free);
+               INIT_LIST_HEAD(&schan->prepared);
+               INIT_LIST_HEAD(&schan->queued);
+               INIT_LIST_HEAD(&schan->active);
+               INIT_LIST_HEAD(&schan->completed);
+
+               spin_lock_init(&schan->lock);
+               list_add_tail(&schan->chan.device_node, &dma->channels);
+       }
+
+       tasklet_init(&sdma->tasklet, sirfsoc_dma_tasklet, (unsigned long)sdma);
+
+       /* Register DMA engine */
+       dev_set_drvdata(dev, sdma);
+       ret = dma_async_device_register(dma);
+       if (ret)
+               goto free_irq;
+
+       dev_info(dev, "initialized SIRFSOC DMAC driver\n");
+
+       return 0;
+
+free_irq:
+       devm_free_irq(dev, sdma->irq, sdma);
+irq_dispose:
+       irq_dispose_mapping(sdma->irq);
+unmap_mem:
+       iounmap(sdma->base);
+free_mem:
+       devm_kfree(dev, sdma);
+       return ret;
+}
+
+static int __devexit sirfsoc_dma_remove(struct platform_device *op)
+{
+       struct device *dev = &op->dev;
+       struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
+
+       dma_async_device_unregister(&sdma->dma);
+       devm_free_irq(dev, sdma->irq, sdma);
+       irq_dispose_mapping(sdma->irq);
+       iounmap(sdma->base);
+       devm_kfree(dev, sdma);
+       return 0;
+}
+
+static struct of_device_id sirfsoc_dma_match[] = {
+       { .compatible = "sirf,prima2-dmac", },
+       {},
+};
+
+static struct platform_driver sirfsoc_dma_driver = {
+       .probe          = sirfsoc_dma_probe,
+       .remove         = __devexit_p(sirfsoc_dma_remove),
+       .driver = {
+               .name = DRV_NAME,
+               .owner = THIS_MODULE,
+               .of_match_table = sirfsoc_dma_match,
+       },
+};
+
+module_platform_driver(sirfsoc_dma_driver);
+
+MODULE_AUTHOR("Rongjun Ying <rongjun.ying@csr.com>, "
+       "Barry Song <baohua.song@csr.com>");
+MODULE_DESCRIPTION("SIRFSOC DMA control driver");
+MODULE_LICENSE("GPL v2");
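
For context on the client side, a hedged sketch of how a peripheral driver might claim one of these channels via the exported filter follows; the DMA_SLAVE-only mask and the channel number 12 are illustrative examples, not values defined by this driver:

/*
 * Hedged client-side sketch; the mask and channel id 12 are made-up
 * examples, not values defined by this driver.
 */
static struct dma_chan *sirf_example_request_channel(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* sirfsoc_dma_filter_id() matches dev_id * 16 + chan_id */
	return dma_request_channel(mask, sirfsoc_dma_filter_id, (void *)12);
}
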
index 13259cad0ceb61df6a593af758ebf1aec4ec877b..cc5ecbc067a3d8a8d88c97190b0248fb0851593b 100644 (file)
@@ -14,6 +14,8 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/pm_runtime.h>
 #include <linux/err.h>
 #include <linux/amba/bus.h>
 
@@ -32,6 +34,9 @@
 /* Maximum iterations taken before giving up suspending a channel */
 #define D40_SUSPEND_MAX_IT 500
 
+/* Milliseconds */
+#define DMA40_AUTOSUSPEND_DELAY        100
+
 /* Hardware requirement on LCLA alignment */
 #define LCLA_ALIGNMENT 0x40000
 
@@ -62,6 +67,55 @@ enum d40_command {
        D40_DMA_SUSPENDED       = 3
 };
 
+/*
+ * These are the registers that have to be saved and later restored
+ * when the DMA hw is powered off.
+ * TODO: Add save/restore of D40_DREG_GCC on dma40 v3 or later, if that works.
+ */
+static u32 d40_backup_regs[] = {
+       D40_DREG_LCPA,
+       D40_DREG_LCLA,
+       D40_DREG_PRMSE,
+       D40_DREG_PRMSO,
+       D40_DREG_PRMOE,
+       D40_DREG_PRMOO,
+};
+
+#define BACKUP_REGS_SZ ARRAY_SIZE(d40_backup_regs)
+
+/* TODO: Check if all these registers have to be saved/restored on dma40 v3 */
+static u32 d40_backup_regs_v3[] = {
+       D40_DREG_PSEG1,
+       D40_DREG_PSEG2,
+       D40_DREG_PSEG3,
+       D40_DREG_PSEG4,
+       D40_DREG_PCEG1,
+       D40_DREG_PCEG2,
+       D40_DREG_PCEG3,
+       D40_DREG_PCEG4,
+       D40_DREG_RSEG1,
+       D40_DREG_RSEG2,
+       D40_DREG_RSEG3,
+       D40_DREG_RSEG4,
+       D40_DREG_RCEG1,
+       D40_DREG_RCEG2,
+       D40_DREG_RCEG3,
+       D40_DREG_RCEG4,
+};
+
+#define BACKUP_REGS_SZ_V3 ARRAY_SIZE(d40_backup_regs_v3)
+
+static u32 d40_backup_regs_chan[] = {
+       D40_CHAN_REG_SSCFG,
+       D40_CHAN_REG_SSELT,
+       D40_CHAN_REG_SSPTR,
+       D40_CHAN_REG_SSLNK,
+       D40_CHAN_REG_SDCFG,
+       D40_CHAN_REG_SDELT,
+       D40_CHAN_REG_SDPTR,
+       D40_CHAN_REG_SDLNK,
+};
+
 /**
  * struct d40_lli_pool - Structure for keeping LLIs in memory
  *
@@ -96,7 +150,7 @@ struct d40_lli_pool {
  * during a transfer.
  * @node: List entry.
  * @is_in_client_list: true if the client owns this descriptor.
- * the previous one.
+ * @cyclic: true if this is a cyclic job
  *
  * This descriptor is used for both logical and physical transfers.
  */
@@ -143,6 +197,7 @@ struct d40_lcla_pool {
  * channels.
  *
  * @lock: A lock protection this entity.
+ * @reserved: True if the channel is reserved, by the secure world or otherwise.
  * @num: The physical channel number of this entity.
  * @allocated_src: Bit mapped to show which src event line's are mapped to
  * this physical channel. Can also be free or physically allocated.
@@ -152,6 +207,7 @@ struct d40_lcla_pool {
  */
 struct d40_phy_res {
        spinlock_t lock;
+       bool       reserved;
        int        num;
        u32        allocated_src;
        u32        allocated_dst;
@@ -185,7 +241,6 @@ struct d40_base;
  * @src_def_cfg: Default cfg register setting for src.
  * @dst_def_cfg: Default cfg register setting for dst.
  * @log_def: Default logical channel settings.
- * @lcla: Space for one dst src pair for logical channel transfers.
  * @lcpa: Pointer to dst and src lcpa settings.
  * @runtime_addr: runtime configured address.
  * @runtime_direction: runtime configured direction.
@@ -217,7 +272,7 @@ struct d40_chan {
        struct d40_log_lli_full         *lcpa;
        /* Runtime reconfiguration */
        dma_addr_t                      runtime_addr;
-       enum dma_data_direction         runtime_direction;
+       enum dma_transfer_direction     runtime_direction;
 };
 
 /**
@@ -241,6 +296,7 @@ struct d40_chan {
  * @dma_both: dma_device channels that can do both memcpy and slave transfers.
  * @dma_slave: dma_device channels that can only do slave transfers.
  * @dma_memcpy: dma_device channels that can only do memcpy transfers.
+ * @phy_chans: Room for all possible physical channels in system.
  * @log_chans: Room for all possible logical channels in system.
  * @lookup_log_chans: Used to map interrupt number to logical channel. Points
  * to log_chans entries.
@@ -248,12 +304,20 @@ struct d40_chan {
  * to phy_chans entries.
  * @plat_data: Pointer to provided platform_data which is the driver
  * configuration.
+ * @lcpa_regulator: Pointer to hold the regulator for the esram bank for lcla.
  * @phy_res: Vector containing all physical channels.
  * @lcla_pool: lcla pool settings and data.
  * @lcpa_base: The virtual mapped address of LCPA.
  * @phy_lcpa: The physical address of the LCPA.
  * @lcpa_size: The size of the LCPA area.
  * @desc_slab: cache for descriptors.
+ * @reg_val_backup: Here the values of some hardware registers are stored
+ * before the DMA is powered off. They are restored when the power is back on.
+ * @reg_val_backup_v3: Backup of registers that only exist on dma40 v3 and
+ * later.
+ * @reg_val_backup_chan: Backup data for standard channel parameter registers.
+ * @gcc_pwr_off_mask: Mask to maintain the channels that can be turned off.
+ * @initialized: true if the dma has been initialized
  */
 struct d40_base {
        spinlock_t                       interrupt_lock;
@@ -275,6 +339,7 @@ struct d40_base {
        struct d40_chan                 **lookup_log_chans;
        struct d40_chan                 **lookup_phy_chans;
        struct stedma40_platform_data    *plat_data;
+       struct regulator                 *lcpa_regulator;
        /* Physical half channels */
        struct d40_phy_res               *phy_res;
        struct d40_lcla_pool              lcla_pool;
@@ -282,6 +347,11 @@ struct d40_base {
        dma_addr_t                        phy_lcpa;
        resource_size_t                   lcpa_size;
        struct kmem_cache                *desc_slab;
+       u32                               reg_val_backup[BACKUP_REGS_SZ];
+       u32                               reg_val_backup_v3[BACKUP_REGS_SZ_V3];
+       u32                              *reg_val_backup_chan;
+       u16                               gcc_pwr_off_mask;
+       bool                              initialized;
 };
 
 /**
@@ -479,13 +549,14 @@ static struct d40_desc *d40_desc_get(struct d40_chan *d40c)
                struct d40_desc *d;
                struct d40_desc *_d;
 
-               list_for_each_entry_safe(d, _d, &d40c->client, node)
+               list_for_each_entry_safe(d, _d, &d40c->client, node) {
                        if (async_tx_test_ack(&d->txd)) {
                                d40_desc_remove(d);
                                desc = d;
                                memset(desc, 0, sizeof(*desc));
                                break;
                        }
+               }
        }
 
        if (!desc)
@@ -536,6 +607,7 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
        bool cyclic = desc->cyclic;
        int curr_lcla = -EINVAL;
        int first_lcla = 0;
+       bool use_esram_lcla = chan->base->plat_data->use_esram_lcla;
        bool linkback;
 
        /*
@@ -608,11 +680,16 @@ static void d40_log_lli_to_lcxa(struct d40_chan *chan, struct d40_desc *desc)
                                       &lli->src[lli_current],
                                       next_lcla, flags);
 
-               dma_sync_single_range_for_device(chan->base->dev,
-                                       pool->dma_addr, lcla_offset,
-                                       2 * sizeof(struct d40_log_lli),
-                                       DMA_TO_DEVICE);
-
+               /*
+                * Cache maintenance is not needed if lcla is
+                * mapped in esram
+                */
+               if (!use_esram_lcla) {
+                       dma_sync_single_range_for_device(chan->base->dev,
+                                               pool->dma_addr, lcla_offset,
+                                               2 * sizeof(struct d40_log_lli),
+                                               DMA_TO_DEVICE);
+               }
                curr_lcla = next_lcla;
 
                if (curr_lcla == -EINVAL || curr_lcla == first_lcla) {
@@ -740,7 +817,61 @@ static int d40_sg_2_dmalen(struct scatterlist *sgl, int sg_len,
        return len;
 }
 
-/* Support functions for logical channels */
+
+#ifdef CONFIG_PM
+static void dma40_backup(void __iomem *baseaddr, u32 *backup,
+                        u32 *regaddr, int num, bool save)
+{
+       int i;
+
+       for (i = 0; i < num; i++) {
+               void __iomem *addr = baseaddr + regaddr[i];
+
+               if (save)
+                       backup[i] = readl_relaxed(addr);
+               else
+                       writel_relaxed(backup[i], addr);
+       }
+}
+
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+       int i;
+
+       /* Save/Restore channel specific registers */
+       for (i = 0; i < base->num_phy_chans; i++) {
+               void __iomem *addr;
+               int idx;
+
+               if (base->phy_res[i].reserved)
+                       continue;
+
+               addr = base->virtbase + D40_DREG_PCBASE + i * D40_DREG_PCDELTA;
+               idx = i * ARRAY_SIZE(d40_backup_regs_chan);
+
+               dma40_backup(addr, &base->reg_val_backup_chan[idx],
+                            d40_backup_regs_chan,
+                            ARRAY_SIZE(d40_backup_regs_chan),
+                            save);
+       }
+
+       /* Save/Restore global registers */
+       dma40_backup(base->virtbase, base->reg_val_backup,
+                    d40_backup_regs, ARRAY_SIZE(d40_backup_regs),
+                    save);
+
+       /* Save/Restore registers only existing on dma40 v3 and later */
+       if (base->rev >= 3)
+               dma40_backup(base->virtbase, base->reg_val_backup_v3,
+                            d40_backup_regs_v3,
+                            ARRAY_SIZE(d40_backup_regs_v3),
+                            save);
+}
+#else
+static void d40_save_restore_registers(struct d40_base *base, bool save)
+{
+}
+#endif
 
 static int d40_channel_execute_command(struct d40_chan *d40c,
                                       enum d40_command command)
@@ -973,6 +1104,10 @@ static void d40_config_write(struct d40_chan *d40c)
                /* Set LIDX for lcla */
                writel(lidx, chanbase + D40_CHAN_REG_SSELT);
                writel(lidx, chanbase + D40_CHAN_REG_SDELT);
+
+               /* Clear LNK which will be used by d40_chan_has_events() */
+               writel(0, chanbase + D40_CHAN_REG_SSLNK);
+               writel(0, chanbase + D40_CHAN_REG_SDLNK);
        }
 }
 
@@ -1013,6 +1148,7 @@ static int d40_pause(struct d40_chan *d40c)
        if (!d40c->busy)
                return 0;
 
+       pm_runtime_get_sync(d40c->base->dev);
        spin_lock_irqsave(&d40c->lock, flags);
 
        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
@@ -1025,7 +1161,8 @@ static int d40_pause(struct d40_chan *d40c)
                                                                  D40_DMA_RUN);
                }
        }
-
+       pm_runtime_mark_last_busy(d40c->base->dev);
+       pm_runtime_put_autosuspend(d40c->base->dev);
        spin_unlock_irqrestore(&d40c->lock, flags);
        return res;
 }
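
The pause path now brackets the register accesses with a runtime PM reference. The same pattern recurs throughout the rest of the patch; as a hedged, standalone illustration (this helper does not exist in the driver):

/*
 * Illustration only, this helper does not exist in the driver: the
 * runtime PM reference pattern the patch applies around hardware access.
 */
static void d40_touch_hw_sketch(struct d40_chan *d40c)
{
	pm_runtime_get_sync(d40c->base->dev);		/* power the DMA up */
	/* ... program or query the channel registers here ... */
	pm_runtime_mark_last_busy(d40c->base->dev);
	pm_runtime_put_autosuspend(d40c->base->dev);	/* release after the autosuspend delay */
}
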
@@ -1039,7 +1176,7 @@ static int d40_resume(struct d40_chan *d40c)
                return 0;
 
        spin_lock_irqsave(&d40c->lock, flags);
-
+       pm_runtime_get_sync(d40c->base->dev);
        if (d40c->base->rev == 0)
                if (chan_is_logical(d40c)) {
                        res = d40_channel_execute_command(d40c,
@@ -1057,6 +1194,8 @@ static int d40_resume(struct d40_chan *d40c)
        }
 
 no_suspend:
+       pm_runtime_mark_last_busy(d40c->base->dev);
+       pm_runtime_put_autosuspend(d40c->base->dev);
        spin_unlock_irqrestore(&d40c->lock, flags);
        return res;
 }
@@ -1129,7 +1268,10 @@ static struct d40_desc *d40_queue_start(struct d40_chan *d40c)
        d40d = d40_first_queued(d40c);
 
        if (d40d != NULL) {
-               d40c->busy = true;
+               if (!d40c->busy)
+                       d40c->busy = true;
+
+               pm_runtime_get_sync(d40c->base->dev);
 
                /* Remove from queue */
                d40_desc_remove(d40d);
@@ -1190,6 +1332,8 @@ static void dma_tc_handle(struct d40_chan *d40c)
 
                if (d40_queue_start(d40c) == NULL)
                        d40c->busy = false;
+               pm_runtime_mark_last_busy(d40c->base->dev);
+               pm_runtime_put_autosuspend(d40c->base->dev);
        }
 
        d40c->pending_tx++;
@@ -1405,11 +1549,16 @@ static int d40_validate_conf(struct d40_chan *d40c,
        return res;
 }
 
-static bool d40_alloc_mask_set(struct d40_phy_res *phy, bool is_src,
-                              int log_event_line, bool is_log)
+static bool d40_alloc_mask_set(struct d40_phy_res *phy,
+                              bool is_src, int log_event_line, bool is_log,
+                              bool *first_user)
 {
        unsigned long flags;
        spin_lock_irqsave(&phy->lock, flags);
+
+       *first_user = ((phy->allocated_src | phy->allocated_dst)
+                       == D40_ALLOC_FREE);
+
        if (!is_log) {
                /* Physical interrupts are masked per physical full channel */
                if (phy->allocated_src == D40_ALLOC_FREE &&
@@ -1490,7 +1639,7 @@ out:
        return is_free;
 }
 
-static int d40_allocate_channel(struct d40_chan *d40c)
+static int d40_allocate_channel(struct d40_chan *d40c, bool *first_phy_user)
 {
        int dev_type;
        int event_group;
@@ -1526,7 +1675,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
                        for (i = 0; i < d40c->base->num_phy_chans; i++) {
 
                                if (d40_alloc_mask_set(&phys[i], is_src,
-                                                      0, is_log))
+                                                      0, is_log,
+                                                      first_phy_user))
                                        goto found_phy;
                        }
                } else
@@ -1536,7 +1686,8 @@ static int d40_allocate_channel(struct d40_chan *d40c)
                                        if (d40_alloc_mask_set(&phys[i],
                                                               is_src,
                                                               0,
-                                                              is_log))
+                                                              is_log,
+                                                              first_phy_user))
                                                goto found_phy;
                                }
                        }
@@ -1552,6 +1703,25 @@ found_phy:
        /* Find logical channel */
        for (j = 0; j < d40c->base->num_phy_chans; j += 8) {
                int phy_num = j + event_group * 2;
+
+               if (d40c->dma_cfg.use_fixed_channel) {
+                       i = d40c->dma_cfg.phy_channel;
+
+                       if ((i != phy_num) && (i != phy_num + 1)) {
+                               dev_err(chan2dev(d40c),
+                                       "invalid fixed phy channel %d\n", i);
+                               return -EINVAL;
+                       }
+
+                       if (d40_alloc_mask_set(&phys[i], is_src, event_line,
+                                              is_log, first_phy_user))
+                               goto found_log;
+
+                       dev_err(chan2dev(d40c),
+                               "could not allocate fixed phy channel %d\n", i);
+                       return -EINVAL;
+               }
+
                /*
                 * Spread logical channels across all available physical rather
                 * than pack every logical channel at the first available phy
@@ -1560,13 +1730,15 @@ found_phy:
                if (is_src) {
                        for (i = phy_num; i < phy_num + 2; i++) {
                                if (d40_alloc_mask_set(&phys[i], is_src,
-                                                      event_line, is_log))
+                                                      event_line, is_log,
+                                                      first_phy_user))
                                        goto found_log;
                        }
                } else {
                        for (i = phy_num + 1; i >= phy_num; i--) {
                                if (d40_alloc_mask_set(&phys[i], is_src,
-                                                      event_line, is_log))
+                                                      event_line, is_log,
+                                                      first_phy_user))
                                        goto found_log;
                        }
                }
@@ -1643,10 +1815,11 @@ static int d40_free_dma(struct d40_chan *d40c)
                return -EINVAL;
        }
 
+       pm_runtime_get_sync(d40c->base->dev);
        res = d40_channel_execute_command(d40c, D40_DMA_SUSPEND_REQ);
        if (res) {
                chan_err(d40c, "suspend failed\n");
-               return res;
+               goto out;
        }
 
        if (chan_is_logical(d40c)) {
@@ -1664,13 +1837,11 @@ static int d40_free_dma(struct d40_chan *d40c)
                        if (d40_chan_has_events(d40c)) {
                                res = d40_channel_execute_command(d40c,
                                                                  D40_DMA_RUN);
-                               if (res) {
+                               if (res)
                                        chan_err(d40c,
                                                "Executing RUN command\n");
-                                       return res;
-                               }
                        }
-                       return 0;
+                       goto out;
                }
        } else {
                (void) d40_alloc_mask_free(phy, is_src, 0);
@@ -1680,13 +1851,23 @@ static int d40_free_dma(struct d40_chan *d40c)
        res = d40_channel_execute_command(d40c, D40_DMA_STOP);
        if (res) {
                chan_err(d40c, "Failed to stop channel\n");
-               return res;
+               goto out;
        }
+
+       if (d40c->busy) {
+               pm_runtime_mark_last_busy(d40c->base->dev);
+               pm_runtime_put_autosuspend(d40c->base->dev);
+       }
+
+       d40c->busy = false;
        d40c->phy_chan = NULL;
        d40c->configured = false;
        d40c->base->lookup_phy_chans[phy->num] = NULL;
+out:
 
-       return 0;
+       pm_runtime_mark_last_busy(d40c->base->dev);
+       pm_runtime_put_autosuspend(d40c->base->dev);
+       return res;
 }
 
 static bool d40_is_paused(struct d40_chan *d40c)
@@ -1855,7 +2036,7 @@ err:
 }
 
 static dma_addr_t
-d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
+d40_get_dev_addr(struct d40_chan *chan, enum dma_transfer_direction direction)
 {
        struct stedma40_platform_data *plat = chan->base->plat_data;
        struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
@@ -1864,9 +2045,9 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
        if (chan->runtime_addr)
                return chan->runtime_addr;
 
-       if (direction == DMA_FROM_DEVICE)
+       if (direction == DMA_DEV_TO_MEM)
                addr = plat->dev_rx[cfg->src_dev_type];
-       else if (direction == DMA_TO_DEVICE)
+       else if (direction == DMA_MEM_TO_DEV)
                addr = plat->dev_tx[cfg->dst_dev_type];
 
        return addr;
@@ -1875,7 +2056,7 @@ d40_get_dev_addr(struct d40_chan *chan, enum dma_data_direction direction)
 static struct dma_async_tx_descriptor *
 d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
            struct scatterlist *sg_dst, unsigned int sg_len,
-           enum dma_data_direction direction, unsigned long dma_flags)
+           enum dma_transfer_direction direction, unsigned long dma_flags)
 {
        struct d40_chan *chan = container_of(dchan, struct d40_chan, chan);
        dma_addr_t src_dev_addr = 0;
@@ -1902,9 +2083,9 @@ d40_prep_sg(struct dma_chan *dchan, struct scatterlist *sg_src,
        if (direction != DMA_NONE) {
                dma_addr_t dev_addr = d40_get_dev_addr(chan, direction);
 
-               if (direction == DMA_FROM_DEVICE)
+               if (direction == DMA_DEV_TO_MEM)
                        src_dev_addr = dev_addr;
-               else if (direction == DMA_TO_DEVICE)
+               else if (direction == DMA_MEM_TO_DEV)
                        dst_dev_addr = dev_addr;
        }
 
@@ -2011,14 +2192,15 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
                        goto fail;
                }
        }
-       is_free_phy = (d40c->phy_chan == NULL);
 
-       err = d40_allocate_channel(d40c);
+       err = d40_allocate_channel(d40c, &is_free_phy);
        if (err) {
                chan_err(d40c, "Failed to allocate channel\n");
+               d40c->configured = false;
                goto fail;
        }
 
+       pm_runtime_get_sync(d40c->base->dev);
        /* Fill in basic CFG register values */
        d40_phy_cfg(&d40c->dma_cfg, &d40c->src_def_cfg,
                    &d40c->dst_def_cfg, chan_is_logical(d40c));
@@ -2038,6 +2220,12 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
                          D40_LCPA_CHAN_SIZE + D40_LCPA_CHAN_DST_DELTA;
        }
 
+       dev_dbg(chan2dev(d40c), "allocated %s channel (phy %d%s)\n",
+                chan_is_logical(d40c) ? "logical" : "physical",
+                d40c->phy_chan->num,
+                d40c->dma_cfg.use_fixed_channel ? ", fixed" : "");
+
+
        /*
         * Only write channel configuration to the DMA if the physical
         * resource is free. In case of multiple logical channels
@@ -2046,6 +2234,8 @@ static int d40_alloc_chan_resources(struct dma_chan *chan)
        if (is_free_phy)
                d40_config_write(d40c);
 fail:
+       pm_runtime_mark_last_busy(d40c->base->dev);
+       pm_runtime_put_autosuspend(d40c->base->dev);
        spin_unlock_irqrestore(&d40c->lock, flags);
        return err;
 }
@@ -2108,10 +2298,10 @@ d40_prep_memcpy_sg(struct dma_chan *chan,
 static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
                                                         struct scatterlist *sgl,
                                                         unsigned int sg_len,
-                                                        enum dma_data_direction direction,
+                                                        enum dma_transfer_direction direction,
                                                         unsigned long dma_flags)
 {
-       if (direction != DMA_FROM_DEVICE && direction != DMA_TO_DEVICE)
+       if (direction != DMA_DEV_TO_MEM && direction != DMA_MEM_TO_DEV)
                return NULL;
 
        return d40_prep_sg(chan, sgl, sgl, sg_len, direction, dma_flags);
@@ -2120,7 +2310,7 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 static struct dma_async_tx_descriptor *
 dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
                     size_t buf_len, size_t period_len,
-                    enum dma_data_direction direction)
+                    enum dma_transfer_direction direction)
 {
        unsigned int periods = buf_len / period_len;
        struct dma_async_tx_descriptor *txd;
@@ -2269,7 +2459,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
        dst_addr_width = config->dst_addr_width;
        dst_maxburst = config->dst_maxburst;
 
-       if (config->direction == DMA_FROM_DEVICE) {
+       if (config->direction == DMA_DEV_TO_MEM) {
                dma_addr_t dev_addr_rx =
                        d40c->base->plat_data->dev_rx[cfg->src_dev_type];
 
@@ -2292,7 +2482,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
                if (dst_maxburst == 0)
                        dst_maxburst = src_maxburst;
 
-       } else if (config->direction == DMA_TO_DEVICE) {
+       } else if (config->direction == DMA_MEM_TO_DEV) {
                dma_addr_t dev_addr_tx =
                        d40c->base->plat_data->dev_tx[cfg->dst_dev_type];
 
@@ -2357,7 +2547,7 @@ static int d40_set_runtime_config(struct dma_chan *chan,
                "configured channel %s for %s, data width %d/%d, "
                "maxburst %d/%d elements, LE, no flow control\n",
                dma_chan_name(chan),
-               (config->direction == DMA_FROM_DEVICE) ? "RX" : "TX",
+               (config->direction == DMA_DEV_TO_MEM) ? "RX" : "TX",
                src_addr_width, dst_addr_width,
                src_maxburst, dst_maxburst);
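
With the direction field renamed, a typical runtime configuration from a client would look roughly like the sketch below; the FIFO address, width and burst values are placeholders, not taken from this patch:

/* Hedged client sketch: placeholder address, width and burst values. */
static int example_configure_rx(struct dma_chan *chan)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,	/* peripheral -> memory ("RX") */
		.src_addr	= 0x80120000,		/* placeholder FIFO address */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst	= 8,
	};

	return dmaengine_slave_config(chan, &cfg);
}
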
 
@@ -2519,6 +2709,72 @@ failure1:
        return err;
 }
 
+/* Suspend resume functionality */
+#ifdef CONFIG_PM
+static int dma40_pm_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct d40_base *base = platform_get_drvdata(pdev);
+       int ret = 0;
+       if (!pm_runtime_suspended(dev))
+               return -EBUSY;
+
+       if (base->lcpa_regulator)
+               ret = regulator_disable(base->lcpa_regulator);
+       return ret;
+}
+
+static int dma40_runtime_suspend(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct d40_base *base = platform_get_drvdata(pdev);
+
+       d40_save_restore_registers(base, true);
+
+       /* Don't disable/enable clocks for v1 due to HW bugs */
+       if (base->rev != 1)
+               writel_relaxed(base->gcc_pwr_off_mask,
+                              base->virtbase + D40_DREG_GCC);
+
+       return 0;
+}
+
+static int dma40_runtime_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct d40_base *base = platform_get_drvdata(pdev);
+
+       if (base->initialized)
+               d40_save_restore_registers(base, false);
+
+       writel_relaxed(D40_DREG_GCC_ENABLE_ALL,
+                      base->virtbase + D40_DREG_GCC);
+       return 0;
+}
+
+static int dma40_resume(struct device *dev)
+{
+       struct platform_device *pdev = to_platform_device(dev);
+       struct d40_base *base = platform_get_drvdata(pdev);
+       int ret = 0;
+
+       if (base->lcpa_regulator)
+               ret = regulator_enable(base->lcpa_regulator);
+
+       return ret;
+}
+
+static const struct dev_pm_ops dma40_pm_ops = {
+       .suspend                = dma40_pm_suspend,
+       .runtime_suspend        = dma40_runtime_suspend,
+       .runtime_resume         = dma40_runtime_resume,
+       .resume                 = dma40_resume,
+};
+#define DMA40_PM_OPS   (&dma40_pm_ops)
+#else
+#define DMA40_PM_OPS   NULL
+#endif
+
 /* Initialization functions. */
 
 static int __init d40_phy_res_init(struct d40_base *base)
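
DMA40_PM_OPS is presumably assigned to the platform driver's .driver.pm field elsewhere in the file; a hedged sketch of that wiring (the driver name string is assumed, and the actual d40_driver definition is outside this excerpt):

/*
 * Hedged sketch: how DMA40_PM_OPS would typically be referenced. The
 * actual d40_driver definition is outside this excerpt and the driver
 * name string is assumed.
 */
static struct platform_driver d40_driver_sketch = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "dma40",
		.pm	= DMA40_PM_OPS,
	},
	.probe	= d40_probe,
};
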
@@ -2527,6 +2783,7 @@ static int __init d40_phy_res_init(struct d40_base *base)
        int num_phy_chans_avail = 0;
        u32 val[2];
        int odd_even_bit = -2;
+       int gcc = D40_DREG_GCC_ENA;
 
        val[0] = readl(base->virtbase + D40_DREG_PRSME);
        val[1] = readl(base->virtbase + D40_DREG_PRSMO);
@@ -2538,9 +2795,17 @@ static int __init d40_phy_res_init(struct d40_base *base)
                        /* Mark security only channels as occupied */
                        base->phy_res[i].allocated_src = D40_ALLOC_PHY;
                        base->phy_res[i].allocated_dst = D40_ALLOC_PHY;
+                       base->phy_res[i].reserved = true;
+                       gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+                                                      D40_DREG_GCC_SRC);
+                       gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(i),
+                                                      D40_DREG_GCC_DST);
+
+
                } else {
                        base->phy_res[i].allocated_src = D40_ALLOC_FREE;
                        base->phy_res[i].allocated_dst = D40_ALLOC_FREE;
+                       base->phy_res[i].reserved = false;
                        num_phy_chans_avail++;
                }
                spin_lock_init(&base->phy_res[i].lock);
@@ -2552,6 +2817,11 @@ static int __init d40_phy_res_init(struct d40_base *base)
 
                base->phy_res[chan].allocated_src = D40_ALLOC_PHY;
                base->phy_res[chan].allocated_dst = D40_ALLOC_PHY;
+               base->phy_res[chan].reserved = true;
+               gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+                                              D40_DREG_GCC_SRC);
+               gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(chan),
+                                              D40_DREG_GCC_DST);
                num_phy_chans_avail--;
        }
 
@@ -2572,6 +2842,15 @@ static int __init d40_phy_res_init(struct d40_base *base)
                val[0] = val[0] >> 2;
        }
 
+       /*
+        * To keep things simple, enable all clocks initially.
+        * The clocks will be managed later, after channel allocation.
+        * The clocks for the event lines on which reserved channels exist
+        * are not managed here.
+        */
+       writel(D40_DREG_GCC_ENABLE_ALL, base->virtbase + D40_DREG_GCC);
+       base->gcc_pwr_off_mask = gcc;
+
        return num_phy_chans_avail;
 }
 
@@ -2699,10 +2978,15 @@ static struct d40_base * __init d40_hw_detect_init(struct platform_device *pdev)
                        goto failure;
        }
 
-       base->lcla_pool.alloc_map = kzalloc(num_phy_chans *
-                                           sizeof(struct d40_desc *) *
-                                           D40_LCLA_LINK_PER_EVENT_GRP,
+       base->reg_val_backup_chan = kmalloc(base->num_phy_chans *
+                                           sizeof(d40_backup_regs_chan),
                                            GFP_KERNEL);
+       if (!base->reg_val_backup_chan)
+               goto failure;
+
+       base->lcla_pool.alloc_map =
+               kzalloc(num_phy_chans * sizeof(struct d40_desc *)
+                       * D40_LCLA_LINK_PER_EVENT_GRP, GFP_KERNEL);
        if (!base->lcla_pool.alloc_map)
                goto failure;
 
@@ -2741,9 +3025,9 @@ failure:
 static void __init d40_hw_init(struct d40_base *base)
 {
 
-       static const struct d40_reg_val dma_init_reg[] = {
+       static struct d40_reg_val dma_init_reg[] = {
                /* Clock every part of the DMA block from start */
-               { .reg = D40_DREG_GCC,    .val = 0x0000ff01},
+               { .reg = D40_DREG_GCC,    .val = D40_DREG_GCC_ENABLE_ALL},
 
                /* Interrupts on all logical channels */
                { .reg = D40_DREG_LCMIS0, .val = 0xFFFFFFFF},
@@ -2943,11 +3227,31 @@ static int __init d40_probe(struct platform_device *pdev)
                d40_err(&pdev->dev, "Failed to ioremap LCPA region\n");
                goto failure;
        }
+       /* If lcla has to be located in ESRAM, we don't need to allocate it */
+       if (base->plat_data->use_esram_lcla) {
+               res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+                                                       "lcla_esram");
+               if (!res) {
+                       ret = -ENOENT;
+                       d40_err(&pdev->dev,
+                               "No \"lcla_esram\" memory resource\n");
+                       goto failure;
+               }
+               base->lcla_pool.base = ioremap(res->start,
+                                               resource_size(res));
+               if (!base->lcla_pool.base) {
+                       ret = -ENOMEM;
+                       d40_err(&pdev->dev, "Failed to ioremap LCLA region\n");
+                       goto failure;
+               }
+               writel(res->start, base->virtbase + D40_DREG_LCLA);
 
-       ret = d40_lcla_allocate(base);
-       if (ret) {
-               d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
-               goto failure;
+       } else {
+               ret = d40_lcla_allocate(base);
+               if (ret) {
+                       d40_err(&pdev->dev, "Failed to allocate LCLA area\n");
+                       goto failure;
+               }
        }
 
        spin_lock_init(&base->lcla_pool.lock);
@@ -2960,6 +3264,32 @@ static int __init d40_probe(struct platform_device *pdev)
                goto failure;
        }
 
+       pm_runtime_irq_safe(base->dev);
+       pm_runtime_set_autosuspend_delay(base->dev, DMA40_AUTOSUSPEND_DELAY);
+       pm_runtime_use_autosuspend(base->dev);
+       pm_runtime_enable(base->dev);
+       pm_runtime_resume(base->dev);
+
+       if (base->plat_data->use_esram_lcla) {
+
+               base->lcpa_regulator = regulator_get(base->dev, "lcla_esram");
+               if (IS_ERR(base->lcpa_regulator)) {
+                       d40_err(&pdev->dev, "Failed to get lcpa_regulator\n");
+                       base->lcpa_regulator = NULL;
+                       goto failure;
+               }
+
+               ret = regulator_enable(base->lcpa_regulator);
+               if (ret) {
+                       d40_err(&pdev->dev,
+                               "Failed to enable lcpa_regulator\n");
+                       regulator_put(base->lcpa_regulator);
+                       base->lcpa_regulator = NULL;
+                       goto failure;
+               }
+       }
+
+       base->initialized = true;
        err = d40_dmaengine_init(base, num_reserved_chans);
        if (err)
                goto failure;
@@ -2976,6 +3306,11 @@ failure:
                if (base->virtbase)
                        iounmap(base->virtbase);
 
+               if (base->lcla_pool.base && base->plat_data->use_esram_lcla) {
+                       iounmap(base->lcla_pool.base);
+                       base->lcla_pool.base = NULL;
+               }
+
                if (base->lcla_pool.dma_addr)
                        dma_unmap_single(base->dev, base->lcla_pool.dma_addr,
                                         SZ_1K * base->num_phy_chans,
@@ -2998,6 +3333,11 @@ failure:
                        clk_put(base->clk);
                }
 
+               if (base->lcpa_regulator) {
+                       regulator_disable(base->lcpa_regulator);
+                       regulator_put(base->lcpa_regulator);
+               }
+
                kfree(base->lcla_pool.alloc_map);
                kfree(base->lookup_log_chans);
                kfree(base->lookup_phy_chans);
@@ -3013,6 +3353,7 @@ static struct platform_driver d40_driver = {
        .driver = {
                .owner = THIS_MODULE,
                .name  = D40_NAME,
+               .pm = DMA40_PM_OPS,
        },
 };
 
index b44c455158de3461f31ee5a0b0594cb136b2ad6a..8d3d490968a3a8240b6f91e1631609a2425c3f4f 100644 (file)
@@ -16,6 +16,8 @@
 
 #define D40_TYPE_TO_GROUP(type) (type / 16)
 #define D40_TYPE_TO_EVENT(type) (type % 16)
+#define D40_GROUP_SIZE 8
+#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2)
 
 /* Most bits of the CFG register are the same in log as in phy mode */
 #define D40_SREG_CFG_MST_POS           15
 
 /* DMA Register Offsets */
 #define D40_DREG_GCC           0x000
+#define D40_DREG_GCC_ENA       0x1
+/* This assumes that there are only 4 event groups */
+#define D40_DREG_GCC_ENABLE_ALL        0xff01
+#define D40_DREG_GCC_EVTGRP_POS 8
+#define D40_DREG_GCC_SRC 0
+#define D40_DREG_GCC_DST 1
+#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
+       (1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))
+
 #define D40_DREG_PRTYP         0x004
 #define D40_DREG_PRSME         0x008
 #define D40_DREG_PRSMO         0x00C
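
A minimal stand-alone sketch (plain userspace C, with a made-up pair of reserved
physical channels) of how d40_phy_res_init() composes the GCC macros above into
base->gcc_pwr_off_mask; the macros are copied from ste_dma40_ll.h, the channel
list is hypothetical:

#include <stdio.h>

#define D40_GROUP_SIZE 8
#define D40_PHYS_TO_GROUP(phys) ((phys & (D40_GROUP_SIZE - 1)) / 2)

#define D40_DREG_GCC_ENA        0x1
#define D40_DREG_GCC_EVTGRP_POS 8
#define D40_DREG_GCC_SRC        0
#define D40_DREG_GCC_DST        1
#define D40_DREG_GCC_EVTGRP_ENA(x, y) \
	(1 << (D40_DREG_GCC_EVTGRP_POS + 2 * x + y))

int main(void)
{
	int reserved[] = { 0, 5 };	/* hypothetical security-only channels */
	unsigned int gcc = D40_DREG_GCC_ENA;
	unsigned int i;

	for (i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++) {
		/* keep both SRC and DST event-group clocks on for this group */
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(reserved[i]),
					       D40_DREG_GCC_SRC);
		gcc |= D40_DREG_GCC_EVTGRP_ENA(D40_PHYS_TO_GROUP(reserved[i]),
					       D40_DREG_GCC_DST);
	}

	printf("gcc_pwr_off_mask = 0x%08x\n", gcc);	/* 0x00003301 here */
	return 0;
}
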
index a4a398f2ef61eb5a362ca20c37ebe9a479e2121a..a6f9c1684a0fc1dc4a9c7a2dbd2407fe6252b68d 100644 (file)
@@ -90,7 +90,7 @@ struct timb_dma_chan {
        struct list_head        queue;
        struct list_head        free_list;
        unsigned int            bytes_per_line;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction     direction;
        unsigned int            descs; /* Descriptors to allocate */
        unsigned int            desc_elems; /* number of elems per descriptor */
 };
@@ -166,10 +166,10 @@ static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc,
 
        if (single)
                dma_unmap_single(chan2dev(&td_chan->chan), addr, len,
-                       td_chan->direction);
+                       DMA_TO_DEVICE);
        else
                dma_unmap_page(chan2dev(&td_chan->chan), addr, len,
-                       td_chan->direction);
+                       DMA_TO_DEVICE);
 }
 
 static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single)
@@ -235,7 +235,7 @@ static void __td_start_dma(struct timb_dma_chan *td_chan)
                "td_chan: %p, chan: %d, membase: %p\n",
                td_chan, td_chan->chan.chan_id, td_chan->membase);
 
-       if (td_chan->direction == DMA_FROM_DEVICE) {
+       if (td_chan->direction == DMA_DEV_TO_MEM) {
 
                /* descriptor address */
                iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR);
@@ -278,7 +278,7 @@ static void __td_finish(struct timb_dma_chan *td_chan)
                txd->cookie);
 
        /* make sure to stop the transfer */
-       if (td_chan->direction == DMA_FROM_DEVICE)
+       if (td_chan->direction == DMA_DEV_TO_MEM)
                iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER);
 /* Currently no support for stopping DMA transfers
        else
@@ -558,7 +558,7 @@ static void td_issue_pending(struct dma_chan *chan)
 
 static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
        struct scatterlist *sgl, unsigned int sg_len,
-       enum dma_data_direction direction, unsigned long flags)
+       enum dma_transfer_direction direction, unsigned long flags)
 {
        struct timb_dma_chan *td_chan =
                container_of(chan, struct timb_dma_chan, chan);
@@ -606,7 +606,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
        }
 
        dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys,
-               td_desc->desc_list_len, DMA_TO_DEVICE);
+               td_desc->desc_list_len, DMA_MEM_TO_DEV);
 
        return &td_desc->txd;
 }
@@ -775,8 +775,8 @@ static int __devinit td_probe(struct platform_device *pdev)
                td_chan->descs = pchan->descriptors;
                td_chan->desc_elems = pchan->descriptor_elements;
                td_chan->bytes_per_line = pchan->bytes_per_line;
-               td_chan->direction = pchan->rx ? DMA_FROM_DEVICE :
-                       DMA_TO_DEVICE;
+               td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM :
+                       DMA_MEM_TO_DEV;
 
                td_chan->membase = td->membase +
                        (i / 2) * TIMBDMA_INSTANCE_OFFSET +
@@ -841,17 +841,7 @@ static struct platform_driver td_driver = {
        .remove = __exit_p(td_remove),
 };
 
-static int __init td_init(void)
-{
-       return platform_driver_register(&td_driver);
-}
-module_init(td_init);
-
-static void __exit td_exit(void)
-{
-       platform_driver_unregister(&td_driver);
-}
-module_exit(td_exit);
+module_platform_driver(td_driver);
 
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("Timberdale DMA controller driver");
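
The timb_dma changes above (and the txx9dmac ones that follow) replace the
DMA-mapping enum dma_data_direction with the dmaengine enum
dma_transfer_direction in the slave_sg paths.  A minimal sketch of the
correspondence, using trimmed local stand-ins for the two enums rather than the
real kernel headers; only the values touched by these drivers are shown:

#include <stdio.h>

enum dma_data_direction {	/* streaming DMA-mapping API */
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
};

enum dma_transfer_direction {	/* dmaengine slave transfers */
	DMA_MEM_TO_DEV = 1,
	DMA_DEV_TO_MEM = 2,
};

/* rx channels move data device -> memory, tx channels memory -> device,
 * mirroring the td_probe() hunk above. */
static enum dma_transfer_direction chan_direction(int rx)
{
	return rx ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
}

int main(void)
{
	printf("rx: %d (DMA_DEV_TO_MEM), tx: %d (DMA_MEM_TO_DEV)\n",
	       chan_direction(1), chan_direction(0));
	return 0;
}
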
index cbd83e362b5e0a78bcb25dec65f4fb29663710d9..6122c364cf11bb0050fb2b029c083b3c40233722 100644 (file)
@@ -845,7 +845,7 @@ txx9dmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
 static struct dma_async_tx_descriptor *
 txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags)
 {
        struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
@@ -860,9 +860,9 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
        BUG_ON(!ds || !ds->reg_width);
        if (ds->tx_reg)
-               BUG_ON(direction != DMA_TO_DEVICE);
+               BUG_ON(direction != DMA_MEM_TO_DEV);
        else
-               BUG_ON(direction != DMA_FROM_DEVICE);
+               BUG_ON(direction != DMA_DEV_TO_MEM);
        if (unlikely(!sg_len))
                return NULL;
 
@@ -882,7 +882,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                mem = sg_dma_address(sg);
 
                if (__is_dmac64(ddev)) {
-                       if (direction == DMA_TO_DEVICE) {
+                       if (direction == DMA_MEM_TO_DEV) {
                                desc->hwdesc.SAR = mem;
                                desc->hwdesc.DAR = ds->tx_reg;
                        } else {
@@ -891,7 +891,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        }
                        desc->hwdesc.CNTR = sg_dma_len(sg);
                } else {
-                       if (direction == DMA_TO_DEVICE) {
+                       if (direction == DMA_MEM_TO_DEV) {
                                desc->hwdesc32.SAR = mem;
                                desc->hwdesc32.DAR = ds->tx_reg;
                        } else {
@@ -900,7 +900,7 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        }
                        desc->hwdesc32.CNTR = sg_dma_len(sg);
                }
-               if (direction == DMA_TO_DEVICE) {
+               if (direction == DMA_MEM_TO_DEV) {
                        sai = ds->reg_width;
                        dai = 0;
                } else {
index 6628feaa76229009f1bf5f62b3419d9e79dcea35..7f5f0da726dafde5ae23ad1343861e12857c5002 100644 (file)
@@ -263,6 +263,7 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
 static char ohci_driver_name[] = KBUILD_MODNAME;
 
 #define PCI_DEVICE_ID_AGERE_FW643      0x5901
+#define PCI_DEVICE_ID_CREATIVE_SB1394  0x4001
 #define PCI_DEVICE_ID_JMICRON_JMB38X_FW        0x2380
 #define PCI_DEVICE_ID_TI_TSB12LV22     0x8009
 #define PCI_DEVICE_ID_TI_TSB12LV26     0x8020
@@ -289,6 +290,9 @@ static const struct {
        {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
                QUIRK_NO_MSI},
 
+       {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
+               QUIRK_RESET_PACKET},
+
        {PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
                QUIRK_NO_MSI},
 
@@ -299,7 +303,7 @@ static const struct {
                QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
-               QUIRK_CYCLE_TIMER},
+               QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},
index 37c4bd1cacd579bfe66523bc5f371d00af1969b4..d0c41188d4e536b4983857de9c834913f1355c13 100644 (file)
@@ -87,6 +87,7 @@ config GPIO_GENERIC_PLATFORM
 
 config GPIO_IT8761E
        tristate "IT8761E GPIO support"
+       depends on X86  # unconditional access to IO space.
        help
          Say yes here to support GPIO functionality of IT8761E super I/O chip.
 
index 5b6948081f8fb3d953d5f95703ed6dac95f9e23d..ddfacc5ce56d55fce87d0131e535b27671ed5828 100644 (file)
@@ -96,7 +96,7 @@ static const char *gpio_p2_names[LPC32XX_GPIO_P2_MAX] = {
 };
 
 static const char *gpio_p3_names[LPC32XX_GPIO_P3_MAX] = {
-       "gpi000", "gpio01", "gpio02", "gpio03",
+       "gpio00", "gpio01", "gpio02", "gpio03",
        "gpio04", "gpio05"
 };
 
index 461958fc2264e91321a5bb80f7ba9cbed0a1a5bc..f0febe5b8221384c390711208c191a56cc2ee4f8 100644 (file)
@@ -248,7 +248,7 @@ static void ioh_gpio_setup(struct ioh_gpio *chip, int num_port)
 static int ioh_irq_type(struct irq_data *d, unsigned int type)
 {
        u32 im;
-       u32 *im_reg;
+       void __iomem *im_reg;
        u32 ien;
        u32 im_pos;
        int ch;
@@ -412,7 +412,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
        int i, j;
        struct ioh_gpio *chip;
        void __iomem *base;
-       void __iomem *chip_save;
+       void *chip_save;
        int irq_base;
 
        ret = pci_enable_device(pdev);
@@ -428,7 +428,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
        }
 
        base = pci_iomap(pdev, 1, 0);
-       if (base == 0) {
+       if (!base) {
                dev_err(&pdev->dev, "%s : pci_iomap failed", __func__);
                ret = -ENOMEM;
                goto err_iomap;
@@ -448,6 +448,7 @@ static int __devinit ioh_gpio_probe(struct pci_dev *pdev,
                chip->reg = chip->base;
                chip->ch = i;
                mutex_init(&chip->lock);
+               spin_lock_init(&chip->spinlock);
                ioh_gpio_setup(chip, num_ports[i]);
                ret = gpiochip_add(&chip->gpio);
                if (ret) {
@@ -521,7 +522,7 @@ static void __devexit ioh_gpio_remove(struct pci_dev *pdev)
        int err;
        int i;
        struct ioh_gpio *chip = pci_get_drvdata(pdev);
-       void __iomem *chip_save;
+       void *chip_save;
 
        chip_save = chip;
 
index f0603297f829e787129f4bf0c34beb980a17f368..e8729cc2ba2b9f1dc864a17803252ef40b2adbfa 100644 (file)
@@ -231,7 +231,7 @@ static void pch_gpio_setup(struct pch_gpio *chip)
 static int pch_irq_type(struct irq_data *d, unsigned int type)
 {
        u32 im;
-       u32 *im_reg;
+       u32 __iomem *im_reg;
        u32 ien;
        u32 im_pos;
        int ch;
@@ -376,7 +376,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
        }
 
        chip->base = pci_iomap(pdev, 1, 0);
-       if (chip->base == 0) {
+       if (!chip->base) {
                dev_err(&pdev->dev, "%s : pci_iomap FAILED", __func__);
                ret = -ENOMEM;
                goto err_iomap;
@@ -392,6 +392,7 @@ static int __devinit pch_gpio_probe(struct pci_dev *pdev,
        chip->reg = chip->base;
        pci_set_drvdata(pdev, chip);
        mutex_init(&chip->lock);
+       spin_lock_init(&chip->spinlock);
        pch_gpio_setup(chip);
        ret = gpiochip_add(&chip->gpio);
        if (ret) {
index a7661773c0525a43367de809c851053c78e6a186..0a79a1167a251dd71cd1c02dc82aedd1b1842213 100644 (file)
@@ -2387,27 +2387,30 @@ static struct samsung_gpio_chip exynos4_gpios_3[] = {
 };
 
 #if defined(CONFIG_ARCH_EXYNOS4) && defined(CONFIG_OF)
-static int exynos4_gpio_xlate(struct gpio_chip *gc, struct device_node *np,
-                             const void *gpio_spec, u32 *flags)
+static int exynos4_gpio_xlate(struct gpio_chip *gc,
+                       const struct of_phandle_args *gpiospec, u32 *flags)
 {
-       const __be32 *gpio = gpio_spec;
-       const u32 n = be32_to_cpup(gpio);
-       unsigned int pin = gc->base + be32_to_cpu(gpio[0]);
+       unsigned int pin;
 
        if (WARN_ON(gc->of_gpio_n_cells < 4))
                return -EINVAL;
 
-       if (n > gc->ngpio)
+       if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
                return -EINVAL;
 
-       if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(be32_to_cpu(gpio[1]))))
+       if (gpiospec->args[0] > gc->ngpio)
+               return -EINVAL;
+
+       pin = gc->base + gpiospec->args[0];
+
+       if (s3c_gpio_cfgpin(pin, S3C_GPIO_SFN(gpiospec->args[1])))
                pr_warn("gpio_xlate: failed to set pin function\n");
-       if (s3c_gpio_setpull(pin, be32_to_cpu(gpio[2])))
+       if (s3c_gpio_setpull(pin, gpiospec->args[2]))
                pr_warn("gpio_xlate: failed to set pin pull up/down\n");
-       if (s5p_gpio_set_drvstr(pin, be32_to_cpu(gpio[3])))
+       if (s5p_gpio_set_drvstr(pin, gpiospec->args[3]))
                pr_warn("gpio_xlate: failed to set pin drive strength\n");
 
-       return n;
+       return gpiospec->args[0];
 }
 
 static const struct of_device_id exynos4_gpio_dt_match[] __initdata = {
index b9c1c297669e64a167a9257021a82d54e1f854c5..91f45b965d1e812d542e81d2c81ab6e60ae52843 100644 (file)
@@ -52,7 +52,7 @@ static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
        struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio);
 
        /* Set the initial value */
-       tps65910_gpio_set(gc, 0, value);
+       tps65910_gpio_set(gc, offset, value);
 
        return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset,
                                                GPIO_CFG_MASK);
index 3f46772f0cb212d5135ad686fbd20857e6b54a25..ba23790450e9d4877ec8721ed5280140a18272ea 100644 (file)
@@ -101,7 +101,7 @@ static int drm_add_magic(struct drm_master *master, struct drm_file *priv,
  * Searches and unlinks the entry in drm_device::magiclist with the magic
  * number hash key, while holding the drm_device::struct_mutex lock.
  */
-static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
+int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
 {
        struct drm_magic_entry *pt;
        struct drm_hash_item *hash;
@@ -136,6 +136,8 @@ static int drm_remove_magic(struct drm_master *master, drm_magic_t magic)
  * If there is a magic number in drm_file::magic then use it, otherwise
  * searches an unique non-zero magic number and add it associating it with \p
  * file_priv.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
  */
 int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
@@ -173,6 +175,8 @@ int drm_getmagic(struct drm_device *dev, void *data, struct drm_file *file_priv)
  * \return zero if authentication successed, or a negative number otherwise.
  *
  * Checks if \p file_priv is associated with the magic number passed in \arg.
+ * This ioctl needs protection by the drm_global_mutex, which protects
+ * struct drm_file::magic and struct drm_magic_entry::priv.
  */
 int drm_authmagic(struct drm_device *dev, void *data,
                  struct drm_file *file_priv)
index c00cf154cc0bbd81ce3c807fde6bffb2a5ff777f..6263b0147598de9688fcdfefb8a0dadc841f9ad9 100644 (file)
@@ -487,6 +487,11 @@ int drm_release(struct inode *inode, struct file *filp)
                  (long)old_encode_dev(file_priv->minor->device),
                  dev->open_count);
 
+       /* Release any auth tokens that might point to this file_priv
+          (do that under the drm_global_mutex). */
+       if (file_priv->magic)
+               (void) drm_remove_magic(file_priv->master, file_priv->magic);
+
        /* if the master has gone away we can't do anything with the lock */
        if (file_priv->minor->master)
                drm_master_release(dev, filp);
index 396e60ce811467bc95117368517a302d51390611..f8625e2907288f590552183ff579a9c7aa756d40 100644 (file)
@@ -140,7 +140,7 @@ int drm_gem_object_init(struct drm_device *dev,
        obj->dev = dev;
        obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
        if (IS_ERR(obj->filp))
-               return -ENOMEM;
+               return PTR_ERR(obj->filp);
 
        kref_init(&obj->refcount);
        atomic_set(&obj->handle_count, 0);
index f9aaa56eae07c76575c090ac0ded6a086b215ca1..b9e5266c341baae491412e1fe75887c270b1fdb7 100644 (file)
@@ -13,7 +13,7 @@ config DRM_EXYNOS
 
 config DRM_EXYNOS_FIMD
        tristate "Exynos DRM FIMD"
-       depends on DRM_EXYNOS
+       depends on DRM_EXYNOS && !FB_S3C
        default n
        help
          Choose this option if you want to use Exynos FIMD for DRM.
@@ -21,7 +21,7 @@ config DRM_EXYNOS_FIMD
 
 config DRM_EXYNOS_HDMI
        tristate "Exynos DRM HDMI"
-       depends on DRM_EXYNOS
+       depends on DRM_EXYNOS && !VIDEO_SAMSUNG_S5P_TV
        help
          Choose this option if you want to use Exynos HDMI for DRM.
          If M is selected, the module will be called exynos_drm_hdmi
index ca83139cd30997fcb2eb2d0df791e6bbea9df8ba..b6a737d196ae556c6b038ad8243540a76f2271af 100644 (file)
@@ -158,7 +158,8 @@ static void fimd_dpms(struct device *subdrv_dev, int mode)
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               pm_runtime_put_sync(subdrv_dev);
+               if (!ctx->suspended)
+                       pm_runtime_put_sync(subdrv_dev);
                break;
        default:
                DRM_DEBUG_KMS("unspecified mode %d\n", mode);
@@ -734,6 +735,46 @@ static void fimd_clear_win(struct fimd_context *ctx, int win)
        writel(val, ctx->regs + SHADOWCON);
 }
 
+static int fimd_power_on(struct fimd_context *ctx, bool enable)
+{
+       struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+       struct device *dev = subdrv->manager.dev;
+
+       DRM_DEBUG_KMS("%s\n", __FILE__);
+
+       if (enable != false && enable != true)
+               return -EINVAL;
+
+       if (enable) {
+               int ret;
+
+               ret = clk_enable(ctx->bus_clk);
+               if (ret < 0)
+                       return ret;
+
+               ret = clk_enable(ctx->lcd_clk);
+               if  (ret < 0) {
+                       clk_disable(ctx->bus_clk);
+                       return ret;
+               }
+
+               ctx->suspended = false;
+
+               /* if vblank was enabled before, enable it again. */
+               if (test_and_clear_bit(0, &ctx->irq_flags))
+                       fimd_enable_vblank(dev);
+
+               fimd_apply(dev);
+       } else {
+               clk_disable(ctx->lcd_clk);
+               clk_disable(ctx->bus_clk);
+
+               ctx->suspended = true;
+       }
+
+       return 0;
+}
+
 static int __devinit fimd_probe(struct platform_device *pdev)
 {
        struct device *dev = &pdev->dev;
@@ -911,39 +952,30 @@ out:
 #ifdef CONFIG_PM_SLEEP
 static int fimd_suspend(struct device *dev)
 {
-       int ret;
+       struct fimd_context *ctx = get_fimd_context(dev);
 
        if (pm_runtime_suspended(dev))
                return 0;
 
-       ret = pm_runtime_suspend(dev);
-       if (ret < 0)
-               return ret;
-
-       return 0;
+       /*
+        * Do not use pm_runtime_suspend(); if it were called here it
+        * would return an error, because the runtime PM usage_count
+        * is still greater than 1 at this point.
+        */
+       return fimd_power_on(ctx, false);
 }
 
 static int fimd_resume(struct device *dev)
 {
-       int ret;
-
-       ret = pm_runtime_resume(dev);
-       if (ret < 0) {
-               DRM_ERROR("failed to resume runtime pm.\n");
-               return ret;
-       }
-
-       pm_runtime_disable(dev);
-
-       ret = pm_runtime_set_active(dev);
-       if (ret < 0) {
-               DRM_ERROR("failed to active runtime pm.\n");
-               pm_runtime_enable(dev);
-               pm_runtime_suspend(dev);
-               return ret;
-       }
+       struct fimd_context *ctx = get_fimd_context(dev);
 
-       pm_runtime_enable(dev);
+       /*
+        * If the system entered sleep while the LCD panel was on, the
+        * runtime PM usage_count is still 1, so the fimd device must be
+        * powered on directly here rather than via the runtime PM interface.
+        */
+       if (!pm_runtime_suspended(dev))
+               return fimd_power_on(ctx, true);
 
        return 0;
 }
@@ -956,39 +988,16 @@ static int fimd_runtime_suspend(struct device *dev)
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       clk_disable(ctx->lcd_clk);
-       clk_disable(ctx->bus_clk);
-
-       ctx->suspended = true;
-       return 0;
+       return fimd_power_on(ctx, false);
 }
 
 static int fimd_runtime_resume(struct device *dev)
 {
        struct fimd_context *ctx = get_fimd_context(dev);
-       int ret;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
-       ret = clk_enable(ctx->bus_clk);
-       if (ret < 0)
-               return ret;
-
-       ret = clk_enable(ctx->lcd_clk);
-       if  (ret < 0) {
-               clk_disable(ctx->bus_clk);
-               return ret;
-       }
-
-       ctx->suspended = false;
-
-       /* if vblank was enabled status, enable it again. */
-       if (test_and_clear_bit(0, &ctx->irq_flags))
-               fimd_enable_vblank(dev);
-
-       fimd_apply(dev);
-
-       return 0;
+       return fimd_power_on(ctx, true);
 }
 #endif
 
index f48f7ce92f5f5e37fd1aa208429d5c4395b9b354..3429d3fd93f3265d31cb3cad101b87eccc74964d 100644 (file)
@@ -1116,8 +1116,8 @@ err_ddc:
 err_iomap:
        iounmap(hdata->regs);
 err_req_region:
-       release_resource(hdata->regs_res);
-       kfree(hdata->regs_res);
+       release_mem_region(hdata->regs_res->start,
+                       resource_size(hdata->regs_res));
 err_resource:
        hdmi_resources_cleanup(hdata);
 err_data:
@@ -1145,8 +1145,8 @@ static int __devexit hdmi_remove(struct platform_device *pdev)
 
        iounmap(hdata->regs);
 
-       release_resource(hdata->regs_res);
-       kfree(hdata->regs_res);
+       release_mem_region(hdata->regs_res->start,
+                       resource_size(hdata->regs_res));
 
        /* hdmiphy i2c driver */
        i2c_del_driver(&hdmiphy_driver);
index 791c0ef1a65b0433ccedf258b59fdf986ddbdcd3..830dfdd6bf154a473e15357654cf4d19be8d6b81 100644 (file)
@@ -113,12 +113,12 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
 
 void psbfb_suspend(struct drm_device *dev)
 {
-       struct drm_framebuffer *fb = 0;
-       struct psb_framebuffer *psbfb = to_psb_fb(fb);
+       struct drm_framebuffer *fb;
 
        console_lock();
        mutex_lock(&dev->mode_config.mutex);
        list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+               struct psb_framebuffer *psbfb = to_psb_fb(fb);
                struct fb_info *info = psbfb->fbdev;
                fb_set_suspend(info, 1);
                drm_fb_helper_blank(FB_BLANK_POWERDOWN, info);
@@ -129,12 +129,12 @@ void psbfb_suspend(struct drm_device *dev)
 
 void psbfb_resume(struct drm_device *dev)
 {
-       struct drm_framebuffer *fb = 0;
-       struct psb_framebuffer *psbfb = to_psb_fb(fb);
+       struct drm_framebuffer *fb;
 
        console_lock();
        mutex_lock(&dev->mode_config.mutex);
        list_for_each_entry(fb, &dev->mode_config.fb_list, head) {
+               struct psb_framebuffer *psbfb = to_psb_fb(fb);
                struct fb_info *info = psbfb->fbdev;
                fb_set_suspend(info, 0);
                drm_fb_helper_blank(FB_BLANK_UNBLANK, info);
index e770bd190a5c8a2efda446339805869944528a81..5d5330f667f14031512c022e12ed3d8eb61bc530 100644 (file)
@@ -20,6 +20,7 @@
  */
 
 #include <drm/drmP.h>
+#include <linux/shmem_fs.h>
 #include "psb_drv.h"
 
 
@@ -203,9 +204,7 @@ static int psb_gtt_attach_pages(struct gtt_range *gt)
        gt->npage = pages;
 
        for (i = 0; i < pages; i++) {
-               /* FIXME: needs updating as per mail from Hugh Dickins */
-               p = read_cache_page_gfp(mapping, i,
-                                       __GFP_COLD | GFP_KERNEL);
+               p = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(p))
                        goto err;
                gt->pages[i] = p;
index f7c17b23983389ae7b978224180dadcd4b6616c3..7f4b4e10246ecfbb087ac825c1969b69dc9742c5 100644 (file)
@@ -886,7 +886,7 @@ static int i810_flush_queue(struct drm_device *dev)
 }
 
 /* Must be called with the lock held */
-void i810_driver_reclaim_buffers(struct drm_device *dev,
+static void i810_reclaim_buffers(struct drm_device *dev,
                                 struct drm_file *file_priv)
 {
        struct drm_device_dma *dma = dev->dma;
@@ -1223,17 +1223,12 @@ void i810_driver_preclose(struct drm_device *dev, struct drm_file *file_priv)
                if (dev_priv->page_flipping)
                        i810_do_cleanup_pageflip(dev);
        }
+}
 
-       if (file_priv->master && file_priv->master->lock.hw_lock) {
-               drm_idlelock_take(&file_priv->master->lock);
-               i810_driver_reclaim_buffers(dev, file_priv);
-               drm_idlelock_release(&file_priv->master->lock);
-       } else {
-               /* master disappeared, clean up stuff anyway and hope nothing
-                * goes wrong */
-               i810_driver_reclaim_buffers(dev, file_priv);
-       }
-
+void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
+                                       struct drm_file *file_priv)
+{
+       i810_reclaim_buffers(dev, file_priv);
 }
 
 int i810_driver_dma_quiescent(struct drm_device *dev)
index 053f1ee58393a885e6aff3d1b8e9c5de8f7c5cf5..ec12f7dc717a863ed7fcd17911aec55a1c486fce 100644 (file)
@@ -63,6 +63,7 @@ static struct drm_driver driver = {
        .lastclose = i810_driver_lastclose,
        .preclose = i810_driver_preclose,
        .device_is_agp = i810_driver_device_is_agp,
+       .reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
        .dma_quiescent = i810_driver_dma_quiescent,
        .ioctls = i810_ioctls,
        .fops = &i810_driver_fops,
index 6e0acad9e0f556549621e7945a00d7cafd0abbac..c9339f48179551dacd84b03bd74bd327d4750d80 100644 (file)
@@ -116,12 +116,14 @@ typedef struct drm_i810_private {
 
                                /* i810_dma.c */
 extern int i810_driver_dma_quiescent(struct drm_device *dev);
-void i810_driver_reclaim_buffers(struct drm_device *dev,
-                                struct drm_file *file_priv);
+extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
+                                              struct drm_file *file_priv);
 extern int i810_driver_load(struct drm_device *, unsigned long flags);
 extern void i810_driver_lastclose(struct drm_device *dev);
 extern void i810_driver_preclose(struct drm_device *dev,
                                 struct drm_file *file_priv);
+extern void i810_driver_reclaim_buffers_locked(struct drm_device *dev,
+                                              struct drm_file *file_priv);
 extern int i810_driver_device_is_agp(struct drm_device *dev);
 
 extern long i810_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
index 11807989f918b351d0585e5e4a54b9d87c0a3125..deaa657292b45b910a81cd9a018baf81c1fce6dc 100644 (file)
@@ -121,11 +121,11 @@ static const char *cache_level_str(int type)
 static void
 describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 {
-       seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s%s",
+       seq_printf(m, "%p: %s%s %8zdKiB %04x %04x %d %d%s%s%s",
                   &obj->base,
                   get_pin_flag(obj),
                   get_tiling_flag(obj),
-                  obj->base.size,
+                  obj->base.size / 1024,
                   obj->base.read_domains,
                   obj->base.write_domain,
                   obj->last_rendering_seqno,
@@ -653,7 +653,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
        seq_printf(m, "  Size :    %08x\n", ring->size);
        seq_printf(m, "  Active :  %08x\n", intel_ring_get_active_head(ring));
        seq_printf(m, "  NOPID :   %08x\n", I915_READ_NOPID(ring));
-       if (IS_GEN6(dev)) {
+       if (IS_GEN6(dev) || IS_GEN7(dev)) {
                seq_printf(m, "  Sync 0 :   %08x\n", I915_READ_SYNC_0(ring));
                seq_printf(m, "  Sync 1 :   %08x\n", I915_READ_SYNC_1(ring));
        }
@@ -1075,6 +1075,7 @@ static int gen6_drpc_info(struct seq_file *m)
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 rpmodectl1, gt_core_status, rcctl1;
+       unsigned forcewake_count;
        int count=0, ret;
 
 
@@ -1082,9 +1083,13 @@ static int gen6_drpc_info(struct seq_file *m)
        if (ret)
                return ret;
 
-       if (atomic_read(&dev_priv->forcewake_count)) {
-               seq_printf(m, "RC information inaccurate because userspace "
-                             "holds a reference \n");
+       spin_lock_irq(&dev_priv->gt_lock);
+       forcewake_count = dev_priv->forcewake_count;
+       spin_unlock_irq(&dev_priv->gt_lock);
+
+       if (forcewake_count) {
+               seq_printf(m, "RC information inaccurate because somebody "
+                             "holds a forcewake reference \n");
        } else {
                /* NB: we cannot use forcewake, else we read the wrong values */
                while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
@@ -1106,7 +1111,7 @@ static int gen6_drpc_info(struct seq_file *m)
        seq_printf(m, "SW control enabled: %s\n",
                   yesno((rpmodectl1 & GEN6_RP_MEDIA_MODE_MASK) ==
                          GEN6_RP_MEDIA_SW_MODE));
-       seq_printf(m, "RC6 Enabled: %s\n",
+       seq_printf(m, "RC1e Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
        seq_printf(m, "RC6 Enabled: %s\n",
                   yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
@@ -1398,9 +1403,13 @@ static int i915_gen6_forcewake_count_info(struct seq_file *m, void *data)
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
+       unsigned forcewake_count;
+
+       spin_lock_irq(&dev_priv->gt_lock);
+       forcewake_count = dev_priv->forcewake_count;
+       spin_unlock_irq(&dev_priv->gt_lock);
 
-       seq_printf(m, "forcewake count = %d\n",
-                  atomic_read(&dev_priv->forcewake_count));
+       seq_printf(m, "forcewake count = %u\n", forcewake_count);
 
        return 0;
 }
@@ -1665,7 +1674,7 @@ static int i915_forcewake_open(struct inode *inode, struct file *file)
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
 
-       if (!IS_GEN6(dev))
+       if (INTEL_INFO(dev)->gen < 6)
                return 0;
 
        ret = mutex_lock_interruptible(&dev->struct_mutex);
@@ -1682,7 +1691,7 @@ int i915_forcewake_release(struct inode *inode, struct file *file)
        struct drm_device *dev = inode->i_private;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
-       if (!IS_GEN6(dev))
+       if (INTEL_INFO(dev)->gen < 6)
                return 0;
 
        /*
index 5f4d5893e98356ff8b657a3a3048858d8ba15fab..ddfe3d902b2a3a5d908b0c7664a4348f94ce3988 100644 (file)
@@ -2045,6 +2045,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (!IS_I945G(dev) && !IS_I945GM(dev))
                pci_enable_msi(dev->pdev);
 
+       spin_lock_init(&dev_priv->gt_lock);
        spin_lock_init(&dev_priv->irq_lock);
        spin_lock_init(&dev_priv->error_lock);
        spin_lock_init(&dev_priv->rps_lock);
index 8f7187915b0dea430f864baef322ac509c8106be..308f819135626c6b9c8d3805c953bd7a9ddf55dc 100644 (file)
@@ -368,11 +368,12 @@ void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
  */
 void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
 {
-       WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+       unsigned long irqflags;
 
-       /* Forcewake is atomic in case we get in here without the lock */
-       if (atomic_add_return(1, &dev_priv->forcewake_count) == 1)
+       spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+       if (dev_priv->forcewake_count++ == 0)
                dev_priv->display.force_wake_get(dev_priv);
+       spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
 }
 
 void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
@@ -392,10 +393,12 @@ void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
  */
 void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 {
-       WARN_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
+       unsigned long irqflags;
 
-       if (atomic_dec_and_test(&dev_priv->forcewake_count))
+       spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+       if (--dev_priv->forcewake_count == 0)
                dev_priv->display.force_wake_put(dev_priv);
+       spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
 }
 
 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
@@ -597,9 +600,36 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
 static int gen6_do_reset(struct drm_device *dev, u8 flags)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
+       int     ret;
+       unsigned long irqflags;
 
-       I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
-       return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+       /* Hold gt_lock across reset to prevent any register access
+        * with forcewake not set correctly
+        */
+       spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
+
+       /* Reset the chip */
+
+       /* GEN6_GDRST is not in the gt power well, no need to check
+        * for fifo space for the write or forcewake the chip for
+        * the read
+        */
+       I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);
+
+       /* Spin waiting for the device to ack the reset request */
+       ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+
+       /* If reset with a user forcewake, try to restore, otherwise turn it off */
+       if (dev_priv->forcewake_count)
+               dev_priv->display.force_wake_get(dev_priv);
+       else
+               dev_priv->display.force_wake_put(dev_priv);
+
+       /* Restore fifo count */
+       dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
+
+       spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
+       return ret;
 }
 
 /**
@@ -643,9 +673,6 @@ int i915_reset(struct drm_device *dev, u8 flags)
        case 7:
        case 6:
                ret = gen6_do_reset(dev, flags);
-               /* If reset with a user forcewake, try to restore */
-               if (atomic_read(&dev_priv->forcewake_count))
-                       __gen6_gt_force_wake_get(dev_priv);
                break;
        case 5:
                ret = ironlake_do_reset(dev, flags);
@@ -927,9 +954,14 @@ MODULE_LICENSE("GPL and additional rights");
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
        u##x val = 0; \
        if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-               gen6_gt_force_wake_get(dev_priv); \
+               unsigned long irqflags; \
+               spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
+               if (dev_priv->forcewake_count == 0) \
+                       dev_priv->display.force_wake_get(dev_priv); \
                val = read##y(dev_priv->regs + reg); \
-               gen6_gt_force_wake_put(dev_priv); \
+               if (dev_priv->forcewake_count == 0) \
+                       dev_priv->display.force_wake_put(dev_priv); \
+               spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
        } else { \
                val = read##y(dev_priv->regs + reg); \
        } \
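
The i915 hunks above replace the atomic_t forcewake reference count with a
plain counter protected by the new gt_lock spinlock, so the 0 -> 1 get and the
1 -> 0 put are serialized together with the register access they trigger, even
from irq context.  A stand-alone sketch of that first-get/last-put pattern,
with a pthread mutex standing in for the irq-safe spinlock and stub callbacks
in place of dev_priv->display.force_wake_get/put:

#include <pthread.h>
#include <stdio.h>

/* Stubs standing in for the hardware force-wake callbacks. */
static void hw_force_wake_get(void) { puts("hw: forcewake asserted"); }
static void hw_force_wake_put(void) { puts("hw: forcewake released"); }

static pthread_mutex_t gt_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int forcewake_count;

/* Mirrors gen6_gt_force_wake_get(): only the 0 -> 1 transition touches hw. */
static void gt_force_wake_get(void)
{
	pthread_mutex_lock(&gt_lock);
	if (forcewake_count++ == 0)
		hw_force_wake_get();
	pthread_mutex_unlock(&gt_lock);
}

/* Mirrors gen6_gt_force_wake_put(): only the 1 -> 0 transition touches hw. */
static void gt_force_wake_put(void)
{
	pthread_mutex_lock(&gt_lock);
	if (--forcewake_count == 0)
		hw_force_wake_put();
	pthread_mutex_unlock(&gt_lock);
}

int main(void)
{
	gt_force_wake_get();	/* asserts forcewake */
	gt_force_wake_get();	/* nested get, no hardware access */
	gt_force_wake_put();	/* still held */
	gt_force_wake_put();	/* releases forcewake */
	return 0;
}

(Build with -pthread; in the driver itself the lock is taken with
spin_lock_irqsave() because the read/write macros also run in irq context.)
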
index 602bc80baabb982814dcce3640e13b53a638f09e..9689ca38b2b333f26c75e95421e773b3331bd81c 100644 (file)
@@ -288,7 +288,13 @@ typedef struct drm_i915_private {
        int relative_constants_mode;
 
        void __iomem *regs;
-       u32 gt_fifo_count;
+       /** gt_fifo_count and the subsequent register write are synchronized
+        * with dev->struct_mutex. */
+       unsigned gt_fifo_count;
+       /** forcewake_count is protected by gt_lock */
+       unsigned forcewake_count;
+       /** gt_lock is also taken in irq contexts. */
+       struct spinlock gt_lock;
 
        struct intel_gmbus {
                struct i2c_adapter adapter;
@@ -741,8 +747,6 @@ typedef struct drm_i915_private {
 
        struct drm_property *broadcast_rgb_property;
        struct drm_property *force_audio_property;
-
-       atomic_t forcewake_count;
 } drm_i915_private_t;
 
 enum i915_cache_level {
index 5d433fc11ace138748907b17cbb3a9932732b46b..5bd4361ea84dd2e5e4a0e39a6af249ccd7786573 100644 (file)
@@ -1751,7 +1751,8 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
                INIT_WORK(&dev_priv->rps_work, gen6_pm_rps_work);
 
        I915_WRITE(HWSTAM, 0xeffe);
-       if (IS_GEN6(dev) || IS_GEN7(dev)) {
+
+       if (IS_GEN6(dev)) {
                /* Workaround stalls observed on Sandy Bridge GPUs by
                 * making the blitter command streamer generate a
                 * write to the Hardware Status Page for
index 7886e4fb60e3e23fb283461a690dbe43a928fc6e..2b5eb229ff2cc1c443d7f01039a4cc308201d211 100644 (file)
 #include "drm.h"
 #include "i915_drm.h"
 #include "intel_drv.h"
+#include "i915_reg.h"
 
 static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32     dpll_reg;
 
+       /* On IVB, 3rd pipe shares PLL with another one */
+       if (pipe > 1)
+               return false;
+
        if (HAS_PCH_SPLIT(dev))
-               dpll_reg = (pipe == PIPE_A) ? _PCH_DPLL_A : _PCH_DPLL_B;
+               dpll_reg = PCH_DPLL(pipe);
        else
                dpll_reg = (pipe == PIPE_A) ? _DPLL_A : _DPLL_B;
 
@@ -822,7 +827,7 @@ int i915_save_state(struct drm_device *dev)
 
        if (IS_IRONLAKE_M(dev))
                ironlake_disable_drps(dev);
-       if (IS_GEN6(dev))
+       if (INTEL_INFO(dev)->gen >= 6)
                gen6_disable_rps(dev);
 
        /* Cache mode state */
@@ -881,7 +886,7 @@ int i915_restore_state(struct drm_device *dev)
                intel_init_emon(dev);
        }
 
-       if (IS_GEN6(dev)) {
+       if (INTEL_INFO(dev)->gen >= 6) {
                gen6_enable_rps(dev_priv);
                gen6_update_ring_freq(dev_priv);
        }
index 8af3735e27c615506eb94a171bde35e7aebd36ef..dbda6e3bdf076697cee1d87616387c52d47efcdc 100644 (file)
@@ -467,8 +467,12 @@ struct edp_link_params {
 struct bdb_edp {
        struct edp_power_seq power_seqs[16];
        u32 color_depth;
-       u32 sdrrs_msa_timing_delay;
        struct edp_link_params link_params[16];
+       u32 sdrrs_msa_timing_delay;
+
+       /* ith bit indicates enabled/disabled for (i+1)th panel */
+       u16 edp_s3d_feature;
+       u16 edp_t3_optimization;
 } __attribute__ ((packed));
 
 void intel_setup_bios(struct drm_device *dev);
index fee0ad02c6d0f6563bc41a9fd91f6ebfdf076a18..dd729d46a61fb55a3caffb265cce9744cebd2e57 100644 (file)
@@ -24,6 +24,7 @@
  *     Eric Anholt <eric@anholt.net>
  */
 
+#include <linux/dmi.h>
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include "drmP.h"
@@ -540,6 +541,24 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
        .destroy = intel_encoder_destroy,
 };
 
+static int __init intel_no_crt_dmi_callback(const struct dmi_system_id *id)
+{
+       DRM_DEBUG_KMS("Skipping CRT initialization for %s\n", id->ident);
+       return 1;
+}
+
+static const struct dmi_system_id intel_no_crt[] = {
+       {
+               .callback = intel_no_crt_dmi_callback,
+               .ident = "ACER ZGB",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "ACER"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "ZGB"),
+               },
+       },
+       { }
+};
+
 void intel_crt_init(struct drm_device *dev)
 {
        struct drm_connector *connector;
@@ -547,6 +566,10 @@ void intel_crt_init(struct drm_device *dev)
        struct intel_connector *intel_connector;
        struct drm_i915_private *dev_priv = dev->dev_private;
 
+       /* Skip machines without VGA that falsely report hotplug events */
+       if (dmi_check_system(intel_no_crt))
+               return;
+
        crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
        if (!crt)
                return;
index 2a3f707caab8cc39b91e065071b60244da4d8ba6..b3b51c43dad09b7e51a35377d0fb351e09307b62 100644 (file)
@@ -5808,12 +5808,15 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
        if (is_lvds) {
                temp = I915_READ(PCH_LVDS);
                temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP;
-               if (HAS_PCH_CPT(dev))
+               if (HAS_PCH_CPT(dev)) {
+                       temp &= ~PORT_TRANS_SEL_MASK;
                        temp |= PORT_TRANS_SEL_CPT(pipe);
-               else if (pipe == 1)
-                       temp |= LVDS_PIPEB_SELECT;
-               else
-                       temp &= ~LVDS_PIPEB_SELECT;
+               } else {
+                       if (pipe == 1)
+                               temp |= LVDS_PIPEB_SELECT;
+                       else
+                               temp &= ~LVDS_PIPEB_SELECT;
+               }
 
                /* set the corresponsding LVDS_BORDER bit */
                temp |= dev_priv->lvds_border_bits;
@@ -9025,12 +9028,9 @@ void intel_modeset_init(struct drm_device *dev)
 
        for (i = 0; i < dev_priv->num_pipe; i++) {
                intel_crtc_init(dev, i);
-               if (HAS_PCH_SPLIT(dev)) {
-                       ret = intel_plane_init(dev, i);
-                       if (ret)
-                               DRM_ERROR("plane %d init failed: %d\n",
-                                         i, ret);
-               }
+               ret = intel_plane_init(dev, i);
+               if (ret)
+                       DRM_DEBUG_KMS("plane %d init failed: %d\n", i, ret);
        }
 
        /* Just disable it once at startup */
index e44191132ac4e97307029e44a50d65e7a96be53a..798f6e1aa544fc8f43883fdfe1632e48d4a7adcc 100644 (file)
@@ -708,6 +708,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
                },
        },
        {
+                .callback = intel_no_lvds_dmi_callback,
+                .ident = "Clientron E830",
+                .matches = {
+                        DMI_MATCH(DMI_SYS_VENDOR, "Clientron"),
+                        DMI_MATCH(DMI_PRODUCT_NAME, "E830"),
+                },
+        },
+        {
                .callback = intel_no_lvds_dmi_callback,
                .ident = "Asus EeeBox PC EB1007",
                .matches = {
index 77e729d4e4f02476b289aed344d344411c0eb2c1..1ab842c6032e949a37855a3995aa161d9f276977 100644 (file)
@@ -635,6 +635,19 @@ render_ring_add_request(struct intel_ring_buffer *ring,
        return 0;
 }
 
+static u32
+gen6_ring_get_seqno(struct intel_ring_buffer *ring)
+{
+       struct drm_device *dev = ring->dev;
+
+       /* Workaround to force correct ordering between irq and seqno writes on
+        * ivb (and maybe also on snb) by reading from a CS register (like
+        * ACTHD) before reading the status page. */
+       if (IS_GEN7(dev))
+               intel_ring_get_active_head(ring);
+       return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+}
+
 static u32
 ring_get_seqno(struct intel_ring_buffer *ring)
 {
@@ -791,17 +804,6 @@ ring_add_request(struct intel_ring_buffer *ring,
        return 0;
 }
 
-static bool
-gen7_blt_ring_get_irq(struct intel_ring_buffer *ring)
-{
-       /* The BLT ring on IVB appears to have broken synchronization
-        * between the seqno write and the interrupt, so that the
-        * interrupt appears first.  Returning false here makes
-        * i915_wait_request() do a polling loop, instead.
-        */
-       return false;
-}
-
 static bool
 gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
 {
@@ -811,6 +813,12 @@ gen6_ring_get_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
        if (!dev->irq_enabled)
               return false;
 
+       /* It looks like we need to prevent the gt from suspending while waiting
+        * for a notify irq, otherwise irqs seem to get lost on at least the
+        * blt/bsd rings on ivb. */
+       if (IS_GEN7(dev))
+               gen6_gt_force_wake_get(dev_priv);
+
        spin_lock(&ring->irq_lock);
        if (ring->irq_refcount++ == 0) {
                ring->irq_mask &= ~rflag;
@@ -835,6 +843,9 @@ gen6_ring_put_irq(struct intel_ring_buffer *ring, u32 gflag, u32 rflag)
                ironlake_disable_irq(dev_priv, gflag);
        }
        spin_unlock(&ring->irq_lock);
+
+       if (IS_GEN7(dev))
+               gen6_gt_force_wake_put(dev_priv);
 }
 
 static bool
@@ -1341,7 +1352,7 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
        .write_tail             = gen6_bsd_ring_write_tail,
        .flush                  = gen6_ring_flush,
        .add_request            = gen6_add_request,
-       .get_seqno              = ring_get_seqno,
+       .get_seqno              = gen6_ring_get_seqno,
        .irq_get                = gen6_bsd_ring_get_irq,
        .irq_put                = gen6_bsd_ring_put_irq,
        .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
@@ -1476,7 +1487,7 @@ static const struct intel_ring_buffer gen6_blt_ring = {
        .write_tail             = ring_write_tail,
        .flush                  = blt_ring_flush,
        .add_request            = gen6_add_request,
-       .get_seqno              = ring_get_seqno,
+       .get_seqno              = gen6_ring_get_seqno,
        .irq_get                = blt_ring_get_irq,
        .irq_put                = blt_ring_put_irq,
        .dispatch_execbuffer    = gen6_ring_dispatch_execbuffer,
@@ -1499,6 +1510,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
                ring->flush = gen6_render_ring_flush;
                ring->irq_get = gen6_render_ring_get_irq;
                ring->irq_put = gen6_render_ring_put_irq;
+               ring->get_seqno = gen6_ring_get_seqno;
        } else if (IS_GEN5(dev)) {
                ring->add_request = pc_render_add_request;
                ring->get_seqno = pc_render_get_seqno;
@@ -1577,8 +1589,5 @@ int intel_init_blt_ring_buffer(struct drm_device *dev)
 
        *ring = gen6_blt_ring;
 
-       if (IS_GEN7(dev))
-               ring->irq_get = gen7_blt_ring_get_irq;
-
        return intel_init_ring_buffer(dev, ring);
 }
index f7b9268df2666831795835c1f378a93cf1340379..e334ec33a47d4eb0cbd731c0c834de6c2102c458 100644 (file)
@@ -1066,15 +1066,13 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
 
        /* Set the SDVO control regs. */
        if (INTEL_INFO(dev)->gen >= 4) {
-               sdvox = 0;
+               /* The real mode polarity is set by the SDVO commands, using
+                * struct intel_sdvo_dtd. */
+               sdvox = SDVO_VSYNC_ACTIVE_HIGH | SDVO_HSYNC_ACTIVE_HIGH;
                if (intel_sdvo->is_hdmi)
                        sdvox |= intel_sdvo->color_range;
                if (INTEL_INFO(dev)->gen < 5)
                        sdvox |= SDVO_BORDER_ENABLE;
-               if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
-                       sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
-               if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
-                       sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
        } else {
                sdvox = I915_READ(intel_sdvo->sdvo_reg);
                switch (intel_sdvo->sdvo_reg) {
index d13989fda50101f99c72cd00b23268b91027081c..2288abf88cce4e3420bbedc379747480aa8843e8 100644 (file)
@@ -466,10 +466,8 @@ intel_update_plane(struct drm_plane *plane, struct drm_crtc *crtc,
        mutex_lock(&dev->struct_mutex);
 
        ret = intel_pin_and_fence_fb_obj(dev, obj, NULL);
-       if (ret) {
-               DRM_ERROR("failed to pin object\n");
+       if (ret)
                goto out_unlock;
-       }
 
        intel_plane->obj = obj;
 
@@ -632,10 +630,8 @@ intel_plane_init(struct drm_device *dev, enum pipe pipe)
        unsigned long possible_crtcs;
        int ret;
 
-       if (!(IS_GEN6(dev) || IS_GEN7(dev))) {
-               DRM_ERROR("new plane code only for SNB+\n");
+       if (!(IS_GEN6(dev) || IS_GEN7(dev)))
                return -ENODEV;
-       }
 
        intel_plane = kzalloc(sizeof(struct intel_plane), GFP_KERNEL);
        if (!intel_plane)
index f3c6a9a8b081ae8f06734af32535433db67f5d12..1571be37ce3e36b1089994a3454c4a23f8a733e6 100644 (file)
@@ -417,7 +417,7 @@ static const struct tv_mode tv_modes[] = {
        {
                .name           = "NTSC-M",
                .clock          = 108000,
-               .refresh        = 29970,
+               .refresh        = 59940,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
                /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 3.580MHz */
@@ -460,7 +460,7 @@ static const struct tv_mode tv_modes[] = {
        {
                .name           = "NTSC-443",
                .clock          = 108000,
-               .refresh        = 29970,
+               .refresh        = 59940,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
                /* 525 Lines, 60 Fields, 15.734KHz line, Sub-Carrier 4.43MHz */
@@ -502,7 +502,7 @@ static const struct tv_mode tv_modes[] = {
        {
                .name           = "NTSC-J",
                .clock          = 108000,
-               .refresh        = 29970,
+               .refresh        = 59940,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
 
@@ -545,7 +545,7 @@ static const struct tv_mode tv_modes[] = {
        {
                .name           = "PAL-M",
                .clock          = 108000,
-               .refresh        = 29970,
+               .refresh        = 59940,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
 
@@ -589,7 +589,7 @@ static const struct tv_mode tv_modes[] = {
                /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
                .name       = "PAL-N",
                .clock          = 108000,
-               .refresh        = 25000,
+               .refresh        = 50000,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
 
@@ -634,7 +634,7 @@ static const struct tv_mode tv_modes[] = {
                /* 625 Lines, 50 Fields, 15.625KHz line, Sub-Carrier 4.434MHz */
                .name       = "PAL",
                .clock          = 108000,
-               .refresh        = 25000,
+               .refresh        = 50000,
                .oversample     = TV_OVERSAMPLE_8X,
                .component_only = 0,
 
@@ -673,78 +673,6 @@ static const struct tv_mode tv_modes[] = {
 
                .filter_table = filter_table,
        },
-       {
-               .name       = "480p@59.94Hz",
-               .clock          = 107520,
-               .refresh        = 59940,
-               .oversample     = TV_OVERSAMPLE_4X,
-               .component_only = 1,
-
-               .hsync_end      = 64,               .hblank_end         = 122,
-               .hblank_start   = 842,              .htotal             = 857,
-
-               .progressive    = true,             .trilevel_sync = false,
-
-               .vsync_start_f1 = 12,               .vsync_start_f2     = 12,
-               .vsync_len      = 12,
-
-               .veq_ena        = false,
-
-               .vi_end_f1      = 44,               .vi_end_f2          = 44,
-               .nbr_end        = 479,
-
-               .burst_ena      = false,
-
-               .filter_table = filter_table,
-       },
-       {
-               .name       = "480p@60Hz",
-               .clock          = 107520,
-               .refresh        = 60000,
-               .oversample     = TV_OVERSAMPLE_4X,
-               .component_only = 1,
-
-               .hsync_end      = 64,               .hblank_end         = 122,
-               .hblank_start   = 842,              .htotal             = 856,
-
-               .progressive    = true,             .trilevel_sync = false,
-
-               .vsync_start_f1 = 12,               .vsync_start_f2     = 12,
-               .vsync_len      = 12,
-
-               .veq_ena        = false,
-
-               .vi_end_f1      = 44,               .vi_end_f2          = 44,
-               .nbr_end        = 479,
-
-               .burst_ena      = false,
-
-               .filter_table = filter_table,
-       },
-       {
-               .name       = "576p",
-               .clock          = 107520,
-               .refresh        = 50000,
-               .oversample     = TV_OVERSAMPLE_4X,
-               .component_only = 1,
-
-               .hsync_end      = 64,               .hblank_end         = 139,
-               .hblank_start   = 859,              .htotal             = 863,
-
-               .progressive    = true,         .trilevel_sync = false,
-
-               .vsync_start_f1 = 10,               .vsync_start_f2     = 10,
-               .vsync_len      = 10,
-
-               .veq_ena        = false,
-
-               .vi_end_f1      = 48,               .vi_end_f2          = 48,
-               .nbr_end        = 575,
-
-               .burst_ena      = false,
-
-               .filter_table = filter_table,
-       },
        {
                .name       = "720p@60Hz",
                .clock          = 148800,
@@ -769,30 +697,6 @@ static const struct tv_mode tv_modes[] = {
 
                .filter_table = filter_table,
        },
-       {
-               .name       = "720p@59.94Hz",
-               .clock          = 148800,
-               .refresh        = 59940,
-               .oversample     = TV_OVERSAMPLE_2X,
-               .component_only = 1,
-
-               .hsync_end      = 80,               .hblank_end         = 300,
-               .hblank_start   = 1580,             .htotal             = 1651,
-
-               .progressive    = true,             .trilevel_sync = true,
-
-               .vsync_start_f1 = 10,               .vsync_start_f2     = 10,
-               .vsync_len      = 10,
-
-               .veq_ena        = false,
-
-               .vi_end_f1      = 29,               .vi_end_f2          = 29,
-               .nbr_end        = 719,
-
-               .burst_ena      = false,
-
-               .filter_table = filter_table,
-       },
        {
                .name       = "720p@50Hz",
                .clock          = 148800,
@@ -821,7 +725,7 @@ static const struct tv_mode tv_modes[] = {
        {
                .name       = "1080i@50Hz",
                .clock          = 148800,
-               .refresh        = 25000,
+               .refresh        = 50000,
                .oversample     = TV_OVERSAMPLE_2X,
                .component_only = 1,
 
@@ -847,7 +751,7 @@ static const struct tv_mode tv_modes[] = {
        {
                .name       = "1080i@60Hz",
                .clock          = 148800,
-               .refresh        = 30000,
+               .refresh        = 60000,
                .oversample     = TV_OVERSAMPLE_2X,
                .component_only = 1,
 
@@ -868,32 +772,6 @@ static const struct tv_mode tv_modes[] = {
 
                .burst_ena      = false,
 
-               .filter_table = filter_table,
-       },
-       {
-               .name       = "1080i@59.94Hz",
-               .clock          = 148800,
-               .refresh        = 29970,
-               .oversample     = TV_OVERSAMPLE_2X,
-               .component_only = 1,
-
-               .hsync_end      = 88,               .hblank_end         = 235,
-               .hblank_start   = 2155,             .htotal             = 2201,
-
-               .progressive    = false,            .trilevel_sync = true,
-
-               .vsync_start_f1 = 4,            .vsync_start_f2    = 5,
-               .vsync_len      = 10,
-
-               .veq_ena        = true,             .veq_start_f1       = 4,
-               .veq_start_f2   = 4,            .veq_len          = 10,
-
-
-               .vi_end_f1      = 21,           .vi_end_f2        = 22,
-               .nbr_end        = 539,
-
-               .burst_ena      = false,
-
                .filter_table = filter_table,
        },
 };
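
Note on the intel_tv.c hunks above: the .refresh values read as vertical refresh rates in millihertz, so the interlaced entries move from the frame rate (e.g. 29970 for 29.97 fps NTSC) to the field rate (59940 for 59.94 fields/s). A minimal sketch of that relationship, assuming this unit convention; the helper name is illustrative and not part of the driver:

	/* Illustrative only: for an interlaced mode the field rate is twice
	 * the frame rate, which is why e.g. NTSC moves from 29970 to 59940
	 * and PAL from 25000 to 50000 when .refresh is read as the vertical
	 * (field) rate in mHz.
	 */
	static unsigned int interlaced_vrefresh_mhz(unsigned int frame_rate_mhz)
	{
		return frame_rate_mhz * 2;	/* 29970 -> 59940, 25000 -> 50000 */
	}
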
index 1e382ad5a2b85b04fb6f8b0953fe793eee44f615..a37c31e358aa40eb9a6e618c9c59459b8233329b 100644 (file)
@@ -54,9 +54,10 @@ struct bit_entry {
 int bit_table(struct drm_device *, u8 id, struct bit_entry *);
 
 enum dcb_gpio_tag {
-       DCB_GPIO_TVDAC0 = 0xc,
+       DCB_GPIO_PANEL_POWER = 0x01,
+       DCB_GPIO_TVDAC0 = 0x0c,
        DCB_GPIO_TVDAC1 = 0x2d,
-       DCB_GPIO_PWM_FAN = 0x9,
+       DCB_GPIO_PWM_FAN = 0x09,
        DCB_GPIO_FAN_SENSE = 0x3d,
        DCB_GPIO_UNUSED = 0xff
 };
index 724b41a2b9e9414b27ef6c4ea347a116657c9026..ec54364ac828f1bd69c13e42cf50507db5e913f6 100644 (file)
@@ -812,6 +812,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_vma *vma;
 
+       /* ttm can now (stupidly) pass the driver bos it didn't create... */
+       if (bo->destroy != nouveau_bo_del_ttm)
+               return;
+
        list_for_each_entry(vma, &nvbo->vma_list, head) {
                if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
                        nouveau_vm_map(vma, new_mem->mm_node);
index 3cb52bc52b21101bbdf0a136b1cd4c2a639fb79e..795a9e3c990a0cba3599b5c754f33adf8e15da93 100644 (file)
@@ -219,6 +219,16 @@ nouveau_display_init(struct drm_device *dev)
        if (ret)
                return ret;
 
+       /* power on internal panel if it's not already.  the init tables of
+        * some vbios default this to off for some reason, causing the
+        * panel to not work after resume
+        */
+       if (nouveau_gpio_func_get(dev, DCB_GPIO_PANEL_POWER) == 0) {
+               nouveau_gpio_func_set(dev, DCB_GPIO_PANEL_POWER, true);
+               msleep(300);
+       }
+
+       /* enable polling for external displays */
        drm_kms_helper_poll_enable(dev);
 
        /* enable hotplug interrupts */
index e4a7cfe7898dc455fa4e423828a73ff162ee4ecc..81d7962e7252cd8f8a819e881fd5430e3f495ff1 100644 (file)
@@ -124,7 +124,7 @@ MODULE_PARM_DESC(ctxfw, "Use external HUB/GPC ucode (fermi)\n");
 int nouveau_ctxfw;
 module_param_named(ctxfw, nouveau_ctxfw, int, 0400);
 
-MODULE_PARM_DESC(ctxfw, "Santise DCB table according to MXM-SIS\n");
+MODULE_PARM_DESC(mxmdcb, "Santise DCB table according to MXM-SIS\n");
 int nouveau_mxmdcb = 1;
 module_param_named(mxmdcb, nouveau_mxmdcb, int, 0400);
 
index 5f0bc57fdaab5e14f1c3bc9f6e049f02c43a5069..7ce3fde4074312948e39c902fb4ca239c8e5a733 100644 (file)
@@ -379,6 +379,25 @@ retry:
        return 0;
 }
 
+static int
+validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
+{
+       struct nouveau_fence *fence = NULL;
+       int ret = 0;
+
+       spin_lock(&nvbo->bo.bdev->fence_lock);
+       if (nvbo->bo.sync_obj)
+               fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+       spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+       if (fence) {
+               ret = nouveau_fence_sync(fence, chan);
+               nouveau_fence_unref(&fence);
+       }
+
+       return ret;
+}
+
 static int
 validate_list(struct nouveau_channel *chan, struct list_head *list,
              struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
@@ -393,7 +412,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
        list_for_each_entry(nvbo, list, entry) {
                struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 
-               ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
+               ret = validate_sync(chan, nvbo);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail pre-validate sync\n");
                        return ret;
@@ -416,7 +435,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
                        return ret;
                }
 
-               ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
+               ret = validate_sync(chan, nvbo);
                if (unlikely(ret)) {
                        NV_ERROR(dev, "fail post-validate sync\n");
                        return ret;
index 8bccddf4eff0011e9ad2e330b0ae8163270c2470..e5a64f0f4cb74538f41e121d19b459395be4ce05 100644 (file)
@@ -656,7 +656,16 @@ nouveau_mxm_init(struct drm_device *dev)
 
        if (mxm_shadow(dev, mxm[0])) {
                MXM_MSG(dev, "failed to locate valid SIS\n");
+#if 0
+               /* we should, perhaps, fall back to some kind of limited
+                * mode here if the x86 vbios hasn't already done the
+                * work for us (so we prevent loading with completely
+                * whacked vbios tables).
+                */
                return -EINVAL;
+#else
+               return 0;
+#endif
        }
 
        MXM_MSG(dev, "MXMS Version %d.%d\n",
index 03937212e9d81c97a3595e269cadc13c0f4eea97..ec5481dfcd82ed9ce3aaca764ac0471bfb9eb52f 100644 (file)
@@ -495,9 +495,9 @@ nv50_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_pm_state *info;
        struct pll_lims pll;
-       int ret = -EINVAL;
+       int clk, ret = -EINVAL;
        int N, M, P1, P2;
-       u32 clk, out;
+       u32 out;
 
        if (dev_priv->chipset == 0xaa ||
            dev_priv->chipset == 0xac)
index 0fda830ef806eb6e5c9b346586a852e818609dd1..742f17f009a966868179d046dd17390950778b81 100644 (file)
@@ -355,15 +355,12 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
        atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 }
 
-static void atombios_disable_ss(struct drm_crtc *crtc)
+static void atombios_disable_ss(struct radeon_device *rdev, int pll_id)
 {
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-       struct drm_device *dev = crtc->dev;
-       struct radeon_device *rdev = dev->dev_private;
        u32 ss_cntl;
 
        if (ASIC_IS_DCE4(rdev)) {
-               switch (radeon_crtc->pll_id) {
+               switch (pll_id) {
                case ATOM_PPLL1:
                        ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
                        ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
@@ -379,7 +376,7 @@ static void atombios_disable_ss(struct drm_crtc *crtc)
                        return;
                }
        } else if (ASIC_IS_AVIVO(rdev)) {
-               switch (radeon_crtc->pll_id) {
+               switch (pll_id) {
                case ATOM_PPLL1:
                        ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
                        ss_cntl &= ~1;
@@ -406,13 +403,11 @@ union atom_enable_ss {
        ENABLE_SPREAD_SPECTRUM_ON_PPLL_V3 v3;
 };
 
-static void atombios_crtc_program_ss(struct drm_crtc *crtc,
+static void atombios_crtc_program_ss(struct radeon_device *rdev,
                                     int enable,
                                     int pll_id,
                                     struct radeon_atom_ss *ss)
 {
-       struct drm_device *dev = crtc->dev;
-       struct radeon_device *rdev = dev->dev_private;
        int index = GetIndexIntoMasterTable(COMMAND, EnableSpreadSpectrumOnPPLL);
        union atom_enable_ss args;
 
@@ -479,7 +474,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
        } else if (ASIC_IS_AVIVO(rdev)) {
                if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
                    (ss->type & ATOM_EXTERNAL_SS_MASK)) {
-                       atombios_disable_ss(crtc);
+                       atombios_disable_ss(rdev, pll_id);
                        return;
                }
                args.lvds_ss_2.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
@@ -491,7 +486,7 @@ static void atombios_crtc_program_ss(struct drm_crtc *crtc,
        } else {
                if ((enable == ATOM_DISABLE) || (ss->percentage == 0) ||
                    (ss->type & ATOM_EXTERNAL_SS_MASK)) {
-                       atombios_disable_ss(crtc);
+                       atombios_disable_ss(rdev, pll_id);
                        return;
                }
                args.lvds_ss.usSpreadSpectrumPercentage = cpu_to_le16(ss->percentage);
@@ -523,6 +518,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
        int encoder_mode = 0;
        u32 dp_clock = mode->clock;
        int bpc = 8;
+       bool is_duallink = false;
 
        /* reset the pll flags */
        pll->flags = 0;
@@ -557,6 +553,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                        if (connector && connector->display_info.bpc)
                                bpc = connector->display_info.bpc;
                        encoder_mode = atombios_get_encoder_mode(encoder);
+                       is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
                        if ((radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT | ATOM_DEVICE_DFP_SUPPORT)) ||
                            (radeon_encoder_get_dp_bridge_encoder_id(encoder) != ENCODER_OBJECT_ID_NONE)) {
                                if (connector) {
@@ -652,7 +649,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                                        if (dig->coherent_mode)
                                                args.v3.sInput.ucDispPllConfig |=
                                                        DISPPLL_CONFIG_COHERENT_MODE;
-                                       if (mode->clock > 165000)
+                                       if (is_duallink)
                                                args.v3.sInput.ucDispPllConfig |=
                                                        DISPPLL_CONFIG_DUAL_LINK;
                                }
@@ -702,11 +699,9 @@ union set_pixel_clock {
 /* on DCE5, make sure the voltage is high enough to support the
  * required disp clk.
  */
-static void atombios_crtc_set_dcpll(struct drm_crtc *crtc,
+static void atombios_crtc_set_dcpll(struct radeon_device *rdev,
                                    u32 dispclk)
 {
-       struct drm_device *dev = crtc->dev;
-       struct radeon_device *rdev = dev->dev_private;
        u8 frev, crev;
        int index;
        union set_pixel_clock args;
@@ -996,7 +991,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
                radeon_compute_pll_legacy(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
                                          &ref_div, &post_div);
 
-       atombios_crtc_program_ss(crtc, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
+       atombios_crtc_program_ss(rdev, ATOM_DISABLE, radeon_crtc->pll_id, &ss);
 
        atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
                                  encoder_mode, radeon_encoder->encoder_id, mode->clock,
@@ -1019,7 +1014,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
                        ss.step = step_size;
                }
 
-               atombios_crtc_program_ss(crtc, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
+               atombios_crtc_program_ss(rdev, ATOM_ENABLE, radeon_crtc->pll_id, &ss);
        }
 }
 
@@ -1189,7 +1184,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(EVERGREEN_GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
        WREG32(EVERGREEN_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
-              crtc->mode.vdisplay);
+              target_fb->height);
        x &= ~3;
        y &= ~1;
        WREG32(EVERGREEN_VIEWPORT_START + radeon_crtc->crtc_offset,
@@ -1358,7 +1353,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(AVIVO_D1GRPH_ENABLE + radeon_crtc->crtc_offset, 1);
 
        WREG32(AVIVO_D1MODE_DESKTOP_HEIGHT + radeon_crtc->crtc_offset,
-              crtc->mode.vdisplay);
+              target_fb->height);
        x &= ~3;
        y &= ~1;
        WREG32(AVIVO_D1MODE_VIEWPORT_START + radeon_crtc->crtc_offset,
@@ -1494,6 +1489,24 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
 
 }
 
+void radeon_atom_dcpll_init(struct radeon_device *rdev)
+{
+       /* always set DCPLL */
+       if (ASIC_IS_DCE4(rdev)) {
+               struct radeon_atom_ss ss;
+               bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
+                                                                  ASIC_INTERNAL_SS_ON_DCPLL,
+                                                                  rdev->clock.default_dispclk);
+               if (ss_enabled)
+                       atombios_crtc_program_ss(rdev, ATOM_DISABLE, ATOM_DCPLL, &ss);
+               /* XXX: DCE5, make sure voltage, dispclk is high enough */
+               atombios_crtc_set_dcpll(rdev, rdev->clock.default_dispclk);
+               if (ss_enabled)
+                       atombios_crtc_program_ss(rdev, ATOM_ENABLE, ATOM_DCPLL, &ss);
+       }
+
+}
+
 int atombios_crtc_mode_set(struct drm_crtc *crtc,
                           struct drm_display_mode *mode,
                           struct drm_display_mode *adjusted_mode,
@@ -1515,19 +1528,6 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
                }
        }
 
-       /* always set DCPLL */
-       if (ASIC_IS_DCE4(rdev)) {
-               struct radeon_atom_ss ss;
-               bool ss_enabled = radeon_atombios_get_asic_ss_info(rdev, &ss,
-                                                                  ASIC_INTERNAL_SS_ON_DCPLL,
-                                                                  rdev->clock.default_dispclk);
-               if (ss_enabled)
-                       atombios_crtc_program_ss(crtc, ATOM_DISABLE, ATOM_DCPLL, &ss);
-               /* XXX: DCE5, make sure voltage, dispclk is high enough */
-               atombios_crtc_set_dcpll(crtc, rdev->clock.default_dispclk);
-               if (ss_enabled)
-                       atombios_crtc_program_ss(crtc, ATOM_ENABLE, ATOM_DCPLL, &ss);
-       }
        atombios_crtc_set_pll(crtc, adjusted_mode);
 
        if (ASIC_IS_DCE4(rdev))
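
In the atombios_crtc.c hunks above, the crtc parameters of atombios_disable_ss(), atombios_crtc_program_ss() and atombios_crtc_set_dcpll() are replaced by rdev/pll_id so the display-engine PLL can be programmed without a crtc in hand. The new radeon_atom_dcpll_init() wraps that, and, per the radeon_device.c and radeon_display.c hunks later in this diff, is called next to radeon_atom_encoder_init() at modeset init and on resume. A sketch of the resulting call order (illustrative, mirroring those hunks):

	/* Sketch only -- mirrors the init/resume hunks later in this diff. */
	if (rdev->is_atom_bios) {
		radeon_atom_encoder_init(rdev);	/* init dig PHYs */
		radeon_atom_dcpll_init(rdev);	/* init disp eng pll (DCE4+) */
	}
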
index 6fb335a4fddafee8bdf3bfc0bbe48e54df265106..552b436451fd49d54df3edb412a63734debad250 100644 (file)
@@ -549,8 +549,8 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
        return false;
 }
 
-static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
-                                    struct drm_connector *connector)
+int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+                            struct drm_connector *connector)
 {
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
@@ -558,28 +558,33 @@ static void radeon_dp_set_panel_mode(struct drm_encoder *encoder,
        int panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
 
        if (!ASIC_IS_DCE4(rdev))
-               return;
+               return panel_mode;
 
        if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
            ENCODER_OBJECT_ID_NUTMEG)
                panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
        else if (radeon_connector_encoder_get_dp_bridge_encoder_id(connector) ==
-                ENCODER_OBJECT_ID_TRAVIS)
-               panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
-       else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+                ENCODER_OBJECT_ID_TRAVIS) {
+               u8 id[6];
+               int i;
+               for (i = 0; i < 6; i++)
+                       id[i] = radeon_read_dpcd_reg(radeon_connector, 0x503 + i);
+               if (id[0] == 0x73 &&
+                   id[1] == 0x69 &&
+                   id[2] == 0x76 &&
+                   id[3] == 0x61 &&
+                   id[4] == 0x72 &&
+                   id[5] == 0x54)
+                       panel_mode = DP_PANEL_MODE_INTERNAL_DP1_MODE;
+               else
+                       panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
+       } else if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
                u8 tmp = radeon_read_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_CAP);
                if (tmp & 1)
                        panel_mode = DP_PANEL_MODE_INTERNAL_DP2_MODE;
        }
 
-       atombios_dig_encoder_setup(encoder,
-                                  ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
-                                  panel_mode);
-
-       if ((connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
-           (panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
-               radeon_write_dpcd_reg(radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
-       }
+       return panel_mode;
 }
 
 void radeon_dp_set_link_config(struct drm_connector *connector,
@@ -717,6 +722,8 @@ static void radeon_dp_set_tp(struct radeon_dp_link_train_info *dp_info, int tp)
 
 static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
 {
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(dp_info->encoder);
+       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
        u8 tmp;
 
        /* power up the sink */
@@ -732,7 +739,10 @@ static int radeon_dp_link_train_init(struct radeon_dp_link_train_info *dp_info)
                radeon_write_dpcd_reg(dp_info->radeon_connector,
                                      DP_DOWNSPREAD_CTRL, 0);
 
-       radeon_dp_set_panel_mode(dp_info->encoder, dp_info->connector);
+       if ((dp_info->connector->connector_type == DRM_MODE_CONNECTOR_eDP) &&
+           (dig->panel_mode == DP_PANEL_MODE_INTERNAL_DP2_MODE)) {
+               radeon_write_dpcd_reg(dp_info->radeon_connector, DP_EDP_CONFIGURATION_SET, 1);
+       }
 
        /* set the lane count on the sink */
        tmp = dp_info->dp_lane_count;
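
In the atombios_dp.c hunks above, radeon_dp_set_panel_mode() becomes radeon_dp_get_panel_mode(): it now returns the panel mode so the encoder code can cache it in dig->panel_mode and apply it during encoder setup, while link-train init only writes DP_EDP_CONFIGURATION_SET itself. For a TRAVIS DP-to-LVDS bridge the code reads six DPCD bytes starting at 0x503 and compares them against fixed values; those constants are plain ASCII, as the sketch below shows (the array name is illustrative, not driver code):

	/* The magic numbers above spell "sivarT" in ASCII: */
	static const u8 travis_id[6] = {
		0x73, 0x69, 0x76, 0x61, 0x72, 0x54	/* 's','i','v','a','r','T' */
	};
	/* A match selects DP_PANEL_MODE_INTERNAL_DP1_MODE; anything else
	 * falls back to DP_PANEL_MODE_INTERNAL_DP2_MODE. */
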
index f1f06ca9f1f533fc89179c5109a60ca9af4f5ebb..b88c4608731becef4b196d706960c84e3a58917c 100644 (file)
@@ -57,22 +57,6 @@ static inline bool radeon_encoder_is_digital(struct drm_encoder *encoder)
        }
 }
 
-static struct drm_connector *
-radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
-{
-       struct drm_device *dev = encoder->dev;
-       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-       struct drm_connector *connector;
-       struct radeon_connector *radeon_connector;
-
-       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
-               radeon_connector = to_radeon_connector(connector);
-               if (radeon_encoder->devices & radeon_connector->devices)
-                       return connector;
-       }
-       return NULL;
-}
-
 static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
                                   struct drm_display_mode *mode,
                                   struct drm_display_mode *adjusted_mode)
@@ -253,7 +237,7 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
                        /* R4xx, R5xx */
                        args.ext_tmds.sXTmdsEncoder.ucEnable = action;
 
-                       if (radeon_encoder->pixel_clock > 165000)
+                       if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                args.ext_tmds.sXTmdsEncoder.ucMisc |= PANEL_ENCODER_MISC_DUAL;
 
                        args.ext_tmds.sXTmdsEncoder.ucMisc |= ATOM_PANEL_MISC_888RGB;
@@ -265,7 +249,7 @@ atombios_dvo_setup(struct drm_encoder *encoder, int action)
                        /* DFP1, CRT1, TV1 depending on the type of port */
                        args.dvo.sDVOEncoder.ucDeviceType = ATOM_DEVICE_DFP1_INDEX;
 
-                       if (radeon_encoder->pixel_clock > 165000)
+                       if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                args.dvo.sDVOEncoder.usDevAttr.sDigAttrib.ucAttribute |= PANEL_ENCODER_MISC_DUAL;
                        break;
                case 3:
@@ -349,7 +333,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
                        } else {
                                if (dig->linkb)
                                        args.v1.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
-                               if (radeon_encoder->pixel_clock > 165000)
+                               if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                        args.v1.ucMisc |= PANEL_ENCODER_MISC_DUAL;
                                /*if (pScrn->rgbBits == 8) */
                                args.v1.ucMisc |= ATOM_PANEL_MISC_888RGB;
@@ -388,7 +372,7 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
                        } else {
                                if (dig->linkb)
                                        args.v2.ucMisc |= PANEL_ENCODER_MISC_TMDS_LINKB;
-                               if (radeon_encoder->pixel_clock > 165000)
+                               if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                        args.v2.ucMisc |= PANEL_ENCODER_MISC_DUAL;
                        }
                        break;
@@ -432,7 +416,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        switch (connector->connector_type) {
        case DRM_MODE_CONNECTOR_DVII:
        case DRM_MODE_CONNECTOR_HDMIB: /* HDMI-B is basically DL-DVI; analog works fine */
-               if (drm_detect_monitor_audio(radeon_connector->edid) &&
+               if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
                    radeon_audio)
                        return ATOM_ENCODER_MODE_HDMI;
                else if (radeon_connector->use_digital)
@@ -443,7 +427,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
        case DRM_MODE_CONNECTOR_DVID:
        case DRM_MODE_CONNECTOR_HDMIA:
        default:
-               if (drm_detect_monitor_audio(radeon_connector->edid) &&
+               if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
                    radeon_audio)
                        return ATOM_ENCODER_MODE_HDMI;
                else
@@ -457,7 +441,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
                if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
                    (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
                        return ATOM_ENCODER_MODE_DP;
-               else if (drm_detect_monitor_audio(radeon_connector->edid) &&
+               else if (drm_detect_hdmi_monitor(radeon_connector->edid) &&
                         radeon_audio)
                        return ATOM_ENCODER_MODE_HDMI;
                else
@@ -587,7 +571,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
 
                        if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
                                args.v1.ucLaneNum = dp_lane_count;
-                       else if (radeon_encoder->pixel_clock > 165000)
+                       else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                args.v1.ucLaneNum = 8;
                        else
                                args.v1.ucLaneNum = 4;
@@ -622,7 +606,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
 
                        if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
                                args.v3.ucLaneNum = dp_lane_count;
-                       else if (radeon_encoder->pixel_clock > 165000)
+                       else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                args.v3.ucLaneNum = 8;
                        else
                                args.v3.ucLaneNum = 4;
@@ -662,7 +646,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mo
 
                        if (ENCODER_MODE_IS_DP(args.v1.ucEncoderMode))
                                args.v4.ucLaneNum = dp_lane_count;
-                       else if (radeon_encoder->pixel_clock > 165000)
+                       else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                args.v4.ucLaneNum = 8;
                        else
                                args.v4.ucLaneNum = 4;
@@ -806,7 +790,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                                if (is_dp)
                                        args.v1.usPixelClock =
                                                cpu_to_le16(dp_clock / 10);
-                               else if (radeon_encoder->pixel_clock > 165000)
+                               else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                        args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
                                else
                                        args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
@@ -821,7 +805,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 
                        if ((rdev->flags & RADEON_IS_IGP) &&
                            (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
-                               if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
+                               if (is_dp ||
+                                   !radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) {
                                        if (igp_lane_info & 0x1)
                                                args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
                                        else if (igp_lane_info & 0x2)
@@ -848,7 +833,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                        else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
                                if (dig->coherent_mode)
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
-                               if (radeon_encoder->pixel_clock > 165000)
+                               if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                        args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
                        }
                        break;
@@ -863,7 +848,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                                if (is_dp)
                                        args.v2.usPixelClock =
                                                cpu_to_le16(dp_clock / 10);
-                               else if (radeon_encoder->pixel_clock > 165000)
+                               else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                        args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
                                else
                                        args.v2.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
@@ -891,7 +876,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                        } else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
                                if (dig->coherent_mode)
                                        args.v2.acConfig.fCoherentMode = 1;
-                               if (radeon_encoder->pixel_clock > 165000)
+                               if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                        args.v2.acConfig.fDualLinkConnector = 1;
                        }
                        break;
@@ -906,7 +891,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                                if (is_dp)
                                        args.v3.usPixelClock =
                                                cpu_to_le16(dp_clock / 10);
-                               else if (radeon_encoder->pixel_clock > 165000)
+                               else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                        args.v3.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
                                else
                                        args.v3.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
@@ -914,7 +899,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 
                        if (is_dp)
                                args.v3.ucLaneNum = dp_lane_count;
-                       else if (radeon_encoder->pixel_clock > 165000)
+                       else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                args.v3.ucLaneNum = 8;
                        else
                                args.v3.ucLaneNum = 4;
@@ -951,7 +936,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                        else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
                                if (dig->coherent_mode)
                                        args.v3.acConfig.fCoherentMode = 1;
-                               if (radeon_encoder->pixel_clock > 165000)
+                               if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                        args.v3.acConfig.fDualLinkConnector = 1;
                        }
                        break;
@@ -966,7 +951,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                                if (is_dp)
                                        args.v4.usPixelClock =
                                                cpu_to_le16(dp_clock / 10);
-                               else if (radeon_encoder->pixel_clock > 165000)
+                               else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                        args.v4.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
                                else
                                        args.v4.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
@@ -974,7 +959,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
 
                        if (is_dp)
                                args.v4.ucLaneNum = dp_lane_count;
-                       else if (radeon_encoder->pixel_clock > 165000)
+                       else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                args.v4.ucLaneNum = 8;
                        else
                                args.v4.ucLaneNum = 4;
@@ -1014,7 +999,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
                        else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
                                if (dig->coherent_mode)
                                        args.v4.acConfig.fCoherentMode = 1;
-                               if (radeon_encoder->pixel_clock > 165000)
+                               if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                        args.v4.acConfig.fDualLinkConnector = 1;
                        }
                        break;
@@ -1137,7 +1122,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
                                if (dp_clock == 270000)
                                        args.v1.sDigEncoder.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
                                args.v1.sDigEncoder.ucLaneNum = dp_lane_count;
-                       } else if (radeon_encoder->pixel_clock > 165000)
+                       } else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                args.v1.sDigEncoder.ucLaneNum = 8;
                        else
                                args.v1.sDigEncoder.ucLaneNum = 4;
@@ -1156,7 +1141,7 @@ atombios_external_encoder_setup(struct drm_encoder *encoder,
                                else if (dp_clock == 540000)
                                        args.v3.sExtEncoder.ucConfig |= EXTERNAL_ENCODER_CONFIG_V3_DPLINKRATE_5_40GHZ;
                                args.v3.sExtEncoder.ucLaneNum = dp_lane_count;
-                       } else if (radeon_encoder->pixel_clock > 165000)
+                       } else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
                                args.v3.sExtEncoder.ucLaneNum = 8;
                        else
                                args.v3.sExtEncoder.ucLaneNum = 4;
@@ -1341,7 +1326,8 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
        switch (mode) {
        case DRM_MODE_DPMS_ON:
                /* some early dce3.2 boards have a bug in their transmitter control table */
-               if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730))
+               if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
+                   ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
                else
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
@@ -1351,8 +1337,6 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
                                                             ATOM_TRANSMITTER_ACTION_POWER_ON);
                                radeon_dig_connector->edp_on = true;
                        }
-                       if (ASIC_IS_DCE4(rdev))
-                               atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
                        radeon_dp_link_train(encoder, connector);
                        if (ASIC_IS_DCE4(rdev))
                                atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON, 0);
@@ -1363,7 +1347,10 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
-               atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
+               else
+                       atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
                if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
                        if (ASIC_IS_DCE4(rdev))
                                atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF, 0);
@@ -1810,7 +1797,21 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
        case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
        case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
-               if (ASIC_IS_DCE4(rdev)) {
+               if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
+                       struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+                       struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+
+                       if (!connector)
+                               dig->panel_mode = DP_PANEL_MODE_EXTERNAL_DP_MODE;
+                       else
+                               dig->panel_mode = radeon_dp_get_panel_mode(encoder, connector);
+
+                       /* setup and enable the encoder */
+                       atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
+                       atombios_dig_encoder_setup(encoder,
+                                                  ATOM_ENCODER_CMD_SETUP_PANEL_MODE,
+                                                  dig->panel_mode);
+               } else if (ASIC_IS_DCE4(rdev)) {
                        /* disable the transmitter */
                        atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
                        /* setup and enable the encoder */
index 636660fca8c246f9e65205fcae1ca379e180131c..ae09fe82afbc76702504a0ac74eda5eed7e05917 100644 (file)
@@ -1455,6 +1455,7 @@ int evergreen_cp_resume(struct radeon_device *rdev)
 #endif
        WREG32(CP_RB_CNTL, tmp);
        WREG32(CP_SEM_WAIT_TIMER, 0x0);
+       WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
 
        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);
index b502216d42afdc4638362bceec578adfdf998720..74713d42df296d32db965c7398d47bc8edc566c4 100644 (file)
 #define        CP_RB_WPTR_ADDR_HI                              0xC11C
 #define        CP_RB_WPTR_DELAY                                0x8704
 #define        CP_SEM_WAIT_TIMER                               0x85BC
+#define        CP_SEM_INCOMPLETE_TIMER_CNTL                    0x85C8
 #define        CP_DEBUG                                        0xC1FC
 
 
index 32113729540069f22ce31ffa18e23c7ccc290159..db09065e68fd60ee488da280199a382c5b19efb8 100644 (file)
@@ -1219,6 +1219,7 @@ int cayman_cp_resume(struct radeon_device *rdev)
        RREG32(GRBM_SOFT_RESET);
 
        WREG32(CP_SEM_WAIT_TIMER, 0x0);
+       WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);
 
        /* Set the write pointer delay */
        WREG32(CP_RB_WPTR_DELAY, 0);
index f9df2a645e79caeeab62396e71222b89026e6f8e..9a7f3b6e02de38acc1655b9e74fae3d5dac91448 100644 (file)
 #define        SCRATCH_UMSK                                    0x8540
 #define        SCRATCH_ADDR                                    0x8544
 #define        CP_SEM_WAIT_TIMER                               0x85BC
+#define        CP_SEM_INCOMPLETE_TIMER_CNTL                    0x85C8
 #define        CP_COHER_CNTL2                                  0x85E8
 #define CP_ME_CNTL                                     0x86D8
 #define                CP_ME_HALT                                      (1 << 28)
index d996f43811302fedd6f6a48169d1858b61d11df2..accc032c103fd3f846f2b57e6da4de8c693c5d1e 100644 (file)
@@ -468,27 +468,42 @@ set_default_state(struct radeon_device *rdev)
        radeon_ring_write(ring, sq_stack_resource_mgmt_2);
 }
 
+#define I2F_MAX_BITS 15
+#define I2F_MAX_INPUT  ((1 << I2F_MAX_BITS) - 1)
+#define I2F_SHIFT (24 - I2F_MAX_BITS)
+
+/*
+ * Converts unsigned integer into 32-bit IEEE floating point representation.
+ * Conversion is not universal and only works for the range from 0
+ * to 2^I2F_MAX_BITS-1. Currently we only use it with inputs between
+ * 0 and 16384 (inclusive), so I2F_MAX_BITS=15 is enough. If necessary,
+ * I2F_MAX_BITS can be increased, but that will add to the loop iterations
+ * and slow us down. Conversion is done by shifting the input and counting
+ * down until the first 1 reaches bit position 23. The resulting counter
+ * and the shifted input are, respectively, the exponent and the fraction.
+ * The sign is always zero.
+ */
 static uint32_t i2f(uint32_t input)
 {
        u32 result, i, exponent, fraction;
 
-       if ((input & 0x3fff) == 0)
-               result = 0; /* 0 is a special case */
+       WARN_ON_ONCE(input > I2F_MAX_INPUT);
+
+       if ((input & I2F_MAX_INPUT) == 0)
+               result = 0;
        else {
-               exponent = 140; /* exponent biased by 127; */
-               fraction = (input & 0x3fff) << 10; /* cheat and only
-                                                     handle numbers below 2^^15 */
-               for (i = 0; i < 14; i++) {
+               exponent = 126 + I2F_MAX_BITS;
+               fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;
+
+               for (i = 0; i < I2F_MAX_BITS; i++) {
                        if (fraction & 0x800000)
                                break;
                        else {
-                               fraction = fraction << 1; /* keep
-                                                            shifting left until top bit = 1 */
+                               fraction = fraction << 1;
                                exponent = exponent - 1;
                        }
                }
-               result = exponent << 23 | (fraction & 0x7fffff); /* mask
-                                                                   off top bit; assumed 1 */
+               result = exponent << 23 | (fraction & 0x7fffff);
        }
        return result;
 }
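
The rewritten i2f() above documents the conversion and derives the former magic constants (140, 0x3fff, the shift by 10) from I2F_MAX_BITS. A standalone worked example of the same loop, checking one value against its IEEE 754 encoding (userspace sketch for illustration, not driver code):

	#include <stdint.h>
	#include <stdio.h>

	#define I2F_MAX_BITS  15
	#define I2F_MAX_INPUT ((1 << I2F_MAX_BITS) - 1)
	#define I2F_SHIFT     (24 - I2F_MAX_BITS)

	static uint32_t i2f(uint32_t input)
	{
		uint32_t i, exponent, fraction;

		if ((input & I2F_MAX_INPUT) == 0)
			return 0;

		/* 126 + 15 = 141 is correct when the input's top bit is bit 14 */
		exponent = 126 + I2F_MAX_BITS;
		fraction = (input & I2F_MAX_INPUT) << I2F_SHIFT;
		for (i = 0; i < I2F_MAX_BITS; i++) {
			if (fraction & 0x800000)	/* leading 1 reached bit 23 */
				break;
			fraction <<= 1;
			exponent--;
		}
		return exponent << 23 | (fraction & 0x7fffff);
	}

	int main(void)
	{
		/* 1000 = 1.953125 * 2^9 -> biased exponent 136, mantissa 0x7A0000 */
		printf("i2f(1000) = 0x%08x (expect 0x447a0000)\n", i2f(1000));
		return 0;
	}
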
index 73e05cb85eca03cd2c65461ce1af7ca3f82e0bfc..1668ec1ee77047d945bb8cb952a523d51f8bd452 100644 (file)
@@ -156,6 +156,47 @@ static inline int radeon_atrm_get_bios_chunk(uint8_t *bios, int offset, int len)
 bool radeon_get_bios(struct radeon_device *rdev);
 
 
+/*
+ * Mutex which allows recursive locking from the same process.
+ */
+struct radeon_mutex {
+       struct mutex            mutex;
+       struct task_struct      *owner;
+       int                     level;
+};
+
+static inline void radeon_mutex_init(struct radeon_mutex *mutex)
+{
+       mutex_init(&mutex->mutex);
+       mutex->owner = NULL;
+       mutex->level = 0;
+}
+
+static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
+{
+       if (mutex_trylock(&mutex->mutex)) {
+               /* The mutex was unlocked before, so it's ours now */
+               mutex->owner = current;
+       } else if (mutex->owner != current) {
+               /* Another process locked the mutex, take it */
+               mutex_lock(&mutex->mutex);
+               mutex->owner = current;
+       }
+       /* Otherwise the mutex was already locked by this process */
+
+       mutex->level++;
+}
+
+static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
+{
+       if (--mutex->level > 0)
+               return;
+
+       mutex->owner = NULL;
+       mutex_unlock(&mutex->mutex);
+}
+
+
 /*
  * Dummy page
  */
@@ -598,7 +639,7 @@ struct radeon_ib {
  * mutex protects scheduled_ibs, ready, alloc_bm
  */
 struct radeon_ib_pool {
-       struct mutex                    mutex;
+       struct radeon_mutex             mutex;
        struct radeon_sa_manager        sa_manager;
        struct radeon_ib                ibs[RADEON_IB_POOL_SIZE];
        bool                            ready;
@@ -1354,47 +1395,6 @@ struct r600_vram_scratch {
 };
 
 
-/*
- * Mutex which allows recursive locking from the same process.
- */
-struct radeon_mutex {
-       struct mutex            mutex;
-       struct task_struct      *owner;
-       int                     level;
-};
-
-static inline void radeon_mutex_init(struct radeon_mutex *mutex)
-{
-       mutex_init(&mutex->mutex);
-       mutex->owner = NULL;
-       mutex->level = 0;
-}
-
-static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
-{
-       if (mutex_trylock(&mutex->mutex)) {
-               /* The mutex was unlocked before, so it's ours now */
-               mutex->owner = current;
-       } else if (mutex->owner != current) {
-               /* Another process locked the mutex, take it */
-               mutex_lock(&mutex->mutex);
-               mutex->owner = current;
-       }
-       /* Otherwise the mutex was already locked by this process */
-
-       mutex->level++;
-}
-
-static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
-{
-       if (--mutex->level > 0)
-               return;
-
-       mutex->owner = NULL;
-       mutex_unlock(&mutex->mutex);
-}
-
-
 /*
  * Core structure, functions and helpers.
  */
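
The recursive radeon_mutex helpers above are moved, not changed: struct radeon_ib_pool now embeds a struct radeon_mutex (see the earlier hunk in this file), so the definition has to appear before its first use in the header. A minimal usage sketch of the recursion the helper permits, with placeholder function names:

	/* Illustrative nesting only -- outer_path()/inner_path() are placeholders. */
	static void inner_path(struct radeon_device *rdev)
	{
		radeon_mutex_lock(&rdev->ib_pool.mutex);	/* level 1 -> 2, same owner, no deadlock */
		/* ... touch the ib pool ... */
		radeon_mutex_unlock(&rdev->ib_pool.mutex);	/* level 2 -> 1, mutex stays held */
	}

	static void outer_path(struct radeon_device *rdev)
	{
		radeon_mutex_lock(&rdev->ib_pool.mutex);	/* level 0 -> 1, takes the mutex */
		inner_path(rdev);
		radeon_mutex_unlock(&rdev->ib_pool.mutex);	/* level 1 -> 0, releases it */
	}
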
index 9d95792bea3eab3c961b1221918beb1e4378d2f9..98724fcb00885e8a6ba3e8e88937b209e545a8e8 100644 (file)
@@ -58,7 +58,8 @@ static int radeon_atrm_call(acpi_handle atrm_handle, uint8_t *bios,
        }
 
        obj = (union acpi_object *)buffer.pointer;
-       memcpy(bios+offset, obj->buffer.pointer, len);
+       memcpy(bios+offset, obj->buffer.pointer, obj->buffer.length);
+       len = obj->buffer.length;
        kfree(buffer.pointer);
        return len;
 }
index 229a20f10e2b0c548b02cd5df527b9b72d6fe31a..501f4881e5aab4d1f1dcb9b57b4ba1a61743d9a7 100644 (file)
@@ -120,7 +120,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
                ret = radeon_atrm_get_bios_chunk(rdev->bios,
                                                 (i * ATRM_BIOS_PAGE),
                                                 ATRM_BIOS_PAGE);
-               if (ret <= 0)
+               if (ret < ATRM_BIOS_PAGE)
                        break;
        }
 
index 0afb13bd8dcad47ef5572991fec27e126185416d..49f7cb7e226b893a4a95a068b25179a30f93c892 100644 (file)
@@ -720,7 +720,7 @@ int radeon_device_init(struct radeon_device *rdev,
        /* mutex initialization are all done here so we
         * can recall function without having locking issues */
        radeon_mutex_init(&rdev->cs_mutex);
-       mutex_init(&rdev->ib_pool.mutex);
+       radeon_mutex_init(&rdev->ib_pool.mutex);
        for (i = 0; i < RADEON_NUM_RINGS; ++i)
                mutex_init(&rdev->ring[i].mutex);
        mutex_init(&rdev->dc_hw_i2c_mutex);
@@ -883,6 +883,8 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
        if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
                return 0;
 
+       drm_kms_helper_poll_disable(dev);
+
        /* turn off display hw */
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
@@ -959,9 +961,11 @@ int radeon_resume_kms(struct drm_device *dev)
        radeon_fbdev_set_suspend(rdev, 0);
        console_unlock();
 
-       /* init dig PHYs */
-       if (rdev->is_atom_bios)
+       /* init dig PHYs, disp eng pll */
+       if (rdev->is_atom_bios) {
                radeon_atom_encoder_init(rdev);
+               radeon_atom_dcpll_init(rdev);
+       }
        /* reset hpd state */
        radeon_hpd_init(rdev);
        /* blat the mode back in */
@@ -970,6 +974,8 @@ int radeon_resume_kms(struct drm_device *dev)
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
        }
+
+       drm_kms_helper_poll_enable(dev);
        return 0;
 }
 
index d3ffc18774a611df74a66ea136fbd12906eeaa63..8c49fef1ce78d01765f0565b517f1f9bc23a5a1c 100644 (file)
@@ -1305,9 +1305,11 @@ int radeon_modeset_init(struct radeon_device *rdev)
                return ret;
        }
 
-       /* init dig PHYs */
-       if (rdev->is_atom_bios)
+       /* init dig PHYs, disp eng pll */
+       if (rdev->is_atom_bios) {
                radeon_atom_encoder_init(rdev);
+               radeon_atom_dcpll_init(rdev);
+       }
 
        /* initialize hpd */
        radeon_hpd_init(rdev);
index 4b27efa4405b94b63011b2e8948d678c35ccfd62..9419c51bcf50f0f98f86cfd8122730ceea70a4d6 100644 (file)
@@ -202,6 +202,22 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
        return NULL;
 }
 
+struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+       struct drm_connector *connector;
+       struct radeon_connector *radeon_connector;
+
+       list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+               radeon_connector = to_radeon_connector(connector);
+               if (radeon_encoder->devices & radeon_connector->devices)
+                       return connector;
+       }
+       return NULL;
+}
+
 struct drm_encoder *radeon_get_external_encoder(struct drm_encoder *encoder)
 {
        struct drm_device *dev = encoder->dev;
@@ -288,3 +304,64 @@ void radeon_panel_mode_fixup(struct drm_encoder *encoder,
 
 }
 
+bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
+                                   u32 pixel_clock)
+{
+       struct drm_device *dev = encoder->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct drm_connector *connector;
+       struct radeon_connector *radeon_connector;
+       struct radeon_connector_atom_dig *dig_connector;
+
+       connector = radeon_get_connector_for_encoder(encoder);
+       /* if we don't have an active device yet, just use one of
+        * the connectors tied to the encoder.
+        */
+       if (!connector)
+               connector = radeon_get_connector_for_encoder_init(encoder);
+       radeon_connector = to_radeon_connector(connector);
+
+       switch (connector->connector_type) {
+       case DRM_MODE_CONNECTOR_DVII:
+       case DRM_MODE_CONNECTOR_HDMIB:
+               if (radeon_connector->use_digital) {
+                       /* HDMI 1.3 supports up to 340 Mhz over single link */
+                       if (ASIC_IS_DCE3(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+                               if (pixel_clock > 340000)
+                                       return true;
+                               else
+                                       return false;
+                       } else {
+                               if (pixel_clock > 165000)
+                                       return true;
+                               else
+                                       return false;
+                       }
+               } else
+                       return false;
+       case DRM_MODE_CONNECTOR_DVID:
+       case DRM_MODE_CONNECTOR_HDMIA:
+       case DRM_MODE_CONNECTOR_DisplayPort:
+               dig_connector = radeon_connector->con_priv;
+               if ((dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+                   (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+                       return false;
+               else {
+                       /* HDMI 1.3 supports up to 340 Mhz over single link */
+                       if (ASIC_IS_DCE3(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+                               if (pixel_clock > 340000)
+                                       return true;
+                               else
+                                       return false;
+                       } else {
+                               if (pixel_clock > 165000)
+                                       return true;
+                               else
+                                       return false;
+                       }
+               }
+       default:
+               return false;
+       }
+}
+
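
The new radeon_dig_monitor_is_duallink() above centralizes the pixel_clock > 165000 checks replaced throughout atombios_encoders.c earlier in this diff, and raises the single-link limit to 340 MHz for HDMI 1.3-capable sinks on DCE3+. The repeated branching reduces to one comparison; a hedged condensation (illustrative, not driver code; clocks in kHz as in the callers):

	/* Illustrative condensation of the checks above: DP/eDP sinks are
	 * never dual link, everything else compares against one limit. */
	static bool duallink_needed(u32 pixel_clock_khz, bool dp_sink, bool hdmi13_sink)
	{
		u32 single_link_limit_khz = hdmi13_sink ? 340000 : 165000;

		if (dp_sink)
			return false;
		return pixel_clock_khz > single_link_limit_khz;
	}
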
index 7bb1b079f4806f6d70414809e43e060c1bd1cec2..98a8ad680109efc5fdb7b246d4a79a68839f383d 100644 (file)
@@ -897,6 +897,7 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
        i2c->rec = *rec;
        i2c->adapter.owner = THIS_MODULE;
        i2c->adapter.class = I2C_CLASS_DDC;
+       i2c->adapter.dev.parent = &dev->pdev->dev;
        i2c->dev = dev;
        i2c_set_adapdata(&i2c->adapter, i2c);
        if (rec->mm_i2c ||
@@ -957,6 +958,7 @@ struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
        i2c->rec = *rec;
        i2c->adapter.owner = THIS_MODULE;
        i2c->adapter.class = I2C_CLASS_DDC;
+       i2c->adapter.dev.parent = &dev->pdev->dev;
        i2c->dev = dev;
        snprintf(i2c->adapter.name, sizeof(i2c->adapter.name),
                 "Radeon aux bus %s", name);
index be38921bf761a570229afad2109b5aeb32127f29..66d5fe1c81747cfa73da445d1f2099d36e9e4261 100644 (file)
@@ -135,6 +135,12 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
            (rdev->pdev->subsystem_device == 0x30c2))
                return true;
 
+       /* Dell RS690 only seems to work with MSIs. */
+       if ((rdev->pdev->device == 0x791f) &&
+           (rdev->pdev->subsystem_vendor == 0x1028) &&
+           (rdev->pdev->subsystem_device == 0x01fc))
+               return true;
+
        /* Dell RS690 only seems to work with MSIs. */
        if ((rdev->pdev->device == 0x791f) &&
            (rdev->pdev->subsystem_vendor == 0x1028) &&
index 08ff857c8fd6609e9530b742470c743bde1e5c73..4330e3253573ffeb92a5dab9f39223e074fc65ca 100644 (file)
@@ -362,6 +362,7 @@ struct radeon_encoder_atom_dig {
        struct backlight_device *bl_dev;
        int dpms_mode;
        uint8_t backlight_level;
+       int panel_mode;
 };
 
 struct radeon_encoder_atom_dac {
@@ -466,6 +467,10 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev);
 
 extern struct drm_connector *
 radeon_get_connector_for_encoder(struct drm_encoder *encoder);
+extern struct drm_connector *
+radeon_get_connector_for_encoder_init(struct drm_encoder *encoder);
+extern bool radeon_dig_monitor_is_duallink(struct drm_encoder *encoder,
+                                   u32 pixel_clock);
 
 extern u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder);
 extern u16 radeon_connector_encoder_get_dp_bridge_encoder_id(struct drm_connector *connector);
@@ -482,8 +487,11 @@ extern void radeon_dp_link_train(struct drm_encoder *encoder,
 extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
 extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
 extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern int radeon_dp_get_panel_mode(struct drm_encoder *encoder,
+                                   struct drm_connector *connector);
 extern void atombios_dig_encoder_setup(struct drm_encoder *encoder, int action, int panel_mode);
 extern void radeon_atom_encoder_init(struct radeon_device *rdev);
+extern void radeon_atom_dcpll_init(struct radeon_device *rdev);
 extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
                                           int action, uint8_t lane_num,
                                           uint8_t lane_set);
index e8bc70933d1b342a9bd3b9d04bbcbfc14c5438dc..30a4c5014c8b3f6a19f2e5e6d3102c488ac013e2 100644 (file)
@@ -109,12 +109,12 @@ int radeon_ib_get(struct radeon_device *rdev, int ring,
                return r;
        }
 
-       mutex_lock(&rdev->ib_pool.mutex);
+       radeon_mutex_lock(&rdev->ib_pool.mutex);
        idx = rdev->ib_pool.head_id;
 retry:
        if (cretry > 5) {
                dev_err(rdev->dev, "failed to get an ib after 5 retry\n");
-               mutex_unlock(&rdev->ib_pool.mutex);
+               radeon_mutex_unlock(&rdev->ib_pool.mutex);
                radeon_fence_unref(&fence);
                return -ENOMEM;
        }
@@ -139,7 +139,7 @@ retry:
                                 */
                                rdev->ib_pool.head_id = (1 + idx);
                                rdev->ib_pool.head_id &= (RADEON_IB_POOL_SIZE - 1);
-                               mutex_unlock(&rdev->ib_pool.mutex);
+                               radeon_mutex_unlock(&rdev->ib_pool.mutex);
                                return 0;
                        }
                }
@@ -158,7 +158,7 @@ retry:
                }
                idx = (idx + 1) & (RADEON_IB_POOL_SIZE - 1);
        }
-       mutex_unlock(&rdev->ib_pool.mutex);
+       radeon_mutex_unlock(&rdev->ib_pool.mutex);
        radeon_fence_unref(&fence);
        return r;
 }
@@ -171,12 +171,12 @@ void radeon_ib_free(struct radeon_device *rdev, struct radeon_ib **ib)
        if (tmp == NULL) {
                return;
        }
-       mutex_lock(&rdev->ib_pool.mutex);
+       radeon_mutex_lock(&rdev->ib_pool.mutex);
        if (tmp->fence && !tmp->fence->emitted) {
                radeon_sa_bo_free(rdev, &tmp->sa_bo);
                radeon_fence_unref(&tmp->fence);
        }
-       mutex_unlock(&rdev->ib_pool.mutex);
+       radeon_mutex_unlock(&rdev->ib_pool.mutex);
 }
 
 int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
@@ -204,22 +204,25 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib)
 
 int radeon_ib_pool_init(struct radeon_device *rdev)
 {
+       struct radeon_sa_manager tmp;
        int i, r;
 
-       mutex_lock(&rdev->ib_pool.mutex);
-       if (rdev->ib_pool.ready) {
-               mutex_unlock(&rdev->ib_pool.mutex);
-               return 0;
-       }
-
-       r = radeon_sa_bo_manager_init(rdev, &rdev->ib_pool.sa_manager,
+       r = radeon_sa_bo_manager_init(rdev, &tmp,
                                      RADEON_IB_POOL_SIZE*64*1024,
                                      RADEON_GEM_DOMAIN_GTT);
        if (r) {
-               mutex_unlock(&rdev->ib_pool.mutex);
                return r;
        }
 
+       radeon_mutex_lock(&rdev->ib_pool.mutex);
+       if (rdev->ib_pool.ready) {
+               radeon_mutex_unlock(&rdev->ib_pool.mutex);
+               radeon_sa_bo_manager_fini(rdev, &tmp);
+               return 0;
+       }
+
+       rdev->ib_pool.sa_manager = tmp;
+       INIT_LIST_HEAD(&rdev->ib_pool.sa_manager.sa_bo);
        for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                rdev->ib_pool.ibs[i].fence = NULL;
                rdev->ib_pool.ibs[i].idx = i;
@@ -236,7 +239,7 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
        if (radeon_debugfs_ring_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for rings !\n");
        }
-       mutex_unlock(&rdev->ib_pool.mutex);
+       radeon_mutex_unlock(&rdev->ib_pool.mutex);
        return 0;
 }
 
@@ -244,7 +247,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
        unsigned i;
 
-       mutex_lock(&rdev->ib_pool.mutex);
+       radeon_mutex_lock(&rdev->ib_pool.mutex);
        if (rdev->ib_pool.ready) {
                for (i = 0; i < RADEON_IB_POOL_SIZE; i++) {
                        radeon_sa_bo_free(rdev, &rdev->ib_pool.ibs[i].sa_bo);
@@ -253,7 +256,7 @@ void radeon_ib_pool_fini(struct radeon_device *rdev)
                radeon_sa_bo_manager_fini(rdev, &rdev->ib_pool.sa_manager);
                rdev->ib_pool.ready = false;
        }
-       mutex_unlock(&rdev->ib_pool.mutex);
+       radeon_mutex_unlock(&rdev->ib_pool.mutex);
 }
 
 int radeon_ib_pool_start(struct radeon_device *rdev)
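The radeon_ib_pool_init() hunk above also changes the initialisation order: the suballocator is now set up before ib_pool.mutex is taken, and if another caller already marked the pool ready, the freshly built manager is simply torn down again. Below is a minimal sketch of that "prepare unlocked, commit under the lock" shape, using pthreads and invented names (pool, manager, pool_init) rather than the kernel's radeon_mutex/sa_manager types.

#include <pthread.h>
#include <stdbool.h>

struct manager { int handle; };

struct pool {
        pthread_mutex_t mutex;
        bool ready;
        struct manager manager;
};

/* stand-ins for the real, possibly sleeping, allocator */
static int  manager_init(struct manager *m) { m->handle = 1; return 0; }
static void manager_fini(struct manager *m) { m->handle = 0; }

static int pool_init(struct pool *p)
{
        struct manager tmp;
        int r;

        r = manager_init(&tmp);                 /* done before taking the lock */
        if (r)
                return r;

        pthread_mutex_lock(&p->mutex);
        if (p->ready) {                         /* another caller won the race */
                pthread_mutex_unlock(&p->mutex);
                manager_fini(&tmp);             /* drop the redundant allocation */
                return 0;
        }
        p->manager = tmp;                       /* commit the prepared state */
        p->ready = true;
        pthread_mutex_unlock(&p->mutex);
        return 0;
}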
index 06da063ece2e59681f3c51110006e13e7e58ae0f..573220cc5269fe48d6f0a5fb972c33c5731bb1af 100644 (file)
@@ -40,7 +40,6 @@ static struct pci_device_id pciidlist[] = {
 static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
 {
        drm_sis_private_t *dev_priv;
-       int ret;
 
        dev_priv = kzalloc(sizeof(drm_sis_private_t), GFP_KERNEL);
        if (dev_priv == NULL)
@@ -50,7 +49,7 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
        dev_priv->chipset = chipset;
        idr_init(&dev->object_name_idr);
 
-       return ret;
+       return 0;
 }
 
 static int sis_driver_unload(struct drm_device *dev)
index 2f0eab66ece6c1eb9eeffaae72a517727a0257ee..7c3a57de8187c603628758c8650448f889de550f 100644 (file)
@@ -404,6 +404,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                }
        }
 
+       if (bdev->driver->move_notify)
+               bdev->driver->move_notify(bo, mem);
+
        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
@@ -413,11 +416,17 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
 
-       if (ret)
-               goto out_err;
+       if (ret) {
+               if (bdev->driver->move_notify) {
+                       struct ttm_mem_reg tmp_mem = *mem;
+                       *mem = bo->mem;
+                       bo->mem = tmp_mem;
+                       bdev->driver->move_notify(bo, mem);
+                       bo->mem = *mem;
+               }
 
-       if (bdev->driver->move_notify)
-               bdev->driver->move_notify(bo, mem);
+               goto out_err;
+       }
 
 moved:
        if (bo->evicted) {
index 0af6ebdf205d821c6b68c825ea7db1ada77dff42..b66ef0e3cde14a2f98fa8d2a1245f41ca83c3268 100644 (file)
@@ -378,7 +378,7 @@ int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
                                  unsigned int *handle)
 {
        if (handle)
-               handle = 0;
+               *handle = 0;
 
        return 0;
 }
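The vmwgfx hunk above is a one-character fix: the original code assigned 0 to the local handle parameter, leaving the caller's variable untouched, while the corrected code writes through the pointer. A small standalone illustration of the difference (clear_wrong and clear_right are made-up names):

#include <stdio.h>

/* Buggy shape: only the local copy of the pointer is overwritten. */
static void clear_wrong(unsigned int *handle)
{
        if (handle)
                handle = 0;
}

/* Fixed shape, as in the hunk: the pointed-to value is cleared. */
static void clear_right(unsigned int *handle)
{
        if (handle)
                *handle = 0;
}

int main(void)
{
        unsigned int h = 42;

        clear_wrong(&h);
        printf("after clear_wrong: %u\n", h);   /* still 42 */
        clear_right(&h);
        printf("after clear_right: %u\n", h);   /* now 0 */
        return 0;
}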
index 0c33ae9cf0f0e27549f3fd505e242dbb98b18be2..406632472c1bb038cdf06dae37c7f115f479e94a 100644 (file)
@@ -548,6 +548,7 @@ static int mousevsc_remove(struct hv_device *dev)
        struct mousevsc_dev *input_dev = hv_get_drvdata(dev);
 
        vmbus_close(dev->channel);
+       hid_hw_stop(input_dev->hid_device);
        hid_destroy_device(input_dev->hid_device);
        mousevsc_free_device(input_dev);
 
index b47e58b52d9fbd1a28ef00d2d9392f8e44fe4d0f..acab74cde72730dd8660c6d7a8d64a8d7ba75451 100644 (file)
@@ -531,7 +531,6 @@ static int wacom_probe(struct hid_device *hdev,
        wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
        wdata->battery.use_for_apm = 0;
 
-       power_supply_powers(&wdata->battery, &hdev->dev);
 
        ret = power_supply_register(&hdev->dev, &wdata->battery);
        if (ret) {
@@ -540,6 +539,8 @@ static int wacom_probe(struct hid_device *hdev,
                goto err_battery;
        }
 
+       power_supply_powers(&wdata->battery, &hdev->dev);
+
        wdata->ac.properties = wacom_ac_props;
        wdata->ac.num_properties = ARRAY_SIZE(wacom_ac_props);
        wdata->ac.get_property = wacom_ac_get_property;
@@ -547,14 +548,14 @@ static int wacom_probe(struct hid_device *hdev,
        wdata->ac.type = POWER_SUPPLY_TYPE_MAINS;
        wdata->ac.use_for_apm = 0;
 
-       power_supply_powers(&wdata->battery, &hdev->dev);
-
        ret = power_supply_register(&hdev->dev, &wdata->ac);
        if (ret) {
                hid_warn(hdev,
                         "can't create ac battery attribute, err: %d\n", ret);
                goto err_ac;
        }
+
+       power_supply_powers(&wdata->ac, &hdev->dev);
 #endif
        return 0;
 
index fc253b472f9d4033922693aa980d947210b1d4c2..cac3589b1ed5ac192455b804db0ea321d603ec8a 100644 (file)
@@ -1226,14 +1226,14 @@ static int wiimote_hid_probe(struct hid_device *hdev,
        wdata->battery.type = POWER_SUPPLY_TYPE_BATTERY;
        wdata->battery.use_for_apm = 0;
 
-       power_supply_powers(&wdata->battery, &hdev->dev);
-
        ret = power_supply_register(&wdata->hdev->dev, &wdata->battery);
        if (ret) {
                hid_err(hdev, "Cannot register battery device\n");
                goto err_battery;
        }
 
+       power_supply_powers(&wdata->battery, &hdev->dev);
+
        ret = wiimote_leds_create(wdata);
        if (ret)
                goto err_free;
index 7c297d305d5dd42f7399eadb33e624fd9a6a0a69..b1ec0e2aeb57b0b26a66e17b067192cfc30689c1 100644 (file)
@@ -922,11 +922,11 @@ void hiddev_disconnect(struct hid_device *hid)
        struct hiddev *hiddev = hid->hiddev;
        struct usbhid_device *usbhid = hid->driver_data;
 
+       usb_deregister_dev(usbhid->intf, &hiddev_class);
+
        mutex_lock(&hiddev->existancelock);
        hiddev->exist = 0;
 
-       usb_deregister_dev(usbhid->intf, &hiddev_class);
-
        if (hiddev->open) {
                mutex_unlock(&hiddev->existancelock);
                usbhid_close(hiddev->hid);
index cb351d3583875a8ea1ebbb78ae681cb888e5dee3..02260406b9e440ac406ac3ae7fc3cded1f05bb4e 100644 (file)
@@ -474,8 +474,8 @@ config SENSORS_IT87
        select HWMON_VID
        help
          If you say yes here you get support for ITE IT8705F, IT8712F,
-         IT8716F, IT8718F, IT8720F, IT8721F, IT8726F and IT8758E sensor
-         chips, and the SiS960 clone.
+         IT8716F, IT8718F, IT8720F, IT8721F, IT8726F, IT8728F and IT8758E
+         sensor chips, and the SiS960 clone.
 
          This driver can also be built as a module.  If so, the module
          will be called it87.
@@ -515,11 +515,11 @@ config SENSORS_LINEAGE
          will be called lineage-pem.
 
 config SENSORS_LM63
-       tristate "National Semiconductor LM63 and LM64"
+       tristate "National Semiconductor LM63 and compatibles"
        depends on I2C
        help
          If you say yes here you get support for the National
-         Semiconductor LM63 and LM64 remote diode digital temperature
+         Semiconductor LM63, LM64, and LM96163 remote diode digital temperature
          sensors with integrated fan control.  Such chips are found
          on the Tyan S4882 (Thunder K8QS Pro) motherboard, among
          others.
index e6291dafa4caab89a36feb2cdc1a50abde70364c..97e2cfb0bc9365827bac1d40667954d18e4d0fc1 100644 (file)
@@ -155,7 +155,8 @@ adm1031_write_value(struct i2c_client *client, u8 reg, unsigned int value)
 #define TEMP_OFFSET_FROM_REG(val)      TEMP_FROM_REG((val) < 0 ? \
                                                      (val) | 0x70 : (val))
 
-#define FAN_FROM_REG(reg, div)         ((reg) ? (11250 * 60) / ((reg) * (div)) : 0)
+#define FAN_FROM_REG(reg, div)         ((reg) ? \
+                                        (11250 * 60) / ((reg) * (div)) : 0)
 
 static int FAN_TO_REG(int reg, int div)
 {
@@ -174,8 +175,8 @@ static int FAN_TO_REG(int reg, int div)
        (((reg) & 0x1F) | (((val) << 5) & 0xe0))
 
 #define AUTO_TEMP_MIN_TO_REG(val, reg) \
-       ((((val)/500) & 0xf8)|((reg) & 0x7))
-#define AUTO_TEMP_RANGE_FROM_REG(reg)  (5000 * (1<< ((reg)&0x7)))
+       ((((val) / 500) & 0xf8) | ((reg) & 0x7))
+#define AUTO_TEMP_RANGE_FROM_REG(reg)  (5000 * (1 << ((reg) & 0x7)))
 #define AUTO_TEMP_MIN_FROM_REG(reg)    (1000 * ((((reg) >> 3) & 0x1f) << 2))
 
 #define AUTO_TEMP_MIN_FROM_REG_DEG(reg)        ((((reg) >> 3) & 0x1f) << 2)
@@ -202,7 +203,7 @@ static int AUTO_TEMP_MAX_TO_REG(int val, int reg, int pwm)
 
 /* FAN auto control */
 #define GET_FAN_AUTO_BITFIELD(data, idx)       \
-       (*(data)->chan_select_table)[FAN_CHAN_FROM_REG((data)->conf1)][idx%2]
+       (*(data)->chan_select_table)[FAN_CHAN_FROM_REG((data)->conf1)][idx % 2]
 
 /* The tables below contains the possible values for the auto fan
  * control bitfields. the index in the table is the register value.
@@ -230,7 +231,7 @@ static const auto_chan_table_t auto_channel_select_table_adm1030 = {
  */
 static int
 get_fan_auto_nearest(struct adm1031_data *data,
-                    int chan, u8 val, u8 reg, u8 * new_reg)
+                    int chan, u8 val, u8 reg, u8 *new_reg)
 {
        int i;
        int first_match = -1, exact_match = -1;
@@ -258,13 +259,13 @@ get_fan_auto_nearest(struct adm1031_data *data,
                }
        }
 
-       if (exact_match >= 0) {
+       if (exact_match >= 0)
                *new_reg = exact_match;
-       } else if (first_match >= 0) {
+       else if (first_match >= 0)
                *new_reg = first_match;
-       } else {
+       else
                return -EINVAL;
-       }
+
        return 0;
 }
 
@@ -283,23 +284,28 @@ set_fan_auto_channel(struct device *dev, struct device_attribute *attr,
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
        int nr = to_sensor_dev_attr(attr)->index;
-       int val = simple_strtol(buf, NULL, 10);
+       long val;
        u8 reg;
        int ret;
        u8 old_fan_mode;
 
+       ret = kstrtol(buf, 10, &val);
+       if (ret)
+               return ret;
+
        old_fan_mode = data->conf1;
 
        mutex_lock(&data->update_lock);
 
-       if ((ret = get_fan_auto_nearest(data, nr, val, data->conf1, &reg))) {
+       ret = get_fan_auto_nearest(data, nr, val, data->conf1, &reg);
+       if (ret) {
                mutex_unlock(&data->update_lock);
                return ret;
        }
        data->conf1 = FAN_CHAN_TO_REG(reg, data->conf1);
        if ((data->conf1 & ADM1031_CONF1_AUTO_MODE) ^
            (old_fan_mode & ADM1031_CONF1_AUTO_MODE)) {
-               if (data->conf1 & ADM1031_CONF1_AUTO_MODE){
+               if (data->conf1 & ADM1031_CONF1_AUTO_MODE) {
                        /* Switch to Auto Fan Mode
                         * Save PWM registers
                         * Set PWM registers to 33% Both */
@@ -350,7 +356,12 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
        int nr = to_sensor_dev_attr(attr)->index;
-       int val = simple_strtol(buf, NULL, 10);
+       long val;
+       int ret;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret)
+               return ret;
 
        mutex_lock(&data->update_lock);
        data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]);
@@ -374,10 +385,16 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
        int nr = to_sensor_dev_attr(attr)->index;
-       int val = simple_strtol(buf, NULL, 10);
+       long val;
+       int ret;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret)
+               return ret;
 
        mutex_lock(&data->update_lock);
-       data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], data->pwm[nr]);
+       data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr],
+                                                 data->pwm[nr]);
        adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr),
                            data->temp_max[nr]);
        mutex_unlock(&data->update_lock);
@@ -410,8 +427,12 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
        int nr = to_sensor_dev_attr(attr)->index;
-       int val = simple_strtol(buf, NULL, 10);
-       int reg;
+       long val;
+       int ret, reg;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret)
+               return ret;
 
        mutex_lock(&data->update_lock);
        if ((data->conf1 & ADM1031_CONF1_AUTO_MODE) &&
@@ -449,9 +470,13 @@ static int trust_fan_readings(struct adm1031_data *data, int chan)
 
        if (data->conf1 & ADM1031_CONF1_AUTO_MODE) {
                switch (data->conf1 & 0x60) {
-               case 0x00:      /* remote temp1 controls fan1 remote temp2 controls fan2 */
+               case 0x00:
+                       /*
+                        * remote temp1 controls fan1,
+                        * remote temp2 controls fan2
+                        */
                        res = data->temp[chan+1] >=
-                             AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[chan+1]);
+                           AUTO_TEMP_MIN_FROM_REG_DEG(data->auto_temp[chan+1]);
                        break;
                case 0x20:      /* remote temp1 controls both fans */
                        res =
@@ -515,7 +540,12 @@ static ssize_t set_fan_min(struct device *dev, struct device_attribute *attr,
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
        int nr = to_sensor_dev_attr(attr)->index;
-       int val = simple_strtol(buf, NULL, 10);
+       long val;
+       int ret;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret)
+               return ret;
 
        mutex_lock(&data->update_lock);
        if (val) {
@@ -534,10 +564,15 @@ static ssize_t set_fan_div(struct device *dev, struct device_attribute *attr,
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
        int nr = to_sensor_dev_attr(attr)->index;
-       int val = simple_strtol(buf, NULL, 10);
+       long val;
        u8 tmp;
        int old_div;
        int new_min;
+       int ret;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret)
+               return ret;
 
        tmp = val == 8 ? 0xc0 :
              val == 4 ? 0x80 :
@@ -631,9 +666,13 @@ static ssize_t set_temp_offset(struct device *dev,
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
        int nr = to_sensor_dev_attr(attr)->index;
-       int val;
+       long val;
+       int ret;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret)
+               return ret;
 
-       val = simple_strtol(buf, NULL, 10);
        val = SENSORS_LIMIT(val, -15000, 15000);
        mutex_lock(&data->update_lock);
        data->temp_offset[nr] = TEMP_OFFSET_TO_REG(val);
@@ -648,9 +687,13 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
        int nr = to_sensor_dev_attr(attr)->index;
-       int val;
+       long val;
+       int ret;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret)
+               return ret;
 
-       val = simple_strtol(buf, NULL, 10);
        val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
        mutex_lock(&data->update_lock);
        data->temp_min[nr] = TEMP_TO_REG(val);
@@ -665,9 +708,13 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
        int nr = to_sensor_dev_attr(attr)->index;
-       int val;
+       long val;
+       int ret;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret)
+               return ret;
 
-       val = simple_strtol(buf, NULL, 10);
        val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
        mutex_lock(&data->update_lock);
        data->temp_max[nr] = TEMP_TO_REG(val);
@@ -682,9 +729,13 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
        struct i2c_client *client = to_i2c_client(dev);
        struct adm1031_data *data = i2c_get_clientdata(client);
        int nr = to_sensor_dev_attr(attr)->index;
-       int val;
+       long val;
+       int ret;
+
+       ret = kstrtol(buf, 10, &val);
+       if (ret)
+               return ret;
 
-       val = simple_strtol(buf, NULL, 10);
        val = SENSORS_LIMIT(val, -55000, nr == 0 ? 127750 : 127875);
        mutex_lock(&data->update_lock);
        data->temp_crit[nr] = TEMP_TO_REG(val);
@@ -711,7 +762,8 @@ temp_reg(2);
 temp_reg(3);
 
 /* Alarms */
-static ssize_t show_alarms(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_alarms(struct device *dev, struct device_attribute *attr,
+                          char *buf)
 {
        struct adm1031_data *data = adm1031_update_device(dev);
        return sprintf(buf, "%d\n", data->alarm);
@@ -919,12 +971,13 @@ static int adm1031_probe(struct i2c_client *client,
        adm1031_init_client(client);
 
        /* Register sysfs hooks */
-       if ((err = sysfs_create_group(&client->dev.kobj, &adm1031_group)))
+       err = sysfs_create_group(&client->dev.kobj, &adm1031_group);
+       if (err)
                goto exit_free;
 
        if (data->chip_type == adm1031) {
-               if ((err = sysfs_create_group(&client->dev.kobj,
-                                               &adm1031_group_opt)))
+               err = sysfs_create_group(&client->dev.kobj, &adm1031_group_opt);
+               if (err)
                        goto exit_remove;
        }
 
@@ -970,14 +1023,13 @@ static void adm1031_init_client(struct i2c_client *client)
        }
        /* Initialize the ADM1031 chip (enables fan speed reading ) */
        read_val = adm1031_read_value(client, ADM1031_REG_CONF2);
-       if ((read_val | mask) != read_val) {
-           adm1031_write_value(client, ADM1031_REG_CONF2, read_val | mask);
-       }
+       if ((read_val | mask) != read_val)
+               adm1031_write_value(client, ADM1031_REG_CONF2, read_val | mask);
 
        read_val = adm1031_read_value(client, ADM1031_REG_CONF1);
        if ((read_val | ADM1031_CONF1_MONITOR_ENABLE) != read_val) {
-           adm1031_write_value(client, ADM1031_REG_CONF1, read_val |
-                               ADM1031_CONF1_MONITOR_ENABLE);
+               adm1031_write_value(client, ADM1031_REG_CONF1,
+                                   read_val | ADM1031_CONF1_MONITOR_ENABLE);
        }
 
        /* Read the chip's update rate */
@@ -1024,8 +1076,7 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
                                /* oldh is actually newer */
                                if (newh != oldh)
                                        dev_warn(&client->dev,
-                                                "Remote temperature may be "
-                                                "wrong.\n");
+                                         "Remote temperature may be wrong.\n");
 #endif
                        }
                        data->temp[chan] = newh;
@@ -1052,22 +1103,24 @@ static struct adm1031_data *adm1031_update_device(struct device *dev)
                data->conf2 = adm1031_read_value(client, ADM1031_REG_CONF2);
 
                data->alarm = adm1031_read_value(client, ADM1031_REG_STATUS(0))
-                            | (adm1031_read_value(client, ADM1031_REG_STATUS(1))
-                               << 8);
-               if (data->chip_type == adm1030) {
+                   | (adm1031_read_value(client, ADM1031_REG_STATUS(1)) << 8);
+               if (data->chip_type == adm1030)
                        data->alarm &= 0xc0ff;
-               }
 
-               for (chan=0; chan<(data->chip_type == adm1030 ? 1 : 2); chan++) {
+               for (chan = 0; chan < (data->chip_type == adm1030 ? 1 : 2);
+                    chan++) {
                        data->fan_div[chan] =
-                           adm1031_read_value(client, ADM1031_REG_FAN_DIV(chan));
+                           adm1031_read_value(client,
+                                              ADM1031_REG_FAN_DIV(chan));
                        data->fan_min[chan] =
-                           adm1031_read_value(client, ADM1031_REG_FAN_MIN(chan));
+                           adm1031_read_value(client,
+                                              ADM1031_REG_FAN_MIN(chan));
                        data->fan[chan] =
-                           adm1031_read_value(client, ADM1031_REG_FAN_SPEED(chan));
+                           adm1031_read_value(client,
+                                              ADM1031_REG_FAN_SPEED(chan));
                        data->pwm[chan] =
-                           0xf & (adm1031_read_value(client, ADM1031_REG_PWM) >>
-                                  (4*chan));
+                         (adm1031_read_value(client,
+                                       ADM1031_REG_PWM) >> (4 * chan)) & 0x0f;
                }
                data->last_updated = jiffies;
                data->valid = 1;
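Most of the adm1031 changes above follow one pattern: the sysfs store handlers stop using simple_strtol() and instead call kstrtol()/kstrtoul() and propagate the error, so malformed input is rejected rather than silently converted to a number. A userspace analogue of that parse-then-check shape, with parse_long() as an invented stand-in for kstrtol():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Full-string parse that reports failure instead of returning a bogus value. */
static int parse_long(const char *buf, int base, long *res)
{
        char *end;
        long val;

        errno = 0;
        val = strtol(buf, &end, base);
        if (end == buf || (*end != '\0' && *end != '\n'))
                return -EINVAL;
        if (errno == ERANGE)
                return -ERANGE;
        *res = val;
        return 0;
}

int main(void)
{
        long val;
        int ret = parse_long("1500\n", 10, &val);

        if (ret)
                fprintf(stderr, "rejected: %d\n", ret);
        else
                printf("accepted: %ld\n", val);
        return 0;
}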
index 1fdef885341c4438842c92a381726e864dd3868c..a6c6ec36615e686b296fd4b199107eae08f2ba34 100644 (file)
@@ -190,7 +190,8 @@ static ssize_t show_temp(struct device *dev,
        return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
 }
 
-static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
+                                 struct device *dev)
 {
        /* The 100C is default for both mobile and non mobile CPUs */
 
@@ -284,7 +285,8 @@ static int adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
        return tjmax;
 }
 
-static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
+static int __cpuinit get_tjmax(struct cpuinfo_x86 *c, u32 id,
+                              struct device *dev)
 {
        int err;
        u32 eax, edx;
@@ -323,7 +325,8 @@ static int get_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *dev)
        return adjust_tjmax(c, id, dev);
 }
 
-static int create_name_attr(struct platform_data *pdata, struct device *dev)
+static int __devinit create_name_attr(struct platform_data *pdata,
+                                     struct device *dev)
 {
        sysfs_attr_init(&pdata->name_attr.attr);
        pdata->name_attr.attr.name = "name";
@@ -332,8 +335,8 @@ static int create_name_attr(struct platform_data *pdata, struct device *dev)
        return device_create_file(dev, &pdata->name_attr);
 }
 
-static int create_core_attrs(struct temp_data *tdata, struct device *dev,
-                               int attr_no)
+static int __cpuinit create_core_attrs(struct temp_data *tdata,
+                                      struct device *dev, int attr_no)
 {
        int err, i;
        static ssize_t (*const rd_ptr[TOTAL_ATTRS]) (struct device *dev,
@@ -383,7 +386,7 @@ static int __cpuinit chk_ucode_version(unsigned int cpu)
        return 0;
 }
 
-static struct platform_device *coretemp_get_pdev(unsigned int cpu)
+static struct platform_device __cpuinit *coretemp_get_pdev(unsigned int cpu)
 {
        u16 phys_proc_id = TO_PHYS_ID(cpu);
        struct pdev_entry *p;
@@ -400,7 +403,8 @@ static struct platform_device *coretemp_get_pdev(unsigned int cpu)
        return NULL;
 }
 
-static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
+static struct temp_data __cpuinit *init_temp_data(unsigned int cpu,
+                                                 int pkg_flag)
 {
        struct temp_data *tdata;
 
@@ -418,7 +422,7 @@ static struct temp_data *init_temp_data(unsigned int cpu, int pkg_flag)
        return tdata;
 }
 
-static int create_core_data(struct platform_device *pdev,
+static int __cpuinit create_core_data(struct platform_device *pdev,
                                unsigned int cpu, int pkg_flag)
 {
        struct temp_data *tdata;
@@ -489,7 +493,7 @@ exit_free:
        return err;
 }
 
-static void coretemp_add_core(unsigned int cpu, int pkg_flag)
+static void __cpuinit coretemp_add_core(unsigned int cpu, int pkg_flag)
 {
        struct platform_device *pdev = coretemp_get_pdev(cpu);
        int err;
@@ -618,7 +622,7 @@ exit:
        return err;
 }
 
-static void coretemp_device_remove(unsigned int cpu)
+static void __cpuinit coretemp_device_remove(unsigned int cpu)
 {
        struct pdev_entry *p, *n;
        u16 phys_proc_id = TO_PHYS_ID(cpu);
@@ -634,7 +638,7 @@ static void coretemp_device_remove(unsigned int cpu)
        mutex_unlock(&pdev_list_mutex);
 }
 
-static bool is_any_core_online(struct platform_data *pdata)
+static bool __cpuinit is_any_core_online(struct platform_data *pdata)
 {
        int i;
 
index 92f949767ece5ccd6878896893177e34be63da2e..6dbfd3e516e4820ef182ce2fca2ad3e787ab1c59 100644 (file)
@@ -283,11 +283,11 @@ static inline long temp_from_reg(u8 reg)
 
 static inline u8 temp_to_reg(long val)
 {
-       if (val < 0)
-               val = 0;
-       else if (val > 1000 * 0xff)
-               val = 0xff;
-       return ((val + 500) / 1000);
+       if (val <= 0)
+               return 0;
+       if (val >= 1000 * 0xff)
+               return 0xff;
+       return (val + 500) / 1000;
 }
 
 /*
index 603ef2af270758fb4fd9a6f860a779de40c4288f..0054d6f9cec95e4645608c7fec6fde4f646344ac 100644 (file)
@@ -17,6 +17,7 @@
  *            IT8720F  Super I/O chip w/LPC interface
  *            IT8721F  Super I/O chip w/LPC interface
  *            IT8726F  Super I/O chip w/LPC interface
+ *            IT8728F  Super I/O chip w/LPC interface
  *            IT8758E  Super I/O chip w/LPC interface
  *            Sis950   A clone of the IT8705F
  *
@@ -58,7 +59,7 @@
 
 #define DRVNAME "it87"
 
-enum chips { it87, it8712, it8716, it8718, it8720, it8721 };
+enum chips { it87, it8712, it8716, it8718, it8720, it8721, it8728 };
 
 static unsigned short force_id;
 module_param(force_id, ushort, 0);
@@ -135,6 +136,7 @@ static inline void superio_exit(void)
 #define IT8720F_DEVID 0x8720
 #define IT8721F_DEVID 0x8721
 #define IT8726F_DEVID 0x8726
+#define IT8728F_DEVID 0x8728
 #define IT87_ACT_REG  0x30
 #define IT87_BASE_REG 0x60
 
@@ -274,11 +276,31 @@ struct it87_data {
        s8 auto_temp[3][5];     /* [nr][0] is point1_temp_hyst */
 };
 
+static inline int has_12mv_adc(const struct it87_data *data)
+{
+       /*
+        * IT8721F and later have a 12 mV ADC, also with internal scaling
+        * on selected inputs.
+        */
+       return data->type == it8721
+           || data->type == it8728;
+}
+
+static inline int has_newer_autopwm(const struct it87_data *data)
+{
+       /*
+        * IT8721F and later have separate registers for the temperature
+        * mapping and the manual duty cycle.
+        */
+       return data->type == it8721
+           || data->type == it8728;
+}
+
 static u8 in_to_reg(const struct it87_data *data, int nr, long val)
 {
        long lsb;
 
-       if (data->type == it8721) {
+       if (has_12mv_adc(data)) {
                if (data->in_scaled & (1 << nr))
                        lsb = 24;
                else
@@ -292,7 +314,7 @@ static u8 in_to_reg(const struct it87_data *data, int nr, long val)
 
 static int in_from_reg(const struct it87_data *data, int nr, int val)
 {
-       if (data->type == it8721) {
+       if (has_12mv_adc(data)) {
                if (data->in_scaled & (1 << nr))
                        return val * 24;
                else
@@ -329,7 +351,7 @@ static inline u16 FAN16_TO_REG(long rpm)
 
 static u8 pwm_to_reg(const struct it87_data *data, long val)
 {
-       if (data->type == it8721)
+       if (has_newer_autopwm(data))
                return val;
        else
                return val >> 1;
@@ -337,7 +359,7 @@ static u8 pwm_to_reg(const struct it87_data *data, long val)
 
 static int pwm_from_reg(const struct it87_data *data, u8 reg)
 {
-       if (data->type == it8721)
+       if (has_newer_autopwm(data))
                return reg;
        else
                return (reg & 0x7f) << 1;
@@ -374,7 +396,8 @@ static inline int has_16bit_fans(const struct it87_data *data)
            || data->type == it8716
            || data->type == it8718
            || data->type == it8720
-           || data->type == it8721;
+           || data->type == it8721
+           || data->type == it8728;
 }
 
 static inline int has_old_autopwm(const struct it87_data *data)
@@ -842,7 +865,7 @@ static ssize_t set_pwm_enable(struct device *dev,
                                 data->fan_main_ctrl);
        } else {
                if (val == 1)                           /* Manual mode */
-                       data->pwm_ctrl[nr] = data->type == it8721 ?
+                       data->pwm_ctrl[nr] = has_newer_autopwm(data) ?
                                             data->pwm_temp_map[nr] :
                                             data->pwm_duty[nr];
                else                                    /* Automatic mode */
@@ -870,7 +893,7 @@ static ssize_t set_pwm(struct device *dev, struct device_attribute *attr,
                return -EINVAL;
 
        mutex_lock(&data->update_lock);
-       if (data->type == it8721) {
+       if (has_newer_autopwm(data)) {
                /* If we are in automatic mode, the PWM duty cycle register
                 * is read-only so we can't write the value */
                if (data->pwm_ctrl[nr] & 0x80) {
@@ -1311,8 +1334,8 @@ static ssize_t show_label(struct device *dev, struct device_attribute *attr,
        struct it87_data *data = dev_get_drvdata(dev);
        int nr = to_sensor_dev_attr(attr)->index;
 
-       return sprintf(buf, "%s\n", data->type == it8721 ? labels_it8721[nr]
-                                                        : labels[nr]);
+       return sprintf(buf, "%s\n", has_12mv_adc(data) ? labels_it8721[nr]
+                                                      : labels[nr]);
 }
 static SENSOR_DEVICE_ATTR(in3_label, S_IRUGO, show_label, NULL, 0);
 static SENSOR_DEVICE_ATTR(in7_label, S_IRUGO, show_label, NULL, 1);
@@ -1605,6 +1628,9 @@ static int __init it87_find(unsigned short *address,
        case IT8721F_DEVID:
                sio_data->type = it8721;
                break;
+       case IT8728F_DEVID:
+               sio_data->type = it8728;
+               break;
        case 0xffff:    /* No device at all */
                goto exit;
        default:
@@ -1646,8 +1672,11 @@ static int __init it87_find(unsigned short *address,
                superio_select(GPIO);
 
                reg = superio_inb(IT87_SIO_GPIO3_REG);
-               if (sio_data->type == it8721) {
-                       /* The IT8721F/IT8758E doesn't have VID pins at all */
+               if (sio_data->type == it8721 || sio_data->type == it8728) {
+                       /*
+                        * The IT8721F/IT8758E doesn't have VID pins at all,
+                        * not sure about the IT8728F.
+                        */
                        sio_data->skip_vid = 1;
                } else {
                        /* We need at least 4 VID pins */
@@ -1692,7 +1721,8 @@ static int __init it87_find(unsigned short *address,
                }
                if (reg & (1 << 0))
                        sio_data->internal |= (1 << 0);
-               if ((reg & (1 << 1)) || sio_data->type == it8721)
+               if ((reg & (1 << 1)) || sio_data->type == it8721 ||
+                   sio_data->type == it8728)
                        sio_data->internal |= (1 << 1);
 
                sio_data->beep_pin = superio_inb(IT87_SIO_BEEP_PIN_REG) & 0x3f;
@@ -1770,6 +1800,7 @@ static int __devinit it87_probe(struct platform_device *pdev)
                "it8718",
                "it8720",
                "it8721",
+               "it8728",
        };
 
        res = platform_get_resource(pdev, IORESOURCE_IO, 0);
@@ -1807,7 +1838,7 @@ static int __devinit it87_probe(struct platform_device *pdev)
        enable_pwm_interface = it87_check_pwm(dev);
 
        /* Starting with IT8721F, we handle scaling of internal voltages */
-       if (data->type == it8721) {
+       if (has_12mv_adc(data)) {
                if (sio_data->internal & (1 << 0))
                        data->in_scaled |= (1 << 3);    /* in3 is AVCC */
                if (sio_data->internal & (1 << 1))
@@ -2093,7 +2124,7 @@ static void __devinit it87_init_device(struct platform_device *pdev)
 static void it87_update_pwm_ctrl(struct it87_data *data, int nr)
 {
        data->pwm_ctrl[nr] = it87_read_value(data, IT87_REG_PWM(nr));
-       if (data->type == it8721) {
+       if (has_newer_autopwm(data)) {
                data->pwm_temp_map[nr] = data->pwm_ctrl[nr] & 0x03;
                data->pwm_duty[nr] = it87_read_value(data,
                                                     IT87_REG_PWM_DUTY(nr));
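The it87 hunks above also concentrate chip-capability checks into helpers (has_12mv_adc(), has_newer_autopwm()) instead of comparing data->type against it8721 at every call site, which is what lets IT8728F support ride along with a handful of additions. A minimal sketch of the same idea, with invented chip names rather than the driver's enum:

#include <stdbool.h>

enum chip { CHIP_OLD, CHIP_NEW, CHIP_NEWER };   /* illustrative only */

struct chip_data { enum chip type; };

/* One place to extend when the next chip generation appears. */
static bool has_fine_adc(const struct chip_data *d)
{
        return d->type == CHIP_NEW || d->type == CHIP_NEWER;
}

static int adc_lsb_uv(const struct chip_data *d)
{
        return has_fine_adc(d) ? 12000 : 16000;  /* 12 mV vs. 16 mV per LSB */
}

int main(void)
{
        struct chip_data d = { .type = CHIP_NEW };

        return adc_lsb_uv(&d) == 12000 ? 0 : 1;
}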
index 508cb291f71bfc35681bfda50dbad88a1a8a8dc2..5e6457a6644d879ab94830edd6881e0826f6678c 100644 (file)
 #include <linux/err.h>
 #include <linux/mutex.h>
 #include <linux/sysfs.h>
+#include <linux/types.h>
 
 /*
  * Addresses to scan
- * Address is fully defined internally and cannot be changed.
+ * Address is fully defined internally and cannot be changed except for
+ * LM64 which has one pin dedicated to address selection.
+ * LM63 and LM96163 have address 0x4c.
+ * LM64 can have address 0x18 or 0x4e.
  */
 
 static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
@@ -60,6 +64,7 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
  */
 
 #define LM63_REG_CONFIG1               0x03
+#define LM63_REG_CONVRATE              0x04
 #define LM63_REG_CONFIG2               0xBF
 #define LM63_REG_CONFIG_FAN            0x4A
 
@@ -70,6 +75,9 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
 
 #define LM63_REG_PWM_VALUE             0x4C
 #define LM63_REG_PWM_FREQ              0x4D
+#define LM63_REG_LUT_TEMP_HYST         0x4F
+#define LM63_REG_LUT_TEMP(nr)          (0x50 + 2 * (nr))
+#define LM63_REG_LUT_PWM(nr)           (0x51 + 2 * (nr))
 
 #define LM63_REG_LOCAL_TEMP            0x00
 #define LM63_REG_LOCAL_HIGH            0x05
@@ -91,6 +99,16 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
 #define LM63_REG_MAN_ID                        0xFE
 #define LM63_REG_CHIP_ID               0xFF
 
+#define LM96163_REG_TRUTHERM           0x30
+#define LM96163_REG_REMOTE_TEMP_U_MSB  0x31
+#define LM96163_REG_REMOTE_TEMP_U_LSB  0x32
+#define LM96163_REG_CONFIG_ENHANCED    0x45
+
+#define LM63_MAX_CONVRATE              9
+
+#define LM63_MAX_CONVRATE_HZ           32
+#define LM96163_MAX_CONVRATE_HZ                26
+
 /*
  * Conversions and various macros
  * For tachometer counts, the LM63 uses 16-bit values.
@@ -112,15 +130,24 @@ static const unsigned short normal_i2c[] = { 0x18, 0x4c, 0x4e, I2C_CLIENT_END };
                                 (val) >= 127000 ? 127 : \
                                 (val) < 0 ? ((val) - 500) / 1000 : \
                                 ((val) + 500) / 1000)
+#define TEMP8U_TO_REG(val)     ((val) <= 0 ? 0 : \
+                                (val) >= 255000 ? 255 : \
+                                ((val) + 500) / 1000)
 #define TEMP11_FROM_REG(reg)   ((reg) / 32 * 125)
 #define TEMP11_TO_REG(val)     ((val) <= -128000 ? 0x8000 : \
                                 (val) >= 127875 ? 0x7FE0 : \
                                 (val) < 0 ? ((val) - 62) / 125 * 32 : \
                                 ((val) + 62) / 125 * 32)
+#define TEMP11U_TO_REG(val)    ((val) <= 0 ? 0 : \
+                                (val) >= 255875 ? 0xFFE0 : \
+                                ((val) + 62) / 125 * 32)
 #define HYST_TO_REG(val)       ((val) <= 0 ? 0 : \
                                 (val) >= 127000 ? 127 : \
                                 ((val) + 500) / 1000)
 
+#define UPDATE_INTERVAL(max, rate) \
+                       ((1000 << (LM63_MAX_CONVRATE - (rate))) / (max))
+
 /*
  * Functions declaration
  */
@@ -134,7 +161,7 @@ static struct lm63_data *lm63_update_device(struct device *dev);
 static int lm63_detect(struct i2c_client *client, struct i2c_board_info *info);
 static void lm63_init_client(struct i2c_client *client);
 
-enum chips { lm63, lm64 };
+enum chips { lm63, lm64, lm96163 };
 
 /*
  * Driver data (common to all clients)
@@ -143,6 +170,7 @@ enum chips { lm63, lm64 };
 static const struct i2c_device_id lm63_id[] = {
        { "lm63", lm63 },
        { "lm64", lm64 },
+       { "lm96163", lm96163 },
        { }
 };
 MODULE_DEVICE_TABLE(i2c, lm63_id);
@@ -167,26 +195,53 @@ struct lm63_data {
        struct device *hwmon_dev;
        struct mutex update_lock;
        char valid; /* zero until following fields are valid */
+       char lut_valid; /* zero until lut fields are valid */
        unsigned long last_updated; /* in jiffies */
-       int kind;
+       unsigned long lut_last_updated; /* in jiffies */
+       enum chips kind;
        int temp2_offset;
 
+       int update_interval;    /* in milliseconds */
+       int max_convrate_hz;
+       int lut_size;           /* 8 or 12 */
+
        /* registers values */
        u8 config, config_fan;
        u16 fan[2];     /* 0: input
                           1: low limit */
        u8 pwm1_freq;
-       u8 pwm1_value;
-       s8 temp8[3];    /* 0: local input
+       u8 pwm1[13];    /* 0: current output
+                          1-12: lookup table */
+       s8 temp8[15];   /* 0: local input
                           1: local high limit
-                          2: remote critical limit */
-       s16 temp11[3];  /* 0: remote input
+                          2: remote critical limit
+                          3-14: lookup table */
+       s16 temp11[4];  /* 0: remote input
                           1: remote low limit
-                          2: remote high limit */
+                          2: remote high limit
+                          3: remote offset */
+       u16 temp11u;    /* remote input (unsigned) */
        u8 temp2_crit_hyst;
+       u8 lut_temp_hyst;
        u8 alarms;
+       bool pwm_highres;
+       bool lut_temp_highres;
+       bool remote_unsigned; /* true if unsigned remote upper limits */
+       bool trutherm;
 };
 
+static inline int temp8_from_reg(struct lm63_data *data, int nr)
+{
+       if (data->remote_unsigned)
+               return TEMP8_FROM_REG((u8)data->temp8[nr]);
+       return TEMP8_FROM_REG(data->temp8[nr]);
+}
+
+static inline int lut_temp_from_reg(struct lm63_data *data, int nr)
+{
+       return data->temp8[nr] * (data->lut_temp_highres ? 500 : 1000);
+}
+
 /*
  * Sysfs callback functions and files
  */
@@ -204,7 +259,12 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *dummy,
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct lm63_data *data = i2c_get_clientdata(client);
-       unsigned long val = simple_strtoul(buf, NULL, 10);
+       unsigned long val;
+       int err;
+
+       err = kstrtoul(buf, 10, &val);
+       if (err)
+               return err;
 
        mutex_lock(&data->update_lock);
        data->fan[1] = FAN_TO_REG(val);
@@ -216,13 +276,22 @@ static ssize_t set_fan(struct device *dev, struct device_attribute *dummy,
        return count;
 }
 
-static ssize_t show_pwm1(struct device *dev, struct device_attribute *dummy,
+static ssize_t show_pwm1(struct device *dev, struct device_attribute *devattr,
                         char *buf)
 {
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct lm63_data *data = lm63_update_device(dev);
-       return sprintf(buf, "%d\n", data->pwm1_value >= 2 * data->pwm1_freq ?
-                      255 : (data->pwm1_value * 255 + data->pwm1_freq) /
-                      (2 * data->pwm1_freq));
+       int nr = attr->index;
+       int pwm;
+
+       if (data->pwm_highres)
+               pwm = data->pwm1[nr];
+       else
+               pwm = data->pwm1[nr] >= 2 * data->pwm1_freq ?
+                      255 : (data->pwm1[nr] * 255 + data->pwm1_freq) /
+                      (2 * data->pwm1_freq);
+
+       return sprintf(buf, "%d\n", pwm);
 }
 
 static ssize_t set_pwm1(struct device *dev, struct device_attribute *dummy,
@@ -231,22 +300,26 @@ static ssize_t set_pwm1(struct device *dev, struct device_attribute *dummy,
        struct i2c_client *client = to_i2c_client(dev);
        struct lm63_data *data = i2c_get_clientdata(client);
        unsigned long val;
-       
+       int err;
+
        if (!(data->config_fan & 0x20)) /* register is read-only */
                return -EPERM;
 
-       val = simple_strtoul(buf, NULL, 10);
+       err = kstrtoul(buf, 10, &val);
+       if (err)
+               return err;
+
+       val = SENSORS_LIMIT(val, 0, 255);
        mutex_lock(&data->update_lock);
-       data->pwm1_value = val <= 0 ? 0 :
-                          val >= 255 ? 2 * data->pwm1_freq :
-                          (val * data->pwm1_freq * 2 + 127) / 255;
-       i2c_smbus_write_byte_data(client, LM63_REG_PWM_VALUE, data->pwm1_value);
+       data->pwm1[0] = data->pwm_highres ? val :
+                       (val * data->pwm1_freq * 2 + 127) / 255;
+       i2c_smbus_write_byte_data(client, LM63_REG_PWM_VALUE, data->pwm1[0]);
        mutex_unlock(&data->update_lock);
        return count;
 }
 
-static ssize_t show_pwm1_enable(struct device *dev, struct device_attribute *dummy,
-                               char *buf)
+static ssize_t show_pwm1_enable(struct device *dev,
+                               struct device_attribute *dummy, char *buf)
 {
        struct lm63_data *data = lm63_update_device(dev);
        return sprintf(buf, "%d\n", data->config_fan & 0x20 ? 1 : 2);
@@ -273,21 +346,47 @@ static ssize_t show_remote_temp8(struct device *dev,
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct lm63_data *data = lm63_update_device(dev);
-       return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[attr->index])
+       return sprintf(buf, "%d\n", temp8_from_reg(data, attr->index)
+                      + data->temp2_offset);
+}
+
+static ssize_t show_lut_temp(struct device *dev,
+                             struct device_attribute *devattr,
+                             char *buf)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct lm63_data *data = lm63_update_device(dev);
+       return sprintf(buf, "%d\n", lut_temp_from_reg(data, attr->index)
                       + data->temp2_offset);
 }
 
-static ssize_t set_local_temp8(struct device *dev,
-                              struct device_attribute *dummy,
-                              const char *buf, size_t count)
+static ssize_t set_temp8(struct device *dev, struct device_attribute *devattr,
+                        const char *buf, size_t count)
 {
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct i2c_client *client = to_i2c_client(dev);
        struct lm63_data *data = i2c_get_clientdata(client);
-       long val = simple_strtol(buf, NULL, 10);
+       int nr = attr->index;
+       int reg = nr == 2 ? LM63_REG_REMOTE_TCRIT : LM63_REG_LOCAL_HIGH;
+       long val;
+       int err;
+       int temp;
+
+       err = kstrtol(buf, 10, &val);
+       if (err)
+               return err;
 
        mutex_lock(&data->update_lock);
-       data->temp8[1] = TEMP8_TO_REG(val);
-       i2c_smbus_write_byte_data(client, LM63_REG_LOCAL_HIGH, data->temp8[1]);
+       if (nr == 2) {
+               if (data->remote_unsigned)
+                       temp = TEMP8U_TO_REG(val - data->temp2_offset);
+               else
+                       temp = TEMP8_TO_REG(val - data->temp2_offset);
+       } else {
+               temp = TEMP8_TO_REG(val);
+       }
+       data->temp8[nr] = temp;
+       i2c_smbus_write_byte_data(client, reg, temp);
        mutex_unlock(&data->update_lock);
        return count;
 }
@@ -297,28 +396,56 @@ static ssize_t show_temp11(struct device *dev, struct device_attribute *devattr,
 {
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct lm63_data *data = lm63_update_device(dev);
-       return sprintf(buf, "%d\n", TEMP11_FROM_REG(data->temp11[attr->index])
-                      + data->temp2_offset);
+       int nr = attr->index;
+       int temp;
+
+       if (!nr) {
+               /*
+                * Use unsigned temperature unless its value is zero.
+                * If it is zero, use signed temperature.
+                */
+               if (data->temp11u)
+                       temp = TEMP11_FROM_REG(data->temp11u);
+               else
+                       temp = TEMP11_FROM_REG(data->temp11[nr]);
+       } else {
+               if (data->remote_unsigned && nr == 2)
+                       temp = TEMP11_FROM_REG((u16)data->temp11[nr]);
+               else
+                       temp = TEMP11_FROM_REG(data->temp11[nr]);
+       }
+       return sprintf(buf, "%d\n", temp + data->temp2_offset);
 }
 
 static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
                          const char *buf, size_t count)
 {
-       static const u8 reg[4] = {
+       static const u8 reg[6] = {
                LM63_REG_REMOTE_LOW_MSB,
                LM63_REG_REMOTE_LOW_LSB,
                LM63_REG_REMOTE_HIGH_MSB,
                LM63_REG_REMOTE_HIGH_LSB,
+               LM63_REG_REMOTE_OFFSET_MSB,
+               LM63_REG_REMOTE_OFFSET_LSB,
        };
 
        struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
        struct i2c_client *client = to_i2c_client(dev);
        struct lm63_data *data = i2c_get_clientdata(client);
-       long val = simple_strtol(buf, NULL, 10);
+       long val;
+       int err;
        int nr = attr->index;
 
+       err = kstrtol(buf, 10, &val);
+       if (err)
+               return err;
+
        mutex_lock(&data->update_lock);
-       data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
+       if (data->remote_unsigned && nr == 2)
+               data->temp11[nr] = TEMP11U_TO_REG(val - data->temp2_offset);
+       else
+               data->temp11[nr] = TEMP11_TO_REG(val - data->temp2_offset);
+
        i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2],
                                  data->temp11[nr] >> 8);
        i2c_smbus_write_byte_data(client, reg[(nr - 1) * 2 + 1],
@@ -327,35 +454,143 @@ static ssize_t set_temp11(struct device *dev, struct device_attribute *devattr,
        return count;
 }
 
-/* Hysteresis register holds a relative value, while we want to present
-   an absolute to user-space */
-static ssize_t show_temp2_crit_hyst(struct device *dev, struct device_attribute *dummy,
-                                   char *buf)
+/*
+ * Hysteresis register holds a relative value, while we want to present
+ * an absolute to user-space
+ */
+static ssize_t show_temp2_crit_hyst(struct device *dev,
+                                   struct device_attribute *dummy, char *buf)
 {
        struct lm63_data *data = lm63_update_device(dev);
-       return sprintf(buf, "%d\n", TEMP8_FROM_REG(data->temp8[2])
+       return sprintf(buf, "%d\n", temp8_from_reg(data, 2)
                       + data->temp2_offset
                       - TEMP8_FROM_REG(data->temp2_crit_hyst));
 }
 
-/* And now the other way around, user-space provides an absolute
-   hysteresis value and we have to store a relative one */
-static ssize_t set_temp2_crit_hyst(struct device *dev, struct device_attribute *dummy,
+static ssize_t show_lut_temp_hyst(struct device *dev,
+                                 struct device_attribute *devattr, char *buf)
+{
+       struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr);
+       struct lm63_data *data = lm63_update_device(dev);
+
+       return sprintf(buf, "%d\n", lut_temp_from_reg(data, attr->index)
+                      + data->temp2_offset
+                      - TEMP8_FROM_REG(data->lut_temp_hyst));
+}
+
+/*
+ * And now the other way around, user-space provides an absolute
+ * hysteresis value and we have to store a relative one
+ */
+static ssize_t set_temp2_crit_hyst(struct device *dev,
+                                  struct device_attribute *dummy,
                                   const char *buf, size_t count)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct lm63_data *data = i2c_get_clientdata(client);
-       long val = simple_strtol(buf, NULL, 10);
+       long val;
+       int err;
        long hyst;
 
+       err = kstrtol(buf, 10, &val);
+       if (err)
+               return err;
+
        mutex_lock(&data->update_lock);
-       hyst = TEMP8_FROM_REG(data->temp8[2]) + data->temp2_offset - val;
+       hyst = temp8_from_reg(data, 2) + data->temp2_offset - val;
        i2c_smbus_write_byte_data(client, LM63_REG_REMOTE_TCRIT_HYST,
                                  HYST_TO_REG(hyst));
        mutex_unlock(&data->update_lock);
        return count;
 }
 
+/*
+ * Set conversion rate.
+ * client->update_lock must be held when calling this function.
+ */
+static void lm63_set_convrate(struct i2c_client *client, struct lm63_data *data,
+                             unsigned int interval)
+{
+       int i;
+       unsigned int update_interval;
+
+       /* Shift calculations to avoid rounding errors */
+       interval <<= 6;
+
+       /* find the nearest update rate */
+       update_interval = (1 << (LM63_MAX_CONVRATE + 6)) * 1000
+         / data->max_convrate_hz;
+       for (i = 0; i < LM63_MAX_CONVRATE; i++, update_interval >>= 1)
+               if (interval >= update_interval * 3 / 4)
+                       break;
+
+       i2c_smbus_write_byte_data(client, LM63_REG_CONVRATE, i);
+       data->update_interval = UPDATE_INTERVAL(data->max_convrate_hz, i);
+}
+
+static ssize_t show_update_interval(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct lm63_data *data = dev_get_drvdata(dev);
+
+       return sprintf(buf, "%u\n", data->update_interval);
+}
+
+static ssize_t set_update_interval(struct device *dev,
+                                  struct device_attribute *attr,
+                                  const char *buf, size_t count)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lm63_data *data = i2c_get_clientdata(client);
+       unsigned long val;
+       int err;
+
+       err = kstrtoul(buf, 10, &val);
+       if (err)
+               return err;
+
+       mutex_lock(&data->update_lock);
+       lm63_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
+       mutex_unlock(&data->update_lock);
+
+       return count;
+}
+
+static ssize_t show_type(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lm63_data *data = i2c_get_clientdata(client);
+
+       return sprintf(buf, data->trutherm ? "1\n" : "2\n");
+}
+
+static ssize_t set_type(struct device *dev, struct device_attribute *attr,
+                       const char *buf, size_t count)
+{
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lm63_data *data = i2c_get_clientdata(client);
+       unsigned long val;
+       int ret;
+       u8 reg;
+
+       ret = kstrtoul(buf, 10, &val);
+       if (ret < 0)
+               return ret;
+       if (val != 1 && val != 2)
+               return -EINVAL;
+
+       mutex_lock(&data->update_lock);
+       data->trutherm = val == 1;
+       reg = i2c_smbus_read_byte_data(client, LM96163_REG_TRUTHERM) & ~0x02;
+       i2c_smbus_write_byte_data(client, LM96163_REG_TRUTHERM,
+                                 reg | (data->trutherm ? 0x02 : 0x00));
+       data->valid = 0;
+       mutex_unlock(&data->update_lock);
+
+       return count;
+}
+
 static ssize_t show_alarms(struct device *dev, struct device_attribute *dummy,
                           char *buf)
 {
@@ -377,27 +612,87 @@ static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, show_fan, NULL, 0);
 static SENSOR_DEVICE_ATTR(fan1_min, S_IWUSR | S_IRUGO, show_fan,
        set_fan, 1);
 
-static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1);
+static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, show_pwm1, set_pwm1, 0);
 static DEVICE_ATTR(pwm1_enable, S_IRUGO, show_pwm1_enable, NULL);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_pwm, S_IRUGO, show_pwm1, NULL, 1);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_temp, S_IRUGO,
+       show_lut_temp, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point1_temp_hyst, S_IRUGO,
+       show_lut_temp_hyst, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_pwm, S_IRUGO, show_pwm1, NULL, 2);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_temp, S_IRUGO,
+       show_lut_temp, NULL, 4);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point2_temp_hyst, S_IRUGO,
+       show_lut_temp_hyst, NULL, 4);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_pwm, S_IRUGO, show_pwm1, NULL, 3);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_temp, S_IRUGO,
+       show_lut_temp, NULL, 5);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point3_temp_hyst, S_IRUGO,
+       show_lut_temp_hyst, NULL, 5);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point4_pwm, S_IRUGO, show_pwm1, NULL, 4);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point4_temp, S_IRUGO,
+       show_lut_temp, NULL, 6);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point4_temp_hyst, S_IRUGO,
+       show_lut_temp_hyst, NULL, 6);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point5_pwm, S_IRUGO, show_pwm1, NULL, 5);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point5_temp, S_IRUGO,
+       show_lut_temp, NULL, 7);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point5_temp_hyst, S_IRUGO,
+       show_lut_temp_hyst, NULL, 7);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point6_pwm, S_IRUGO, show_pwm1, NULL, 6);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point6_temp, S_IRUGO,
+       show_lut_temp, NULL, 8);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point6_temp_hyst, S_IRUGO,
+       show_lut_temp_hyst, NULL, 8);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point7_pwm, S_IRUGO, show_pwm1, NULL, 7);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point7_temp, S_IRUGO,
+       show_lut_temp, NULL, 9);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point7_temp_hyst, S_IRUGO,
+       show_lut_temp_hyst, NULL, 9);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point8_pwm, S_IRUGO, show_pwm1, NULL, 8);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point8_temp, S_IRUGO,
+       show_lut_temp, NULL, 10);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point8_temp_hyst, S_IRUGO,
+       show_lut_temp_hyst, NULL, 10);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point9_pwm, S_IRUGO, show_pwm1, NULL, 9);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point9_temp, S_IRUGO,
+       show_lut_temp, NULL, 11);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point9_temp_hyst, S_IRUGO,
+       show_lut_temp_hyst, NULL, 11);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point10_pwm, S_IRUGO, show_pwm1, NULL, 10);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point10_temp, S_IRUGO,
+       show_lut_temp, NULL, 12);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point10_temp_hyst, S_IRUGO,
+       show_lut_temp_hyst, NULL, 12);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point11_pwm, S_IRUGO, show_pwm1, NULL, 11);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point11_temp, S_IRUGO,
+       show_lut_temp, NULL, 13);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point11_temp_hyst, S_IRUGO,
+       show_lut_temp_hyst, NULL, 13);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point12_pwm, S_IRUGO, show_pwm1, NULL, 12);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point12_temp, S_IRUGO,
+       show_lut_temp, NULL, 14);
+static SENSOR_DEVICE_ATTR(pwm1_auto_point12_temp_hyst, S_IRUGO,
+       show_lut_temp_hyst, NULL, 14);
 
 static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, show_local_temp8, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, show_local_temp8,
-       set_local_temp8, 1);
+       set_temp8, 1);
 
 static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, show_temp11, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp2_min, S_IWUSR | S_IRUGO, show_temp11,
        set_temp11, 1);
 static SENSOR_DEVICE_ATTR(temp2_max, S_IWUSR | S_IRUGO, show_temp11,
        set_temp11, 2);
-/*
- * On LM63, temp2_crit can be set only once, which should be job
- * of the bootloader.
- */
+static SENSOR_DEVICE_ATTR(temp2_offset, S_IWUSR | S_IRUGO, show_temp11,
+       set_temp11, 3);
 static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, show_remote_temp8,
-       NULL, 2);
+       set_temp8, 2);
 static DEVICE_ATTR(temp2_crit_hyst, S_IWUSR | S_IRUGO, show_temp2_crit_hyst,
        set_temp2_crit_hyst);
 
+static DEVICE_ATTR(temp2_type, S_IWUSR | S_IRUGO, show_type, set_type);
+
 /* Individual alarm files */
 static SENSOR_DEVICE_ATTR(fan1_min_alarm, S_IRUGO, show_alarm, NULL, 0);
 static SENSOR_DEVICE_ATTR(temp2_crit_alarm, S_IRUGO, show_alarm, NULL, 1);
@@ -408,14 +703,43 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 6);
 /* Raw alarm file for compatibility */
 static DEVICE_ATTR(alarms, S_IRUGO, show_alarms, NULL);
 
+static DEVICE_ATTR(update_interval, S_IRUGO | S_IWUSR, show_update_interval,
+                  set_update_interval);
+
 static struct attribute *lm63_attributes[] = {
-       &dev_attr_pwm1.attr,
+       &sensor_dev_attr_pwm1.dev_attr.attr,
        &dev_attr_pwm1_enable.attr,
+       &sensor_dev_attr_pwm1_auto_point1_pwm.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point1_temp.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point1_temp_hyst.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point2_pwm.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point2_temp.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point2_temp_hyst.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point3_pwm.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point3_temp.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point3_temp_hyst.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point4_pwm.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point4_temp.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point4_temp_hyst.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point5_pwm.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point5_temp.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point5_temp_hyst.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point6_pwm.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point6_temp.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point6_temp_hyst.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point7_pwm.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point7_temp.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point7_temp_hyst.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point8_pwm.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point8_temp.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point8_temp_hyst.dev_attr.attr,
+
        &sensor_dev_attr_temp1_input.dev_attr.attr,
        &sensor_dev_attr_temp2_input.dev_attr.attr,
        &sensor_dev_attr_temp2_min.dev_attr.attr,
        &sensor_dev_attr_temp1_max.dev_attr.attr,
        &sensor_dev_attr_temp2_max.dev_attr.attr,
+       &sensor_dev_attr_temp2_offset.dev_attr.attr,
        &sensor_dev_attr_temp2_crit.dev_attr.attr,
        &dev_attr_temp2_crit_hyst.attr,
 
@@ -425,10 +749,54 @@ static struct attribute *lm63_attributes[] = {
        &sensor_dev_attr_temp2_max_alarm.dev_attr.attr,
        &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
        &dev_attr_alarms.attr,
+       &dev_attr_update_interval.attr,
        NULL
 };
 
+static struct attribute *lm63_attributes_extra_lut[] = {
+       &sensor_dev_attr_pwm1_auto_point9_pwm.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point9_temp.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point9_temp_hyst.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point10_pwm.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point10_temp.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point10_temp_hyst.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point11_pwm.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point11_temp.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point11_temp_hyst.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point12_pwm.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point12_temp.dev_attr.attr,
+       &sensor_dev_attr_pwm1_auto_point12_temp_hyst.dev_attr.attr,
+       NULL
+};
+
+static const struct attribute_group lm63_group_extra_lut = {
+       .attrs = lm63_attributes_extra_lut,
+};
+
+/*
+ * On LM63, temp2_crit can be set only once, which should be the job
+ * of the bootloader.
+ * On LM64, temp2_crit can always be set.
+ * On LM96163, temp2_crit can be set if bit 1 of the configuration
+ * register is set.
+ */
+static umode_t lm63_attribute_mode(struct kobject *kobj,
+                                  struct attribute *attr, int index)
+{
+       struct device *dev = container_of(kobj, struct device, kobj);
+       struct i2c_client *client = to_i2c_client(dev);
+       struct lm63_data *data = i2c_get_clientdata(client);
+
+       if (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr
+           && (data->kind == lm64 ||
+               (data->kind == lm96163 && (data->config & 0x02))))
+               return attr->mode | S_IWUSR;
+
+       return attr->mode;
+}
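The effect of the hook above, sketched for an LM64 (illustrative call, not part
of the patch): sysfs_create_group() consults .is_visible once per attribute and
creates each file with the mode the callback returns.

	/* illustration only: what the hook returns for temp2_crit on an LM64 */
	umode_t mode = lm63_attribute_mode(&client->dev.kobj,
			&sensor_dev_attr_temp2_crit.dev_attr.attr, 0);
	/* mode == S_IRUGO | S_IWUSR (0644); on an LM63 it stays S_IRUGO */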
+
 static const struct attribute_group lm63_group = {
+       .is_visible = lm63_attribute_mode,
        .attrs = lm63_attributes,
 };
 
@@ -487,6 +855,8 @@ static int lm63_detect(struct i2c_client *new_client,
                strlcpy(info->type, "lm63", I2C_NAME_SIZE);
        else if (chip_id == 0x51 && (address == 0x18 || address == 0x4e))
                strlcpy(info->type, "lm64", I2C_NAME_SIZE);
+       else if (chip_id == 0x49 && address == 0x4c)
+               strlcpy(info->type, "lm96163", I2C_NAME_SIZE);
        else
                return -ENODEV;
 
@@ -518,12 +888,24 @@ static int lm63_probe(struct i2c_client *new_client,
        lm63_init_client(new_client);
 
        /* Register sysfs hooks */
-       if ((err = sysfs_create_group(&new_client->dev.kobj,
-                                     &lm63_group)))
+       err = sysfs_create_group(&new_client->dev.kobj, &lm63_group);
+       if (err)
                goto exit_free;
        if (data->config & 0x04) { /* tachometer enabled */
-               if ((err = sysfs_create_group(&new_client->dev.kobj,
-                                             &lm63_group_fan1)))
+               err = sysfs_create_group(&new_client->dev.kobj,
+                                        &lm63_group_fan1);
+               if (err)
+                       goto exit_remove_files;
+       }
+       if (data->kind == lm96163) {
+               err = device_create_file(&new_client->dev,
+                                        &dev_attr_temp2_type);
+               if (err)
+                       goto exit_remove_files;
+
+               err = sysfs_create_group(&new_client->dev.kobj,
+                                        &lm63_group_extra_lut);
+               if (err)
                        goto exit_remove_files;
        }
 
@@ -538,17 +920,25 @@ static int lm63_probe(struct i2c_client *new_client,
 exit_remove_files:
        sysfs_remove_group(&new_client->dev.kobj, &lm63_group);
        sysfs_remove_group(&new_client->dev.kobj, &lm63_group_fan1);
+       if (data->kind == lm96163) {
+               device_remove_file(&new_client->dev, &dev_attr_temp2_type);
+               sysfs_remove_group(&new_client->dev.kobj,
+                                  &lm63_group_extra_lut);
+       }
 exit_free:
        kfree(data);
 exit:
        return err;
 }
 
-/* Idealy we shouldn't have to initialize anything, since the BIOS
-   should have taken care of everything */
+/*
+ * Ideally we shouldn't have to initialize anything, since the BIOS
+ * should have taken care of everything
+ */
 static void lm63_init_client(struct i2c_client *client)
 {
        struct lm63_data *data = i2c_get_clientdata(client);
+       u8 convrate;
 
        data->config = i2c_smbus_read_byte_data(client, LM63_REG_CONFIG1);
        data->config_fan = i2c_smbus_read_byte_data(client,
@@ -561,16 +951,57 @@ static void lm63_init_client(struct i2c_client *client)
                i2c_smbus_write_byte_data(client, LM63_REG_CONFIG1,
                                          data->config);
        }
+       /* Tachometer is always enabled on LM64 */
+       if (data->kind == lm64)
+               data->config |= 0x04;
 
        /* We may need pwm1_freq before ever updating the client data */
        data->pwm1_freq = i2c_smbus_read_byte_data(client, LM63_REG_PWM_FREQ);
        if (data->pwm1_freq == 0)
                data->pwm1_freq = 1;
 
+       switch (data->kind) {
+       case lm63:
+       case lm64:
+               data->max_convrate_hz = LM63_MAX_CONVRATE_HZ;
+               data->lut_size = 8;
+               break;
+       case lm96163:
+               data->max_convrate_hz = LM96163_MAX_CONVRATE_HZ;
+               data->lut_size = 12;
+               data->trutherm
+                 = i2c_smbus_read_byte_data(client,
+                                            LM96163_REG_TRUTHERM) & 0x02;
+               break;
+       }
+       convrate = i2c_smbus_read_byte_data(client, LM63_REG_CONVRATE);
+       if (unlikely(convrate > LM63_MAX_CONVRATE))
+               convrate = LM63_MAX_CONVRATE;
+       data->update_interval = UPDATE_INTERVAL(data->max_convrate_hz,
+                                               convrate);
+
+       /*
+        * For LM96163, check if high resolution PWM
+        * and unsigned temperature format are enabled.
+        */
+       if (data->kind == lm96163) {
+               u8 config_enhanced
+                 = i2c_smbus_read_byte_data(client,
+                                            LM96163_REG_CONFIG_ENHANCED);
+               if (config_enhanced & 0x20)
+                       data->lut_temp_highres = true;
+               if ((config_enhanced & 0x10)
+                   && !(data->config_fan & 0x08) && data->pwm1_freq == 8)
+                       data->pwm_highres = true;
+               if (config_enhanced & 0x08)
+                       data->remote_unsigned = true;
+       }
+
        /* Show some debug info about the LM63 configuration */
-       dev_dbg(&client->dev, "Alert/tach pin configured for %s\n",
-               (data->config & 0x04) ? "tachometer input" :
-               "alert output");
+       if (data->kind == lm63)
+               dev_dbg(&client->dev, "Alert/tach pin configured for %s\n",
+                       (data->config & 0x04) ? "tachometer input" :
+                       "alert output");
        dev_dbg(&client->dev, "PWM clock %s kHz, output frequency %u Hz\n",
                (data->config_fan & 0x08) ? "1.4" : "360",
                ((data->config_fan & 0x08) ? 700 : 180000) / data->pwm1_freq);
@@ -586,6 +1017,10 @@ static int lm63_remove(struct i2c_client *client)
        hwmon_device_unregister(data->hwmon_dev);
        sysfs_remove_group(&client->dev.kobj, &lm63_group);
        sysfs_remove_group(&client->dev.kobj, &lm63_group_fan1);
+       if (data->kind == lm96163) {
+               device_remove_file(&client->dev, &dev_attr_temp2_type);
+               sysfs_remove_group(&client->dev.kobj, &lm63_group_extra_lut);
+       }
 
        kfree(data);
        return 0;
@@ -595,10 +1030,15 @@ static struct lm63_data *lm63_update_device(struct device *dev)
 {
        struct i2c_client *client = to_i2c_client(dev);
        struct lm63_data *data = i2c_get_clientdata(client);
+       unsigned long next_update;
+       int i;
 
        mutex_lock(&data->update_lock);
 
-       if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
+       next_update = data->last_updated
+         + msecs_to_jiffies(data->update_interval) + 1;
+
+       if (time_after(jiffies, next_update) || !data->valid) {
                if (data->config & 0x04) { /* tachometer enabled  */
                        /* order matters for fan1_input */
                        data->fan[0] = i2c_smbus_read_byte_data(client,
@@ -615,8 +1055,8 @@ static struct lm63_data *lm63_update_device(struct device *dev)
                                  LM63_REG_PWM_FREQ);
                if (data->pwm1_freq == 0)
                        data->pwm1_freq = 1;
-               data->pwm1_value = i2c_smbus_read_byte_data(client,
-                                  LM63_REG_PWM_VALUE);
+               data->pwm1[0] = i2c_smbus_read_byte_data(client,
+                               LM63_REG_PWM_VALUE);
 
                data->temp8[0] = i2c_smbus_read_byte_data(client,
                                 LM63_REG_LOCAL_TEMP);
@@ -636,6 +1076,17 @@ static struct lm63_data *lm63_update_device(struct device *dev)
                                  LM63_REG_REMOTE_HIGH_MSB) << 8)
                                | i2c_smbus_read_byte_data(client,
                                  LM63_REG_REMOTE_HIGH_LSB);
+               data->temp11[3] = (i2c_smbus_read_byte_data(client,
+                                 LM63_REG_REMOTE_OFFSET_MSB) << 8)
+                               | i2c_smbus_read_byte_data(client,
+                                 LM63_REG_REMOTE_OFFSET_LSB);
+
+               if (data->kind == lm96163)
+                       data->temp11u = (i2c_smbus_read_byte_data(client,
+                                       LM96163_REG_REMOTE_TEMP_U_MSB) << 8)
+                                     | i2c_smbus_read_byte_data(client,
+                                       LM96163_REG_REMOTE_TEMP_U_LSB);
+
                data->temp8[2] = i2c_smbus_read_byte_data(client,
                                 LM63_REG_REMOTE_TCRIT);
                data->temp2_crit_hyst = i2c_smbus_read_byte_data(client,
@@ -648,6 +1099,21 @@ static struct lm63_data *lm63_update_device(struct device *dev)
                data->valid = 1;
        }
 
+       if (time_after(jiffies, data->lut_last_updated + 5 * HZ) ||
+           !data->lut_valid) {
+               for (i = 0; i < data->lut_size; i++) {
+                       data->pwm1[1 + i] = i2c_smbus_read_byte_data(client,
+                                           LM63_REG_LUT_PWM(i));
+                       data->temp8[3 + i] = i2c_smbus_read_byte_data(client,
+                                            LM63_REG_LUT_TEMP(i));
+               }
+               data->lut_temp_hyst = i2c_smbus_read_byte_data(client,
+                                     LM63_REG_LUT_TEMP_HYST);
+
+               data->lut_last_updated = jiffies;
+               data->lut_valid = 1;
+       }
+
        mutex_unlock(&data->update_lock);
 
        return data;
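The update path now has two freshness timers: the measurement registers are
re-read once per update_interval, while the lookup table is only refreshed
every five seconds since it rarely changes. As a rough example of the first
check (illustrative values only), with HZ at 250 and a 500 ms update interval:

	next_update = data->last_updated + msecs_to_jiffies(500) + 1;
	/* = last_updated + 126 jiffies, i.e. just over half a second */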
index bdfd675488aeb5e64b9d4c144c5cfd185ad0ecb0..d2dd5f90496df1ca4393467e31f1ab78264f17d8 100644 (file)
@@ -917,7 +917,7 @@ static ssize_t set_update_interval(struct device *dev,
                return err;
 
        mutex_lock(&data->update_lock);
-       lm90_set_convrate(client, data, val);
+       lm90_set_convrate(client, data, SENSORS_LIMIT(val, 0, 100000));
        mutex_unlock(&data->update_lock);
 
        return count;
index 84ef3a898707ec4bd572c04f4be75836d036a8c3..482ca901db30b61632773176a4131be60a200575 100644 (file)
@@ -106,11 +106,14 @@ static ssize_t show_adc(struct device *dev,
        if (ret < 0)
                return ret;
 
-       return sprintf(buf, "%d\n", ret);
+       /* assuming a 2.048 V reference voltage and an 8-bit sample,
+        * the LSB weight is 8 mV
+        */
+       return sprintf(buf, "%d\n", ret * 8);
 }
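The factor of eight above follows directly from the stated assumption: a
2.048 V full scale spread over an 8-bit sample gives 2048 mV / 256 = 8 mV per
LSB.

	/* worked example: raw sample 128 -> 128 * 8 = 1024, i.e. about 1.024 V */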
 
 #define MAX1111_ADC_ATTR(_id)          \
-       SENSOR_DEVICE_ATTR(adc##_id##_in, S_IRUGO, show_adc, NULL, _id)
+       SENSOR_DEVICE_ATTR(in##_id##_input, S_IRUGO, show_adc, NULL, _id)
 
 static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
 static MAX1111_ADC_ATTR(0);
@@ -120,10 +123,10 @@ static MAX1111_ADC_ATTR(3);
 
 static struct attribute *max1111_attributes[] = {
        &dev_attr_name.attr,
-       &sensor_dev_attr_adc0_in.dev_attr.attr,
-       &sensor_dev_attr_adc1_in.dev_attr.attr,
-       &sensor_dev_attr_adc2_in.dev_attr.attr,
-       &sensor_dev_attr_adc3_in.dev_attr.attr,
+       &sensor_dev_attr_in0_input.dev_attr.attr,
+       &sensor_dev_attr_in1_input.dev_attr.attr,
+       &sensor_dev_attr_in2_input.dev_attr.attr,
+       &sensor_dev_attr_in3_input.dev_attr.attr,
        NULL,
 };
 
index 6ddeae049058e377562c3382b5d06095a9e39beb..91fdd1fe18b0a8e5c4bfe54659f5811e939b49d4 100644 (file)
@@ -883,7 +883,7 @@ static int sht15_invalidate_voltage(struct notifier_block *nb,
 
 static int __devinit sht15_probe(struct platform_device *pdev)
 {
-       int ret = 0;
+       int ret;
        struct sht15_data *data = kzalloc(sizeof(*data), GFP_KERNEL);
        u8 status = 0;
 
@@ -901,6 +901,7 @@ static int __devinit sht15_probe(struct platform_device *pdev)
        init_waitqueue_head(&data->wait_queue);
 
        if (pdev->dev.platform_data == NULL) {
+               ret = -EINVAL;
                dev_err(&pdev->dev, "no platform data supplied\n");
                goto err_free_data;
        }
index 0e0af0445222c4346af8fef879831831e49caffa..4d383e7e051dee53d7411049b23ab1ad5ee32589 100644 (file)
@@ -1319,6 +1319,7 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
 {
        struct w83627ehf_data *data = dev_get_drvdata(dev);
        struct sensor_device_attribute *sensor_attr = to_sensor_dev_attr(attr);
+       struct w83627ehf_sio_data *sio_data = dev->platform_data;
        int nr = sensor_attr->index;
        unsigned long val;
        int err;
@@ -1330,6 +1331,11 @@ store_pwm_mode(struct device *dev, struct device_attribute *attr,
 
        if (val > 1)
                return -EINVAL;
+
+       /* On NCT6776F, DC mode is only supported for pwm1 */
+       if (sio_data->kind == nct6776 && nr && val != 1)
+               return -EINVAL;
+
        mutex_lock(&data->update_lock);
        reg = w83627ehf_read_value(data, W83627EHF_REG_PWM_ENABLE[nr]);
        data->pwm_mode[nr] = val;
@@ -1914,9 +1920,26 @@ w83627ehf_check_fan_inputs(const struct w83627ehf_sio_data *sio_data,
                fan4min = 0;
                fan5pin = 0;
        } else if (sio_data->kind == nct6776) {
-               fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
-               fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
-               fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
+               bool gpok = superio_inb(sio_data->sioreg, 0x27) & 0x80;
+
+               superio_select(sio_data->sioreg, W83627EHF_LD_HWM);
+               regval = superio_inb(sio_data->sioreg, SIO_REG_ENABLE);
+
+               if (regval & 0x80)
+                       fan3pin = gpok;
+               else
+                       fan3pin = !(superio_inb(sio_data->sioreg, 0x24) & 0x40);
+
+               if (regval & 0x40)
+                       fan4pin = gpok;
+               else
+                       fan4pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x01);
+
+               if (regval & 0x20)
+                       fan5pin = gpok;
+               else
+                       fan5pin = !!(superio_inb(sio_data->sioreg, 0x1C) & 0x02);
+
                fan4min = fan4pin;
        } else if (sio_data->kind == w83667hg || sio_data->kind == w83667hg_b) {
                fan3pin = 1;
index cbe7a2fb779f4036798c0231ef2dddb349f6fc41..3101dd59e3794c6d2d34c2ff4df65f1cece29a28 100644 (file)
@@ -682,19 +682,19 @@ config I2C_XILINX
          will be called xilinx_i2c.
 
 config I2C_EG20T
-       tristate "Intel EG20T PCH / OKI SEMICONDUCTOR IOH(ML7213/ML7223)"
+       tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) I2C"
        depends on PCI
        help
          This driver is for PCH(Platform controller Hub) I2C of EG20T which
          is an IOH(Input/Output Hub) for x86 embedded processor.
          This driver can access PCH I2C bus device.
 
-         This driver also can be used for OKI SEMICONDUCTOR IOH(Input/
-         Output Hub), ML7213 and ML7223.
-         ML7213 IOH is for IVI(In-Vehicle Infotainment) use and ML7223 IOH is
-         for MP(Media Phone) use.
-         ML7213/ML7223 is companion chip for Intel Atom E6xx series.
-         ML7213/ML7223 is completely compatible for Intel EG20T PCH.
+         This driver can also be used for the LAPIS Semiconductor IOH(Input/
+         Output Hub) chips ML7213, ML7223 and ML7831.
+         The ML7213 IOH is for IVI(In-Vehicle Infotainment) use, the ML7223
+         IOH is for MP(Media Phone) use and the ML7831 IOH is for general
+         purpose use.
+         ML7213/ML7223/ML7831 are companion chips for the Intel Atom E6xx
+         series and are fully compatible with the Intel EG20T PCH.
 
 comment "External I2C/SMBus adapter drivers"
 
index 3ef3557b6e32d5d03b1b048bdae39988d0524a4e..ca8877641040284e4eb7964c9abe63a7e2d4a11d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 /*
 Set the number of I2C instance max
 Intel EG20T PCH :              1ch
-OKI SEMICONDUCTOR ML7213 IOH : 2ch
+LAPIS Semiconductor ML7213 IOH :       2ch
+LAPIS Semiconductor ML7831 IOH :       1ch
 */
 #define PCH_I2C_MAX_DEV                        2
 
@@ -180,15 +181,17 @@ static int pch_clk = 50000;       /* specifies I2C clock speed in KHz */
 static wait_queue_head_t pch_event;
 static DEFINE_MUTEX(pch_mutex);
 
-/* Definition for ML7213 by OKI SEMICONDUCTOR */
+/* Definition for ML7213 by LAPIS Semiconductor */
 #define PCI_VENDOR_ID_ROHM             0x10DB
 #define PCI_DEVICE_ID_ML7213_I2C       0x802D
 #define PCI_DEVICE_ID_ML7223_I2C       0x8010
+#define PCI_DEVICE_ID_ML7831_I2C       0x8817
 
 static DEFINE_PCI_DEVICE_TABLE(pch_pcidev_id) = {
        { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PCH_I2C),   1, },
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_I2C), 2, },
        { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_I2C), 1, },
+       { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_I2C), 1, },
        {0,}
 };
 
@@ -243,7 +246,7 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
        if (pch_clk > PCH_MAX_CLK)
                pch_clk = 62500;
 
-       pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / pch_i2c_speed * 8;
+       pch_i2cbc = (pch_clk + (pch_i2c_speed * 4)) / (pch_i2c_speed * 8);
        /* Set transfer speed in I2CBC */
        iowrite32(pch_i2cbc, p + PCH_I2CBC);
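The added parentheses above change the grouping of the divisor, not just the
style. With illustrative values pch_clk = 50000 and pch_i2c_speed = 100 (both
in kHz):

	/* old:  (50000 + 100 * 4) / 100 * 8   = 504 * 8    = 4032 */
	/* new:  (50000 + 100 * 4) / (100 * 8) = 50400 / 800 =   63 */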
 
@@ -918,7 +921,9 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
                pch_adap->dev.parent = &pdev->dev;
 
                pch_i2c_init(&adap_info->pch_data[i]);
-               ret = i2c_add_adapter(pch_adap);
+
+               pch_adap->nr = i;
+               ret = i2c_add_numbered_adapter(pch_adap);
                if (ret) {
                        pch_pci_err(pdev, "i2c_add_adapter[ch:%d] FAILED\n", i);
                        goto err_add_adapter;
@@ -1058,8 +1063,8 @@ static void __exit pch_pci_exit(void)
 }
 module_exit(pch_pci_exit);
 
-MODULE_DESCRIPTION("Intel EG20T PCH/OKI SEMICONDUCTOR ML7213 IOH I2C Driver");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semico ML7213/ML7223/ML7831 IOH I2C");
 MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.okisemi.com>");
+MODULE_AUTHOR("Tomoya MORINAGA. <tomoya-linux@dsn.lapis-semi.com>");
 module_param(pch_i2c_speed, int, (S_IRUSR | S_IWUSR));
 module_param(pch_clk, int, (S_IRUSR | S_IWUSR));
index fa23faa20f0e34435881e24abc6d5e4c9545a259..801df6000e9b2042ce6bc6453b38e49bf41fd031 100644 (file)
@@ -37,6 +37,9 @@
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_i2c.h>
+#include <linux/of_device.h>
 #include <linux/slab.h>
 #include <linux/i2c-omap.h>
 #include <linux/pm_runtime.h>
@@ -182,7 +185,9 @@ struct omap_i2c_dev {
        u32                     latency;        /* maximum mpu wkup latency */
        void                    (*set_mpu_wkup_lat)(struct device *dev,
                                                    long latency);
-       u32                     speed;          /* Speed of bus in Khz */
+       u32                     speed;          /* Speed of bus in kHz */
+       u32                     dtrev;          /* extra revision from DT */
+       u32                     flags;
        u16                     cmd_err;
        u8                      *buf;
        u8                      *regs;
@@ -235,7 +240,7 @@ static const u8 reg_map_ip_v2[] = {
        [OMAP_I2C_BUF_REG] = 0x94,
        [OMAP_I2C_CNT_REG] = 0x98,
        [OMAP_I2C_DATA_REG] = 0x9c,
-       [OMAP_I2C_SYSC_REG] = 0x20,
+       [OMAP_I2C_SYSC_REG] = 0x10,
        [OMAP_I2C_CON_REG] = 0xa4,
        [OMAP_I2C_OA_REG] = 0xa8,
        [OMAP_I2C_SA_REG] = 0xac,
@@ -266,11 +271,7 @@ static inline u16 omap_i2c_read_reg(struct omap_i2c_dev *i2c_dev, int reg)
 
 static void omap_i2c_unidle(struct omap_i2c_dev *dev)
 {
-       struct omap_i2c_bus_platform_data *pdata;
-
-       pdata = dev->dev->platform_data;
-
-       if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+       if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
                omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
                omap_i2c_write_reg(dev, OMAP_I2C_PSC_REG, dev->pscstate);
                omap_i2c_write_reg(dev, OMAP_I2C_SCLL_REG, dev->scllstate);
@@ -291,13 +292,10 @@ static void omap_i2c_unidle(struct omap_i2c_dev *dev)
 
 static void omap_i2c_idle(struct omap_i2c_dev *dev)
 {
-       struct omap_i2c_bus_platform_data *pdata;
        u16 iv;
 
-       pdata = dev->dev->platform_data;
-
        dev->iestate = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
-       if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+       if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
                omap_i2c_write_reg(dev, OMAP_I2C_IP_V2_IRQENABLE_CLR, 1);
        else
                omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, 0);
@@ -320,9 +318,6 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
        unsigned long timeout;
        unsigned long internal_clk = 0;
        struct clk *fclk;
-       struct omap_i2c_bus_platform_data *pdata;
-
-       pdata = dev->dev->platform_data;
 
        if (dev->rev >= OMAP_I2C_OMAP1_REV_2) {
                /* Disable I2C controller before soft reset */
@@ -373,7 +368,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
        }
        omap_i2c_write_reg(dev, OMAP_I2C_CON_REG, 0);
 
-       if (pdata->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
+       if (dev->flags & OMAP_I2C_FLAG_ALWAYS_ARMXOR_CLK) {
                /*
                 * The I2C functional clock is the armxor_ck, so there's
                 * no need to get "armxor_ck" separately.  Now, if OMAP2420
@@ -397,7 +392,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
                        psc = fclk_rate / 12000000;
        }
 
-       if (!(pdata->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
+       if (!(dev->flags & OMAP_I2C_FLAG_SIMPLE_CLOCK)) {
 
                /*
                 * HSI2C controller internal clk rate should be 19.2 Mhz for
@@ -406,7 +401,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
                 * The filter is iclk (fclk for HS) period.
                 */
                if (dev->speed > 400 ||
-                              pdata->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
+                              dev->flags & OMAP_I2C_FLAG_FORCE_19200_INT_CLK)
                        internal_clk = 19200;
                else if (dev->speed > 100)
                        internal_clk = 9600;
@@ -475,7 +470,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
 
        dev->errata = 0;
 
-       if (pdata->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
+       if (dev->flags & OMAP_I2C_FLAG_APPLY_ERRATA_I207)
                dev->errata |= I2C_OMAP_ERRATA_I207;
 
        /* Enable interrupts */
@@ -484,7 +479,7 @@ static int omap_i2c_init(struct omap_i2c_dev *dev)
                        OMAP_I2C_IE_AL)  | ((dev->fifo_size) ?
                                (OMAP_I2C_IE_RDR | OMAP_I2C_IE_XDR) : 0);
        omap_i2c_write_reg(dev, OMAP_I2C_IE_REG, dev->iestate);
-       if (pdata->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
+       if (dev->flags & OMAP_I2C_FLAG_RESET_REGS_POSTIDLE) {
                dev->pscstate = psc;
                dev->scllstate = scll;
                dev->sclhstate = sclh;
@@ -804,9 +799,6 @@ omap_i2c_isr(int this_irq, void *dev_id)
        u16 bits;
        u16 stat, w;
        int err, count = 0;
-       struct omap_i2c_bus_platform_data *pdata;
-
-       pdata = dev->dev->platform_data;
 
        if (pm_runtime_suspended(dev->dev))
                return IRQ_NONE;
@@ -830,11 +822,9 @@ complete:
                                ~(OMAP_I2C_STAT_RRDY | OMAP_I2C_STAT_RDR |
                                OMAP_I2C_STAT_XRDY | OMAP_I2C_STAT_XDR));
 
-               if (stat & OMAP_I2C_STAT_NACK) {
+               if (stat & OMAP_I2C_STAT_NACK)
                        err |= OMAP_I2C_STAT_NACK;
-                       omap_i2c_write_reg(dev, OMAP_I2C_CON_REG,
-                                          OMAP_I2C_CON_STP);
-               }
+
                if (stat & OMAP_I2C_STAT_AL) {
                        dev_err(dev->dev, "Arbitration lost\n");
                        err |= OMAP_I2C_STAT_AL;
@@ -875,7 +865,7 @@ complete:
                                         * Data reg in 2430, omap3 and
                                         * omap4 is 8 bit wide
                                         */
-                                       if (pdata->flags &
+                                       if (dev->flags &
                                                 OMAP_I2C_FLAG_16BIT_DATA_REG) {
                                                if (dev->buf_len) {
                                                        *dev->buf++ = w >> 8;
@@ -918,7 +908,7 @@ complete:
                                         * Data reg in 2430, omap3 and
                                         * omap4 is 8 bit wide
                                         */
-                                       if (pdata->flags &
+                                       if (dev->flags &
                                                 OMAP_I2C_FLAG_16BIT_DATA_REG) {
                                                if (dev->buf_len) {
                                                        w |= *dev->buf++ << 8;
@@ -965,6 +955,32 @@ static const struct i2c_algorithm omap_i2c_algo = {
        .functionality  = omap_i2c_func,
 };
 
+#ifdef CONFIG_OF
+static struct omap_i2c_bus_platform_data omap3_pdata = {
+       .rev = OMAP_I2C_IP_VERSION_1,
+       .flags = OMAP_I2C_FLAG_APPLY_ERRATA_I207 |
+                OMAP_I2C_FLAG_RESET_REGS_POSTIDLE |
+                OMAP_I2C_FLAG_BUS_SHIFT_2,
+};
+
+static struct omap_i2c_bus_platform_data omap4_pdata = {
+       .rev = OMAP_I2C_IP_VERSION_2,
+};
+
+static const struct of_device_id omap_i2c_of_match[] = {
+       {
+               .compatible = "ti,omap4-i2c",
+               .data = &omap4_pdata,
+       },
+       {
+               .compatible = "ti,omap3-i2c",
+               .data = &omap3_pdata,
+       },
+       { },
+};
+MODULE_DEVICE_TABLE(of, omap_i2c_of_match);
+#endif
+
 static int __devinit
 omap_i2c_probe(struct platform_device *pdev)
 {
@@ -972,9 +988,10 @@ omap_i2c_probe(struct platform_device *pdev)
        struct i2c_adapter      *adap;
        struct resource         *mem, *irq, *ioarea;
        struct omap_i2c_bus_platform_data *pdata = pdev->dev.platform_data;
+       struct device_node      *node = pdev->dev.of_node;
+       const struct of_device_id *match;
        irq_handler_t isr;
        int r;
-       u32 speed = 0;
 
        /* NOTE: driver uses the static register mapping */
        mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1001,15 +1018,24 @@ omap_i2c_probe(struct platform_device *pdev)
                goto err_release_region;
        }
 
-       if (pdata != NULL) {
-               speed = pdata->clkrate;
+       match = of_match_device(of_match_ptr(omap_i2c_of_match), &pdev->dev);
+       if (match) {
+               u32 freq = 100000; /* default to 100000 Hz */
+
+               pdata = match->data;
+               dev->dtrev = pdata->rev;
+               dev->flags = pdata->flags;
+
+               of_property_read_u32(node, "clock-frequency", &freq);
+               /* convert DT freq value in Hz into kHz for speed */
+               dev->speed = freq / 1000;
+       } else if (pdata != NULL) {
+               dev->speed = pdata->clkrate;
+               dev->flags = pdata->flags;
                dev->set_mpu_wkup_lat = pdata->set_mpu_wkup_lat;
-       } else {
-               speed = 100;    /* Default speed */
-               dev->set_mpu_wkup_lat = NULL;
+               dev->dtrev = pdata->rev;
        }
 
-       dev->speed = speed;
        dev->dev = &pdev->dev;
        dev->irq = irq->start;
        dev->base = ioremap(mem->start, resource_size(mem));
@@ -1020,9 +1046,9 @@ omap_i2c_probe(struct platform_device *pdev)
 
        platform_set_drvdata(pdev, dev);
 
-       dev->reg_shift = (pdata->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
+       dev->reg_shift = (dev->flags >> OMAP_I2C_FLAG_BUS_SHIFT__SHIFT) & 3;
 
-       if (pdata->rev == OMAP_I2C_IP_VERSION_2)
+       if (dev->dtrev == OMAP_I2C_IP_VERSION_2)
                dev->regs = (u8 *)reg_map_ip_v2;
        else
                dev->regs = (u8 *)reg_map_ip_v1;
@@ -1035,7 +1061,7 @@ omap_i2c_probe(struct platform_device *pdev)
        if (dev->rev <= OMAP_I2C_REV_ON_3430)
                dev->errata |= I2C_OMAP3_1P153;
 
-       if (!(pdata->flags & OMAP_I2C_FLAG_NO_FIFO)) {
+       if (!(dev->flags & OMAP_I2C_FLAG_NO_FIFO)) {
                u16 s;
 
                /* Set up the fifo size - Get total size */
@@ -1058,7 +1084,7 @@ omap_i2c_probe(struct platform_device *pdev)
                /* calculate wakeup latency constraint for MPU */
                if (dev->set_mpu_wkup_lat != NULL)
                        dev->latency = (1000000 * dev->fifo_size) /
-                                      (1000 * speed / 8);
+                                      (1000 * dev->speed / 8);
        }
 
        /* reset ASAP, clearing any IRQs */
@@ -1074,7 +1100,7 @@ omap_i2c_probe(struct platform_device *pdev)
        }
 
        dev_info(dev->dev, "bus %d rev%d.%d.%d at %d kHz\n", pdev->id,
-                pdata->rev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
+                dev->dtrev, dev->rev >> 4, dev->rev & 0xf, dev->speed);
 
        pm_runtime_put(dev->dev);
 
@@ -1085,6 +1111,7 @@ omap_i2c_probe(struct platform_device *pdev)
        strlcpy(adap->name, "OMAP I2C adapter", sizeof(adap->name));
        adap->algo = &omap_i2c_algo;
        adap->dev.parent = &pdev->dev;
+       adap->dev.of_node = pdev->dev.of_node;
 
        /* i2c device drivers may be active on return from add_adapter() */
        adap->nr = pdev->id;
@@ -1094,6 +1121,8 @@ omap_i2c_probe(struct platform_device *pdev)
                goto err_free_irq;
        }
 
+       of_i2c_register_devices(adap);
+
        return 0;
 
 err_free_irq:
@@ -1166,6 +1195,7 @@ static struct platform_driver omap_i2c_driver = {
                .name   = "omap_i2c",
                .owner  = THIS_MODULE,
                .pm     = OMAP_I2C_PM_OPS,
+               .of_match_table = of_match_ptr(omap_i2c_of_match),
        },
 };
 
index 5d2f8e13cf0e670e83b48b6dc243402d781b9758..54ab97bae0425f85f1a7df4982f56c3d74643694 100644 (file)
@@ -197,7 +197,7 @@ static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = {
                .enter = &intel_idle },
 };
 
-static int get_driver_data(int cstate)
+static long get_driver_data(int cstate)
 {
        int driver_data;
        switch (cstate) {
@@ -232,6 +232,7 @@ static int get_driver_data(int cstate)
  * @drv: cpuidle driver
  * @index: index of cpuidle state
  *
+ * Must be called under local_irq_disable().
  */
 static int intel_idle(struct cpuidle_device *dev,
                struct cpuidle_driver *drv, int index)
@@ -247,8 +248,6 @@ static int intel_idle(struct cpuidle_device *dev,
 
        cstate = (((eax) >> MWAIT_SUBSTATE_SIZE) & MWAIT_CSTATE_MASK) + 1;
 
-       local_irq_disable();
-
        /*
         * leave_mm() to avoid costly and often unnecessary wakeups
         * for flushing the user TLB's associated with the active mm.
@@ -348,7 +347,8 @@ static int intel_idle_probe(void)
        cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &mwait_substates);
 
        if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
-               !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
+           !(ecx & CPUID5_ECX_INTERRUPT_BREAK) ||
+           !mwait_substates)
                        return -ENODEV;
 
        pr_debug(PREFIX "MWAIT substates: 0x%x\n", mwait_substates);
@@ -394,7 +394,7 @@ static int intel_idle_probe(void)
        if (boot_cpu_has(X86_FEATURE_ARAT))     /* Always Reliable APIC Timer */
                lapic_timer_reliable_states = LAPIC_TIMER_ALWAYS_RELIABLE;
        else {
-               smp_call_function(__setup_broadcast_timer, (void *)true, 1);
+               on_each_cpu(__setup_broadcast_timer, (void *)true, 1);
                register_cpu_notifier(&setup_broadcast_notifier);
        }
 
@@ -471,71 +471,67 @@ static int intel_idle_cpuidle_driver_init(void)
        }
 
        if (auto_demotion_disable_flags)
-               smp_call_function(auto_demotion_disable, NULL, 1);
+               on_each_cpu(auto_demotion_disable, NULL, 1);
 
        return 0;
 }
 
 
 /*
- * intel_idle_cpuidle_devices_init()
+ * intel_idle_cpu_init()
  * allocate, initialize, register cpuidle_devices
+ * @cpu: cpu/core to initialize
  */
-static int intel_idle_cpuidle_devices_init(void)
+int intel_idle_cpu_init(int cpu)
 {
-       int i, cstate;
+       int cstate;
        struct cpuidle_device *dev;
 
-       intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
-       if (intel_idle_cpuidle_devices == NULL)
-               return -ENOMEM;
-
-       for_each_online_cpu(i) {
-               dev = per_cpu_ptr(intel_idle_cpuidle_devices, i);
+       dev = per_cpu_ptr(intel_idle_cpuidle_devices, cpu);
 
-               dev->state_count = 1;
+       dev->state_count = 1;
 
-               for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
-                       int num_substates;
+       for (cstate = 1; cstate < MWAIT_MAX_NUM_CSTATES; ++cstate) {
+               int num_substates;
 
-                       if (cstate > max_cstate) {
-                               printk(PREFIX "max_cstate %d reached\n",
-                                       max_cstate);
-                               break;
-                       }
+               if (cstate > max_cstate) {
+                       printk(PREFIX "max_cstate %d reached\n",
+                              max_cstate);
+                       break;
+               }
 
-                       /* does the state exist in CPUID.MWAIT? */
-                       num_substates = (mwait_substates >> ((cstate) * 4))
-                                               & MWAIT_SUBSTATE_MASK;
-                       if (num_substates == 0)
-                               continue;
-                       /* is the state not enabled? */
-                       if (cpuidle_state_table[cstate].enter == NULL) {
-                               continue;
-                       }
+               /* does the state exist in CPUID.MWAIT? */
+               num_substates = (mwait_substates >> ((cstate) * 4))
+                       & MWAIT_SUBSTATE_MASK;
+               if (num_substates == 0)
+                       continue;
+               /* is the state not enabled? */
+               if (cpuidle_state_table[cstate].enter == NULL)
+                       continue;
 
-                       dev->states_usage[dev->state_count].driver_data =
-                               (void *)get_driver_data(cstate);
+               dev->states_usage[dev->state_count].driver_data =
+                       (void *)get_driver_data(cstate);
 
                        dev->state_count += 1;
                }
+       dev->cpu = cpu;
 
-               dev->cpu = i;
-               if (cpuidle_register_device(dev)) {
-                       pr_debug(PREFIX "cpuidle_register_device %d failed!\n",
-                                i);
-                       intel_idle_cpuidle_devices_uninit();
-                       return -EIO;
-               }
+       if (cpuidle_register_device(dev)) {
+               pr_debug(PREFIX "cpuidle_register_device %d failed!\n", cpu);
+               intel_idle_cpuidle_devices_uninit();
+               return -EIO;
        }
 
+       if (auto_demotion_disable_flags)
+               smp_call_function_single(cpu, auto_demotion_disable, NULL, 1);
+
        return 0;
 }
-
+EXPORT_SYMBOL_GPL(intel_idle_cpu_init);
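Exporting intel_idle_cpu_init() lets per-CPU registration happen outside the
boot-time for_each_online_cpu() loop. A hypothetical caller (not part of this
diff) could wire it into a CPU hotplug notifier along these lines:

	/* hypothetical sketch, not in this patch */
	static int idle_cpu_notify(struct notifier_block *nb,
				   unsigned long action, void *hcpu)
	{
		if ((action & ~CPU_TASKS_FROZEN) == CPU_ONLINE)
			intel_idle_cpu_init((long)hcpu);
		return NOTIFY_OK;
	}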
 
 static int __init intel_idle_init(void)
 {
-       int retval;
+       int retval, i;
 
        /* Do not load intel_idle at all for now if idle= is passed */
        if (boot_option_idle_override != IDLE_NO_OVERRIDE)
@@ -553,10 +549,16 @@ static int __init intel_idle_init(void)
                return retval;
        }
 
-       retval = intel_idle_cpuidle_devices_init();
-       if (retval) {
-               cpuidle_unregister_driver(&intel_idle_driver);
-               return retval;
+       intel_idle_cpuidle_devices = alloc_percpu(struct cpuidle_device);
+       if (intel_idle_cpuidle_devices == NULL)
+               return -ENOMEM;
+
+       for_each_online_cpu(i) {
+               retval = intel_idle_cpu_init(i);
+               if (retval) {
+                       cpuidle_unregister_driver(&intel_idle_driver);
+                       return retval;
+               }
        }
 
        return 0;
@@ -568,7 +570,7 @@ static void __exit intel_idle_exit(void)
        cpuidle_unregister_driver(&intel_idle_driver);
 
        if (lapic_timer_reliable_states != LAPIC_TIMER_ALWAYS_RELIABLE) {
-               smp_call_function(__setup_broadcast_timer, (void *)false, 1);
+               on_each_cpu(__setup_broadcast_timer, (void *)false, 1);
                unregister_cpu_notifier(&setup_broadcast_notifier);
        }
 
index 0f9a84c1046ab38776abe891ef0ff98ea0c19c98..eb0add311dc8be40ff8876f8b1bcd7d3f2e0b161 100644 (file)
@@ -55,6 +55,7 @@ source "drivers/infiniband/hw/nes/Kconfig"
 source "drivers/infiniband/ulp/ipoib/Kconfig"
 
 source "drivers/infiniband/ulp/srp/Kconfig"
+source "drivers/infiniband/ulp/srpt/Kconfig"
 
 source "drivers/infiniband/ulp/iser/Kconfig"
 
index 9cc7a47d3e6737f22379384ac60366ea183d65dc..a3b2d8eac86ef0089a41dff9686c4dd752b0ca4a 100644 (file)
@@ -10,4 +10,5 @@ obj-$(CONFIG_MLX4_INFINIBAND)         += hw/mlx4/
 obj-$(CONFIG_INFINIBAND_NES)           += hw/nes/
 obj-$(CONFIG_INFINIBAND_IPOIB)         += ulp/ipoib/
 obj-$(CONFIG_INFINIBAND_SRP)           += ulp/srp/
+obj-$(CONFIG_INFINIBAND_SRPT)          += ulp/srpt/
 obj-$(CONFIG_INFINIBAND_ISER)          += ulp/iser/
index b37b0c02a7b9d7d81cd5a55471e28d2780080377..5034a87cc72dc887553fdfb503e42d8f2e0005aa 100644 (file)
@@ -808,9 +808,12 @@ static ssize_t ucma_accept(struct ucma_file *file, const char __user *inbuf,
                return PTR_ERR(ctx);
 
        if (cmd.conn_param.valid) {
-               ctx->uid = cmd.uid;
                ucma_copy_conn_param(&conn_param, &cmd.conn_param);
+               mutex_lock(&file->mut);
                ret = rdma_accept(ctx->cm_id, &conn_param);
+               if (!ret)
+                       ctx->uid = cmd.uid;
+               mutex_unlock(&file->mut);
        } else
                ret = rdma_accept(ctx->cm_id, NULL);
 
index b930da4c0c632e64019c38b0ebcab9562e15bf65..4d27e4c3fe34badbe941e735edaf9d8fb33b61f2 100644 (file)
@@ -1485,6 +1485,7 @@ ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
                qp->event_handler = attr.event_handler;
                qp->qp_context    = attr.qp_context;
                qp->qp_type       = attr.qp_type;
+               atomic_set(&qp->usecnt, 0);
                atomic_inc(&pd->usecnt);
                atomic_inc(&attr.send_cq->usecnt);
                if (attr.recv_cq)
index 602b1bd723a963aa49904fc0a907d12d965b009e..575b78045aafd1b5a861f9fad753f0108dd7e5d0 100644 (file)
@@ -421,6 +421,7 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
                qp->uobject    = NULL;
                qp->qp_type    = qp_init_attr->qp_type;
 
+               atomic_set(&qp->usecnt, 0);
                if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
                        qp->event_handler = __ib_shared_qp_event_handler;
                        qp->qp_context = qp;
@@ -430,7 +431,6 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd,
                        qp->xrcd = qp_init_attr->xrcd;
                        atomic_inc(&qp_init_attr->xrcd->usecnt);
                        INIT_LIST_HEAD(&qp->open_list);
-                       atomic_set(&qp->usecnt, 0);
 
                        real_qp = qp;
                        qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
index b7d4216db3c35636ff91734e33d3ad0bcef357b1..a4de9d58e9b4749edda15575ed4c8a4e254678c1 100644 (file)
@@ -89,7 +89,7 @@ static int create_file(const char *name, umode_t mode,
                error = ipathfs_mknod(parent->d_inode, *dentry,
                                      mode, fops, data);
        else
-               error = PTR_ERR(dentry);
+               error = PTR_ERR(*dentry);
        mutex_unlock(&parent->d_inode->i_mutex);
 
        return error;
index 95c94d8f02543ed89631f424a3c39c31c74cfa13..259b0670b51cd6cb430c859f9012796a9c2af954 100644 (file)
@@ -257,12 +257,9 @@ static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                        return IB_MAD_RESULT_SUCCESS;
 
                /*
-                * Don't process SMInfo queries or vendor-specific
-                * MADs -- the SMA can't handle them.
+                * Don't process SMInfo queries -- the SMA can't handle them.
                 */
-               if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO ||
-                   ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) ==
-                    IB_SMP_ATTR_VENDOR_MASK))
+               if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
                        return IB_MAD_RESULT_SUCCESS;
        } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
                   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
index 7013da5e9eda8e9df0b8511568b4b2dd18ea177b..7140199f562ead43fb9062d0c89a1faa2568dc04 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2009 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
index 568b4f11380ae545d264a48e6fae51784ddf5533..c438e4691b3cde3ec4c3874a41c07f73ec4c59b9 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2009 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
index 425065b36b8c4b68c160c3c7e5bcf1a6c6127081..a4972abedef1e967acafc123cfe3fe14bbbabecb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2009 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -233,6 +233,7 @@ static int send_mpa_reject(struct nes_cm_node *cm_node)
        u8 *start_ptr = &start_addr;
        u8 **start_buff = &start_ptr;
        u16 buff_len = 0;
+       struct ietf_mpa_v1 *mpa_frame;
 
        skb = dev_alloc_skb(MAX_CM_BUFFER);
        if (!skb) {
@@ -242,6 +243,8 @@ static int send_mpa_reject(struct nes_cm_node *cm_node)
 
        /* send an MPA reject frame */
        cm_build_mpa_frame(cm_node, start_buff, &buff_len, NULL, MPA_KEY_REPLY);
+       mpa_frame = (struct ietf_mpa_v1 *)*start_buff;
+       mpa_frame->flags |= IETF_MPA_FLAGS_REJECT;
        form_cm_frame(skb, cm_node, NULL, 0, *start_buff, buff_len, SET_ACK | SET_FIN);
 
        cm_node->state = NES_CM_STATE_FIN_WAIT1;
@@ -1360,8 +1363,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
                                if (!memcmp(nesadapter->arp_table[arpindex].mac_addr,
                                            neigh->ha, ETH_ALEN)) {
                                        /* Mac address same as in nes_arp_table */
-                                       ip_rt_put(rt);
-                                       return rc;
+                                       goto out;
                                }
 
                                nes_manage_arp_cache(nesvnic->netdev,
@@ -1377,6 +1379,8 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi
                        neigh_event_send(neigh, NULL);
                }
        }
+
+out:
        rcu_read_unlock();
        ip_rt_put(rt);
        return rc;
index bdfa1fbb35fcea66d1190cc94b237554c029006b..4646e66660874c4a6b2a5b9a4290c0132096e9f7 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2009 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index b4393a16099dc69df36fa5f9f2f19d42e2b33d4f..a69eef16d72d035023813c3b5af0c1174266c986 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2009 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 055f4b545df00a73069da6978a0164895e17754c..d42c9f435b1b7a62ad55a331c1f6bc76c84fdbab 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2009 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 0b590e152c6abeeb84e78130a68d62ab742eca5f..d748e4b31b8ddf2de875fbd57e7e3277ed0171f8 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2006 - 2009 Intel Corporation.  All rights reserved.
+* Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
index b3b2a240c6e9471410ea3b4a6286849186744c92..3ba7be36945207bce1448d2edb24cab2783bfa88 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2009 Intel-NE, Inc.  All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel-NE, Inc.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 8c8af254555a2282642a6cc3bacf3597d5628044..4f7f701c4a817c6829646338edbeaedcd5e68410 100644 (file)
@@ -1,5 +1,5 @@
 /*
-* Copyright (c) 2010 Intel-NE, Inc.  All rights reserved.
+* Copyright (c) 2006 - 2011 Intel-NE, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
index 4b3fa711a2470edc4485f286a3ae4ced7adec74b..f3a3ecf8d09ebfbff9bcc37b1a4beac97d60ebeb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2009 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 71e133ab209b3c30522681eb10ba163d112f0190..4926de744488e71b4ec6e618825ed43218b2f519 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2009 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
  * Copyright (c) 2005 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005 Cisco Systems.  All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
index 8b4c2ff548887e967c567a600e50c828aa7946d8..e98f4fc0b7683230136490c5fdf0f3567c01901d 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2009 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 5095bc41c6ccdd9203f6259a3a1be294aca9b563..0927b5cc65d33ac36850a1396c23e1b046b54f6b 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2009 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -3427,6 +3427,8 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
                        set_wqe_32bit_value(wqe->wqe_words,
                                            NES_IWARP_SQ_FMR_WQE_LENGTH_LOW_IDX,
                                            ib_wr->wr.fast_reg.length);
+                       set_wqe_32bit_value(wqe->wqe_words,
+                                           NES_IWARP_SQ_FMR_WQE_LENGTH_HIGH_IDX, 0);
                        set_wqe_32bit_value(wqe->wqe_words,
                                            NES_IWARP_SQ_FMR_WQE_MR_STAG_IDX,
                                            ib_wr->wr.fast_reg.rkey);
@@ -3724,7 +3726,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
                                                entry->opcode = IB_WC_SEND;
                                                break;
                                        case NES_IWARP_SQ_OP_LOCINV:
-                                               entry->opcode = IB_WR_LOCAL_INV;
+                                               entry->opcode = IB_WC_LOCAL_INV;
                                                break;
                                        case NES_IWARP_SQ_OP_FAST_REG:
                                                entry->opcode = IB_WC_FAST_REG_MR;
index fe6b6e92fa901de9d4354de95783610cc271d571..0eff7c44d76b00bbcb1bbe06602dd54606d390ee 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2006 - 2009 Intel Corporation.  All rights reserved.
+ * Copyright (c) 2006 - 2011 Intel Corporation.  All rights reserved.
  * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
index 4f18e2d332dfd79ba8dcfc66a6c9251347a562c1..d0c64d514813909c7a863a0f29939e64123b7cba 100644 (file)
@@ -2105,7 +2105,7 @@ static void alloc_dummy_hdrq(struct qib_devdata *dd)
        dd->cspec->dummy_hdrq = dma_alloc_coherent(&dd->pcidev->dev,
                                        dd->rcd[0]->rcvhdrq_size,
                                        &dd->cspec->dummy_hdrq_phys,
-                                       GFP_KERNEL | __GFP_COMP);
+                                       GFP_ATOMIC | __GFP_COMP);
        if (!dd->cspec->dummy_hdrq) {
                qib_devinfo(dd->pcidev, "Couldn't allocate dummy hdrq\n");
                /* fallback to just 0'ing */
index f695061d688e06ce026c312c79415f04a51aac70..0fde788e110087fefa825d6f7b9353fbbb572bd2 100644 (file)
@@ -560,7 +560,7 @@ static int qib_tune_pcie_coalesce(struct qib_devdata *dd)
  * BIOS may not set PCIe bus-utilization parameters for best performance.
  * Check and optionally adjust them to maximize our throughput.
  */
-static int qib_pcie_caps = 0x51;
+static int qib_pcie_caps;
 module_param_named(pcie_caps, qib_pcie_caps, int, S_IRUGO);
 MODULE_PARM_DESC(pcie_caps, "Max PCIe tuning: Payload (0..3), ReadReq (4..7)");
 
diff --git a/drivers/infiniband/ulp/srpt/Kconfig b/drivers/infiniband/ulp/srpt/Kconfig
new file mode 100644 (file)
index 0000000..31ee83d
--- /dev/null
@@ -0,0 +1,12 @@
+config INFINIBAND_SRPT
+       tristate "InfiniBand SCSI RDMA Protocol target support"
+       depends on INFINIBAND && TARGET_CORE
+       ---help---
+
+         Support for the SCSI RDMA Protocol (SRP) Target driver. SRP
+         allows an initiator to access a block storage device on
+         another host (the target) over a network that supports the
+         RDMA protocol. Currently the RDMA protocol is supported by
+         InfiniBand and by iWARP network hardware. More
+         information about the SRP protocol can be found on the website
+         of the INCITS T10 technical committee (http://www.t10.org/).
diff --git a/drivers/infiniband/ulp/srpt/Makefile b/drivers/infiniband/ulp/srpt/Makefile
new file mode 100644 (file)
index 0000000..e3ee4bd
--- /dev/null
@@ -0,0 +1,2 @@
+ccflags-y                      := -Idrivers/target
+obj-$(CONFIG_INFINIBAND_SRPT)  += ib_srpt.o
diff --git a/drivers/infiniband/ulp/srpt/ib_dm_mad.h b/drivers/infiniband/ulp/srpt/ib_dm_mad.h
new file mode 100644 (file)
index 0000000..fb1de1f
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_DM_MAD_H
+#define IB_DM_MAD_H
+
+#include <linux/types.h>
+
+#include <rdma/ib_mad.h>
+
+enum {
+       /*
+        * See also section 13.4.7 Status Field, table 115 MAD Common Status
+        * Field Bit Values and also section 16.3.1.1 Status Field in the
+        * InfiniBand Architecture Specification.
+        */
+       DM_MAD_STATUS_UNSUP_METHOD = 0x0008,
+       DM_MAD_STATUS_UNSUP_METHOD_ATTR = 0x000c,
+       DM_MAD_STATUS_INVALID_FIELD = 0x001c,
+       DM_MAD_STATUS_NO_IOC = 0x0100,
+
+       /*
+        * See also the Device Management chapter, section 16.3.3 Attributes,
+        * table 279 Device Management Attributes in the InfiniBand
+        * Architecture Specification.
+        */
+       DM_ATTR_CLASS_PORT_INFO = 0x01,
+       DM_ATTR_IOU_INFO = 0x10,
+       DM_ATTR_IOC_PROFILE = 0x11,
+       DM_ATTR_SVC_ENTRIES = 0x12
+};
+
+struct ib_dm_hdr {
+       u8 reserved[28];
+};
+
+/*
+ * Structure of management datagram sent by the SRP target implementation.
+ * Contains a management datagram header, reliable multi-packet transaction
+ * protocol (RMPP) header and ib_dm_hdr. Notes:
+ * - The SRP target implementation does not use RMPP or ib_dm_hdr when sending
+ *   management datagrams.
+ * - The header size must be exactly 64 bytes (IB_MGMT_DEVICE_HDR), since this
+ *   is the header size that is passed to ib_create_send_mad() in ib_srpt.c.
+ * - The maximum supported size for a management datagram when not using RMPP
+ *   is 256 bytes -- 64 bytes header and 192 (IB_MGMT_DEVICE_DATA) bytes data.
+ */
+struct ib_dm_mad {
+       struct ib_mad_hdr mad_hdr;
+       struct ib_rmpp_hdr rmpp_hdr;
+       struct ib_dm_hdr dm_hdr;
+       u8 data[IB_MGMT_DEVICE_DATA];
+};
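An illustrative standalone check (not part of this patch) of the size constraint described in the comment above, assuming the usual rdma/ib_mad.h sizes of 24 bytes for struct ib_mad_hdr and 12 bytes for struct ib_rmpp_hdr; the data[] payload then starts at byte 64 (IB_MGMT_DEVICE_HDR) and the whole MAD occupies 64 + 192 = 256 bytes:

#include <stddef.h>

/*
 * Stand-in layout: fixed-size byte arrays replace the real header
 * structures so the offsets can be checked without kernel headers.
 */
struct dm_mad_layout {
        unsigned char mad_hdr[24];      /* stand-in for struct ib_mad_hdr  */
        unsigned char rmpp_hdr[12];     /* stand-in for struct ib_rmpp_hdr */
        unsigned char dm_hdr[28];       /* stand-in for struct ib_dm_hdr   */
        unsigned char data[192];        /* IB_MGMT_DEVICE_DATA             */
};

_Static_assert(offsetof(struct dm_mad_layout, data) == 64,
               "device management header must be 64 bytes");
_Static_assert(sizeof(struct dm_mad_layout) == 256,
               "a MAD sent without RMPP is limited to 256 bytes");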
+
+/*
+ * IOUnitInfo as defined in section 16.3.3.3 IOUnitInfo of the InfiniBand
+ * Architecture Specification.
+ */
+struct ib_dm_iou_info {
+       __be16 change_id;
+       u8 max_controllers;
+       u8 op_rom;
+       u8 controller_list[128];
+};
+
+/*
+ * IOControllerProfile as defined in section 16.3.3.4 IOControllerProfile of
+ * the InfiniBand Architecture Specification.
+ */
+struct ib_dm_ioc_profile {
+       __be64 guid;
+       __be32 vendor_id;
+       __be32 device_id;
+       __be16 device_version;
+       __be16 reserved1;
+       __be32 subsys_vendor_id;
+       __be32 subsys_device_id;
+       __be16 io_class;
+       __be16 io_subclass;
+       __be16 protocol;
+       __be16 protocol_version;
+       __be16 service_conn;
+       __be16 initiators_supported;
+       __be16 send_queue_depth;
+       u8 reserved2;
+       u8 rdma_read_depth;
+       __be32 send_size;
+       __be32 rdma_size;
+       u8 op_cap_mask;
+       u8 svc_cap_mask;
+       u8 num_svc_entries;
+       u8 reserved3[9];
+       u8 id_string[64];
+};
+
+struct ib_dm_svc_entry {
+       u8 name[40];
+       __be64 id;
+};
+
+/*
+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.8 in the T10 SRP r16a document.
+ */
+struct ib_dm_svc_entries {
+       struct ib_dm_svc_entry service_entries[4];
+};
+
+#endif
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
new file mode 100644 (file)
index 0000000..2b73d43
--- /dev/null
@@ -0,0 +1,4070 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
+ * Copyright (C) 2008 - 2011 Bart Van Assche <bvanassche@acm.org>.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/ctype.h>
+#include <linux/kthread.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/atomic.h>
+#include <scsi/scsi_tcq.h>
+#include <target/configfs_macros.h>
+#include <target/target_core_base.h>
+#include <target/target_core_fabric_configfs.h>
+#include <target/target_core_fabric.h>
+#include <target/target_core_configfs.h>
+#include "ib_srpt.h"
+
+/* Name of this kernel module. */
+#define DRV_NAME               "ib_srpt"
+#define DRV_VERSION            "2.0.0"
+#define DRV_RELDATE            "2011-02-14"
+
+#define SRPT_ID_STRING "Linux SRP target"
+
+#undef pr_fmt
+#define pr_fmt(fmt) DRV_NAME " " fmt
+
+MODULE_AUTHOR("Vu Pham and Bart Van Assche");
+MODULE_DESCRIPTION("InfiniBand SCSI RDMA Protocol target "
+                  "v" DRV_VERSION " (" DRV_RELDATE ")");
+MODULE_LICENSE("Dual BSD/GPL");
+
+/*
+ * Global Variables
+ */
+
+static u64 srpt_service_guid;
+static DEFINE_SPINLOCK(srpt_dev_lock); /* Protects srpt_dev_list. */
+static LIST_HEAD(srpt_dev_list);       /* List of srpt_device structures. */
+
+static unsigned srp_max_req_size = DEFAULT_MAX_REQ_SIZE;
+module_param(srp_max_req_size, int, 0444);
+MODULE_PARM_DESC(srp_max_req_size,
+                "Maximum size of SRP request messages in bytes.");
+
+static int srpt_srq_size = DEFAULT_SRPT_SRQ_SIZE;
+module_param(srpt_srq_size, int, 0444);
+MODULE_PARM_DESC(srpt_srq_size,
+                "Shared receive queue (SRQ) size.");
+
+static int srpt_get_u64_x(char *buffer, struct kernel_param *kp)
+{
+       return sprintf(buffer, "0x%016llx", *(u64 *)kp->arg);
+}
+module_param_call(srpt_service_guid, NULL, srpt_get_u64_x, &srpt_service_guid,
+                 0444);
+MODULE_PARM_DESC(srpt_service_guid,
+                "Use this value for ioc_guid, id_ext, and cm_listen_id"
+                " instead of the node_guid of the first HCA.");
+
+static struct ib_client srpt_client;
+static struct target_fabric_configfs *srpt_target;
+static void srpt_release_channel(struct srpt_rdma_ch *ch);
+static int srpt_queue_status(struct se_cmd *cmd);
+
+/**
+ * opposite_dma_dir() - Swap DMA_TO_DEVICE and DMA_FROM_DEVICE.
+ */
+static inline
+enum dma_data_direction opposite_dma_dir(enum dma_data_direction dir)
+{
+       switch (dir) {
+       case DMA_TO_DEVICE:     return DMA_FROM_DEVICE;
+       case DMA_FROM_DEVICE:   return DMA_TO_DEVICE;
+       default:                return dir;
+       }
+}
+
+/**
+ * srpt_sdev_name() - Return the name associated with the HCA.
+ *
+ * Examples are ib0, ib1, ...
+ */
+static inline const char *srpt_sdev_name(struct srpt_device *sdev)
+{
+       return sdev->device->name;
+}
+
+static enum rdma_ch_state srpt_get_ch_state(struct srpt_rdma_ch *ch)
+{
+       unsigned long flags;
+       enum rdma_ch_state state;
+
+       spin_lock_irqsave(&ch->spinlock, flags);
+       state = ch->state;
+       spin_unlock_irqrestore(&ch->spinlock, flags);
+       return state;
+}
+
+static enum rdma_ch_state
+srpt_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state new_state)
+{
+       unsigned long flags;
+       enum rdma_ch_state prev;
+
+       spin_lock_irqsave(&ch->spinlock, flags);
+       prev = ch->state;
+       ch->state = new_state;
+       spin_unlock_irqrestore(&ch->spinlock, flags);
+       return prev;
+}
+
+/**
+ * srpt_test_and_set_ch_state() - Test and set the channel state.
+ *
+ * Returns true if and only if the channel state has been set to the new state.
+ */
+static bool
+srpt_test_and_set_ch_state(struct srpt_rdma_ch *ch, enum rdma_ch_state old,
+                          enum rdma_ch_state new)
+{
+       unsigned long flags;
+       enum rdma_ch_state prev;
+
+       spin_lock_irqsave(&ch->spinlock, flags);
+       prev = ch->state;
+       if (prev == old)
+               ch->state = new;
+       spin_unlock_irqrestore(&ch->spinlock, flags);
+       return prev == old;
+}
+
+/**
+ * srpt_event_handler() - Asynchronous IB event callback function.
+ *
+ * Callback function called by the InfiniBand core when an asynchronous IB
+ * event occurs. This callback may occur in interrupt context. See also
+ * section 11.5.2, Set Asynchronous Event Handler in the InfiniBand
+ * Architecture Specification.
+ */
+static void srpt_event_handler(struct ib_event_handler *handler,
+                              struct ib_event *event)
+{
+       struct srpt_device *sdev;
+       struct srpt_port *sport;
+
+       sdev = ib_get_client_data(event->device, &srpt_client);
+       if (!sdev || sdev->device != event->device)
+               return;
+
+       pr_debug("ASYNC event= %d on device= %s\n", event->event,
+                srpt_sdev_name(sdev));
+
+       switch (event->event) {
+       case IB_EVENT_PORT_ERR:
+               if (event->element.port_num <= sdev->device->phys_port_cnt) {
+                       sport = &sdev->port[event->element.port_num - 1];
+                       sport->lid = 0;
+                       sport->sm_lid = 0;
+               }
+               break;
+       case IB_EVENT_PORT_ACTIVE:
+       case IB_EVENT_LID_CHANGE:
+       case IB_EVENT_PKEY_CHANGE:
+       case IB_EVENT_SM_CHANGE:
+       case IB_EVENT_CLIENT_REREGISTER:
+               /* Refresh port data asynchronously. */
+               if (event->element.port_num <= sdev->device->phys_port_cnt) {
+                       sport = &sdev->port[event->element.port_num - 1];
+                       if (!sport->lid && !sport->sm_lid)
+                               schedule_work(&sport->work);
+               }
+               break;
+       default:
+               printk(KERN_ERR "received unrecognized IB event %d\n",
+                      event->event);
+               break;
+       }
+}
+
+/**
+ * srpt_srq_event() - SRQ event callback function.
+ */
+static void srpt_srq_event(struct ib_event *event, void *ctx)
+{
+       printk(KERN_INFO "SRQ event %d\n", event->event);
+}
+
+/**
+ * srpt_qp_event() - QP event callback function.
+ */
+static void srpt_qp_event(struct ib_event *event, struct srpt_rdma_ch *ch)
+{
+       pr_debug("QP event %d on cm_id=%p sess_name=%s state=%d\n",
+                event->event, ch->cm_id, ch->sess_name, srpt_get_ch_state(ch));
+
+       switch (event->event) {
+       case IB_EVENT_COMM_EST:
+               ib_cm_notify(ch->cm_id, event->event);
+               break;
+       case IB_EVENT_QP_LAST_WQE_REACHED:
+               if (srpt_test_and_set_ch_state(ch, CH_DRAINING,
+                                              CH_RELEASING))
+                       srpt_release_channel(ch);
+               else
+                       pr_debug("%s: state %d - ignored LAST_WQE.\n",
+                                ch->sess_name, srpt_get_ch_state(ch));
+               break;
+       default:
+               printk(KERN_ERR "received unrecognized IB QP event %d\n",
+                      event->event);
+               break;
+       }
+}
+
+/**
+ * srpt_set_ioc() - Helper function for initializing an IOUnitInfo structure.
+ *
+ * @slot: one-based slot number.
+ * @value: four-bit value.
+ *
+ * Copies the lowest four bits of @value into element @slot of the array of
+ * four-bit elements c_list (the controller list). The slot index is one-based.
+ */
+static void srpt_set_ioc(u8 *c_list, u32 slot, u8 value)
+{
+       u16 id;
+       u8 tmp;
+
+       id = (slot - 1) / 2;
+       if (slot & 0x1) {
+               tmp = c_list[id] & 0xf;
+               c_list[id] = (value << 4) | tmp;
+       } else {
+               tmp = c_list[id] & 0xf0;
+               c_list[id] = (value & 0xf) | tmp;
+       }
+}
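A minimal userspace sketch of the one-based, four-bit slot packing performed by srpt_set_ioc() above; the helper name and the 128-byte array mirror the controller_list field of ib_dm_iou_info, but the code is only an illustration, not part of the patch:

#include <stdio.h>
#include <stdint.h>

/* Illustrative re-implementation of the nibble packing in srpt_set_ioc(). */
static void set_ioc(uint8_t *c_list, unsigned int slot, uint8_t value)
{
        unsigned int id = (slot - 1) / 2;

        if (slot & 0x1)         /* odd slot: high nibble */
                c_list[id] = (value << 4) | (c_list[id] & 0x0f);
        else                    /* even slot: low nibble */
                c_list[id] = (value & 0x0f) | (c_list[id] & 0xf0);
}

int main(void)
{
        uint8_t c_list[128] = { 0 };

        set_ioc(c_list, 1, 1);  /* slot 1: present */
        set_ioc(c_list, 2, 0);  /* slot 2: empty   */
        printf("c_list[0] = 0x%02x\n", c_list[0]);      /* prints 0x10 */
        return 0;
}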
+
+/**
+ * srpt_get_class_port_info() - Copy ClassPortInfo to a management datagram.
+ *
+ * See also section 16.3.3.1 ClassPortInfo in the InfiniBand Architecture
+ * Specification.
+ */
+static void srpt_get_class_port_info(struct ib_dm_mad *mad)
+{
+       struct ib_class_port_info *cif;
+
+       cif = (struct ib_class_port_info *)mad->data;
+       memset(cif, 0, sizeof *cif);
+       cif->base_version = 1;
+       cif->class_version = 1;
+       cif->resp_time_value = 20;
+
+       mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_iou() - Write IOUnitInfo to a management datagram.
+ *
+ * See also section 16.3.3.3 IOUnitInfo in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.6 in the SRP r16a document.
+ */
+static void srpt_get_iou(struct ib_dm_mad *mad)
+{
+       struct ib_dm_iou_info *ioui;
+       u8 slot;
+       int i;
+
+       ioui = (struct ib_dm_iou_info *)mad->data;
+       ioui->change_id = __constant_cpu_to_be16(1);
+       ioui->max_controllers = 16;
+
+       /* set present for slot 1 and empty for the rest */
+       srpt_set_ioc(ioui->controller_list, 1, 1);
+       for (i = 1, slot = 2; i < 16; i++, slot++)
+               srpt_set_ioc(ioui->controller_list, slot, 0);
+
+       mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_ioc() - Write IOControllerProfile to a management datagram.
+ *
+ * See also section 16.3.3.4 IOControllerProfile in the InfiniBand
+ * Architecture Specification. See also section B.7, table B.7 in the SRP
+ * r16a document.
+ */
+static void srpt_get_ioc(struct srpt_port *sport, u32 slot,
+                        struct ib_dm_mad *mad)
+{
+       struct srpt_device *sdev = sport->sdev;
+       struct ib_dm_ioc_profile *iocp;
+
+       iocp = (struct ib_dm_ioc_profile *)mad->data;
+
+       if (!slot || slot > 16) {
+               mad->mad_hdr.status
+                       = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+               return;
+       }
+
+       if (slot > 2) {
+               mad->mad_hdr.status
+                       = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+               return;
+       }
+
+       memset(iocp, 0, sizeof *iocp);
+       strcpy(iocp->id_string, SRPT_ID_STRING);
+       iocp->guid = cpu_to_be64(srpt_service_guid);
+       iocp->vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
+       iocp->device_id = cpu_to_be32(sdev->dev_attr.vendor_part_id);
+       iocp->device_version = cpu_to_be16(sdev->dev_attr.hw_ver);
+       iocp->subsys_vendor_id = cpu_to_be32(sdev->dev_attr.vendor_id);
+       iocp->subsys_device_id = 0x0;
+       iocp->io_class = __constant_cpu_to_be16(SRP_REV16A_IB_IO_CLASS);
+       iocp->io_subclass = __constant_cpu_to_be16(SRP_IO_SUBCLASS);
+       iocp->protocol = __constant_cpu_to_be16(SRP_PROTOCOL);
+       iocp->protocol_version = __constant_cpu_to_be16(SRP_PROTOCOL_VERSION);
+       iocp->send_queue_depth = cpu_to_be16(sdev->srq_size);
+       iocp->rdma_read_depth = 4;
+       iocp->send_size = cpu_to_be32(srp_max_req_size);
+       iocp->rdma_size = cpu_to_be32(min(sport->port_attrib.srp_max_rdma_size,
+                                         1U << 24));
+       iocp->num_svc_entries = 1;
+       iocp->op_cap_mask = SRP_SEND_TO_IOC | SRP_SEND_FROM_IOC |
+               SRP_RDMA_READ_FROM_IOC | SRP_RDMA_WRITE_FROM_IOC;
+
+       mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_get_svc_entries() - Write ServiceEntries to a management datagram.
+ *
+ * See also section 16.3.3.5 ServiceEntries in the InfiniBand Architecture
+ * Specification. See also section B.7, table B.8 in the SRP r16a document.
+ */
+static void srpt_get_svc_entries(u64 ioc_guid,
+                                u16 slot, u8 hi, u8 lo, struct ib_dm_mad *mad)
+{
+       struct ib_dm_svc_entries *svc_entries;
+
+       WARN_ON(!ioc_guid);
+
+       if (!slot || slot > 16) {
+               mad->mad_hdr.status
+                       = __constant_cpu_to_be16(DM_MAD_STATUS_INVALID_FIELD);
+               return;
+       }
+
+       if (slot > 2 || lo > hi || hi > 1) {
+               mad->mad_hdr.status
+                       = __constant_cpu_to_be16(DM_MAD_STATUS_NO_IOC);
+               return;
+       }
+
+       svc_entries = (struct ib_dm_svc_entries *)mad->data;
+       memset(svc_entries, 0, sizeof *svc_entries);
+       svc_entries->service_entries[0].id = cpu_to_be64(ioc_guid);
+       snprintf(svc_entries->service_entries[0].name,
+                sizeof(svc_entries->service_entries[0].name),
+                "%s%016llx",
+                SRP_SERVICE_NAME_PREFIX,
+                ioc_guid);
+
+       mad->mad_hdr.status = 0;
+}
+
+/**
+ * srpt_mgmt_method_get() - Process a received management datagram.
+ * @sp:      source port through which the MAD has been received.
+ * @rq_mad:  received MAD.
+ * @rsp_mad: response MAD.
+ */
+static void srpt_mgmt_method_get(struct srpt_port *sp, struct ib_mad *rq_mad,
+                                struct ib_dm_mad *rsp_mad)
+{
+       u16 attr_id;
+       u32 slot;
+       u8 hi, lo;
+
+       attr_id = be16_to_cpu(rq_mad->mad_hdr.attr_id);
+       switch (attr_id) {
+       case DM_ATTR_CLASS_PORT_INFO:
+               srpt_get_class_port_info(rsp_mad);
+               break;
+       case DM_ATTR_IOU_INFO:
+               srpt_get_iou(rsp_mad);
+               break;
+       case DM_ATTR_IOC_PROFILE:
+               slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
+               srpt_get_ioc(sp, slot, rsp_mad);
+               break;
+       case DM_ATTR_SVC_ENTRIES:
+               slot = be32_to_cpu(rq_mad->mad_hdr.attr_mod);
+               hi = (u8) ((slot >> 8) & 0xff);
+               lo = (u8) (slot & 0xff);
+               slot = (u16) ((slot >> 16) & 0xffff);
+               srpt_get_svc_entries(srpt_service_guid,
+                                    slot, hi, lo, rsp_mad);
+               break;
+       default:
+               rsp_mad->mad_hdr.status =
+                   __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+               break;
+       }
+}
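A small sketch of how the DM_ATTR_SVC_ENTRIES case above unpacks the 32-bit attribute modifier into a slot (upper 16 bits) and the hi/lo service-entry range (the next two bytes); the sample value is made up purely for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t attr_mod = 0x00010100; /* made-up example: slot 1, hi 1, lo 0 */

        uint8_t  hi   = (attr_mod >> 8) & 0xff;
        uint8_t  lo   = attr_mod & 0xff;
        uint16_t slot = (attr_mod >> 16) & 0xffff;

        printf("slot=%u hi=%u lo=%u\n", slot, hi, lo);  /* slot=1 hi=1 lo=0 */
        return 0;
}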
+
+/**
+ * srpt_mad_send_handler() - Post MAD-send callback function.
+ */
+static void srpt_mad_send_handler(struct ib_mad_agent *mad_agent,
+                                 struct ib_mad_send_wc *mad_wc)
+{
+       ib_destroy_ah(mad_wc->send_buf->ah);
+       ib_free_send_mad(mad_wc->send_buf);
+}
+
+/**
+ * srpt_mad_recv_handler() - MAD reception callback function.
+ */
+static void srpt_mad_recv_handler(struct ib_mad_agent *mad_agent,
+                                 struct ib_mad_recv_wc *mad_wc)
+{
+       struct srpt_port *sport = (struct srpt_port *)mad_agent->context;
+       struct ib_ah *ah;
+       struct ib_mad_send_buf *rsp;
+       struct ib_dm_mad *dm_mad;
+
+       if (!mad_wc || !mad_wc->recv_buf.mad)
+               return;
+
+       ah = ib_create_ah_from_wc(mad_agent->qp->pd, mad_wc->wc,
+                                 mad_wc->recv_buf.grh, mad_agent->port_num);
+       if (IS_ERR(ah))
+               goto err;
+
+       BUILD_BUG_ON(offsetof(struct ib_dm_mad, data) != IB_MGMT_DEVICE_HDR);
+
+       rsp = ib_create_send_mad(mad_agent, mad_wc->wc->src_qp,
+                                mad_wc->wc->pkey_index, 0,
+                                IB_MGMT_DEVICE_HDR, IB_MGMT_DEVICE_DATA,
+                                GFP_KERNEL);
+       if (IS_ERR(rsp))
+               goto err_rsp;
+
+       rsp->ah = ah;
+
+       dm_mad = rsp->mad;
+       memcpy(dm_mad, mad_wc->recv_buf.mad, sizeof *dm_mad);
+       dm_mad->mad_hdr.method = IB_MGMT_METHOD_GET_RESP;
+       dm_mad->mad_hdr.status = 0;
+
+       switch (mad_wc->recv_buf.mad->mad_hdr.method) {
+       case IB_MGMT_METHOD_GET:
+               srpt_mgmt_method_get(sport, mad_wc->recv_buf.mad, dm_mad);
+               break;
+       case IB_MGMT_METHOD_SET:
+               dm_mad->mad_hdr.status =
+                   __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD_ATTR);
+               break;
+       default:
+               dm_mad->mad_hdr.status =
+                   __constant_cpu_to_be16(DM_MAD_STATUS_UNSUP_METHOD);
+               break;
+       }
+
+       if (!ib_post_send_mad(rsp, NULL)) {
+               ib_free_recv_mad(mad_wc);
+               /* will destroy_ah & free_send_mad in send completion */
+               return;
+       }
+
+       ib_free_send_mad(rsp);
+
+err_rsp:
+       ib_destroy_ah(ah);
+err:
+       ib_free_recv_mad(mad_wc);
+}
+
+/**
+ * srpt_refresh_port() - Configure a HCA port.
+ *
+ * Enable InfiniBand management datagram processing, update the cached sm_lid,
+ * lid and gid values, and register a callback function for processing MADs
+ * on the specified port.
+ *
+ * Note: It is safe to call this function more than once for the same port.
+ */
+static int srpt_refresh_port(struct srpt_port *sport)
+{
+       struct ib_mad_reg_req reg_req;
+       struct ib_port_modify port_modify;
+       struct ib_port_attr port_attr;
+       int ret;
+
+       memset(&port_modify, 0, sizeof port_modify);
+       port_modify.set_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+       port_modify.clr_port_cap_mask = 0;
+
+       ret = ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+       if (ret)
+               goto err_mod_port;
+
+       ret = ib_query_port(sport->sdev->device, sport->port, &port_attr);
+       if (ret)
+               goto err_query_port;
+
+       sport->sm_lid = port_attr.sm_lid;
+       sport->lid = port_attr.lid;
+
+       ret = ib_query_gid(sport->sdev->device, sport->port, 0, &sport->gid);
+       if (ret)
+               goto err_query_port;
+
+       if (!sport->mad_agent) {
+               memset(&reg_req, 0, sizeof reg_req);
+               reg_req.mgmt_class = IB_MGMT_CLASS_DEVICE_MGMT;
+               reg_req.mgmt_class_version = IB_MGMT_BASE_VERSION;
+               set_bit(IB_MGMT_METHOD_GET, reg_req.method_mask);
+               set_bit(IB_MGMT_METHOD_SET, reg_req.method_mask);
+
+               sport->mad_agent = ib_register_mad_agent(sport->sdev->device,
+                                                        sport->port,
+                                                        IB_QPT_GSI,
+                                                        &reg_req, 0,
+                                                        srpt_mad_send_handler,
+                                                        srpt_mad_recv_handler,
+                                                        sport);
+               if (IS_ERR(sport->mad_agent)) {
+                       ret = PTR_ERR(sport->mad_agent);
+                       sport->mad_agent = NULL;
+                       goto err_query_port;
+               }
+       }
+
+       return 0;
+
+err_query_port:
+
+       port_modify.set_port_cap_mask = 0;
+       port_modify.clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP;
+       ib_modify_port(sport->sdev->device, sport->port, 0, &port_modify);
+
+err_mod_port:
+
+       return ret;
+}
+
+/**
+ * srpt_unregister_mad_agent() - Unregister MAD callback functions.
+ *
+ * Note: It is safe to call this function more than once for the same device.
+ */
+static void srpt_unregister_mad_agent(struct srpt_device *sdev)
+{
+       struct ib_port_modify port_modify = {
+               .clr_port_cap_mask = IB_PORT_DEVICE_MGMT_SUP,
+       };
+       struct srpt_port *sport;
+       int i;
+
+       for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+               sport = &sdev->port[i - 1];
+               WARN_ON(sport->port != i);
+               if (ib_modify_port(sdev->device, i, 0, &port_modify) < 0)
+                       printk(KERN_ERR "disabling MAD processing failed.\n");
+               if (sport->mad_agent) {
+                       ib_unregister_mad_agent(sport->mad_agent);
+                       sport->mad_agent = NULL;
+               }
+       }
+}
+
+/**
+ * srpt_alloc_ioctx() - Allocate an SRPT I/O context structure.
+ */
+static struct srpt_ioctx *srpt_alloc_ioctx(struct srpt_device *sdev,
+                                          int ioctx_size, int dma_size,
+                                          enum dma_data_direction dir)
+{
+       struct srpt_ioctx *ioctx;
+
+       ioctx = kmalloc(ioctx_size, GFP_KERNEL);
+       if (!ioctx)
+               goto err;
+
+       ioctx->buf = kmalloc(dma_size, GFP_KERNEL);
+       if (!ioctx->buf)
+               goto err_free_ioctx;
+
+       ioctx->dma = ib_dma_map_single(sdev->device, ioctx->buf, dma_size, dir);
+       if (ib_dma_mapping_error(sdev->device, ioctx->dma))
+               goto err_free_buf;
+
+       return ioctx;
+
+err_free_buf:
+       kfree(ioctx->buf);
+err_free_ioctx:
+       kfree(ioctx);
+err:
+       return NULL;
+}
+
+/**
+ * srpt_free_ioctx() - Free an SRPT I/O context structure.
+ */
+static void srpt_free_ioctx(struct srpt_device *sdev, struct srpt_ioctx *ioctx,
+                           int dma_size, enum dma_data_direction dir)
+{
+       if (!ioctx)
+               return;
+
+       ib_dma_unmap_single(sdev->device, ioctx->dma, dma_size, dir);
+       kfree(ioctx->buf);
+       kfree(ioctx);
+}
+
+/**
+ * srpt_alloc_ioctx_ring() - Allocate a ring of SRPT I/O context structures.
+ * @sdev:       Device to allocate the I/O context ring for.
+ * @ring_size:  Number of elements in the I/O context ring.
+ * @ioctx_size: I/O context size.
+ * @dma_size:   DMA buffer size.
+ * @dir:        DMA data direction.
+ */
+static struct srpt_ioctx **srpt_alloc_ioctx_ring(struct srpt_device *sdev,
+                               int ring_size, int ioctx_size,
+                               int dma_size, enum dma_data_direction dir)
+{
+       struct srpt_ioctx **ring;
+       int i;
+
+       WARN_ON(ioctx_size != sizeof(struct srpt_recv_ioctx)
+               && ioctx_size != sizeof(struct srpt_send_ioctx));
+
+       ring = kmalloc(ring_size * sizeof(ring[0]), GFP_KERNEL);
+       if (!ring)
+               goto out;
+       for (i = 0; i < ring_size; ++i) {
+               ring[i] = srpt_alloc_ioctx(sdev, ioctx_size, dma_size, dir);
+               if (!ring[i])
+                       goto err;
+               ring[i]->index = i;
+       }
+       goto out;
+
+err:
+       while (--i >= 0)
+               srpt_free_ioctx(sdev, ring[i], dma_size, dir);
+       kfree(ring);
+       ring = NULL;
+out:
+       return ring;
+}
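A compact sketch, with plain malloc()/free() standing in for the kernel allocation helpers, of the partial-failure unwind pattern used by srpt_alloc_ioctx_ring() above; names and sizes are made up for the example:

#include <stdlib.h>

/* Allocate 'n' buffers of 'size' bytes each; unwind everything on failure. */
static void **alloc_ring(int n, size_t size)
{
        void **ring = malloc(n * sizeof(ring[0]));
        int i;

        if (!ring)
                return NULL;
        for (i = 0; i < n; i++) {
                ring[i] = malloc(size);
                if (!ring[i])
                        goto err;
        }
        return ring;

err:
        while (--i >= 0)        /* free only the elements already allocated */
                free(ring[i]);
        free(ring);
        return NULL;
}

int main(void)
{
        void **ring = alloc_ring(16, 64);

        if (ring) {
                for (int i = 0; i < 16; i++)
                        free(ring[i]);
                free(ring);
        }
        return 0;
}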
+
+/**
+ * srpt_free_ioctx_ring() - Free the ring of SRPT I/O context structures.
+ */
+static void srpt_free_ioctx_ring(struct srpt_ioctx **ioctx_ring,
+                                struct srpt_device *sdev, int ring_size,
+                                int dma_size, enum dma_data_direction dir)
+{
+       int i;
+
+       for (i = 0; i < ring_size; ++i)
+               srpt_free_ioctx(sdev, ioctx_ring[i], dma_size, dir);
+       kfree(ioctx_ring);
+}
+
+/**
+ * srpt_get_cmd_state() - Get the state of a SCSI command.
+ */
+static enum srpt_command_state srpt_get_cmd_state(struct srpt_send_ioctx *ioctx)
+{
+       enum srpt_command_state state;
+       unsigned long flags;
+
+       BUG_ON(!ioctx);
+
+       spin_lock_irqsave(&ioctx->spinlock, flags);
+       state = ioctx->state;
+       spin_unlock_irqrestore(&ioctx->spinlock, flags);
+       return state;
+}
+
+/**
+ * srpt_set_cmd_state() - Set the state of a SCSI command.
+ *
+ * Does not modify the state of aborted commands. Returns the previous command
+ * state.
+ */
+static enum srpt_command_state srpt_set_cmd_state(struct srpt_send_ioctx *ioctx,
+                                                 enum srpt_command_state new)
+{
+       enum srpt_command_state previous;
+       unsigned long flags;
+
+       BUG_ON(!ioctx);
+
+       spin_lock_irqsave(&ioctx->spinlock, flags);
+       previous = ioctx->state;
+       if (previous != SRPT_STATE_DONE)
+               ioctx->state = new;
+       spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+       return previous;
+}
+
+/**
+ * srpt_test_and_set_cmd_state() - Test and set the state of a command.
+ *
+ * Returns true if and only if the previous command state was equal to 'old'.
+ */
+static bool srpt_test_and_set_cmd_state(struct srpt_send_ioctx *ioctx,
+                                       enum srpt_command_state old,
+                                       enum srpt_command_state new)
+{
+       enum srpt_command_state previous;
+       unsigned long flags;
+
+       WARN_ON(!ioctx);
+       WARN_ON(old == SRPT_STATE_DONE);
+       WARN_ON(new == SRPT_STATE_NEW);
+
+       spin_lock_irqsave(&ioctx->spinlock, flags);
+       previous = ioctx->state;
+       if (previous == old)
+               ioctx->state = new;
+       spin_unlock_irqrestore(&ioctx->spinlock, flags);
+       return previous == old;
+}
+
+/**
+ * srpt_post_recv() - Post an IB receive request.
+ */
+static int srpt_post_recv(struct srpt_device *sdev,
+                         struct srpt_recv_ioctx *ioctx)
+{
+       struct ib_sge list;
+       struct ib_recv_wr wr, *bad_wr;
+
+       BUG_ON(!sdev);
+       wr.wr_id = encode_wr_id(SRPT_RECV, ioctx->ioctx.index);
+
+       list.addr = ioctx->ioctx.dma;
+       list.length = srp_max_req_size;
+       list.lkey = sdev->mr->lkey;
+
+       wr.next = NULL;
+       wr.sg_list = &list;
+       wr.num_sge = 1;
+
+       return ib_post_srq_recv(sdev->srq, &wr, &bad_wr);
+}
+
+/**
+ * srpt_post_send() - Post an IB send request.
+ *
+ * Returns zero upon success and a non-zero value upon failure.
+ */
+static int srpt_post_send(struct srpt_rdma_ch *ch,
+                         struct srpt_send_ioctx *ioctx, int len)
+{
+       struct ib_sge list;
+       struct ib_send_wr wr, *bad_wr;
+       struct srpt_device *sdev = ch->sport->sdev;
+       int ret;
+
+       atomic_inc(&ch->req_lim);
+
+       ret = -ENOMEM;
+       if (unlikely(atomic_dec_return(&ch->sq_wr_avail) < 0)) {
+               printk(KERN_WARNING "IB send queue full (needed 1)\n");
+               goto out;
+       }
+
+       ib_dma_sync_single_for_device(sdev->device, ioctx->ioctx.dma, len,
+                                     DMA_TO_DEVICE);
+
+       list.addr = ioctx->ioctx.dma;
+       list.length = len;
+       list.lkey = sdev->mr->lkey;
+
+       wr.next = NULL;
+       wr.wr_id = encode_wr_id(SRPT_SEND, ioctx->ioctx.index);
+       wr.sg_list = &list;
+       wr.num_sge = 1;
+       wr.opcode = IB_WR_SEND;
+       wr.send_flags = IB_SEND_SIGNALED;
+
+       ret = ib_post_send(ch->qp, &wr, &bad_wr);
+
+out:
+       if (ret < 0) {
+               atomic_inc(&ch->sq_wr_avail);
+               atomic_dec(&ch->req_lim);
+       }
+       return ret;
+}
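A minimal sketch of the send-queue credit handling that srpt_post_send() relies on, using a C11 atomic counter instead of the kernel's atomic_t helpers; the queue depth is a made-up value:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int sq_wr_avail = 2;      /* made-up send queue depth */

/* Reserve one send-queue slot; roll the reservation back when the queue is full. */
static int reserve_sq_slot(void)
{
        if (atomic_fetch_sub(&sq_wr_avail, 1) - 1 < 0) {
                atomic_fetch_add(&sq_wr_avail, 1);
                return -1;
        }
        return 0;
}

int main(void)
{
        for (int i = 0; i < 3; i++)
                printf("post %d -> %s\n", i,
                       reserve_sq_slot() ? "queue full" : "ok");
        return 0;
}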
+
+/**
+ * srpt_get_desc_tbl() - Parse the data descriptors of an SRP_CMD request.
+ * @ioctx: Pointer to the I/O context associated with the request.
+ * @srp_cmd: Pointer to the SRP_CMD request data.
+ * @dir: Pointer to the variable to which the transfer direction will be
+ *   written.
+ * @data_len: Pointer to the variable to which the total data length of all
+ *   descriptors in the SRP_CMD request will be written.
+ *
+ * This function initializes ioctx->nrbuf and ioctx->r_bufs.
+ *
+ * Returns -EINVAL when the SRP_CMD request contains inconsistent descriptors;
+ * -ENOMEM when memory allocation fails and zero upon success.
+ */
+static int srpt_get_desc_tbl(struct srpt_send_ioctx *ioctx,
+                            struct srp_cmd *srp_cmd,
+                            enum dma_data_direction *dir, u64 *data_len)
+{
+       struct srp_indirect_buf *idb;
+       struct srp_direct_buf *db;
+       unsigned add_cdb_offset;
+       int ret;
+
+       /*
+        * The pointer computations below will only be compiled correctly
+        * if srp_cmd::add_data is declared as s8*, u8*, s8[] or u8[], so check
+        * whether srp_cmd::add_data has been declared as a byte pointer.
+        */
+       BUILD_BUG_ON(!__same_type(srp_cmd->add_data[0], (s8)0)
+                    && !__same_type(srp_cmd->add_data[0], (u8)0));
+
+       BUG_ON(!dir);
+       BUG_ON(!data_len);
+
+       ret = 0;
+       *data_len = 0;
+
+       /*
+        * The lower four bits of the buffer format field contain the DATA-IN
+        * buffer descriptor format, and the highest four bits contain the
+        * DATA-OUT buffer descriptor format.
+        */
+       *dir = DMA_NONE;
+       if (srp_cmd->buf_fmt & 0xf)
+               /* DATA-IN: transfer data from target to initiator (read). */
+               *dir = DMA_FROM_DEVICE;
+       else if (srp_cmd->buf_fmt >> 4)
+               /* DATA-OUT: transfer data from initiator to target (write). */
+               *dir = DMA_TO_DEVICE;
+
+       /*
+        * According to the SRP spec, the lower two bits of the 'ADDITIONAL
+        * CDB LENGTH' field are reserved and the size in bytes of this field
+        * is four times the value specified in bits 3..7. Hence the "& ~3".
+        */
+       add_cdb_offset = srp_cmd->add_cdb_len & ~3;
+       if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_DIRECT) ||
+           ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_DIRECT)) {
+               ioctx->n_rbuf = 1;
+               ioctx->rbufs = &ioctx->single_rbuf;
+
+               db = (struct srp_direct_buf *)(srp_cmd->add_data
+                                              + add_cdb_offset);
+               memcpy(ioctx->rbufs, db, sizeof *db);
+               *data_len = be32_to_cpu(db->len);
+       } else if (((srp_cmd->buf_fmt & 0xf) == SRP_DATA_DESC_INDIRECT) ||
+                  ((srp_cmd->buf_fmt >> 4) == SRP_DATA_DESC_INDIRECT)) {
+               idb = (struct srp_indirect_buf *)(srp_cmd->add_data
+                                                 + add_cdb_offset);
+
+               ioctx->n_rbuf = be32_to_cpu(idb->table_desc.len) / sizeof *db;
+
+               if (ioctx->n_rbuf >
+                   (srp_cmd->data_out_desc_cnt + srp_cmd->data_in_desc_cnt)) {
+                       printk(KERN_ERR "received unsupported SRP_CMD request"
+                              " type (%u out + %u in != %u / %zu)\n",
+                              srp_cmd->data_out_desc_cnt,
+                              srp_cmd->data_in_desc_cnt,
+                              be32_to_cpu(idb->table_desc.len),
+                              sizeof(*db));
+                       ioctx->n_rbuf = 0;
+                       ret = -EINVAL;
+                       goto out;
+               }
+
+               if (ioctx->n_rbuf == 1)
+                       ioctx->rbufs = &ioctx->single_rbuf;
+               else {
+                       ioctx->rbufs =
+                               kmalloc(ioctx->n_rbuf * sizeof *db, GFP_ATOMIC);
+                       if (!ioctx->rbufs) {
+                               ioctx->n_rbuf = 0;
+                               ret = -ENOMEM;
+                               goto out;
+                       }
+               }
+
+               db = idb->desc_list;
+               memcpy(ioctx->rbufs, db, ioctx->n_rbuf * sizeof *db);
+               *data_len = be32_to_cpu(idb->len);
+       }
+out:
+       return ret;
+}
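A self-contained sketch of the two decoding steps the comments in srpt_get_desc_tbl() describe: the DATA-IN/DATA-OUT nibbles of buf_fmt and the masking of the two reserved low bits of the additional CDB length. The struct below is a simplified stand-in for the relevant srp_cmd fields, not the kernel structure:

#include <stdio.h>

enum dir { DIR_NONE, DIR_FROM_DEVICE, DIR_TO_DEVICE };

/* Simplified stand-in for the two srp_cmd fields used here. */
struct cmd_hdr {
        unsigned char buf_fmt;          /* low nibble: DATA-IN, high nibble: DATA-OUT */
        unsigned char add_cdb_len;      /* additional CDB length, two low bits reserved */
};

int main(void)
{
        struct cmd_hdr cmd = { .buf_fmt = 0x2 << 4, .add_cdb_len = 4 };
        enum dir dir = DIR_NONE;
        unsigned int add_cdb_offset;

        if (cmd.buf_fmt & 0xf)
                dir = DIR_FROM_DEVICE;  /* DATA-IN: read, target to initiator  */
        else if (cmd.buf_fmt >> 4)
                dir = DIR_TO_DEVICE;    /* DATA-OUT: write, initiator to target */

        add_cdb_offset = cmd.add_cdb_len & ~3; /* mask the two reserved bits */

        printf("dir=%d add_cdb_offset=%u\n", dir, add_cdb_offset);
        /* prints: dir=2 add_cdb_offset=4 */
        return 0;
}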
+
+/**
+ * srpt_init_ch_qp() - Initialize queue pair attributes.
+ *
+ * Initializes the attributes of queue pair 'qp' by allowing local write,
+ * remote read and remote write. Also transitions 'qp' to state IB_QPS_INIT.
+ */
+static int srpt_init_ch_qp(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+       struct ib_qp_attr *attr;
+       int ret;
+
+       attr = kzalloc(sizeof *attr, GFP_KERNEL);
+       if (!attr)
+               return -ENOMEM;
+
+       attr->qp_state = IB_QPS_INIT;
+       attr->qp_access_flags = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
+           IB_ACCESS_REMOTE_WRITE;
+       attr->port_num = ch->sport->port;
+       attr->pkey_index = 0;
+
+       ret = ib_modify_qp(qp, attr,
+                          IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PORT |
+                          IB_QP_PKEY_INDEX);
+
+       kfree(attr);
+       return ret;
+}
+
+/**
+ * srpt_ch_qp_rtr() - Change the state of a channel to 'ready to receive' (RTR).
+ * @ch: channel of the queue pair.
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
+ *
+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
+ * If this structure ever becomes larger, it might be necessary to allocate
+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rtr(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+       struct ib_qp_attr qp_attr;
+       int attr_mask;
+       int ret;
+
+       qp_attr.qp_state = IB_QPS_RTR;
+       ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+       if (ret)
+               goto out;
+
+       qp_attr.max_dest_rd_atomic = 4;
+
+       ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
+       return ret;
+}
+
+/**
+ * srpt_ch_qp_rts() - Change the state of a channel to 'ready to send' (RTS).
+ * @ch: channel of the queue pair.
+ * @qp: queue pair to change the state of.
+ *
+ * Returns zero upon success and a negative value upon failure.
+ *
+ * Note: currently a struct ib_qp_attr takes 136 bytes on a 64-bit system.
+ * If this structure ever becomes larger, it might be necessary to allocate
+ * it dynamically instead of on the stack.
+ */
+static int srpt_ch_qp_rts(struct srpt_rdma_ch *ch, struct ib_qp *qp)
+{
+       struct ib_qp_attr qp_attr;
+       int attr_mask;
+       int ret;
+
+       qp_attr.qp_state = IB_QPS_RTS;
+       ret = ib_cm_init_qp_attr(ch->cm_id, &qp_attr, &attr_mask);
+       if (ret)
+               goto out;
+
+       qp_attr.max_rd_atomic = 4;
+
+       ret = ib_modify_qp(qp, &qp_attr, attr_mask);
+
+out:
+       return ret;
+}
+
+/**
+ * srpt_ch_qp_err() - Set the channel queue pair state to 'error'.
+ */
+static int srpt_ch_qp_err(struct srpt_rdma_ch *ch)
+{
+       struct ib_qp_attr qp_attr;
+
+       qp_attr.qp_state = IB_QPS_ERR;
+       return ib_modify_qp(ch->qp, &qp_attr, IB_QP_STATE);
+}
+
+/**
+ * srpt_unmap_sg_to_ib_sge() - Unmap an IB SGE list.
+ */
+static void srpt_unmap_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+                                   struct srpt_send_ioctx *ioctx)
+{
+       struct scatterlist *sg;
+       enum dma_data_direction dir;
+
+       BUG_ON(!ch);
+       BUG_ON(!ioctx);
+       BUG_ON(ioctx->n_rdma && !ioctx->rdma_ius);
+
+       while (ioctx->n_rdma)
+               kfree(ioctx->rdma_ius[--ioctx->n_rdma].sge);
+
+       kfree(ioctx->rdma_ius);
+       ioctx->rdma_ius = NULL;
+
+       if (ioctx->mapped_sg_count) {
+               sg = ioctx->sg;
+               WARN_ON(!sg);
+               dir = ioctx->cmd.data_direction;
+               BUG_ON(dir == DMA_NONE);
+               ib_dma_unmap_sg(ch->sport->sdev->device, sg, ioctx->sg_cnt,
+                               opposite_dma_dir(dir));
+               ioctx->mapped_sg_count = 0;
+       }
+}
+
+/**
+ * srpt_map_sg_to_ib_sge() - Map an SG list to an IB SGE list.
+ */
+static int srpt_map_sg_to_ib_sge(struct srpt_rdma_ch *ch,
+                                struct srpt_send_ioctx *ioctx)
+{
+       struct se_cmd *cmd;
+       struct scatterlist *sg, *sg_orig;
+       int sg_cnt;
+       enum dma_data_direction dir;
+       struct rdma_iu *riu;
+       struct srp_direct_buf *db;
+       dma_addr_t dma_addr;
+       struct ib_sge *sge;
+       u64 raddr;
+       u32 rsize;
+       u32 tsize;
+       u32 dma_len;
+       int count, nrdma;
+       int i, j, k;
+
+       BUG_ON(!ch);
+       BUG_ON(!ioctx);
+       cmd = &ioctx->cmd;
+       dir = cmd->data_direction;
+       BUG_ON(dir == DMA_NONE);
+
+       transport_do_task_sg_chain(cmd);
+       ioctx->sg = sg = sg_orig = cmd->t_tasks_sg_chained;
+       ioctx->sg_cnt = sg_cnt = cmd->t_tasks_sg_chained_no;
+
+       count = ib_dma_map_sg(ch->sport->sdev->device, sg, sg_cnt,
+                             opposite_dma_dir(dir));
+       if (unlikely(!count))
+               return -EAGAIN;
+
+       ioctx->mapped_sg_count = count;
+
+       if (ioctx->rdma_ius && ioctx->n_rdma_ius)
+               nrdma = ioctx->n_rdma_ius;
+       else {
+               nrdma = (count + SRPT_DEF_SG_PER_WQE - 1) / SRPT_DEF_SG_PER_WQE
+                       + ioctx->n_rbuf;
+
+               ioctx->rdma_ius = kzalloc(nrdma * sizeof *riu, GFP_KERNEL);
+               if (!ioctx->rdma_ius)
+                       goto free_mem;
+
+               ioctx->n_rdma_ius = nrdma;
+       }
+
+       db = ioctx->rbufs;
+       tsize = cmd->data_length;
+       dma_len = sg_dma_len(&sg[0]);
+       riu = ioctx->rdma_ius;
+
+       /*
+        * For each remote descriptor, calculate the number of ib_sge entries
+        * needed. If that number stays below SRPT_DEF_SG_PER_WQE per RDMA
+        * operation, a single rdma_iu (and hence one RDMA work request) per
+        * remote descriptor is sufficient; otherwise extra rdma_iu entries
+        * are allocated to carry the additional ib_sge entries in further
+        * RDMA work requests.
+        */
+       for (i = 0, j = 0;
+            j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
+               rsize = be32_to_cpu(db->len);
+               raddr = be64_to_cpu(db->va);
+               riu->raddr = raddr;
+               riu->rkey = be32_to_cpu(db->key);
+               riu->sge_cnt = 0;
+
+               /* calculate how many sge required for this remote_buf */
+               while (rsize > 0 && tsize > 0) {
+
+                       if (rsize >= dma_len) {
+                               tsize -= dma_len;
+                               rsize -= dma_len;
+                               raddr += dma_len;
+
+                               if (tsize > 0) {
+                                       ++j;
+                                       if (j < count) {
+                                               sg = sg_next(sg);
+                                               dma_len = sg_dma_len(sg);
+                                       }
+                               }
+                       } else {
+                               tsize -= rsize;
+                               dma_len -= rsize;
+                               rsize = 0;
+                       }
+
+                       ++riu->sge_cnt;
+
+                       if (rsize > 0 && riu->sge_cnt == SRPT_DEF_SG_PER_WQE) {
+                               ++ioctx->n_rdma;
+                               riu->sge =
+                                   kmalloc(riu->sge_cnt * sizeof *riu->sge,
+                                           GFP_KERNEL);
+                               if (!riu->sge)
+                                       goto free_mem;
+
+                               ++riu;
+                               riu->sge_cnt = 0;
+                               riu->raddr = raddr;
+                               riu->rkey = be32_to_cpu(db->key);
+                       }
+               }
+
+               ++ioctx->n_rdma;
+               riu->sge = kmalloc(riu->sge_cnt * sizeof *riu->sge,
+                                  GFP_KERNEL);
+               if (!riu->sge)
+                       goto free_mem;
+       }
+
+       db = ioctx->rbufs;
+       tsize = cmd->data_length;
+       riu = ioctx->rdma_ius;
+       sg = sg_orig;
+       dma_len = sg_dma_len(&sg[0]);
+       dma_addr = sg_dma_address(&sg[0]);
+
+       /* This second loop maps the SG addresses onto the rdma_iu ib_sge entries. */
+       for (i = 0, j = 0;
+            j < count && i < ioctx->n_rbuf && tsize > 0; ++i, ++riu, ++db) {
+               rsize = be32_to_cpu(db->len);
+               sge = riu->sge;
+               k = 0;
+
+               while (rsize > 0 && tsize > 0) {
+                       sge->addr = dma_addr;
+                       sge->lkey = ch->sport->sdev->mr->lkey;
+
+                       if (rsize >= dma_len) {
+                               sge->length =
+                                       (tsize < dma_len) ? tsize : dma_len;
+                               tsize -= dma_len;
+                               rsize -= dma_len;
+
+                               if (tsize > 0) {
+                                       ++j;
+                                       if (j < count) {
+                                               sg = sg_next(sg);
+                                               dma_len = sg_dma_len(sg);
+                                               dma_addr = sg_dma_address(sg);
+                                       }
+                               }
+                       } else {
+                               sge->length = (tsize < rsize) ? tsize : rsize;
+                               tsize -= rsize;
+                               dma_len -= rsize;
+                               dma_addr += rsize;
+                               rsize = 0;
+                       }
+
+                       ++k;
+                       if (k == riu->sge_cnt && rsize > 0 && tsize > 0) {
+                               ++riu;
+                               sge = riu->sge;
+                               k = 0;
+                       } else if (rsize > 0 && tsize > 0)
+                               ++sge;
+               }
+       }
+
+       return 0;
+
+free_mem:
+       srpt_unmap_sg_to_ib_sge(ch, ioctx);
+
+       return -ENOMEM;
+}
+
+/**
+ * srpt_get_send_ioctx() - Obtain an I/O context for sending to the initiator.
+ */
+static struct srpt_send_ioctx *srpt_get_send_ioctx(struct srpt_rdma_ch *ch)
+{
+       struct srpt_send_ioctx *ioctx;
+       unsigned long flags;
+
+       BUG_ON(!ch);
+
+       ioctx = NULL;
+       spin_lock_irqsave(&ch->spinlock, flags);
+       if (!list_empty(&ch->free_list)) {
+               ioctx = list_first_entry(&ch->free_list,
+                                        struct srpt_send_ioctx, free_list);
+               list_del(&ioctx->free_list);
+       }
+       spin_unlock_irqrestore(&ch->spinlock, flags);
+
+       if (!ioctx)
+               return ioctx;
+
+       BUG_ON(ioctx->ch != ch);
+       kref_init(&ioctx->kref);
+       spin_lock_init(&ioctx->spinlock);
+       ioctx->state = SRPT_STATE_NEW;
+       ioctx->n_rbuf = 0;
+       ioctx->rbufs = NULL;
+       ioctx->n_rdma = 0;
+       ioctx->n_rdma_ius = 0;
+       ioctx->rdma_ius = NULL;
+       ioctx->mapped_sg_count = 0;
+       init_completion(&ioctx->tx_done);
+       ioctx->queue_status_only = false;
+       /*
+        * transport_init_se_cmd() does not initialize all fields, so do it
+        * here.
+        */
+       memset(&ioctx->cmd, 0, sizeof(ioctx->cmd));
+       memset(&ioctx->sense_data, 0, sizeof(ioctx->sense_data));
+
+       return ioctx;
+}
+
+/**
+ * srpt_put_send_ioctx() - Free up resources.
+ */
+static void srpt_put_send_ioctx(struct srpt_send_ioctx *ioctx)
+{
+       struct srpt_rdma_ch *ch;
+       unsigned long flags;
+
+       BUG_ON(!ioctx);
+       ch = ioctx->ch;
+       BUG_ON(!ch);
+
+       WARN_ON(srpt_get_cmd_state(ioctx) != SRPT_STATE_DONE);
+
+       srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+       transport_generic_free_cmd(&ioctx->cmd, 0);
+
+       if (ioctx->n_rbuf > 1) {
+               kfree(ioctx->rbufs);
+               ioctx->rbufs = NULL;
+               ioctx->n_rbuf = 0;
+       }
+
+       spin_lock_irqsave(&ch->spinlock, flags);
+       list_add(&ioctx->free_list, &ch->free_list);
+       spin_unlock_irqrestore(&ch->spinlock, flags);
+}
+
+static void srpt_put_send_ioctx_kref(struct kref *kref)
+{
+       srpt_put_send_ioctx(container_of(kref, struct srpt_send_ioctx, kref));
+}
+
+/**
+ * srpt_abort_cmd() - Abort a SCSI command.
+ * @ioctx: I/O context associated with the SCSI command.
+ */
+static int srpt_abort_cmd(struct srpt_send_ioctx *ioctx)
+{
+       enum srpt_command_state state;
+       unsigned long flags;
+
+       BUG_ON(!ioctx);
+
+       /*
+        * If the command is in a state where the target core is waiting for
+        * the ib_srpt driver, change the state to the next state. Changing
+        * the state of the command from SRPT_STATE_NEED_DATA to
+        * SRPT_STATE_DATA_IN ensures that srpt_xmit_response() will call this
+        * function a second time.
+        */
+
+       spin_lock_irqsave(&ioctx->spinlock, flags);
+       state = ioctx->state;
+       switch (state) {
+       case SRPT_STATE_NEED_DATA:
+               ioctx->state = SRPT_STATE_DATA_IN;
+               break;
+       case SRPT_STATE_DATA_IN:
+       case SRPT_STATE_CMD_RSP_SENT:
+       case SRPT_STATE_MGMT_RSP_SENT:
+               ioctx->state = SRPT_STATE_DONE;
+               break;
+       default:
+               break;
+       }
+       spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+       if (state == SRPT_STATE_DONE)
+               goto out;
+
+       pr_debug("Aborting cmd with state %d and tag %lld\n", state,
+                ioctx->tag);
+
+       switch (state) {
+       case SRPT_STATE_NEW:
+       case SRPT_STATE_DATA_IN:
+       case SRPT_STATE_MGMT:
+               /*
+                * Do nothing - defer abort processing until
+                * srpt_queue_response() is invoked.
+                */
+               WARN_ON(!transport_check_aborted_status(&ioctx->cmd, false));
+               break;
+       case SRPT_STATE_NEED_DATA:
+               /* DMA_TO_DEVICE (write) - RDMA read error. */
+               atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+               transport_generic_handle_data(&ioctx->cmd);
+               break;
+       case SRPT_STATE_CMD_RSP_SENT:
+               /*
+                * SRP_RSP sending failed or the SRP_RSP send completion has
+                * not been received in time.
+                */
+               srpt_unmap_sg_to_ib_sge(ioctx->ch, ioctx);
+               atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+               kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+               break;
+       case SRPT_STATE_MGMT_RSP_SENT:
+               srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+               kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+               break;
+       default:
+               WARN(true, "unexpected command state %d\n", state);
+               break;
+       }
+
+out:
+       return state;
+}
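+
+/*
+ * Summary of the state transitions performed above (for reference):
+ *
+ *   SRPT_STATE_NEED_DATA      -> SRPT_STATE_DATA_IN  (so that
+ *                                srpt_xmit_response() invokes this function
+ *                                a second time)
+ *   SRPT_STATE_DATA_IN,
+ *   SRPT_STATE_CMD_RSP_SENT,
+ *   SRPT_STATE_MGMT_RSP_SENT  -> SRPT_STATE_DONE
+ *   SRPT_STATE_DONE           -> no further action
+ */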
+
+/**
+ * srpt_handle_send_err_comp() - Process an IB_WC_SEND error completion.
+ */
+static void srpt_handle_send_err_comp(struct srpt_rdma_ch *ch, u64 wr_id)
+{
+       struct srpt_send_ioctx *ioctx;
+       enum srpt_command_state state;
+       struct se_cmd *cmd;
+       u32 index;
+
+       atomic_inc(&ch->sq_wr_avail);
+
+       index = idx_from_wr_id(wr_id);
+       ioctx = ch->ioctx_ring[index];
+       state = srpt_get_cmd_state(ioctx);
+       cmd = &ioctx->cmd;
+
+       WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
+               && state != SRPT_STATE_MGMT_RSP_SENT
+               && state != SRPT_STATE_NEED_DATA
+               && state != SRPT_STATE_DONE);
+
+       /* If SRP_RSP sending failed, undo the ch->req_lim change. */
+       if (state == SRPT_STATE_CMD_RSP_SENT
+           || state == SRPT_STATE_MGMT_RSP_SENT)
+               atomic_dec(&ch->req_lim);
+
+       srpt_abort_cmd(ioctx);
+}
+
+/**
+ * srpt_handle_send_comp() - Process an IB send completion notification.
+ */
+static void srpt_handle_send_comp(struct srpt_rdma_ch *ch,
+                                 struct srpt_send_ioctx *ioctx)
+{
+       enum srpt_command_state state;
+
+       atomic_inc(&ch->sq_wr_avail);
+
+       state = srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+
+       if (WARN_ON(state != SRPT_STATE_CMD_RSP_SENT
+                   && state != SRPT_STATE_MGMT_RSP_SENT
+                   && state != SRPT_STATE_DONE))
+               pr_debug("state = %d\n", state);
+
+       if (state != SRPT_STATE_DONE)
+               kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+       else
+               printk(KERN_ERR "IB completion has been received too late for"
+                      " wr_id = %u.\n", ioctx->ioctx.index);
+}
+
+/**
+ * srpt_handle_rdma_comp() - Process an IB RDMA completion notification.
+ *
+ * Note: transport_generic_handle_data() is asynchronous so unmapping the
+ * data that has been transferred via IB RDMA must be postponed until the
+ * check_stop_free() callback.
+ */
+static void srpt_handle_rdma_comp(struct srpt_rdma_ch *ch,
+                                 struct srpt_send_ioctx *ioctx,
+                                 enum srpt_opcode opcode)
+{
+       WARN_ON(ioctx->n_rdma <= 0);
+       atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+
+       if (opcode == SRPT_RDMA_READ_LAST) {
+               if (srpt_test_and_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA,
+                                               SRPT_STATE_DATA_IN))
+                       transport_generic_handle_data(&ioctx->cmd);
+               else
+                       printk(KERN_ERR "%s[%d]: wrong state = %d\n", __func__,
+                              __LINE__, srpt_get_cmd_state(ioctx));
+       } else if (opcode == SRPT_RDMA_ABORT) {
+               ioctx->rdma_aborted = true;
+       } else {
+               WARN(true, "unexpected opcode %d\n", opcode);
+       }
+}
+
+/**
+ * srpt_handle_rdma_err_comp() - Process an IB RDMA error completion.
+ */
+static void srpt_handle_rdma_err_comp(struct srpt_rdma_ch *ch,
+                                     struct srpt_send_ioctx *ioctx,
+                                     enum srpt_opcode opcode)
+{
+       struct se_cmd *cmd;
+       enum srpt_command_state state;
+
+       cmd = &ioctx->cmd;
+       state = srpt_get_cmd_state(ioctx);
+       switch (opcode) {
+       case SRPT_RDMA_READ_LAST:
+               if (ioctx->n_rdma <= 0) {
+                       printk(KERN_ERR "Received invalid RDMA read"
+                              " error completion with idx %d\n",
+                              ioctx->ioctx.index);
+                       break;
+               }
+               atomic_add(ioctx->n_rdma, &ch->sq_wr_avail);
+               if (state == SRPT_STATE_NEED_DATA)
+                       srpt_abort_cmd(ioctx);
+               else
+                       printk(KERN_ERR "%s[%d]: wrong state = %d\n",
+                              __func__, __LINE__, state);
+               break;
+       case SRPT_RDMA_WRITE_LAST:
+               atomic_set(&ioctx->cmd.transport_lun_stop, 1);
+               break;
+       default:
+               printk(KERN_ERR "%s[%d]: opcode = %u\n", __func__,
+                      __LINE__, opcode);
+               break;
+       }
+}
+
+/**
+ * srpt_build_cmd_rsp() - Build an SRP_RSP response.
+ * @ch: RDMA channel through which the request has been received.
+ * @ioctx: I/O context associated with the SRP_CMD request. The response will
+ *   be built in the buffer ioctx->buf points at and hence this function will
+ *   overwrite the request data.
+ * @tag: tag of the request for which this response is being generated.
+ * @status: value for the STATUS field of the SRP_RSP information unit.
+ *
+ * Returns the size in bytes of the SRP_RSP response.
+ *
+ * An SRP_RSP response contains a SCSI status or service response. See also
+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
+ * response. See also SPC-2 for more information about sense data.
+ */
+static int srpt_build_cmd_rsp(struct srpt_rdma_ch *ch,
+                             struct srpt_send_ioctx *ioctx, u64 tag,
+                             int status)
+{
+       struct srp_rsp *srp_rsp;
+       const u8 *sense_data;
+       int sense_data_len, max_sense_len;
+
+       /*
+        * The lowest bit of all SAM-3 status codes is zero (see also
+        * paragraph 5.3 in SAM-3).
+        */
+       WARN_ON(status & 1);
+
+       srp_rsp = ioctx->ioctx.buf;
+       BUG_ON(!srp_rsp);
+
+       sense_data = ioctx->sense_data;
+       sense_data_len = ioctx->cmd.scsi_sense_length;
+       WARN_ON(sense_data_len > sizeof(ioctx->sense_data));
+
+       memset(srp_rsp, 0, sizeof *srp_rsp);
+       srp_rsp->opcode = SRP_RSP;
+       srp_rsp->req_lim_delta =
+               __constant_cpu_to_be32(1 + atomic_xchg(&ch->req_lim_delta, 0));
+       srp_rsp->tag = tag;
+       srp_rsp->status = status;
+
+       if (sense_data_len) {
+               BUILD_BUG_ON(MIN_MAX_RSP_SIZE <= sizeof(*srp_rsp));
+               max_sense_len = ch->max_ti_iu_len - sizeof(*srp_rsp);
+               if (sense_data_len > max_sense_len) {
+                       printk(KERN_WARNING "truncated sense data from %d to %d"
+                              " bytes\n", sense_data_len, max_sense_len);
+                       sense_data_len = max_sense_len;
+               }
+
+               srp_rsp->flags |= SRP_RSP_FLAG_SNSVALID;
+               srp_rsp->sense_data_len = cpu_to_be32(sense_data_len);
+               memcpy(srp_rsp + 1, sense_data, sense_data_len);
+       }
+
+       return sizeof(*srp_rsp) + sense_data_len;
+}
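+
+/*
+ * Layout of the buffer produced by srpt_build_cmd_rsp() (illustration): the
+ * fixed-size struct srp_rsp header is written at ioctx->ioctx.buf and, when
+ * SRP_RSP_FLAG_SNSVALID is set, the sense bytes follow immediately at
+ * (srp_rsp + 1). The returned length is sizeof(*srp_rsp) + sense_data_len.
+ */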
+
+/**
+ * srpt_build_tskmgmt_rsp() - Build a task management response.
+ * @ch:       RDMA channel through which the request has been received.
+ * @ioctx:    I/O context in which the SRP_RSP response will be built.
+ * @rsp_code: RSP_CODE that will be stored in the response.
+ * @tag:      Tag of the request for which this response is being generated.
+ *
+ * Returns the size in bytes of the SRP_RSP response.
+ *
+ * An SRP_RSP response contains a SCSI status or service response. See also
+ * section 6.9 in the SRP r16a document for the format of an SRP_RSP
+ * response.
+ */
+static int srpt_build_tskmgmt_rsp(struct srpt_rdma_ch *ch,
+                                 struct srpt_send_ioctx *ioctx,
+                                 u8 rsp_code, u64 tag)
+{
+       struct srp_rsp *srp_rsp;
+       int resp_data_len;
+       int resp_len;
+
+       resp_data_len = (rsp_code == SRP_TSK_MGMT_SUCCESS) ? 0 : 4;
+       resp_len = sizeof(*srp_rsp) + resp_data_len;
+
+       srp_rsp = ioctx->ioctx.buf;
+       BUG_ON(!srp_rsp);
+       memset(srp_rsp, 0, sizeof *srp_rsp);
+
+       srp_rsp->opcode = SRP_RSP;
+       srp_rsp->req_lim_delta = __constant_cpu_to_be32(1
+                                   + atomic_xchg(&ch->req_lim_delta, 0));
+       srp_rsp->tag = tag;
+
+       if (rsp_code != SRP_TSK_MGMT_SUCCESS) {
+               srp_rsp->flags |= SRP_RSP_FLAG_RSPVALID;
+               srp_rsp->resp_data_len = cpu_to_be32(resp_data_len);
+               srp_rsp->data[3] = rsp_code;
+       }
+
+       return resp_len;
+}
+
+#define NO_SUCH_LUN ((uint64_t)-1LL)
+
+/*
+ * SCSI LUN addressing method. See also SAM-2 and the section about
+ * eight byte LUNs.
+ */
+enum scsi_lun_addr_method {
+       SCSI_LUN_ADDR_METHOD_PERIPHERAL   = 0,
+       SCSI_LUN_ADDR_METHOD_FLAT         = 1,
+       SCSI_LUN_ADDR_METHOD_LUN          = 2,
+       SCSI_LUN_ADDR_METHOD_EXTENDED_LUN = 3,
+};
+
+/*
+ * srpt_unpack_lun() - Convert from network LUN to linear LUN.
+ *
+ * Convert a 2-byte, 4-byte, 6-byte or 8-byte LUN structure in network byte
+ * order (big endian) to a linear LUN. Supports three LUN addressing methods:
+ * peripheral, flat and logical unit. See also SAM-2, section 4.9.4 (page 40).
+ */
+static uint64_t srpt_unpack_lun(const uint8_t *lun, int len)
+{
+       uint64_t res = NO_SUCH_LUN;
+       int addressing_method;
+
+       if (unlikely(len < 2)) {
+               printk(KERN_ERR "Illegal LUN length %d, expected 2 bytes or "
+                      "more", len);
+               goto out;
+       }
+
+       switch (len) {
+       case 8:
+               if ((*((__be64 *)lun) &
+                    __constant_cpu_to_be64(0x0000FFFFFFFFFFFFLL)) != 0)
+                       goto out_err;
+               break;
+       case 4:
+               if (*((__be16 *)&lun[2]) != 0)
+                       goto out_err;
+               break;
+       case 6:
+               if (*((__be32 *)&lun[2]) != 0)
+                       goto out_err;
+               break;
+       case 2:
+               break;
+       default:
+               goto out_err;
+       }
+
+       addressing_method = (*lun) >> 6; /* highest two bits of byte 0 */
+       switch (addressing_method) {
+       case SCSI_LUN_ADDR_METHOD_PERIPHERAL:
+       case SCSI_LUN_ADDR_METHOD_FLAT:
+       case SCSI_LUN_ADDR_METHOD_LUN:
+               res = *(lun + 1) | (((*lun) & 0x3f) << 8);
+               break;
+
+       case SCSI_LUN_ADDR_METHOD_EXTENDED_LUN:
+       default:
+               printk(KERN_ERR "Unimplemented LUN addressing method %u",
+                      addressing_method);
+               break;
+       }
+
+out:
+       return res;
+
+out_err:
+       printk(KERN_ERR "Support for multi-level LUNs has not yet been"
+              " implemented");
+       goto out;
+}
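+
+/*
+ * Worked example for srpt_unpack_lun() (values chosen for illustration):
+ *
+ *   lun[] = { 0x40, 0x05 }, len = 2:
+ *     addressing_method = 0x40 >> 6 = 1 (SCSI_LUN_ADDR_METHOD_FLAT)
+ *     res = lun[1] | ((lun[0] & 0x3f) << 8) = 0x05 | 0x0000 = 5
+ *
+ *   lun[] = { 0x00, 0x02 }, len = 2:
+ *     addressing_method = 0 (SCSI_LUN_ADDR_METHOD_PERIPHERAL), res = 2
+ */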
+
+static int srpt_check_stop_free(struct se_cmd *cmd)
+{
+       struct srpt_send_ioctx *ioctx;
+
+       ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+       return kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+}
+
+/**
+ * srpt_handle_cmd() - Process SRP_CMD.
+ */
+static int srpt_handle_cmd(struct srpt_rdma_ch *ch,
+                          struct srpt_recv_ioctx *recv_ioctx,
+                          struct srpt_send_ioctx *send_ioctx)
+{
+       struct se_cmd *cmd;
+       struct srp_cmd *srp_cmd;
+       uint64_t unpacked_lun;
+       u64 data_len;
+       enum dma_data_direction dir;
+       int ret;
+
+       BUG_ON(!send_ioctx);
+
+       srp_cmd = recv_ioctx->ioctx.buf;
+       kref_get(&send_ioctx->kref);
+       cmd = &send_ioctx->cmd;
+       send_ioctx->tag = srp_cmd->tag;
+
+       switch (srp_cmd->task_attr) {
+       case SRP_CMD_SIMPLE_Q:
+               cmd->sam_task_attr = MSG_SIMPLE_TAG;
+               break;
+       case SRP_CMD_ORDERED_Q:
+       default:
+               cmd->sam_task_attr = MSG_ORDERED_TAG;
+               break;
+       case SRP_CMD_HEAD_OF_Q:
+               cmd->sam_task_attr = MSG_HEAD_TAG;
+               break;
+       case SRP_CMD_ACA:
+               cmd->sam_task_attr = MSG_ACA_TAG;
+               break;
+       }
+
+       ret = srpt_get_desc_tbl(send_ioctx, srp_cmd, &dir, &data_len);
+       if (ret) {
+               printk(KERN_ERR "0x%llx: parsing SRP descriptor table failed.\n",
+                      srp_cmd->tag);
+               cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+               cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+               goto send_sense;
+       }
+
+       cmd->data_length = data_len;
+       cmd->data_direction = dir;
+       unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_cmd->lun,
+                                      sizeof(srp_cmd->lun));
+       if (transport_lookup_cmd_lun(cmd, unpacked_lun) < 0)
+               goto send_sense;
+       ret = transport_generic_allocate_tasks(cmd, srp_cmd->cdb);
+       if (cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
+               srpt_queue_status(cmd);
+       else if (cmd->se_cmd_flags & SCF_SCSI_CDB_EXCEPTION)
+               goto send_sense;
+       else
+               WARN_ON_ONCE(ret);
+
+       transport_handle_cdb_direct(cmd);
+       return 0;
+
+send_sense:
+       transport_send_check_condition_and_sense(cmd, cmd->scsi_sense_reason,
+                                                0);
+       return -1;
+}
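+
+/*
+ * Processing sequence implemented by srpt_handle_cmd() (overview):
+ *   1. map the SRP task attribute onto a SAM task attribute;
+ *   2. parse the SRP descriptor table to obtain data direction and length;
+ *   3. unpack the LUN and look it up via transport_lookup_cmd_lun();
+ *   4. allocate tasks for the CDB and hand the command to the target core
+ *      with transport_handle_cdb_direct(), or send back a check condition
+ *      on failure.
+ */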
+
+/**
+ * srpt_rx_mgmt_fn_tag() - Process a task management function by tag.
+ * @ioctx: I/O context of the SRP task management request.
+ * @tag:   Tag of the SRP command that has to be managed.
+ *
+ * Returns zero if a command with the given tag was found on the same LUN and
+ * has not yet finished, and -EINVAL otherwise.
+ *
+ * Note: It is assumed that the initiator serializes tag-based task management
+ * requests.
+ */
+static int srpt_rx_mgmt_fn_tag(struct srpt_send_ioctx *ioctx, u64 tag)
+{
+       struct srpt_device *sdev;
+       struct srpt_rdma_ch *ch;
+       struct srpt_send_ioctx *target;
+       int ret, i;
+
+       ret = -EINVAL;
+       ch = ioctx->ch;
+       BUG_ON(!ch);
+       BUG_ON(!ch->sport);
+       sdev = ch->sport->sdev;
+       BUG_ON(!sdev);
+       spin_lock_irq(&sdev->spinlock);
+       for (i = 0; i < ch->rq_size; ++i) {
+               target = ch->ioctx_ring[i];
+               if (target->cmd.se_lun == ioctx->cmd.se_lun &&
+                   target->tag == tag &&
+                   srpt_get_cmd_state(target) != SRPT_STATE_DONE) {
+                       ret = 0;
+                       /* now let the target core abort &target->cmd; */
+                       break;
+               }
+       }
+       spin_unlock_irq(&sdev->spinlock);
+       return ret;
+}
+
+static int srp_tmr_to_tcm(int fn)
+{
+       switch (fn) {
+       case SRP_TSK_ABORT_TASK:
+               return TMR_ABORT_TASK;
+       case SRP_TSK_ABORT_TASK_SET:
+               return TMR_ABORT_TASK_SET;
+       case SRP_TSK_CLEAR_TASK_SET:
+               return TMR_CLEAR_TASK_SET;
+       case SRP_TSK_LUN_RESET:
+               return TMR_LUN_RESET;
+       case SRP_TSK_CLEAR_ACA:
+               return TMR_CLEAR_ACA;
+       default:
+               return -1;
+       }
+}
+
+/**
+ * srpt_handle_tsk_mgmt() - Process an SRP_TSK_MGMT information unit.
+ *
+ * Either hands the request over to the target core or sends back a check
+ * condition carrying the appropriate task management response code.
+ *
+ * For more information about SRP_TSK_MGMT information units, see also section
+ * 6.7 in the SRP r16a document.
+ */
+static void srpt_handle_tsk_mgmt(struct srpt_rdma_ch *ch,
+                                struct srpt_recv_ioctx *recv_ioctx,
+                                struct srpt_send_ioctx *send_ioctx)
+{
+       struct srp_tsk_mgmt *srp_tsk;
+       struct se_cmd *cmd;
+       uint64_t unpacked_lun;
+       int tcm_tmr;
+       int res;
+
+       BUG_ON(!send_ioctx);
+
+       srp_tsk = recv_ioctx->ioctx.buf;
+       cmd = &send_ioctx->cmd;
+
+       pr_debug("recv tsk_mgmt fn %d for task_tag %lld and cmd tag %lld"
+                " cm_id %p sess %p\n", srp_tsk->tsk_mgmt_func,
+                srp_tsk->task_tag, srp_tsk->tag, ch->cm_id, ch->sess);
+
+       srpt_set_cmd_state(send_ioctx, SRPT_STATE_MGMT);
+       send_ioctx->tag = srp_tsk->tag;
+       tcm_tmr = srp_tmr_to_tcm(srp_tsk->tsk_mgmt_func);
+       if (tcm_tmr < 0) {
+               send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+               send_ioctx->cmd.se_tmr_req->response =
+                       TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
+               goto process_tmr;
+       }
+       cmd->se_tmr_req = core_tmr_alloc_req(cmd, NULL, tcm_tmr, GFP_KERNEL);
+       if (!cmd->se_tmr_req) {
+               send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+               send_ioctx->cmd.se_tmr_req->response = TMR_FUNCTION_REJECTED;
+               goto process_tmr;
+       }
+
+       unpacked_lun = srpt_unpack_lun((uint8_t *)&srp_tsk->lun,
+                                      sizeof(srp_tsk->lun));
+       res = transport_lookup_tmr_lun(&send_ioctx->cmd, unpacked_lun);
+       if (res) {
+               pr_debug("rejecting TMR for LUN %lld\n", unpacked_lun);
+               send_ioctx->cmd.se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
+               send_ioctx->cmd.se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
+               goto process_tmr;
+       }
+
+       if (srp_tsk->tsk_mgmt_func == SRP_TSK_ABORT_TASK)
+               srpt_rx_mgmt_fn_tag(send_ioctx, srp_tsk->task_tag);
+
+process_tmr:
+       kref_get(&send_ioctx->kref);
+       if (!(send_ioctx->cmd.se_cmd_flags & SCF_SCSI_CDB_EXCEPTION))
+               transport_generic_handle_tmr(&send_ioctx->cmd);
+       else
+               transport_send_check_condition_and_sense(cmd,
+                                               cmd->scsi_sense_reason, 0);
+
+}
+
+/**
+ * srpt_handle_new_iu() - Process a newly received information unit.
+ * @ch:    RDMA channel through which the information unit has been received.
+ * @recv_ioctx: SRPT receive I/O context associated with the information unit.
+ * @send_ioctx: Send I/O context to use for the reply, or NULL to allocate one
+ *              from the channel's free list if the IU needs a response.
+ */
+static void srpt_handle_new_iu(struct srpt_rdma_ch *ch,
+                              struct srpt_recv_ioctx *recv_ioctx,
+                              struct srpt_send_ioctx *send_ioctx)
+{
+       struct srp_cmd *srp_cmd;
+       enum rdma_ch_state ch_state;
+
+       BUG_ON(!ch);
+       BUG_ON(!recv_ioctx);
+
+       ib_dma_sync_single_for_cpu(ch->sport->sdev->device,
+                                  recv_ioctx->ioctx.dma, srp_max_req_size,
+                                  DMA_FROM_DEVICE);
+
+       ch_state = srpt_get_ch_state(ch);
+       if (unlikely(ch_state == CH_CONNECTING)) {
+               list_add_tail(&recv_ioctx->wait_list, &ch->cmd_wait_list);
+               goto out;
+       }
+
+       if (unlikely(ch_state != CH_LIVE))
+               goto out;
+
+       srp_cmd = recv_ioctx->ioctx.buf;
+       if (srp_cmd->opcode == SRP_CMD || srp_cmd->opcode == SRP_TSK_MGMT) {
+               if (!send_ioctx)
+                       send_ioctx = srpt_get_send_ioctx(ch);
+               if (unlikely(!send_ioctx)) {
+                       list_add_tail(&recv_ioctx->wait_list,
+                                     &ch->cmd_wait_list);
+                       goto out;
+               }
+       }
+
+       if (send_ioctx)
+               transport_init_se_cmd(&send_ioctx->cmd, &srpt_target->tf_ops,
+                                     ch->sess, 0, DMA_NONE, MSG_SIMPLE_TAG,
+                                     send_ioctx->sense_data);
+
+       switch (srp_cmd->opcode) {
+       case SRP_CMD:
+               srpt_handle_cmd(ch, recv_ioctx, send_ioctx);
+               break;
+       case SRP_TSK_MGMT:
+               srpt_handle_tsk_mgmt(ch, recv_ioctx, send_ioctx);
+               break;
+       case SRP_I_LOGOUT:
+               printk(KERN_ERR "Not yet implemented: SRP_I_LOGOUT\n");
+               break;
+       case SRP_CRED_RSP:
+               pr_debug("received SRP_CRED_RSP\n");
+               break;
+       case SRP_AER_RSP:
+               pr_debug("received SRP_AER_RSP\n");
+               break;
+       case SRP_RSP:
+               printk(KERN_ERR "Received SRP_RSP\n");
+               break;
+       default:
+               printk(KERN_ERR "received IU with unknown opcode 0x%x\n",
+                      srp_cmd->opcode);
+               break;
+       }
+
+       srpt_post_recv(ch->sport->sdev, recv_ioctx);
+out:
+       return;
+}
+
+static void srpt_process_rcv_completion(struct ib_cq *cq,
+                                       struct srpt_rdma_ch *ch,
+                                       struct ib_wc *wc)
+{
+       struct srpt_device *sdev = ch->sport->sdev;
+       struct srpt_recv_ioctx *ioctx;
+       u32 index;
+
+       index = idx_from_wr_id(wc->wr_id);
+       if (wc->status == IB_WC_SUCCESS) {
+               int req_lim;
+
+               req_lim = atomic_dec_return(&ch->req_lim);
+               if (unlikely(req_lim < 0))
+                       printk(KERN_ERR "req_lim = %d < 0\n", req_lim);
+               ioctx = sdev->ioctx_ring[index];
+               srpt_handle_new_iu(ch, ioctx, NULL);
+       } else {
+               printk(KERN_INFO "receiving failed for idx %u with status %d\n",
+                      index, wc->status);
+       }
+}
+
+/**
+ * srpt_process_send_completion() - Process an IB send completion.
+ *
+ * Note: Although this has not yet been observed during tests, at least in
+ * theory it is possible that the srpt_get_send_ioctx() call invoked by
+ * srpt_handle_new_iu() fails. This is possible because the req_lim_delta
+ * value in each response is set to one, and it is possible that this response
+ * makes the initiator send a new request before the send completion for that
+ * response has been processed. This could e.g. happen if the call to
+ * srpt_put_send_ioctx() is delayed because of a higher priority interrupt or
+ * if IB retransmission causes generation of the send completion to be
+ * delayed. Incoming information units for which srpt_get_send_ioctx() fails
+ * are queued on cmd_wait_list. The code below processes these delayed
+ * requests one at a time.
+ */
+static void srpt_process_send_completion(struct ib_cq *cq,
+                                        struct srpt_rdma_ch *ch,
+                                        struct ib_wc *wc)
+{
+       struct srpt_send_ioctx *send_ioctx;
+       uint32_t index;
+       enum srpt_opcode opcode;
+
+       index = idx_from_wr_id(wc->wr_id);
+       opcode = opcode_from_wr_id(wc->wr_id);
+       send_ioctx = ch->ioctx_ring[index];
+       if (wc->status == IB_WC_SUCCESS) {
+               if (opcode == SRPT_SEND)
+                       srpt_handle_send_comp(ch, send_ioctx);
+               else {
+                       WARN_ON(opcode != SRPT_RDMA_ABORT &&
+                               wc->opcode != IB_WC_RDMA_READ);
+                       srpt_handle_rdma_comp(ch, send_ioctx, opcode);
+               }
+       } else {
+               if (opcode == SRPT_SEND) {
+                       printk(KERN_INFO "sending response for idx %u failed"
+                              " with status %d\n", index, wc->status);
+                       srpt_handle_send_err_comp(ch, wc->wr_id);
+               } else if (opcode != SRPT_RDMA_MID) {
+                       printk(KERN_INFO "RDMA op %d for idx %u failed with"
+                               " status %d\n", opcode, index, wc->status);
+                       srpt_handle_rdma_err_comp(ch, send_ioctx, opcode);
+               }
+       }
+
+       while (unlikely(opcode == SRPT_SEND
+                       && !list_empty(&ch->cmd_wait_list)
+                       && srpt_get_ch_state(ch) == CH_LIVE
+                       && (send_ioctx = srpt_get_send_ioctx(ch)) != NULL)) {
+               struct srpt_recv_ioctx *recv_ioctx;
+
+               recv_ioctx = list_first_entry(&ch->cmd_wait_list,
+                                             struct srpt_recv_ioctx,
+                                             wait_list);
+               list_del(&recv_ioctx->wait_list);
+               srpt_handle_new_iu(ch, recv_ioctx, send_ioctx);
+       }
+}
+
+static void srpt_process_completion(struct ib_cq *cq, struct srpt_rdma_ch *ch)
+{
+       struct ib_wc *const wc = ch->wc;
+       int i, n;
+
+       WARN_ON(cq != ch->cq);
+
+       ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
+       while ((n = ib_poll_cq(cq, ARRAY_SIZE(ch->wc), wc)) > 0) {
+               for (i = 0; i < n; i++) {
+                       if (opcode_from_wr_id(wc[i].wr_id) == SRPT_RECV)
+                               srpt_process_rcv_completion(cq, ch, &wc[i]);
+                       else
+                               srpt_process_send_completion(cq, ch, &wc[i]);
+               }
+       }
+}
+
+/**
+ * srpt_completion() - IB completion queue callback function.
+ *
+ * Notes:
+ * - It is guaranteed that a completion handler will never be invoked
+ *   concurrently on two different CPUs for the same completion queue. See also
+ *   Documentation/infiniband/core_locking.txt and the implementation of
+ *   handle_edge_irq() in kernel/irq/chip.c.
+ * - When threaded IRQs are enabled, completion handlers are invoked in thread
+ *   context instead of interrupt context.
+ */
+static void srpt_completion(struct ib_cq *cq, void *ctx)
+{
+       struct srpt_rdma_ch *ch = ctx;
+
+       wake_up_interruptible(&ch->wait_queue);
+}
+
+static int srpt_compl_thread(void *arg)
+{
+       struct srpt_rdma_ch *ch;
+
+       /* Hibernation / freezing of the SRPT kernel thread is not supported. */
+       current->flags |= PF_NOFREEZE;
+
+       ch = arg;
+       BUG_ON(!ch);
+       printk(KERN_INFO "Session %s: kernel thread %s (PID %d) started\n",
+              ch->sess_name, ch->thread->comm, current->pid);
+       while (!kthread_should_stop()) {
+               wait_event_interruptible(ch->wait_queue,
+                       (srpt_process_completion(ch->cq, ch),
+                        kthread_should_stop()));
+       }
+       printk(KERN_INFO "Session %s: kernel thread %s (PID %d) stopped\n",
+              ch->sess_name, ch->thread->comm, current->pid);
+       return 0;
+}
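+
+/*
+ * Note on the wait_event_interruptible() condition used above: the comma
+ * expression first drains the completion queue via srpt_process_completion()
+ * and only then evaluates kthread_should_stop(), so every wakeup processes
+ * pending completions and the loop exits only once the thread is asked to
+ * stop.
+ */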
+
+/**
+ * srpt_create_ch_ib() - Create a completion queue, queue pair and completion thread.
+ */
+static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
+{
+       struct ib_qp_init_attr *qp_init;
+       struct srpt_port *sport = ch->sport;
+       struct srpt_device *sdev = sport->sdev;
+       u32 srp_sq_size = sport->port_attrib.srp_sq_size;
+       int ret;
+
+       WARN_ON(ch->rq_size < 1);
+
+       ret = -ENOMEM;
+       qp_init = kzalloc(sizeof *qp_init, GFP_KERNEL);
+       if (!qp_init)
+               goto out;
+
+       ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
+                             ch->rq_size + srp_sq_size, 0);
+       if (IS_ERR(ch->cq)) {
+               ret = PTR_ERR(ch->cq);
+               printk(KERN_ERR "failed to create CQ cqe= %d ret= %d\n",
+                      ch->rq_size + srp_sq_size, ret);
+               goto out;
+       }
+
+       qp_init->qp_context = (void *)ch;
+       qp_init->event_handler
+               = (void(*)(struct ib_event *, void*))srpt_qp_event;
+       qp_init->send_cq = ch->cq;
+       qp_init->recv_cq = ch->cq;
+       qp_init->srq = sdev->srq;
+       qp_init->sq_sig_type = IB_SIGNAL_REQ_WR;
+       qp_init->qp_type = IB_QPT_RC;
+       qp_init->cap.max_send_wr = srp_sq_size;
+       qp_init->cap.max_send_sge = SRPT_DEF_SG_PER_WQE;
+
+       ch->qp = ib_create_qp(sdev->pd, qp_init);
+       if (IS_ERR(ch->qp)) {
+               ret = PTR_ERR(ch->qp);
+               printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
+               goto err_destroy_cq;
+       }
+
+       atomic_set(&ch->sq_wr_avail, qp_init->cap.max_send_wr);
+
+       pr_debug("%s: max_cqe= %d max_sge= %d sq_size = %d cm_id= %p\n",
+                __func__, ch->cq->cqe, qp_init->cap.max_send_sge,
+                qp_init->cap.max_send_wr, ch->cm_id);
+
+       ret = srpt_init_ch_qp(ch, ch->qp);
+       if (ret)
+               goto err_destroy_qp;
+
+       init_waitqueue_head(&ch->wait_queue);
+
+       pr_debug("creating thread for session %s\n", ch->sess_name);
+
+       ch->thread = kthread_run(srpt_compl_thread, ch, "ib_srpt_compl");
+       if (IS_ERR(ch->thread)) {
+               printk(KERN_ERR "failed to create kernel thread %ld\n",
+                      PTR_ERR(ch->thread));
+               ch->thread = NULL;
+               goto err_destroy_qp;
+       }
+
+out:
+       kfree(qp_init);
+       return ret;
+
+err_destroy_qp:
+       ib_destroy_qp(ch->qp);
+err_destroy_cq:
+       ib_destroy_cq(ch->cq);
+       goto out;
+}
+
+static void srpt_destroy_ch_ib(struct srpt_rdma_ch *ch)
+{
+       if (ch->thread)
+               kthread_stop(ch->thread);
+
+       ib_destroy_qp(ch->qp);
+       ib_destroy_cq(ch->cq);
+}
+
+/**
+ * __srpt_close_ch() - Close an RDMA channel by setting the QP error state.
+ *
+ * Reset the QP and make sure all resources associated with the channel will
+ * be deallocated at an appropriate time.
+ *
+ * Note: The caller must hold ch->sport->sdev->spinlock.
+ */
+static void __srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+       struct srpt_device *sdev;
+       enum rdma_ch_state prev_state;
+       unsigned long flags;
+
+       sdev = ch->sport->sdev;
+
+       spin_lock_irqsave(&ch->spinlock, flags);
+       prev_state = ch->state;
+       switch (prev_state) {
+       case CH_CONNECTING:
+       case CH_LIVE:
+               ch->state = CH_DISCONNECTING;
+               break;
+       default:
+               break;
+       }
+       spin_unlock_irqrestore(&ch->spinlock, flags);
+
+       switch (prev_state) {
+       case CH_CONNECTING:
+               ib_send_cm_rej(ch->cm_id, IB_CM_REJ_NO_RESOURCES, NULL, 0,
+                              NULL, 0);
+               /* fall through */
+       case CH_LIVE:
+               if (ib_send_cm_dreq(ch->cm_id, NULL, 0) < 0)
+                       printk(KERN_ERR "sending CM DREQ failed.\n");
+               break;
+       case CH_DISCONNECTING:
+               break;
+       case CH_DRAINING:
+       case CH_RELEASING:
+               break;
+       }
+}
+
+/**
+ * srpt_close_ch() - Close an RDMA channel.
+ */
+static void srpt_close_ch(struct srpt_rdma_ch *ch)
+{
+       struct srpt_device *sdev;
+
+       sdev = ch->sport->sdev;
+       spin_lock_irq(&sdev->spinlock);
+       __srpt_close_ch(ch);
+       spin_unlock_irq(&sdev->spinlock);
+}
+
+/**
+ * srpt_drain_channel() - Drain a channel by resetting the IB queue pair.
+ * @cm_id: Pointer to the CM ID of the channel to be drained.
+ *
+ * Note: Must be called from inside srpt_cm_handler to avoid a race between
+ * accessing sdev->spinlock and the call to kfree(sdev) in srpt_remove_one()
+ * (the caller of srpt_cm_handler holds the cm_id spinlock; srpt_remove_one()
+ * waits until all target sessions for the associated IB device have been
+ * unregistered and target session unregistration involves a call to
+ * ib_destroy_cm_id(), which locks the cm_id spinlock and hence waits until
+ * this function has finished).
+ */
+static void srpt_drain_channel(struct ib_cm_id *cm_id)
+{
+       struct srpt_device *sdev;
+       struct srpt_rdma_ch *ch;
+       int ret;
+       bool do_reset = false;
+
+       WARN_ON_ONCE(irqs_disabled());
+
+       sdev = cm_id->context;
+       BUG_ON(!sdev);
+       spin_lock_irq(&sdev->spinlock);
+       list_for_each_entry(ch, &sdev->rch_list, list) {
+               if (ch->cm_id == cm_id) {
+                       do_reset = srpt_test_and_set_ch_state(ch,
+                                       CH_CONNECTING, CH_DRAINING) ||
+                                  srpt_test_and_set_ch_state(ch,
+                                       CH_LIVE, CH_DRAINING) ||
+                                  srpt_test_and_set_ch_state(ch,
+                                       CH_DISCONNECTING, CH_DRAINING);
+                       break;
+               }
+       }
+       spin_unlock_irq(&sdev->spinlock);
+
+       if (do_reset) {
+               ret = srpt_ch_qp_err(ch);
+               if (ret < 0)
+                       printk(KERN_ERR "Setting queue pair in error state"
+                              " failed: %d\n", ret);
+       }
+}
+
+/**
+ * srpt_find_channel() - Look up an RDMA channel.
+ * @cm_id: Pointer to the CM ID of the channel to be looked up.
+ *
+ * Return NULL if no matching RDMA channel has been found.
+ */
+static struct srpt_rdma_ch *srpt_find_channel(struct srpt_device *sdev,
+                                             struct ib_cm_id *cm_id)
+{
+       struct srpt_rdma_ch *ch;
+       bool found;
+
+       WARN_ON_ONCE(irqs_disabled());
+       BUG_ON(!sdev);
+
+       found = false;
+       spin_lock_irq(&sdev->spinlock);
+       list_for_each_entry(ch, &sdev->rch_list, list) {
+               if (ch->cm_id == cm_id) {
+                       found = true;
+                       break;
+               }
+       }
+       spin_unlock_irq(&sdev->spinlock);
+
+       return found ? ch : NULL;
+}
+
+/**
+ * srpt_release_channel() - Release channel resources.
+ *
+ * Schedules the actual release because:
+ * - Calling ib_destroy_cm_id() from inside an IB CM callback would
+ *   trigger a deadlock.
+ * - It is not safe to call TCM transport_* functions from interrupt context.
+ */
+static void srpt_release_channel(struct srpt_rdma_ch *ch)
+{
+       schedule_work(&ch->release_work);
+}
+
+static void srpt_release_channel_work(struct work_struct *w)
+{
+       struct srpt_rdma_ch *ch;
+       struct srpt_device *sdev;
+
+       ch = container_of(w, struct srpt_rdma_ch, release_work);
+       pr_debug("ch = %p; ch->sess = %p; release_done = %p\n", ch, ch->sess,
+                ch->release_done);
+
+       sdev = ch->sport->sdev;
+       BUG_ON(!sdev);
+
+       transport_deregister_session_configfs(ch->sess);
+       transport_deregister_session(ch->sess);
+       ch->sess = NULL;
+
+       srpt_destroy_ch_ib(ch);
+
+       srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+                            ch->sport->sdev, ch->rq_size,
+                            ch->rsp_size, DMA_TO_DEVICE);
+
+       spin_lock_irq(&sdev->spinlock);
+       list_del(&ch->list);
+       spin_unlock_irq(&sdev->spinlock);
+
+       ib_destroy_cm_id(ch->cm_id);
+
+       if (ch->release_done)
+               complete(ch->release_done);
+
+       wake_up(&sdev->ch_releaseQ);
+
+       kfree(ch);
+}
+
+static struct srpt_node_acl *__srpt_lookup_acl(struct srpt_port *sport,
+                                              u8 i_port_id[16])
+{
+       struct srpt_node_acl *nacl;
+
+       list_for_each_entry(nacl, &sport->port_acl_list, list)
+               if (memcmp(nacl->i_port_id, i_port_id,
+                          sizeof(nacl->i_port_id)) == 0)
+                       return nacl;
+
+       return NULL;
+}
+
+static struct srpt_node_acl *srpt_lookup_acl(struct srpt_port *sport,
+                                            u8 i_port_id[16])
+{
+       struct srpt_node_acl *nacl;
+
+       spin_lock_irq(&sport->port_acl_lock);
+       nacl = __srpt_lookup_acl(sport, i_port_id);
+       spin_unlock_irq(&sport->port_acl_lock);
+
+       return nacl;
+}
+
+/**
+ * srpt_cm_req_recv() - Process the event IB_CM_REQ_RECEIVED.
+ *
+ * Ownership of the cm_id is transferred to the target session if this
+ * function returns zero. Otherwise the caller remains the owner of cm_id.
+ */
+static int srpt_cm_req_recv(struct ib_cm_id *cm_id,
+                           struct ib_cm_req_event_param *param,
+                           void *private_data)
+{
+       struct srpt_device *sdev = cm_id->context;
+       struct srpt_port *sport = &sdev->port[param->port - 1];
+       struct srp_login_req *req;
+       struct srp_login_rsp *rsp;
+       struct srp_login_rej *rej;
+       struct ib_cm_rep_param *rep_param;
+       struct srpt_rdma_ch *ch, *tmp_ch;
+       struct srpt_node_acl *nacl;
+       u32 it_iu_len;
+       int i;
+       int ret = 0;
+
+       WARN_ON_ONCE(irqs_disabled());
+
+       if (WARN_ON(!sdev || !private_data))
+               return -EINVAL;
+
+       req = (struct srp_login_req *)private_data;
+
+       it_iu_len = be32_to_cpu(req->req_it_iu_len);
+
+       printk(KERN_INFO "Received SRP_LOGIN_REQ with i_port_id 0x%llx:0x%llx,"
+              " t_port_id 0x%llx:0x%llx and it_iu_len %d on port %d"
+              " (guid=0x%llx:0x%llx)\n",
+              be64_to_cpu(*(__be64 *)&req->initiator_port_id[0]),
+              be64_to_cpu(*(__be64 *)&req->initiator_port_id[8]),
+              be64_to_cpu(*(__be64 *)&req->target_port_id[0]),
+              be64_to_cpu(*(__be64 *)&req->target_port_id[8]),
+              it_iu_len,
+              param->port,
+              be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[0]),
+              be64_to_cpu(*(__be64 *)&sdev->port[param->port - 1].gid.raw[8]));
+
+       rsp = kzalloc(sizeof *rsp, GFP_KERNEL);
+       rej = kzalloc(sizeof *rej, GFP_KERNEL);
+       rep_param = kzalloc(sizeof *rep_param, GFP_KERNEL);
+
+       if (!rsp || !rej || !rep_param) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       if (it_iu_len > srp_max_req_size || it_iu_len < 64) {
+               rej->reason = __constant_cpu_to_be32(
+                               SRP_LOGIN_REJ_REQ_IT_IU_LENGTH_TOO_LARGE);
+               ret = -EINVAL;
+               printk(KERN_ERR "rejected SRP_LOGIN_REQ because its"
+                      " length (%d bytes) is out of range (%d .. %d)\n",
+                      it_iu_len, 64, srp_max_req_size);
+               goto reject;
+       }
+
+       if (!sport->enabled) {
+               rej->reason = __constant_cpu_to_be32(
+                            SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               ret = -EINVAL;
+               printk(KERN_ERR "rejected SRP_LOGIN_REQ because the target port"
+                      " has not yet been enabled\n");
+               goto reject;
+       }
+
+       if ((req->req_flags & SRP_MTCH_ACTION) == SRP_MULTICHAN_SINGLE) {
+               rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_NO_CHAN;
+
+               spin_lock_irq(&sdev->spinlock);
+
+               list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list) {
+                       if (!memcmp(ch->i_port_id, req->initiator_port_id, 16)
+                           && !memcmp(ch->t_port_id, req->target_port_id, 16)
+                           && param->port == ch->sport->port
+                           && param->listen_id == ch->sport->sdev->cm_id
+                           && ch->cm_id) {
+                               enum rdma_ch_state ch_state;
+
+                               ch_state = srpt_get_ch_state(ch);
+                               if (ch_state != CH_CONNECTING
+                                   && ch_state != CH_LIVE)
+                                       continue;
+
+                               /* found an existing channel */
+                               pr_debug("Found existing channel %s"
+                                        " cm_id= %p state= %d\n",
+                                        ch->sess_name, ch->cm_id, ch_state);
+
+                               __srpt_close_ch(ch);
+
+                               rsp->rsp_flags =
+                                       SRP_LOGIN_RSP_MULTICHAN_TERMINATED;
+                       }
+               }
+
+               spin_unlock_irq(&sdev->spinlock);
+
+       } else
+               rsp->rsp_flags = SRP_LOGIN_RSP_MULTICHAN_MAINTAINED;
+
+       if (*(__be64 *)req->target_port_id != cpu_to_be64(srpt_service_guid)
+           || *(__be64 *)(req->target_port_id + 8) !=
+              cpu_to_be64(srpt_service_guid)) {
+               rej->reason = __constant_cpu_to_be32(
+                               SRP_LOGIN_REJ_UNABLE_ASSOCIATE_CHANNEL);
+               ret = -ENOMEM;
+               printk(KERN_ERR "rejected SRP_LOGIN_REQ because it"
+                      " has an invalid target port identifier.\n");
+               goto reject;
+       }
+
+       ch = kzalloc(sizeof *ch, GFP_KERNEL);
+       if (!ch) {
+               rej->reason = __constant_cpu_to_be32(
+                                       SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               printk(KERN_ERR "rejected SRP_LOGIN_REQ because no memory.\n");
+               ret = -ENOMEM;
+               goto reject;
+       }
+
+       INIT_WORK(&ch->release_work, srpt_release_channel_work);
+       memcpy(ch->i_port_id, req->initiator_port_id, 16);
+       memcpy(ch->t_port_id, req->target_port_id, 16);
+       ch->sport = &sdev->port[param->port - 1];
+       ch->cm_id = cm_id;
+       /*
+        * Avoid QUEUE_FULL conditions by limiting the number of buffers used
+        * for the SRP protocol to the command queue size.
+        */
+       ch->rq_size = SRPT_RQ_SIZE;
+       spin_lock_init(&ch->spinlock);
+       ch->state = CH_CONNECTING;
+       INIT_LIST_HEAD(&ch->cmd_wait_list);
+       ch->rsp_size = ch->sport->port_attrib.srp_max_rsp_size;
+
+       ch->ioctx_ring = (struct srpt_send_ioctx **)
+               srpt_alloc_ioctx_ring(ch->sport->sdev, ch->rq_size,
+                                     sizeof(*ch->ioctx_ring[0]),
+                                     ch->rsp_size, DMA_TO_DEVICE);
+       if (!ch->ioctx_ring)
+               goto free_ch;
+
+       INIT_LIST_HEAD(&ch->free_list);
+       for (i = 0; i < ch->rq_size; i++) {
+               ch->ioctx_ring[i]->ch = ch;
+               list_add_tail(&ch->ioctx_ring[i]->free_list, &ch->free_list);
+       }
+
+       ret = srpt_create_ch_ib(ch);
+       if (ret) {
+               rej->reason = __constant_cpu_to_be32(
+                               SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               printk(KERN_ERR "rejected SRP_LOGIN_REQ because creating"
+                      " a new RDMA channel failed.\n");
+               goto free_ring;
+       }
+
+       ret = srpt_ch_qp_rtr(ch, ch->qp);
+       if (ret) {
+               rej->reason = __constant_cpu_to_be32(
+                               SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               printk(KERN_ERR "rejected SRP_LOGIN_REQ because enabling"
+                      " RTR failed (error code = %d)\n", ret);
+               goto destroy_ib;
+       }
+       /*
+        * Use the initiator port identifier as the session name.
+        */
+       snprintf(ch->sess_name, sizeof(ch->sess_name), "0x%016llx%016llx",
+                       be64_to_cpu(*(__be64 *)ch->i_port_id),
+                       be64_to_cpu(*(__be64 *)(ch->i_port_id + 8)));
+
+       pr_debug("registering session %s\n", ch->sess_name);
+
+       nacl = srpt_lookup_acl(sport, ch->i_port_id);
+       if (!nacl) {
+               printk(KERN_INFO "Rejected login because no ACL has been"
+                      " configured yet for initiator %s.\n", ch->sess_name);
+               rej->reason = __constant_cpu_to_be32(
+                               SRP_LOGIN_REJ_CHANNEL_LIMIT_REACHED);
+               ret = -EINVAL;
+               goto destroy_ib;
+       }
+
+       ch->sess = transport_init_session();
+       if (IS_ERR(ch->sess)) {
+               rej->reason = __constant_cpu_to_be32(
+                               SRP_LOGIN_REJ_INSUFFICIENT_RESOURCES);
+               pr_debug("Failed to create session\n");
+               ret = PTR_ERR(ch->sess);
+               goto destroy_ib;
+       }
+       ch->sess->se_node_acl = &nacl->nacl;
+       transport_register_session(&sport->port_tpg_1, &nacl->nacl, ch->sess, ch);
+
+       pr_debug("Establish connection sess=%p name=%s cm_id=%p\n", ch->sess,
+                ch->sess_name, ch->cm_id);
+
+       /* create srp_login_response */
+       rsp->opcode = SRP_LOGIN_RSP;
+       rsp->tag = req->tag;
+       rsp->max_it_iu_len = req->req_it_iu_len;
+       rsp->max_ti_iu_len = req->req_it_iu_len;
+       ch->max_ti_iu_len = it_iu_len;
+       rsp->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+                                             | SRP_BUF_FORMAT_INDIRECT);
+       rsp->req_lim_delta = cpu_to_be32(ch->rq_size);
+       atomic_set(&ch->req_lim, ch->rq_size);
+       atomic_set(&ch->req_lim_delta, 0);
+
+       /* create cm reply */
+       rep_param->qp_num = ch->qp->qp_num;
+       rep_param->private_data = (void *)rsp;
+       rep_param->private_data_len = sizeof *rsp;
+       rep_param->rnr_retry_count = 7;
+       rep_param->flow_control = 1;
+       rep_param->failover_accepted = 0;
+       rep_param->srq = 1;
+       rep_param->responder_resources = 4;
+       rep_param->initiator_depth = 4;
+
+       ret = ib_send_cm_rep(cm_id, rep_param);
+       if (ret) {
+               printk(KERN_ERR "sending SRP_LOGIN_REQ response failed"
+                      " (error code = %d)\n", ret);
+               goto release_channel;
+       }
+
+       spin_lock_irq(&sdev->spinlock);
+       list_add_tail(&ch->list, &sdev->rch_list);
+       spin_unlock_irq(&sdev->spinlock);
+
+       goto out;
+
+release_channel:
+       srpt_set_ch_state(ch, CH_RELEASING);
+       transport_deregister_session_configfs(ch->sess);
+
+deregister_session:
+       transport_deregister_session(ch->sess);
+       ch->sess = NULL;
+
+destroy_ib:
+       srpt_destroy_ch_ib(ch);
+
+free_ring:
+       srpt_free_ioctx_ring((struct srpt_ioctx **)ch->ioctx_ring,
+                            ch->sport->sdev, ch->rq_size,
+                            ch->rsp_size, DMA_TO_DEVICE);
+free_ch:
+       kfree(ch);
+
+reject:
+       rej->opcode = SRP_LOGIN_REJ;
+       rej->tag = req->tag;
+       rej->buf_fmt = __constant_cpu_to_be16(SRP_BUF_FORMAT_DIRECT
+                                             | SRP_BUF_FORMAT_INDIRECT);
+
+       ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED, NULL, 0,
+                            (void *)rej, sizeof *rej);
+
+out:
+       kfree(rep_param);
+       kfree(rsp);
+       kfree(rej);
+
+       return ret;
+}
+
+static void srpt_cm_rej_recv(struct ib_cm_id *cm_id)
+{
+       printk(KERN_INFO "Received IB REJ for cm_id %p.\n", cm_id);
+       srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_rtu_recv() - Process an IB_CM_RTU_RECEIVED or USER_ESTABLISHED event.
+ *
+ * An IB_CM_RTU_RECEIVED message indicates that the connection is established
+ * and that the recipient may begin transmitting (RTU = ready to use).
+ */
+static void srpt_cm_rtu_recv(struct ib_cm_id *cm_id)
+{
+       struct srpt_rdma_ch *ch;
+       int ret;
+
+       ch = srpt_find_channel(cm_id->context, cm_id);
+       BUG_ON(!ch);
+
+       if (srpt_test_and_set_ch_state(ch, CH_CONNECTING, CH_LIVE)) {
+               struct srpt_recv_ioctx *ioctx, *ioctx_tmp;
+
+               ret = srpt_ch_qp_rts(ch, ch->qp);
+
+               list_for_each_entry_safe(ioctx, ioctx_tmp, &ch->cmd_wait_list,
+                                        wait_list) {
+                       list_del(&ioctx->wait_list);
+                       srpt_handle_new_iu(ch, ioctx, NULL);
+               }
+               if (ret)
+                       srpt_close_ch(ch);
+       }
+}
+
+static void srpt_cm_timewait_exit(struct ib_cm_id *cm_id)
+{
+       printk(KERN_INFO "Received IB TimeWait exit for cm_id %p.\n", cm_id);
+       srpt_drain_channel(cm_id);
+}
+
+static void srpt_cm_rep_error(struct ib_cm_id *cm_id)
+{
+       printk(KERN_INFO "Received IB REP error for cm_id %p.\n", cm_id);
+       srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_dreq_recv() - Process reception of a DREQ message.
+ */
+static void srpt_cm_dreq_recv(struct ib_cm_id *cm_id)
+{
+       struct srpt_rdma_ch *ch;
+       unsigned long flags;
+       bool send_drep = false;
+
+       ch = srpt_find_channel(cm_id->context, cm_id);
+       BUG_ON(!ch);
+
+       pr_debug("cm_id= %p ch->state= %d\n", cm_id, srpt_get_ch_state(ch));
+
+       spin_lock_irqsave(&ch->spinlock, flags);
+       switch (ch->state) {
+       case CH_CONNECTING:
+       case CH_LIVE:
+               send_drep = true;
+               ch->state = CH_DISCONNECTING;
+               break;
+       case CH_DISCONNECTING:
+       case CH_DRAINING:
+       case CH_RELEASING:
+               WARN(true, "unexpected channel state %d\n", ch->state);
+               break;
+       }
+       spin_unlock_irqrestore(&ch->spinlock, flags);
+
+       if (send_drep) {
+               if (ib_send_cm_drep(ch->cm_id, NULL, 0) < 0)
+                       printk(KERN_ERR "Sending IB DREP failed.\n");
+               printk(KERN_INFO "Received DREQ and sent DREP for session %s.\n",
+                      ch->sess_name);
+       }
+}
+
+/**
+ * srpt_cm_drep_recv() - Process reception of a DREP message.
+ */
+static void srpt_cm_drep_recv(struct ib_cm_id *cm_id)
+{
+       printk(KERN_INFO "Received InfiniBand DREP message for cm_id %p.\n",
+              cm_id);
+       srpt_drain_channel(cm_id);
+}
+
+/**
+ * srpt_cm_handler() - IB connection manager callback function.
+ *
+ * A non-zero return value will cause the caller to destroy the CM ID.
+ *
+ * Note: srpt_cm_handler() must only return a non-zero value when transferring
+ * ownership of the cm_id to a channel by srpt_cm_req_recv() failed. Returning
+ * a non-zero value in any other case will trigger a race with the
+ * ib_destroy_cm_id() call in srpt_release_channel().
+ */
+static int srpt_cm_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
+{
+       int ret;
+
+       ret = 0;
+       switch (event->event) {
+       case IB_CM_REQ_RECEIVED:
+               ret = srpt_cm_req_recv(cm_id, &event->param.req_rcvd,
+                                      event->private_data);
+               break;
+       case IB_CM_REJ_RECEIVED:
+               srpt_cm_rej_recv(cm_id);
+               break;
+       case IB_CM_RTU_RECEIVED:
+       case IB_CM_USER_ESTABLISHED:
+               srpt_cm_rtu_recv(cm_id);
+               break;
+       case IB_CM_DREQ_RECEIVED:
+               srpt_cm_dreq_recv(cm_id);
+               break;
+       case IB_CM_DREP_RECEIVED:
+               srpt_cm_drep_recv(cm_id);
+               break;
+       case IB_CM_TIMEWAIT_EXIT:
+               srpt_cm_timewait_exit(cm_id);
+               break;
+       case IB_CM_REP_ERROR:
+               srpt_cm_rep_error(cm_id);
+               break;
+       case IB_CM_DREQ_ERROR:
+               printk(KERN_INFO "Received IB DREQ ERROR event.\n");
+               break;
+       case IB_CM_MRA_RECEIVED:
+               printk(KERN_INFO "Received IB MRA event\n");
+               break;
+       default:
+               printk(KERN_ERR "received unrecognized IB CM event %d\n",
+                      event->event);
+               break;
+       }
+
+       return ret;
+}
+
+/**
+ * srpt_perform_rdmas() - Perform IB RDMA.
+ *
+ * Returns zero upon success or a negative number upon failure.
+ */
+static int srpt_perform_rdmas(struct srpt_rdma_ch *ch,
+                             struct srpt_send_ioctx *ioctx)
+{
+       struct ib_send_wr wr;
+       struct ib_send_wr *bad_wr;
+       struct rdma_iu *riu;
+       int i;
+       int ret;
+       int sq_wr_avail;
+       enum dma_data_direction dir;
+       const int n_rdma = ioctx->n_rdma;
+
+       dir = ioctx->cmd.data_direction;
+       if (dir == DMA_TO_DEVICE) {
+               /* write */
+               ret = -ENOMEM;
+               sq_wr_avail = atomic_sub_return(n_rdma, &ch->sq_wr_avail);
+               if (sq_wr_avail < 0) {
+                       printk(KERN_WARNING "IB send queue full (needed %d)\n",
+                              n_rdma);
+                       goto out;
+               }
+       }
+
+       ioctx->rdma_aborted = false;
+       ret = 0;
+       riu = ioctx->rdma_ius;
+       memset(&wr, 0, sizeof wr);
+
+       for (i = 0; i < n_rdma; ++i, ++riu) {
+               if (dir == DMA_FROM_DEVICE) {
+                       wr.opcode = IB_WR_RDMA_WRITE;
+                       wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
+                                               SRPT_RDMA_WRITE_LAST :
+                                               SRPT_RDMA_MID,
+                                               ioctx->ioctx.index);
+               } else {
+                       wr.opcode = IB_WR_RDMA_READ;
+                       wr.wr_id = encode_wr_id(i == n_rdma - 1 ?
+                                               SRPT_RDMA_READ_LAST :
+                                               SRPT_RDMA_MID,
+                                               ioctx->ioctx.index);
+               }
+               wr.next = NULL;
+               wr.wr.rdma.remote_addr = riu->raddr;
+               wr.wr.rdma.rkey = riu->rkey;
+               wr.num_sge = riu->sge_cnt;
+               wr.sg_list = riu->sge;
+
+               /* Only request a completion event for the last RDMA read. */
+               if (i == (n_rdma - 1) && dir == DMA_TO_DEVICE)
+                       wr.send_flags = IB_SEND_SIGNALED;
+
+               ret = ib_post_send(ch->qp, &wr, &bad_wr);
+               if (ret)
+                       break;
+       }
+
+       if (ret)
+               printk(KERN_ERR "%s[%d]: ib_post_send() returned %d for %d/%d",
+                                __func__, __LINE__, ret, i, n_rdma);
+       if (ret && i > 0) {
+               wr.num_sge = 0;
+               wr.wr_id = encode_wr_id(SRPT_RDMA_ABORT, ioctx->ioctx.index);
+               wr.send_flags = IB_SEND_SIGNALED;
+               while (ch->state == CH_LIVE &&
+                       ib_post_send(ch->qp, &wr, &bad_wr) != 0) {
+                       printk(KERN_INFO "Trying to abort failed RDMA transfer [%d]",
+                               ioctx->ioctx.index);
+                       msleep(1000);
+               }
+               while (ch->state != CH_RELEASING && !ioctx->rdma_aborted) {
+                       printk(KERN_INFO "Waiting until RDMA abort finished [%d]",
+                               ioctx->ioctx.index);
+                       msleep(1000);
+               }
+       }
+out:
+       if (unlikely(dir == DMA_TO_DEVICE && ret < 0))
+               atomic_add(n_rdma, &ch->sq_wr_avail);
+       return ret;
+}
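+
+/*
+ * Completion policy implemented by srpt_perform_rdmas() (summary, for
+ * reference): for a SCSI READ (DMA_FROM_DEVICE) the RDMA write work requests
+ * are posted unsignaled; for a SCSI WRITE (DMA_TO_DEVICE) only the last RDMA
+ * read is signaled, and its SRPT_RDMA_READ_LAST completion triggers
+ * transport_generic_handle_data() in srpt_handle_rdma_comp(). If posting
+ * fails part-way through, a signaled SRPT_RDMA_ABORT work request is posted
+ * and its completion is awaited before the transfer is torn down.
+ */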
+
+/**
+ * srpt_xfer_data() - Start data transfer between initiator and target.
+ */
+static int srpt_xfer_data(struct srpt_rdma_ch *ch,
+                         struct srpt_send_ioctx *ioctx)
+{
+       int ret;
+
+       ret = srpt_map_sg_to_ib_sge(ch, ioctx);
+       if (ret) {
+               printk(KERN_ERR "%s[%d] ret=%d\n", __func__, __LINE__, ret);
+               goto out;
+       }
+
+       ret = srpt_perform_rdmas(ch, ioctx);
+       if (ret) {
+               if (ret == -EAGAIN || ret == -ENOMEM)
+                       printk(KERN_INFO "%s[%d] queue full -- ret=%d\n",
+                                  __func__, __LINE__, ret);
+               else
+                       printk(KERN_ERR "%s[%d] fatal error -- ret=%d\n",
+                              __func__, __LINE__, ret);
+               goto out_unmap;
+       }
+
+out:
+       return ret;
+out_unmap:
+       srpt_unmap_sg_to_ib_sge(ch, ioctx);
+       goto out;
+}
+
+static int srpt_write_pending_status(struct se_cmd *se_cmd)
+{
+       struct srpt_send_ioctx *ioctx;
+
+       ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+       return srpt_get_cmd_state(ioctx) == SRPT_STATE_NEED_DATA;
+}
+
+/**
+ * srpt_write_pending() - Start data transfer from initiator to target (write).
+ */
+static int srpt_write_pending(struct se_cmd *se_cmd)
+{
+       struct srpt_rdma_ch *ch;
+       struct srpt_send_ioctx *ioctx;
+       enum srpt_command_state new_state;
+       enum rdma_ch_state ch_state;
+       int ret;
+
+       ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+
+       new_state = srpt_set_cmd_state(ioctx, SRPT_STATE_NEED_DATA);
+       WARN_ON(new_state == SRPT_STATE_DONE);
+
+       ch = ioctx->ch;
+       BUG_ON(!ch);
+
+       ch_state = srpt_get_ch_state(ch);
+       switch (ch_state) {
+       case CH_CONNECTING:
+               WARN(true, "unexpected channel state %d\n", ch_state);
+               ret = -EINVAL;
+               goto out;
+       case CH_LIVE:
+               break;
+       case CH_DISCONNECTING:
+       case CH_DRAINING:
+       case CH_RELEASING:
+               pr_debug("cmd with tag %lld: channel disconnecting\n",
+                        ioctx->tag);
+               srpt_set_cmd_state(ioctx, SRPT_STATE_DATA_IN);
+               ret = -EINVAL;
+               goto out;
+       }
+       ret = srpt_xfer_data(ch, ioctx);
+
+out:
+       return ret;
+}
+
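+/**
+ * tcm_to_srp_tsk_mgmt_status() - Translate a TCM task management status.
+ *
+ * Maps the TCM task management response onto the SRP task management status
+ * codes (SRP_TSK_MGMT_SUCCESS, SRP_TSK_MGMT_FUNC_NOT_SUPP or
+ * SRP_TSK_MGMT_FAILED); see also table 24 in the SRP specification.
+ */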
+static u8 tcm_to_srp_tsk_mgmt_status(const int tcm_mgmt_status)
+{
+       switch (tcm_mgmt_status) {
+       case TMR_FUNCTION_COMPLETE:
+               return SRP_TSK_MGMT_SUCCESS;
+       case TMR_FUNCTION_REJECTED:
+               return SRP_TSK_MGMT_FUNC_NOT_SUPP;
+       }
+       return SRP_TSK_MGMT_FAILED;
+}
+
+/**
+ * srpt_queue_response() - Transmits the response to a SCSI command.
+ *
+ * Callback function called by the TCM core. Must not block since it can be
+ * invoked on the context of the IB completion handler.
+ */
+static int srpt_queue_response(struct se_cmd *cmd)
+{
+       struct srpt_rdma_ch *ch;
+       struct srpt_send_ioctx *ioctx;
+       enum srpt_command_state state;
+       unsigned long flags;
+       int ret;
+       enum dma_data_direction dir;
+       int resp_len;
+       u8 srp_tm_status;
+
+       ret = 0;
+
+       ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+       ch = ioctx->ch;
+       BUG_ON(!ch);
+
+       spin_lock_irqsave(&ioctx->spinlock, flags);
+       state = ioctx->state;
+       switch (state) {
+       case SRPT_STATE_NEW:
+       case SRPT_STATE_DATA_IN:
+               ioctx->state = SRPT_STATE_CMD_RSP_SENT;
+               break;
+       case SRPT_STATE_MGMT:
+               ioctx->state = SRPT_STATE_MGMT_RSP_SENT;
+               break;
+       default:
+               WARN(true, "ch %p; cmd %d: unexpected command state %d\n",
+                       ch, ioctx->ioctx.index, ioctx->state);
+               break;
+       }
+       spin_unlock_irqrestore(&ioctx->spinlock, flags);
+
+       if (unlikely(transport_check_aborted_status(&ioctx->cmd, false)
+                    || WARN_ON_ONCE(state == SRPT_STATE_CMD_RSP_SENT))) {
+               atomic_inc(&ch->req_lim_delta);
+               srpt_abort_cmd(ioctx);
+               goto out;
+       }
+
+       dir = ioctx->cmd.data_direction;
+
+       /* For read commands, transfer the data to the initiator. */
+       if (dir == DMA_FROM_DEVICE && ioctx->cmd.data_length &&
+           !ioctx->queue_status_only) {
+               ret = srpt_xfer_data(ch, ioctx);
+               if (ret) {
+                       printk(KERN_ERR "xfer_data failed for tag %llu\n",
+                              ioctx->tag);
+                       goto out;
+               }
+       }
+
+       if (state != SRPT_STATE_MGMT)
+               resp_len = srpt_build_cmd_rsp(ch, ioctx, ioctx->tag,
+                                             cmd->scsi_status);
+       else {
+               srp_tm_status
+                       = tcm_to_srp_tsk_mgmt_status(cmd->se_tmr_req->response);
+               resp_len = srpt_build_tskmgmt_rsp(ch, ioctx, srp_tm_status,
+                                                ioctx->tag);
+       }
+       ret = srpt_post_send(ch, ioctx, resp_len);
+       if (ret) {
+               printk(KERN_ERR "sending cmd response failed for tag %llu\n",
+                      ioctx->tag);
+               srpt_unmap_sg_to_ib_sge(ch, ioctx);
+               srpt_set_cmd_state(ioctx, SRPT_STATE_DONE);
+               kref_put(&ioctx->kref, srpt_put_send_ioctx_kref);
+       }
+
+out:
+       return ret;
+}
+
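+/**
+ * srpt_queue_status() - Send only the SCSI status and sense data.
+ *
+ * Sets queue_status_only and delegates the actual transmission to
+ * srpt_queue_response(), which then skips the data transfer phase.
+ */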
+static int srpt_queue_status(struct se_cmd *cmd)
+{
+       struct srpt_send_ioctx *ioctx;
+
+       ioctx = container_of(cmd, struct srpt_send_ioctx, cmd);
+       BUG_ON(ioctx->sense_data != cmd->sense_buffer);
+       if (cmd->se_cmd_flags &
+           (SCF_TRANSPORT_TASK_SENSE | SCF_EMULATED_TASK_SENSE))
+               WARN_ON(cmd->scsi_status != SAM_STAT_CHECK_CONDITION);
+       ioctx->queue_status_only = true;
+       return srpt_queue_response(cmd);
+}
+
+static void srpt_refresh_port_work(struct work_struct *work)
+{
+       struct srpt_port *sport = container_of(work, struct srpt_port, work);
+
+       srpt_refresh_port(sport);
+}
+
+static int srpt_ch_list_empty(struct srpt_device *sdev)
+{
+       int res;
+
+       spin_lock_irq(&sdev->spinlock);
+       res = list_empty(&sdev->rch_list);
+       spin_unlock_irq(&sdev->spinlock);
+
+       return res;
+}
+
+/**
+ * srpt_release_sdev() - Free the channel resources associated with a target.
+ */
+static int srpt_release_sdev(struct srpt_device *sdev)
+{
+       struct srpt_rdma_ch *ch, *tmp_ch;
+       int res;
+
+       WARN_ON_ONCE(irqs_disabled());
+
+       BUG_ON(!sdev);
+
+       spin_lock_irq(&sdev->spinlock);
+       list_for_each_entry_safe(ch, tmp_ch, &sdev->rch_list, list)
+               __srpt_close_ch(ch);
+       spin_unlock_irq(&sdev->spinlock);
+
+       res = wait_event_interruptible(sdev->ch_releaseQ,
+                                      srpt_ch_list_empty(sdev));
+       if (res)
+               printk(KERN_ERR "%s: interrupted.\n", __func__);
+
+       return 0;
+}
+
+static struct srpt_port *__srpt_lookup_port(const char *name)
+{
+       struct ib_device *dev;
+       struct srpt_device *sdev;
+       struct srpt_port *sport;
+       int i;
+
+       list_for_each_entry(sdev, &srpt_dev_list, list) {
+               dev = sdev->device;
+               if (!dev)
+                       continue;
+
+               for (i = 0; i < dev->phys_port_cnt; i++) {
+                       sport = &sdev->port[i];
+
+                       if (!strcmp(sport->port_guid, name))
+                               return sport;
+               }
+       }
+
+       return NULL;
+}
+
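+/**
+ * srpt_lookup_port() - Look up an SRPT port by its port GUID string.
+ *
+ * Takes srpt_dev_lock around __srpt_lookup_port().
+ */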
+static struct srpt_port *srpt_lookup_port(const char *name)
+{
+       struct srpt_port *sport;
+
+       spin_lock(&srpt_dev_lock);
+       sport = __srpt_lookup_port(name);
+       spin_unlock(&srpt_dev_lock);
+
+       return sport;
+}
+
+/**
+ * srpt_add_one() - Infiniband device addition callback function.
+ */
+static void srpt_add_one(struct ib_device *device)
+{
+       struct srpt_device *sdev;
+       struct srpt_port *sport;
+       struct ib_srq_init_attr srq_attr;
+       int i;
+
+       pr_debug("device = %p, device->dma_ops = %p\n", device,
+                device->dma_ops);
+
+       sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
+       if (!sdev)
+               goto err;
+
+       sdev->device = device;
+       INIT_LIST_HEAD(&sdev->rch_list);
+       init_waitqueue_head(&sdev->ch_releaseQ);
+       spin_lock_init(&sdev->spinlock);
+
+       if (ib_query_device(device, &sdev->dev_attr))
+               goto free_dev;
+
+       sdev->pd = ib_alloc_pd(device);
+       if (IS_ERR(sdev->pd))
+               goto free_dev;
+
+       sdev->mr = ib_get_dma_mr(sdev->pd, IB_ACCESS_LOCAL_WRITE);
+       if (IS_ERR(sdev->mr))
+               goto err_pd;
+
+       sdev->srq_size = min(srpt_srq_size, sdev->dev_attr.max_srq_wr);
+
+       srq_attr.event_handler = srpt_srq_event;
+       srq_attr.srq_context = (void *)sdev;
+       srq_attr.attr.max_wr = sdev->srq_size;
+       srq_attr.attr.max_sge = 1;
+       srq_attr.attr.srq_limit = 0;
+
+       sdev->srq = ib_create_srq(sdev->pd, &srq_attr);
+       if (IS_ERR(sdev->srq))
+               goto err_mr;
+
+       pr_debug("%s: create SRQ #wr= %d max_allow=%d dev= %s\n",
+                __func__, sdev->srq_size, sdev->dev_attr.max_srq_wr,
+                device->name);
+
+       if (!srpt_service_guid)
+               srpt_service_guid = be64_to_cpu(device->node_guid);
+
+       sdev->cm_id = ib_create_cm_id(device, srpt_cm_handler, sdev);
+       if (IS_ERR(sdev->cm_id))
+               goto err_srq;
+
+       /* print out target login information */
+       pr_debug("Target login info: id_ext=%016llx,ioc_guid=%016llx,"
+                "pkey=ffff,service_id=%016llx\n", srpt_service_guid,
+                srpt_service_guid, srpt_service_guid);
+
+       /*
+        * We do not have a consistent service_id (i.e. also the id_ext of the
+        * target_id) to identify this target. We currently use the GUID of the
+        * first HCA in the system as the service_id; therefore, the target_id
+        * will change if this HCA goes bad and is replaced by a different HCA.
+        */
+       if (ib_cm_listen(sdev->cm_id, cpu_to_be64(srpt_service_guid), 0, NULL))
+               goto err_cm;
+
+       INIT_IB_EVENT_HANDLER(&sdev->event_handler, sdev->device,
+                             srpt_event_handler);
+       if (ib_register_event_handler(&sdev->event_handler))
+               goto err_cm;
+
+       sdev->ioctx_ring = (struct srpt_recv_ioctx **)
+               srpt_alloc_ioctx_ring(sdev, sdev->srq_size,
+                                     sizeof(*sdev->ioctx_ring[0]),
+                                     srp_max_req_size, DMA_FROM_DEVICE);
+       if (!sdev->ioctx_ring)
+               goto err_event;
+
+       for (i = 0; i < sdev->srq_size; ++i)
+               srpt_post_recv(sdev, sdev->ioctx_ring[i]);
+
+       WARN_ON(sdev->device->phys_port_cnt > ARRAY_SIZE(sdev->port));
+
+       for (i = 1; i <= sdev->device->phys_port_cnt; i++) {
+               sport = &sdev->port[i - 1];
+               sport->sdev = sdev;
+               sport->port = i;
+               sport->port_attrib.srp_max_rdma_size = DEFAULT_MAX_RDMA_SIZE;
+               sport->port_attrib.srp_max_rsp_size = DEFAULT_MAX_RSP_SIZE;
+               sport->port_attrib.srp_sq_size = DEF_SRPT_SQ_SIZE;
+               INIT_WORK(&sport->work, srpt_refresh_port_work);
+               INIT_LIST_HEAD(&sport->port_acl_list);
+               spin_lock_init(&sport->port_acl_lock);
+
+               if (srpt_refresh_port(sport)) {
+                       printk(KERN_ERR "MAD registration failed for %s-%d.\n",
+                              srpt_sdev_name(sdev), i);
+                       goto err_ring;
+               }
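+               /*
+                * The port GUID is formatted as the GID's subnet prefix
+                * followed by its interface id, e.g. (illustrative value only)
+                * 0xfe80000000000000xxxxxxxxxxxxxxxx for the default
+                * link-local subnet prefix.
+                */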
+               snprintf(sport->port_guid, sizeof(sport->port_guid),
+                       "0x%016llx%016llx",
+                       be64_to_cpu(sport->gid.global.subnet_prefix),
+                       be64_to_cpu(sport->gid.global.interface_id));
+       }
+
+       spin_lock(&srpt_dev_lock);
+       list_add_tail(&sdev->list, &srpt_dev_list);
+       spin_unlock(&srpt_dev_lock);
+
+out:
+       ib_set_client_data(device, &srpt_client, sdev);
+       pr_debug("added %s.\n", device->name);
+       return;
+
+err_ring:
+       srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+                            sdev->srq_size, srp_max_req_size,
+                            DMA_FROM_DEVICE);
+err_event:
+       ib_unregister_event_handler(&sdev->event_handler);
+err_cm:
+       ib_destroy_cm_id(sdev->cm_id);
+err_srq:
+       ib_destroy_srq(sdev->srq);
+err_mr:
+       ib_dereg_mr(sdev->mr);
+err_pd:
+       ib_dealloc_pd(sdev->pd);
+free_dev:
+       kfree(sdev);
+err:
+       sdev = NULL;
+       printk(KERN_INFO "%s(%s) failed.\n", __func__, device->name);
+       goto out;
+}
+
+/**
+ * srpt_remove_one() - InfiniBand device removal callback function.
+ */
+static void srpt_remove_one(struct ib_device *device)
+{
+       struct srpt_device *sdev;
+       int i;
+
+       sdev = ib_get_client_data(device, &srpt_client);
+       if (!sdev) {
+               printk(KERN_INFO "%s(%s): nothing to do.\n", __func__,
+                      device->name);
+               return;
+       }
+
+       srpt_unregister_mad_agent(sdev);
+
+       ib_unregister_event_handler(&sdev->event_handler);
+
+       /* Cancel any work queued by the just unregistered IB event handler. */
+       for (i = 0; i < sdev->device->phys_port_cnt; i++)
+               cancel_work_sync(&sdev->port[i].work);
+
+       ib_destroy_cm_id(sdev->cm_id);
+
+       /*
+        * Unregistering a target must happen after destroying sdev->cm_id
+        * such that no new SRP_LOGIN_REQ information units can arrive while
+        * destroying the target.
+        */
+       spin_lock(&srpt_dev_lock);
+       list_del(&sdev->list);
+       spin_unlock(&srpt_dev_lock);
+       srpt_release_sdev(sdev);
+
+       ib_destroy_srq(sdev->srq);
+       ib_dereg_mr(sdev->mr);
+       ib_dealloc_pd(sdev->pd);
+
+       srpt_free_ioctx_ring((struct srpt_ioctx **)sdev->ioctx_ring, sdev,
+                            sdev->srq_size, srp_max_req_size, DMA_FROM_DEVICE);
+       sdev->ioctx_ring = NULL;
+       kfree(sdev);
+}
+
+static struct ib_client srpt_client = {
+       .name = DRV_NAME,
+       .add = srpt_add_one,
+       .remove = srpt_remove_one
+};
+
+static int srpt_check_true(struct se_portal_group *se_tpg)
+{
+       return 1;
+}
+
+static int srpt_check_false(struct se_portal_group *se_tpg)
+{
+       return 0;
+}
+
+static char *srpt_get_fabric_name(void)
+{
+       return "srpt";
+}
+
+static u8 srpt_get_fabric_proto_ident(struct se_portal_group *se_tpg)
+{
+       return SCSI_TRANSPORTID_PROTOCOLID_SRP;
+}
+
+static char *srpt_get_fabric_wwn(struct se_portal_group *tpg)
+{
+       struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
+
+       return sport->port_guid;
+}
+
+static u16 srpt_get_tag(struct se_portal_group *tpg)
+{
+       return 1;
+}
+
+static u32 srpt_get_default_depth(struct se_portal_group *se_tpg)
+{
+       return 1;
+}
+
+static u32 srpt_get_pr_transport_id(struct se_portal_group *se_tpg,
+                                   struct se_node_acl *se_nacl,
+                                   struct t10_pr_registration *pr_reg,
+                                   int *format_code, unsigned char *buf)
+{
+       struct srpt_node_acl *nacl;
+       struct spc_rdma_transport_id *tr_id;
+
+       nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+       tr_id = (void *)buf;
+       tr_id->protocol_identifier = SCSI_TRANSPORTID_PROTOCOLID_SRP;
+       memcpy(tr_id->i_port_id, nacl->i_port_id, sizeof(tr_id->i_port_id));
+       return sizeof(*tr_id);
+}
+
+static u32 srpt_get_pr_transport_id_len(struct se_portal_group *se_tpg,
+                                       struct se_node_acl *se_nacl,
+                                       struct t10_pr_registration *pr_reg,
+                                       int *format_code)
+{
+       *format_code = 0;
+       return sizeof(struct spc_rdma_transport_id);
+}
+
+static char *srpt_parse_pr_out_transport_id(struct se_portal_group *se_tpg,
+                                           const char *buf, u32 *out_tid_len,
+                                           char **port_nexus_ptr)
+{
+       struct spc_rdma_transport_id *tr_id;
+
+       *port_nexus_ptr = NULL;
+       *out_tid_len = sizeof(struct spc_rdma_transport_id);
+       tr_id = (void *)buf;
+       return (char *)tr_id->i_port_id;
+}
+
+static struct se_node_acl *srpt_alloc_fabric_acl(struct se_portal_group *se_tpg)
+{
+       struct srpt_node_acl *nacl;
+
+       nacl = kzalloc(sizeof(struct srpt_node_acl), GFP_KERNEL);
+       if (!nacl) {
+               printk(KERN_ERR "Unable to allocate struct srpt_node_acl\n");
+               return NULL;
+       }
+
+       return &nacl->nacl;
+}
+
+static void srpt_release_fabric_acl(struct se_portal_group *se_tpg,
+                                   struct se_node_acl *se_nacl)
+{
+       struct srpt_node_acl *nacl;
+
+       nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+       kfree(nacl);
+}
+
+static u32 srpt_tpg_get_inst_index(struct se_portal_group *se_tpg)
+{
+       return 1;
+}
+
+static void srpt_release_cmd(struct se_cmd *se_cmd)
+{
+}
+
+/**
+ * srpt_shutdown_session() - Whether or not a session may be shut down.
+ */
+static int srpt_shutdown_session(struct se_session *se_sess)
+{
+       return true;
+}
+
+/**
+ * srpt_close_session() - Forcibly close a session.
+ *
+ * Callback function invoked by the TCM core to clean up sessions associated
+ * with a node ACL when the user invokes
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static void srpt_close_session(struct se_session *se_sess)
+{
+       DECLARE_COMPLETION_ONSTACK(release_done);
+       struct srpt_rdma_ch *ch;
+       struct srpt_device *sdev;
+       int res;
+
+       ch = se_sess->fabric_sess_ptr;
+       WARN_ON(ch->sess != se_sess);
+
+       pr_debug("ch %p state %d\n", ch, srpt_get_ch_state(ch));
+
+       sdev = ch->sport->sdev;
+       spin_lock_irq(&sdev->spinlock);
+       BUG_ON(ch->release_done);
+       ch->release_done = &release_done;
+       __srpt_close_ch(ch);
+       spin_unlock_irq(&sdev->spinlock);
+
+       res = wait_for_completion_timeout(&release_done, 60 * HZ);
+       WARN_ON(res <= 0);
+}
+
+/**
+ * To do: Find out whether stop_session() has a meaning for transports
+ * other than iSCSI.
+ */
+static void srpt_stop_session(struct se_session *se_sess, int sess_sleep,
+                             int conn_sleep)
+{
+}
+
+static void srpt_reset_nexus(struct se_session *sess)
+{
+       printk(KERN_ERR "This is the SRP protocol, not iSCSI\n");
+}
+
+static int srpt_sess_logged_in(struct se_session *se_sess)
+{
+       return true;
+}
+
+/**
+ * srpt_sess_get_index() - Return the value of scsiAttIntrPortIndex (SCSI-MIB).
+ *
+ * A quote from RFC 4455 (SCSI-MIB) about this MIB object:
+ * This object represents an arbitrary integer used to uniquely identify a
+ * particular attached remote initiator port to a particular SCSI target port
+ * within a particular SCSI target device within a particular SCSI instance.
+ */
+static u32 srpt_sess_get_index(struct se_session *se_sess)
+{
+       return 0;
+}
+
+static void srpt_set_default_node_attrs(struct se_node_acl *nacl)
+{
+}
+
+static u32 srpt_get_task_tag(struct se_cmd *se_cmd)
+{
+       struct srpt_send_ioctx *ioctx;
+
+       ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+       return ioctx->tag;
+}
+
+/* Note: only used from inside debug printk's by the TCM core. */
+static int srpt_get_tcm_cmd_state(struct se_cmd *se_cmd)
+{
+       struct srpt_send_ioctx *ioctx;
+
+       ioctx = container_of(se_cmd, struct srpt_send_ioctx, cmd);
+       return srpt_get_cmd_state(ioctx);
+}
+
+static u16 srpt_set_fabric_sense_len(struct se_cmd *cmd, u32 sense_length)
+{
+       return 0;
+}
+
+static u16 srpt_get_fabric_sense_len(void)
+{
+       return 0;
+}
+
+static int srpt_is_state_remove(struct se_cmd *se_cmd)
+{
+       return 0;
+}
+
+/**
+ * srpt_parse_i_port_id() - Parse an initiator port ID.
+ * @i_port_id: Binary 128-bit port ID.
+ * @name: ASCII representation of a 128-bit initiator port ID.
+ */
+static int srpt_parse_i_port_id(u8 i_port_id[16], const char *name)
+{
+       const char *p;
+       unsigned len, count, leading_zero_bytes;
+       int ret, rc;
+
+       p = name;
+       if (strnicmp(p, "0x", 2) == 0)
+               p += 2;
+       ret = -EINVAL;
+       len = strlen(p);
+       if (len % 2)
+               goto out;
+       count = min(len / 2, 16U);
+       leading_zero_bytes = 16 - count;
+       memset(i_port_id, 0, leading_zero_bytes);
+       rc = hex2bin(i_port_id + leading_zero_bytes, p, count);
+       if (rc < 0)
+               pr_debug("hex2bin failed for srpt_parse_i_port_id: %d\n", rc);
+       ret = 0;
+out:
+       return ret;
+}
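+/*
+ * Example of the parsing above (illustrative input): for the string "0xfe80",
+ * len = 4, count = 2 and leading_zero_bytes = 14, so bytes 0..13 of i_port_id
+ * are zeroed and hex2bin() stores 0xfe and 0x80 in bytes 14 and 15.
+ */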
+
+/*
+ * configfs callback function invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static struct se_node_acl *srpt_make_nodeacl(struct se_portal_group *tpg,
+                                            struct config_group *group,
+                                            const char *name)
+{
+       struct srpt_port *sport = container_of(tpg, struct srpt_port, port_tpg_1);
+       struct se_node_acl *se_nacl, *se_nacl_new;
+       struct srpt_node_acl *nacl;
+       int ret = 0;
+       u32 nexus_depth = 1;
+       u8 i_port_id[16];
+
+       if (srpt_parse_i_port_id(i_port_id, name) < 0) {
+               printk(KERN_ERR "invalid initiator port ID %s\n", name);
+               ret = -EINVAL;
+               goto err;
+       }
+
+       se_nacl_new = srpt_alloc_fabric_acl(tpg);
+       if (!se_nacl_new) {
+               ret = -ENOMEM;
+               goto err;
+       }
+       /*
+        * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
+        * when converting a node ACL from demo mode to explicit.
+        */
+       se_nacl = core_tpg_add_initiator_node_acl(tpg, se_nacl_new, name,
+                                                 nexus_depth);
+       if (IS_ERR(se_nacl)) {
+               ret = PTR_ERR(se_nacl);
+               goto err;
+       }
+       /* Locate our struct srpt_node_acl and set sdev and i_port_id. */
+       nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+       memcpy(&nacl->i_port_id[0], &i_port_id[0], 16);
+       nacl->sport = sport;
+
+       spin_lock_irq(&sport->port_acl_lock);
+       list_add_tail(&nacl->list, &sport->port_acl_list);
+       spin_unlock_irq(&sport->port_acl_lock);
+
+       return se_nacl;
+err:
+       return ERR_PTR(ret);
+}
+
+/*
+ * configfs callback function invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg/acls/$i_port_id
+ */
+static void srpt_drop_nodeacl(struct se_node_acl *se_nacl)
+{
+       struct srpt_node_acl *nacl;
+       struct srpt_device *sdev;
+       struct srpt_port *sport;
+
+       nacl = container_of(se_nacl, struct srpt_node_acl, nacl);
+       sport = nacl->sport;
+       sdev = sport->sdev;
+       spin_lock_irq(&sport->port_acl_lock);
+       list_del(&nacl->list);
+       spin_unlock_irq(&sport->port_acl_lock);
+       core_tpg_del_initiator_node_acl(&sport->port_tpg_1, se_nacl, 1);
+       srpt_release_fabric_acl(NULL, se_nacl);
+}
+
+static ssize_t srpt_tpg_attrib_show_srp_max_rdma_size(
+       struct se_portal_group *se_tpg,
+       char *page)
+{
+       struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+       return sprintf(page, "%u\n", sport->port_attrib.srp_max_rdma_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
+       struct se_portal_group *se_tpg,
+       const char *page,
+       size_t count)
+{
+       struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(page, 0, &val);
+       if (ret < 0) {
+               pr_err("strict_strtoul() failed with ret: %d\n", ret);
+               return -EINVAL;
+       }
+       if (val > MAX_SRPT_RDMA_SIZE) {
+               pr_err("val: %lu exceeds MAX_SRPT_RDMA_SIZE: %d\n", val,
+                       MAX_SRPT_RDMA_SIZE);
+               return -EINVAL;
+       }
+       if (val < DEFAULT_MAX_RDMA_SIZE) {
+               pr_err("val: %lu smaller than DEFAULT_MAX_RDMA_SIZE: %d\n",
+                       val, DEFAULT_MAX_RDMA_SIZE);
+               return -EINVAL;
+       }
+       sport->port_attrib.srp_max_rdma_size = val;
+
+       return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_max_rdma_size, S_IRUGO | S_IWUSR);
+
+static ssize_t srpt_tpg_attrib_show_srp_max_rsp_size(
+       struct se_portal_group *se_tpg,
+       char *page)
+{
+       struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+       return sprintf(page, "%u\n", sport->port_attrib.srp_max_rsp_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
+       struct se_portal_group *se_tpg,
+       const char *page,
+       size_t count)
+{
+       struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(page, 0, &val);
+       if (ret < 0) {
+               pr_err("strict_strtoul() failed with ret: %d\n", ret);
+               return -EINVAL;
+       }
+       if (val > MAX_SRPT_RSP_SIZE) {
+               pr_err("val: %lu exceeds MAX_SRPT_RSP_SIZE: %d\n", val,
+                       MAX_SRPT_RSP_SIZE);
+               return -EINVAL;
+       }
+       if (val < MIN_MAX_RSP_SIZE) {
+               pr_err("val: %lu smaller than MIN_MAX_RSP_SIZE: %d\n", val,
+                       MIN_MAX_RSP_SIZE);
+               return -EINVAL;
+       }
+       sport->port_attrib.srp_max_rsp_size = val;
+
+       return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_max_rsp_size, S_IRUGO | S_IWUSR);
+
+static ssize_t srpt_tpg_attrib_show_srp_sq_size(
+       struct se_portal_group *se_tpg,
+       char *page)
+{
+       struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+       return sprintf(page, "%u\n", sport->port_attrib.srp_sq_size);
+}
+
+static ssize_t srpt_tpg_attrib_store_srp_sq_size(
+       struct se_portal_group *se_tpg,
+       const char *page,
+       size_t count)
+{
+       struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+       unsigned long val;
+       int ret;
+
+       ret = strict_strtoul(page, 0, &val);
+       if (ret < 0) {
+               pr_err("strict_strtoul() failed with ret: %d\n", ret);
+               return -EINVAL;
+       }
+       if (val > MAX_SRPT_SRQ_SIZE) {
+               pr_err("val: %lu exceeds MAX_SRPT_SRQ_SIZE: %d\n", val,
+                       MAX_SRPT_SRQ_SIZE);
+               return -EINVAL;
+       }
+       if (val < MIN_SRPT_SRQ_SIZE) {
+               pr_err("val: %lu smaller than MIN_SRPT_SRQ_SIZE: %d\n", val,
+                       MIN_SRPT_SRQ_SIZE);
+               return -EINVAL;
+       }
+       sport->port_attrib.srp_sq_size = val;
+
+       return count;
+}
+
+TF_TPG_ATTRIB_ATTR(srpt, srp_sq_size, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *srpt_tpg_attrib_attrs[] = {
+       &srpt_tpg_attrib_srp_max_rdma_size.attr,
+       &srpt_tpg_attrib_srp_max_rsp_size.attr,
+       &srpt_tpg_attrib_srp_sq_size.attr,
+       NULL,
+};
+
+static ssize_t srpt_tpg_show_enable(
+       struct se_portal_group *se_tpg,
+       char *page)
+{
+       struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+
+       return snprintf(page, PAGE_SIZE, "%d\n", sport->enabled ? 1 : 0);
+}
+
+static ssize_t srpt_tpg_store_enable(
+       struct se_portal_group *se_tpg,
+       const char *page,
+       size_t count)
+{
+       struct srpt_port *sport = container_of(se_tpg, struct srpt_port, port_tpg_1);
+       unsigned long tmp;
+       int ret;
+
+       ret = strict_strtoul(page, 0, &tmp);
+       if (ret < 0) {
+               printk(KERN_ERR "Unable to parse srpt_tpg_store_enable value\n");
+               return -EINVAL;
+       }
+
+       if ((tmp != 0) && (tmp != 1)) {
+               printk(KERN_ERR "Illegal value for srpt_tpg_store_enable: %lu\n", tmp);
+               return -EINVAL;
+       }
+       if (tmp == 1)
+               sport->enabled = true;
+       else
+               sport->enabled = false;
+
+       return count;
+}
+
+TF_TPG_BASE_ATTR(srpt, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *srpt_tpg_attrs[] = {
+       &srpt_tpg_enable.attr,
+       NULL,
+};
+
+/**
+ * configfs callback invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port/$tpg
+ */
+static struct se_portal_group *srpt_make_tpg(struct se_wwn *wwn,
+                                            struct config_group *group,
+                                            const char *name)
+{
+       struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
+       int res;
+
+       /* Initialize sport->port_wwn and sport->port_tpg_1 */
+       res = core_tpg_register(&srpt_target->tf_ops, &sport->port_wwn,
+                       &sport->port_tpg_1, sport, TRANSPORT_TPG_TYPE_NORMAL);
+       if (res)
+               return ERR_PTR(res);
+
+       return &sport->port_tpg_1;
+}
+
+/**
+ * configfs callback invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port/$tpg
+ */
+static void srpt_drop_tpg(struct se_portal_group *tpg)
+{
+       struct srpt_port *sport = container_of(tpg,
+                               struct srpt_port, port_tpg_1);
+
+       sport->enabled = false;
+       core_tpg_deregister(&sport->port_tpg_1);
+}
+
+/**
+ * configfs callback invoked for
+ * mkdir /sys/kernel/config/target/$driver/$port
+ */
+static struct se_wwn *srpt_make_tport(struct target_fabric_configfs *tf,
+                                     struct config_group *group,
+                                     const char *name)
+{
+       struct srpt_port *sport;
+       int ret;
+
+       sport = srpt_lookup_port(name);
+       pr_debug("make_tport(%s)\n", name);
+       ret = -EINVAL;
+       if (!sport)
+               goto err;
+
+       return &sport->port_wwn;
+
+err:
+       return ERR_PTR(ret);
+}
+
+/**
+ * configfs callback invoked for
+ * rmdir /sys/kernel/config/target/$driver/$port
+ */
+static void srpt_drop_tport(struct se_wwn *wwn)
+{
+       struct srpt_port *sport = container_of(wwn, struct srpt_port, port_wwn);
+
+       pr_debug("drop_tport(%s)\n", config_item_name(&sport->port_wwn.wwn_group.cg_item));
+}
+
+static ssize_t srpt_wwn_show_attr_version(struct target_fabric_configfs *tf,
+                                             char *buf)
+{
+       return scnprintf(buf, PAGE_SIZE, "%s\n", DRV_VERSION);
+}
+
+TF_WWN_ATTR_RO(srpt, version);
+
+static struct configfs_attribute *srpt_wwn_attrs[] = {
+       &srpt_wwn_version.attr,
+       NULL,
+};
+
+static struct target_core_fabric_ops srpt_template = {
+       .get_fabric_name                = srpt_get_fabric_name,
+       .get_fabric_proto_ident         = srpt_get_fabric_proto_ident,
+       .tpg_get_wwn                    = srpt_get_fabric_wwn,
+       .tpg_get_tag                    = srpt_get_tag,
+       .tpg_get_default_depth          = srpt_get_default_depth,
+       .tpg_get_pr_transport_id        = srpt_get_pr_transport_id,
+       .tpg_get_pr_transport_id_len    = srpt_get_pr_transport_id_len,
+       .tpg_parse_pr_out_transport_id  = srpt_parse_pr_out_transport_id,
+       .tpg_check_demo_mode            = srpt_check_false,
+       .tpg_check_demo_mode_cache      = srpt_check_true,
+       .tpg_check_demo_mode_write_protect = srpt_check_true,
+       .tpg_check_prod_mode_write_protect = srpt_check_false,
+       .tpg_alloc_fabric_acl           = srpt_alloc_fabric_acl,
+       .tpg_release_fabric_acl         = srpt_release_fabric_acl,
+       .tpg_get_inst_index             = srpt_tpg_get_inst_index,
+       .release_cmd                    = srpt_release_cmd,
+       .check_stop_free                = srpt_check_stop_free,
+       .shutdown_session               = srpt_shutdown_session,
+       .close_session                  = srpt_close_session,
+       .stop_session                   = srpt_stop_session,
+       .fall_back_to_erl0              = srpt_reset_nexus,
+       .sess_logged_in                 = srpt_sess_logged_in,
+       .sess_get_index                 = srpt_sess_get_index,
+       .sess_get_initiator_sid         = NULL,
+       .write_pending                  = srpt_write_pending,
+       .write_pending_status           = srpt_write_pending_status,
+       .set_default_node_attributes    = srpt_set_default_node_attrs,
+       .get_task_tag                   = srpt_get_task_tag,
+       .get_cmd_state                  = srpt_get_tcm_cmd_state,
+       .queue_data_in                  = srpt_queue_response,
+       .queue_status                   = srpt_queue_status,
+       .queue_tm_rsp                   = srpt_queue_response,
+       .get_fabric_sense_len           = srpt_get_fabric_sense_len,
+       .set_fabric_sense_len           = srpt_set_fabric_sense_len,
+       .is_state_remove                = srpt_is_state_remove,
+       /*
+        * Setup function pointers for generic logic in
+        * target_core_fabric_configfs.c
+        */
+       .fabric_make_wwn                = srpt_make_tport,
+       .fabric_drop_wwn                = srpt_drop_tport,
+       .fabric_make_tpg                = srpt_make_tpg,
+       .fabric_drop_tpg                = srpt_drop_tpg,
+       .fabric_post_link               = NULL,
+       .fabric_pre_unlink              = NULL,
+       .fabric_make_np                 = NULL,
+       .fabric_drop_np                 = NULL,
+       .fabric_make_nodeacl            = srpt_make_nodeacl,
+       .fabric_drop_nodeacl            = srpt_drop_nodeacl,
+};
+
+/**
+ * srpt_init_module() - Kernel module initialization.
+ *
+ * Note: Since ib_register_client() registers callback functions, and since at
+ * least one of these callback functions (srpt_add_one()) calls target core
+ * functions, this driver must be registered with the target core before
+ * ib_register_client() is called.
+ */
+static int __init srpt_init_module(void)
+{
+       int ret;
+
+       ret = -EINVAL;
+       if (srp_max_req_size < MIN_MAX_REQ_SIZE) {
+               printk(KERN_ERR "invalid value %d for kernel module parameter"
+                      " srp_max_req_size -- must be at least %d.\n",
+                      srp_max_req_size, MIN_MAX_REQ_SIZE);
+               goto out;
+       }
+
+       if (srpt_srq_size < MIN_SRPT_SRQ_SIZE
+           || srpt_srq_size > MAX_SRPT_SRQ_SIZE) {
+               printk(KERN_ERR "invalid value %d for kernel module parameter"
+                      " srpt_srq_size -- must be in the range [%d..%d].\n",
+                      srpt_srq_size, MIN_SRPT_SRQ_SIZE, MAX_SRPT_SRQ_SIZE);
+               goto out;
+       }
+
+       srpt_target = target_fabric_configfs_init(THIS_MODULE, "srpt");
+       if (IS_ERR(srpt_target)) {
+               printk(KERN_ERR "couldn't register\n");
+               ret = PTR_ERR(srpt_target);
+               goto out;
+       }
+
+       srpt_target->tf_ops = srpt_template;
+
+       /* Enable SG chaining */
+       srpt_target->tf_ops.task_sg_chaining = true;
+
+       /*
+        * Set up default attribute lists.
+        */
+       srpt_target->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = srpt_wwn_attrs;
+       srpt_target->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs = srpt_tpg_attrs;
+       srpt_target->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = srpt_tpg_attrib_attrs;
+       srpt_target->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
+       srpt_target->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
+       srpt_target->tf_cit_tmpl.tfc_tpg_nacl_base_cit.ct_attrs = NULL;
+       srpt_target->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
+       srpt_target->tf_cit_tmpl.tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
+       srpt_target->tf_cit_tmpl.tfc_tpg_nacl_param_cit.ct_attrs = NULL;
+
+       ret = target_fabric_configfs_register(srpt_target);
+       if (ret < 0) {
+               printk(KERN_ERR "couldn't register\n");
+               goto out_free_target;
+       }
+
+       ret = ib_register_client(&srpt_client);
+       if (ret) {
+               printk(KERN_ERR "couldn't register IB client\n");
+               goto out_unregister_target;
+       }
+
+       return 0;
+
+out_unregister_target:
+       target_fabric_configfs_deregister(srpt_target);
+       srpt_target = NULL;
+out_free_target:
+       if (srpt_target)
+               target_fabric_configfs_free(srpt_target);
+out:
+       return ret;
+}
+
+static void __exit srpt_cleanup_module(void)
+{
+       ib_unregister_client(&srpt_client);
+       target_fabric_configfs_deregister(srpt_target);
+       srpt_target = NULL;
+}
+
+module_init(srpt_init_module);
+module_exit(srpt_cleanup_module);
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.h b/drivers/infiniband/ulp/srpt/ib_srpt.h
new file mode 100644 (file)
index 0000000..61e52b8
--- /dev/null
@@ -0,0 +1,443 @@
+/*
+ * Copyright (c) 2006 - 2009 Mellanox Technology Inc.  All rights reserved.
+ * Copyright (C) 2009 - 2010 Bart Van Assche <bvanassche@acm.org>.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef IB_SRPT_H
+#define IB_SRPT_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+
+#include <rdma/ib_verbs.h>
+#include <rdma/ib_sa.h>
+#include <rdma/ib_cm.h>
+
+#include <scsi/srp.h>
+
+#include "ib_dm_mad.h"
+
+/*
+ * The prefix the ServiceName field must start with in the device management
+ * ServiceEntries attribute pair. See also the SRP specification.
+ */
+#define SRP_SERVICE_NAME_PREFIX                "SRP.T10:"
+
+enum {
+       /*
+        * SRP IOControllerProfile attributes for SRP target ports that have
+        * not been defined in <scsi/srp.h>. Source: section B.7, table B.7
+        * in the SRP specification.
+        */
+       SRP_PROTOCOL = 0x0108,
+       SRP_PROTOCOL_VERSION = 0x0001,
+       SRP_IO_SUBCLASS = 0x609e,
+       SRP_SEND_TO_IOC = 0x01,
+       SRP_SEND_FROM_IOC = 0x02,
+       SRP_RDMA_READ_FROM_IOC = 0x08,
+       SRP_RDMA_WRITE_FROM_IOC = 0x20,
+
+       /*
+        * srp_login_cmd.req_flags bitmasks. See also table 9 in the SRP
+        * specification.
+        */
+       SRP_MTCH_ACTION = 0x03, /* MULTI-CHANNEL ACTION */
+       SRP_LOSOLNT = 0x10, /* logout solicited notification */
+       SRP_CRSOLNT = 0x20, /* credit request solicited notification */
+       SRP_AESOLNT = 0x40, /* asynchronous event solicited notification */
+
+       /*
+        * srp_cmd.sol_nt / srp_tsk_mgmt.sol_not bitmasks. See also tables
+        * 18 and 20 in the SRP specification.
+        */
+       SRP_SCSOLNT = 0x02, /* SCSOLNT = successful solicited notification */
+       SRP_UCSOLNT = 0x04, /* UCSOLNT = unsuccessful solicited notification */
+
+       /*
+        * srp_rsp.sol_not / srp_t_logout.sol_not bitmasks. See also tables
+        * 16 and 22 in the SRP specification.
+        */
+       SRP_SOLNT = 0x01, /* SOLNT = solicited notification */
+
+       /* See also table 24 in the SRP specification. */
+       SRP_TSK_MGMT_SUCCESS = 0x00,
+       SRP_TSK_MGMT_FUNC_NOT_SUPP = 0x04,
+       SRP_TSK_MGMT_FAILED = 0x05,
+
+       /* See also table 21 in the SRP specification. */
+       SRP_CMD_SIMPLE_Q = 0x0,
+       SRP_CMD_HEAD_OF_Q = 0x1,
+       SRP_CMD_ORDERED_Q = 0x2,
+       SRP_CMD_ACA = 0x4,
+
+       SRP_LOGIN_RSP_MULTICHAN_NO_CHAN = 0x0,
+       SRP_LOGIN_RSP_MULTICHAN_TERMINATED = 0x1,
+       SRP_LOGIN_RSP_MULTICHAN_MAINTAINED = 0x2,
+
+       SRPT_DEF_SG_TABLESIZE = 128,
+       SRPT_DEF_SG_PER_WQE = 16,
+
+       MIN_SRPT_SQ_SIZE = 16,
+       DEF_SRPT_SQ_SIZE = 4096,
+       SRPT_RQ_SIZE = 128,
+       MIN_SRPT_SRQ_SIZE = 4,
+       DEFAULT_SRPT_SRQ_SIZE = 4095,
+       MAX_SRPT_SRQ_SIZE = 65535,
+       MAX_SRPT_RDMA_SIZE = 1U << 24,
+       MAX_SRPT_RSP_SIZE = 1024,
+
+       MIN_MAX_REQ_SIZE = 996,
+       DEFAULT_MAX_REQ_SIZE
+               = sizeof(struct srp_cmd)/*48*/
+               + sizeof(struct srp_indirect_buf)/*20*/
+               + 128 * sizeof(struct srp_direct_buf)/*16*/,
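+       /*
+        * With the structure sizes noted in the comments above this works out
+        * to 48 + 20 + 128 * 16 = 2116 bytes.
+        */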
+
+       MIN_MAX_RSP_SIZE = sizeof(struct srp_rsp)/*36*/ + 4,
+       DEFAULT_MAX_RSP_SIZE = 256, /* leaves 220 bytes for sense data */
+
+       DEFAULT_MAX_RDMA_SIZE = 65536,
+};
+
+enum srpt_opcode {
+       SRPT_RECV,
+       SRPT_SEND,
+       SRPT_RDMA_MID,
+       SRPT_RDMA_ABORT,
+       SRPT_RDMA_READ_LAST,
+       SRPT_RDMA_WRITE_LAST,
+};
+
+static inline u64 encode_wr_id(u8 opcode, u32 idx)
+{
+       return ((u64)opcode << 32) | idx;
+}
+static inline enum srpt_opcode opcode_from_wr_id(u64 wr_id)
+{
+       return wr_id >> 32;
+}
+static inline u32 idx_from_wr_id(u64 wr_id)
+{
+       return (u32)wr_id;
+}
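+/*
+ * Example of the encoding above: encode_wr_id(SRPT_SEND, 5) yields
+ * 0x0000000100000005; opcode_from_wr_id() recovers SRPT_SEND (1) from the
+ * upper 32 bits and idx_from_wr_id() recovers index 5 from the lower 32 bits.
+ */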
+
+struct rdma_iu {
+       u64             raddr;
+       u32             rkey;
+       struct ib_sge   *sge;
+       u32             sge_cnt;
+       int             mem_id;
+};
+
+/**
+ * enum srpt_command_state - SCSI command state managed by SRPT.
+ * @SRPT_STATE_NEW:           New command arrived and is being processed.
+ * @SRPT_STATE_NEED_DATA:     Processing a write or bidir command and waiting
+ *                            for data arrival.
+ * @SRPT_STATE_DATA_IN:       Data for the write or bidir command arrived and is
+ *                            being processed.
+ * @SRPT_STATE_CMD_RSP_SENT:  SRP_RSP for SRP_CMD has been sent.
+ * @SRPT_STATE_MGMT:          Processing a SCSI task management command.
+ * @SRPT_STATE_MGMT_RSP_SENT: SRP_RSP for SRP_TSK_MGMT has been sent.
+ * @SRPT_STATE_DONE:          Command processing finished successfully, command
+ *                            processing has been aborted or command processing
+ *                            failed.
+ */
+enum srpt_command_state {
+       SRPT_STATE_NEW           = 0,
+       SRPT_STATE_NEED_DATA     = 1,
+       SRPT_STATE_DATA_IN       = 2,
+       SRPT_STATE_CMD_RSP_SENT  = 3,
+       SRPT_STATE_MGMT          = 4,
+       SRPT_STATE_MGMT_RSP_SENT = 5,
+       SRPT_STATE_DONE          = 6,
+};
+
+/**
+ * struct srpt_ioctx - Shared SRPT I/O context information.
+ * @buf:   Pointer to the buffer.
+ * @dma:   DMA address of the buffer.
+ * @index: Index of the I/O context in its ioctx_ring array.
+ */
+struct srpt_ioctx {
+       void                    *buf;
+       dma_addr_t              dma;
+       uint32_t                index;
+};
+
+/**
+ * struct srpt_recv_ioctx - SRPT receive I/O context.
+ * @ioctx:     See above.
+ * @wait_list: Node for insertion in srpt_rdma_ch.cmd_wait_list.
+ */
+struct srpt_recv_ioctx {
+       struct srpt_ioctx       ioctx;
+       struct list_head        wait_list;
+};
+
+/**
+ * struct srpt_send_ioctx - SRPT send I/O context.
+ * @ioctx:       See above.
+ * @ch:          Channel pointer.
+ * @free_list:   Node in srpt_rdma_ch.free_list.
+ * @n_rbuf:      Number of data buffers in the received SRP command.
+ * @rbufs:       Pointer to SRP data buffer array.
+ * @single_rbuf: SRP data buffer if the command has only a single buffer.
+ * @sg:          Pointer to sg-list associated with this I/O context.
+ * @sg_cnt:      SG-list size.
+ * @mapped_sg_count: ib_dma_map_sg() return value.
+ * @n_rdma_ius:  Number of elements in the rdma_ius array.
+ * @rdma_ius:    Array with information about the RDMA mapping.
+ * @tag:         Tag of the received SRP information unit.
+ * @spinlock:    Protects 'state'.
+ * @state:       I/O context state.
+ * @rdma_aborted: If initiating a multipart RDMA transfer failed, whether
+ *              the already initiated transfers have finished.
+ * @cmd:         Target core command data structure.
+ * @sense_data:  SCSI sense data.
+ */
+struct srpt_send_ioctx {
+       struct srpt_ioctx       ioctx;
+       struct srpt_rdma_ch     *ch;
+       struct kref              kref;
+       struct rdma_iu          *rdma_ius;
+       struct srp_direct_buf   *rbufs;
+       struct srp_direct_buf   single_rbuf;
+       struct scatterlist      *sg;
+       struct list_head        free_list;
+       spinlock_t              spinlock;
+       enum srpt_command_state state;
+       bool                    rdma_aborted;
+       struct se_cmd           cmd;
+       struct completion       tx_done;
+       u64                     tag;
+       int                     sg_cnt;
+       int                     mapped_sg_count;
+       u16                     n_rdma_ius;
+       u8                      n_rdma;
+       u8                      n_rbuf;
+       bool                    queue_status_only;
+       u8                      sense_data[SCSI_SENSE_BUFFERSIZE];
+};
+
+/**
+ * enum rdma_ch_state - SRP channel state.
+ * @CH_CONNECTING:      QP is in RTR state; waiting for RTU.
+ * @CH_LIVE:            QP is in RTS state.
+ * @CH_DISCONNECTING:   Either a DREQ has been received and the driver is
+ *                      waiting for a DREP, or a DREQ has been sent and the
+ *                      driver is waiting for a DREP.
+ * @CH_DRAINING:        QP is in ERR state; waiting for last WQE event.
+ * @CH_RELEASING:       Last WQE event has been received; releasing resources.
+ */
+enum rdma_ch_state {
+       CH_CONNECTING,
+       CH_LIVE,
+       CH_DISCONNECTING,
+       CH_DRAINING,
+       CH_RELEASING
+};
+
+/**
+ * struct srpt_rdma_ch - RDMA channel.
+ * @wait_queue:    Allows the kernel thread to wait for more work.
+ * @thread:        Kernel thread that processes the IB queues associated with
+ *                 the channel.
+ * @cm_id:         IB CM ID associated with the channel.
+ * @qp:            IB queue pair used for communicating over this channel.
+ * @cq:            IB completion queue for this channel.
+ * @rq_size:       IB receive queue size.
+ * @rsp_size:      IB response message size in bytes.
+ * @sq_wr_avail:   number of work requests available in the send queue.
+ * @sport:         pointer to the information of the HCA port used by this
+ *                 channel.
+ * @i_port_id:     128-bit initiator port identifier copied from SRP_LOGIN_REQ.
+ * @t_port_id:     128-bit target port identifier copied from SRP_LOGIN_REQ.
+ * @max_ti_iu_len: maximum target-to-initiator information unit length.
+ * @req_lim:       request limit: maximum number of requests that may be sent
+ *                 by the initiator without having received a response.
+ * @req_lim_delta: Number of credits not yet sent back to the initiator.
+ * @spinlock:      Protects free_list and state.
+ * @free_list:     Head of list with free send I/O contexts.
+ * @state:         channel state. See also enum rdma_ch_state.
+ * @ioctx_ring:    Send ring.
+ * @wc:            IB work completion array for srpt_process_completion().
+ * @list:          Node for insertion in the srpt_device.rch_list list.
+ * @cmd_wait_list: List of SCSI commands that arrived before the RTU event. This
+ *                 list contains struct srpt_ioctx elements and is protected
+ *                 against concurrent modification by the cm_id spinlock.
+ * @sess:          Session information associated with this SRP channel.
+ * @sess_name:     Session name.
+ * @release_work:  Allows scheduling of srpt_release_channel().
+ * @release_done:  Enables waiting for srpt_release_channel() completion.
+ */
+struct srpt_rdma_ch {
+       wait_queue_head_t       wait_queue;
+       struct task_struct      *thread;
+       struct ib_cm_id         *cm_id;
+       struct ib_qp            *qp;
+       struct ib_cq            *cq;
+       int                     rq_size;
+       u32                     rsp_size;
+       atomic_t                sq_wr_avail;
+       struct srpt_port        *sport;
+       u8                      i_port_id[16];
+       u8                      t_port_id[16];
+       int                     max_ti_iu_len;
+       atomic_t                req_lim;
+       atomic_t                req_lim_delta;
+       spinlock_t              spinlock;
+       struct list_head        free_list;
+       enum rdma_ch_state      state;
+       struct srpt_send_ioctx  **ioctx_ring;
+       struct ib_wc            wc[16];
+       struct list_head        list;
+       struct list_head        cmd_wait_list;
+       struct se_session       *sess;
+       u8                      sess_name[36];
+       struct work_struct      release_work;
+       struct completion       *release_done;
+};
+
+/**
+ * struct srpt_port_attrib - Attributes for SRPT port.
+ * @srp_max_rdma_size: Maximum size of SRP RDMA transfers for new connections.
+ * @srp_max_rsp_size: Maximum size of SRP response messages in bytes.
+ * @srp_sq_size: Shared receive queue (SRQ) size.
+ */
+struct srpt_port_attrib {
+       u32                     srp_max_rdma_size;
+       u32                     srp_max_rsp_size;
+       u32                     srp_sq_size;
+};
+
+/**
+ * struct srpt_port - Information associated by SRPT with a single IB port.
+ * @sdev:      backpointer to the HCA information.
+ * @mad_agent: per-port management datagram processing information.
+ * @enabled:   Whether or not this target port is enabled.
+ * @port_guid: ASCII representation of Port GUID
+ * @port:      one-based port number.
+ * @sm_lid:    cached value of the port's sm_lid.
+ * @lid:       cached value of the port's lid.
+ * @gid:       cached value of the port's gid.
+ * @port_acl_lock: Spinlock that protects port_acl_list.
+ * @work:      work structure for refreshing the aforementioned cached values.
+ * @port_tpg_1: Target portal group = 1 data.
+ * @port_wwn:  Target core WWN data.
+ * @port_acl_list: Head of the list with all node ACLs for this port.
+ */
+struct srpt_port {
+       struct srpt_device      *sdev;
+       struct ib_mad_agent     *mad_agent;
+       bool                    enabled;
+       u8                      port_guid[64];
+       u8                      port;
+       u16                     sm_lid;
+       u16                     lid;
+       union ib_gid            gid;
+       spinlock_t              port_acl_lock;
+       struct work_struct      work;
+       struct se_portal_group  port_tpg_1;
+       struct se_wwn           port_wwn;
+       struct list_head        port_acl_list;
+       struct srpt_port_attrib port_attrib;
+};
+
+/**
+ * struct srpt_device - Information associated by SRPT with a single HCA.
+ * @device:        Backpointer to the struct ib_device managed by the IB core.
+ * @pd:            IB protection domain.
+ * @mr:            L_Key (local key) with write access to all local memory.
+ * @srq:           Per-HCA SRQ (shared receive queue).
+ * @cm_id:         Connection identifier.
+ * @dev_attr:      Attributes of the InfiniBand device as obtained during the
+ *                 ib_client.add() callback.
+ * @srq_size:      SRQ size.
+ * @ioctx_ring:    Per-HCA receive I/O context ring; one element per SRQ WR.
+ * @rch_list:      Per-device channel list -- see also srpt_rdma_ch.list.
+ * @ch_releaseQ:   Enables waiting for removal from rch_list.
+ * @spinlock:      Protects rch_list and tpg.
+ * @port:          Information about the ports owned by this HCA.
+ * @event_handler: Per-HCA asynchronous IB event handler.
+ * @list:          Node in srpt_dev_list.
+ */
+struct srpt_device {
+       struct ib_device        *device;
+       struct ib_pd            *pd;
+       struct ib_mr            *mr;
+       struct ib_srq           *srq;
+       struct ib_cm_id         *cm_id;
+       struct ib_device_attr   dev_attr;
+       int                     srq_size;
+       struct srpt_recv_ioctx  **ioctx_ring;
+       struct list_head        rch_list;
+       wait_queue_head_t       ch_releaseQ;
+       spinlock_t              spinlock;
+       struct srpt_port        port[2];
+       struct ib_event_handler event_handler;
+       struct list_head        list;
+};
+
+/**
+ * struct srpt_node_acl - Per-initiator ACL data (managed via configfs).
+ * @i_port_id: 128-bit SRP initiator port ID.
+ * @sport:     port information.
+ * @nacl:      Target core node ACL information.
+ * @list:      Element of the per-HCA ACL list.
+ */
+struct srpt_node_acl {
+       u8                      i_port_id[16];
+       struct srpt_port        *sport;
+       struct se_node_acl      nacl;
+       struct list_head        list;
+};
+
+/*
+ * SRP-related SCSI persistent reservation definitions.
+ *
+ * See also SPC4r28, section 7.6.1 (Protocol specific parameters introduction).
+ * See also SPC4r28, section 7.6.4.5 (TransportID for initiator ports using
+ * SCSI over an RDMA interface).
+ */
+
+enum {
+       SCSI_TRANSPORTID_PROTOCOLID_SRP = 4,
+};
+
+struct spc_rdma_transport_id {
+       uint8_t protocol_identifier;
+       uint8_t reserved[7];
+       uint8_t i_port_id[16];
+};
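+/*
+ * Note that the layout above is 24 bytes in total: one protocol identifier
+ * byte, seven reserved bytes and the 16-byte initiator port identifier, as
+ * described in SPC4r28 section 7.6.4.5.
+ */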
+
+#endif                         /* IB_SRPT_H */
index 76457d50bc3493e351c40fbe5b667d1b2e33ff6a..afc166fcc3d9c41ad6654bae17d5c893382c3705 100644 (file)
@@ -386,7 +386,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
        struct evdev_client *client = file->private_data;
        struct evdev *evdev = client->evdev;
        struct input_event event;
-       int retval;
+       int retval = 0;
 
        if (count < input_event_size())
                return -EINVAL;
index a588578037ebe7bb05481c221839a54287e6910b..67bec14e8b963de66d6ad7f5df966ff1343c0be9 100644 (file)
@@ -34,7 +34,6 @@
 #include <linux/i2c/twl.h>
 #include <linux/slab.h>
 
-
 /*
  * The TWL4030 family chips include a keypad controller that supports
  * up to an 8x8 switch matrix.  The controller can issue system wakeup
@@ -302,7 +301,7 @@ static int __devinit twl4030_kp_program(struct twl4030_keypad *kp)
        if (twl4030_kpwrite_u8(kp, i, KEYP_DEB) < 0)
                return -EIO;
 
-       /* Set timeout period to 100 ms */
+       /* Set timeout period to 200 ms */
        i = KEYP_PERIOD_US(200000, PTV_PRESCALER);
        if (twl4030_kpwrite_u8(kp, (i & 0xFF), KEYP_TIMEOUT_L) < 0)
                return -EIO;
@@ -466,4 +465,3 @@ MODULE_AUTHOR("Texas Instruments");
 MODULE_DESCRIPTION("TWL4030 Keypad Driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:twl4030_keypad");
-
index b4cfc6c8be89db327134dd7dd722dfa2623d472a..5ec774d6c82b4be13f06c97efffe7cc42860ebab 100644 (file)
@@ -512,6 +512,13 @@ static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"),
                },
        },
+       {
+               /* Lenovo Ideapad U455 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "20046"),
+               },
+       },
        { }
 };
 
index 8250299fd64ff37d78d97910a702f00710157532..4494233d331ac4689eef8ca58366c7138904627d 100644 (file)
@@ -164,7 +164,8 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer,
        struct serio_raw_client *client = file->private_data;
        struct serio_raw *serio_raw = client->serio_raw;
        char uninitialized_var(c);
-       ssize_t retval = 0;
+       ssize_t read = 0;
+       int retval;
 
        if (serio_raw->dead)
                return -ENODEV;
@@ -180,13 +181,15 @@ static ssize_t serio_raw_read(struct file *file, char __user *buffer,
        if (serio_raw->dead)
                return -ENODEV;
 
-       while (retval < count && serio_raw_fetch_byte(serio_raw, &c)) {
-               if (put_user(c, buffer++))
-                       return -EFAULT;
-               retval++;
+       while (read < count && serio_raw_fetch_byte(serio_raw, &c)) {
+               if (put_user(c, buffer++)) {
+                       retval = -EFAULT;
+                       break;
+               }
+               read++;
        }
 
-       return retval;
+       return read ?: retval;
 }
 
 static ssize_t serio_raw_write(struct file *file, const char __user *buffer,
index cce1f03b8895324d7d6e92cd6094cc39e7673cbe..f75e0608be5bb298d40443462bc31316bc8165df 100644 (file)
@@ -2863,6 +2863,9 @@ static unsigned device_dma_ops_init(void)
 
        for_each_pci_dev(pdev) {
                if (!check_device(&pdev->dev)) {
+
+                       iommu_ignore_device(&pdev->dev);
+
                        unhandled += 1;
                        continue;
                }
index 08a90b88e40d80feb9d01185e534a1351cf7473d..cee307e866060ca30aa2627eaaf3f0cb7789802e 100644 (file)
@@ -482,23 +482,19 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 
        priv = domain->priv;
 
-       if (!priv) {
-               ret = -ENODEV;
+       if (!priv)
                goto fail;
-       }
 
        fl_table = priv->pgtable;
 
        if (len != SZ_16M && len != SZ_1M &&
            len != SZ_64K && len != SZ_4K) {
                pr_debug("Bad length: %d\n", len);
-               ret = -EINVAL;
                goto fail;
        }
 
        if (!fl_table) {
                pr_debug("Null page table\n");
-               ret = -EINVAL;
                goto fail;
        }
 
@@ -507,7 +503,6 @@ static size_t msm_iommu_unmap(struct iommu_domain *domain, unsigned long va,
 
        if (*fl_pte == 0) {
                pr_debug("First level PTE is 0\n");
-               ret = -ENODEV;
                goto fail;
        }
 
index c957c344233f4ed283df710222492f1ee38d9bf0..9ca28fced2b9d4d17facfbe03922a8079d831615 100644 (file)
@@ -403,6 +403,13 @@ config LEDS_MAX8997
          This option enables support for on-chip LED drivers on
          MAXIM MAX8997 PMIC.
 
+config LEDS_OT200
+       tristate "LED support for the Bachmann OT200"
+       depends on LEDS_CLASS && HAS_IOMEM
+       help
+         This option enables support for the LEDs on the Bachmann OT200.
+         Say Y to enable LEDs on the Bachmann OT200.
+
 config LEDS_TRIGGERS
        bool "LED Trigger support"
        depends on LEDS_CLASS
index b8a9723477f0819b2fb07e2c19a61847888945e6..1fc6875a8b201d02a1d59122548948955829de1d 100644 (file)
@@ -28,6 +28,7 @@ obj-$(CONFIG_LEDS_LP5523)             += leds-lp5523.o
 obj-$(CONFIG_LEDS_TCA6507)             += leds-tca6507.o
 obj-$(CONFIG_LEDS_CLEVO_MAIL)          += leds-clevo-mail.o
 obj-$(CONFIG_LEDS_HP6XX)               += leds-hp6xx.o
+obj-$(CONFIG_LEDS_OT200)               += leds-ot200.o
 obj-$(CONFIG_LEDS_FSG)                 += leds-fsg.o
 obj-$(CONFIG_LEDS_PCA955X)             += leds-pca955x.o
 obj-$(CONFIG_LEDS_DA903X)              += leds-da903x.o
index 45e6878d73741d8359db2069b68bbe1df2589f2e..e59c166a0ce2261dd2515930ff297c1b57b5d25a 100644 (file)
@@ -164,8 +164,8 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
 
        if (drvdata->mode == LM3530_BL_MODE_ALS) {
                if (pltfm->als_vmax == 0) {
-                       pltfm->als_vmin = als_vmin = 0;
-                       pltfm->als_vmin = als_vmax = LM3530_ALS_WINDOW_mV;
+                       pltfm->als_vmin = 0;
+                       pltfm->als_vmax = LM3530_ALS_WINDOW_mV;
                }
 
                als_vmin = pltfm->als_vmin;
diff --git a/drivers/leds/leds-ot200.c b/drivers/leds/leds-ot200.c
new file mode 100644 (file)
index 0000000..c464682
--- /dev/null
@@ -0,0 +1,171 @@
+/*
+ * Bachmann ot200 leds driver.
+ *
+ * Author: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+ *         Christian Gmeiner <christian.gmeiner@gmail.com>
+ *
+ * License: GPL as published by the FSF.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/leds.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+
+struct ot200_led {
+       struct led_classdev cdev;
+       const char *name;
+       unsigned long port;
+       u8 mask;
+};
+
+/*
+ * The device has three leds on the back panel (led_err, led_init and led_run)
+ * and can handle up to seven leds on the front panel.
+ */
+
+static struct ot200_led leds[] = {
+       {
+               .name = "led_run",
+               .port = 0x5a,
+               .mask = BIT(0),
+       },
+       {
+               .name = "led_init",
+               .port = 0x5a,
+               .mask = BIT(1),
+       },
+       {
+               .name = "led_err",
+               .port = 0x5a,
+               .mask = BIT(2),
+       },
+       {
+               .name = "led_1",
+               .port = 0x49,
+               .mask = BIT(7),
+       },
+       {
+               .name = "led_2",
+               .port = 0x49,
+               .mask = BIT(6),
+       },
+       {
+               .name = "led_3",
+               .port = 0x49,
+               .mask = BIT(5),
+       },
+       {
+               .name = "led_4",
+               .port = 0x49,
+               .mask = BIT(4),
+       },
+       {
+               .name = "led_5",
+               .port = 0x49,
+               .mask = BIT(3),
+       },
+       {
+               .name = "led_6",
+               .port = 0x49,
+               .mask = BIT(2),
+       },
+       {
+               .name = "led_7",
+               .port = 0x49,
+               .mask = BIT(1),
+       }
+};
+
+static DEFINE_SPINLOCK(value_lock);
+
+/*
+ * We need to store the current LED states, as it is not
+ * possible to read the current LED state back via inb().
+ */
+static u8 leds_back;
+static u8 leds_front;
+
+static void ot200_led_brightness_set(struct led_classdev *led_cdev,
+               enum led_brightness value)
+{
+       struct ot200_led *led = container_of(led_cdev, struct ot200_led, cdev);
+       u8 *val;
+       unsigned long flags;
+
+       spin_lock_irqsave(&value_lock, flags);
+
+       if (led->port == 0x49)
+               val = &leds_front;
+       else if (led->port == 0x5a)
+               val = &leds_back;
+       else
+               BUG();
+
+       if (value == LED_OFF)
+               *val &= ~led->mask;
+       else
+               *val |= led->mask;
+
+       outb(*val, led->port);
+       spin_unlock_irqrestore(&value_lock, flags);
+}
+
+static int __devinit ot200_led_probe(struct platform_device *pdev)
+{
+       int i;
+       int ret;
+
+       for (i = 0; i < ARRAY_SIZE(leds); i++) {
+
+               leds[i].cdev.name = leds[i].name;
+               leds[i].cdev.brightness_set = ot200_led_brightness_set;
+
+               ret = led_classdev_register(&pdev->dev, &leds[i].cdev);
+               if (ret < 0)
+                       goto err;
+       }
+
+       leds_front = 0;         /* turn off all front leds */
+       leds_back = BIT(1);     /* turn on init led */
+       outb(leds_front, 0x49);
+       outb(leds_back, 0x5a);
+
+       return 0;
+
+err:
+       for (i = i - 1; i >= 0; i--)
+               led_classdev_unregister(&leds[i].cdev);
+
+       return ret;
+}
+
+static int __devexit ot200_led_remove(struct platform_device *pdev)
+{
+       int i;
+
+       for (i = 0; i < ARRAY_SIZE(leds); i++)
+               led_classdev_unregister(&leds[i].cdev);
+
+       return 0;
+}
+
+static struct platform_driver ot200_led_driver = {
+       .probe          = ot200_led_probe,
+       .remove         = __devexit_p(ot200_led_remove),
+       .driver         = {
+               .name   = "leds-ot200",
+               .owner  = THIS_MODULE,
+       },
+};
+
+module_platform_driver(ot200_led_driver);
+
+MODULE_AUTHOR("Sebastian A. Siewior <bigeasy@linutronix.de>");
+MODULE_DESCRIPTION("ot200 LED driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:leds-ot200");
index c2907d836e4e563ba50834e2524ae91919ddbd81..86cb7e5d83d545f1cca9706e689665780148c5db 100644 (file)
@@ -56,7 +56,8 @@ struct raid_dev {
 struct raid_set {
        struct dm_target *ti;
 
-       uint64_t print_flags;
+       uint32_t bitmap_loaded;
+       uint32_t print_flags;
 
        struct mddev md;
        struct raid_type *raid_type;
@@ -1085,7 +1086,7 @@ static int raid_status(struct dm_target *ti, status_type_t type,
                                raid_param_cnt += 2;
                }
 
-               raid_param_cnt += (hweight64(rs->print_flags & ~DMPF_REBUILD) * 2);
+               raid_param_cnt += (hweight32(rs->print_flags & ~DMPF_REBUILD) * 2);
                if (rs->print_flags & (DMPF_SYNC | DMPF_NOSYNC))
                        raid_param_cnt--;
 
@@ -1197,7 +1198,12 @@ static void raid_resume(struct dm_target *ti)
 {
        struct raid_set *rs = ti->private;
 
-       bitmap_load(&rs->md);
+       if (!rs->bitmap_loaded) {
+               bitmap_load(&rs->md);
+               rs->bitmap_loaded = 1;
+       } else
+               md_wakeup_thread(rs->md.thread);
+
        mddev_resume(&rs->md);
 }
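
raid_resume() above now uses bitmap_loaded as a run-once guard: the bitmap is loaded only on the first resume, and later resumes just wake the md thread. A minimal sketch of that first-time guard (struct and function names are illustrative only):

#include <stdbool.h>
#include <stdio.h>

struct dev {
	bool loaded;	/* set once the one-time initialisation has run */
};

static void resume(struct dev *d)
{
	if (!d->loaded) {
		puts("loading bitmap (first resume only)");
		d->loaded = true;
	} else {
		puts("waking worker thread");
	}
	puts("resuming device");
}

int main(void)
{
	struct dev d = { 0 };

	resume(&d);	/* first resume: loads the bitmap */
	resume(&d);	/* later resumes: only wake the thread */
	return 0;
}
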
 
index 9417ae2fa0bbc68b061d6b50ba23f5127dae5b2c..ce88755baf4a91a6216b628117e84a697c21d3c8 100644 (file)
@@ -7333,7 +7333,8 @@ void md_do_sync(struct mddev *mddev)
                                        printk(KERN_INFO
                                               "md: checkpointing %s of %s.\n",
                                               desc, mdname(mddev));
-                                       mddev->recovery_cp = mddev->curr_resync;
+                                       mddev->recovery_cp =
+                                               mddev->curr_resync_completed;
                                }
                        } else
                                mddev->recovery_cp = MaxSector;
@@ -7351,9 +7352,9 @@ void md_do_sync(struct mddev *mddev)
                        rcu_read_unlock();
                }
        }
+ skip:
        set_bit(MD_CHANGE_DEVS, &mddev->flags);
 
- skip:
        if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
                /* We completed so min/max setting can be forgotten if used. */
                if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
index 27555995f7e4c344f74afdfa14824ecbfac2b912..b5ee3ebfcfca03e2f9fd5973cd5d72b52d1c5f53 100644 (file)
 #include <linux/dvb/frontend.h>
 #include "dvb_frontend.h"
 
+/* Registers (Write-only) */
+#define XREG_INIT         0x00
+#define XREG_RF_FREQ      0x02
+#define XREG_POWER_DOWN   0x08
+
+/* Registers (Read-only) */
+#define XREG_FREQ_ERROR   0x01
+#define XREG_LOCK         0x02
+#define XREG_VERSION      0x04
+#define XREG_PRODUCT_ID   0x08
+#define XREG_HSYNC_FREQ   0x10
+#define XREG_FRAME_LINES  0x20
+#define XREG_SNR          0x40
+
+#define XREG_ADC_ENV      0x0100
 
 static int debug;
 module_param(debug, int, 0644);
@@ -885,7 +900,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
        mutex_lock(&priv->lock);
 
        /* Sync Lock Indicator */
-       rc = xc2028_get_reg(priv, 0x0002, &frq_lock);
+       rc = xc2028_get_reg(priv, XREG_LOCK, &frq_lock);
        if (rc < 0)
                goto ret;
 
@@ -894,7 +909,7 @@ static int xc2028_signal(struct dvb_frontend *fe, u16 *strength)
                signal = 1 << 11;
 
        /* Get SNR of the video signal */
-       rc = xc2028_get_reg(priv, 0x0040, &signal);
+       rc = xc2028_get_reg(priv, XREG_SNR, &signal);
        if (rc < 0)
                goto ret;
 
@@ -1019,9 +1034,9 @@ static int generic_set_freq(struct dvb_frontend *fe, u32 freq /* in HZ */,
 
        /* CMD= Set frequency */
        if (priv->firm_version < 0x0202)
-               rc = send_seq(priv, {0x00, 0x02, 0x00, 0x00});
+               rc = send_seq(priv, {0x00, XREG_RF_FREQ, 0x00, 0x00});
        else
-               rc = send_seq(priv, {0x80, 0x02, 0x00, 0x00});
+               rc = send_seq(priv, {0x80, XREG_RF_FREQ, 0x00, 0x00});
        if (rc < 0)
                goto ret;
 
@@ -1201,9 +1216,9 @@ static int xc2028_sleep(struct dvb_frontend *fe)
        mutex_lock(&priv->lock);
 
        if (priv->firm_version < 0x0202)
-               rc = send_seq(priv, {0x00, 0x08, 0x00, 0x00});
+               rc = send_seq(priv, {0x00, XREG_POWER_DOWN, 0x00, 0x00});
        else
-               rc = send_seq(priv, {0x80, 0x08, 0x00, 0x00});
+               rc = send_seq(priv, {0x80, XREG_POWER_DOWN, 0x00, 0x00});
 
        priv->cur_fw.type = 0;  /* need firmware reload */
 
index d218c1d68c33a622cdc791aa684e5645441965bf..68397110b7d932fec46c3cdd4ecc0b9d2e99ddf4 100644 (file)
@@ -154,6 +154,8 @@ struct xc4000_priv {
 #define XREG_SNR          0x06
 #define XREG_VERSION      0x07
 #define XREG_PRODUCT_ID   0x08
+#define XREG_SIGNAL_LEVEL 0x0A
+#define XREG_NOISE_LEVEL  0x0B
 
 /*
    Basic firmware description. This will remain with
@@ -486,6 +488,16 @@ static int xc_get_quality(struct xc4000_priv *priv, u16 *quality)
        return xc4000_readreg(priv, XREG_QUALITY, quality);
 }
 
+static int xc_get_signal_level(struct xc4000_priv *priv, u16 *signal)
+{
+       return xc4000_readreg(priv, XREG_SIGNAL_LEVEL, signal);
+}
+
+static int xc_get_noise_level(struct xc4000_priv *priv, u16 *noise)
+{
+       return xc4000_readreg(priv, XREG_NOISE_LEVEL, noise);
+}
+
 static u16 xc_wait_for_lock(struct xc4000_priv *priv)
 {
        u16     lock_state = 0;
@@ -1089,6 +1101,8 @@ static void xc_debug_dump(struct xc4000_priv *priv)
        u32     hsync_freq_hz = 0;
        u16     frame_lines;
        u16     quality;
+       u16     signal = 0;
+       u16     noise = 0;
        u8      hw_majorversion = 0, hw_minorversion = 0;
        u8      fw_majorversion = 0, fw_minorversion = 0;
 
@@ -1119,6 +1133,12 @@ static void xc_debug_dump(struct xc4000_priv *priv)
 
        xc_get_quality(priv, &quality);
        dprintk(1, "*** Quality (0:<8dB, 7:>56dB) = %d\n", quality);
+
+       xc_get_signal_level(priv, &signal);
+       dprintk(1, "*** Signal level = -%ddB (%d)\n", signal >> 8, signal);
+
+       xc_get_noise_level(priv, &noise);
+       dprintk(1, "*** Noise level = %ddB (%d)\n", noise >> 8, noise);
 }
 
 static int xc4000_set_params(struct dvb_frontend *fe)
@@ -1432,6 +1452,71 @@ fail:
        return ret;
 }
 
+static int xc4000_get_signal(struct dvb_frontend *fe, u16 *strength)
+{
+       struct xc4000_priv *priv = fe->tuner_priv;
+       u16 value = 0;
+       int rc;
+
+       mutex_lock(&priv->lock);
+       rc = xc4000_readreg(priv, XREG_SIGNAL_LEVEL, &value);
+       mutex_unlock(&priv->lock);
+
+       if (rc < 0)
+               goto ret;
+
+       /* Information from real testing of the DVB-T and radio parts:
+          the coefficient for one dB is 0xff.
+        */
+       tuner_dbg("Signal strength: -%ddB (%05d)\n", value >> 8, value);
+
+       /* all known digital modes */
+       if ((priv->video_standard == XC4000_DTV6) ||
+           (priv->video_standard == XC4000_DTV7) ||
+           (priv->video_standard == XC4000_DTV7_8) ||
+           (priv->video_standard == XC4000_DTV8))
+               goto digital;
+
+       /* In analog mode the NOISE LEVEL is what matters: the
+          signal level depends only on the gain of the antenna
+          and amplifiers and says nothing about the real
+          quality of reception.
+        */
+       mutex_lock(&priv->lock);
+       rc = xc4000_readreg(priv, XREG_NOISE_LEVEL, &value);
+       mutex_unlock(&priv->lock);
+
+       tuner_dbg("Noise level: %ddB (%05d)\n", value >> 8, value);
+
+       /* highest noise level: 32dB */
+       if (value >= 0x2000) {
+               value = 0;
+       } else {
+               value = ~value << 3;
+       }
+
+       goto ret;
+
+       /* In digital mode the SIGNAL LEVEL is what matters; the
+          real noise level is stored in demodulator registers.
+        */
+digital:
+       /* best signal: -50dB */
+       if (value <= 0x3200) {
+               value = 0xffff;
+       /* minimum: -114dB - should be 0x7200 but real zero is 0x713A */
+       } else if (value >= 0x713A) {
+               value = 0;
+       } else {
+               value = ~(value - 0x3200) << 2;
+       }
+
+ret:
+       *strength = value;
+
+       return rc;
+}
+
 static int xc4000_get_frequency(struct dvb_frontend *fe, u32 *freq)
 {
        struct xc4000_priv *priv = fe->tuner_priv;
@@ -1559,6 +1644,7 @@ static const struct dvb_tuner_ops xc4000_tuner_ops = {
        .set_params        = xc4000_set_params,
        .set_analog_params = xc4000_set_analog_params,
        .get_frequency     = xc4000_get_frequency,
+       .get_rf_strength   = xc4000_get_signal,
        .get_bandwidth     = xc4000_get_bandwidth,
        .get_status        = xc4000_get_status
 };
index b15db4fe347b9218ec8b9e348802f424abb5d72b..fbbe545a74cb5357c3c1cdf445511c80f32dd724 100644 (file)
@@ -904,8 +904,11 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
 {
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        int i;
+       u32 delsys;
 
+       delsys = c->delivery_system;
        memset(c, 0, sizeof(struct dtv_frontend_properties));
+       c->delivery_system = delsys;
 
        c->state = DTV_CLEAR;
 
@@ -1009,25 +1012,6 @@ static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = {
        _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0),
        _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0),
 
-       _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 0, 0),
-       _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 0, 0),
-       _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 0, 0),
-       _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 0, 0),
-       _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 0, 0),
-       _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 0, 0),
-
        _DTV_CMD(DTV_ISDBS_TS_ID, 1, 0),
        _DTV_CMD(DTV_DVBT2_PLP_ID, 1, 0),
 
@@ -1413,6 +1397,15 @@ static int set_delivery_system(struct dvb_frontend *fe, u32 desired_system)
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        enum dvbv3_emulation_type type;
 
+       /*
+        * It was reported that some old DVBv5 applications were
+        * filling delivery_system with SYS_UNDEFINED. If this happens,
+        * assume that the application wants to use the first supported
+        * delivery system.
+        */
+       if (c->delivery_system == SYS_UNDEFINED)
+               c->delivery_system = fe->ops.delsys[0];
+
        if (desired_system == SYS_UNDEFINED) {
                /*
                 * A DVBv3 call doesn't know what's the desired system.
@@ -1732,6 +1725,7 @@ static int dvb_frontend_ioctl_properties(struct file *file,
 {
        struct dvb_device *dvbdev = file->private_data;
        struct dvb_frontend *fe = dvbdev->priv;
+       struct dvb_frontend_private *fepriv = fe->frontend_priv;
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        int err = 0;
 
@@ -1798,9 +1792,14 @@ static int dvb_frontend_ioctl_properties(struct file *file,
 
                /*
                 * Fills the cache out struct with the cache contents, plus
-                * the data retrieved from get_frontend.
+                * the data retrieved from get_frontend, if the frontend
+                * is not idle. Otherwise, returns the cached content
                 */
-               dtv_get_frontend(fe, NULL);
+               if (fepriv->state != FESTATE_IDLE) {
+                       err = dtv_get_frontend(fe, NULL);
+                       if (err < 0)
+                               goto out;
+               }
                for (i = 0; i < tvps->num; i++) {
                        err = dtv_property_process_get(fe, c, tvp + i, file);
                        if (err < 0)
index d66192974d68a0a2de44fd41f156f7b6c9c90a85..cf0c318d6989e11eda05286c7dd209e8e41fc673 100644 (file)
@@ -877,24 +877,17 @@ static int anysee_frontend_attach(struct dvb_usb_adapter *adap)
        case ANYSEE_HW_508T2C: /* 20 */
                /* E7 T2C */
 
+               if (state->fe_id)
+                       break;
+
                /* enable DVB-T/T2/C demod on IOE[5] */
                ret = anysee_wr_reg_mask(adap->dev, REG_IOE, (1 << 5), 0x20);
                if (ret)
                        goto error;
 
-               if (state->fe_id == 0)  {
-                       /* DVB-T/T2 */
-                       adap->fe_adap[state->fe_id].fe =
-                               dvb_attach(cxd2820r_attach,
-                               &anysee_cxd2820r_config,
-                               &adap->dev->i2c_adap, NULL);
-               } else {
-                       /* DVB-C */
-                       adap->fe_adap[state->fe_id].fe =
-                               dvb_attach(cxd2820r_attach,
-                               &anysee_cxd2820r_config,
-                               &adap->dev->i2c_adap, adap->fe_adap[0].fe);
-               }
+               /* attach demod */
+               adap->fe_adap[state->fe_id].fe = dvb_attach(cxd2820r_attach,
+                               &anysee_cxd2820r_config, &adap->dev->i2c_adap);
 
                state->has_ci = true;
 
@@ -1195,6 +1188,14 @@ static int anysee_ci_init(struct dvb_usb_device *d)
        if (ret)
                return ret;
 
+       ret = anysee_wr_reg_mask(d, REG_IOD, (0 << 2)|(0 << 1)|(0 << 0), 0x07);
+       if (ret)
+               return ret;
+
+       ret = anysee_wr_reg_mask(d, REG_IOD, (1 << 2)|(1 << 1)|(1 << 0), 0x07);
+       if (ret)
+               return ret;
+
        ret = dvb_ca_en50221_init(&d->adapter[0].dvb_adap, &state->ci, 0, 1);
        if (ret)
                return ret;
index 8a57ed8272dec10adfbbd989f81442ed6cc48ef9..1efc028a76c9c5bb937f6c8979d672ec68104596 100644 (file)
@@ -276,14 +276,15 @@ static int cinergyt2_fe_set_frontend(struct dvb_frontend *fe)
        param.flags = 0;
 
        switch (fep->bandwidth_hz) {
+       default:
        case 8000000:
-               param.bandwidth = 0;
+               param.bandwidth = 8;
                break;
        case 7000000:
-               param.bandwidth = 1;
+               param.bandwidth = 7;
                break;
        case 6000000:
-               param.bandwidth = 2;
+               param.bandwidth = 6;
                break;
        }
 
index 9bd6d51b3b938b21659dffe90f7ee68877ad6302..7de125c0b36f4576f951ea8e90a87d7c308f0857 100644 (file)
@@ -48,6 +48,8 @@ struct dib0700_state {
        u8 disable_streaming_master_mode;
        u32 fw_version;
        u32 nb_packet_buffer_size;
+       int (*read_status)(struct dvb_frontend *, fe_status_t *);
+       int (*sleep)(struct dvb_frontend* fe);
        u8 buf[255];
 };
 
index 206999476f028238c430797ba11b762b55dee379..070e82aa53f53248f33448d9e61d22b484e6b28a 100644 (file)
@@ -834,6 +834,7 @@ static struct usb_driver dib0700_driver = {
 
 module_usb_driver(dib0700_driver);
 
+MODULE_FIRMWARE("dvb-usb-dib0700-1.20.fw");
 MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>");
 MODULE_DESCRIPTION("Driver for devices based on DiBcom DiB0700 - USB bridge");
 MODULE_VERSION("1.0");
index 81ef4b46f790f936129b849e55878e8b8a9ff89e..f9e966aa26e75d19b0ce129221be038bd6787fe5 100644 (file)
@@ -3066,19 +3066,25 @@ static struct dib7000p_config stk7070pd_dib7000p_config[2] = {
        }
 };
 
-static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
+static void stk7070pd_init(struct dvb_usb_device *dev)
 {
-       dib0700_set_gpio(adap->dev, GPIO6, GPIO_OUT, 1);
+       dib0700_set_gpio(dev, GPIO6, GPIO_OUT, 1);
        msleep(10);
-       dib0700_set_gpio(adap->dev, GPIO9, GPIO_OUT, 1);
-       dib0700_set_gpio(adap->dev, GPIO4, GPIO_OUT, 1);
-       dib0700_set_gpio(adap->dev, GPIO7, GPIO_OUT, 1);
-       dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 0);
+       dib0700_set_gpio(dev, GPIO9, GPIO_OUT, 1);
+       dib0700_set_gpio(dev, GPIO4, GPIO_OUT, 1);
+       dib0700_set_gpio(dev, GPIO7, GPIO_OUT, 1);
+       dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 0);
 
-       dib0700_ctrl_clock(adap->dev, 72, 1);
+       dib0700_ctrl_clock(dev, 72, 1);
 
        msleep(10);
-       dib0700_set_gpio(adap->dev, GPIO10, GPIO_OUT, 1);
+       dib0700_set_gpio(dev, GPIO10, GPIO_OUT, 1);
+}
+
+static int stk7070pd_frontend_attach0(struct dvb_usb_adapter *adap)
+{
+       stk7070pd_init(adap->dev);
+
        msleep(10);
        dib0700_set_gpio(adap->dev, GPIO0, GPIO_OUT, 1);
 
@@ -3099,6 +3105,77 @@ static int stk7070pd_frontend_attach1(struct dvb_usb_adapter *adap)
        return adap->fe_adap[0].fe == NULL ? -ENODEV : 0;
 }
 
+static int novatd_read_status_override(struct dvb_frontend *fe,
+               fe_status_t *stat)
+{
+       struct dvb_usb_adapter *adap = fe->dvb->priv;
+       struct dvb_usb_device *dev = adap->dev;
+       struct dib0700_state *state = dev->priv;
+       int ret;
+
+       ret = state->read_status(fe, stat);
+
+       if (!ret)
+               dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT,
+                               !!(*stat & FE_HAS_LOCK));
+
+       return ret;
+}
+
+static int novatd_sleep_override(struct dvb_frontend* fe)
+{
+       struct dvb_usb_adapter *adap = fe->dvb->priv;
+       struct dvb_usb_device *dev = adap->dev;
+       struct dib0700_state *state = dev->priv;
+
+       /* turn off LED */
+       dib0700_set_gpio(dev, adap->id == 0 ? GPIO1 : GPIO0, GPIO_OUT, 0);
+
+       return state->sleep(fe);
+}
+
+/**
+ * novatd_frontend_attach - Nova-TD specific attach
+ *
+ * Nova-TD has GPIO0, 1 and 2 for LEDs, so do not fiddle with them except for
+ * informational purposes.
+ */
+static int novatd_frontend_attach(struct dvb_usb_adapter *adap)
+{
+       struct dvb_usb_device *dev = adap->dev;
+       struct dib0700_state *st = dev->priv;
+
+       if (adap->id == 0) {
+               stk7070pd_init(dev);
+
+               /* turn the power LED on, the other two off (just in case) */
+               dib0700_set_gpio(dev, GPIO0, GPIO_OUT, 0);
+               dib0700_set_gpio(dev, GPIO1, GPIO_OUT, 0);
+               dib0700_set_gpio(dev, GPIO2, GPIO_OUT, 1);
+
+               if (dib7000p_i2c_enumeration(&dev->i2c_adap, 2, 18,
+                                            stk7070pd_dib7000p_config) != 0) {
+                       err("%s: dib7000p_i2c_enumeration failed.  Cannot continue\n",
+                           __func__);
+                       return -ENODEV;
+               }
+       }
+
+       adap->fe_adap[0].fe = dvb_attach(dib7000p_attach, &dev->i2c_adap,
+                       adap->id == 0 ? 0x80 : 0x82,
+                       &stk7070pd_dib7000p_config[adap->id]);
+
+       if (adap->fe_adap[0].fe == NULL)
+               return -ENODEV;
+
+       st->read_status = adap->fe_adap[0].fe->ops.read_status;
+       adap->fe_adap[0].fe->ops.read_status = novatd_read_status_override;
+       st->sleep = adap->fe_adap[0].fe->ops.sleep;
+       adap->fe_adap[0].fe->ops.sleep = novatd_sleep_override;
+
+       return 0;
+}
+
 /* S5H1411 */
 static struct s5h1411_config pinnacle_801e_config = {
        .output_mode   = S5H1411_PARALLEL_OUTPUT,
@@ -3861,6 +3938,57 @@ struct dvb_usb_device_properties dib0700_devices[] = {
                },
        }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
 
+               .num_adapters = 2,
+               .adapter = {
+                       {
+                       .num_frontends = 1,
+                       .fe = {{
+                               .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+                               .pid_filter_count = 32,
+                               .pid_filter       = stk70x0p_pid_filter,
+                               .pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
+                               .frontend_attach  = novatd_frontend_attach,
+                               .tuner_attach     = dib7070p_tuner_attach,
+
+                               DIB0700_DEFAULT_STREAMING_CONFIG(0x02),
+                       }},
+                               .size_of_priv     = sizeof(struct dib0700_adapter_state),
+                       }, {
+                       .num_frontends = 1,
+                       .fe = {{
+                               .caps = DVB_USB_ADAP_HAS_PID_FILTER | DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF,
+                               .pid_filter_count = 32,
+                               .pid_filter       = stk70x0p_pid_filter,
+                               .pid_filter_ctrl  = stk70x0p_pid_filter_ctrl,
+                               .frontend_attach  = novatd_frontend_attach,
+                               .tuner_attach     = dib7070p_tuner_attach,
+
+                               DIB0700_DEFAULT_STREAMING_CONFIG(0x03),
+                       }},
+                               .size_of_priv     = sizeof(struct dib0700_adapter_state),
+                       }
+               },
+
+               .num_device_descs = 1,
+               .devices = {
+                       {   "Hauppauge Nova-TD Stick (52009)",
+                               { &dib0700_usb_id_table[35], NULL },
+                               { NULL },
+                       },
+               },
+
+               .rc.core = {
+                       .rc_interval      = DEFAULT_RC_INTERVAL,
+                       .rc_codes         = RC_MAP_DIB0700_RC5_TABLE,
+                       .module_name      = "dib0700",
+                       .rc_query         = dib0700_rc_query_old_firmware,
+                       .allowed_protos   = RC_TYPE_RC5 |
+                                           RC_TYPE_RC6 |
+                                           RC_TYPE_NEC,
+                       .change_protocol = dib0700_change_protocol,
+               },
+       }, { DIB0700_DEFAULT_DEVICE_PROPERTIES,
+
                .num_adapters = 2,
                .adapter = {
                        {
@@ -3892,7 +4020,7 @@ struct dvb_usb_device_properties dib0700_devices[] = {
                        }
                },
 
-               .num_device_descs = 6,
+               .num_device_descs = 5,
                .devices = {
                        {   "DiBcom STK7070PD reference design",
                                { &dib0700_usb_id_table[17], NULL },
@@ -3902,10 +4030,6 @@ struct dvb_usb_device_properties dib0700_devices[] = {
                                { &dib0700_usb_id_table[18], NULL },
                                { NULL },
                        },
-                       {   "Hauppauge Nova-TD Stick (52009)",
-                               { &dib0700_usb_id_table[35], NULL },
-                               { NULL },
-                       },
                        {   "Hauppauge Nova-TD-500 (84xxx)",
                                { &dib0700_usb_id_table[36], NULL },
                                { NULL },
index cf0f546aa1d1bdf94f47666692c0313cf69d6643..5aa306ebb7ef93bd8f356e6bc98325735c88503c 100644 (file)
@@ -77,14 +77,12 @@ struct cxd2820r_config {
        (defined(CONFIG_DVB_CXD2820R_MODULE) && defined(MODULE))
 extern struct dvb_frontend *cxd2820r_attach(
        const struct cxd2820r_config *config,
-       struct i2c_adapter *i2c,
-       struct dvb_frontend *fe
+       struct i2c_adapter *i2c
 );
 #else
 static inline struct dvb_frontend *cxd2820r_attach(
        const struct cxd2820r_config *config,
-       struct i2c_adapter *i2c,
-       struct dvb_frontend *fe
+       struct i2c_adapter *i2c
 )
 {
        printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
index 93e1b12e79077ddc851ffd5fd85c8240c795eed7..5c7c2aaf9bf58583eb52ed6f3ba7b4d48df6d29b 100644 (file)
@@ -309,9 +309,14 @@ static int cxd2820r_read_status(struct dvb_frontend *fe, fe_status_t *status)
 
 static int cxd2820r_get_frontend(struct dvb_frontend *fe)
 {
+       struct cxd2820r_priv *priv = fe->demodulator_priv;
        int ret;
 
        dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
+
+       if (priv->delivery_system == SYS_UNDEFINED)
+               return 0;
+
        switch (fe->dtv_property_cache.delivery_system) {
        case SYS_DVBT:
                ret = cxd2820r_get_frontend_t(fe);
@@ -476,11 +481,20 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
        dbg("%s: delsys=%d", __func__, fe->dtv_property_cache.delivery_system);
 
        /* switch between DVB-T and DVB-T2 when tune fails */
-       if (priv->last_tune_failed && (priv->delivery_system != SYS_DVBC_ANNEX_A)) {
-               if (priv->delivery_system == SYS_DVBT)
+       if (priv->last_tune_failed) {
+               if (priv->delivery_system == SYS_DVBT) {
+                       ret = cxd2820r_sleep_t(fe);
+                       if (ret)
+                               goto error;
+
                        c->delivery_system = SYS_DVBT2;
-               else
+               } else if (priv->delivery_system == SYS_DVBT2) {
+                       ret = cxd2820r_sleep_t2(fe);
+                       if (ret)
+                               goto error;
+
                        c->delivery_system = SYS_DVBT;
+               }
        }
 
        /* set frontend */
@@ -492,6 +506,7 @@ static enum dvbfe_search cxd2820r_search(struct dvb_frontend *fe)
        /* frontend lock wait loop count */
        switch (priv->delivery_system) {
        case SYS_DVBT:
+       case SYS_DVBC_ANNEX_A:
                i = 20;
                break;
        case SYS_DVBT2:
@@ -556,7 +571,7 @@ static const struct dvb_frontend_ops cxd2820r_ops = {
        .delsys = { SYS_DVBT, SYS_DVBT2, SYS_DVBC_ANNEX_A },
        /* default: DVB-T/T2 */
        .info = {
-               .name = "Sony CXD2820R (DVB-T/T2)",
+               .name = "Sony CXD2820R",
 
                .caps = FE_CAN_FEC_1_2                  |
                        FE_CAN_FEC_2_3                  |
@@ -566,7 +581,9 @@ static const struct dvb_frontend_ops cxd2820r_ops = {
                        FE_CAN_FEC_AUTO                 |
                        FE_CAN_QPSK                     |
                        FE_CAN_QAM_16                   |
+                       FE_CAN_QAM_32                   |
                        FE_CAN_QAM_64                   |
+                       FE_CAN_QAM_128                  |
                        FE_CAN_QAM_256                  |
                        FE_CAN_QAM_AUTO                 |
                        FE_CAN_TRANSMISSION_MODE_AUTO   |
@@ -596,8 +613,7 @@ static const struct dvb_frontend_ops cxd2820r_ops = {
 };
 
 struct dvb_frontend *cxd2820r_attach(const struct cxd2820r_config *cfg,
-                                    struct i2c_adapter *i2c,
-                                    struct dvb_frontend *fe)
+               struct i2c_adapter *i2c)
 {
        struct cxd2820r_priv *priv = NULL;
        int ret;
index 938777065de6d1d14effed1ab4f9a01b3f7dc8c7..af65d013db11e8ed843c555fe0dc73d114103729 100644 (file)
@@ -1195,7 +1195,7 @@ static int ds3000_set_frontend(struct dvb_frontend *fe)
 
        for (i = 0; i < 30 ; i++) {
                ds3000_read_status(fe, &status);
-               if (status && FE_HAS_LOCK)
+               if (status & FE_HAS_LOCK)
                        break;
 
                msleep(10);
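
The one-character fix above matters because "&&" is a logical AND: "status && FE_HAS_LOCK" is true whenever status is non-zero at all, while "status & FE_HAS_LOCK" tests the lock bit itself. A small stand-alone sketch of the difference (the flag value is assumed here purely for illustration):

#include <stdio.h>

#define FE_HAS_LOCK 0x10	/* assumed value, for illustration only */

int main(void)
{
	int status = 0x01;	/* some other status bit set, but no lock */

	printf("logical &&: %d\n", status && FE_HAS_LOCK);		/* 1 - wrongly "locked" */
	printf("bitwise &:  %d\n", (status & FE_HAS_LOCK) != 0);	/* 0 - correct */
	return 0;
}
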
index 7fa3e472cdcaf40e0af6d97a4515a6b383dea0dd..fade566927c3ee93d65cba68a14309c798051d7b 100644 (file)
@@ -402,7 +402,7 @@ static int mb86a20s_get_modulation(struct mb86a20s_state *state,
                [2] = 0x8e,     /* Layer C */
        };
 
-       if (layer > ARRAY_SIZE(reg))
+       if (layer >= ARRAY_SIZE(reg))
                return -EINVAL;
        rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
        if (rc < 0)
@@ -435,7 +435,7 @@ static int mb86a20s_get_fec(struct mb86a20s_state *state,
                [2] = 0x8f,     /* Layer C */
        };
 
-       if (layer > ARRAY_SIZE(reg))
+       if (layer >= ARRAY_SIZE(reg))
                return -EINVAL;
        rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
        if (rc < 0)
@@ -470,7 +470,7 @@ static int mb86a20s_get_interleaving(struct mb86a20s_state *state,
                [2] = 0x90,     /* Layer C */
        };
 
-       if (layer > ARRAY_SIZE(reg))
+       if (layer >= ARRAY_SIZE(reg))
                return -EINVAL;
        rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
        if (rc < 0)
@@ -494,7 +494,7 @@ static int mb86a20s_get_segment_count(struct mb86a20s_state *state,
                [2] = 0x91,     /* Layer C */
        };
 
-       if (layer > ARRAY_SIZE(reg))
+       if (layer >= ARRAY_SIZE(reg))
                return -EINVAL;
        rc = mb86a20s_writereg(state, 0x6d, reg[layer]);
        if (rc < 0)
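
Each hunk above tightens a bounds check: with a three-entry table the valid indices are 0..2, so "layer > ARRAY_SIZE(reg)" still lets layer == 3 through and reads one element past the end, while ">=" rejects it. A stand-alone sketch of the corrected check:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

int main(void)
{
	unsigned char reg[] = { 0x8d, 0x8e, 0x8f };	/* layers A, B, C */
	unsigned int layer = 3;				/* one past the last entry */

	if (layer >= ARRAY_SIZE(reg)) {			/* ">" would accept 3 */
		puts("rejected: layer out of range");
		return 1;
	}
	printf("reg[%u] = 0x%02x\n", layer, reg[layer]);
	return 0;
}
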
index 86da3d816498e320772a3a094702d9699b894e19..ad7c72e8f517728539f93adde89f63ddf980d137 100644 (file)
@@ -29,7 +29,6 @@
 #include <linux/delay.h>
 #include <linux/firmware.h>
 #include <linux/i2c.h>
-#include <linux/version.h>
 #include <asm/div64.h>
 
 #include "dvb_frontend.h"
index ec859a580651e327475f0ce57f7faba7f5a1d26b..f241702a0f36442aa7e3ab1c8d9fe55213e00d78 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/i2c.h>
 #include <linux/module.h>
 #include <linux/mutex.h>
+#include <linux/slab.h>
 
 #include <media/as3645a.h>
 #include <media/v4l2-ctrls.h>
index 9fe4519176a4850ceecda1d29fb961eeb7b03fb2..ec3f6a06f9c3baf5f92e09abbb4e2bcef54196bd 100644 (file)
@@ -922,7 +922,9 @@ static int __devexit atmel_isi_remove(struct platform_device *pdev)
                        isi->fb_descriptors_phys);
 
        iounmap(isi->regs);
+       clk_unprepare(isi->mck);
        clk_put(isi->mck);
+       clk_unprepare(isi->pclk);
        clk_put(isi->pclk);
        kfree(isi);
 
@@ -955,6 +957,10 @@ static int __devinit atmel_isi_probe(struct platform_device *pdev)
        if (IS_ERR(pclk))
                return PTR_ERR(pclk);
 
+       ret = clk_prepare(pclk);
+       if (ret)
+               goto err_clk_prepare_pclk;
+
        isi = kzalloc(sizeof(struct atmel_isi), GFP_KERNEL);
        if (!isi) {
                ret = -ENOMEM;
@@ -978,6 +984,10 @@ static int __devinit atmel_isi_probe(struct platform_device *pdev)
                goto err_clk_get;
        }
 
+       ret = clk_prepare(isi->mck);
+       if (ret)
+               goto err_clk_prepare_mck;
+
        /* Set ISI_MCK's frequency, it should be faster than pixel clock */
        ret = clk_set_rate(isi->mck, pdata->mck_hz);
        if (ret < 0)
@@ -1059,10 +1069,14 @@ err_alloc_ctx:
                        isi->fb_descriptors_phys);
 err_alloc_descriptors:
 err_set_mck_rate:
+       clk_unprepare(isi->mck);
+err_clk_prepare_mck:
        clk_put(isi->mck);
 err_clk_get:
        kfree(isi);
 err_alloc_isi:
+       clk_unprepare(pclk);
+err_clk_prepare_pclk:
        clk_put(pclk);
 
        return ret;
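
The atmel_isi changes above extend the usual goto-based unwind: each newly acquired resource (here clk_prepare() on pclk and mck) gets its own error label, and failure paths jump to the label that releases everything acquired so far, in reverse order. A user-space sketch of the idiom (resource names are illustrative; malloc() stands in for the clock calls):

#include <stdlib.h>

static int setup(void)
{
	char *first = NULL, *second = NULL;

	first = malloc(16);		/* e.g. clk_get() + clk_prepare() */
	if (!first)
		goto err_first;

	second = malloc(16);		/* the next resource in the chain */
	if (!second)
		goto err_second;

	/* success: released here only to keep the sketch leak-free */
	free(second);
	free(first);
	return 0;

err_second:
	free(first);			/* undo only what was already acquired */
err_first:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}
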
index 14cb961c22bdba5ef506ba56ad55e59d2dc1ade5..4bfd865a4106069538b893720622e424b0097915 100644 (file)
@@ -751,20 +751,10 @@ int cx18_v4l2_close(struct file *filp)
 
        CX18_DEBUG_IOCTL("close() of %s\n", s->name);
 
-       v4l2_fh_del(fh);
-       v4l2_fh_exit(fh);
-
-       /* Easy case first: this stream was never claimed by us */
-       if (s->id != id->open_id) {
-               kfree(id);
-               return 0;
-       }
-
-       /* 'Unclaim' this stream */
-
-       /* Stop radio */
        mutex_lock(&cx->serialize_lock);
-       if (id->type == CX18_ENC_STREAM_TYPE_RAD) {
+       /* Stop radio */
+       if (id->type == CX18_ENC_STREAM_TYPE_RAD &&
+                       v4l2_fh_is_singular_file(filp)) {
                /* Closing radio device, return to TV mode */
                cx18_mute(cx);
                /* Mark that the radio is no longer in use */
@@ -781,10 +771,14 @@ int cx18_v4l2_close(struct file *filp)
                }
                /* Done! Unmute and continue. */
                cx18_unmute(cx);
-               cx18_release_stream(s);
-       } else {
-               cx18_stop_capture(id, 0);
        }
+
+       v4l2_fh_del(fh);
+       v4l2_fh_exit(fh);
+
+       /* 'Unclaim' this stream */
+       if (s->id == id->open_id)
+               cx18_stop_capture(id, 0);
        kfree(id);
        mutex_unlock(&cx->serialize_lock);
        return 0;
@@ -810,21 +804,15 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
 
        item->open_id = cx->open_id++;
        filp->private_data = &item->fh;
+       v4l2_fh_add(&item->fh);
 
-       if (item->type == CX18_ENC_STREAM_TYPE_RAD) {
-               /* Try to claim this stream */
-               if (cx18_claim_stream(item, item->type)) {
-                       /* No, it's already in use */
-                       v4l2_fh_exit(&item->fh);
-                       kfree(item);
-                       return -EBUSY;
-               }
-
+       if (item->type == CX18_ENC_STREAM_TYPE_RAD &&
+                       v4l2_fh_is_singular_file(filp)) {
                if (!test_bit(CX18_F_I_RADIO_USER, &cx->i_flags)) {
                        if (atomic_read(&cx->ana_capturing) > 0) {
                                /* switching to radio while capture is
                                   in progress is not polite */
-                               cx18_release_stream(s);
+                               v4l2_fh_del(&item->fh);
                                v4l2_fh_exit(&item->fh);
                                kfree(item);
                                return -EBUSY;
@@ -842,7 +830,6 @@ static int cx18_serialized_open(struct cx18_stream *s, struct file *filp)
                /* Done! Unmute and continue. */
                cx18_unmute(cx);
        }
-       v4l2_fh_add(&item->fh);
        return 0;
 }
 
index 919ed77b32f2dec7fc6e6b195a9722c6f77f6fbb..875a7ce947361ffb720ced4e811dd909cb03b15b 100644 (file)
@@ -1052,7 +1052,7 @@ static int cx231xx_usb_probe(struct usb_interface *interface,
        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (dev == NULL) {
                cx231xx_err(DRIVER_NAME ": out of memory!\n");
-               clear_bit(dev->devno, &cx231xx_devused);
+               clear_bit(nr, &cx231xx_devused);
                return -ENOMEM;
        }
 
index 3c01be999e353817718ab342c27943fc1a52d78f..19b5499d2624cc9b130e8340062222fd12a465e3 100644 (file)
@@ -213,8 +213,8 @@ struct cx23885_board cx23885_boards[] = {
                .portc          = CX23885_MPEG_DVB,
                .tuner_type     = TUNER_XC4000,
                .tuner_addr     = 0x61,
-               .radio_type     = TUNER_XC4000,
-               .radio_addr     = 0x61,
+               .radio_type     = UNSET,
+               .radio_addr     = ADDR_UNSET,
                .input          = {{
                        .type   = CX23885_VMUX_TELEVISION,
                        .vmux   = CX25840_VIN2_CH1 |
index af8a225763d3dbfc3a41068855625a83bf13d4cd..6835eb1fc09319cb52c006f6af6e299f7fc76833 100644 (file)
@@ -943,6 +943,11 @@ static int dvb_register(struct cx23885_tsport *port)
 
                        fe = dvb_attach(xc4000_attach, fe0->dvb.frontend,
                                        &dev->i2c_bus[1].i2c_adap, &cfg);
+                       if (!fe) {
+                               printk(KERN_ERR "%s/2: xc4000 attach failed\n",
+                                      dev->name);
+                               goto frontend_detach;
+                       }
                }
                break;
        case CX23885_BOARD_TBS_6920:
index 4bbf9bb97bde984e23c0665fda59a72bc7dbd80a..c654bdc7ccb201dd4e285c0b7ddfe703ab89cb61 100644 (file)
@@ -1550,7 +1550,6 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
        struct v4l2_control ctrl;
        struct videobuf_dvb_frontend *vfe;
        struct dvb_frontend *fe;
-       int err = 0;
 
        struct analog_parameters params = {
                .mode      = V4L2_TUNER_ANALOG_TV,
@@ -1572,8 +1571,10 @@ static int cx23885_set_freq_via_ops(struct cx23885_dev *dev,
                params.frequency, f->tuner, params.std);
 
        vfe = videobuf_dvb_get_frontend(&dev->ts2.frontends, 1);
-       if (!vfe)
-               err = -EINVAL;
+       if (!vfe) {
+               mutex_unlock(&dev->lock);
+               return -EINVAL;
+       }
 
        fe = vfe->dvb.frontend;
 
index 62c7ad050f9bb0b067a963405ddda6e8cbcd1838..cbd5d119a2c660ebd7f8e9aba9b4206795d290e9 100644 (file)
@@ -1573,8 +1573,8 @@ static const struct cx88_board cx88_boards[] = {
                .name           = "Pinnacle Hybrid PCTV",
                .tuner_type     = TUNER_XC2028,
                .tuner_addr     = 0x61,
-               .radio_type     = TUNER_XC2028,
-               .radio_addr     = 0x61,
+               .radio_type     = UNSET,
+               .radio_addr     = ADDR_UNSET,
                .input          = { {
                        .type   = CX88_VMUX_TELEVISION,
                        .vmux   = 0,
@@ -1611,8 +1611,8 @@ static const struct cx88_board cx88_boards[] = {
                .name           = "Leadtek TV2000 XP Global",
                .tuner_type     = TUNER_XC2028,
                .tuner_addr     = 0x61,
-               .radio_type     = TUNER_XC2028,
-               .radio_addr     = 0x61,
+               .radio_type     = UNSET,
+               .radio_addr     = ADDR_UNSET,
                .input          = { {
                        .type   = CX88_VMUX_TELEVISION,
                        .vmux   = 0,
@@ -2115,8 +2115,8 @@ static const struct cx88_board cx88_boards[] = {
                .name           = "Terratec Cinergy HT PCI MKII",
                .tuner_type     = TUNER_XC2028,
                .tuner_addr     = 0x61,
-               .radio_type     = TUNER_XC2028,
-               .radio_addr     = 0x61,
+               .radio_type     = UNSET,
+               .radio_addr     = ADDR_UNSET,
                .input          = { {
                        .type   = CX88_VMUX_TELEVISION,
                        .vmux   = 0,
@@ -2154,9 +2154,9 @@ static const struct cx88_board cx88_boards[] = {
        [CX88_BOARD_WINFAST_DTV1800H] = {
                .name           = "Leadtek WinFast DTV1800 Hybrid",
                .tuner_type     = TUNER_XC2028,
-               .radio_type     = TUNER_XC2028,
+               .radio_type     = UNSET,
                .tuner_addr     = 0x61,
-               .radio_addr     = 0x61,
+               .radio_addr     = ADDR_UNSET,
                /*
                 * GPIO setting
                 *
@@ -2195,9 +2195,9 @@ static const struct cx88_board cx88_boards[] = {
        [CX88_BOARD_WINFAST_DTV1800H_XC4000] = {
                .name           = "Leadtek WinFast DTV1800 H (XC4000)",
                .tuner_type     = TUNER_XC4000,
-               .radio_type     = TUNER_XC4000,
+               .radio_type     = UNSET,
                .tuner_addr     = 0x61,
-               .radio_addr     = 0x61,
+               .radio_addr     = ADDR_UNSET,
                /*
                 * GPIO setting
                 *
@@ -2236,9 +2236,9 @@ static const struct cx88_board cx88_boards[] = {
        [CX88_BOARD_WINFAST_DTV2000H_PLUS] = {
                .name           = "Leadtek WinFast DTV2000 H PLUS",
                .tuner_type     = TUNER_XC4000,
-               .radio_type     = TUNER_XC4000,
+               .radio_type     = UNSET,
                .tuner_addr     = 0x61,
-               .radio_addr     = 0x61,
+               .radio_addr     = ADDR_UNSET,
                /*
                 * GPIO
                 *   2: 1: mute audio
index 9449423098e0d4163eabe8d7a0b0c57cdb484cc2..aabbf4854f6629d9d1c14939642bb2879b7df70f 100644 (file)
@@ -853,8 +853,7 @@ static int em28xx_dvb_init(struct em28xx *dev)
        case EM28174_BOARD_PCTV_290E:
                dvb->fe[0] = dvb_attach(cxd2820r_attach,
                                        &em28xx_cxd2820r_config,
-                                       &dev->i2c_adap,
-                                       NULL);
+                                       &dev->i2c_adap);
                if (dvb->fe[0]) {
                        /* FE 0 attach tuner */
                        if (!dvb_attach(tda18271_attach,
index 544af91cbdc1b0fb9962d6550cb9c4892299a74b..3949b7dc2368370ebe6d28f15b3b83bc2999babe 100644 (file)
@@ -731,9 +731,6 @@ static int __devinit ivtv_init_struct1(struct ivtv *itv)
 
        init_kthread_work(&itv->irq_work, ivtv_irq_work_handler);
 
-       /* start counting open_id at 1 */
-       itv->open_id = 1;
-
        /* Initial settings */
        itv->cxhdl.port = CX2341X_PORT_MEMORY;
        itv->cxhdl.capabilities = CX2341X_CAP_HAS_SLICED_VBI;
index 8f9cc17b518eaaf6320e092d0a667d8d4876e586..06f3d78389bfbb250f89d655a9d6e57ebe359d24 100644 (file)
@@ -332,7 +332,7 @@ struct ivtv_stream {
        const char *name;               /* name of the stream */
        int type;                       /* stream type */
 
-       u32 id;
+       struct v4l2_fh *fh;             /* pointer to the streaming filehandle */
        spinlock_t qlock;               /* locks access to the queues */
        unsigned long s_flags;          /* status flags, see above */
        int dma;                        /* can be PCI_DMA_TODEVICE, PCI_DMA_FROMDEVICE or PCI_DMA_NONE */
@@ -379,7 +379,6 @@ struct ivtv_stream {
 
 struct ivtv_open_id {
        struct v4l2_fh fh;
-       u32 open_id;                    /* unique ID for this file descriptor */
        int type;                       /* stream type */
        int yuv_frames;                 /* 1: started OUT_UDMA_YUV output mode */
        struct ivtv *itv;
index 38f052257f4620498d6b23e0bf3b7cfb88aa5ba9..2cd6c89b7d917d618de7791bf919bb1015c899b9 100644 (file)
@@ -50,16 +50,16 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
 
        if (test_and_set_bit(IVTV_F_S_CLAIMED, &s->s_flags)) {
                /* someone already claimed this stream */
-               if (s->id == id->open_id) {
+               if (s->fh == &id->fh) {
                        /* yes, this file descriptor did. So that's OK. */
                        return 0;
                }
-               if (s->id == -1 && (type == IVTV_DEC_STREAM_TYPE_VBI ||
+               if (s->fh == NULL && (type == IVTV_DEC_STREAM_TYPE_VBI ||
                                         type == IVTV_ENC_STREAM_TYPE_VBI)) {
                        /* VBI is handled already internally, now also assign
                           the file descriptor to this stream for external
                           reading of the stream. */
-                       s->id = id->open_id;
+                       s->fh = &id->fh;
                        IVTV_DEBUG_INFO("Start Read VBI\n");
                        return 0;
                }
@@ -67,7 +67,7 @@ static int ivtv_claim_stream(struct ivtv_open_id *id, int type)
                IVTV_DEBUG_INFO("Stream %d is busy\n", type);
                return -EBUSY;
        }
-       s->id = id->open_id;
+       s->fh = &id->fh;
        if (type == IVTV_DEC_STREAM_TYPE_VBI) {
                /* Enable reinsertion interrupt */
                ivtv_clear_irq_mask(itv, IVTV_IRQ_DEC_VBI_RE_INSERT);
@@ -104,7 +104,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
        struct ivtv *itv = s->itv;
        struct ivtv_stream *s_vbi;
 
-       s->id = -1;
+       s->fh = NULL;
        if ((s->type == IVTV_DEC_STREAM_TYPE_VBI || s->type == IVTV_ENC_STREAM_TYPE_VBI) &&
                test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
                /* this stream is still in use internally */
@@ -136,7 +136,7 @@ void ivtv_release_stream(struct ivtv_stream *s)
                /* was already cleared */
                return;
        }
-       if (s_vbi->id != -1) {
+       if (s_vbi->fh) {
                /* VBI stream still claimed by a file descriptor */
                return;
        }
@@ -268,11 +268,13 @@ static struct ivtv_buffer *ivtv_get_buffer(struct ivtv_stream *s, int non_block,
                }
 
                /* wait for more data to arrive */
+               mutex_unlock(&itv->serialize_lock);
                prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
                /* New buffers might have become available before we were added to the waitqueue */
                if (!s->q_full.buffers)
                        schedule();
                finish_wait(&s->waitq, &wait);
+               mutex_lock(&itv->serialize_lock);
                if (signal_pending(current)) {
                        /* return if a signal was received */
                        IVTV_DEBUG_INFO("User stopped %s\n", s->name);
@@ -357,7 +359,7 @@ static ssize_t ivtv_read(struct ivtv_stream *s, char __user *ubuf, size_t tot_co
        size_t tot_written = 0;
        int single_frame = 0;
 
-       if (atomic_read(&itv->capturing) == 0 && s->id == -1) {
+       if (atomic_read(&itv->capturing) == 0 && s->fh == NULL) {
                /* shouldn't happen */
                IVTV_DEBUG_WARN("Stream %s not initialized before read\n", s->name);
                return -EIO;
@@ -507,9 +509,7 @@ ssize_t ivtv_v4l2_read(struct file * filp, char __user *buf, size_t count, loff_
 
        IVTV_DEBUG_HI_FILE("read %zd bytes from %s\n", count, s->name);
 
-       mutex_lock(&itv->serialize_lock);
        rc = ivtv_start_capture(id);
-       mutex_unlock(&itv->serialize_lock);
        if (rc)
                return rc;
        return ivtv_read_pos(s, buf, count, pos, filp->f_flags & O_NONBLOCK);
@@ -584,9 +584,7 @@ ssize_t ivtv_v4l2_write(struct file *filp, const char __user *user_buf, size_t c
        set_bit(IVTV_F_S_APPL_IO, &s->s_flags);
 
        /* Start decoder (returns 0 if already started) */
-       mutex_lock(&itv->serialize_lock);
        rc = ivtv_start_decoding(id, itv->speed);
-       mutex_unlock(&itv->serialize_lock);
        if (rc) {
                IVTV_DEBUG_WARN("Failed start decode stream %s\n", s->name);
 
@@ -627,11 +625,13 @@ retry:
                        break;
                if (filp->f_flags & O_NONBLOCK)
                        return -EAGAIN;
+               mutex_unlock(&itv->serialize_lock);
                prepare_to_wait(&s->waitq, &wait, TASK_INTERRUPTIBLE);
                /* New buffers might have become free before we were added to the waitqueue */
                if (!s->q_free.buffers)
                        schedule();
                finish_wait(&s->waitq, &wait);
+               mutex_lock(&itv->serialize_lock);
                if (signal_pending(current)) {
                        IVTV_DEBUG_INFO("User stopped %s\n", s->name);
                        return -EINTR;
@@ -686,12 +686,14 @@ retry:
                        if (mode == OUT_YUV)
                                ivtv_yuv_setup_stream_frame(itv);
 
+                       mutex_unlock(&itv->serialize_lock);
                        prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
                        while (!(got_sig = signal_pending(current)) &&
                                        test_bit(IVTV_F_S_DMA_PENDING, &s->s_flags)) {
                                schedule();
                        }
                        finish_wait(&itv->dma_waitq, &wait);
+                       mutex_lock(&itv->serialize_lock);
                        if (got_sig) {
                                IVTV_DEBUG_INFO("User interrupted %s\n", s->name);
                                return -EINTR;
@@ -756,9 +758,7 @@ unsigned int ivtv_v4l2_enc_poll(struct file *filp, poll_table * wait)
        if (!eof && !test_bit(IVTV_F_S_STREAMING, &s->s_flags)) {
                int rc;
 
-               mutex_lock(&itv->serialize_lock);
                rc = ivtv_start_capture(id);
-               mutex_unlock(&itv->serialize_lock);
                if (rc) {
                        IVTV_DEBUG_INFO("Could not start capture for %s (%d)\n",
                                        s->name, rc);
@@ -808,7 +808,7 @@ void ivtv_stop_capture(struct ivtv_open_id *id, int gop_end)
                     id->type == IVTV_ENC_STREAM_TYPE_VBI) &&
                    test_bit(IVTV_F_S_INTERNAL_USE, &s->s_flags)) {
                        /* Also used internally, don't stop capturing */
-                       s->id = -1;
+                       s->fh = NULL;
                }
                else {
                        ivtv_stop_v4l2_encode_stream(s, gop_end);
@@ -861,20 +861,9 @@ int ivtv_v4l2_close(struct file *filp)
 
        IVTV_DEBUG_FILE("close %s\n", s->name);
 
-       v4l2_fh_del(fh);
-       v4l2_fh_exit(fh);
-
-       /* Easy case first: this stream was never claimed by us */
-       if (s->id != id->open_id) {
-               kfree(id);
-               return 0;
-       }
-
-       /* 'Unclaim' this stream */
-
        /* Stop radio */
-       mutex_lock(&itv->serialize_lock);
-       if (id->type == IVTV_ENC_STREAM_TYPE_RAD) {
+       if (id->type == IVTV_ENC_STREAM_TYPE_RAD &&
+                       v4l2_fh_is_singular_file(filp)) {
                /* Closing radio device, return to TV mode */
                ivtv_mute(itv);
                /* Mark that the radio is no longer in use */
@@ -890,13 +879,25 @@ int ivtv_v4l2_close(struct file *filp)
                if (atomic_read(&itv->capturing) > 0) {
                        /* Undo video mute */
                        ivtv_vapi(itv, CX2341X_ENC_MUTE_VIDEO, 1,
-                               v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
-                               (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
+                                       v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute) |
+                                       (v4l2_ctrl_g_ctrl(itv->cxhdl.video_mute_yuv) << 8));
                }
                /* Done! Unmute and continue. */
                ivtv_unmute(itv);
-               ivtv_release_stream(s);
-       } else if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
+       }
+
+       v4l2_fh_del(fh);
+       v4l2_fh_exit(fh);
+
+       /* Easy case first: this stream was never claimed by us */
+       if (s->fh != &id->fh) {
+               kfree(id);
+               return 0;
+       }
+
+       /* 'Unclaim' this stream */
+
+       if (s->type >= IVTV_DEC_STREAM_TYPE_MPG) {
                struct ivtv_stream *s_vout = &itv->streams[IVTV_DEC_STREAM_TYPE_VOUT];
 
                ivtv_stop_decoding(id, VIDEO_CMD_STOP_TO_BLACK | VIDEO_CMD_STOP_IMMEDIATELY, 0);
@@ -911,21 +912,25 @@ int ivtv_v4l2_close(struct file *filp)
                ivtv_stop_capture(id, 0);
        }
        kfree(id);
-       mutex_unlock(&itv->serialize_lock);
        return 0;
 }
 
-static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
+int ivtv_v4l2_open(struct file *filp)
 {
-#ifdef CONFIG_VIDEO_ADV_DEBUG
        struct video_device *vdev = video_devdata(filp);
-#endif
+       struct ivtv_stream *s = video_get_drvdata(vdev);
        struct ivtv *itv = s->itv;
        struct ivtv_open_id *item;
        int res = 0;
 
        IVTV_DEBUG_FILE("open %s\n", s->name);
 
+       if (ivtv_init_on_first_open(itv)) {
+               IVTV_ERR("Failed to initialize on device %s\n",
+                        video_device_node_name(vdev));
+               return -ENXIO;
+       }
+
 #ifdef CONFIG_VIDEO_ADV_DEBUG
        /* Unless ivtv_fw_debug is set, error out if firmware dead. */
        if (ivtv_fw_debug) {
@@ -966,31 +971,19 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
                return -ENOMEM;
        }
        v4l2_fh_init(&item->fh, s->vdev);
-       if (res < 0) {
-               v4l2_fh_exit(&item->fh);
-               kfree(item);
-               return res;
-       }
        item->itv = itv;
        item->type = s->type;
 
-       item->open_id = itv->open_id++;
        filp->private_data = &item->fh;
+       v4l2_fh_add(&item->fh);
 
-       if (item->type == IVTV_ENC_STREAM_TYPE_RAD) {
-               /* Try to claim this stream */
-               if (ivtv_claim_stream(item, item->type)) {
-                       /* No, it's already in use */
-                       v4l2_fh_exit(&item->fh);
-                       kfree(item);
-                       return -EBUSY;
-               }
-
+       if (item->type == IVTV_ENC_STREAM_TYPE_RAD &&
+                       v4l2_fh_is_singular_file(filp)) {
                if (!test_bit(IVTV_F_I_RADIO_USER, &itv->i_flags)) {
                        if (atomic_read(&itv->capturing) > 0) {
                                /* switching to radio while capture is
                                   in progress is not polite */
-                               ivtv_release_stream(s);
+                               v4l2_fh_del(&item->fh);
                                v4l2_fh_exit(&item->fh);
                                kfree(item);
                                return -EBUSY;
@@ -1022,32 +1015,9 @@ static int ivtv_serialized_open(struct ivtv_stream *s, struct file *filp)
                                1080 * ((itv->yuv_info.v4l2_src_h + 31) & ~31);
                itv->yuv_info.stream_size = 0;
        }
-       v4l2_fh_add(&item->fh);
        return 0;
 }
 
-int ivtv_v4l2_open(struct file *filp)
-{
-       int res;
-       struct ivtv *itv = NULL;
-       struct ivtv_stream *s = NULL;
-       struct video_device *vdev = video_devdata(filp);
-
-       s = video_get_drvdata(vdev);
-       itv = s->itv;
-
-       mutex_lock(&itv->serialize_lock);
-       if (ivtv_init_on_first_open(itv)) {
-               IVTV_ERR("Failed to initialize on device %s\n",
-                        video_device_node_name(vdev));
-               mutex_unlock(&itv->serialize_lock);
-               return -ENXIO;
-       }
-       res = ivtv_serialized_open(s, filp);
-       mutex_unlock(&itv->serialize_lock);
-       return res;
-}
-
 void ivtv_mute(struct ivtv *itv)
 {
        if (atomic_read(&itv->capturing))
index ecafa697326e302803ab85604c0a7ab8aec697f6..c4bc481430985c6405580a461e2cc8e518ae2d73 100644 (file)
@@ -179,6 +179,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
                ivtv_vapi(itv, CX2341X_DEC_PAUSE_PLAYBACK, 1, 0);
 
                /* Wait for any DMA to finish */
+               mutex_unlock(&itv->serialize_lock);
                prepare_to_wait(&itv->dma_waitq, &wait, TASK_INTERRUPTIBLE);
                while (test_bit(IVTV_F_I_DMA, &itv->i_flags)) {
                        got_sig = signal_pending(current);
@@ -188,6 +189,7 @@ int ivtv_set_speed(struct ivtv *itv, int speed)
                        schedule();
                }
                finish_wait(&itv->dma_waitq, &wait);
+               mutex_lock(&itv->serialize_lock);
                if (got_sig)
                        return -EINTR;
 
@@ -1107,6 +1109,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
         * happens within the first 100 lines of the top field.
         * Make 4 attempts to sync to the decoder before giving up.
         */
+       mutex_unlock(&itv->serialize_lock);
        for (f = 0; f < 4; f++) {
                prepare_to_wait(&itv->vsync_waitq, &wait,
                                TASK_UNINTERRUPTIBLE);
@@ -1115,6 +1118,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id *std)
                schedule_timeout(msecs_to_jiffies(25));
        }
        finish_wait(&itv->vsync_waitq, &wait);
+       mutex_lock(&itv->serialize_lock);
 
        if (f == 4)
                IVTV_WARN("Mode change failed to sync to decoder\n");
@@ -1842,8 +1846,7 @@ static long ivtv_default(struct file *file, void *fh, bool valid_prio,
        return 0;
 }
 
-static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
-               unsigned int cmd, unsigned long arg)
+long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
        struct video_device *vfd = video_devdata(filp);
        long ret;
@@ -1855,21 +1858,6 @@ static long ivtv_serialized_ioctl(struct ivtv *itv, struct file *filp,
        return ret;
 }
 
-long ivtv_v4l2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
-       struct ivtv_open_id *id = fh2id(filp->private_data);
-       struct ivtv *itv = id->itv;
-       long res;
-
-       /* DQEVENT can block, so this should not run with the serialize lock */
-       if (cmd == VIDIOC_DQEVENT)
-               return ivtv_serialized_ioctl(itv, filp, cmd, arg);
-       mutex_lock(&itv->serialize_lock);
-       res = ivtv_serialized_ioctl(itv, filp, cmd, arg);
-       mutex_unlock(&itv->serialize_lock);
-       return res;
-}
-
 static const struct v4l2_ioctl_ops ivtv_ioctl_ops = {
        .vidioc_querycap                    = ivtv_querycap,
        .vidioc_s_audio                     = ivtv_s_audio,
index 9c29e964d400b955d36074487c17c22e1195bbcd..1b3b9578bf47657915b464e9b9513f48b481aa0e 100644 (file)
@@ -288,13 +288,13 @@ static void dma_post(struct ivtv_stream *s)
                        ivtv_process_vbi_data(itv, buf, 0, s->type);
                        s->q_dma.bytesused += buf->bytesused;
                }
-               if (s->id == -1) {
+               if (s->fh == NULL) {
                        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_free, 0);
                        return;
                }
        }
        ivtv_queue_move(s, &s->q_dma, NULL, &s->q_full, s->q_dma.bytesused);
-       if (s->id != -1)
+       if (s->fh)
                wake_up(&s->waitq);
 }
 
index e7794dc1330e50348a5e358714abf9f54702e56d..c6e28b4ebbed6dfe3e821e443dcff04d8e4694e3 100644 (file)
@@ -159,7 +159,6 @@ static void ivtv_stream_init(struct ivtv *itv, int type)
                s->buffers = (itv->options.kilobytes[type] * 1024 + s->buf_size - 1) / s->buf_size;
        spin_lock_init(&s->qlock);
        init_waitqueue_head(&s->waitq);
-       s->id = -1;
        s->sg_handle = IVTV_DMA_UNMAPPED;
        ivtv_queue_init(&s->q_free);
        ivtv_queue_init(&s->q_full);
@@ -214,6 +213,7 @@ static int ivtv_prep_dev(struct ivtv *itv, int type)
        s->vdev->fops = ivtv_stream_info[type].fops;
        s->vdev->release = video_device_release;
        s->vdev->tvnorms = V4L2_STD_ALL;
+       s->vdev->lock = &itv->serialize_lock;
        set_bit(V4L2_FL_USE_FH_PRIO, &s->vdev->flags);
        ivtv_set_funcs(s->vdev);
        return 0;
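Handing the driver's existing serialize_lock to the V4L2 core through s->vdev->lock is what lets the ivtv_serialized_open/ivtv_serialized_ioctl wrappers removed above go away: once video_device::lock is set, the core takes that mutex around the driver's file operations on its behalf. A minimal sketch of the idea, with every name outside the v4l2 API purely illustrative and the other mandatory video_device fields (fops, release, v4l2_dev) omitted:

    #include <linux/mutex.h>
    #include <media/v4l2-dev.h>

    struct my_dev {                          /* illustrative driver state */
            struct mutex serialize_lock;
            struct video_device vdev;
    };

    static int my_register(struct my_dev *dev)
    {
            mutex_init(&dev->serialize_lock);
            /* With vdev.lock set, v4l2-dev wraps open/release/ioctl/poll
               in this mutex, so no per-driver "serialized" wrappers are
               needed any more. */
            dev->vdev.lock = &dev->serialize_lock;
            return video_register_device(&dev->vdev, VFL_TYPE_GRABBER, -1);
    }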
index dcbab6ad4c26ee8843336f673aeb1f9f7fe7ef80..2ad65eb29832754b54a5d190257cadd5d6a09031 100644 (file)
@@ -1149,23 +1149,37 @@ int ivtv_yuv_udma_stream_frame(struct ivtv *itv, void __user *src)
 {
        struct yuv_playback_info *yi = &itv->yuv_info;
        struct ivtv_dma_frame dma_args;
+       int res;
 
        ivtv_yuv_setup_stream_frame(itv);
 
        /* We only need to supply source addresses for this */
        dma_args.y_source = src;
        dma_args.uv_source = src + 720 * ((yi->v4l2_src_h + 31) & ~31);
-       return ivtv_yuv_udma_frame(itv, &dma_args);
+       /* Wait for the frame DMA to finish. The serialize_lock is held
+          here, so unlock it while we wait to let other processes access
+          the driver, then take it again afterwards. */
+       mutex_unlock(&itv->serialize_lock);
+       res = ivtv_yuv_udma_frame(itv, &dma_args);
+       mutex_lock(&itv->serialize_lock);
+       return res;
 }
 
 /* IVTV_IOC_DMA_FRAME ioctl handler */
 int ivtv_yuv_prep_frame(struct ivtv *itv, struct ivtv_dma_frame *args)
 {
-/*     IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
+       int res;
 
+/*     IVTV_DEBUG_INFO("yuv_prep_frame\n"); */
        ivtv_yuv_next_free(itv);
        ivtv_yuv_setup_frame(itv, args);
-       return ivtv_yuv_udma_frame(itv, args);
+       /* Wait for the frame DMA to finish. The serialize_lock is held
+          here, so unlock it while we wait to let other processes access
+          the driver, then take it again afterwards. */
+       mutex_unlock(&itv->serialize_lock);
+       res = ivtv_yuv_udma_frame(itv, args);
+       mutex_lock(&itv->serialize_lock);
+       return res;
 }
 
 void ivtv_yuv_close(struct ivtv *itv)
@@ -1174,7 +1188,9 @@ void ivtv_yuv_close(struct ivtv *itv)
        int h_filter, v_filter_1, v_filter_2;
 
        IVTV_DEBUG_YUV("ivtv_yuv_close\n");
+       mutex_unlock(&itv->serialize_lock);
        ivtv_waitq(&itv->vsync_waitq);
+       mutex_lock(&itv->serialize_lock);
 
        yi->running = 0;
        atomic_set(&yi->next_dma_frame, -1);
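The hunks above all follow one rule: any path that may sleep for a long time (stream DMA, vsync, frame DMA) drops serialize_lock before the wait and retakes it afterwards, so other file handles are not stalled behind the sleeper. Stripped of the ivtv specifics, the pattern is roughly the sketch below; the done() callback stands in for the driver's various flag tests and is purely illustrative.

    #include <linux/mutex.h>
    #include <linux/sched.h>
    #include <linux/types.h>
    #include <linux/wait.h>

    /* Sketch only: wait for a condition without holding the serializing
       mutex across the sleep. */
    static int wait_unlocked(struct mutex *lock, wait_queue_head_t *wq,
                             bool (*done)(void *), void *arg)
    {
            DEFINE_WAIT(wait);
            int ret = 0;

            mutex_unlock(lock);
            for (;;) {
                    prepare_to_wait(wq, &wait, TASK_INTERRUPTIBLE);
                    if (done(arg))
                            break;
                    if (signal_pending(current)) {
                            ret = -EINTR;
                            break;
                    }
                    schedule();
            }
            finish_wait(wq, &wait);
            mutex_lock(lock);
            return ret;
    }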
index 0cb461dd396af39ffbff65b387fdae11742d4ebf..74522773e934c18e417f36b36aed40017b35e02f 100644 (file)
@@ -287,7 +287,7 @@ static void mx3_videobuf_queue(struct vb2_buffer *vb)
                sg_dma_len(sg)          = new_size;
 
                txd = ichan->dma_chan.device->device_prep_slave_sg(
-                       &ichan->dma_chan, sg, 1, DMA_FROM_DEVICE,
+                       &ichan->dma_chan, sg, 1, DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT);
                if (!txd)
                        goto error;
index a277f95091ef2b481b369dca5cacc30cbf04afe4..1fb7d5bd5ec26a3f8955e8a6cdc3e7a6cd3841f9 100644 (file)
@@ -1042,7 +1042,8 @@ static int vidioc_querycap(struct file *file, void *fh,
        strlcpy(cap->driver, VOUT_NAME, sizeof(cap->driver));
        strlcpy(cap->card, vout->vfd->name, sizeof(cap->card));
        cap->bus_info[0] = '\0';
-       cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT;
+       cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_OUTPUT |
+               V4L2_CAP_VIDEO_OUTPUT_OVERLAY;
 
        return 0;
 }
@@ -1825,7 +1826,9 @@ static int vidioc_g_fbuf(struct file *file, void *fh,
        ovid = &vout->vid_info;
        ovl = ovid->overlays[0];
 
-       a->flags = 0x0;
+       /* The video overlay must stay within the framebuffer and can't be
+          positioned independently. */
+       a->flags = V4L2_FBUF_FLAG_OVERLAY;
        a->capability = V4L2_FBUF_CAP_LOCAL_ALPHA | V4L2_FBUF_CAP_CHROMAKEY
                | V4L2_FBUF_CAP_SRC_CHROMAKEY;
 
index 905d41d90c6aa816b7162ddf1d0f5e99bee4912a..1f506fde97d0a52433a7477f6ac843cc60af1f2f 100644 (file)
@@ -104,47 +104,16 @@ static struct Nala_table_entry Nala_table[PSZ_MAX][PWC_FPS_MAX_NALA] =
 
 /****************************************************************************/
 
-static int _send_control_msg(struct pwc_device *pdev,
-       u8 request, u16 value, int index, void *buf, int buflen)
-{
-       int rc;
-       void *kbuf = NULL;
-
-       if (buflen) {
-               kbuf = kmemdup(buf, buflen, GFP_KERNEL); /* not allowed on stack */
-               if (kbuf == NULL)
-                       return -ENOMEM;
-       }
-
-       rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
-               request,
-               USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-               value,
-               index,
-               kbuf, buflen, USB_CTRL_SET_TIMEOUT);
-
-       kfree(kbuf);
-       return rc;
-}
-
 static int recv_control_msg(struct pwc_device *pdev,
-       u8 request, u16 value, void *buf, int buflen)
+       u8 request, u16 value, int recv_count)
 {
        int rc;
-       void *kbuf = kmalloc(buflen, GFP_KERNEL); /* not allowed on stack */
-
-       if (kbuf == NULL)
-               return -ENOMEM;
 
        rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0),
                request,
                USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-               value,
-               pdev->vcinterface,
-               kbuf, buflen, USB_CTRL_GET_TIMEOUT);
-       memcpy(buf, kbuf, buflen);
-       kfree(kbuf);
-
+               value, pdev->vcinterface,
+               pdev->ctrl_buf, recv_count, USB_CTRL_GET_TIMEOUT);
        if (rc < 0)
                PWC_ERROR("recv_control_msg error %d req %02x val %04x\n",
                          rc, request, value);
@@ -152,27 +121,39 @@ static int recv_control_msg(struct pwc_device *pdev,
 }
 
 static inline int send_video_command(struct pwc_device *pdev,
-       int index, void *buf, int buflen)
+       int index, const unsigned char *buf, int buflen)
 {
-       return _send_control_msg(pdev,
-               SET_EP_STREAM_CTL,
-               VIDEO_OUTPUT_CONTROL_FORMATTER,
-               index,
-               buf, buflen);
+       int rc;
+
+       memcpy(pdev->ctrl_buf, buf, buflen);
+
+       rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
+                       SET_EP_STREAM_CTL,
+                       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                       VIDEO_OUTPUT_CONTROL_FORMATTER, index,
+                       pdev->ctrl_buf, buflen, USB_CTRL_SET_TIMEOUT);
+       if (rc >= 0)
+               memcpy(pdev->cmd_buf, buf, buflen);
+       else
+               PWC_ERROR("send_video_command error %d\n", rc);
+
+       return rc;
 }
 
 int send_control_msg(struct pwc_device *pdev,
        u8 request, u16 value, void *buf, int buflen)
 {
-       return _send_control_msg(pdev,
-               request, value, pdev->vcinterface, buf, buflen);
+       return usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
+                       request,
+                       USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+                       value, pdev->vcinterface,
+                       buf, buflen, USB_CTRL_SET_TIMEOUT);
 }
 
-static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
-                              int *compression)
+static int set_video_mode_Nala(struct pwc_device *pdev, int size, int pixfmt,
+                              int frames, int *compression, int send_to_cam)
 {
-       unsigned char buf[3];
-       int ret, fps;
+       int fps, ret = 0;
        struct Nala_table_entry *pEntry;
        int frames2frames[31] =
        { /* closest match of framerate */
@@ -194,30 +175,29 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
          7              /* 30    */
        };
 
-       if (size < 0 || size > PSZ_CIF || frames < 4 || frames > 25)
+       if (size < 0 || size > PSZ_CIF)
                return -EINVAL;
+       if (frames < 4)
+               frames = 4;
+       else if (frames > 25)
+               frames = 25;
        frames = frames2frames[frames];
        fps = frames2table[frames];
        pEntry = &Nala_table[size][fps];
        if (pEntry->alternate == 0)
                return -EINVAL;
 
-       memcpy(buf, pEntry->mode, 3);
-       ret = send_video_command(pdev, pdev->vendpoint, buf, 3);
-       if (ret < 0) {
-               PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret);
+       if (send_to_cam)
+               ret = send_video_command(pdev, pdev->vendpoint,
+                                        pEntry->mode, 3);
+       if (ret < 0)
                return ret;
-       }
-       if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
-               ret = pwc_dec1_init(pdev, pdev->type, pdev->release, buf);
-               if (ret < 0)
-                       return ret;
-       }
 
-       pdev->cmd_len = 3;
-       memcpy(pdev->cmd_buf, buf, 3);
+       if (pEntry->compressed && pixfmt == V4L2_PIX_FMT_YUV420)
+               pwc_dec1_init(pdev, pEntry->mode);
 
        /* Set various parameters */
+       pdev->pixfmt = pixfmt;
        pdev->vframes = frames;
        pdev->valternate = pEntry->alternate;
        pdev->width  = pwc_image_sizes[size][0];
@@ -243,18 +223,20 @@ static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames,
 }
 
 
-static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
-       int *compression)
+static int set_video_mode_Timon(struct pwc_device *pdev, int size, int pixfmt,
+                               int frames, int *compression, int send_to_cam)
 {
-       unsigned char buf[13];
        const struct Timon_table_entry *pChoose;
-       int ret, fps;
+       int fps, ret = 0;
 
-       if (size >= PSZ_MAX || frames < 5 || frames > 30 ||
-           *compression < 0 || *compression > 3)
-               return -EINVAL;
-       if (size == PSZ_VGA && frames > 15)
+       if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
                return -EINVAL;
+       if (frames < 5)
+               frames = 5;
+       else if (size == PSZ_VGA && frames > 15)
+               frames = 15;
+       else if (frames > 30)
+               frames = 30;
        fps = (frames / 5) - 1;
 
        /* Find a supported framerate with progressively higher compression */
@@ -268,22 +250,18 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
        if (pChoose == NULL || pChoose->alternate == 0)
                return -ENOENT; /* Not supported. */
 
-       memcpy(buf, pChoose->mode, 13);
-       ret = send_video_command(pdev, pdev->vendpoint, buf, 13);
+       if (send_to_cam)
+               ret = send_video_command(pdev, pdev->vendpoint,
+                                        pChoose->mode, 13);
        if (ret < 0)
                return ret;
 
-       if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
-               ret = pwc_dec23_init(pdev, pdev->type, buf);
-               if (ret < 0)
-                       return ret;
-       }
-
-       pdev->cmd_len = 13;
-       memcpy(pdev->cmd_buf, buf, 13);
+       if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
+               pwc_dec23_init(pdev, pChoose->mode);
 
        /* Set various parameters */
-       pdev->vframes = frames;
+       pdev->pixfmt = pixfmt;
+       pdev->vframes = (fps + 1) * 5;
        pdev->valternate = pChoose->alternate;
        pdev->width  = pwc_image_sizes[size][0];
        pdev->height = pwc_image_sizes[size][1];
@@ -296,18 +274,20 @@ static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames,
 }
 
 
-static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
-       int *compression)
+static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int pixfmt,
+                               int frames, int *compression, int send_to_cam)
 {
        const struct Kiara_table_entry *pChoose = NULL;
-       int fps, ret;
-       unsigned char buf[12];
+       int fps, ret = 0;
 
-       if (size >= PSZ_MAX || frames < 5 || frames > 30 ||
-           *compression < 0 || *compression > 3)
-               return -EINVAL;
-       if (size == PSZ_VGA && frames > 15)
+       if (size >= PSZ_MAX || *compression < 0 || *compression > 3)
                return -EINVAL;
+       if (frames < 5)
+               frames = 5;
+       else if (size == PSZ_VGA && frames > 15)
+               frames = 15;
+       else if (frames > 30)
+               frames = 30;
        fps = (frames / 5) - 1;
 
        /* Find a supported framerate with progressively higher compression */
@@ -320,26 +300,18 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
        if (pChoose == NULL || pChoose->alternate == 0)
                return -ENOENT; /* Not supported. */
 
-       PWC_TRACE("Using alternate setting %d.\n", pChoose->alternate);
-
-       /* usb_control_msg won't take staticly allocated arrays as argument?? */
-       memcpy(buf, pChoose->mode, 12);
-
        /* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */
-       ret = send_video_command(pdev, 4 /* pdev->vendpoint */, buf, 12);
+       if (send_to_cam)
+               ret = send_video_command(pdev, 4, pChoose->mode, 12);
        if (ret < 0)
                return ret;
 
-       if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420) {
-               ret = pwc_dec23_init(pdev, pdev->type, buf);
-               if (ret < 0)
-                       return ret;
-       }
+       if (pChoose->bandlength > 0 && pixfmt == V4L2_PIX_FMT_YUV420)
+               pwc_dec23_init(pdev, pChoose->mode);
 
-       pdev->cmd_len = 12;
-       memcpy(pdev->cmd_buf, buf, 12);
        /* All set and go */
-       pdev->vframes = frames;
+       pdev->pixfmt = pixfmt;
+       pdev->vframes = (fps + 1) * 5;
        pdev->valternate = pChoose->alternate;
        pdev->width  = pwc_image_sizes[size][0];
        pdev->height = pwc_image_sizes[size][1];
@@ -354,22 +326,24 @@ static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames,
 }
 
 int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
-       int frames, int *compression)
+       int pixfmt, int frames, int *compression, int send_to_cam)
 {
        int ret, size;
 
-       PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pdev->pixfmt);
+       PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n",
+                      width, height, frames, pixfmt);
        size = pwc_get_size(pdev, width, height);
        PWC_TRACE("decode_size = %d.\n", size);
 
        if (DEVICE_USE_CODEC1(pdev->type)) {
-               ret = set_video_mode_Nala(pdev, size, frames, compression);
-
+               ret = set_video_mode_Nala(pdev, size, pixfmt, frames,
+                                         compression, send_to_cam);
        } else if (DEVICE_USE_CODEC3(pdev->type)) {
-               ret = set_video_mode_Kiara(pdev, size, frames, compression);
-
+               ret = set_video_mode_Kiara(pdev, size, pixfmt, frames,
+                                          compression, send_to_cam);
        } else {
-               ret = set_video_mode_Timon(pdev, size, frames, compression);
+               ret = set_video_mode_Timon(pdev, size, pixfmt, frames,
+                                          compression, send_to_cam);
        }
        if (ret < 0) {
                PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret);
@@ -436,13 +410,12 @@ unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned i
 int pwc_get_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
 {
        int ret;
-       u8 buf;
 
-       ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
+       ret = recv_control_msg(pdev, request, value, 1);
        if (ret < 0)
                return ret;
 
-       *data = buf;
+       *data = pdev->ctrl_buf[0];
        return 0;
 }
 
@@ -450,7 +423,8 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
 {
        int ret;
 
-       ret = send_control_msg(pdev, request, value, &data, sizeof(data));
+       pdev->ctrl_buf[0] = data;
+       ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 1);
        if (ret < 0)
                return ret;
 
@@ -460,37 +434,34 @@ int pwc_set_u8_ctrl(struct pwc_device *pdev, u8 request, u16 value, u8 data)
 int pwc_get_s8_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
 {
        int ret;
-       s8 buf;
 
-       ret = recv_control_msg(pdev, request, value, &buf, sizeof(buf));
+       ret = recv_control_msg(pdev, request, value, 1);
        if (ret < 0)
                return ret;
 
-       *data = buf;
+       *data = ((s8 *)pdev->ctrl_buf)[0];
        return 0;
 }
 
 int pwc_get_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, int *data)
 {
        int ret;
-       u8 buf[2];
 
-       ret = recv_control_msg(pdev, request, value, buf, sizeof(buf));
+       ret = recv_control_msg(pdev, request, value, 2);
        if (ret < 0)
                return ret;
 
-       *data = (buf[1] << 8) | buf[0];
+       *data = (pdev->ctrl_buf[1] << 8) | pdev->ctrl_buf[0];
        return 0;
 }
 
 int pwc_set_u16_ctrl(struct pwc_device *pdev, u8 request, u16 value, u16 data)
 {
        int ret;
-       u8 buf[2];
 
-       buf[0] = data & 0xff;
-       buf[1] = data >> 8;
-       ret = send_control_msg(pdev, request, value, buf, sizeof(buf));
+       pdev->ctrl_buf[0] = data & 0xff;
+       pdev->ctrl_buf[1] = data >> 8;
+       ret = send_control_msg(pdev, request, value, pdev->ctrl_buf, 2);
        if (ret < 0)
                return ret;
 
@@ -511,7 +482,6 @@ int pwc_button_ctrl(struct pwc_device *pdev, u16 value)
 /* POWER */
 void pwc_camera_power(struct pwc_device *pdev, int power)
 {
-       char buf;
        int r;
 
        if (!pdev->power_save)
@@ -521,13 +491,11 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
                return; /* Not supported by Nala or Timon < release 6 */
 
        if (power)
-               buf = 0x00; /* active */
+               pdev->ctrl_buf[0] = 0x00; /* active */
        else
-               buf = 0xFF; /* power save */
-       r = send_control_msg(pdev,
-               SET_STATUS_CTL, SET_POWER_SAVE_MODE_FORMATTER,
-               &buf, sizeof(buf));
-
+               pdev->ctrl_buf[0] = 0xFF; /* power save */
+       r = send_control_msg(pdev, SET_STATUS_CTL,
+               SET_POWER_SAVE_MODE_FORMATTER, pdev->ctrl_buf, 1);
        if (r < 0)
                PWC_ERROR("Failed to power %s camera (%d)\n",
                          power ? "on" : "off", r);
@@ -535,7 +503,6 @@ void pwc_camera_power(struct pwc_device *pdev, int power)
 
 int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
 {
-       unsigned char buf[2];
        int r;
 
        if (pdev->type < 730)
@@ -551,11 +518,11 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
        if (off_value > 0xff)
                off_value = 0xff;
 
-       buf[0] = on_value;
-       buf[1] = off_value;
+       pdev->ctrl_buf[0] = on_value;
+       pdev->ctrl_buf[1] = off_value;
 
        r = send_control_msg(pdev,
-               SET_STATUS_CTL, LED_FORMATTER, &buf, sizeof(buf));
+               SET_STATUS_CTL, LED_FORMATTER, pdev->ctrl_buf, 2);
        if (r < 0)
                PWC_ERROR("Failed to set LED on/off time (%d)\n", r);
 
@@ -565,7 +532,6 @@ int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
 #ifdef CONFIG_USB_PWC_DEBUG
 int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
 {
-       unsigned char buf;
        int ret = -1, request;
 
        if (pdev->type < 675)
@@ -575,14 +541,13 @@ int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
        else
                request = SENSOR_TYPE_FORMATTER2;
 
-       ret = recv_control_msg(pdev,
-               GET_STATUS_CTL, request, &buf, sizeof(buf));
+       ret = recv_control_msg(pdev, GET_STATUS_CTL, request, 1);
        if (ret < 0)
                return ret;
        if (pdev->type < 675)
-               *sensor = buf | 0x100;
+               *sensor = pdev->ctrl_buf[0] | 0x100;
        else
-               *sensor = buf;
+               *sensor = pdev->ctrl_buf[0];
        return 0;
 }
 #endif
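Two themes run through this file: control transfers now reuse the DMA-safe pdev->ctrl_buf allocated at probe time instead of per-call bounce buffers (USB control messages must not use stack memory), and the set_video_mode_* helpers clamp an out-of-range frame rate onto the supported grid instead of returning -EINVAL. For the Timon/Kiara paths the snapping arithmetic reduces to the standalone sketch below, names illustrative:

    /* E.g. a request for 22 fps becomes index (22 / 5) - 1 = 3 and an
       effective rate of (3 + 1) * 5 = 20 fps; at VGA the cap is 15 fps. */
    static int snap_rate(int is_vga, int frames)
    {
            int fps;

            if (frames < 5)
                    frames = 5;
            else if (is_vga && frames > 15)
                    frames = 15;
            else if (frames > 30)
                    frames = 30;
            fps = (frames / 5) - 1;         /* table row index */
            return (fps + 1) * 5;           /* value stored in pdev->vframes */
    }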
index be0e02cb487f1ff2cba5f32fbccd1826056eb93f..e899036aadf4a4734e0dd611386bf47909fe34bb 100644 (file)
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
-#include "pwc-dec1.h"
+#include "pwc.h"
 
-int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer)
+void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd)
 {
-       struct pwc_dec1_private *pdec;
+       struct pwc_dec1_private *pdec = &pdev->dec1;
 
-       if (pwc->decompress_data == NULL) {
-               pdec = kmalloc(sizeof(struct pwc_dec1_private), GFP_KERNEL);
-               if (pdec == NULL)
-                       return -ENOMEM;
-               pwc->decompress_data = pdec;
-       }
-       pdec = pwc->decompress_data;
-
-       return 0;
+       pdec->version = pdev->release;
 }
index a57d8601080babf0ca5ed403aef63220f900f2af..c565ef8f52fb3a83e29f5c428744aaf354b16db6 100644 (file)
 #ifndef PWC_DEC1_H
 #define PWC_DEC1_H
 
-#include "pwc.h"
+#include <linux/mutex.h>
+
+struct pwc_device;
 
 struct pwc_dec1_private
 {
        int version;
 };
 
-int pwc_dec1_init(struct pwc_device *pwc, int type, int release, void *buffer);
+void pwc_dec1_init(struct pwc_device *pdev, const unsigned char *cmd);
 
 #endif
index 2c6709112b2f2c4d206e40701505658c73483a38..3792fedff9515e85734200d8ffb22aa1ed15bb93 100644 (file)
@@ -294,22 +294,17 @@ static unsigned char pwc_crop_table[256 + 2*MAX_OUTER_CROP_VALUE];
 
 
 /* If the type or the command change, we rebuild the lookup table */
-int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
+void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd)
 {
        int flags, version, shift, i;
-       struct pwc_dec23_private *pdec;
-
-       if (pwc->decompress_data == NULL) {
-               pdec = kmalloc(sizeof(struct pwc_dec23_private), GFP_KERNEL);
-               if (pdec == NULL)
-                       return -ENOMEM;
-               pwc->decompress_data = pdec;
-       }
-       pdec = pwc->decompress_data;
+       struct pwc_dec23_private *pdec = &pdev->dec23;
 
        mutex_init(&pdec->lock);
 
-       if (DEVICE_USE_CODEC3(type)) {
+       if (pdec->last_cmd_valid && pdec->last_cmd == cmd[2])
+               return;
+
+       if (DEVICE_USE_CODEC3(pdev->type)) {
                flags = cmd[2] & 0x18;
                if (flags == 8)
                        pdec->nbits = 7;        /* More bits mean more bits to encode the stream, but better quality */
@@ -356,7 +351,8 @@ int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd)
                pwc_crop_table[MAX_OUTER_CROP_VALUE+256+i] = 255;
 #endif
 
-       return 0;
+       pdec->last_cmd = cmd[2];
+       pdec->last_cmd_valid = 1;
 }
 
 /*
@@ -659,12 +655,12 @@ static void DecompressBand23(struct pwc_dec23_private *pdec,
  * src: raw data
  * dst: image output
  */
-void pwc_dec23_decompress(const struct pwc_device *pwc,
+void pwc_dec23_decompress(struct pwc_device *pdev,
                          const void *src,
                          void *dst)
 {
        int bandlines_left, bytes_per_block;
-       struct pwc_dec23_private *pdec = pwc->decompress_data;
+       struct pwc_dec23_private *pdec = &pdev->dec23;
 
        /* YUV420P image format */
        unsigned char *pout_planar_y;
@@ -674,23 +670,22 @@ void pwc_dec23_decompress(const struct pwc_device *pwc,
 
        mutex_lock(&pdec->lock);
 
-       bandlines_left = pwc->height / 4;
-       bytes_per_block = pwc->width * 4;
-       plane_size = pwc->height * pwc->width;
+       bandlines_left = pdev->height / 4;
+       bytes_per_block = pdev->width * 4;
+       plane_size = pdev->height * pdev->width;
 
        pout_planar_y = dst;
        pout_planar_u = dst + plane_size;
        pout_planar_v = dst + plane_size + plane_size / 4;
 
        while (bandlines_left--) {
-               DecompressBand23(pwc->decompress_data,
-                                src,
+               DecompressBand23(pdec, src,
                                 pout_planar_y, pout_planar_u, pout_planar_v,
-                                pwc->width, pwc->width);
-               src += pwc->vbandlength;
+                                pdev->width, pdev->width);
+               src += pdev->vbandlength;
                pout_planar_y += bytes_per_block;
-               pout_planar_u += pwc->width;
-               pout_planar_v += pwc->width;
+               pout_planar_u += pdev->width;
+               pout_planar_v += pdev->width;
        }
        mutex_unlock(&pdec->lock);
 }
index d64a3c281af6aa0d5fe03471c5cd03d16f010588..c655b1c1e6a9f7d7c7e8ff725672190bb2829e0c 100644 (file)
 #ifndef PWC_DEC23_H
 #define PWC_DEC23_H
 
-#include "pwc.h"
+struct pwc_device;
 
 struct pwc_dec23_private
 {
        struct mutex lock;
 
+       unsigned char last_cmd, last_cmd_valid;
+
   unsigned int scalebits;
   unsigned int nbitsmask, nbits; /* Number of bits of a color in the compressed stream */
 
   unsigned int reservoir;
   unsigned int nbits_in_reservoir;
+
   const unsigned char *stream;
   int temp_colors[16];
 
@@ -51,8 +54,8 @@ struct pwc_dec23_private
 
 };
 
-int pwc_dec23_init(struct pwc_device *pwc, int type, unsigned char *cmd);
-void pwc_dec23_decompress(const struct pwc_device *pwc,
+void pwc_dec23_init(struct pwc_device *pdev, const unsigned char *cmd);
+void pwc_dec23_decompress(struct pwc_device *pdev,
                          const void *src,
                          void *dst);
 #endif
index 943d37ad0d33dd3983318bbddd0f4d19f185f55b..122fbd0081eb548a3416f2848661cc144066a78b 100644 (file)
@@ -128,18 +128,11 @@ static struct usb_driver pwc_driver = {
 #define MAX_DEV_HINTS  20
 #define MAX_ISOC_ERRORS        20
 
-static int default_fps = 10;
 #ifdef CONFIG_USB_PWC_DEBUG
        int pwc_trace = PWC_DEBUG_LEVEL;
 #endif
 static int power_save = -1;
-static int led_on = 100, led_off; /* defaults to LED that is on while in use */
-static struct {
-       int type;
-       char serial_number[30];
-       int device_node;
-       struct pwc_device *pdev;
-} device_hint[MAX_DEV_HINTS];
+static int leds[2] = { 100, 0 };
 
 /***/
 
@@ -386,8 +379,8 @@ static int pwc_isoc_init(struct pwc_device *pdev)
 retry:
        /* We first try with low compression and then retry with a higher
           compression setting if there is not enough bandwidth. */
-       ret = pwc_set_video_mode(pdev, pdev->width, pdev->height,
-                                pdev->vframes, &compression);
+       ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
+                                pdev->vframes, &compression, 1);
 
        /* Get the current alternate interface, adjust packet size */
        intf = usb_ifnum_to_if(udev, 0);
@@ -597,23 +590,9 @@ leave:
 static void pwc_video_release(struct v4l2_device *v)
 {
        struct pwc_device *pdev = container_of(v, struct pwc_device, v4l2_dev);
-       int hint;
-
-       /* search device_hint[] table if we occupy a slot, by any chance */
-       for (hint = 0; hint < MAX_DEV_HINTS; hint++)
-               if (device_hint[hint].pdev == pdev)
-                       device_hint[hint].pdev = NULL;
-
-       /* Free intermediate decompression buffer & tables */
-       if (pdev->decompress_data != NULL) {
-               PWC_DEBUG_MEMORY("Freeing decompression buffer at %p.\n",
-                                pdev->decompress_data);
-               kfree(pdev->decompress_data);
-               pdev->decompress_data = NULL;
-       }
 
        v4l2_ctrl_handler_free(&pdev->ctrl_handler);
-
+       kfree(pdev->ctrl_buf);
        kfree(pdev);
 }
 
@@ -758,7 +737,7 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
 
        /* Turn on camera and set LEDS on */
        pwc_camera_power(pdev, 1);
-       pwc_set_leds(pdev, led_on, led_off);
+       pwc_set_leds(pdev, leds[0], leds[1]);
 
        r = pwc_isoc_init(pdev);
        if (r) {
@@ -813,10 +792,9 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        struct usb_device *udev = interface_to_usbdev(intf);
        struct pwc_device *pdev = NULL;
        int vendor_id, product_id, type_id;
-       int hint, rc;
+       int rc;
        int features = 0;
        int compression = 0;
-       int video_nr = -1; /* default: use next available device */
        int my_power_save = power_save;
        char serial_number[30], *name;
 
@@ -1076,7 +1054,6 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
                return -ENOMEM;
        }
        pdev->type = type_id;
-       pdev->vframes = default_fps;
        pdev->features = features;
        pwc_construct(pdev); /* set min/max sizes correctly */
 
@@ -1107,24 +1084,14 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        pdev->release = le16_to_cpu(udev->descriptor.bcdDevice);
        PWC_DEBUG_PROBE("Release: %04x\n", pdev->release);
 
-       /* Now search device_hint[] table for a match, so we can hint a node number. */
-       for (hint = 0; hint < MAX_DEV_HINTS; hint++) {
-               if (((device_hint[hint].type == -1) || (device_hint[hint].type == pdev->type)) &&
-                    (device_hint[hint].pdev == NULL)) {
-                       /* so far, so good... try serial number */
-                       if ((device_hint[hint].serial_number[0] == '*') || !strcmp(device_hint[hint].serial_number, serial_number)) {
-                               /* match! */
-                               video_nr = device_hint[hint].device_node;
-                               PWC_DEBUG_PROBE("Found hint, will try to register as /dev/video%d\n", video_nr);
-                               break;
-                       }
-               }
+       /* Allocate USB command buffers */
+       pdev->ctrl_buf = kmalloc(sizeof(pdev->cmd_buf), GFP_KERNEL);
+       if (!pdev->ctrl_buf) {
+               PWC_ERROR("Oops, could not allocate memory for the control buffer.\n");
+               rc = -ENOMEM;
+               goto err_free_mem;
        }
 
-       /* occupy slot */
-       if (hint < MAX_DEV_HINTS)
-               device_hint[hint].pdev = pdev;
-
 #ifdef CONFIG_USB_PWC_DEBUG
        /* Query sensor type */
        if (pwc_get_cmos_sensor(pdev, &rc) >= 0) {
@@ -1138,8 +1105,8 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        pwc_set_leds(pdev, 0, 0);
 
        /* Setup initial videomode */
-       rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT, pdev->vframes,
-                               &compression);
+       rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT,
+                               V4L2_PIX_FMT_YUV420, 30, &compression, 1);
        if (rc)
                goto err_free_mem;
 
@@ -1164,7 +1131,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
        pdev->v4l2_dev.ctrl_handler = &pdev->ctrl_handler;
        pdev->vdev.v4l2_dev = &pdev->v4l2_dev;
 
-       rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, video_nr);
+       rc = video_register_device(&pdev->vdev, VFL_TYPE_GRABBER, -1);
        if (rc < 0) {
                PWC_ERROR("Failed to register as video device (%d).\n", rc);
                goto err_unregister_v4l2_dev;
@@ -1207,8 +1174,7 @@ err_unregister_v4l2_dev:
 err_free_controls:
        v4l2_ctrl_handler_free(&pdev->ctrl_handler);
 err_free_mem:
-       if (hint < MAX_DEV_HINTS)
-               device_hint[hint].pdev = NULL;
+       kfree(pdev->ctrl_buf);
        kfree(pdev);
        return rc;
 }
@@ -1243,27 +1209,19 @@ static void usb_pwc_disconnect(struct usb_interface *intf)
  * Initialization code & module stuff
  */
 
-static int fps;
-static int leds[2] = { -1, -1 };
 static unsigned int leds_nargs;
-static char *dev_hint[MAX_DEV_HINTS];
-static unsigned int dev_hint_nargs;
 
-module_param(fps, int, 0444);
 #ifdef CONFIG_USB_PWC_DEBUG
 module_param_named(trace, pwc_trace, int, 0644);
 #endif
 module_param(power_save, int, 0644);
 module_param_array(leds, int, &leds_nargs, 0444);
-module_param_array(dev_hint, charp, &dev_hint_nargs, 0444);
 
-MODULE_PARM_DESC(fps, "Initial frames per second. Varies with model, useful range 5-30");
 #ifdef CONFIG_USB_PWC_DEBUG
 MODULE_PARM_DESC(trace, "For debugging purposes");
 #endif
 MODULE_PARM_DESC(power_save, "Turn power saving for new cameras on or off");
 MODULE_PARM_DESC(leds, "LED on,off time in milliseconds");
-MODULE_PARM_DESC(dev_hint, "Device node hints");
 
 MODULE_DESCRIPTION("Philips & OEM USB webcam driver");
 MODULE_AUTHOR("Luc Saillard <luc@saillard.org>");
@@ -1273,114 +1231,13 @@ MODULE_VERSION( PWC_VERSION );
 
 static int __init usb_pwc_init(void)
 {
-       int i;
-
-#ifdef CONFIG_USB_PWC_DEBUG
-       PWC_INFO("Philips webcam module version " PWC_VERSION " loaded.\n");
-       PWC_INFO("Supports Philips PCA645/646, PCVC675/680/690, PCVC720[40]/730/740/750 & PCVC830/840.\n");
-       PWC_INFO("Also supports the Askey VC010, various Logitech Quickcams, Samsung MPC-C10 and MPC-C30,\n");
-       PWC_INFO("the Creative WebCam 5 & Pro Ex, SOTEC Afina Eye and Visionite VCS-UC300 and VCS-UM100.\n");
-
-       if (pwc_trace >= 0) {
-               PWC_DEBUG_MODULE("Trace options: 0x%04x\n", pwc_trace);
-       }
-#endif
-
-       if (fps) {
-               if (fps < 4 || fps > 30) {
-                       PWC_ERROR("Framerate out of bounds (4-30).\n");
-                       return -EINVAL;
-               }
-               default_fps = fps;
-               PWC_DEBUG_MODULE("Default framerate set to %d.\n", default_fps);
-       }
-
-       if (leds[0] >= 0)
-               led_on = leds[0];
-       if (leds[1] >= 0)
-               led_off = leds[1];
-
-       /* Big device node whoopla. Basically, it allows you to assign a
-          device node (/dev/videoX) to a camera, based on its type
-          & serial number. The format is [type[.serialnumber]:]node.
-
-          Any camera that isn't matched by these rules gets the next
-          available free device node.
-        */
-       for (i = 0; i < MAX_DEV_HINTS; i++) {
-               char *s, *colon, *dot;
-
-               /* This loop also initializes the array */
-               device_hint[i].pdev = NULL;
-               s = dev_hint[i];
-               if (s != NULL && *s != '\0') {
-                       device_hint[i].type = -1; /* wildcard */
-                       strcpy(device_hint[i].serial_number, "*");
-
-                       /* parse string: chop at ':' & '/' */
-                       colon = dot = s;
-                       while (*colon != '\0' && *colon != ':')
-                               colon++;
-                       while (*dot != '\0' && *dot != '.')
-                               dot++;
-                       /* Few sanity checks */
-                       if (*dot != '\0' && dot > colon) {
-                               PWC_ERROR("Malformed camera hint: the colon must be after the dot.\n");
-                               return -EINVAL;
-                       }
-
-                       if (*colon == '\0') {
-                               /* No colon */
-                               if (*dot != '\0') {
-                                       PWC_ERROR("Malformed camera hint: no colon + device node given.\n");
-                                       return -EINVAL;
-                               }
-                               else {
-                                       /* No type or serial number specified, just a number. */
-                                       device_hint[i].device_node =
-                                               simple_strtol(s, NULL, 10);
-                               }
-                       }
-                       else {
-                               /* There's a colon, so we have at least a type and a device node */
-                               device_hint[i].type =
-                                       simple_strtol(s, NULL, 10);
-                               device_hint[i].device_node =
-                                       simple_strtol(colon + 1, NULL, 10);
-                               if (*dot != '\0') {
-                                       /* There's a serial number as well */
-                                       int k;
-
-                                       dot++;
-                                       k = 0;
-                                       while (*dot != ':' && k < 29) {
-                                               device_hint[i].serial_number[k++] = *dot;
-                                               dot++;
-                                       }
-                                       device_hint[i].serial_number[k] = '\0';
-                               }
-                       }
-                       PWC_TRACE("device_hint[%d]:\n", i);
-                       PWC_TRACE("  type    : %d\n", device_hint[i].type);
-                       PWC_TRACE("  serial# : %s\n", device_hint[i].serial_number);
-                       PWC_TRACE("  node    : %d\n", device_hint[i].device_node);
-               }
-               else
-                       device_hint[i].type = 0; /* not filled */
-       } /* ..for MAX_DEV_HINTS */
-
-       PWC_DEBUG_PROBE("Registering driver at address 0x%p.\n", &pwc_driver);
        return usb_register(&pwc_driver);
 }
 
 static void __exit usb_pwc_exit(void)
 {
-       PWC_DEBUG_MODULE("Deregistering driver.\n");
        usb_deregister(&pwc_driver);
-       PWC_INFO("Philips webcam module removed.\n");
 }
 
 module_init(usb_pwc_init);
 module_exit(usb_pwc_exit);
-
-/* vim: set cino= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
index 23a55b5814fc494ea2f2c95e8fb37b4142769726..9be5adffa874683929ec57d41e17eeb36bf600aa 100644 (file)
@@ -90,5 +90,4 @@ void pwc_construct(struct pwc_device *pdev)
                pdev->frame_header_size = 0;
                pdev->frame_trailer_size = 0;
        }
-       pdev->pixfmt = V4L2_PIX_FMT_YUV420; /* default */
 }
index 80e25842e84a218dc3dada13e96ced27e68c3b39..f495eeb5403aaff31dd66ba582a07d86297752b9 100644 (file)
@@ -493,16 +493,11 @@ static int pwc_s_fmt_vid_cap(struct file *file, void *fh, struct v4l2_format *f)
                        (pixelformat>>24)&255);
 
        ret = pwc_set_video_mode(pdev, f->fmt.pix.width, f->fmt.pix.height,
-                                pdev->vframes, &compression);
+                                pixelformat, 30, &compression, 0);
 
        PWC_DEBUG_IOCTL("pwc_set_video_mode(), return=%d\n", ret);
 
-       if (ret == 0) {
-               pdev->pixfmt = pixelformat;
-               pwc_vidioc_fill_fmt(f, pdev->width, pdev->height,
-                                   pdev->pixfmt);
-       }
-
+       pwc_vidioc_fill_fmt(f, pdev->width, pdev->height, pdev->pixfmt);
 leave:
        mutex_unlock(&pdev->udevlock);
        return ret;
@@ -777,33 +772,33 @@ static int pwc_set_autogain_expo(struct pwc_device *pdev)
 static int pwc_set_motor(struct pwc_device *pdev)
 {
        int ret;
-       u8 buf[4];
 
-       buf[0] = 0;
+       pdev->ctrl_buf[0] = 0;
        if (pdev->motor_pan_reset->is_new)
-               buf[0] |= 0x01;
+               pdev->ctrl_buf[0] |= 0x01;
        if (pdev->motor_tilt_reset->is_new)
-               buf[0] |= 0x02;
+               pdev->ctrl_buf[0] |= 0x02;
        if (pdev->motor_pan_reset->is_new || pdev->motor_tilt_reset->is_new) {
                ret = send_control_msg(pdev, SET_MPT_CTL,
-                                      PT_RESET_CONTROL_FORMATTER, buf, 1);
+                                      PT_RESET_CONTROL_FORMATTER,
+                                      pdev->ctrl_buf, 1);
                if (ret < 0)
                        return ret;
        }
 
-       memset(buf, 0, sizeof(buf));
+       memset(pdev->ctrl_buf, 0, 4);
        if (pdev->motor_pan->is_new) {
-               buf[0] = pdev->motor_pan->val & 0xFF;
-               buf[1] = (pdev->motor_pan->val >> 8);
+               pdev->ctrl_buf[0] = pdev->motor_pan->val & 0xFF;
+               pdev->ctrl_buf[1] = (pdev->motor_pan->val >> 8);
        }
        if (pdev->motor_tilt->is_new) {
-               buf[2] = pdev->motor_tilt->val & 0xFF;
-               buf[3] = (pdev->motor_tilt->val >> 8);
+               pdev->ctrl_buf[2] = pdev->motor_tilt->val & 0xFF;
+               pdev->ctrl_buf[3] = (pdev->motor_tilt->val >> 8);
        }
        if (pdev->motor_pan->is_new || pdev->motor_tilt->is_new) {
                ret = send_control_msg(pdev, SET_MPT_CTL,
                                       PT_RELATIVE_CONTROL_FORMATTER,
-                                      buf, sizeof(buf));
+                                      pdev->ctrl_buf, 4);
                if (ret < 0)
                        return ret;
        }
@@ -1094,6 +1089,63 @@ static int pwc_enum_frameintervals(struct file *file, void *fh,
        return 0;
 }
 
+static int pwc_g_parm(struct file *file, void *fh,
+                     struct v4l2_streamparm *parm)
+{
+       struct pwc_device *pdev = video_drvdata(file);
+
+       if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE)
+               return -EINVAL;
+
+       memset(parm, 0, sizeof(*parm));
+
+       parm->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
+       parm->parm.capture.readbuffers = MIN_FRAMES;
+       parm->parm.capture.capability |= V4L2_CAP_TIMEPERFRAME;
+       parm->parm.capture.timeperframe.denominator = pdev->vframes;
+       parm->parm.capture.timeperframe.numerator = 1;
+
+       return 0;
+}
+
+static int pwc_s_parm(struct file *file, void *fh,
+                     struct v4l2_streamparm *parm)
+{
+       struct pwc_device *pdev = video_drvdata(file);
+       int compression = 0;
+       int ret, fps;
+
+       if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE ||
+           parm->parm.capture.timeperframe.numerator == 0)
+               return -EINVAL;
+
+       if (pwc_test_n_set_capt_file(pdev, file))
+               return -EBUSY;
+
+       fps = parm->parm.capture.timeperframe.denominator /
+             parm->parm.capture.timeperframe.numerator;
+
+       mutex_lock(&pdev->udevlock);
+       if (!pdev->udev) {
+               ret = -ENODEV;
+               goto leave;
+       }
+
+       if (pdev->iso_init) {
+               ret = -EBUSY;
+               goto leave;
+       }
+
+       ret = pwc_set_video_mode(pdev, pdev->width, pdev->height, pdev->pixfmt,
+                                fps, &compression, 0);
+
+       pwc_g_parm(file, fh, parm);
+
+leave:
+       mutex_unlock(&pdev->udevlock);
+       return ret;
+}
+
 static int pwc_log_status(struct file *file, void *priv)
 {
        struct pwc_device *pdev = video_drvdata(file);
@@ -1120,4 +1172,6 @@ const struct v4l2_ioctl_ops pwc_ioctl_ops = {
        .vidioc_log_status                  = pwc_log_status,
        .vidioc_enum_framesizes             = pwc_enum_framesizes,
        .vidioc_enum_frameintervals         = pwc_enum_frameintervals,
+       .vidioc_g_parm                      = pwc_g_parm,
+       .vidioc_s_parm                      = pwc_s_parm,
 };
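With g_parm/s_parm wired up, the frame rate is finally selectable through the standard streaming-parameters ioctls rather than the old fps module parameter. A hypothetical userspace request for 15 fps would look like the sketch below; the driver snaps the rate as shown above and writes the effective value back into the same structure.

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Returns the frame rate actually set, or -1 on error. */
    static int request_fps(int fd, int fps)
    {
            struct v4l2_streamparm parm;

            memset(&parm, 0, sizeof(parm));
            parm.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            parm.parm.capture.timeperframe.numerator = 1;
            parm.parm.capture.timeperframe.denominator = fps;
            if (ioctl(fd, VIDIOC_S_PARM, &parm) < 0)
                    return -1;
            return parm.parm.capture.timeperframe.denominator /
                   parm.parm.capture.timeperframe.numerator;
    }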
index 47c518fef179949be25f50c51eac12f95ea7c819..e4d4d711dd1f4df7182d20bcc3591fda9e32f628 100644 (file)
@@ -44,6 +44,8 @@
 #ifdef CONFIG_USB_PWC_INPUT_EVDEV
 #include <linux/input.h>
 #endif
+#include "pwc-dec1.h"
+#include "pwc-dec23.h"
 
 /* Version block */
 #define PWC_VERSION    "10.0.15"
 #define DEVICE_USE_CODEC3(x) ((x)>=700)
 #define DEVICE_USE_CODEC23(x) ((x)>=675)
 
-/* from pwc-dec.h */
-#define PWCX_FLAG_PLANAR        0x0001
-
 /* Request types: video */
 #define SET_LUM_CTL                    0x01
 #define GET_LUM_CTL                    0x02
@@ -248,8 +247,8 @@ struct pwc_device
        char vmirror;           /* for ToUCaM series */
        char power_save;        /* Do powersaving for this cam */
 
-       int cmd_len;
        unsigned char cmd_buf[13];
+       unsigned char *ctrl_buf;
 
        struct urb *urbs[MAX_ISO_BUFS];
        char iso_init;
@@ -272,7 +271,10 @@ struct pwc_device
        int frame_total_size;   /* including header & trailer */
        int drop_frames;
 
-       void *decompress_data;  /* private data for decompression engine */
+       union { /* private data for decompression engine */
+               struct pwc_dec1_private dec1;
+               struct pwc_dec23_private dec23;
+       };
 
        /*
         * We have an 'image' and a 'view', where 'image' is the fixed-size img
@@ -364,7 +366,7 @@ void pwc_construct(struct pwc_device *pdev);
 /** Functions in pwc-ctrl.c */
 /* Request a certain video mode. Returns < 0 if not possible */
 extern int pwc_set_video_mode(struct pwc_device *pdev, int width, int height,
-                             int frames, int *compression);
+       int pixfmt, int frames, int *compression, int send_to_cam);
 extern unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size);
 extern int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value);
 extern int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor);
index 510cfab477fff48c57ddd2d9151b53ec144f91ea..a9e9653beeb4581a90ea34eed1b3d96eab2aae5d 100644 (file)
@@ -693,7 +693,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
                        mf->code = 0;
                        continue;
                }
-               if (mf->width != tfmt->width || mf->width != tfmt->width) {
+               if (mf->width != tfmt->width || mf->height != tfmt->height) {
                        u32 fcc = ffmt->fourcc;
                        tfmt->width  = mf->width;
                        tfmt->height = mf->height;
@@ -702,7 +702,8 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
                                               NULL, &fcc, FIMC_SD_PAD_SOURCE);
                        if (ffmt && ffmt->mbus_code)
                                mf->code = ffmt->mbus_code;
-                       if (mf->width != tfmt->width || mf->width != tfmt->width)
+                       if (mf->width != tfmt->width ||
+                           mf->height != tfmt->height)
                                continue;
                        tfmt->code = mf->code;
                }
@@ -710,7 +711,7 @@ static int fimc_pipeline_try_format(struct fimc_ctx *ctx,
                        ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt);
 
                if (mf->code == tfmt->code &&
-                   mf->width == tfmt->width && mf->width == tfmt->width)
+                   mf->width == tfmt->width && mf->height == tfmt->height)
                        break;
        }
 
index f5cbb8a4c540fc7d6beac2891c36aa812f05f281..81bcbb9492ea0a6d425375a57f3600837082d773 100644 (file)
@@ -848,11 +848,11 @@ int fimc_ctrls_create(struct fimc_ctx *ctx)
        v4l2_ctrl_handler_init(&ctx->ctrl_handler, 4);
 
        ctx->ctrl_rotate = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
-                                    V4L2_CID_HFLIP, 0, 1, 1, 0);
+                                       V4L2_CID_ROTATE, 0, 270, 90, 0);
        ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
-                                   V4L2_CID_VFLIP, 0, 1, 1, 0);
+                                       V4L2_CID_HFLIP, 0, 1, 1, 0);
        ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &fimc_ctrl_ops,
-                                   V4L2_CID_ROTATE, 0, 270, 90, 0);
+                                       V4L2_CID_VFLIP, 0, 1, 1, 0);
        if (variant->has_alpha)
                ctx->ctrl_alpha = v4l2_ctrl_new_std(&ctx->ctrl_handler,
                                    &fimc_ctrl_ops, V4L2_CID_ALPHA_COMPONENT,
index 615c862f0360ef20061e28701d1a99f460d0a7e1..8ea4ee116e46900224a528fa415d300d2c43196b 100644 (file)
@@ -21,7 +21,6 @@
 #include <linux/pm_runtime.h>
 #include <linux/types.h>
 #include <linux/slab.h>
-#include <linux/version.h>
 #include <media/v4l2-ctrls.h>
 #include <media/media-device.h>
 
index c40b0dde188353eb9dba6b9f88e894c7f586d4c3..febaa673d3635c019011ff174c70eee17818de17 100644 (file)
@@ -184,6 +184,7 @@ static int g2d_s_ctrl(struct v4l2_ctrl *ctrl)
                        ctx->rop = ROP4_INVERT;
                else
                        ctx->rop = ROP4_COPY;
+               break;
        default:
                v4l2_err(&ctx->dev->v4l2_dev, "unknown control\n");
                return -EINVAL;
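
Editor's note on the hunk above: it fixes a switch fall-through, where a successfully handled ROP control dropped into the default branch and the ioctl wrongly returned -EINVAL. A minimal sketch of the bug class, with illustrative names and values:

	/* Sketch only (illustrative): the break keeps a handled control
	 * from falling into the "unknown control" error branch. */
	switch (id) {
	case 1:	/* hypothetical ROP control */
		*rop = invert ? 0x5a5a : 0xcccc;
		break;	/* without this, execution continues into default */
	default:
		return -EINVAL;	/* and a valid control looks unknown */
	}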
index f841a3e9845c6f6a00028884beedd27595a0da1e..1105a8749c8b23922c979adf2abe7dee222fe14c 100644 (file)
@@ -989,9 +989,10 @@ static struct v4l2_m2m_ops s5p_jpeg_m2m_ops = {
  * ============================================================================
  */
 
-static int s5p_jpeg_queue_setup(struct vb2_queue *vq, unsigned int *nbuffers,
-                               unsigned int *nplanes, unsigned int sizes[],
-                               void *alloc_ctxs[])
+static int s5p_jpeg_queue_setup(struct vb2_queue *vq,
+                          const struct v4l2_format *fmt,
+                          unsigned int *nbuffers, unsigned int *nplanes,
+                          unsigned int sizes[], void *alloc_ctxs[])
 {
        struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq);
        struct s5p_jpeg_q_data *q_data = NULL;
index e43e128baf5f7c3ec7051337a1574821e9e863a2..83fe461af263529df7502f020886f58d63b68c1c 100644 (file)
@@ -18,7 +18,6 @@
 #include <linux/platform_device.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
-#include <linux/version.h>
 #include <linux/videodev2.h>
 #include <linux/workqueue.h>
 #include <media/videobuf2-core.h>
@@ -475,7 +474,7 @@ static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx,
                        ctx->mv_size = 0;
                }
                ctx->dpb_count = s5p_mfc_get_dpb_count();
-               if (ctx->img_width == 0 || ctx->img_width == 0)
+               if (ctx->img_width == 0 || ctx->img_height == 0)
                        ctx->state = MFCINST_ERROR;
                else
                        ctx->state = MFCINST_HEAD_PARSED;
index 844a4d7797bc0be85ff01eae08837d5d49dbee69..c25ec022d2678f734e46b1f1f3b5b0d496150c8d 100644 (file)
@@ -165,7 +165,7 @@ static struct mfc_control controls[] = {
                .maximum = 32,
                .step = 1,
                .default_value = 1,
-               .flags = V4L2_CTRL_FLAG_VOLATILE,
+               .is_volatile = 1,
        },
 };
 
index 971591d6450fccc5a7164adb94cb40e3404fb635..5b72da5ce4183c15bf98dd570762f2f8f2ffb9de 100644 (file)
@@ -269,8 +269,6 @@ struct saa7164_board saa7164_boards[] = {
                .portb          = SAA7164_MPEG_DVB,
                .portc          = SAA7164_MPEG_ENCODER,
                .portd          = SAA7164_MPEG_ENCODER,
-               .portc          = SAA7164_MPEG_ENCODER,
-               .portd          = SAA7164_MPEG_ENCODER,
                .porte          = SAA7164_MPEG_VBI,
                .portf          = SAA7164_MPEG_VBI,
                .chiprev        = SAA7164_CHIP_REV3,
@@ -333,8 +331,6 @@ struct saa7164_board saa7164_boards[] = {
                .portd          = SAA7164_MPEG_ENCODER,
                .porte          = SAA7164_MPEG_VBI,
                .portf          = SAA7164_MPEG_VBI,
-               .porte          = SAA7164_MPEG_VBI,
-               .portf          = SAA7164_MPEG_VBI,
                .chiprev        = SAA7164_CHIP_REV3,
                .unit           = {{
                        .id             = 0x28,
index 0a2d75f0406631f582b17b50aaf6c4cd11e074df..4ed1c7c28ae704b4f1e843cb8db0ae7b74981c02 100644 (file)
@@ -565,7 +565,7 @@ static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
        spin_unlock_irq(&fh->queue_lock);
 
        desc = fh->chan->device->device_prep_slave_sg(fh->chan,
-               buf->sg, sg_elems, DMA_FROM_DEVICE,
+               buf->sg, sg_elems, DMA_DEV_TO_MEM,
                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
        if (!desc) {
                spin_lock_irq(&fh->queue_lock);
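
Editor's note: this hunk, and the similar MMC/MTD hunks later in the diff, track a dmaengine API split. dma_map_sg()/dma_unmap_sg() keep taking enum dma_data_direction (DMA_TO_DEVICE / DMA_FROM_DEVICE), while device_prep_slave_sg() and dma_slave_config.direction now take enum dma_transfer_direction (DMA_MEM_TO_DEV / DMA_DEV_TO_MEM). A minimal sketch of carrying both values, with illustrative variable names:

	enum dma_data_direction     map_dir;	/* for dma_map_sg()           */
	enum dma_transfer_direction slave_dirn;	/* for device_prep_slave_sg() */

	if (reading_from_device) {		/* hypothetical flag */
		map_dir    = DMA_FROM_DEVICE;
		slave_dirn = DMA_DEV_TO_MEM;
	} else {
		map_dir    = DMA_TO_DEVICE;
		slave_dirn = DMA_MEM_TO_DEV;
	}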
index 129f135d5a5fe1deffaa12fdc81611bd2ecd12d4..c096b3f742003bcbcfc40df79b49a2728f8b9539 100644 (file)
@@ -374,7 +374,7 @@ static inline void set_map_flags(struct poseidon *pd, struct usb_device *udev)
 }
 #endif
 
-static bool check_firmware(struct usb_device *udev, int *down_firmware)
+static int check_firmware(struct usb_device *udev, int *down_firmware)
 {
        void *buf;
        int ret;
@@ -398,7 +398,7 @@ static bool check_firmware(struct usb_device *udev, int *down_firmware)
                *down_firmware = 1;
                return firmware_download(udev);
        }
-       return ret;
+       return 0;
 }
 
 static int poseidon_probe(struct usb_interface *interface,
index da1f4c2d2d4b8d217cd2e3dafc716b88a25ef762..cccd42be718ae7cc25d2b21189f9880528f38225 100644 (file)
@@ -465,8 +465,8 @@ const char *v4l2_ctrl_get_name(u32 id)
        case V4L2_CID_CHROMA_GAIN:              return "Chroma Gain";
        case V4L2_CID_ILLUMINATORS_1:           return "Illuminator 1";
        case V4L2_CID_ILLUMINATORS_2:           return "Illuminator 2";
-       case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:  return "Minimum Number of Capture Buffers";
-       case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:   return "Minimum Number of Output Buffers";
+       case V4L2_CID_MIN_BUFFERS_FOR_CAPTURE:  return "Min Number of Capture Buffers";
+       case V4L2_CID_MIN_BUFFERS_FOR_OUTPUT:   return "Min Number of Output Buffers";
        case V4L2_CID_ALPHA_COMPONENT:          return "Alpha Component";
 
        /* MPEG controls */
@@ -506,25 +506,25 @@ const char *v4l2_ctrl_get_name(u32 id)
        case V4L2_CID_MPEG_VIDEO_MUTE_YUV:      return "Video Mute YUV";
        case V4L2_CID_MPEG_VIDEO_DECODER_SLICE_INTERFACE:       return "Decoder Slice Interface";
        case V4L2_CID_MPEG_VIDEO_DECODER_MPEG4_DEBLOCK_FILTER:  return "MPEG4 Loop Filter Enable";
-       case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:       return "The Number of Intra Refresh MBs";
+       case V4L2_CID_MPEG_VIDEO_CYCLIC_INTRA_REFRESH_MB:       return "Number of Intra Refresh MBs";
        case V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE:               return "Frame Level Rate Control Enable";
        case V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE:                  return "H264 MB Level Rate Control";
        case V4L2_CID_MPEG_VIDEO_HEADER_MODE:                   return "Sequence Header Mode";
-       case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC:                   return "The Max Number of Reference Picture";
+       case V4L2_CID_MPEG_VIDEO_MAX_REF_PIC:                   return "Max Number of Reference Pics";
        case V4L2_CID_MPEG_VIDEO_H263_I_FRAME_QP:               return "H263 I-Frame QP Value";
-       case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:               return "H263 P frame QP Value";
-       case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:               return "H263 B frame QP Value";
+       case V4L2_CID_MPEG_VIDEO_H263_P_FRAME_QP:               return "H263 P-Frame QP Value";
+       case V4L2_CID_MPEG_VIDEO_H263_B_FRAME_QP:               return "H263 B-Frame QP Value";
        case V4L2_CID_MPEG_VIDEO_H263_MIN_QP:                   return "H263 Minimum QP Value";
        case V4L2_CID_MPEG_VIDEO_H263_MAX_QP:                   return "H263 Maximum QP Value";
        case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP:               return "H264 I-Frame QP Value";
-       case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:               return "H264 P frame QP Value";
-       case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:               return "H264 B frame QP Value";
+       case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP:               return "H264 P-Frame QP Value";
+       case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP:               return "H264 B-Frame QP Value";
        case V4L2_CID_MPEG_VIDEO_H264_MAX_QP:                   return "H264 Maximum QP Value";
        case V4L2_CID_MPEG_VIDEO_H264_MIN_QP:                   return "H264 Minimum QP Value";
        case V4L2_CID_MPEG_VIDEO_H264_8X8_TRANSFORM:            return "H264 8x8 Transform Enable";
        case V4L2_CID_MPEG_VIDEO_H264_CPB_SIZE:                 return "H264 CPB Buffer Size";
-       case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:             return "H264 Entorpy Mode";
-       case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:                 return "H264 I Period";
+       case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE:             return "H264 Entropy Mode";
+       case V4L2_CID_MPEG_VIDEO_H264_I_PERIOD:                 return "H264 I-Frame Period";
        case V4L2_CID_MPEG_VIDEO_H264_LEVEL:                    return "H264 Level";
        case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA:        return "H264 Loop Filter Alpha Offset";
        case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA:         return "H264 Loop Filter Beta Offset";
@@ -535,16 +535,16 @@ const char *v4l2_ctrl_get_name(u32 id)
        case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_ENABLE:           return "Aspect Ratio VUI Enable";
        case V4L2_CID_MPEG_VIDEO_H264_VUI_SAR_IDC:              return "VUI Aspect Ratio IDC";
        case V4L2_CID_MPEG_VIDEO_MPEG4_I_FRAME_QP:              return "MPEG4 I-Frame QP Value";
-       case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:              return "MPEG4 P frame QP Value";
-       case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:              return "MPEG4 B frame QP Value";
+       case V4L2_CID_MPEG_VIDEO_MPEG4_P_FRAME_QP:              return "MPEG4 P-Frame QP Value";
+       case V4L2_CID_MPEG_VIDEO_MPEG4_B_FRAME_QP:              return "MPEG4 B-Frame QP Value";
        case V4L2_CID_MPEG_VIDEO_MPEG4_MIN_QP:                  return "MPEG4 Minimum QP Value";
        case V4L2_CID_MPEG_VIDEO_MPEG4_MAX_QP:                  return "MPEG4 Maximum QP Value";
        case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL:                   return "MPEG4 Level";
        case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE:                 return "MPEG4 Profile";
        case V4L2_CID_MPEG_VIDEO_MPEG4_QPEL:                    return "Quarter Pixel Search Enable";
-       case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:         return "The Maximum Bytes Per Slice";
-       case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:            return "The Number of MB in a Slice";
-       case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:              return "The Slice Partitioning Method";
+       case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES:         return "Maximum Bytes in a Slice";
+       case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB:            return "Number of MBs in a Slice";
+       case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE:              return "Slice Partitioning Method";
        case V4L2_CID_MPEG_VIDEO_VBV_SIZE:                      return "VBV Buffer Size";
 
        /* CAMERA controls */
@@ -580,7 +580,7 @@ const char *v4l2_ctrl_get_name(u32 id)
        case V4L2_CID_AUDIO_LIMITER_ENABLED:    return "Audio Limiter Feature Enabled";
        case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME: return "Audio Limiter Release Time";
        case V4L2_CID_AUDIO_LIMITER_DEVIATION:  return "Audio Limiter Deviation";
-       case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Feature Enabled";
+       case V4L2_CID_AUDIO_COMPRESSION_ENABLED: return "Audio Compression Enabled";
        case V4L2_CID_AUDIO_COMPRESSION_GAIN:   return "Audio Compression Gain";
        case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD: return "Audio Compression Threshold";
        case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME: return "Audio Compression Attack Time";
@@ -588,24 +588,24 @@ const char *v4l2_ctrl_get_name(u32 id)
        case V4L2_CID_PILOT_TONE_ENABLED:       return "Pilot Tone Feature Enabled";
        case V4L2_CID_PILOT_TONE_DEVIATION:     return "Pilot Tone Deviation";
        case V4L2_CID_PILOT_TONE_FREQUENCY:     return "Pilot Tone Frequency";
-       case V4L2_CID_TUNE_PREEMPHASIS:         return "Pre-emphasis settings";
+       case V4L2_CID_TUNE_PREEMPHASIS:         return "Pre-Emphasis";
        case V4L2_CID_TUNE_POWER_LEVEL:         return "Tune Power Level";
        case V4L2_CID_TUNE_ANTENNA_CAPACITOR:   return "Tune Antenna Capacitor";
 
        /* Flash controls */
-       case V4L2_CID_FLASH_CLASS:              return "Flash controls";
-       case V4L2_CID_FLASH_LED_MODE:           return "LED mode";
-       case V4L2_CID_FLASH_STROBE_SOURCE:      return "Strobe source";
+       case V4L2_CID_FLASH_CLASS:              return "Flash Controls";
+       case V4L2_CID_FLASH_LED_MODE:           return "LED Mode";
+       case V4L2_CID_FLASH_STROBE_SOURCE:      return "Strobe Source";
        case V4L2_CID_FLASH_STROBE:             return "Strobe";
-       case V4L2_CID_FLASH_STROBE_STOP:        return "Stop strobe";
-       case V4L2_CID_FLASH_STROBE_STATUS:      return "Strobe status";
-       case V4L2_CID_FLASH_TIMEOUT:            return "Strobe timeout";
-       case V4L2_CID_FLASH_INTENSITY:          return "Intensity, flash mode";
-       case V4L2_CID_FLASH_TORCH_INTENSITY:    return "Intensity, torch mode";
-       case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, indicator";
+       case V4L2_CID_FLASH_STROBE_STOP:        return "Stop Strobe";
+       case V4L2_CID_FLASH_STROBE_STATUS:      return "Strobe Status";
+       case V4L2_CID_FLASH_TIMEOUT:            return "Strobe Timeout";
+       case V4L2_CID_FLASH_INTENSITY:          return "Intensity, Flash Mode";
+       case V4L2_CID_FLASH_TORCH_INTENSITY:    return "Intensity, Torch Mode";
+       case V4L2_CID_FLASH_INDICATOR_INTENSITY: return "Intensity, Indicator";
        case V4L2_CID_FLASH_FAULT:              return "Faults";
        case V4L2_CID_FLASH_CHARGE:             return "Charge";
-       case V4L2_CID_FLASH_READY:              return "Ready to strobe";
+       case V4L2_CID_FLASH_READY:              return "Ready to Strobe";
 
        default:
                return NULL;
index 77feeb67e2db389334e186299c882ce4fd87e847..3f623859a337126aa035df7c00817a6ad5da6fee 100644 (file)
@@ -1871,6 +1871,7 @@ static long __video_do_ioctl(struct file *file,
        case VIDIOC_S_FREQUENCY:
        {
                struct v4l2_frequency *p = arg;
+               enum v4l2_tuner_type type;
 
                if (!ops->vidioc_s_frequency)
                        break;
@@ -1878,9 +1879,14 @@ static long __video_do_ioctl(struct file *file,
                        ret = ret_prio;
                        break;
                }
+               type = (vfd->vfl_type == VFL_TYPE_RADIO) ?
+                       V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
                dbgarg(cmd, "tuner=%d, type=%d, frequency=%d\n",
                                p->tuner, p->type, p->frequency);
-               ret = ops->vidioc_s_frequency(file, fh, p);
+               if (p->type != type)
+                       ret = -EINVAL;
+               else
+                       ret = ops->vidioc_s_frequency(file, fh, p);
                break;
        }
        case VIDIOC_G_SLICED_VBI_CAP:
index f6d26419445ee8728367fe925ed3f80d2d484092..4c09ab781ec3a3cbf503f2626c64b449e94dabf3 100644 (file)
@@ -1958,7 +1958,6 @@ static int zoran_g_fbuf(struct file *file, void *__fh,
        mutex_unlock(&zr->resource_lock);
        fb->fmt.colorspace = V4L2_COLORSPACE_SRGB;
        fb->fmt.field = V4L2_FIELD_INTERLACED;
-       fb->flags = V4L2_FBUF_FLAG_OVERLAY;
        fb->capability = V4L2_FBUF_CAP_LIST_CLIPPING;
 
        return 0;
index 63be60bc3455396e764fa57661f497d5670b04a8..86cc3f7841cdfdb2714368b1459a5fd54ac6491a 100644 (file)
 #define to_mcp(d)              container_of(d, struct mcp, attached_device)
 #define to_mcp_driver(d)       container_of(d, struct mcp_driver, drv)
 
-static const struct mcp_device_id *mcp_match_id(const struct mcp_device_id *id,
-                                               const char *codec)
-{
-       while (id->name[0]) {
-               if (strcmp(codec, id->name) == 0)
-                       return id;
-               id++;
-       }
-       return NULL;
-}
-
-const struct mcp_device_id *mcp_get_device_id(const struct mcp *mcp)
-{
-       const struct mcp_driver *driver =
-               to_mcp_driver(mcp->attached_device.driver);
-
-       return mcp_match_id(driver->id_table, mcp->codec);
-}
-EXPORT_SYMBOL(mcp_get_device_id);
-
 static int mcp_bus_match(struct device *dev, struct device_driver *drv)
 {
-       const struct mcp *mcp = to_mcp(dev);
-       const struct mcp_driver *driver = to_mcp_driver(drv);
-
-       if (driver->id_table)
-               return !!mcp_match_id(driver->id_table, mcp->codec);
-
-       return 0;
+       return 1;
 }
 
 static int mcp_bus_probe(struct device *dev)
@@ -100,18 +74,9 @@ static int mcp_bus_resume(struct device *dev)
        return ret;
 }
 
-static int mcp_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
-{
-       struct mcp *mcp = to_mcp(dev);
-
-       add_uevent_var(env, "MODALIAS=%s%s", MCP_MODULE_PREFIX, mcp->codec);
-       return 0;
-}
-
 static struct bus_type mcp_bus_type = {
        .name           = "mcp",
        .match          = mcp_bus_match,
-       .uevent         = mcp_bus_uevent,
        .probe          = mcp_bus_probe,
        .remove         = mcp_bus_remove,
        .suspend        = mcp_bus_suspend,
@@ -128,9 +93,11 @@ static struct bus_type mcp_bus_type = {
  */
 void mcp_set_telecom_divisor(struct mcp *mcp, unsigned int div)
 {
-       spin_lock_irq(&mcp->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&mcp->lock, flags);
        mcp->ops->set_telecom_divisor(mcp, div);
-       spin_unlock_irq(&mcp->lock);
+       spin_unlock_irqrestore(&mcp->lock, flags);
 }
 EXPORT_SYMBOL(mcp_set_telecom_divisor);
 
@@ -143,9 +110,11 @@ EXPORT_SYMBOL(mcp_set_telecom_divisor);
  */
 void mcp_set_audio_divisor(struct mcp *mcp, unsigned int div)
 {
-       spin_lock_irq(&mcp->lock);
+       unsigned long flags;
+
+       spin_lock_irqsave(&mcp->lock, flags);
        mcp->ops->set_audio_divisor(mcp, div);
-       spin_unlock_irq(&mcp->lock);
+       spin_unlock_irqrestore(&mcp->lock, flags);
 }
 EXPORT_SYMBOL(mcp_set_audio_divisor);
 
@@ -198,10 +167,11 @@ EXPORT_SYMBOL(mcp_reg_read);
  */
 void mcp_enable(struct mcp *mcp)
 {
-       spin_lock_irq(&mcp->lock);
+       unsigned long flags;
+       spin_lock_irqsave(&mcp->lock, flags);
        if (mcp->use_count++ == 0)
                mcp->ops->enable(mcp);
-       spin_unlock_irq(&mcp->lock);
+       spin_unlock_irqrestore(&mcp->lock, flags);
 }
 EXPORT_SYMBOL(mcp_enable);
 
@@ -247,14 +217,9 @@ struct mcp *mcp_host_alloc(struct device *parent, size_t size)
 }
 EXPORT_SYMBOL(mcp_host_alloc);
 
-int mcp_host_register(struct mcp *mcp, void *pdata)
+int mcp_host_register(struct mcp *mcp)
 {
-       if (!mcp->codec)
-               return -EINVAL;
-
-       mcp->attached_device.platform_data = pdata;
        dev_set_name(&mcp->attached_device, "mcp0");
-       request_module("%s%s", MCP_MODULE_PREFIX, mcp->codec);
        return device_register(&mcp->attached_device);
 }
 EXPORT_SYMBOL(mcp_host_register);
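
Editor's note on the locking changes in this file: spin_lock_irq()/spin_unlock_irq() unconditionally re-enable interrupts on unlock, which is unsafe if a caller already holds interrupts disabled; the irqsave/irqrestore variants preserve the caller's interrupt state. The pattern, sketched with an illustrative lock field:

	unsigned long flags;

	spin_lock_irqsave(&mcp->lock, flags);		/* saves current IRQ state */
	/* ... touch state shared with interrupt context ... */
	spin_unlock_irqrestore(&mcp->lock, flags);	/* restores it, whatever it was */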
index 9adc2eb6949252031b3964320d3e6e1dfa4d4ed0..02c53a0766c4275d7496ece5b96cb20b0e1ddc49 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/spinlock.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/mcp.h>
-#include <linux/io.h>
 
 #include <mach/dma.h>
 #include <mach/hardware.h>
 #include <asm/system.h>
 #include <mach/mcp.h>
 
-/* Register offsets */
-#define MCCR0  0x00
-#define MCDR0  0x08
-#define MCDR1  0x0C
-#define MCDR2  0x10
-#define MCSR   0x18
-#define MCCR1  0x00
+#include <mach/assabet.h>
+
 
 struct mcp_sa11x0 {
-       u32             mccr0;
-       u32             mccr1;
-       unsigned char   *mccr0_base;
-       unsigned char   *mccr1_base;
+       u32     mccr0;
+       u32     mccr1;
 };
 
 #define priv(mcp)      ((struct mcp_sa11x0 *)mcp_priv(mcp))
@@ -47,25 +39,25 @@ struct mcp_sa11x0 {
 static void
 mcp_sa11x0_set_telecom_divisor(struct mcp *mcp, unsigned int divisor)
 {
-       struct mcp_sa11x0 *priv = priv(mcp);
+       unsigned int mccr0;
 
        divisor /= 32;
 
-       priv->mccr0 &= ~0x00007f00;
-       priv->mccr0 |= divisor << 8;
-       __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0);
+       mccr0 = Ser4MCCR0 & ~0x00007f00;
+       mccr0 |= divisor << 8;
+       Ser4MCCR0 = mccr0;
 }
 
 static void
 mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor)
 {
-       struct mcp_sa11x0 *priv = priv(mcp);
+       unsigned int mccr0;
 
        divisor /= 32;
 
-       priv->mccr0 &= ~0x0000007f;
-       priv->mccr0 |= divisor;
-       __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0);
+       mccr0 = Ser4MCCR0 & ~0x0000007f;
+       mccr0 |= divisor;
+       Ser4MCCR0 = mccr0;
 }
 
 /*
@@ -79,16 +71,12 @@ mcp_sa11x0_write(struct mcp *mcp, unsigned int reg, unsigned int val)
 {
        int ret = -ETIME;
        int i;
-       u32 mcpreg;
-       struct mcp_sa11x0 *priv = priv(mcp);
 
-       mcpreg = reg << 17 | MCDR2_Wr | (val & 0xffff);
-       __raw_writel(mcpreg, priv->mccr0_base + MCDR2);
+       Ser4MCDR2 = reg << 17 | MCDR2_Wr | (val & 0xffff);
 
        for (i = 0; i < 2; i++) {
                udelay(mcp->rw_timeout);
-               mcpreg = __raw_readl(priv->mccr0_base + MCSR);
-               if (mcpreg & MCSR_CWC) {
+               if (Ser4MCSR & MCSR_CWC) {
                        ret = 0;
                        break;
                }
@@ -109,18 +97,13 @@ mcp_sa11x0_read(struct mcp *mcp, unsigned int reg)
 {
        int ret = -ETIME;
        int i;
-       u32 mcpreg;
-       struct mcp_sa11x0 *priv = priv(mcp);
 
-       mcpreg = reg << 17 | MCDR2_Rd;
-       __raw_writel(mcpreg, priv->mccr0_base + MCDR2);
+       Ser4MCDR2 = reg << 17 | MCDR2_Rd;
 
        for (i = 0; i < 2; i++) {
                udelay(mcp->rw_timeout);
-               mcpreg = __raw_readl(priv->mccr0_base + MCSR);
-               if (mcpreg & MCSR_CRC) {
-                       ret = __raw_readl(priv->mccr0_base + MCDR2)
-                               & 0xffff;
+               if (Ser4MCSR & MCSR_CRC) {
+                       ret = Ser4MCDR2 & 0xffff;
                        break;
                }
        }
@@ -133,19 +116,13 @@ mcp_sa11x0_read(struct mcp *mcp, unsigned int reg)
 
 static void mcp_sa11x0_enable(struct mcp *mcp)
 {
-       struct mcp_sa11x0 *priv = priv(mcp);
-
-       __raw_writel(-1, priv->mccr0_base + MCSR);
-       priv->mccr0 |= MCCR0_MCE;
-       __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0);
+       Ser4MCSR = -1;
+       Ser4MCCR0 |= MCCR0_MCE;
 }
 
 static void mcp_sa11x0_disable(struct mcp *mcp)
 {
-       struct mcp_sa11x0 *priv = priv(mcp);
-
-       priv->mccr0 &= ~MCCR0_MCE;
-       __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0);
+       Ser4MCCR0 &= ~MCCR0_MCE;
 }
 
 /*
@@ -165,69 +142,50 @@ static int mcp_sa11x0_probe(struct platform_device *pdev)
        struct mcp_plat_data *data = pdev->dev.platform_data;
        struct mcp *mcp;
        int ret;
-       struct mcp_sa11x0 *priv;
-       struct resource *res_mem0, *res_mem1;
-       u32 size0, size1;
 
        if (!data)
                return -ENODEV;
 
-       if (!data->codec)
-               return -ENODEV;
-
-       res_mem0 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res_mem0)
-               return -ENODEV;
-       size0 = res_mem0->end - res_mem0->start + 1;
-
-       res_mem1 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (!res_mem1)
-               return -ENODEV;
-       size1 = res_mem1->end - res_mem1->start + 1;
-
-       if (!request_mem_region(res_mem0->start, size0, "sa11x0-mcp"))
+       if (!request_mem_region(0x80060000, 0x60, "sa11x0-mcp"))
                return -EBUSY;
 
-       if (!request_mem_region(res_mem1->start, size1, "sa11x0-mcp")) {
-               ret = -EBUSY;
-               goto release;
-       }
-
        mcp = mcp_host_alloc(&pdev->dev, sizeof(struct mcp_sa11x0));
        if (!mcp) {
                ret = -ENOMEM;
-               goto release2;
+               goto release;
        }
 
-       priv = priv(mcp);
-
        mcp->owner              = THIS_MODULE;
        mcp->ops                = &mcp_sa11x0;
        mcp->sclk_rate          = data->sclk_rate;
-       mcp->dma_audio_rd       = DDAR_DevAdd(res_mem0->start + MCDR0)
-                               + DDAR_DevRd + DDAR_Brst4 + DDAR_8BitDev;
-       mcp->dma_audio_wr       = DDAR_DevAdd(res_mem0->start + MCDR0)
-                               + DDAR_DevWr + DDAR_Brst4 + DDAR_8BitDev;
-       mcp->dma_telco_rd       = DDAR_DevAdd(res_mem0->start + MCDR1)
-                               + DDAR_DevRd + DDAR_Brst4 + DDAR_8BitDev;
-       mcp->dma_telco_wr       = DDAR_DevAdd(res_mem0->start + MCDR1)
-                               + DDAR_DevWr + DDAR_Brst4 + DDAR_8BitDev;
-       mcp->codec              = data->codec;
+       mcp->dma_audio_rd       = DMA_Ser4MCP0Rd;
+       mcp->dma_audio_wr       = DMA_Ser4MCP0Wr;
+       mcp->dma_telco_rd       = DMA_Ser4MCP1Rd;
+       mcp->dma_telco_wr       = DMA_Ser4MCP1Wr;
+       mcp->gpio_base          = data->gpio_base;
 
        platform_set_drvdata(pdev, mcp);
 
+       if (machine_is_assabet()) {
+               ASSABET_BCR_set(ASSABET_BCR_CODEC_RST);
+       }
+
+       /*
+        * Setup the PPC unit correctly.
+        */
+       PPDR &= ~PPC_RXD4;
+       PPDR |= PPC_TXD4 | PPC_SCLK | PPC_SFRM;
+       PSDR |= PPC_RXD4;
+       PSDR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
+       PPSR &= ~(PPC_TXD4 | PPC_SCLK | PPC_SFRM);
+
        /*
         * Initialise device.  Note that we initially
         * set the sampling rate to minimum.
         */
-       priv->mccr0_base = ioremap(res_mem0->start, size0);
-       priv->mccr1_base = ioremap(res_mem1->start, size1);
-
-       __raw_writel(-1, priv->mccr0_base + MCSR);
-       priv->mccr1 = data->mccr1;
-       priv->mccr0 = data->mccr0 | 0x7f7f;
-       __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0);
-       __raw_writel(priv->mccr1, priv->mccr1_base + MCCR1);
+       Ser4MCSR = -1;
+       Ser4MCCR1 = data->mccr1;
+       Ser4MCCR0 = data->mccr0 | 0x7f7f;
 
        /*
         * Calculate the read/write timeout (us) from the bit clock
@@ -237,53 +195,36 @@ static int mcp_sa11x0_probe(struct platform_device *pdev)
        mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) /
                          mcp->sclk_rate;
 
-       ret = mcp_host_register(mcp, data->codec_pdata);
+       ret = mcp_host_register(mcp);
        if (ret == 0)
                goto out;
 
- release2:
-       release_mem_region(res_mem1->start, size1);
  release:
-       release_mem_region(res_mem0->start, size0);
+       release_mem_region(0x80060000, 0x60);
        platform_set_drvdata(pdev, NULL);
 
  out:
        return ret;
 }
 
-static int mcp_sa11x0_remove(struct platform_device *pdev)
+static int mcp_sa11x0_remove(struct platform_device *dev)
 {
-       struct mcp *mcp = platform_get_drvdata(pdev);
-       struct mcp_sa11x0 *priv = priv(mcp);
-       struct resource *res_mem;
-       u32 size;
+       struct mcp *mcp = platform_get_drvdata(dev);
 
-       platform_set_drvdata(pdev, NULL);
+       platform_set_drvdata(dev, NULL);
        mcp_host_unregister(mcp);
+       release_mem_region(0x80060000, 0x60);
 
-       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (res_mem) {
-               size = res_mem->end - res_mem->start + 1;
-               release_mem_region(res_mem->start, size);
-       }
-       res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-       if (res_mem) {
-               size = res_mem->end - res_mem->start + 1;
-               release_mem_region(res_mem->start, size);
-       }
-       iounmap(priv->mccr0_base);
-       iounmap(priv->mccr1_base);
        return 0;
 }
 
 static int mcp_sa11x0_suspend(struct platform_device *dev, pm_message_t state)
 {
        struct mcp *mcp = platform_get_drvdata(dev);
-       struct mcp_sa11x0 *priv = priv(mcp);
-       u32 mccr0;
 
-       mccr0 = priv->mccr0 & ~MCCR0_MCE;
-       __raw_writel(mccr0, priv->mccr0_base + MCCR0);
+       priv(mcp)->mccr0 = Ser4MCCR0;
+       priv(mcp)->mccr1 = Ser4MCCR1;
+       Ser4MCCR0 &= ~MCCR0_MCE;
 
        return 0;
 }
@@ -291,10 +232,9 @@ static int mcp_sa11x0_suspend(struct platform_device *dev, pm_message_t state)
 static int mcp_sa11x0_resume(struct platform_device *dev)
 {
        struct mcp *mcp = platform_get_drvdata(dev);
-       struct mcp_sa11x0 *priv = priv(mcp);
 
-       __raw_writel(priv->mccr0, priv->mccr0_base + MCCR0);
-       __raw_writel(priv->mccr1, priv->mccr1_base + MCCR1);
+       Ser4MCCR1 = priv(mcp)->mccr1;
+       Ser4MCCR0 = priv(mcp)->mccr0;
 
        return 0;
 }
@@ -311,7 +251,6 @@ static struct platform_driver mcp_sa11x0_driver = {
        .resume         = mcp_sa11x0_resume,
        .driver         = {
                .name   = "sa11x0-mcp",
-               .owner  = THIS_MODULE,
        },
 };
 
index dda86293dc9fc0a3f8baaf678591d764824cfe14..b2d8e512d3cb002b6f3e809e47a12f01f96de588 100644 (file)
@@ -282,6 +282,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
                /* Default PLL configuration after power up */
                twl6040->pll = TWL6040_SYSCLK_SEL_LPPLL;
                twl6040->sysclk = 19200000;
+               twl6040->mclk = 32768;
        } else {
                /* already powered-down */
                if (!twl6040->power_count) {
@@ -305,6 +306,7 @@ int twl6040_power(struct twl6040 *twl6040, int on)
                        twl6040_power_down(twl6040);
                }
                twl6040->sysclk = 0;
+               twl6040->mclk = 0;
        }
 
 out:
@@ -324,23 +326,38 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
        hppllctl = twl6040_reg_read(twl6040, TWL6040_REG_HPPLLCTL);
        lppllctl = twl6040_reg_read(twl6040, TWL6040_REG_LPPLLCTL);
 
+       /* Force full reconfiguration when switching between PLL */
+       if (pll_id != twl6040->pll) {
+               twl6040->sysclk = 0;
+               twl6040->mclk = 0;
+       }
+
        switch (pll_id) {
        case TWL6040_SYSCLK_SEL_LPPLL:
                /* low-power PLL divider */
-               switch (freq_out) {
-               case 17640000:
-                       lppllctl |= TWL6040_LPLLFIN;
-                       break;
-               case 19200000:
-                       lppllctl &= ~TWL6040_LPLLFIN;
-                       break;
-               default:
-                       dev_err(twl6040->dev,
-                               "freq_out %d not supported\n", freq_out);
-                       ret = -EINVAL;
-                       goto pll_out;
+               /* Change the sysclk configuration only if it has been changed */
+               if (twl6040->sysclk != freq_out) {
+                       switch (freq_out) {
+                       case 17640000:
+                               lppllctl |= TWL6040_LPLLFIN;
+                               break;
+                       case 19200000:
+                               lppllctl &= ~TWL6040_LPLLFIN;
+                               break;
+                       default:
+                               dev_err(twl6040->dev,
+                                       "freq_out %d not supported\n",
+                                       freq_out);
+                               ret = -EINVAL;
+                               goto pll_out;
+                       }
+                       twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
+                                         lppllctl);
                }
-               twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+
+               /* The PLL in use has not been changed, we can exit */
+               if (twl6040->pll == pll_id)
+                       break;
 
                switch (freq_in) {
                case 32768:
@@ -371,48 +388,56 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
                        goto pll_out;
                }
 
-               hppllctl &= ~TWL6040_MCLK_MSK;
+               if (twl6040->mclk != freq_in) {
+                       hppllctl &= ~TWL6040_MCLK_MSK;
+
+                       switch (freq_in) {
+                       case 12000000:
+                               /* PLL enabled, active mode */
+                               hppllctl |= TWL6040_MCLK_12000KHZ |
+                                           TWL6040_HPLLENA;
+                               break;
+                       case 19200000:
+                               /*
+                               * PLL disabled
+                               * (enable PLL if MCLK jitter quality
+                               *  doesn't meet specification)
+                               */
+                               hppllctl |= TWL6040_MCLK_19200KHZ;
+                               break;
+                       case 26000000:
+                               /* PLL enabled, active mode */
+                               hppllctl |= TWL6040_MCLK_26000KHZ |
+                                           TWL6040_HPLLENA;
+                               break;
+                       case 38400000:
+                               /* PLL enabled, active mode */
+                               hppllctl |= TWL6040_MCLK_38400KHZ |
+                                           TWL6040_HPLLENA;
+                               break;
+                       default:
+                               dev_err(twl6040->dev,
+                                       "freq_in %d not supported\n", freq_in);
+                               ret = -EINVAL;
+                               goto pll_out;
+                       }
 
-               switch (freq_in) {
-               case 12000000:
-                       /* PLL enabled, active mode */
-                       hppllctl |= TWL6040_MCLK_12000KHZ |
-                                   TWL6040_HPLLENA;
-                       break;
-               case 19200000:
                        /*
-                        * PLL disabled
-                        * (enable PLL if MCLK jitter quality
-                        *  doesn't meet specification)
+                        * enable clock slicer to ensure input waveform is
+                        * square
                         */
-                       hppllctl |= TWL6040_MCLK_19200KHZ;
-                       break;
-               case 26000000:
-                       /* PLL enabled, active mode */
-                       hppllctl |= TWL6040_MCLK_26000KHZ |
-                                   TWL6040_HPLLENA;
-                       break;
-               case 38400000:
-                       /* PLL enabled, active mode */
-                       hppllctl |= TWL6040_MCLK_38400KHZ |
-                                   TWL6040_HPLLENA;
-                       break;
-               default:
-                       dev_err(twl6040->dev,
-                               "freq_in %d not supported\n", freq_in);
-                       ret = -EINVAL;
-                       goto pll_out;
-               }
+                       hppllctl |= TWL6040_HPLLSQRENA;
 
-               /* enable clock slicer to ensure input waveform is square */
-               hppllctl |= TWL6040_HPLLSQRENA;
-
-               twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL, hppllctl);
-               usleep_range(500, 700);
-               lppllctl |= TWL6040_HPLLSEL;
-               twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
-               lppllctl &= ~TWL6040_LPLLENA;
-               twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL, lppllctl);
+                       twl6040_reg_write(twl6040, TWL6040_REG_HPPLLCTL,
+                                         hppllctl);
+                       usleep_range(500, 700);
+                       lppllctl |= TWL6040_HPLLSEL;
+                       twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
+                                         lppllctl);
+                       lppllctl &= ~TWL6040_LPLLENA;
+                       twl6040_reg_write(twl6040, TWL6040_REG_LPPLLCTL,
+                                         lppllctl);
+               }
                break;
        default:
                dev_err(twl6040->dev, "unknown pll id %d\n", pll_id);
@@ -421,6 +446,7 @@ int twl6040_set_pll(struct twl6040 *twl6040, int pll_id,
        }
 
        twl6040->sysclk = freq_out;
+       twl6040->mclk = freq_in;
        twl6040->pll = pll_id;
 
 pll_out:
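
Editor's note on the twl6040 changes above: the driver now caches the last programmed mclk alongside sysclk and clears both when the PLL selection changes, so the HPPLL/LPPLL registers are rewritten only when the requested input or output frequency actually differs. A condensed sketch of that guard, with illustrative field names and an illustrative early-out:

	/* Sketch: skip register writes when nothing changed; switching PLLs
	 * clears the cached values to force a full reconfiguration. */
	if (pll_id != twl6040->pll) {
		twl6040->sysclk = 0;
		twl6040->mclk = 0;
	}
	if (twl6040->sysclk == freq_out && twl6040->mclk == freq_in)
		return 0;	/* already configured */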
index 91c4f25e0e558fe20a04ae10e1554d98f0f3fff9..febc90cdef7ea4a97885f7cc88b36ceb543e8b7e 100644 (file)
@@ -36,15 +36,6 @@ static DEFINE_MUTEX(ucb1x00_mutex);
 static LIST_HEAD(ucb1x00_drivers);
 static LIST_HEAD(ucb1x00_devices);
 
-static struct mcp_device_id ucb1x00_id[] = {
-       { "ucb1x00", 0 },  /* auto-detection */
-       { "ucb1200", UCB_ID_1200 },
-       { "ucb1300", UCB_ID_1300 },
-       { "tc35143", UCB_ID_TC35143 },
-       { }
-};
-MODULE_DEVICE_TABLE(mcp, ucb1x00_id);
-
 /**
  *     ucb1x00_io_set_dir - set IO direction
  *     @ucb: UCB1x00 structure describing chip
@@ -157,16 +148,22 @@ static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset
 {
        struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio);
        unsigned long flags;
+       unsigned old, mask = 1 << offset;
 
        spin_lock_irqsave(&ucb->io_lock, flags);
-       ucb->io_dir |= (1 << offset);
-       ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
-
+       old = ucb->io_out;
        if (value)
-               ucb->io_out |= 1 << offset;
+               ucb->io_out |= mask;
        else
-               ucb->io_out &= ~(1 << offset);
-       ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
+               ucb->io_out &= ~mask;
+
+       if (old != ucb->io_out)
+               ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
+
+       if (!(ucb->io_dir & mask)) {
+               ucb->io_dir |= mask;
+               ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
+       }
        spin_unlock_irqrestore(&ucb->io_lock, flags);
 
        return 0;
@@ -536,33 +533,17 @@ static struct class ucb1x00_class = {
 
 static int ucb1x00_probe(struct mcp *mcp)
 {
-       const struct mcp_device_id *mid;
        struct ucb1x00 *ucb;
        struct ucb1x00_driver *drv;
-       struct ucb1x00_plat_data *pdata;
        unsigned int id;
        int ret = -ENODEV;
        int temp;
 
        mcp_enable(mcp);
        id = mcp_reg_read(mcp, UCB_ID);
-       mid = mcp_get_device_id(mcp);
 
-       if (mid && mid->driver_data) {
-               if (id != mid->driver_data) {
-                       printk(KERN_WARNING "%s wrong ID %04x found: %04x\n",
-                               mid->name, (unsigned int) mid->driver_data, id);
-                       goto err_disable;
-               }
-       } else {
-               mid = &ucb1x00_id[1];
-               while (mid->driver_data) {
-                       if (id == mid->driver_data)
-                               break;
-                       mid++;
-               }
-               printk(KERN_WARNING "%s ID not found: %04x\n",
-                       ucb1x00_id[0].name, id);
+       if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
+               printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
                goto err_disable;
        }
 
@@ -571,28 +552,28 @@ static int ucb1x00_probe(struct mcp *mcp)
        if (!ucb)
                goto err_disable;
 
-       pdata = mcp->attached_device.platform_data;
+
        ucb->dev.class = &ucb1x00_class;
        ucb->dev.parent = &mcp->attached_device;
-       dev_set_name(&ucb->dev, mid->name);
+       dev_set_name(&ucb->dev, "ucb1x00");
 
        spin_lock_init(&ucb->lock);
        spin_lock_init(&ucb->io_lock);
        sema_init(&ucb->adc_sem, 1);
 
-       ucb->id  = mid;
+       ucb->id  = id;
        ucb->mcp = mcp;
        ucb->irq = ucb1x00_detect_irq(ucb);
        if (ucb->irq == NO_IRQ) {
-               printk(KERN_ERR "%s: IRQ probe failed\n", mid->name);
+               printk(KERN_ERR "UCB1x00: IRQ probe failed\n");
                ret = -ENODEV;
                goto err_free;
        }
 
        ucb->gpio.base = -1;
-       if (pdata && (pdata->gpio_base >= 0)) {
+       if (mcp->gpio_base != 0) {
                ucb->gpio.label = dev_name(&ucb->dev);
-               ucb->gpio.base = pdata->gpio_base;
+               ucb->gpio.base = mcp->gpio_base;
                ucb->gpio.ngpio = 10;
                ucb->gpio.set = ucb1x00_gpio_set;
                ucb->gpio.get = ucb1x00_gpio_get;
@@ -605,10 +586,10 @@ static int ucb1x00_probe(struct mcp *mcp)
                dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");
 
        ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING,
-                         mid->name, ucb);
+                         "UCB1x00", ucb);
        if (ret) {
-               printk(KERN_ERR "%s: unable to grab irq%d: %d\n",
-                       mid->name, ucb->irq, ret);
+               printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n",
+                       ucb->irq, ret);
                goto err_gpio;
        }
 
@@ -712,6 +693,7 @@ static int ucb1x00_resume(struct mcp *mcp)
        struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
        struct ucb1x00_dev *dev;
 
+       ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
        ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
        mutex_lock(&ucb1x00_mutex);
        list_for_each_entry(dev, &ucb->devs, dev_node) {
@@ -730,7 +712,6 @@ static struct mcp_driver ucb1x00_driver = {
        .remove         = ucb1x00_remove,
        .suspend        = ucb1x00_suspend,
        .resume         = ucb1x00_resume,
-       .id_table       = ucb1x00_id,
 };
 
 static int __init ucb1x00_init(void)
index 40ec3c118868d7f02e104d2ed10e2f766840a0f4..63a3cbdfa3f33a85a146f34fff7459cc6b46c17a 100644 (file)
@@ -47,7 +47,6 @@ struct ucb1x00_ts {
        u16                     x_res;
        u16                     y_res;
 
-       unsigned int            restart:1;
        unsigned int            adcsync:1;
 };
 
@@ -207,15 +206,17 @@ static int ucb1x00_thread(void *_ts)
 {
        struct ucb1x00_ts *ts = _ts;
        DECLARE_WAITQUEUE(wait, current);
+       bool frozen, ignore = false;
        int valid = 0;
 
        set_freezable();
        add_wait_queue(&ts->irq_wait, &wait);
-       while (!kthread_should_stop()) {
+       while (!kthread_freezable_should_stop(&frozen)) {
                unsigned int x, y, p;
                signed long timeout;
 
-               ts->restart = 0;
+               if (frozen)
+                       ignore = true;
 
                ucb1x00_adc_enable(ts->ucb);
 
@@ -258,7 +259,7 @@ static int ucb1x00_thread(void *_ts)
                         * space.  We therefore leave it to user space
                         * to do any filtering they please.
                         */
-                       if (!ts->restart) {
+                       if (!ignore) {
                                ucb1x00_ts_evt_add(ts, p, x, y);
                                valid = 1;
                        }
@@ -267,8 +268,6 @@ static int ucb1x00_thread(void *_ts)
                        timeout = HZ / 100;
                }
 
-               try_to_freeze();
-
                schedule_timeout(timeout);
        }
 
@@ -340,26 +339,6 @@ static void ucb1x00_ts_close(struct input_dev *idev)
        ucb1x00_disable(ts->ucb);
 }
 
-#ifdef CONFIG_PM
-static int ucb1x00_ts_resume(struct ucb1x00_dev *dev)
-{
-       struct ucb1x00_ts *ts = dev->priv;
-
-       if (ts->rtask != NULL) {
-               /*
-                * Restart the TS thread to ensure the
-                * TS interrupt mode is set up again
-                * after sleep.
-                */
-               ts->restart = 1;
-               wake_up(&ts->irq_wait);
-       }
-       return 0;
-}
-#else
-#define ucb1x00_ts_resume NULL
-#endif
-
 
 /*
  * Initialisation.
@@ -382,7 +361,7 @@ static int ucb1x00_ts_add(struct ucb1x00_dev *dev)
        ts->adcsync = adcsync ? UCB_SYNC : UCB_NOSYNC;
 
        idev->name       = "Touchscreen panel";
-       idev->id.product = ts->ucb->id->driver_data;
+       idev->id.product = ts->ucb->id;
        idev->open       = ucb1x00_ts_open;
        idev->close      = ucb1x00_ts_close;
 
@@ -425,7 +404,6 @@ static void ucb1x00_ts_remove(struct ucb1x00_dev *dev)
 static struct ucb1x00_driver ucb1x00_ts_driver = {
        .add            = ucb1x00_ts_add,
        .remove         = ucb1x00_ts_remove,
-       .resume         = ucb1x00_ts_resume,
 };
 
 static int __init ucb1x00_ts_init(void)
index eb5cd28bc6d8d7a1917f7f15d82b267b692315f8..a2d25e4857e31387fc0457b5a47efc4c1e438bd0 100644 (file)
@@ -513,7 +513,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
         * transaction, and then put it under external control
         */
        memset(&config, 0, sizeof(config));
-       config.direction = DMA_TO_DEVICE;
+       config.direction = DMA_MEM_TO_DEV;
        config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
        config.dst_maxburst = fpga_fifo_size(priv->regs) / 2 / 4;
        ret = chan->device->device_control(chan, DMA_SLAVE_CONFIG,
index 150cd7061b808019d4a775ab29fc287558d7508e..28adefe70f96274c93198ee50acc9ee65adec1ae 100644 (file)
@@ -354,6 +354,7 @@ static void lkdtm_do_action(enum ctype which)
 static void lkdtm_handler(void)
 {
        unsigned long flags;
+       bool do_it = false;
 
        spin_lock_irqsave(&count_lock, flags);
        count--;
@@ -361,10 +362,13 @@ static void lkdtm_handler(void)
                        cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
 
        if (count == 0) {
-               lkdtm_do_action(cptype);
+               do_it = true;
                count = cpoint_count;
        }
        spin_unlock_irqrestore(&count_lock, flags);
+
+       if (do_it)
+               lkdtm_do_action(cptype);
 }
 
 static int lkdtm_register_cpoint(enum cname which)
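
Editor's note on the lkdtm hunk above: the decision is recorded under count_lock and lkdtm_do_action() runs only after the lock is released, so the (often crashing or sleeping) action never executes with a raw spinlock held and interrupts disabled. The decide-under-lock, act-outside idiom, sketched with illustrative names:

	bool do_it = false;
	unsigned long flags;

	spin_lock_irqsave(&count_lock, flags);
	if (--count == 0) {
		do_it = true;
		count = cpoint_count;	/* re-arm the crash point */
	}
	spin_unlock_irqrestore(&count_lock, flags);

	if (do_it)
		lkdtm_do_action(cptype);	/* safe: lock already dropped */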
index a7ee5027146528aafc6b18fa8c2e55f32e32a174..fcfe1eb5acc8f421a1e10a3c387c3c139171ef4d 100644 (file)
@@ -823,6 +823,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
        struct scatterlist              *sg;
        unsigned int                    i;
        enum dma_data_direction         direction;
+       enum dma_transfer_direction     slave_dirn;
        unsigned int                    sglen;
        u32 iflags;
 
@@ -860,16 +861,19 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
        if (host->caps.has_dma)
                atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
 
-       if (data->flags & MMC_DATA_READ)
+       if (data->flags & MMC_DATA_READ) {
                direction = DMA_FROM_DEVICE;
-       else
+               slave_dirn = DMA_DEV_TO_MEM;
+       } else {
                direction = DMA_TO_DEVICE;
+               slave_dirn = DMA_MEM_TO_DEV;
+       }
 
        sglen = dma_map_sg(chan->device->dev, data->sg,
                        data->sg_len, direction);
 
        desc = chan->device->device_prep_slave_sg(chan,
-                       data->sg, sglen, direction,
+                       data->sg, sglen, slave_dirn,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc)
                goto unmap_exit;
index ece03b491c7db824fe7a698353f2c14f9f0cb6d2..0d955ffaf44e2c3ec5961f966687da3e675819d9 100644 (file)
@@ -374,6 +374,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
        struct dma_chan *chan;
        struct dma_device *device;
        struct dma_async_tx_descriptor *desc;
+       enum dma_data_direction buffer_dirn;
        int nr_sg;
 
        /* Check if next job is already prepared */
@@ -387,10 +388,12 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
        }
 
        if (data->flags & MMC_DATA_READ) {
-               conf.direction = DMA_FROM_DEVICE;
+               conf.direction = DMA_DEV_TO_MEM;
+               buffer_dirn = DMA_FROM_DEVICE;
                chan = host->dma_rx_channel;
        } else {
-               conf.direction = DMA_TO_DEVICE;
+               conf.direction = DMA_MEM_TO_DEV;
+               buffer_dirn = DMA_TO_DEVICE;
                chan = host->dma_tx_channel;
        }
 
@@ -403,7 +406,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
                return -EINVAL;
 
        device = chan->device;
-       nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, conf.direction);
+       nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
        if (nr_sg == 0)
                return -EINVAL;
 
@@ -426,7 +429,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
  unmap_exit:
        if (!next)
                dmaengine_terminate_all(chan);
-       dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
+       dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
        return -ENOMEM;
 }
 
index 7088b40f95797b00172edf0bdf4f50437b18947f..4184b7946bbf34fd459c671a921ae6baf659e4d2 100644 (file)
@@ -218,6 +218,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
        unsigned int blksz = data->blksz;
        unsigned int datasize = nob * blksz;
        struct scatterlist *sg;
+       enum dma_transfer_direction slave_dirn;
        int i, nents;
 
        if (data->flags & MMC_DATA_STREAM)
@@ -240,10 +241,13 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
                }
        }
 
-       if (data->flags & MMC_DATA_READ)
+       if (data->flags & MMC_DATA_READ) {
                host->dma_dir = DMA_FROM_DEVICE;
-       else
+               slave_dirn = DMA_DEV_TO_MEM;
+       } else {
                host->dma_dir = DMA_TO_DEVICE;
+               slave_dirn = DMA_MEM_TO_DEV;
+       }
 
        nents = dma_map_sg(host->dma->device->dev, data->sg,
                                     data->sg_len,  host->dma_dir);
@@ -251,7 +255,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)
                return -EINVAL;
 
        host->desc = host->dma->device->device_prep_slave_sg(host->dma,
-               data->sg, data->sg_len, host->dma_dir,
+               data->sg, data->sg_len, slave_dirn,
                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
        if (!host->desc) {
index 4e2e019dd5c94e02a8a6b54b413dcf2e162e7777..382c835d217cf23372594b230dcd7923971288d5 100644 (file)
@@ -154,6 +154,7 @@ struct mxs_mmc_host {
        struct dma_chan                 *dmach;
        struct mxs_dma_data             dma_data;
        unsigned int                    dma_dir;
+       enum dma_transfer_direction     slave_dirn;
        u32                             ssp_pio_words[SSP_PIO_NUM];
 
        unsigned int                    version;
@@ -324,7 +325,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma(
        }
 
        desc = host->dmach->device->device_prep_slave_sg(host->dmach,
-                               sgl, sg_len, host->dma_dir, append);
+                               sgl, sg_len, host->slave_dirn, append);
        if (desc) {
                desc->callback = mxs_mmc_dma_irq_callback;
                desc->callback_param = host;
@@ -356,6 +357,7 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host)
        host->ssp_pio_words[1] = cmd0;
        host->ssp_pio_words[2] = cmd1;
        host->dma_dir = DMA_NONE;
+       host->slave_dirn = DMA_TRANS_NONE;
        desc = mxs_mmc_prep_dma(host, 0);
        if (!desc)
                goto out;
@@ -395,6 +397,7 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
        host->ssp_pio_words[1] = cmd0;
        host->ssp_pio_words[2] = cmd1;
        host->dma_dir = DMA_NONE;
+       host->slave_dirn = DMA_TRANS_NONE;
        desc = mxs_mmc_prep_dma(host, 0);
        if (!desc)
                goto out;
@@ -433,6 +436,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
        int i;
 
        unsigned short dma_data_dir, timeout;
+       enum dma_transfer_direction slave_dirn;
        unsigned int data_size = 0, log2_blksz;
        unsigned int blocks = data->blocks;
 
@@ -448,9 +452,11 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
 
        if (data->flags & MMC_DATA_WRITE) {
                dma_data_dir = DMA_TO_DEVICE;
+               slave_dirn = DMA_MEM_TO_DEV;
                read = 0;
        } else {
                dma_data_dir = DMA_FROM_DEVICE;
+               slave_dirn = DMA_DEV_TO_MEM;
                read = BM_SSP_CTRL0_READ;
        }
 
@@ -510,6 +516,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
        host->ssp_pio_words[1] = cmd0;
        host->ssp_pio_words[2] = cmd1;
        host->dma_dir = DMA_NONE;
+       host->slave_dirn = DMA_TRANS_NONE;
        desc = mxs_mmc_prep_dma(host, 0);
        if (!desc)
                goto out;
@@ -518,6 +525,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
        WARN_ON(host->data != NULL);
        host->data = data;
        host->dma_dir = dma_data_dir;
+       host->slave_dirn = slave_dirn;
        desc = mxs_mmc_prep_dma(host, 1);
        if (!desc)
                goto out;
index 4a2c5b2355f21d81887e0f92d7f3350c1c51f288..f5d8b53be333aa9c997b0e82c1e8e69a8204eb0d 100644 (file)
@@ -286,7 +286,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
        if (ret > 0) {
                host->dma_active = true;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-                       DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+                       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }
 
        if (desc) {
@@ -335,7 +335,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
        if (ret > 0) {
                host->dma_active = true;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-                       DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }
 
        if (desc) {
index 86f259cdfcbcbe620206cc1c479395759fb2b2ad..7a6e6cc8f8b842ddb065208aea07d093809bc79d 100644 (file)
@@ -77,7 +77,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host)
        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE);
        if (ret > 0)
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-                       DMA_FROM_DEVICE, DMA_CTRL_ACK);
+                       DMA_DEV_TO_MEM, DMA_CTRL_ACK);
 
        if (desc) {
                cookie = dmaengine_submit(desc);
@@ -158,7 +158,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host)
        ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE);
        if (ret > 0)
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
-                       DMA_TO_DEVICE, DMA_CTRL_ACK);
+                       DMA_MEM_TO_DEV, DMA_CTRL_ACK);
 
        if (desc) {
                cookie = dmaengine_submit(desc);
index 6ae9ca01388b76b06798749aba0a35cae34f8b9c..9a9ce71a71fcbb2e004c0c0eb2f9a680202ee30d 100644 (file)
@@ -119,7 +119,7 @@ static int mtd_cls_suspend(struct device *dev, pm_message_t state)
 {
        struct mtd_info *mtd = dev_get_drvdata(dev);
 
-       return mtd_suspend(mtd);
+       return mtd ? mtd_suspend(mtd) : 0;
 }
 
 static int mtd_cls_resume(struct device *dev)
index 4dd056e2e16ac3e6798300e4b1cb13dde0948fb1..35b4fb55dbd6569dad6d4c927c9807698804b882 100644 (file)
@@ -161,6 +161,37 @@ static int atmel_nand_device_ready(struct mtd_info *mtd)
                 !!host->board->rdy_pin_active_low;
 }
 
+/*
+ * Minimal-overhead PIO for data access.
+ */
+static void atmel_read_buf8(struct mtd_info *mtd, u8 *buf, int len)
+{
+       struct nand_chip        *nand_chip = mtd->priv;
+
+       __raw_readsb(nand_chip->IO_ADDR_R, buf, len);
+}
+
+static void atmel_read_buf16(struct mtd_info *mtd, u8 *buf, int len)
+{
+       struct nand_chip        *nand_chip = mtd->priv;
+
+       __raw_readsw(nand_chip->IO_ADDR_R, buf, len / 2);
+}
+
+static void atmel_write_buf8(struct mtd_info *mtd, const u8 *buf, int len)
+{
+       struct nand_chip        *nand_chip = mtd->priv;
+
+       __raw_writesb(nand_chip->IO_ADDR_W, buf, len);
+}
+
+static void atmel_write_buf16(struct mtd_info *mtd, const u8 *buf, int len)
+{
+       struct nand_chip        *nand_chip = mtd->priv;
+
+       __raw_writesw(nand_chip->IO_ADDR_W, buf, len / 2);
+}
+
 static void dma_complete_func(void *completion)
 {
        complete(completion);
@@ -235,27 +266,33 @@ err_buf:
 static void atmel_read_buf(struct mtd_info *mtd, u8 *buf, int len)
 {
        struct nand_chip *chip = mtd->priv;
+       struct atmel_nand_host *host = chip->priv;
 
        if (use_dma && len > mtd->oobsize)
                /* only use DMA for bigger than oob size: better performances */
                if (atmel_nand_dma_op(mtd, buf, len, 1) == 0)
                        return;
 
-       /* if no DMA operation possible, use PIO */
-       memcpy_fromio(buf, chip->IO_ADDR_R, len);
+       if (host->board->bus_width_16)
+               atmel_read_buf16(mtd, buf, len);
+       else
+               atmel_read_buf8(mtd, buf, len);
 }
 
 static void atmel_write_buf(struct mtd_info *mtd, const u8 *buf, int len)
 {
        struct nand_chip *chip = mtd->priv;
+       struct atmel_nand_host *host = chip->priv;
 
        if (use_dma && len > mtd->oobsize)
                /* only use DMA for bigger than oob size: better performances */
                if (atmel_nand_dma_op(mtd, (void *)buf, len, 0) == 0)
                        return;
 
-       /* if no DMA operation possible, use PIO */
-       memcpy_toio(chip->IO_ADDR_W, buf, len);
+       if (host->board->bus_width_16)
+               atmel_write_buf16(mtd, buf, len);
+       else
+               atmel_write_buf8(mtd, buf, len);
 }
 
 /*
index 2a56fc6f399a871d2794445324a920eff5a0b228..7db6555ed3ba630f2935ce65b25200295b1754db 100644 (file)
@@ -69,17 +69,19 @@ static int clear_poll_bit(void __iomem *addr, u32 mask)
  *  [1] enable the module.
  *  [2] reset the module.
  *
- * In most of the cases, it's ok. But there is a hardware bug in the BCH block.
+ * In most of the cases, it's ok.
+ * But in MX23, there is a hardware bug in the BCH block (see erratum #2847).
  * If you try to soft reset the BCH block, it becomes unusable until
  * the next hard reset. This case occurs in the NAND boot mode. When the board
  * boots by NAND, the ROM of the chip will initialize the BCH blocks itself.
  * So If the driver tries to reset the BCH again, the BCH will not work anymore.
- * You will see a DMA timeout in this case.
+ * You will see a DMA timeout in this case. The bug has been fixed
+ * in the following chips, such as MX28.
  *
  * To avoid this bug, just add a new parameter `just_enable` for
  * the mxs_reset_block(), and rewrite it here.
  */
-int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
+static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
 {
        int ret;
        int timeout = 0x400;
@@ -206,7 +208,15 @@ int bch_set_geometry(struct gpmi_nand_data *this)
        if (ret)
                goto err_out;
 
-       ret = gpmi_reset_block(r->bch_regs, true);
+       /*
+       * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
+       * chip, otherwise it will lock up. So we skip resetting BCH on the MX23.
+       * On the other hand, the MX28 needs the reset, because one case has been
+       * seen where the BCH produced ECC errors constantly after 10000
+       * consecutive reboots. The latter case has not been seen on the MX23 yet,
+       * still we don't know if it could happen there as well.
+       */
+       ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MX23(this));
        if (ret)
                goto err_out;
 
@@ -827,7 +837,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
        pio[1] = pio[2] = 0;
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_NONE, 0);
+                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc) {
                pr_err("step 1 error\n");
                return -1;
@@ -839,7 +849,7 @@ int gpmi_send_command(struct gpmi_nand_data *this)
        sg_init_one(sgl, this->cmd_buffer, this->command_length);
        dma_map_sg(this->dev, sgl, 1, DMA_TO_DEVICE);
        desc = channel->device->device_prep_slave_sg(channel,
-                                       sgl, 1, DMA_TO_DEVICE, 1);
+                                       sgl, 1, DMA_MEM_TO_DEV, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
@@ -872,7 +882,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
        pio[1] = 0;
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_NONE, 0);
+                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc) {
                pr_err("step 1 error\n");
                return -1;
@@ -881,7 +891,7 @@ int gpmi_send_data(struct gpmi_nand_data *this)
        /* [2] send DMA request */
        prepare_data_dma(this, DMA_TO_DEVICE);
        desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
-                                               1, DMA_TO_DEVICE, 1);
+                                               1, DMA_MEM_TO_DEV, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
@@ -908,7 +918,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
        pio[1] = 0;
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_NONE, 0);
+                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc) {
                pr_err("step 1 error\n");
                return -1;
@@ -917,7 +927,7 @@ int gpmi_read_data(struct gpmi_nand_data *this)
        /* [2] : send DMA request */
        prepare_data_dma(this, DMA_FROM_DEVICE);
        desc = channel->device->device_prep_slave_sg(channel, &this->data_sgl,
-                                               1, DMA_FROM_DEVICE, 1);
+                                               1, DMA_DEV_TO_MEM, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
@@ -964,7 +974,7 @@ int gpmi_send_page(struct gpmi_nand_data *this,
 
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_NONE, 0);
+                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 0);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
@@ -998,7 +1008,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
                | BF_GPMI_CTRL0_XFER_COUNT(0);
        pio[1] = 0;
        desc = channel->device->device_prep_slave_sg(channel,
-                               (struct scatterlist *)pio, 2, DMA_NONE, 0);
+                               (struct scatterlist *)pio, 2,
+                               DMA_TRANS_NONE, 0);
        if (!desc) {
                pr_err("step 1 error\n");
                return -1;
@@ -1027,7 +1038,7 @@ int gpmi_read_page(struct gpmi_nand_data *this,
        pio[5] = auxiliary;
        desc = channel->device->device_prep_slave_sg(channel,
                                        (struct scatterlist *)pio,
-                                       ARRAY_SIZE(pio), DMA_NONE, 1);
+                                       ARRAY_SIZE(pio), DMA_TRANS_NONE, 1);
        if (!desc) {
                pr_err("step 2 error\n");
                return -1;
@@ -1045,7 +1056,8 @@ int gpmi_read_page(struct gpmi_nand_data *this,
                | BF_GPMI_CTRL0_XFER_COUNT(geo->page_size);
        pio[1] = 0;
        desc = channel->device->device_prep_slave_sg(channel,
-                               (struct scatterlist *)pio, 2, DMA_NONE, 1);
+                               (struct scatterlist *)pio, 2,
+                               DMA_TRANS_NONE, 1);
        if (!desc) {
                pr_err("step 3 error\n");
                return -1;
index 35b4565050f1430d425ed3615d3c4a806cda4b7a..8a393f9e6027d1be3dcf3d149dfc277e8941fca2 100644 (file)
@@ -2588,7 +2588,7 @@ int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
        instr->state = MTD_ERASING;
 
        while (len) {
-               /* Heck if we have a bad block, we do not erase bad blocks! */
+               /* Check if we have a bad block, we do not erase bad blocks! */
                if (nand_block_checkbad(mtd, ((loff_t) page) <<
                                        chip->page_shift, 0, allowbbt)) {
                        pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
index 342626f4bc4641d197f1c322bbdfebb0619d289c..f820b26b9db3562e7878feffceeba32f13a0d4f3 100644 (file)
@@ -909,16 +909,12 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[])
        }
 }
 
-/* hw is a boolean parameter that determines whether we should try and
- * set the hw address of the device as well as the hw address of the
- * net_device
- */
-static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[], int hw)
+static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
 {
        struct net_device *dev = slave->dev;
        struct sockaddr s_addr;
 
-       if (!hw) {
+       if (slave->bond->params.mode == BOND_MODE_TLB) {
                memcpy(dev->dev_addr, addr, dev->addr_len);
                return 0;
        }
@@ -948,8 +944,8 @@ static void alb_swap_mac_addr(struct bonding *bond, struct slave *slave1, struct
        u8 tmp_mac_addr[ETH_ALEN];
 
        memcpy(tmp_mac_addr, slave1->dev->dev_addr, ETH_ALEN);
-       alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr, bond->alb_info.rlb_enabled);
-       alb_set_slave_mac_addr(slave2, tmp_mac_addr, bond->alb_info.rlb_enabled);
+       alb_set_slave_mac_addr(slave1, slave2->dev->dev_addr);
+       alb_set_slave_mac_addr(slave2, tmp_mac_addr);
 
 }
 
@@ -1096,8 +1092,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
 
                /* Try setting slave mac to bond address and fall-through
                   to code handling that situation below... */
-               alb_set_slave_mac_addr(slave, bond->dev->dev_addr,
-                                      bond->alb_info.rlb_enabled);
+               alb_set_slave_mac_addr(slave, bond->dev->dev_addr);
        }
 
        /* The slave's address is equal to the address of the bond.
@@ -1133,8 +1128,7 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
        }
 
        if (free_mac_slave) {
-               alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr,
-                                      bond->alb_info.rlb_enabled);
+               alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
 
                pr_warning("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
                           bond->dev->name, slave->dev->name,
@@ -1491,8 +1485,7 @@ int bond_alb_init_slave(struct bonding *bond, struct slave *slave)
 {
        int res;
 
-       res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr,
-                                    bond->alb_info.rlb_enabled);
+       res = alb_set_slave_mac_addr(slave, slave->perm_hwaddr);
        if (res) {
                return res;
        }
@@ -1643,8 +1636,7 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
                alb_swap_mac_addr(bond, swap_slave, new_slave);
        } else {
                /* set the new_slave to the bond mac address */
-               alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr,
-                                      bond->alb_info.rlb_enabled);
+               alb_set_slave_mac_addr(new_slave, bond->dev->dev_addr);
        }
 
        if (swap_slave) {
@@ -1704,8 +1696,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
                alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
                alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
        } else {
-               alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
-                                      bond->alb_info.rlb_enabled);
+               alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
 
                read_lock(&bond->lock);
                alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
index 7fc4e81d4d4353f6a85658d5be49ee6350264b8b..325391d19badb8366f27f726dbee042d1fe430ad 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/list.h>
+#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <net/dsa.h>
index c0a458fc698fad1306641c03ba50c1567d964598..c17c75b9f531f49c661b132267653e2984212059 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/list.h>
+#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <net/dsa.h>
@@ -20,12 +21,25 @@ static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr)
 
        ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03);
        if (ret >= 0) {
-               ret &= 0xfff0;
-               if (ret == 0x1210)
+               if (ret == 0x1212)
+                       return "Marvell 88E6123 (A1)";
+               if (ret == 0x1213)
+                       return "Marvell 88E6123 (A2)";
+               if ((ret & 0xfff0) == 0x1210)
                        return "Marvell 88E6123";
-               if (ret == 0x1610)
+
+               if (ret == 0x1612)
+                       return "Marvell 88E6161 (A1)";
+               if (ret == 0x1613)
+                       return "Marvell 88E6161 (A2)";
+               if ((ret & 0xfff0) == 0x1610)
                        return "Marvell 88E6161";
-               if (ret == 0x1650)
+
+               if (ret == 0x1652)
+                       return "Marvell 88E6165 (A1)";
+               if (ret == 0x1653)
+                       return "Marvell 88E6165 (A2)";
+               if ((ret & 0xfff0) == 0x1650)
                        return "Marvell 88E6165";
        }
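
Note: the probe change above keeps the full 16-bit product ID around so the silicon revision can be reported: the upper 12 bits of port-0 register 0x03 select the switch family and the low nibble the stepping. A hedged sketch of that split (the helper name is made up for illustration):

/* Hypothetical helper mirroring the decoding done inline in probe(). */
static void mv88e6xxx_decode_id(int id, int *family, int *rev)
{
	*family = id & 0xfff0;	/* 0x1210 = 88E6123, 0x1610 = 88E6161, 0x1650 = 88E6165 */
	*rev    = id & 0x000f;	/* e.g. 0x2 -> A1 stepping, 0x3 -> A2 */
}
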
 
index e0eb682438343b774130769b24c42472c25e1af4..55888b06d8b47af7c90564027db08e2f54bf512d 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/list.h>
+#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <net/dsa.h>
index 5467c040824a6d9d96c947f8a39c8e10e7afc753..a2c62c2f30ee40f7ecb11d460958cae0f5c94e80 100644 (file)
@@ -9,6 +9,7 @@
  */
 
 #include <linux/list.h>
+#include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <net/dsa.h>
index 2b731b253598e9f94da434cdd322cfd197b0cb74..03f3935fd8c2d60f71a1b8cf7f65726e59bd147d 100644 (file)
@@ -3117,7 +3117,7 @@ static int bnx2x_alloc_fp_mem_at(struct bnx2x *bp, int index)
        int rx_ring_size = 0;
 
 #ifdef BCM_CNIC
-       if (IS_MF_ISCSI_SD(bp)) {
+       if (!bp->rx_ring_size && IS_MF_ISCSI_SD(bp)) {
                rx_ring_size = MIN_RX_SIZE_NONTPA;
                bp->rx_ring_size = rx_ring_size;
        } else
index a688b9d975a2576f5285417d3a23ed53a5c3fb5b..31a8b38ab15ebc2bff2bf19fa7110359827d0555 100644 (file)
@@ -365,13 +365,18 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
        DP(NETIF_MSG_LINK, "cfg_idx = %x\n", cfg_idx);
 
        if (cmd->autoneg == AUTONEG_ENABLE) {
+               u32 an_supported_speed = bp->port.supported[cfg_idx];
+               if (bp->link_params.phy[EXT_PHY1].type ==
+                   PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+                       an_supported_speed |= (SUPPORTED_100baseT_Half |
+                                              SUPPORTED_100baseT_Full);
                if (!(bp->port.supported[cfg_idx] & SUPPORTED_Autoneg)) {
                        DP(NETIF_MSG_LINK, "Autoneg not supported\n");
                        return -EINVAL;
                }
 
                /* advertise the requested speed and duplex if supported */
-               if (cmd->advertising & ~(bp->port.supported[cfg_idx])) {
+               if (cmd->advertising & ~an_supported_speed) {
                        DP(NETIF_MSG_LINK, "Advertisement parameters "
                                           "are not supported\n");
                        return -EINVAL;
@@ -1733,7 +1738,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        struct bnx2x_fp_txdata *txdata = &fp_tx->txdata[0];
        u16 tx_start_idx, tx_idx;
        u16 rx_start_idx, rx_idx;
-       u16 pkt_prod, bd_prod, rx_comp_cons;
+       u16 pkt_prod, bd_prod;
        struct sw_tx_bd *tx_buf;
        struct eth_tx_start_bd *tx_start_bd;
        struct eth_tx_parse_bd_e1x  *pbd_e1x = NULL;
@@ -1868,8 +1873,7 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode)
        if (rx_idx != rx_start_idx + num_pkts)
                goto test_loopback_exit;
 
-       rx_comp_cons = le16_to_cpu(fp_rx->rx_comp_cons);
-       cqe = &fp_rx->rx_comp_ring[RCQ_BD(rx_comp_cons)];
+       cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
        cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
        cqe_fp_type = cqe_fp_flags & ETH_FAST_PATH_RX_CQE_TYPE;
        if (!CQE_TYPE_FAST(cqe_fp_type) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
@@ -2116,18 +2120,16 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
        case ETH_SS_STATS:
                if (is_multi(bp)) {
                        num_stats = bnx2x_num_stat_queues(bp) *
-                               BNX2X_NUM_Q_STATS;
-                       if (!IS_MF_MODE_STAT(bp))
-                               num_stats += BNX2X_NUM_STATS;
-               } else {
-                       if (IS_MF_MODE_STAT(bp)) {
-                               num_stats = 0;
-                               for (i = 0; i < BNX2X_NUM_STATS; i++)
-                                       if (IS_FUNC_STAT(i))
-                                               num_stats++;
-                       } else
-                               num_stats = BNX2X_NUM_STATS;
-               }
+                                               BNX2X_NUM_Q_STATS;
+               } else
+                       num_stats = 0;
+               if (IS_MF_MODE_STAT(bp)) {
+                       for (i = 0; i < BNX2X_NUM_STATS; i++)
+                               if (IS_FUNC_STAT(i))
+                                       num_stats++;
+               } else
+                       num_stats += BNX2X_NUM_STATS;
+
                return num_stats;
 
        case ETH_SS_TEST:
@@ -2146,8 +2148,8 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
 
        switch (stringset) {
        case ETH_SS_STATS:
+               k = 0;
                if (is_multi(bp)) {
-                       k = 0;
                        for_each_eth_queue(bp, i) {
                                memset(queue_name, 0, sizeof(queue_name));
                                sprintf(queue_name, "%d", i);
@@ -2158,20 +2160,17 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
                                                queue_name);
                                k += BNX2X_NUM_Q_STATS;
                        }
-                       if (IS_MF_MODE_STAT(bp))
-                               break;
-                       for (j = 0; j < BNX2X_NUM_STATS; j++)
-                               strcpy(buf + (k + j)*ETH_GSTRING_LEN,
-                                      bnx2x_stats_arr[j].string);
-               } else {
-                       for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-                               if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
-                                       continue;
-                               strcpy(buf + j*ETH_GSTRING_LEN,
-                                      bnx2x_stats_arr[i].string);
-                               j++;
-                       }
                }
+
+
+               for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+                       if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+                               continue;
+                       strcpy(buf + (k + j)*ETH_GSTRING_LEN,
+                                  bnx2x_stats_arr[i].string);
+                       j++;
+               }
+
                break;
 
        case ETH_SS_TEST:
@@ -2185,10 +2184,9 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
 {
        struct bnx2x *bp = netdev_priv(dev);
        u32 *hw_stats, *offset;
-       int i, j, k;
+       int i, j, k = 0;
 
        if (is_multi(bp)) {
-               k = 0;
                for_each_eth_queue(bp, i) {
                        hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
                        for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
@@ -2209,46 +2207,28 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
                        }
                        k += BNX2X_NUM_Q_STATS;
                }
-               if (IS_MF_MODE_STAT(bp))
-                       return;
-               hw_stats = (u32 *)&bp->eth_stats;
-               for (j = 0; j < BNX2X_NUM_STATS; j++) {
-                       if (bnx2x_stats_arr[j].size == 0) {
-                               /* skip this counter */
-                               buf[k + j] = 0;
-                               continue;
-                       }
-                       offset = (hw_stats + bnx2x_stats_arr[j].offset);
-                       if (bnx2x_stats_arr[j].size == 4) {
-                               /* 4-byte counter */
-                               buf[k + j] = (u64) *offset;
-                               continue;
-                       }
-                       /* 8-byte counter */
-                       buf[k + j] = HILO_U64(*offset, *(offset + 1));
+       }
+
+       hw_stats = (u32 *)&bp->eth_stats;
+       for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
+               if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
+                       continue;
+               if (bnx2x_stats_arr[i].size == 0) {
+                       /* skip this counter */
+                       buf[k + j] = 0;
+                       j++;
+                       continue;
                }
-       } else {
-               hw_stats = (u32 *)&bp->eth_stats;
-               for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
-                       if (IS_MF_MODE_STAT(bp) && IS_PORT_STAT(i))
-                               continue;
-                       if (bnx2x_stats_arr[i].size == 0) {
-                               /* skip this counter */
-                               buf[j] = 0;
-                               j++;
-                               continue;
-                       }
-                       offset = (hw_stats + bnx2x_stats_arr[i].offset);
-                       if (bnx2x_stats_arr[i].size == 4) {
-                               /* 4-byte counter */
-                               buf[j] = (u64) *offset;
-                               j++;
-                               continue;
-                       }
-                       /* 8-byte counter */
-                       buf[j] = HILO_U64(*offset, *(offset + 1));
+               offset = (hw_stats + bnx2x_stats_arr[i].offset);
+               if (bnx2x_stats_arr[i].size == 4) {
+                       /* 4-byte counter */
+                       buf[k + j] = (u64) *offset;
                        j++;
+                       continue;
                }
+               /* 8-byte counter */
+               buf[k + j] = HILO_U64(*offset, *(offset + 1));
+               j++;
        }
 }
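
Note: with the restructuring above, the per-queue statistics are copied first (advancing k) and the global bp->eth_stats counters are then appended by the same loop whether or not multiqueue is active. The copy itself only distinguishes 4-byte counters from hi/lo 8-byte pairs; a minimal sketch of that pattern, with HILO_U64 expanded by hand for illustration:

/* Sketch only: counters live as 32-bit words in the adapter stats block. */
static u64 copy_one_stat(const u32 *hw_stats, u32 offset, u32 size)
{
	const u32 *p = hw_stats + offset;

	if (size == 4)
		return (u64)*p;			/* 4-byte counter */
	return ((u64)p[0] << 32) | p[1];	/* 8-byte counter, HILO_U64(hi, lo) */
}
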
 
index 4df9505b67b62e1d482460cc83fcfb6999a83746..2091e5dbbcdd9f50d0d0dec422cd2e1c3aac2d4f 100644 (file)
@@ -2502,7 +2502,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
                struct bnx2x_nig_brb_pfc_port_params *nig_params)
 {
        u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
-       u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
+       u32 llfc_enable = 0, xcm_out_en = 0, hwpfc_enable = 0;
        u32 pkt_priority_to_cos = 0;
        struct bnx2x *bp = params->bp;
        u8 port = params->port;
@@ -2516,9 +2516,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
         * MAC control frames (that are not pause packets)
         * will be forwarded to the XCM.
         */
-       xcm_mask = REG_RD(bp,
-                               port ? NIG_REG_LLH1_XCM_MASK :
-                               NIG_REG_LLH0_XCM_MASK);
+       xcm_mask = REG_RD(bp, port ? NIG_REG_LLH1_XCM_MASK :
+                         NIG_REG_LLH0_XCM_MASK);
        /*
         * nig params will override non PFC params, since it's possible to
         * do transition from PFC to SAFC
@@ -2533,8 +2532,8 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
                ppp_enable = 1;
                xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
                                     NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
-               xcm0_out_en = 0;
-               p0_hwpfc_enable = 1;
+               xcm_out_en = 0;
+               hwpfc_enable = 1;
        } else  {
                if (nig_params) {
                        llfc_out_en = nig_params->llfc_out_en;
@@ -2545,7 +2544,7 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
 
                xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
                        NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
-               xcm0_out_en = 1;
+               xcm_out_en = 1;
        }
 
        if (CHIP_IS_E3(bp))
@@ -2564,13 +2563,16 @@ static void bnx2x_update_pfc_nig(struct link_params *params,
        REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
               NIG_REG_LLH0_XCM_MASK, xcm_mask);
 
-       REG_WR(bp,  NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
+       REG_WR(bp, port ? NIG_REG_LLFC_EGRESS_SRC_ENABLE_1 :
+              NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
 
        /* output enable for RX_XCM # IF */
-       REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
+       REG_WR(bp, port ? NIG_REG_XCM1_OUT_EN :
+              NIG_REG_XCM0_OUT_EN, xcm_out_en);
 
        /* HW PFC TX enable */
-       REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
+       REG_WR(bp, port ? NIG_REG_P1_HWPFC_ENABLE :
+              NIG_REG_P0_HWPFC_ENABLE, hwpfc_enable);
 
        if (nig_params) {
                u8 i = 0;
@@ -3761,7 +3763,15 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
        /* Advertise pause */
        bnx2x_ext_phy_set_pause(params, phy, vars);
 
-       vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+       /*
+        * Set KR Autoneg Work-Around flag for Warpcore version older than D108
+        */
+       bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
+                       MDIO_WC_REG_UC_INFO_B1_VERSION, &val16);
+       if (val16 < 0xd108) {
+               DP(NETIF_MSG_LINK, "Enable AN KR work-around\n");
+               vars->rx_tx_asic_rst = MAX_KR_LINK_RETRY;
+       }
 
        bnx2x_cl45_read(bp, phy, MDIO_WC_DEVAD,
                        MDIO_WC_REG_DIGITAL5_MISC7, &val16);
@@ -9266,62 +9276,68 @@ static void bnx2x_8727_link_reset(struct bnx2x_phy *phy,
 /*             BCM8481/BCM84823/BCM84833 PHY SECTION             */
 /******************************************************************/
 static void bnx2x_save_848xx_spirom_version(struct bnx2x_phy *phy,
-                                          struct link_params *params)
+                                           struct bnx2x *bp,
+                                           u8 port)
 {
        u16 val, fw_ver1, fw_ver2, cnt;
-       u8 port;
-       struct bnx2x *bp = params->bp;
 
-       port = params->port;
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+               bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
+               bnx2x_save_spirom_version(bp, port,
+                               ((fw_ver1 & 0xf000)>>5) | (fw_ver1 & 0x7f),
+                               phy->ver_addr);
+       } else {
+               /* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
+               /* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
+
+               for (cnt = 0; cnt < 100; cnt++) {
+                       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+                       if (val & 1)
+                               break;
+                       udelay(5);
+               }
+               if (cnt == 100) {
+                       DP(NETIF_MSG_LINK, "Unable to read 848xx "
+                                       "phy fw version(1)\n");
+                       bnx2x_save_spirom_version(bp, port, 0,
+                                                 phy->ver_addr);
+                       return;
+               }
 
-       /* For the 32 bits registers in 848xx, access via MDIO2ARM interface.*/
-       /* (1) set register 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0014);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81B, 0x0000);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81C, 0x0300);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x0009);
 
-       for (cnt = 0; cnt < 100; cnt++) {
-               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
-               if (val & 1)
-                       break;
-               udelay(5);
-       }
-       if (cnt == 100) {
-               DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(1)\n");
-               bnx2x_save_spirom_version(bp, port, 0,
-                                         phy->ver_addr);
-               return;
-       }
+               /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
+               bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
+               for (cnt = 0; cnt < 100; cnt++) {
+                       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
+                       if (val & 1)
+                               break;
+                       udelay(5);
+               }
+               if (cnt == 100) {
+                       DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw "
+                                       "version(2)\n");
+                       bnx2x_save_spirom_version(bp, port, 0,
+                                                 phy->ver_addr);
+                       return;
+               }
 
+               /* lower 16 bits of the register SPI_FW_STATUS */
+               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
+               /* upper 16 bits of register SPI_FW_STATUS */
+               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
 
-       /* 2) read register 0xc200_0000 (SPI_FW_STATUS) */
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA819, 0x0000);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA81A, 0xc200);
-       bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, 0xA817, 0x000A);
-       for (cnt = 0; cnt < 100; cnt++) {
-               bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA818, &val);
-               if (val & 1)
-                       break;
-               udelay(5);
-       }
-       if (cnt == 100) {
-               DP(NETIF_MSG_LINK, "Unable to read 848xx phy fw version(2)\n");
-               bnx2x_save_spirom_version(bp, port, 0,
+               bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
                                          phy->ver_addr);
-               return;
        }
 
-       /* lower 16 bits of the register SPI_FW_STATUS */
-       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81B, &fw_ver1);
-       /* upper 16 bits of register SPI_FW_STATUS */
-       bnx2x_cl45_read(bp, phy, MDIO_PMA_DEVAD, 0xA81C, &fw_ver2);
-
-       bnx2x_save_spirom_version(bp, port, (fw_ver2<<16) | fw_ver1,
-                                 phy->ver_addr);
 }
-
 static void bnx2x_848xx_set_led(struct bnx2x *bp,
                                struct bnx2x_phy *phy)
 {
@@ -9392,10 +9408,13 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
        u16 tmp_req_line_speed;
 
        tmp_req_line_speed = phy->req_line_speed;
-       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
                if (phy->req_line_speed == SPEED_10000)
                        phy->req_line_speed = SPEED_AUTO_NEG;
-
+       } else {
+               /* Save spirom version */
+               bnx2x_save_848xx_spirom_version(phy, bp, params->port);
+       }
        /*
         * This phy uses the NIG latch mechanism since link indication
         * arrives through its LED4 and not via its LASI signal, so we
@@ -9443,13 +9462,10 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                         an_1000_val);
 
        /* set 100 speed advertisement */
-       if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+       if ((phy->req_line_speed == SPEED_AUTO_NEG) &&
             (phy->speed_cap_mask &
              (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
-              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)) &&
-            (phy->supported &
-             (SUPPORTED_100baseT_Half |
-              SUPPORTED_100baseT_Full)))) {
+              PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))) {
                an_10_100_val |= (1<<7);
                /* Enable autoneg and restart autoneg for legacy speeds */
                autoneg_val |= (1<<9 | 1<<12);
@@ -9539,9 +9555,6 @@ static int bnx2x_848xx_cmn_config_init(struct bnx2x_phy *phy,
                                 MDIO_AN_REG_8481_10GBASE_T_AN_CTRL,
                                 1);
 
-       /* Save spirom version */
-       bnx2x_save_848xx_spirom_version(phy, params);
-
        phy->req_line_speed = tmp_req_line_speed;
 
        return 0;
@@ -9749,17 +9762,7 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
 
        /* Wait for GPHY to come out of reset */
        msleep(50);
-       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
-               /* Bring PHY out of super isolate mode */
-               bnx2x_cl45_read(bp, phy,
-                               MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
-               val &= ~MDIO_84833_SUPER_ISOLATE;
-               bnx2x_cl45_write(bp, phy,
-                               MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
-               bnx2x_84833_pair_swap_cfg(phy, params, vars);
-       } else {
+       if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
                /*
                 * BCM84823 requires that XGXS links up first @ 10G for normal
                 * behavior.
@@ -9816,24 +9819,23 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
        DP(NETIF_MSG_LINK, "Multi_phy config = 0x%x, Media control = 0x%x\n",
                   params->multi_phy_config, val);
 
-       /* AutogrEEEn */
-       if (params->feature_config_flags &
-               FEATURE_CONFIG_AUTOGREEEN_ENABLED)
-               cmd_args[0] = 0x2;
-       else
-               cmd_args[0] = 0x0;
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+               bnx2x_84833_pair_swap_cfg(phy, params, vars);
 
-       cmd_args[1] = 0x0;
-       cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
-       cmd_args[3] = PHY84833_CONSTANT_LATENCY;
-       rc = bnx2x_84833_cmd_hdlr(phy, params,
-               PHY84833_CMD_SET_EEE_MODE, cmd_args);
-       if (rc != 0)
-               DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
+               /* Keep AutogrEEEn disabled. */
+               cmd_args[0] = 0x0;
+               cmd_args[1] = 0x0;
+               cmd_args[2] = PHY84833_CONSTANT_LATENCY + 1;
+               cmd_args[3] = PHY84833_CONSTANT_LATENCY;
+               rc = bnx2x_84833_cmd_hdlr(phy, params,
+                       PHY84833_CMD_SET_EEE_MODE, cmd_args);
+               if (rc != 0)
+                       DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
+       }
        if (initialize)
                rc = bnx2x_848xx_cmn_config_init(phy, params, vars);
        else
-               bnx2x_save_848xx_spirom_version(phy, params);
+               bnx2x_save_848xx_spirom_version(phy, bp, params->port);
        /* 84833 PHY has a better feature and doesn't need to support this. */
        if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823) {
                cms_enable = REG_RD(bp, params->shmem_base +
@@ -9851,6 +9853,16 @@ static int bnx2x_848x3_config_init(struct bnx2x_phy *phy,
                                 MDIO_CTL_REG_84823_USER_CTRL_REG, val);
        }
 
+       if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+               /* Bring PHY out of super isolate mode as the final step. */
+               bnx2x_cl45_read(bp, phy,
+                               MDIO_CTL_DEVAD,
+                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+               val &= ~MDIO_84833_SUPER_ISOLATE;
+               bnx2x_cl45_write(bp, phy,
+                               MDIO_CTL_DEVAD,
+                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+       }
        return rc;
 }
 
@@ -9988,10 +10000,11 @@ static void bnx2x_848x3_link_reset(struct bnx2x_phy *phy,
        } else {
                bnx2x_cl45_read(bp, phy,
                                MDIO_CTL_DEVAD,
-                               0x400f, &val16);
+                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val16);
+               val16 |= MDIO_84833_SUPER_ISOLATE;
                bnx2x_cl45_write(bp, phy,
-                               MDIO_PMA_DEVAD,
-                               MDIO_PMA_REG_CTRL, 0x800);
+                                MDIO_CTL_DEVAD,
+                                MDIO_84833_TOP_CFG_XGPHY_STRAP1, val16);
        }
 }
 
@@ -11516,6 +11529,19 @@ static int bnx2x_populate_ext_phy(struct bnx2x *bp,
        }
        phy->mdio_ctrl = bnx2x_get_emac_base(bp, mdc_mdio_access, port);
 
+       if ((phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) &&
+           (phy->ver_addr)) {
+               /*
+                * Remove 100Mb link supported for BCM84833 when phy fw
+                * version lower than or equal to 1.39
+                */
+               u32 raw_ver = REG_RD(bp, phy->ver_addr);
+               if (((raw_ver & 0x7F) <= 39) &&
+                   (((raw_ver & 0xF80) >> 7) <= 1))
+                       phy->supported &= ~(SUPPORTED_100baseT_Half |
+                                           SUPPORTED_100baseT_Full);
+       }
+
        /*
         * In case mdc/mdio_access of the external phy is different than the
         * mdc/mdio access of the XGXS, a HW lock must be taken in each access
@@ -12333,55 +12359,69 @@ static int bnx2x_84833_common_init_phy(struct bnx2x *bp,
                                                u32 chip_id)
 {
        u8 reset_gpios;
-       struct bnx2x_phy phy;
-       u32 shmem_base, shmem2_base, cnt;
-       s8 port = 0;
-       u16 val;
-
        reset_gpios = bnx2x_84833_get_reset_gpios(bp, shmem_base_path, chip_id);
        bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_LOW);
        udelay(10);
        bnx2x_set_mult_gpio(bp, reset_gpios, MISC_REGISTERS_GPIO_OUTPUT_HIGH);
        DP(NETIF_MSG_LINK, "84833 reset pulse on pin values 0x%x\n",
                reset_gpios);
-       for (port = PORT_MAX - 1; port >= PORT_0; port--) {
-               /* This PHY is for E2 and E3. */
-               shmem_base = shmem_base_path[port];
-               shmem2_base = shmem2_base_path[port];
-               /* Extract the ext phy address for the port */
-               if (bnx2x_populate_phy(bp, phy_index, shmem_base, shmem2_base,
-                                      0, &phy) !=
-                   0) {
-                       DP(NETIF_MSG_LINK, "populate_phy failed\n");
-                       return -EINVAL;
-               }
+       return 0;
+}
 
-               /* Wait for FW completing its initialization. */
-               for (cnt = 0; cnt < 1000; cnt++) {
-                       bnx2x_cl45_read(bp, &phy,
+static int bnx2x_84833_pre_init_phy(struct bnx2x *bp,
+                                              struct bnx2x_phy *phy)
+{
+       u16 val, cnt;
+       /* Wait for FW completing its initialization. */
+       for (cnt = 0; cnt < 1500; cnt++) {
+               bnx2x_cl45_read(bp, phy,
                                MDIO_PMA_DEVAD,
                                MDIO_PMA_REG_CTRL, &val);
-                       if (!(val & (1<<15)))
-                               break;
-                       msleep(1);
-               }
-               if (cnt >= 1000)
-                       DP(NETIF_MSG_LINK,
-                               "84833 Cmn reset timeout (%d)\n", port);
-
-               /* Put the port in super isolate mode. */
-               bnx2x_cl45_read(bp, &phy,
-                               MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
-               val |= MDIO_84833_SUPER_ISOLATE;
-               bnx2x_cl45_write(bp, &phy,
-                               MDIO_CTL_DEVAD,
-                               MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+               if (!(val & (1<<15)))
+                       break;
+               msleep(1);
+       }
+       if (cnt >= 1500) {
+               DP(NETIF_MSG_LINK, "84833 reset timeout\n");
+               return -EINVAL;
        }
 
+       /* Put the port in super isolate mode. */
+       bnx2x_cl45_read(bp, phy,
+                       MDIO_CTL_DEVAD,
+                       MDIO_84833_TOP_CFG_XGPHY_STRAP1, &val);
+       val |= MDIO_84833_SUPER_ISOLATE;
+       bnx2x_cl45_write(bp, phy,
+                        MDIO_CTL_DEVAD,
+                        MDIO_84833_TOP_CFG_XGPHY_STRAP1, val);
+
+       /* Save spirom version */
+       bnx2x_save_848xx_spirom_version(phy, bp, PORT_0);
        return 0;
 }
 
+int bnx2x_pre_init_phy(struct bnx2x *bp,
+                                 u32 shmem_base,
+                                 u32 shmem2_base,
+                                 u32 chip_id)
+{
+       int rc = 0;
+       struct bnx2x_phy phy;
+       bnx2x_set_mdio_clk(bp, chip_id, PORT_0);
+       if (bnx2x_populate_phy(bp, EXT_PHY1, shmem_base, shmem2_base,
+                              PORT_0, &phy)) {
+               DP(NETIF_MSG_LINK, "populate_phy failed\n");
+               return -EINVAL;
+       }
+       switch (phy.type) {
+       case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833:
+               rc = bnx2x_84833_pre_init_phy(bp, &phy);
+               break;
+       default:
+               break;
+       }
+       return rc;
+}
 
 static int bnx2x_ext_phy_common_init(struct bnx2x *bp, u32 shmem_base_path[],
                                     u32 shmem2_base_path[], u8 phy_index,
index ffeaaa95ed96eb983cfd60d5c5c2395a92b3b952..1e3f978ee6daffea0107268f77c3a1eec0ee664b 100644 (file)
@@ -941,7 +941,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];
 
                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
-                                 i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
+                                 i, j, rx_bd[1], rx_bd[0], sw_bd->data);
                }
 
                start = RX_SGE(fp->rx_sge_prod);
@@ -10536,6 +10536,9 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
 {
        struct bnx2x *bp;
        int rc;
+       bool chip_is_e1x = (board_type == BCM57710 ||
+                           board_type == BCM57711 ||
+                           board_type == BCM57711E);
 
        SET_NETDEV_DEV(dev, &pdev->dev);
        bp = netdev_priv(dev);
@@ -10624,7 +10627,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
        REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0, 0);
        REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0, 0);
 
-       if (CHIP_IS_E1x(bp)) {
+       if (chip_is_e1x) {
                REG_WR(bp, PXP2_REG_PGL_ADDR_88_F1, 0);
                REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F1, 0);
                REG_WR(bp, PXP2_REG_PGL_ADDR_90_F1, 0);
@@ -10635,9 +10638,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
         * Enable internal target-read (in case we are probed after PF FLR).
         * Must be done prior to any BAR read access. Only for 57712 and up
         */
-       if (board_type != BCM57710 &&
-           board_type != BCM57711 &&
-           board_type != BCM57711E)
+       if (!chip_is_e1x)
                REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1);
 
        /* Reset the load counter */
index 44609de4e5dc59f7ecbe41d3ea8c0e01f17d837e..dddbcf6e154ec58d8d1f0720706df5ad0076c338 100644 (file)
  * set to 0x345678021. This is a new register (with 2_) added in E3 B0 to
  * accommodate the 9 input clients to ETS arbiter. */
 #define NIG_REG_P0_TX_ARB_PRIORITY_CLIENT2_MSB                  0x18684
+#define NIG_REG_P1_HWPFC_ENABLE                                         0x181d0
 #define NIG_REG_P1_MAC_IN_EN                                    0x185c0
 /* [RW 1] Output enable for TX MAC interface */
 #define NIG_REG_P1_MAC_OUT_EN                                   0x185c4
index 5ac616093f9f7d4c4dfd5696dd966ff614091c45..cb6339c35571f6a9d86b85d92324fb4680b80406 100644 (file)
@@ -50,6 +50,7 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
                                        int exe_len,
                                        union bnx2x_qable_obj *owner,
                                        exe_q_validate validate,
+                                       exe_q_remove remove,
                                        exe_q_optimize optimize,
                                        exe_q_execute exec,
                                        exe_q_get get)
@@ -66,6 +67,7 @@ static inline void bnx2x_exe_queue_init(struct bnx2x *bp,
 
        /* Owner specific callbacks */
        o->validate      = validate;
+       o->remove        = remove;
        o->optimize      = optimize;
        o->execute       = exec;
        o->get           = get;
@@ -1340,6 +1342,35 @@ static int bnx2x_validate_vlan_mac(struct bnx2x *bp,
        }
 }
 
+static int bnx2x_remove_vlan_mac(struct bnx2x *bp,
+                                 union bnx2x_qable_obj *qo,
+                                 struct bnx2x_exeq_elem *elem)
+{
+       int rc = 0;
+
+       /* If consumption wasn't required, nothing to do */
+       if (test_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+                    &elem->cmd_data.vlan_mac.vlan_mac_flags))
+               return 0;
+
+       switch (elem->cmd_data.vlan_mac.cmd) {
+       case BNX2X_VLAN_MAC_ADD:
+       case BNX2X_VLAN_MAC_MOVE:
+               rc = qo->vlan_mac.put_credit(&qo->vlan_mac);
+               break;
+       case BNX2X_VLAN_MAC_DEL:
+               rc = qo->vlan_mac.get_credit(&qo->vlan_mac);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       if (rc != true)
+               return -EINVAL;
+
+       return 0;
+}
+
 /**
  * bnx2x_wait_vlan_mac - passively wait for 5 seconds until all work completes.
  *
@@ -1801,8 +1832,14 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
 
        list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
                if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
-                   *vlan_mac_flags)
+                   *vlan_mac_flags) {
+                       rc = exeq->remove(bp, exeq->owner, exeq_pos);
+                       if (rc) {
+                               BNX2X_ERR("Failed to remove command\n");
+                               return rc;
+                       }
                        list_del(&exeq_pos->link);
+               }
        }
 
        spin_unlock_bh(&exeq->lock);
@@ -1908,6 +1945,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
                bnx2x_exe_queue_init(bp,
                                     &mac_obj->exe_queue, 1, qable_obj,
                                     bnx2x_validate_vlan_mac,
+                                    bnx2x_remove_vlan_mac,
                                     bnx2x_optimize_vlan_mac,
                                     bnx2x_execute_vlan_mac,
                                     bnx2x_exeq_get_mac);
@@ -1924,6 +1962,7 @@ void bnx2x_init_mac_obj(struct bnx2x *bp,
                bnx2x_exe_queue_init(bp,
                                     &mac_obj->exe_queue, CLASSIFY_RULES_COUNT,
                                     qable_obj, bnx2x_validate_vlan_mac,
+                                    bnx2x_remove_vlan_mac,
                                     bnx2x_optimize_vlan_mac,
                                     bnx2x_execute_vlan_mac,
                                     bnx2x_exeq_get_mac);
@@ -1963,6 +2002,7 @@ void bnx2x_init_vlan_obj(struct bnx2x *bp,
                bnx2x_exe_queue_init(bp,
                                     &vlan_obj->exe_queue, CLASSIFY_RULES_COUNT,
                                     qable_obj, bnx2x_validate_vlan_mac,
+                                    bnx2x_remove_vlan_mac,
                                     bnx2x_optimize_vlan_mac,
                                     bnx2x_execute_vlan_mac,
                                     bnx2x_exeq_get_vlan);
@@ -2009,6 +2049,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
                bnx2x_exe_queue_init(bp,
                                     &vlan_mac_obj->exe_queue, 1, qable_obj,
                                     bnx2x_validate_vlan_mac,
+                                    bnx2x_remove_vlan_mac,
                                     bnx2x_optimize_vlan_mac,
                                     bnx2x_execute_vlan_mac,
                                     bnx2x_exeq_get_vlan_mac);
@@ -2025,6 +2066,7 @@ void bnx2x_init_vlan_mac_obj(struct bnx2x *bp,
                                     &vlan_mac_obj->exe_queue,
                                     CLASSIFY_RULES_COUNT,
                                     qable_obj, bnx2x_validate_vlan_mac,
+                                    bnx2x_remove_vlan_mac,
                                     bnx2x_optimize_vlan_mac,
                                     bnx2x_execute_vlan_mac,
                                     bnx2x_exeq_get_vlan_mac);
index 992308ff82e845b8698f1647a258b8bdc968aa7e..66da39f0c84a4f4b476eaaea82af8afcc21b0fe9 100644 (file)
@@ -161,6 +161,10 @@ typedef int (*exe_q_validate)(struct bnx2x *bp,
                              union bnx2x_qable_obj *o,
                              struct bnx2x_exeq_elem *elem);
 
+typedef int (*exe_q_remove)(struct bnx2x *bp,
+                           union bnx2x_qable_obj *o,
+                           struct bnx2x_exeq_elem *elem);
+
 /**
  * @return positive is entry was optimized, 0 - if not, negative
  *         in case of an error.
@@ -203,11 +207,18 @@ struct bnx2x_exe_queue_obj {
         */
        exe_q_validate          validate;
 
+       /**
+        * Called before removing pending commands, cleaning allocated
+        * resources (e.g., credits from validate)
+        */
+        exe_q_remove           remove;
 
        /**
         * This will try to cancel the current pending commands list
         * considering the new command.
         *
+        * Returns the number of optimized commands or a negative error code
+        *
         * Must run under exe_queue->lock
         */
        exe_q_optimize          optimize;
index 076e02a415a09d09878db3ea1c0071c03ac4a685..a1f2e0fed78bc2b23b7caa2c1457255e6eae9b89 100644 (file)
@@ -6667,14 +6667,9 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
                iph = ip_hdr(skb);
                tcp_opt_len = tcp_optlen(skb);
 
-               if (skb_is_gso_v6(skb)) {
-                       hdr_len = skb_headlen(skb) - ETH_HLEN;
-               } else {
-                       u32 ip_tcp_len;
-
-                       ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
-                       hdr_len = ip_tcp_len + tcp_opt_len;
+               hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
 
+               if (!skb_is_gso_v6(skb)) {
                        iph->check = 0;
                        iph->tot_len = htons(mss + hdr_len);
                }
@@ -8846,9 +8841,11 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
        udelay(100);
 
-       if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1) {
+       if (tg3_flag(tp, USING_MSIX)) {
                val = tr32(MSGINT_MODE);
-               val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+               val |= MSGINT_MODE_ENABLE;
+               if (tp->irq_cnt > 1)
+                       val |= MSGINT_MODE_MULTIVEC_EN;
                if (!tg3_flag(tp, 1SHOT_MSI))
                        val |= MSGINT_MODE_ONE_SHOT_DISABLE;
                tw32(MSGINT_MODE, val);
@@ -9548,19 +9545,18 @@ static int tg3_request_firmware(struct tg3 *tp)
 
 static bool tg3_enable_msix(struct tg3 *tp)
 {
-       int i, rc, cpus = num_online_cpus();
+       int i, rc;
        struct msix_entry msix_ent[tp->irq_max];
 
-       if (cpus == 1)
-               /* Just fallback to the simpler MSI mode. */
-               return false;
-
-       /*
-        * We want as many rx rings enabled as there are cpus.
-        * The first MSIX vector only deals with link interrupts, etc,
-        * so we add one to the number of vectors we are requesting.
-        */
-       tp->irq_cnt = min_t(unsigned, cpus + 1, tp->irq_max);
+       tp->irq_cnt = num_online_cpus();
+       if (tp->irq_cnt > 1) {
+               /* We want as many rx rings enabled as there are cpus.
+                * In multiqueue MSI-X mode, the first MSI-X vector
+                * only deals with link interrupts, etc, so we add
+                * one to the number of vectors we are requesting.
+                */
+               tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
+       }
 
        for (i = 0; i < tp->irq_max; i++) {
                msix_ent[i].entry  = i;
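
Note: the tg3 hunk above folds the single-CPU case into the common path: MSI-X is always requested, and the extra link-interrupt vector (plus MULTIVEC mode) is only added when more than one CPU is online. A small illustrative function for the resulting vector count, not taken from the driver:

static unsigned int tg3_msix_vector_count(unsigned int online_cpus,
					  unsigned int irq_max)
{
	unsigned int n = online_cpus;

	/* One rx ring per CPU plus a dedicated link vector, capped at irq_max. */
	if (n > 1)
		n = (n + 1 < irq_max) ? n + 1 : irq_max;
	return n;
}
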
index fe0c29acdbe6da46bd99386a13c528b57f19c04b..ee93a2087fe6c0cfd116dcf3f4536185480ba362 100644 (file)
@@ -32,7 +32,7 @@
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "2.1.1.28"
+#define DRV_VERSION            "2.1.1.31"
 #define DRV_COPYRIGHT          "Copyright 2008-2011 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
index 2fd9db4b1be57b440b1d07ab5d8e582c49625d05..ab3f67f980d8c3f7554d19bf3fdbaf275deefed3 100644 (file)
 
 #define PCI_DEVICE_ID_CISCO_VIC_ENET         0x0043  /* ethernet vnic */
 #define PCI_DEVICE_ID_CISCO_VIC_ENET_DYN     0x0044  /* enet dynamic vnic */
+#define PCI_DEVICE_ID_CISCO_VIC_ENET_VF      0x0071  /* enet SRIOV VF */
 
 /* Supported devices */
 static DEFINE_PCI_DEVICE_TABLE(enic_id_table) = {
        { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET) },
        { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_DYN) },
+       { PCI_VDEVICE(CISCO, PCI_DEVICE_ID_CISCO_VIC_ENET_VF) },
        { 0, }  /* end of table */
 };
 
@@ -132,6 +134,11 @@ int enic_sriov_enabled(struct enic *enic)
        return (enic->priv_flags & ENIC_SRIOV_ENABLED) ? 1 : 0;
 }
 
+static int enic_is_sriov_vf(struct enic *enic)
+{
+       return enic->pdev->device == PCI_DEVICE_ID_CISCO_VIC_ENET_VF;
+}
+
 int enic_is_valid_vf(struct enic *enic, int vf)
 {
 #ifdef CONFIG_PCI_IOV
@@ -437,7 +444,7 @@ static void enic_mtu_check(struct enic *enic)
 
        if (mtu && mtu != enic->port_mtu) {
                enic->port_mtu = mtu;
-               if (enic_is_dynamic(enic)) {
+               if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
                        mtu = max_t(int, ENIC_MIN_MTU,
                                min_t(int, ENIC_MAX_MTU, mtu));
                        if (mtu != netdev->mtu)
@@ -849,7 +856,7 @@ static int enic_set_mac_addr(struct net_device *netdev, char *addr)
 {
        struct enic *enic = netdev_priv(netdev);
 
-       if (enic_is_dynamic(enic)) {
+       if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic)) {
                if (!is_valid_ether_addr(addr) && !is_zero_ether_addr(addr))
                        return -EADDRNOTAVAIL;
        } else {
@@ -1608,7 +1615,7 @@ static int enic_open(struct net_device *netdev)
        for (i = 0; i < enic->rq_count; i++)
                vnic_rq_enable(&enic->rq[i]);
 
-       if (!enic_is_dynamic(enic))
+       if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
                enic_dev_add_station_addr(enic);
 
        enic_set_rx_mode(netdev);
@@ -1659,7 +1666,7 @@ static int enic_stop(struct net_device *netdev)
        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
 
-       if (!enic_is_dynamic(enic))
+       if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
                enic_dev_del_station_addr(enic);
 
        for (i = 0; i < enic->wq_count; i++) {
@@ -1696,7 +1703,7 @@ static int enic_change_mtu(struct net_device *netdev, int new_mtu)
        if (new_mtu < ENIC_MIN_MTU || new_mtu > ENIC_MAX_MTU)
                return -EINVAL;
 
-       if (enic_is_dynamic(enic))
+       if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
                return -EOPNOTSUPP;
 
        if (running)
@@ -2263,10 +2270,10 @@ static int __devinit enic_probe(struct pci_dev *pdev,
        int using_dac = 0;
        unsigned int i;
        int err;
-       int num_pps = 1;
 #ifdef CONFIG_PCI_IOV
        int pos = 0;
 #endif
+       int num_pps = 1;
 
        /* Allocate net device structure and initialize.  Private
         * instance data is initialized to zero.
@@ -2376,14 +2383,14 @@ static int __devinit enic_probe(struct pci_dev *pdev,
                        num_pps = enic->num_vfs;
                }
        }
-
 #endif
+
        /* Allocate structure for port profiles */
        enic->pp = kcalloc(num_pps, sizeof(*enic->pp), GFP_KERNEL);
        if (!enic->pp) {
                pr_err("port profile alloc failed, aborting\n");
                err = -ENOMEM;
-               goto err_out_disable_sriov;
+               goto err_out_disable_sriov_pp;
        }
 
        /* Issue device open to get device in known state
@@ -2392,7 +2399,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
        err = enic_dev_open(enic);
        if (err) {
                dev_err(dev, "vNIC dev open failed, aborting\n");
-               goto err_out_free_pp;
+               goto err_out_disable_sriov;
        }
 
        /* Setup devcmd lock
@@ -2426,7 +2433,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
         * called later by an upper layer.
         */
 
-       if (!enic_is_dynamic(enic)) {
+       if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic)) {
                err = vnic_dev_init(enic->vdev, 0);
                if (err) {
                        dev_err(dev, "vNIC dev init failed, aborting\n");
@@ -2460,8 +2467,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
        (void)enic_change_mtu(netdev, enic->port_mtu);
 
 #ifdef CONFIG_PCI_IOV
-       if (enic_is_dynamic(enic) && pdev->is_virtfn &&
-               is_zero_ether_addr(enic->mac_addr))
+       if (enic_is_sriov_vf(enic) && is_zero_ether_addr(enic->mac_addr))
                random_ether_addr(enic->mac_addr);
 #endif
 
@@ -2474,7 +2480,7 @@ static int __devinit enic_probe(struct pci_dev *pdev,
        enic->tx_coalesce_usecs = enic->config.intr_timer_usec;
        enic->rx_coalesce_usecs = enic->tx_coalesce_usecs;
 
-       if (enic_is_dynamic(enic))
+       if (enic_is_dynamic(enic) || enic_is_sriov_vf(enic))
                netdev->netdev_ops = &enic_netdev_dynamic_ops;
        else
                netdev->netdev_ops = &enic_netdev_ops;
@@ -2516,17 +2522,17 @@ err_out_dev_deinit:
        enic_dev_deinit(enic);
 err_out_dev_close:
        vnic_dev_close(enic->vdev);
-err_out_free_pp:
-       kfree(enic->pp);
 err_out_disable_sriov:
+       kfree(enic->pp);
+err_out_disable_sriov_pp:
 #ifdef CONFIG_PCI_IOV
        if (enic_sriov_enabled(enic)) {
                pci_disable_sriov(pdev);
                enic->priv_flags &= ~ENIC_SRIOV_ENABLED;
        }
 err_out_vnic_unregister:
-       vnic_dev_unregister(enic->vdev);
 #endif
+       vnic_dev_unregister(enic->vdev);
 err_out_iounmap:
        enic_iounmap(enic);
 err_out_release_regions:
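
The enic changes hang all of the VF-specific behaviour (skipping station-address programming, rejecting MTU changes, picking the dynamic netdev ops, randomising a zero MAC) off one predicate that matches the new SR-IOV VF PCI device ID. A rough illustration of that pattern with a simplified device struct; the IDs mirror the header hunk above but the rest is invented:

#include <stdbool.h>
#include <stdio.h>

#define ID_ENET      0x0043
#define ID_ENET_DYN  0x0044
#define ID_ENET_VF   0x0071

struct fake_enic { unsigned short pci_device_id; };

static bool is_dynamic(const struct fake_enic *e)
{
	return e->pci_device_id == ID_ENET_DYN;
}

static bool is_sriov_vf(const struct fake_enic *e)
{
	return e->pci_device_id == ID_ENET_VF;
}

/* Station-address programming is only done on the classic PF device;
 * dynamic vNICs and SR-IOV VFs skip it, as in the open/stop hunks. */
static bool wants_station_addr(const struct fake_enic *e)
{
	return !is_dynamic(e) && !is_sriov_vf(e);
}

int main(void)
{
	struct fake_enic vf = { ID_ENET_VF };

	printf("program station addr on VF: %d\n", wants_station_addr(&vf));
	return 0;
}
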
index a6bcdb5cd2be4a0b3880cfbe8d72dfb3b07045ce..e703d64434f89cec738cf2a28fc3b37167fb4c11 100644 (file)
@@ -1786,8 +1786,7 @@ static void be_rx_queues_destroy(struct be_adapter *adapter)
 static u32 be_num_rxqs_want(struct be_adapter *adapter)
 {
        if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
-            !sriov_enabled(adapter) && be_physfn(adapter) &&
-            !be_is_mc(adapter)) {
+            !sriov_enabled(adapter) && be_physfn(adapter)) {
                return 1 + MAX_RSS_QS; /* one default non-RSS queue */
        } else {
                dev_warn(&adapter->pdev->dev,
index fb5579a3b19d60f5e38dd995bcaad6dc384a0633..47f85c337cf75cac6034990e68e978367886a0db 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/netdevice.h>
index a127cb2476c71ff89f0e343134421ada4b52911e..bb336a0959c9147624ff6c23d63ee71138f7663d 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/init.h>
+#include <linux/interrupt.h>
 #include <linux/io.h>
 #include <linux/mii.h>
 #include <linux/module.h>
index 05b7359bde8da1931ac53ebc6fc95310bd398e11..6bdd8e36e564c4868eac9259b63a81c23f345337 100644 (file)
@@ -263,7 +263,7 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
                data[i++] = atomic_read(&port->port_res[k].swqe_avail);
 }
 
-const struct ethtool_ops ehea_ethtool_ops = {
+static const struct ethtool_ops ehea_ethtool_ops = {
        .get_settings = ehea_get_settings,
        .get_drvinfo = ehea_get_drvinfo,
        .get_msglevel = ehea_get_msglevel,
index 3554414eb5e289287e3a9ab174320c7e9fa1bf30..5d5fb2627184f9b7f303d64c22875182c63d88e6 100644 (file)
@@ -94,8 +94,8 @@ static int port_name_cnt;
 static LIST_HEAD(adapter_list);
 static unsigned long ehea_driver_flags;
 static DEFINE_MUTEX(dlpar_mem_lock);
-struct ehea_fw_handle_array ehea_fw_handles;
-struct ehea_bcmc_reg_array ehea_bcmc_regs;
+static struct ehea_fw_handle_array ehea_fw_handles;
+static struct ehea_bcmc_reg_array ehea_bcmc_regs;
 
 
 static int __devinit ehea_probe_adapter(struct platform_device *dev,
@@ -133,7 +133,7 @@ void ehea_dump(void *adr, int len, char *msg)
        }
 }
 
-void ehea_schedule_port_reset(struct ehea_port *port)
+static void ehea_schedule_port_reset(struct ehea_port *port)
 {
        if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
                schedule_work(&port->reset_task);
@@ -1404,7 +1404,7 @@ out:
        return ret;
 }
 
-int ehea_gen_smrs(struct ehea_port_res *pr)
+static int ehea_gen_smrs(struct ehea_port_res *pr)
 {
        int ret;
        struct ehea_adapter *adapter = pr->port->adapter;
@@ -1426,7 +1426,7 @@ out:
        return -EIO;
 }
 
-int ehea_rem_smrs(struct ehea_port_res *pr)
+static int ehea_rem_smrs(struct ehea_port_res *pr)
 {
        if ((ehea_rem_mr(&pr->send_mr)) ||
            (ehea_rem_mr(&pr->recv_mr)))
@@ -2190,7 +2190,7 @@ out:
        return err;
 }
 
-int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
+static int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
 {
        int ret = -EIO;
        u64 hret;
@@ -2531,7 +2531,7 @@ static void ehea_flush_sq(struct ehea_port *port)
        }
 }
 
-int ehea_stop_qps(struct net_device *dev)
+static int ehea_stop_qps(struct net_device *dev)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
@@ -2600,7 +2600,7 @@ out:
        return ret;
 }
 
-void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
+static void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
 {
        struct ehea_qp qp = *orig_qp;
        struct ehea_qp_init_attr *init_attr = &qp.init_attr;
@@ -2633,7 +2633,7 @@ void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
        }
 }
 
-int ehea_restart_qps(struct net_device *dev)
+static int ehea_restart_qps(struct net_device *dev)
 {
        struct ehea_port *port = netdev_priv(dev);
        struct ehea_adapter *adapter = port->adapter;
@@ -2824,7 +2824,7 @@ static void ehea_tx_watchdog(struct net_device *dev)
                ehea_schedule_port_reset(port);
 }
 
-int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
+static int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
 {
        struct hcp_query_ehea *cb;
        u64 hret;
@@ -2852,7 +2852,7 @@ out:
        return ret;
 }
 
-int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
+static int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
 {
        struct hcp_ehea_port_cb4 *cb4;
        u64 hret;
@@ -2966,7 +2966,7 @@ static const struct net_device_ops ehea_netdev_ops = {
        .ndo_tx_timeout         = ehea_tx_watchdog,
 };
 
-struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
+static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
                                         u32 logical_port_id,
                                         struct device_node *dn)
 {
@@ -3237,7 +3237,7 @@ static ssize_t ehea_remove_port(struct device *dev,
 static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
 static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);
 
-int ehea_create_device_sysfs(struct platform_device *dev)
+static int ehea_create_device_sysfs(struct platform_device *dev)
 {
        int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
        if (ret)
@@ -3248,7 +3248,7 @@ out:
        return ret;
 }
 
-void ehea_remove_device_sysfs(struct platform_device *dev)
+static void ehea_remove_device_sysfs(struct platform_device *dev)
 {
        device_remove_file(&dev->dev, &dev_attr_probe_port);
        device_remove_file(&dev->dev, &dev_attr_remove_port);
@@ -3379,7 +3379,7 @@ static int __devexit ehea_remove(struct platform_device *dev)
        return 0;
 }
 
-void ehea_crash_handler(void)
+static void ehea_crash_handler(void)
 {
        int i;
 
@@ -3491,7 +3491,7 @@ static ssize_t ehea_show_capabilities(struct device_driver *drv,
 static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
                   ehea_show_capabilities, NULL);
 
-int __init ehea_module_init(void)
+static int __init ehea_module_init(void)
 {
        int ret;
 
index 95b9f4fa811e7102cc6db2248dbabf1af7e3d1e9..c25b05b94daae698e0b8640e40da8614ea9ad4f7 100644 (file)
@@ -34,9 +34,7 @@
 #include "ehea_phyp.h"
 #include "ehea_qmr.h"
 
-struct ehea_bmap *ehea_bmap = NULL;
-
-
+static struct ehea_bmap *ehea_bmap;
 
 static void *hw_qpageit_get_inc(struct hw_queue *queue)
 {
@@ -212,7 +210,7 @@ out_nomem:
        return NULL;
 }
 
-u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
+static u64 ehea_destroy_cq_res(struct ehea_cq *cq, u64 force)
 {
        u64 hret;
        u64 adapter_handle = cq->adapter->handle;
@@ -337,7 +335,7 @@ struct ehea_eqe *ehea_poll_eq(struct ehea_eq *eq)
        return eqe;
 }
 
-u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
+static u64 ehea_destroy_eq_res(struct ehea_eq *eq, u64 force)
 {
        u64 hret;
        unsigned long flags;
@@ -381,7 +379,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
 /**
  * allocates memory for a queue and registers pages in phyp
  */
-int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
+static int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
                           int nr_pages, int wqe_size, int act_nr_sges,
                           struct ehea_adapter *adapter, int h_call_q_selector)
 {
@@ -516,7 +514,7 @@ out_freemem:
        return NULL;
 }
 
-u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
+static u64 ehea_destroy_qp_res(struct ehea_qp *qp, u64 force)
 {
        u64 hret;
        struct ehea_qp_init_attr *qp_attr = &qp->init_attr;
@@ -976,7 +974,7 @@ int ehea_gen_smr(struct ehea_adapter *adapter, struct ehea_mr *old_mr,
        return 0;
 }
 
-void print_error_data(u64 *data)
+static void print_error_data(u64 *data)
 {
        int length;
        u64 type = EHEA_BMASK_GET(ERROR_DATA_TYPE, data[2]);
index c6e4621b62624991ec1e0f32a0bbfee410f9f55c..6565c463185c2a8f705ceee712005643f9a7389d 100644 (file)
@@ -1,7 +1,7 @@
 ################################################################################
 #
 # Intel 82575 PCI-Express Ethernet Linux driver
-# Copyright(c) 1999 - 2011 Intel Corporation.
+# Copyright(c) 1999 - 2012 Intel Corporation.
 #
 # This program is free software; you can redistribute it and/or modify it
 # under the terms and conditions of the GNU General Public License,
index b8e20f037d0a8314789f236a31b4f723ef549b72..08bdc33715eeabbb793b778fd98a4aafdc8c5bc5 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 08a757eb6608c73bc05e4a8569301e71c4ba05cf..b927d79ab536c19a09ce0fdf8ac00b6ebb18a47a 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index f5fc5725ea94b33c81480fab0fa200498513b013..aed217449f0dd459eebd13dfff3006a5dd414b06 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 4519a13671708c0ba493a3e3e3c332a8b75a3431..f67cbd3fa307a966a377f6f7b9a089b3b7163c92 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 73aac082c44dbe5b3cbbbbca2d01a225c3440df0..f57338afd71f47681a3436ee769d6fb02cd14b06 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -151,7 +151,7 @@ void igb_clear_vfta_i350(struct e1000_hw *hw)
  *  Writes value at the given offset in the register array which stores
  *  the VLAN filter table.
  **/
-void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
 {
        int i;
 
index e45996b4ea346e299070bafe20fd73931c1dd0c3..cbddc4e51e30d9c304b11c75250d04ddde39c5b7 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 469d95eaa1547ae5136c4c8f2e1cf8b14e0396c5..5988b8958baff22cfb0f2de6f71eae4fc578681a 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index eddb0f83dceac6a3ad82e7ad2e9dd0541bb6ca36..dbcfa3d5caeca753cf22a4f37e9cb313d9c30fa7 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 40407124e7222090c364aa960cfc303115a9571f..fa2c6ba6213941d31824d594f30b9ed9c08ba5b5 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index a2a7ca9fa733148fbfc5f42eb7c49df35f7e0bad..825b0228cac0ac5c17dcd9a903240b9a17d95e6b 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2011 Intel Corporation.
+  Copyright(c) 2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index b17d7c20f8177816434f914f15635b3624361d5e..789de5b83aad94071aec54f04e7979e9cfd82d7c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 8510797b9d810a95007c129f591f3c239acc2ef7..4c32ac66ff39413e78eaeb214ff0a55de259cfee 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 0a860bc1198ef977f984579a2f9dd0656e1d6ae2..ccdf36d503fdc54ea753e02bfa780c9c1b9055d9 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 3d12e67eebb4af328b03f00196841a228cfca65e..8e33bdd33eea5663c208dc6db16ca7f5371102ae 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 7998bf4d594683b73aa476a1ee9156dc1a220511..aa399a8a8f0df2ba23bc8cf0e1ae0b04f58f9469 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
index 01e5e89ef959eb9317be078f2f3a0e50607d9abc..e91d73c8aa4e3e241ba89ad51f559f69fe1b3062 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel(R) Gigabit Ethernet Linux driver
-  Copyright(c) 2007-2011 Intel Corporation.
+  Copyright(c) 2007-2012 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -68,7 +68,7 @@ char igb_driver_name[] = "igb";
 char igb_driver_version[] = DRV_VERSION;
 static const char igb_driver_string[] =
                                "Intel(R) Gigabit Ethernet Network Driver";
-static const char igb_copyright[] = "Copyright (c) 2007-2011 Intel Corporation.";
+static const char igb_copyright[] = "Copyright (c) 2007-2012 Intel Corporation.";
 
 static const struct e1000_info *igb_info_tbl[] = {
        [board_82575] = &e1000_82575_info,
@@ -4003,8 +4003,8 @@ set_itr_now:
        }
 }
 
-void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
-                    u32 type_tucmd, u32 mss_l4len_idx)
+static void igb_tx_ctxtdesc(struct igb_ring *tx_ring, u32 vlan_macip_lens,
+                           u32 type_tucmd, u32 mss_l4len_idx)
 {
        struct e1000_adv_tx_context_desc *context_desc;
        u16 i = tx_ring->next_to_use;
@@ -5623,7 +5623,7 @@ static irqreturn_t igb_intr(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-void igb_ring_irq_enable(struct igb_q_vector *q_vector)
+static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
 {
        struct igb_adapter *adapter = q_vector->adapter;
        struct e1000_hw *hw = &adapter->hw;
index 7b600a1f6366f811d31ae38fd12cab1c6d452505..2dba534460645915d14242b0b6d3ea0e4bd102ad 100644 (file)
@@ -468,6 +468,5 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
 
 void igbvf_set_ethtool_ops(struct net_device *netdev)
 {
-       /* have to "undeclare" const on this struct to remove warnings */
-       SET_ETHTOOL_OPS(netdev, (struct ethtool_ops *)&igbvf_ethtool_ops);
+       SET_ETHTOOL_OPS(netdev, &igbvf_ethtool_ops);
 }
index fd3da3076c2f3bc6b10032defa1a838d0b5c59ef..a4b20c865759a28973407ad30181be1b733f05a3 100644 (file)
@@ -1194,11 +1194,6 @@ static int igbvf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
        struct igbvf_adapter *adapter = netdev_priv(netdev);
        struct e1000_hw *hw = &adapter->hw;
 
-       igbvf_irq_disable(adapter);
-
-       if (!test_bit(__IGBVF_DOWN, &adapter->state))
-               igbvf_irq_enable(adapter);
-
        if (hw->mac.ops.set_vfta(hw, vid, false)) {
                dev_err(&adapter->pdev->dev,
                        "Failed to remove vlan id %d\n", vid);
index 802bfa0f62cc022c34965c32343719749e1c4ca7..775602ef90e5d2176548bece6477aeacc0c6c347 100644 (file)
 
 /* Receive DMA Registers */
 #define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \
-                         (0x0D000 + ((_i - 64) * 0x40)))
+                        (0x0D000 + (((_i) - 64) * 0x40)))
 #define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \
-                         (0x0D004 + ((_i - 64) * 0x40)))
+                        (0x0D004 + (((_i) - 64) * 0x40)))
 #define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \
-                         (0x0D008 + ((_i - 64) * 0x40)))
+                        (0x0D008 + (((_i) - 64) * 0x40)))
 #define IXGBE_RDH(_i)   (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \
-                         (0x0D010 + ((_i - 64) * 0x40)))
+                        (0x0D010 + (((_i) - 64) * 0x40)))
 #define IXGBE_RDT(_i)   (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \
-                         (0x0D018 + ((_i - 64) * 0x40)))
+                        (0x0D018 + (((_i) - 64) * 0x40)))
 #define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \
-                          (0x0D028 + ((_i - 64) * 0x40)))
+                        (0x0D028 + (((_i) - 64) * 0x40)))
 #define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \
-                          (0x0D02C + ((_i - 64) * 0x40)))
+                        (0x0D02C + (((_i) - 64) * 0x40)))
 #define IXGBE_RSCDBU     0x03028
 #define IXGBE_RDDCC      0x02F20
 #define IXGBE_RXMEMWRAP  0x03190
  */
 #define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \
                           (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \
-                          (0x0D014 + ((_i - 64) * 0x40))))
+                         (0x0D014 + (((_i) - 64) * 0x40))))
 /*
  * Rx DCA Control Register:
  * 00-15 : 0x02200 + n*4
  */
 #define IXGBE_DCA_RXCTRL(_i)    (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \
                                  (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \
-                                 (0x0D00C + ((_i - 64) * 0x40))))
+                                (0x0D00C + (((_i) - 64) * 0x40))))
 #define IXGBE_RDRXCTL           0x02F00
 #define IXGBE_RXPBSIZE(_i)      (0x03C00 + ((_i) * 4))
                                              /* 8 of these 0x03C00 - 0x03C1C */
 
 #define IXGBE_WUPL      0x05900
 #define IXGBE_WUPM      0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */
-#define IXGBE_FHFT(_n)     (0x09000 + (_n * 0x100)) /* Flex host filter table */
-#define IXGBE_FHFT_EXT(_n) (0x09800 + (_n * 0x100)) /* Ext Flexible Host
-                                                     * Filter Table */
+#define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */
+#define IXGBE_FHFT_EXT(_n)     (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host
+                                                           * Filter Table */
 
 #define IXGBE_FLEXIBLE_FILTER_COUNT_MAX         4
 #define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX     2
@@ -1485,7 +1485,7 @@ enum {
 #define IXGBE_LED_BLINK_BASE     0x00000080
 #define IXGBE_LED_MODE_MASK_BASE 0x0000000F
 #define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i)))
-#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i))
+#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i))
 #define IXGBE_LED_IVRT(_i)       IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i)
 #define IXGBE_LED_BLINK(_i)      IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i)
 #define IXGBE_LED_MODE_MASK(_i)  IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i)
@@ -2068,9 +2068,9 @@ enum {
 
 /* SR-IOV specific macros */
 #define IXGBE_MBVFICR_INDEX(vf_number)   (vf_number >> 4)
-#define IXGBE_MBVFICR(_i)                (0x00710 + (_i * 4))
-#define IXGBE_VFLRE(_i)                  (((_i & 1) ? 0x001C0 : 0x00600))
-#define IXGBE_VFLREC(_i)                 (0x00700 + (_i * 4))
+#define IXGBE_MBVFICR(_i)              (0x00710 + ((_i) * 4))
+#define IXGBE_VFLRE(_i)                ((((_i) & 1) ? 0x001C0 : 0x00600))
+#define IXGBE_VFLREC(_i)               (0x00700 + ((_i) * 4))
 
 enum ixgbe_fdir_pballoc_type {
        IXGBE_FDIR_PBALLOC_NONE = 0,
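
The ixgbe_type.h hunks above mostly add parentheses around the macro parameter. Without them, passing an expression instead of a plain variable silently computes the wrong register offset because of operator precedence. A small demonstration of the difference (register base values here are made up; only the parenthesization issue is the point):

#include <stdio.h>

/* Before: the parameter is multiplied without its own parentheses. */
#define FHFT_BAD(_n)  (0x09000 + (_n * 0x100))
/* After: every use of the parameter is wrapped, as in the patch. */
#define FHFT_GOOD(_n) (0x09000 + ((_n) * 0x100))

int main(void)
{
	int filter = 1;

	/* With an expression argument the bad macro expands to
	 * filter + 1 * 0x100 rather than (filter + 1) * 0x100. */
	printf("bad:  0x%05x\n", FHFT_BAD(filter + 1));   /* 0x09101 */
	printf("good: 0x%05x\n", FHFT_GOOD(filter + 1));  /* 0x09200 */
	return 0;
}
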
index dc8e6511c64068debdb4cec9887b321e348a9ea9..c857003181475e919b62551c7f954e9e849b3813 100644 (file)
@@ -56,7 +56,8 @@ struct ixgbe_stats {
                            offsetof(struct ixgbevf_adapter, m),         \
                            offsetof(struct ixgbevf_adapter, b),         \
                            offsetof(struct ixgbevf_adapter, r)
-static struct ixgbe_stats ixgbe_gstrings_stats[] = {
+
+static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
        {"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
                                    stats.saved_reset_vfgprc)},
        {"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
@@ -671,7 +672,7 @@ static int ixgbevf_nway_reset(struct net_device *netdev)
        return 0;
 }
 
-static struct ethtool_ops ixgbevf_ethtool_ops = {
+static const struct ethtool_ops ixgbevf_ethtool_ops = {
        .get_settings           = ixgbevf_get_settings,
        .get_drvinfo            = ixgbevf_get_drvinfo,
        .get_regs_len           = ixgbevf_get_regs_len,
index e6c9d1a927a9d0e06b003341b78f9b44620ed8c5..9075c1d610390bad233a1e97fda7f1606bc30fec 100644 (file)
@@ -279,12 +279,12 @@ enum ixgbevf_boards {
        board_X540_vf,
 };
 
-extern struct ixgbevf_info ixgbevf_82599_vf_info;
-extern struct ixgbevf_info ixgbevf_X540_vf_info;
-extern struct ixgbe_mbx_operations ixgbevf_mbx_ops;
+extern const struct ixgbevf_info ixgbevf_82599_vf_info;
+extern const struct ixgbevf_info ixgbevf_X540_vf_info;
+extern const struct ixgbe_mbx_operations ixgbevf_mbx_ops;
 
 /* needed by ethtool.c */
-extern char ixgbevf_driver_name[];
+extern const char ixgbevf_driver_name[];
 extern const char ixgbevf_driver_version[];
 
 extern int ixgbevf_up(struct ixgbevf_adapter *adapter);
index 891162d1610ca6e21eaf88ab41cd07c918292327..bed411bada21bf8d9f349dd360a20daa9f76be23 100644 (file)
@@ -53,7 +53,7 @@
 
 #include "ixgbevf.h"
 
-char ixgbevf_driver_name[] = "ixgbevf";
+const char ixgbevf_driver_name[] = "ixgbevf";
 static const char ixgbevf_driver_string[] =
        "Intel(R) 10 Gigabit PCI Express Virtual Function Network Driver";
 
@@ -917,31 +917,34 @@ static irqreturn_t ixgbevf_msix_mbx(int irq, void *data)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 eicr;
        u32 msg;
+       bool got_ack = false;
 
        eicr = IXGBE_READ_REG(hw, IXGBE_VTEICS);
        IXGBE_WRITE_REG(hw, IXGBE_VTEICR, eicr);
 
-       if (!hw->mbx.ops.check_for_ack(hw)) {
-               /*
-                * checking for the ack clears the PFACK bit.  Place
-                * it back in the v2p_mailbox cache so that anyone
-                * polling for an ack will not miss it.  Also
-                * avoid the read below because the code to read
-                * the mailbox will also clear the ack bit.  This was
-                * causing lost acks.  Just cache the bit and exit
-                * the IRQ handler.
-                */
-               hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
-               goto out;
-       }
+       if (!hw->mbx.ops.check_for_ack(hw))
+               got_ack = true;
 
-       /* Not an ack interrupt, go ahead and read the message */
-       hw->mbx.ops.read(hw, &msg, 1);
+       if (!hw->mbx.ops.check_for_msg(hw)) {
+               hw->mbx.ops.read(hw, &msg, 1);
 
-       if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
-               mod_timer(&adapter->watchdog_timer,
-                         round_jiffies(jiffies + 1));
+               if ((msg & IXGBE_MBVFICR_VFREQ_MASK) == IXGBE_PF_CONTROL_MSG)
+                       mod_timer(&adapter->watchdog_timer,
+                                 round_jiffies(jiffies + 1));
 
+               if (msg & IXGBE_VT_MSGTYPE_NACK)
+                       pr_warn("Last Request of type %2.2x to PF Nacked\n",
+                               msg & 0xFF);
+               goto out;
+       }
+
+       /*
+        * checking for the ack clears the PFACK bit.  Place
+        * it back in the v2p_mailbox cache so that anyone
+        * polling for an ack will not miss it
+        */
+       if (got_ack)
+               hw->mbx.v2p_mailbox |= IXGBE_VFMAILBOX_PFACK;
 out:
        return IRQ_HANDLED;
 }
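
The reworked ixgbevf mailbox interrupt handler checks for a pending PF message first and only falls back to caching the PFACK bit when no message is waiting; that way the message read (which also clears the ack) is handled immediately, while a bare ack is still left where a poller can find it. A stripped-down, self-contained model of that control flow — all mailbox state here is simulated, not the real ixgbe_hw API:

#include <stdbool.h>
#include <stdio.h>

struct fake_mbx {
	bool ack_pending;        /* PF acked our last request */
	bool msg_pending;        /* PF sent us a control message */
	unsigned int v2p_cache;  /* cached copy of the v2p mailbox bits */
};

#define PFACK 0x1

/* Return 0 when the condition is present, mirroring check_for_ack/_msg. */
static int check_for_ack(struct fake_mbx *m) { return m->ack_pending ? 0 : -1; }
static int check_for_msg(struct fake_mbx *m) { return m->msg_pending ? 0 : -1; }

static void mbx_irq(struct fake_mbx *m)
{
	bool got_ack = (check_for_ack(m) == 0);

	if (check_for_msg(m) == 0) {
		/* Reading the message clears the ack too, so handle the
		 * message and return without touching the cache. */
		m->msg_pending = false;
		printf("handled PF message\n");
		return;
	}

	/* No message: put the ack back where a poller will see it. */
	if (got_ack)
		m->v2p_cache |= PFACK;
}

int main(void)
{
	struct fake_mbx m = { .ack_pending = true, .msg_pending = false };

	mbx_irq(&m);
	printf("cached PFACK: %s\n", (m.v2p_cache & PFACK) ? "yes" : "no");
	return 0;
}
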
index 930fa83f256881eb1bebbe29ad4c1f60fc20af9e..13532d9ba72de8b82762a46b5e07e0f71a2c1f8b 100644 (file)
@@ -26,6 +26,7 @@
 *******************************************************************************/
 
 #include "mbx.h"
+#include "ixgbevf.h"
 
 /**
  *  ixgbevf_poll_for_msg - Wait for message notification
@@ -328,7 +329,7 @@ static s32 ixgbevf_init_mbx_params_vf(struct ixgbe_hw *hw)
        return 0;
 }
 
-struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
+const struct ixgbe_mbx_operations ixgbevf_mbx_ops = {
        .init_params   = ixgbevf_init_mbx_params_vf,
        .read          = ixgbevf_read_mbx_vf,
        .write         = ixgbevf_write_mbx_vf,
index 21533e300367f78dd4058ee2df7ba31c7df1122c..d0138d7a31a1d607012c08d6dd8c4f1f1d6ca953 100644 (file)
@@ -26,6 +26,7 @@
 *******************************************************************************/
 
 #include "vf.h"
+#include "ixgbevf.h"
 
 /**
  *  ixgbevf_start_hw_vf - Prepare hardware for Tx/Rx
@@ -401,7 +402,7 @@ static s32 ixgbevf_check_mac_link_vf(struct ixgbe_hw *hw,
        return 0;
 }
 
-static struct ixgbe_mac_operations ixgbevf_mac_ops = {
+static const struct ixgbe_mac_operations ixgbevf_mac_ops = {
        .init_hw             = ixgbevf_init_hw_vf,
        .reset_hw            = ixgbevf_reset_hw_vf,
        .start_hw            = ixgbevf_start_hw_vf,
@@ -415,12 +416,12 @@ static struct ixgbe_mac_operations ixgbevf_mac_ops = {
        .set_vfta            = ixgbevf_set_vfta_vf,
 };
 
-struct ixgbevf_info ixgbevf_82599_vf_info = {
+const struct ixgbevf_info ixgbevf_82599_vf_info = {
        .mac = ixgbe_mac_82599_vf,
        .mac_ops = &ixgbevf_mac_ops,
 };
 
-struct ixgbevf_info ixgbevf_X540_vf_info = {
+const struct ixgbevf_info ixgbevf_X540_vf_info = {
        .mac = ixgbe_mac_X540_vf,
        .mac_ops = &ixgbevf_mac_ops,
 };
index 10306b492ee61b5b9e686503839281b35b9df1fb..d556619a92120e46d87c73f250c8d5839ae30027 100644 (file)
@@ -167,7 +167,7 @@ struct ixgbevf_hw_stats {
 
 struct ixgbevf_info {
        enum ixgbe_mac_type             mac;
-       struct ixgbe_mac_operations     *mac_ops;
+       const struct ixgbe_mac_operations *mac_ops;
 };
 
 #endif /* __IXGBE_VF_H__ */
index 9c049d2cb97d00142b1fbfaa6a1b14b6f8fac580..9edecfa1f0f4ffb556d73cbda06db380f9bdeeb8 100644 (file)
@@ -136,6 +136,8 @@ static char mv643xx_eth_driver_version[] = "1.4";
 #define INT_MASK                       0x0068
 #define INT_MASK_EXT                   0x006c
 #define TX_FIFO_URGENT_THRESHOLD       0x0074
+#define RX_DISCARD_FRAME_CNT           0x0084
+#define RX_OVERRUN_FRAME_CNT           0x0088
 #define TXQ_FIX_PRIO_CONF_MOVED                0x00dc
 #define TX_BW_RATE_MOVED               0x00e0
 #define TX_BW_MTU_MOVED                        0x00e8
@@ -334,6 +336,9 @@ struct mib_counters {
        u32 bad_crc_event;
        u32 collision;
        u32 late_collision;
+       /* Non MIB hardware counters */
+       u32 rx_discard;
+       u32 rx_overrun;
 };
 
 struct lro_counters {
@@ -1225,6 +1230,10 @@ static void mib_counters_clear(struct mv643xx_eth_private *mp)
 
        for (i = 0; i < 0x80; i += 4)
                mib_read(mp, i);
+
+       /* Clear non MIB hw counters also */
+       rdlp(mp, RX_DISCARD_FRAME_CNT);
+       rdlp(mp, RX_OVERRUN_FRAME_CNT);
 }
 
 static void mib_counters_update(struct mv643xx_eth_private *mp)
@@ -1262,6 +1271,9 @@ static void mib_counters_update(struct mv643xx_eth_private *mp)
        p->bad_crc_event += mib_read(mp, 0x74);
        p->collision += mib_read(mp, 0x78);
        p->late_collision += mib_read(mp, 0x7c);
+       /* Non MIB hardware counters */
+       p->rx_discard += rdlp(mp, RX_DISCARD_FRAME_CNT);
+       p->rx_overrun += rdlp(mp, RX_OVERRUN_FRAME_CNT);
        spin_unlock_bh(&mp->mib_counters_lock);
 
        mod_timer(&mp->mib_counters_timer, jiffies + 30 * HZ);
@@ -1413,6 +1425,8 @@ static const struct mv643xx_eth_stats mv643xx_eth_stats[] = {
        MIBSTAT(bad_crc_event),
        MIBSTAT(collision),
        MIBSTAT(late_collision),
+       MIBSTAT(rx_discard),
+       MIBSTAT(rx_overrun),
        LROSTAT(lro_aggregated),
        LROSTAT(lro_flushed),
        LROSTAT(lro_no_desc),
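
The mv643xx_eth change adds two hardware counters (RX discard and RX overrun) that live outside the MIB block. Like the MIB counters they are clear-on-read, so they are read once when statistics are reset and then accumulated into the software copy on every periodic update. A tiny simulation of that accumulate-on-read pattern (the register and the rdlp() stand-in are illustrative):

#include <stdio.h>

/* Pretend clear-on-read hardware counter. */
static unsigned int hw_rx_discard = 7;

static unsigned int rdlp_sim(void)
{
	unsigned int v = hw_rx_discard;

	hw_rx_discard = 0;            /* reading the register clears it */
	return v;
}

struct sw_stats { unsigned long rx_discard; };

static void stats_clear(struct sw_stats *s)
{
	s->rx_discard = 0;
	(void)rdlp_sim();             /* discard whatever had accumulated */
}

static void stats_update(struct sw_stats *s)
{
	s->rx_discard += rdlp_sim();  /* fold the new delta into the SW copy */
}

int main(void)
{
	struct sw_stats s;

	stats_clear(&s);
	hw_rx_discard = 3;            /* hardware drops a few more frames */
	stats_update(&s);
	printf("rx_discard = %lu\n", s.rx_discard);
	return 0;
}
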
index 18a87a57fc0aa4ca9ff3bb406a9633fc281bc308..edb9bda55d556132d73e72ddcebef91ccf20c747 100644 (file)
@@ -931,17 +931,20 @@ static int skge_ring_alloc(struct skge_ring *ring, void *vaddr, u32 base)
 }
 
 /* Allocate and setup a new buffer for receiving */
-static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
-                         struct sk_buff *skb, unsigned int bufsize)
+static int skge_rx_setup(struct pci_dev *pdev,
+                        struct skge_element *e,
+                        struct sk_buff *skb, unsigned int bufsize)
 {
        struct skge_rx_desc *rd = e->desc;
-       u64 map;
+       dma_addr_t map;
 
-       map = pci_map_single(skge->hw->pdev, skb->data, bufsize,
+       map = pci_map_single(pdev, skb->data, bufsize,
                             PCI_DMA_FROMDEVICE);
+       if (pci_dma_mapping_error(pdev, map))
+               goto mapping_error;
 
-       rd->dma_lo = map;
-       rd->dma_hi = map >> 32;
+       rd->dma_lo = lower_32_bits(map);
+       rd->dma_hi = upper_32_bits(map);
        e->skb = skb;
        rd->csum1_start = ETH_HLEN;
        rd->csum2_start = ETH_HLEN;
@@ -953,6 +956,13 @@ static void skge_rx_setup(struct skge_port *skge, struct skge_element *e,
        rd->control = BMU_OWN | BMU_STF | BMU_IRQ_EOF | BMU_TCP_CHECK | bufsize;
        dma_unmap_addr_set(e, mapaddr, map);
        dma_unmap_len_set(e, maplen, bufsize);
+       return 0;
+
+mapping_error:
+       if (net_ratelimit())
+               dev_warn(&pdev->dev, "%s: rx mapping error\n",
+                        skb->dev->name);
+       return -EIO;
 }
 
 /* Resume receiving using existing skb,
@@ -1014,7 +1024,11 @@ static int skge_rx_fill(struct net_device *dev)
                        return -ENOMEM;
 
                skb_reserve(skb, NET_IP_ALIGN);
-               skge_rx_setup(skge, e, skb, skge->rx_buf_size);
+               if (skge_rx_setup(skge->hw->pdev, e, skb, skge->rx_buf_size)) {
+                       kfree_skb(skb);
+                       return -ENOMEM;
+               }
+
        } while ((e = e->next) != ring->start);
 
        ring->to_clean = ring->start;
@@ -2576,6 +2590,7 @@ static int skge_up(struct net_device *dev)
        }
 
        /* Initialize MAC */
+       netif_carrier_off(dev);
        spin_lock_bh(&hw->phy_lock);
        if (is_genesis(hw))
                genesis_mac_init(hw, port);
@@ -2728,7 +2743,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
        struct skge_tx_desc *td;
        int i;
        u32 control, len;
-       u64 map;
+       dma_addr_t map;
 
        if (skb_padto(skb, ETH_ZLEN))
                return NETDEV_TX_OK;
@@ -2742,11 +2757,14 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
        e->skb = skb;
        len = skb_headlen(skb);
        map = pci_map_single(hw->pdev, skb->data, len, PCI_DMA_TODEVICE);
+       if (pci_dma_mapping_error(hw->pdev, map))
+               goto mapping_error;
+
        dma_unmap_addr_set(e, mapaddr, map);
        dma_unmap_len_set(e, maplen, len);
 
-       td->dma_lo = map;
-       td->dma_hi = map >> 32;
+       td->dma_lo = lower_32_bits(map);
+       td->dma_hi = upper_32_bits(map);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const int offset = skb_checksum_start_offset(skb);
@@ -2777,14 +2795,16 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
 
                        map = skb_frag_dma_map(&hw->pdev->dev, frag, 0,
                                               skb_frag_size(frag), DMA_TO_DEVICE);
+                       if (dma_mapping_error(&hw->pdev->dev, map))
+                               goto mapping_unwind;
 
                        e = e->next;
                        e->skb = skb;
                        tf = e->desc;
                        BUG_ON(tf->control & BMU_OWN);
 
-                       tf->dma_lo = map;
-                       tf->dma_hi = (u64) map >> 32;
+                       tf->dma_lo = lower_32_bits(map);
+                       tf->dma_hi = upper_32_bits(map);
                        dma_unmap_addr_set(e, mapaddr, map);
                        dma_unmap_len_set(e, maplen, skb_frag_size(frag));
 
@@ -2797,6 +2817,8 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
        td->control = BMU_OWN | BMU_SW | BMU_STF | control | len;
        wmb();
 
+       netdev_sent_queue(dev, skb->len);
+
        skge_write8(hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_START);
 
        netif_printk(skge, tx_queued, KERN_DEBUG, skge->netdev,
@@ -2812,15 +2834,35 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
        }
 
        return NETDEV_TX_OK;
+
+mapping_unwind:
+       /* unroll any pages that were already mapped.  */
+       if (e != skge->tx_ring.to_use) {
+               struct skge_element *u;
+
+               for (u = skge->tx_ring.to_use->next; u != e; u = u->next)
+                       pci_unmap_page(hw->pdev, dma_unmap_addr(u, mapaddr),
+                                      dma_unmap_len(u, maplen),
+                                      PCI_DMA_TODEVICE);
+               e = skge->tx_ring.to_use;
+       }
+       /* undo the mapping for the skb header */
+       pci_unmap_single(hw->pdev, dma_unmap_addr(e, mapaddr),
+                        dma_unmap_len(e, maplen),
+                        PCI_DMA_TODEVICE);
+mapping_error:
+       /* mapping error causes error message and packet to be discarded. */
+       if (net_ratelimit())
+               dev_warn(&hw->pdev->dev, "%s: tx mapping error\n", dev->name);
+       dev_kfree_skb(skb);
+       return NETDEV_TX_OK;
 }
 
 
 /* Free resources associated with this ring element */
-static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
-                        u32 control)
+static inline void skge_tx_unmap(struct pci_dev *pdev, struct skge_element *e,
+                                u32 control)
 {
-       struct pci_dev *pdev = skge->hw->pdev;
-
        /* skb header vs. fragment */
        if (control & BMU_STF)
                pci_unmap_single(pdev, dma_unmap_addr(e, mapaddr),
@@ -2830,13 +2872,6 @@ static void skge_tx_free(struct skge_port *skge, struct skge_element *e,
                pci_unmap_page(pdev, dma_unmap_addr(e, mapaddr),
                               dma_unmap_len(e, maplen),
                               PCI_DMA_TODEVICE);
-
-       if (control & BMU_EOF) {
-               netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
-                            "tx done slot %td\n", e - skge->tx_ring.start);
-
-               dev_kfree_skb(e->skb);
-       }
 }
 
 /* Free all buffers in transmit ring */
@@ -2847,10 +2882,15 @@ static void skge_tx_clean(struct net_device *dev)
 
        for (e = skge->tx_ring.to_clean; e != skge->tx_ring.to_use; e = e->next) {
                struct skge_tx_desc *td = e->desc;
-               skge_tx_free(skge, e, td->control);
+
+               skge_tx_unmap(skge->hw->pdev, e, td->control);
+
+               if (td->control & BMU_EOF)
+                       dev_kfree_skb(e->skb);
                td->control = 0;
        }
 
+       netdev_reset_queue(dev);
        skge->tx_ring.to_clean = e;
 }
 
@@ -3059,13 +3099,17 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
                if (!nskb)
                        goto resubmit;
 
+               if (unlikely(skge_rx_setup(skge->hw->pdev, e, nskb, skge->rx_buf_size))) {
+                       dev_kfree_skb(nskb);
+                       goto resubmit;
+               }
+
                pci_unmap_single(skge->hw->pdev,
                                 dma_unmap_addr(e, mapaddr),
                                 dma_unmap_len(e, maplen),
                                 PCI_DMA_FROMDEVICE);
                skb = e->skb;
                prefetch(skb->data);
-               skge_rx_setup(skge, e, nskb, skge->rx_buf_size);
        }
 
        skb_put(skb, len);
@@ -3111,6 +3155,7 @@ static void skge_tx_done(struct net_device *dev)
        struct skge_port *skge = netdev_priv(dev);
        struct skge_ring *ring = &skge->tx_ring;
        struct skge_element *e;
+       unsigned int bytes_compl = 0, pkts_compl = 0;
 
        skge_write8(skge->hw, Q_ADDR(txqaddr[skge->port], Q_CSR), CSR_IRQ_CL_F);
 
@@ -3120,8 +3165,20 @@ static void skge_tx_done(struct net_device *dev)
                if (control & BMU_OWN)
                        break;
 
-               skge_tx_free(skge, e, control);
+               skge_tx_unmap(skge->hw->pdev, e, control);
+
+               if (control & BMU_EOF) {
+                       netif_printk(skge, tx_done, KERN_DEBUG, skge->netdev,
+                                    "tx done slot %td\n",
+                                    e - skge->tx_ring.start);
+
+                       pkts_compl++;
+                       bytes_compl += e->skb->len;
+
+                       dev_kfree_skb(e->skb);
+               }
        }
+       netdev_completed_queue(dev, pkts_compl, bytes_compl);
        skge->tx_ring.to_clean = e;
 
        /* Can run lockless until we need to synchronize to restart queue. */
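
The skge patch does three related things: every DMA mapping is now checked with pci_dma_mapping_error()/dma_mapping_error() and unwound on failure, 64-bit addresses are split with lower_32_bits()/upper_32_bits() instead of shifting a u64, and the transmit path reports packets and bytes through netdev_sent_queue()/netdev_completed_queue() for byte queue limits. A compact user-space model of the map-then-unwind part — the mapping function here is a fake that can be forced to fail:

#include <stdio.h>
#include <stdbool.h>

#define NFRAGS 4

/* Fake DMA map: returns 0 on failure, a nonzero handle on success. */
static unsigned long fake_map(int i, int fail_at)
{
	return (i == fail_at) ? 0 : 0x1000ul + i;
}

static void fake_unmap(unsigned long handle)
{
	printf("unmapped %#lx\n", handle);
}

/* Map a header plus fragments; on any failure, unwind what was mapped
 * and drop the packet, as the new skge_xmit_frame error path does. */
static bool map_skb(int fail_at)
{
	unsigned long map[NFRAGS];
	int i;

	for (i = 0; i < NFRAGS; i++) {
		map[i] = fake_map(i, fail_at);
		if (!map[i])
			goto unwind;
	}
	return true;

unwind:
	while (--i >= 0)
		fake_unmap(map[i]);
	printf("tx mapping error, packet dropped\n");
	return false;
}

int main(void)
{
	map_skb(2);   /* fail on the third mapping and watch the unwind */
	return 0;
}
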
index 978f593094c0402baa207d667c1c5b65a852f7c9..405e6ac3faf617c0beeb0778167a1f44bb7509d0 100644 (file)
@@ -1247,6 +1247,7 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
        u32 reply;
        u32 slave_status = 0;
        u8 is_going_down = 0;
+       int i;
 
        slave_state[slave].comm_toggle ^= 1;
        reply = (u32) slave_state[slave].comm_toggle << 31;
@@ -1258,6 +1259,10 @@ static void mlx4_master_do_cmd(struct mlx4_dev *dev, int slave, u8 cmd,
        if (cmd == MLX4_COMM_CMD_RESET) {
                mlx4_warn(dev, "Received reset from slave:%d\n", slave);
                slave_state[slave].active = false;
+               for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i) {
+                               slave_state[slave].event_eq[i].eqn = -1;
+                               slave_state[slave].event_eq[i].token = 0;
+               }
                /*check if we are in the middle of FLR process,
                if so return "retry" status to the slave*/
                if (MLX4_COMM_CMD_FLR == slave_state[slave].last_cmd) {
@@ -1452,7 +1457,7 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_state *s_state;
-       int i, err, port;
+       int i, j, err, port;
 
        priv->mfunc.vhcr = dma_alloc_coherent(&(dev->pdev->dev), PAGE_SIZE,
                                            &priv->mfunc.vhcr_dma,
@@ -1485,6 +1490,8 @@ int mlx4_multi_func_init(struct mlx4_dev *dev)
                for (i = 0; i < dev->num_slaves; ++i) {
                        s_state = &priv->mfunc.master.slave_state[i];
                        s_state->last_cmd = MLX4_COMM_CMD_RESET;
+                       for (j = 0; j < MLX4_EVENT_TYPES_NUM; ++j)
+                               s_state->event_eq[j].eqn = -1;
                        __raw_writel((__force u32) 0,
                                     &priv->mfunc.comm[i].slave_write);
                        __raw_writel((__force u32) 0,
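
Both mlx4 hunks above initialise the per-slave event_eq table to a sentinel EQ number of -1: once when the master sets up multi-function state, and again whenever a slave sends a reset, so stale EQ mappings from a previous slave lifetime can never be reused. The pattern reduces to a sentinel-reset helper along these lines (structure names simplified, not the driver's types):

#include <stdio.h>

#define EVENT_TYPES 8

struct slave_event_eq { int eqn; unsigned int token; };

struct slave_state {
	struct slave_event_eq event_eq[EVENT_TYPES];
};

/* Called at init and again on every slave reset. */
static void slave_event_eq_reset(struct slave_state *s)
{
	for (int i = 0; i < EVENT_TYPES; i++) {
		s->event_eq[i].eqn = -1;   /* -1 means "no EQ mapped" */
		s->event_eq[i].token = 0;
	}
}

int main(void)
{
	struct slave_state s = { .event_eq[3] = { .eqn = 5, .token = 42 } };

	slave_event_eq_reset(&s);
	printf("eqn after reset: %d\n", s.event_eq[3].eqn);
	return 0;
}
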
index 475f9d6af9552b1c31c29bbcabc112653499d89e..7e64033d7de39ed7723c162c75e24cf49cd3d6fa 100644 (file)
@@ -96,7 +96,7 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
 static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num)
 {
-       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, cq_num, 0,
+       return mlx4_cmd(dev, mailbox->dma, cq_num, 0,
                        MLX4_CMD_SW2HW_CQ, MLX4_CMD_TIME_CLASS_A,
                        MLX4_CMD_WRAPPED);
 }
@@ -111,7 +111,7 @@ static int mlx4_MODIFY_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox
 static int mlx4_HW2SW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int cq_num)
 {
-       return mlx4_cmd_box(dev, dev->caps.function, mailbox ? mailbox->dma : 0,
+       return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0,
                            cq_num, mailbox ? 0 : 1, MLX4_CMD_HW2SW_CQ,
                            MLX4_CMD_TIME_CLASS_A, MLX4_CMD_WRAPPED);
 }
index 7dbc6a2307798a164b0e7f7520bcbfcb88634aa0..70346fd7f9c480d20bfd70ad90079fc99ae30181 100644 (file)
@@ -183,10 +183,11 @@ static int mlx4_en_set_wol(struct net_device *netdev,
 static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
+       int bit_count = hweight64(priv->stats_bitmap);
 
        switch (sset) {
        case ETH_SS_STATS:
-               return NUM_ALL_STATS +
+               return (priv->stats_bitmap ? bit_count : NUM_ALL_STATS) +
                        (priv->tx_ring_num + priv->rx_ring_num) * 2;
        case ETH_SS_TEST:
                return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
@@ -201,14 +202,34 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        int index = 0;
-       int i;
+       int i, j = 0;
 
        spin_lock_bh(&priv->stats_lock);
 
-       for (i = 0; i < NUM_MAIN_STATS; i++)
-               data[index++] = ((unsigned long *) &priv->stats)[i];
-       for (i = 0; i < NUM_PORT_STATS; i++)
-               data[index++] = ((unsigned long *) &priv->port_stats)[i];
+       if (!(priv->stats_bitmap)) {
+               for (i = 0; i < NUM_MAIN_STATS; i++)
+                       data[index++] =
+                               ((unsigned long *) &priv->stats)[i];
+               for (i = 0; i < NUM_PORT_STATS; i++)
+                       data[index++] =
+                               ((unsigned long *) &priv->port_stats)[i];
+               for (i = 0; i < NUM_PKT_STATS; i++)
+                       data[index++] =
+                               ((unsigned long *) &priv->pkstats)[i];
+       } else {
+               for (i = 0; i < NUM_MAIN_STATS; i++) {
+                       if ((priv->stats_bitmap >> j) & 1)
+                               data[index++] =
+                               ((unsigned long *) &priv->stats)[i];
+                       j++;
+               }
+               for (i = 0; i < NUM_PORT_STATS; i++) {
+                       if ((priv->stats_bitmap >> j) & 1)
+                               data[index++] =
+                               ((unsigned long *) &priv->port_stats)[i];
+                       j++;
+               }
+       }
        for (i = 0; i < priv->tx_ring_num; i++) {
                data[index++] = priv->tx_ring[i].packets;
                data[index++] = priv->tx_ring[i].bytes;
@@ -217,8 +238,6 @@ static void mlx4_en_get_ethtool_stats(struct net_device *dev,
                data[index++] = priv->rx_ring[i].packets;
                data[index++] = priv->rx_ring[i].bytes;
        }
-       for (i = 0; i < NUM_PKT_STATS; i++)
-               data[index++] = ((unsigned long *) &priv->pkstats)[i];
        spin_unlock_bh(&priv->stats_lock);
 
 }
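
mlx4_en_get_ethtool_stats (and get_sset_count/get_strings below) now consults priv->stats_bitmap: when the bitmap is zero every counter is exported as before, otherwise only counters whose bit is set are copied out, with a running bit index walking the combined stat layout. A condensed model of that filtering — the names and the sample stat layout are invented:

#include <stdio.h>
#include <stdint.h>

#define NSTATS 6

static const char *names[NSTATS] = {
	"rx_packets", "tx_packets", "rx_bytes", "tx_bytes",
	"rx_dropped", "tx_dropped",
};

/* Copy out either everything (bitmap == 0) or only the selected counters. */
static int export_stats(uint64_t bitmap, const unsigned long *src,
			unsigned long *out)
{
	int index = 0;

	for (int j = 0; j < NSTATS; j++) {
		if (!bitmap || ((bitmap >> j) & 1)) {
			printf("%-12s = %lu\n", names[j], src[j]);
			out[index++] = src[j];
		}
	}
	return index;   /* how many entries ethtool should expect */
}

int main(void)
{
	unsigned long src[NSTATS] = { 10, 20, 1000, 2000, 1, 2 };
	unsigned long out[NSTATS];

	export_stats(0x05, src, out);   /* only bits 0 and 2 selected */
	return 0;
}
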
@@ -247,11 +266,29 @@ static void mlx4_en_get_strings(struct net_device *dev,
 
        case ETH_SS_STATS:
                /* Add main counters */
-               for (i = 0; i < NUM_MAIN_STATS; i++)
-                       strcpy(data + (index++) * ETH_GSTRING_LEN, main_strings[i]);
-               for (i = 0; i< NUM_PORT_STATS; i++)
-                       strcpy(data + (index++) * ETH_GSTRING_LEN,
-                       main_strings[i + NUM_MAIN_STATS]);
+               if (!priv->stats_bitmap) {
+                       for (i = 0; i < NUM_MAIN_STATS; i++)
+                               strcpy(data + (index++) * ETH_GSTRING_LEN,
+                                       main_strings[i]);
+                       for (i = 0; i < NUM_PORT_STATS; i++)
+                               strcpy(data + (index++) * ETH_GSTRING_LEN,
+                                       main_strings[i +
+                                       NUM_MAIN_STATS]);
+                       for (i = 0; i < NUM_PKT_STATS; i++)
+                               strcpy(data + (index++) * ETH_GSTRING_LEN,
+                                       main_strings[i +
+                                       NUM_MAIN_STATS +
+                                       NUM_PORT_STATS]);
+               } else
+                       for (i = 0; i < NUM_MAIN_STATS + NUM_PORT_STATS; i++) {
+                               if ((priv->stats_bitmap >> i) & 1) {
+                                       strcpy(data +
+                                              (index++) * ETH_GSTRING_LEN,
+                                              main_strings[i]);
+                               }
+                               if (!(priv->stats_bitmap >> i))
+                                       break;
+                       }
                for (i = 0; i < priv->tx_ring_num; i++) {
                        sprintf(data + (index++) * ETH_GSTRING_LEN,
                                "tx%d_packets", i);
@@ -264,9 +301,6 @@ static void mlx4_en_get_strings(struct net_device *dev,
                        sprintf(data + (index++) * ETH_GSTRING_LEN,
                                "rx%d_bytes", i);
                }
-               for (i = 0; i< NUM_PKT_STATS; i++)
-                       strcpy(data + (index++) * ETH_GSTRING_LEN,
-                       main_strings[i + NUM_MAIN_STATS + NUM_PORT_STATS]);
                break;
        }
 }
@@ -479,6 +513,95 @@ static void mlx4_en_get_ringparam(struct net_device *dev,
        param->tx_pending = priv->tx_ring[0].size;
 }
 
+static u32 mlx4_en_get_rxfh_indir_size(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+
+       return priv->rx_ring_num;
+}
+
+static int mlx4_en_get_rxfh_indir(struct net_device *dev, u32 *ring_index)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_rss_map *rss_map = &priv->rss_map;
+       int rss_rings;
+       size_t n = priv->rx_ring_num;
+       int err = 0;
+
+       rss_rings = priv->prof->rss_rings ?: priv->rx_ring_num;
+
+       while (n--) {
+               ring_index[n] = rss_map->qps[n % rss_rings].qpn -
+                       rss_map->base_qpn;
+       }
+
+       return err;
+}
+
+static int mlx4_en_set_rxfh_indir(struct net_device *dev,
+               const u32 *ring_index)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int port_up = 0;
+       int err = 0;
+       int i;
+       int rss_rings = 0;
+
+       /* Calculate RSS table size and make sure flows are spread evenly
+        * between rings
+        */
+       for (i = 0; i < priv->rx_ring_num; i++) {
+               if (i > 0 && !ring_index[i] && !rss_rings)
+                       rss_rings = i;
+
+               if (ring_index[i] != (i % (rss_rings ?: priv->rx_ring_num)))
+                       return -EINVAL;
+       }
+
+       if (!rss_rings)
+               rss_rings = priv->rx_ring_num;
+
+       /* RSS table size must be an order of 2 */
+       if (!is_power_of_2(rss_rings))
+               return -EINVAL;
+
+       mutex_lock(&mdev->state_lock);
+       if (priv->port_up) {
+               port_up = 1;
+               mlx4_en_stop_port(dev);
+       }
+
+       priv->prof->rss_rings = rss_rings;
+
+       if (port_up) {
+               err = mlx4_en_start_port(dev);
+               if (err)
+                       en_err(priv, "Failed starting port\n");
+       }
+
+       mutex_unlock(&mdev->state_lock);
+       return err;
+}
+
+static int mlx4_en_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+                            u32 *rule_locs)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       int err = 0;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_GRXRINGS:
+               cmd->data = priv->rx_ring_num;
+               break;
+       default:
+               err = -EOPNOTSUPP;
+               break;
+       }
+
+       return err;
+}
+
 const struct ethtool_ops mlx4_en_ethtool_ops = {
        .get_drvinfo = mlx4_en_get_drvinfo,
        .get_settings = mlx4_en_get_settings,
@@ -498,6 +621,10 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
        .set_pauseparam = mlx4_en_set_pauseparam,
        .get_ringparam = mlx4_en_get_ringparam,
        .set_ringparam = mlx4_en_set_ringparam,
+       .get_rxnfc = mlx4_en_get_rxnfc,
+       .get_rxfh_indir_size = mlx4_en_get_rxfh_indir_size,
+       .get_rxfh_indir = mlx4_en_get_rxfh_indir,
+       .set_rxfh_indir = mlx4_en_set_rxfh_indir,
 };
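
The new mlx4_en get/set_rxfh_indir hooks expose the RSS indirection table as a plain round-robin over the first rss_rings receive rings, so the set path only accepts tables of the form table[i] == i % rss_rings with rss_rings a power of two (and at most rx_ring_num). A small check mirroring that validation logic — stand-alone, not the driver code:

#include <stdio.h>
#include <stdbool.h>

static bool is_pow2(unsigned int v) { return v && !(v & (v - 1)); }

/* Accept only a round-robin table over a power-of-two prefix of the rings;
 * return the detected ring count, or -1 if the table is not of that form. */
static int validate_indir(const unsigned int *table, unsigned int n_entries)
{
	unsigned int i, rss_rings = 0;

	for (i = 0; i < n_entries; i++) {
		if (i > 0 && !table[i] && !rss_rings)
			rss_rings = i;              /* table wrapped back to 0 */
		if (table[i] != i % (rss_rings ? rss_rings : n_entries))
			return -1;
	}
	if (!rss_rings)
		rss_rings = n_entries;
	return is_pow2(rss_rings) ? (int)rss_rings : -1;
}

int main(void)
{
	unsigned int ok[8]  = { 0, 1, 2, 3, 0, 1, 2, 3 };  /* 4 RSS rings */
	unsigned int bad[8] = { 0, 1, 2, 0, 1, 2, 0, 1 };  /* 3 is not a power of two */

	printf("ok:  rss_rings = %d\n", validate_indir(ok, 8));
	printf("bad: rss_rings = %d\n", validate_indir(bad, 8));
	return 0;
}
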
 
 
index a06096fcc0b8bed5428b2b084fc2a66e94c10ad9..2097a7d3c5b82dabb482d9b670feef0ec1733d19 100644 (file)
@@ -62,10 +62,6 @@ static const char mlx4_en_version[] =
  * Device scope module parameters
  */
 
-
-/* Enable RSS TCP traffic */
-MLX4_EN_PARM_INT(tcp_rss, 1,
-                "Enable RSS for incomming TCP traffic or disabled (0)");
 /* Enable RSS UDP traffic */
 MLX4_EN_PARM_INT(udp_rss, 1,
                 "Enable RSS for incomming UDP traffic or disabled (0)");
@@ -104,7 +100,6 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
        struct mlx4_en_profile *params = &mdev->profile;
        int i;
 
-       params->tcp_rss = tcp_rss;
        params->udp_rss = udp_rss;
        if (params->udp_rss && !(mdev->dev->caps.flags
                                        & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
@@ -120,6 +115,7 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
                params->prof[i].rx_ring_size = MLX4_EN_DEF_RX_RING_SIZE;
                params->prof[i].tx_ring_num = MLX4_EN_NUM_TX_RINGS +
                        (!!pfcrx) * MLX4_EN_NUM_PPP_RINGS;
+               params->prof[i].rss_rings = 0;
        }
 
        return 0;
index 72fa807b69ce1581ac62dea599ce121e19e74f9c..467ae5824875c9009b94527fdeef7ef4d4992d9d 100644 (file)
@@ -702,6 +702,8 @@ int mlx4_en_start_port(struct net_device *dev)
        /* Schedule multicast task to populate multicast list */
        queue_work(mdev->workqueue, &priv->mcast_task);
 
+       mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
+
        priv->port_up = true;
        netif_tx_start_all_queues(dev);
        return 0;
@@ -807,38 +809,50 @@ static void mlx4_en_restart(struct work_struct *work)
        mutex_unlock(&mdev->state_lock);
 }
 
-
-static int mlx4_en_open(struct net_device *dev)
+static void mlx4_en_clear_stats(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        int i;
-       int err = 0;
-
-       mutex_lock(&mdev->state_lock);
-
-       if (!mdev->device_up) {
-               en_err(priv, "Cannot open - device down/disabled\n");
-               err = -EBUSY;
-               goto out;
-       }
 
-       /* Reset HW statistics and performance counters */
        if (mlx4_en_DUMP_ETH_STATS(mdev, priv->port, 1))
                en_dbg(HW, priv, "Failed dumping statistics\n");
 
        memset(&priv->stats, 0, sizeof(priv->stats));
        memset(&priv->pstats, 0, sizeof(priv->pstats));
+       memset(&priv->pkstats, 0, sizeof(priv->pkstats));
+       memset(&priv->port_stats, 0, sizeof(priv->port_stats));
 
        for (i = 0; i < priv->tx_ring_num; i++) {
                priv->tx_ring[i].bytes = 0;
                priv->tx_ring[i].packets = 0;
+               priv->tx_ring[i].tx_csum = 0;
        }
        for (i = 0; i < priv->rx_ring_num; i++) {
                priv->rx_ring[i].bytes = 0;
                priv->rx_ring[i].packets = 0;
+               priv->rx_ring[i].csum_ok = 0;
+               priv->rx_ring[i].csum_none = 0;
+       }
+}
+
+static int mlx4_en_open(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       struct mlx4_en_dev *mdev = priv->mdev;
+       int err = 0;
+
+       mutex_lock(&mdev->state_lock);
+
+       if (!mdev->device_up) {
+               en_err(priv, "Cannot open - device down/disabled\n");
+               err = -EBUSY;
+               goto out;
        }
 
+       /* Reset HW statistics and SW counters */
+       mlx4_en_clear_stats(dev);
+
        err = mlx4_en_start_port(dev);
        if (err)
                en_err(priv, "Failed starting port:%d\n", priv->port);
index e8d6ad2dce0afaaa2ffd27017238cd0482334039..971d4b6b8dfee21aed00724fd3af8a36fc8f6608 100644 (file)
@@ -853,6 +853,7 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
        struct mlx4_en_rss_map *rss_map = &priv->rss_map;
        struct mlx4_qp_context context;
        struct mlx4_rss_context *rss_context;
+       int rss_rings;
        void *ptr;
        u8 rss_mask = (MLX4_RSS_IPV4 | MLX4_RSS_TCP_IPV4 | MLX4_RSS_IPV6 |
                        MLX4_RSS_TCP_IPV6);
@@ -893,10 +894,15 @@ int mlx4_en_config_rss_steer(struct mlx4_en_priv *priv)
        mlx4_en_fill_qp_context(priv, 0, 0, 0, 1, priv->base_qpn,
                                priv->rx_ring[0].cqn, &context);
 
+       if (!priv->prof->rss_rings || priv->prof->rss_rings > priv->rx_ring_num)
+               rss_rings = priv->rx_ring_num;
+       else
+               rss_rings = priv->prof->rss_rings;
+
        ptr = ((void *) &context) + offsetof(struct mlx4_qp_context, pri_path)
                                        + MLX4_RSS_OFFSET_IN_QPC_PRI_PATH;
        rss_context = ptr;
-       rss_context->base_qpn = cpu_to_be32(ilog2(priv->rx_ring_num) << 24 |
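+       /* The top byte of base_qpn carries log2 of the RSS table size */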
+       rss_context->base_qpn = cpu_to_be32(ilog2(rss_rings) << 24 |
                                            (rss_map->base_qpn));
        rss_context->default_qpn = cpu_to_be32(rss_map->base_qpn);
        if (priv->mdev->profile.udp_rss) {
index 1e9b55eb7217a8c725c8e39b2880855e7f88c04a..55d7bd4e210aadd6ffa0cb5dd81f012a0b71a498 100644 (file)
@@ -513,25 +513,22 @@ int mlx4_MAP_EQ_wrapper(struct mlx4_dev *dev, int slave,
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_slave_event_eq_info *event_eq =
-               &priv->mfunc.master.slave_state[slave].event_eq;
+               priv->mfunc.master.slave_state[slave].event_eq;
        u32 in_modifier = vhcr->in_modifier;
        u32 eqn = in_modifier & 0x1FF;
        u64 in_param =  vhcr->in_param;
        int err = 0;
+       int i;
 
        if (slave == dev->caps.function)
                err = mlx4_cmd(dev, in_param, (in_modifier & 0x80000000) | eqn,
                               0, MLX4_CMD_MAP_EQ, MLX4_CMD_TIME_CLASS_B,
                               MLX4_CMD_NATIVE);
-       if (!err) {
-               if (in_modifier >> 31) {
-                       /* unmap */
-                       event_eq->event_type &= ~in_param;
-               } else {
-                       event_eq->eqn = eqn;
-                       event_eq->event_type = in_param;
-               }
-       }
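+       /* Record, for each event type selected in in_param, which EQ the
+        * slave wants it delivered on; the top bit of in_modifier requests
+        * an unmap, stored as eqn = -1.
+        */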
+       if (!err)
+               for (i = 0; i < MLX4_EVENT_TYPES_NUM; ++i)
+                       if (in_param & (1LL << i))
+                               event_eq[i].eqn = in_modifier >> 31 ? -1 : eqn;
+
        return err;
 }
 
@@ -546,7 +543,7 @@ static int mlx4_MAP_EQ(struct mlx4_dev *dev, u64 event_mask, int unmap,
 static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
 {
-       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, eq_num, 0,
+       return mlx4_cmd(dev, mailbox->dma, eq_num, 0,
                        MLX4_CMD_SW2HW_EQ, MLX4_CMD_TIME_CLASS_A,
                        MLX4_CMD_WRAPPED);
 }
@@ -554,7 +551,7 @@ static int mlx4_SW2HW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
 static int mlx4_HW2SW_EQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                         int eq_num)
 {
-       return mlx4_cmd_box(dev, dev->caps.function, mailbox->dma, eq_num,
+       return mlx4_cmd_box(dev, 0, mailbox->dma, eq_num,
                            0, MLX4_CMD_HW2SW_EQ, MLX4_CMD_TIME_CLASS_A,
                            MLX4_CMD_WRAPPED);
 }
index a424a19280cc466eefba7ab08236f6306e505e12..8a21e10952ea23260ad503cee702ed31437ff1e1 100644 (file)
@@ -158,7 +158,6 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
 
 #define QUERY_FUNC_CAP_FLAGS_OFFSET            0x0
 #define QUERY_FUNC_CAP_NUM_PORTS_OFFSET                0x1
-#define QUERY_FUNC_CAP_FUNCTION_OFFSET         0x3
 #define QUERY_FUNC_CAP_PF_BHVR_OFFSET          0x4
 #define QUERY_FUNC_CAP_QP_QUOTA_OFFSET         0x10
 #define QUERY_FUNC_CAP_CQ_QUOTA_OFFSET         0x14
@@ -182,9 +181,6 @@ int mlx4_QUERY_FUNC_CAP_wrapper(struct mlx4_dev *dev, int slave,
                field = 1 << 7; /* enable only ethernet interface */
                MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FLAGS_OFFSET);
 
-               field = slave;
-               MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_FUNCTION_OFFSET);
-
                field = dev->caps.num_ports;
                MLX4_PUT(outbox->buf, field, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
 
@@ -249,9 +245,6 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, struct mlx4_func_cap *func_cap)
                goto out;
        }
 
-       MLX4_GET(field, outbox, QUERY_FUNC_CAP_FUNCTION_OFFSET);
-       func_cap->function = field;
-
        MLX4_GET(field, outbox, QUERY_FUNC_CAP_NUM_PORTS_OFFSET);
        func_cap->num_ports = field;
 
index 119e0cc9fab3d852259e7f4df0651382511f9d68..e1a5fa56bcbc584e4a525dee3d1567a06ebdde8f 100644 (file)
@@ -119,7 +119,6 @@ struct mlx4_dev_cap {
 };
 
 struct mlx4_func_cap {
-       u8      function;
        u8      num_ports;
        u8      flags;
        u32     pf_context_behaviour;
index 6bb62c580e2d50eab48565e170650b25efac00c9..678558b502fc31e506633e0713805f58c8676108 100644 (file)
@@ -108,7 +108,7 @@ static struct mlx4_profile default_profile = {
        .num_cq         = 1 << 16,
        .num_mcg        = 1 << 13,
        .num_mpt        = 1 << 19,
-       .num_mtt        = 1 << 20,
+       .num_mtt        = 1 << 20, /* It is really num mtt segments */
 };
 
 static int log_num_mac = 7;
@@ -471,7 +471,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
                return -ENOSYS;
        }
 
-       dev->caps.function              = func_cap.function;
        dev->caps.num_ports             = func_cap.num_ports;
        dev->caps.num_qps               = func_cap.qp_quota;
        dev->caps.num_srqs              = func_cap.srq_quota;
index a80121a2b5195cf245bd16842f3755c822646781..c92269f8c0570a5b7e6d374beb7ffb9f48e79877 100644 (file)
@@ -388,9 +388,8 @@ struct mlx4_slave_eqe {
 };
 
 struct mlx4_slave_event_eq_info {
-       u32 eqn;
+       int eqn;
        u16 token;
-       u64 event_type;
 };
 
 struct mlx4_profile {
@@ -449,6 +448,8 @@ struct mlx4_steer_index {
        struct list_head duplicates;
 };
 
+#define MLX4_EVENT_TYPES_NUM 64
+
 struct mlx4_slave_state {
        u8 comm_toggle;
        u8 last_cmd;
@@ -461,7 +462,8 @@ struct mlx4_slave_state {
        struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
        struct list_head mcast_filters[MLX4_MAX_PORTS + 1];
        struct mlx4_vlan_fltr *vlan_filter[MLX4_MAX_PORTS + 1];
-       struct mlx4_slave_event_eq_info event_eq;
+       /* event type to eq number lookup */
+       struct mlx4_slave_event_eq_info event_eq[MLX4_EVENT_TYPES_NUM];
        u16 eq_pi;
        u16 eq_ci;
        spinlock_t lock;
index f2a8e65f5f88a4df9ff9861bddfbfeae458d6ec6..35f08840813c2b8b679851d8a63271011674b598 100644 (file)
@@ -325,11 +325,11 @@ struct mlx4_en_port_profile {
        u8 rx_ppp;
        u8 tx_pause;
        u8 tx_ppp;
+       int rss_rings;
 };
 
 struct mlx4_en_profile {
        int rss_xor;
-       int tcp_rss;
        int udp_rss;
        u8 rss_mask;
        u32 active_ports;
@@ -476,6 +476,7 @@ struct mlx4_en_priv {
        struct mlx4_en_perf_stats pstats;
        struct mlx4_en_pkt_stats pkstats;
        struct mlx4_en_port_stats port_stats;
+       u64 stats_bitmap;
        char *mc_addrs;
        int mc_addrs_cnt;
        struct mlx4_en_stat_out_mbox hw_stats;
index 01df5567e16e48398c2a119d2c39de060934e712..8deeef98280c9a31358c9c0f8343c32f3d78329f 100644 (file)
@@ -291,7 +291,7 @@ static u32 key_to_hw_index(u32 key)
 static int mlx4_SW2HW_MPT(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int mpt_index)
 {
-       return mlx4_cmd(dev, mailbox->dma | dev->caps.function , mpt_index,
+       return mlx4_cmd(dev, mailbox->dma, mpt_index,
                        0, MLX4_CMD_SW2HW_MPT, MLX4_CMD_TIME_CLASS_B,
                        MLX4_CMD_WRAPPED);
 }
index 5c9a54df17aba89d233926d9f37710ac84c0e504..db4746d0dca7e0edb91889446606a5bd2b96bbda 100644 (file)
@@ -52,8 +52,7 @@ int mlx4_pd_alloc(struct mlx4_dev *dev, u32 *pdn)
        *pdn = mlx4_bitmap_alloc(&priv->pd_bitmap);
        if (*pdn == -1)
                return -ENOMEM;
-       if (mlx4_is_mfunc(dev))
-               *pdn |= (dev->caps.function + 1) << NOT_MASKED_PD_BITS;
+
        return 0;
 }
 EXPORT_SYMBOL_GPL(mlx4_pd_alloc);
index 88b52e547524efaee71ffc67f00b4f44e7844f30..f44ae555bf43906bfba98cb25838542f5052c25d 100644 (file)
 #define MLX4_VLAN_VALID                (1u << 31)
 #define MLX4_VLAN_MASK         0xfff
 
+#define MLX4_STATS_TRAFFIC_COUNTERS_MASK       0xfULL
+#define MLX4_STATS_TRAFFIC_DROPS_MASK          0xc0ULL
+#define MLX4_STATS_ERROR_COUNTERS_MASK         0x1ffc30ULL
+#define MLX4_STATS_PORT_COUNTERS_MASK          0x1fe00000ULL
+
 void mlx4_init_mac_table(struct mlx4_dev *dev, struct mlx4_mac_table *table)
 {
        int i;
@@ -898,6 +903,24 @@ int mlx4_DUMP_ETH_STATS_wrapper(struct mlx4_dev *dev, int slave,
                                struct mlx4_cmd_mailbox *outbox,
                                struct mlx4_cmd_info *cmd)
 {
+       if (slave != dev->caps.function)
+               return 0;
        return mlx4_common_dump_eth_stats(dev, slave,
                                          vhcr->in_modifier, outbox);
 }
+
+void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap)
+{
+       if (!mlx4_is_mfunc(dev)) {
+               *stats_bitmap = 0;
+               return;
+       }
+
+       *stats_bitmap = (MLX4_STATS_TRAFFIC_COUNTERS_MASK |
+                        MLX4_STATS_TRAFFIC_DROPS_MASK |
+                        MLX4_STATS_PORT_COUNTERS_MASK);
+
+       if (mlx4_is_master(dev))
+               *stats_bitmap |= MLX4_STATS_ERROR_COUNTERS_MASK;
+}
+EXPORT_SYMBOL(mlx4_set_stats_bitmap);
index 66f91ca7a7c6c277778e58d5b4a59b749ab9becf..1129677daa62608e1ab18bfcd5da8dc20abf8101 100644 (file)
@@ -110,7 +110,7 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
        profile[MLX4_RES_EQ].num      = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
        profile[MLX4_RES_DMPT].num    = request->num_mpt;
        profile[MLX4_RES_CMPT].num    = MLX4_NUM_CMPTS;
-       profile[MLX4_RES_MTT].num     = request->num_mtt;
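+       /* request->num_mtt counts MTT segments; scale to individual MTT entries */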
+       profile[MLX4_RES_MTT].num     = request->num_mtt * (1 << log_mtts_per_seg);
        profile[MLX4_RES_MCG].num     = request->num_mcg;
 
        for (i = 0; i < MLX4_RES_NUM; ++i) {
index 6b03ac8b9002bdb530b72969ccb0945f19f49c92..738f950a1ce59e69c6297517882913d90f22f784 100644 (file)
@@ -162,7 +162,7 @@ static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
        ((struct mlx4_qp_context *) (mailbox->buf + 8))->local_qpn =
                cpu_to_be32(qp->qpn);
 
-       ret = mlx4_cmd(dev, mailbox->dma | dev->caps.function,
+       ret = mlx4_cmd(dev, mailbox->dma,
                       qp->qpn | (!!sqd_event << 31),
                       new_state == MLX4_QP_STATE_RST ? 2 : 0,
                       op[cur_state][new_state], MLX4_CMD_TIME_CLASS_C, native);
index ed20751a057dde297a64cf25e8edc8d193ac0cb0..dcd819bfb2f05968d1d567bc36d43938c2a4fc91 100644 (file)
@@ -1561,11 +1561,6 @@ static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
        return be32_to_cpu(mpt->mtt_sz);
 }
 
-static int mr_get_pdn(struct mlx4_mpt_entry *mpt)
-{
-       return be32_to_cpu(mpt->pd_flags) & 0xffffff;
-}
-
 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
 {
        return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
@@ -1602,16 +1597,6 @@ static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
        return total_pages;
 }
 
-static int qp_get_pdn(struct mlx4_qp_context *qpc)
-{
-       return be32_to_cpu(qpc->pd) & 0xffffff;
-}
-
-static int pdn2slave(int pdn)
-{
-       return (pdn >> NOT_MASKED_PD_BITS) - 1;
-}
-
 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
                           int size, struct res_mtt *mtt)
 {
@@ -1656,11 +1641,6 @@ int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
                mpt->mtt = mtt;
        }
 
-       if (pdn2slave(mr_get_pdn(inbox->buf)) != slave) {
-               err = -EPERM;
-               goto ex_put;
-       }
-
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put;
@@ -1792,11 +1772,6 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
        if (err)
                goto ex_put_mtt;
 
-       if (pdn2slave(qp_get_pdn(qpc)) != slave) {
-               err = -EPERM;
-               goto ex_put_mtt;
-       }
-
        err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
        if (err)
                goto ex_put_mtt;
@@ -2048,10 +2023,10 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
        if (!priv->mfunc.master.slave_state)
                return -EINVAL;
 
-       event_eq = &priv->mfunc.master.slave_state[slave].event_eq;
+       event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
 
        /* Create the event only if the slave is registered */
-       if ((event_eq->event_type & (1 << eqe->type)) == 0)
+       if (event_eq->eqn < 0)
                return 0;
 
        mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
@@ -2289,11 +2264,6 @@ ex_put:
        return err;
 }
 
-static int srq_get_pdn(struct mlx4_srq_context *srqc)
-{
-       return be32_to_cpu(srqc->pd) & 0xffffff;
-}
-
 static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
 {
        int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
@@ -2333,11 +2303,6 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
        if (err)
                goto ex_put_mtt;
 
-       if (pdn2slave(srq_get_pdn(srqc)) != slave) {
-               err = -EPERM;
-               goto ex_put_mtt;
-       }
-
        err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
        if (err)
                goto ex_put_mtt;
index 2823fffc6383989c985e54884b826cdf69ca50fc..feda6c00829f391e0d5bd822db970c5d6a2f7ca2 100644 (file)
@@ -67,7 +67,7 @@ void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
 static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
                          int srq_num)
 {
-       return mlx4_cmd(dev, mailbox->dma | dev->caps.function, srq_num, 0,
+       return mlx4_cmd(dev, mailbox->dma, srq_num, 0,
                        MLX4_CMD_SW2HW_SRQ, MLX4_CMD_TIME_CLASS_A,
                        MLX4_CMD_WRAPPED);
 }
index 75ec87a822b8e2d1ee12486bc6cfcbf0f3e0a248..0a85690a1321ecc17cf24f2398f496d60dc8d175 100644 (file)
@@ -459,7 +459,7 @@ static int ks8842_tx_frame_dma(struct sk_buff *skb, struct net_device *netdev)
                sg_dma_len(&ctl->sg) += 4 - sg_dma_len(&ctl->sg) % 4;
 
        ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
-               &ctl->sg, 1, DMA_TO_DEVICE,
+               &ctl->sg, 1, DMA_MEM_TO_DEV,
                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
        if (!ctl->adesc)
                return NETDEV_TX_BUSY;
@@ -571,7 +571,7 @@ static int __ks8842_start_new_rx_dma(struct net_device *netdev)
                sg_dma_len(sg) = DMA_BUFFER_SIZE;
 
                ctl->adesc = ctl->chan->device->device_prep_slave_sg(ctl->chan,
-                       sg, 1, DMA_FROM_DEVICE,
+                       sg, 1, DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_SRC_UNMAP);
 
                if (!ctl->adesc)
index 964e9c0948bce19cf1a09ca5229575006ed1934f..3ead111111e1f1bec8eaa10e3f5c9dee0650f4a9 100644 (file)
@@ -1745,6 +1745,12 @@ int pch_gbe_up(struct pch_gbe_adapter *adapter)
        struct pch_gbe_rx_ring *rx_ring = adapter->rx_ring;
        int err;
 
+       /* Ensure we have a valid MAC */
+       if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
+               pr_err("Error: Invalid MAC address\n");
+               return -EINVAL;
+       }
+
        /* hardware has been reset, we need to reload some things */
        pch_gbe_set_multi(netdev);
 
@@ -2468,9 +2474,14 @@ static int pch_gbe_probe(struct pci_dev *pdev,
 
        memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
        if (!is_valid_ether_addr(netdev->dev_addr)) {
-               dev_err(&pdev->dev, "Invalid MAC Address\n");
-               ret = -EIO;
-               goto err_free_adapter;
+               /*
+                * If the MAC is invalid (or just missing), display a warning
+                * but do not abort setting up the device. pch_gbe_up will
+                * prevent the interface from being brought up until a valid MAC
+                * is set.
+                */
+               dev_err(&pdev->dev, "Invalid MAC address, "
+                                   "interface disabled.\n");
        }
        setup_timer(&adapter->watchdog_timer, pch_gbe_watchdog,
                    (unsigned long)adapter);
index 6ece4295d78fd8cd16deafd28cf377f937b4aa92..813d41c4a845501bd37e772f094cc356ce029aab 100644 (file)
@@ -1703,7 +1703,7 @@ static int sh_mdio_init(struct net_device *ndev, int id,
        mdp->mii_bus->name = "sh_mii";
        mdp->mii_bus->parent = &ndev->dev;
        snprintf(mdp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
-               mdp->pdev->name, pdid);
+               mdp->pdev->name, id);
 
        /* PHY IRQ */
        mdp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
index da4a1042523a633e5781f86df7ffe4e123c09910..73195329aa464b8b5b9a7c4cadd2aaaa36f134ad 100644 (file)
@@ -154,7 +154,7 @@ int stmmac_mdio_register(struct net_device *ndev)
        else
                irqlist = priv->mii_irq;
 
-       new_bus->name = "STMMAC MII Bus";
+       new_bus->name = "stmmac";
        new_bus->read = &stmmac_mdio_read;
        new_bus->write = &stmmac_mdio_write;
        new_bus->reset = &stmmac_mdio_reset;
index 54a819a364871b458ab18559156d82369445156c..c796de9eed7226886ec67b453181c20fb129f111 100644 (file)
@@ -170,9 +170,9 @@ static int stmmac_pci_resume(struct pci_dev *pdev)
 #define STMMAC_DEVICE_ID 0x1108
 
 static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
-       {
-       PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)}, {
-       }
+       {PCI_DEVICE(STMMAC_VENDOR_ID, STMMAC_DEVICE_ID)},
+       {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_MAC)},
+       {}
 };
 
 MODULE_DEVICE_TABLE(pci, stmmac_id_table);
index 462d05f05e84d85d558f036a58b3786768514b03..1a1ca6cfc74aada9edf64b494cefa743b95276ae 100644 (file)
@@ -68,11 +68,11 @@ static void do_set_multicast(struct work_struct *w)
 
        nvdev = hv_get_drvdata(ndevctx->device_ctx);
        if (nvdev == NULL)
-               return;
+               goto out;
 
        rdev = nvdev->extension;
        if (rdev == NULL)
-               return;
+               goto out;
 
        if (net->flags & IFF_PROMISC)
                rndis_filter_set_packet_filter(rdev,
@@ -83,6 +83,7 @@ static void do_set_multicast(struct work_struct *w)
                        NDIS_PACKET_TYPE_ALL_MULTICAST |
                        NDIS_PACKET_TYPE_DIRECTED);
 
+out:
        kfree(w);
 }
 
index f2f820c4b40a4fd8c3384905fb30b35dbe7c61fc..9ea99217f11609176d56459c3e35f55185e6f2af 100644 (file)
@@ -173,6 +173,7 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb)
                skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN);
                if (!skb)
                        return RX_HANDLER_CONSUMED;
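+               /* ip_check_defrag() may have modified the skb, so re-read
+                * the Ethernet header pointer before the source lookup.
+                */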
+               eth = eth_hdr(skb);
                src = macvlan_hash_lookup(port, eth->h_source);
                if (!src)
                        /* frame comes from an external address */
index 88cc5db9affd7a4923fe29de85c63cd98d80c3aa..8985cc62cf41c888ed5a31c9e0877bb2463c5555 100644 (file)
 
 /**
  * mdiobus_alloc_size - allocate a mii_bus structure
+ * @size: extra amount of memory to allocate for private storage.
+ * If non-zero, then bus->priv points to that memory.
  *
  * Description: called by a bus driver to allocate an mii_bus
  * structure to fill in.
- *
- * 'size' is an an extra amount of memory to allocate for private storage.
- * If non-zero, then bus->priv is points to that memory.
  */
 struct mii_bus *mdiobus_alloc_size(size_t size)
 {
index ed2a862b835df3d9a50b43ac64544ca3e5911036..6b678f38e5ced5336cf81c632767e5e13100fe65 100644 (file)
@@ -92,9 +92,9 @@ struct team_option *__team_find_option(struct team *team, const char *opt_name)
        return NULL;
 }
 
-int team_options_register(struct team *team,
-                         const struct team_option *option,
-                         size_t option_count)
+int __team_options_register(struct team *team,
+                           const struct team_option *option,
+                           size_t option_count)
 {
        int i;
        struct team_option **dst_opts;
@@ -116,8 +116,11 @@ int team_options_register(struct team *team,
                }
        }
 
-       for (i = 0; i < option_count; i++)
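+       /* Flag newly registered options as changed so the next netlink
+        * event reports them to userspace.
+        */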
+       for (i = 0; i < option_count; i++) {
+               dst_opts[i]->changed = true;
+               dst_opts[i]->removed = false;
                list_add_tail(&dst_opts[i]->list, &team->option_list);
+       }
 
        kfree(dst_opts);
        return 0;
@@ -130,10 +133,22 @@ rollback:
        return err;
 }
 
-EXPORT_SYMBOL(team_options_register);
+static void __team_options_mark_removed(struct team *team,
+                                       const struct team_option *option,
+                                       size_t option_count)
+{
+       int i;
+
+       for (i = 0; i < option_count; i++, option++) {
+               struct team_option *del_opt;
 
-static void __team_options_change_check(struct team *team,
-                                       struct team_option *changed_option);
+               del_opt = __team_find_option(team, option->name);
+               if (del_opt) {
+                       del_opt->changed = true;
+                       del_opt->removed = true;
+               }
+       }
+}
 
 static void __team_options_unregister(struct team *team,
                                      const struct team_option *option,
@@ -152,12 +167,29 @@ static void __team_options_unregister(struct team *team,
        }
 }
 
+static void __team_options_change_check(struct team *team);
+
+int team_options_register(struct team *team,
+                         const struct team_option *option,
+                         size_t option_count)
+{
+       int err;
+
+       err = __team_options_register(team, option, option_count);
+       if (err)
+               return err;
+       __team_options_change_check(team);
+       return 0;
+}
+EXPORT_SYMBOL(team_options_register);
+
 void team_options_unregister(struct team *team,
                             const struct team_option *option,
                             size_t option_count)
 {
+       __team_options_mark_removed(team, option, option_count);
+       __team_options_change_check(team);
        __team_options_unregister(team, option, option_count);
-       __team_options_change_check(team, NULL);
 }
 EXPORT_SYMBOL(team_options_unregister);
 
@@ -176,7 +208,8 @@ static int team_option_set(struct team *team, struct team_option *option,
        if (err)
                return err;
 
-       __team_options_change_check(team, option);
+       option->changed = true;
+       __team_options_change_check(team);
        return err;
 }
 
@@ -653,6 +686,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
                return -ENOENT;
        }
 
+       port->removed = true;
        __team_port_change_check(port, false);
        team_port_list_del_port(team, port);
        team_adjust_ops(team);
@@ -1200,10 +1234,9 @@ err_fill:
        return err;
 }
 
-static int team_nl_fill_options_get_changed(struct sk_buff *skb,
-                                           u32 pid, u32 seq, int flags,
-                                           struct team *team,
-                                           struct team_option *changed_option)
+static int team_nl_fill_options_get(struct sk_buff *skb,
+                                   u32 pid, u32 seq, int flags,
+                                   struct team *team, bool fillall)
 {
        struct nlattr *option_list;
        void *hdr;
@@ -1223,12 +1256,19 @@ static int team_nl_fill_options_get_changed(struct sk_buff *skb,
                struct nlattr *option_item;
                long arg;
 
+               /* Include only changed options if fill all mode is not on */
+               if (!fillall && !option->changed)
+                       continue;
                option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
                if (!option_item)
                        goto nla_put_failure;
                NLA_PUT_STRING(skb, TEAM_ATTR_OPTION_NAME, option->name);
-               if (option == changed_option)
+               if (option->changed) {
                        NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_CHANGED);
+                       option->changed = false;
+               }
+               if (option->removed)
+                       NLA_PUT_FLAG(skb, TEAM_ATTR_OPTION_REMOVED);
                switch (option->type) {
                case TEAM_OPTION_TYPE_U32:
                        NLA_PUT_U8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32);
@@ -1255,13 +1295,13 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-static int team_nl_fill_options_get(struct sk_buff *skb,
-                                   struct genl_info *info, int flags,
-                                   struct team *team)
+static int team_nl_fill_options_get_all(struct sk_buff *skb,
+                                       struct genl_info *info, int flags,
+                                       struct team *team)
 {
-       return team_nl_fill_options_get_changed(skb, info->snd_pid,
-                                               info->snd_seq, NLM_F_ACK,
-                                               team, NULL);
+       return team_nl_fill_options_get(skb, info->snd_pid,
+                                       info->snd_seq, NLM_F_ACK,
+                                       team, true);
 }
 
 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
@@ -1273,7 +1313,7 @@ static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
        if (!team)
                return -EINVAL;
 
-       err = team_nl_send_generic(info, team, team_nl_fill_options_get);
+       err = team_nl_send_generic(info, team, team_nl_fill_options_get_all);
 
        team_nl_team_put(team);
 
@@ -1365,10 +1405,10 @@ team_put:
        return err;
 }
 
-static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
-                                             u32 pid, u32 seq, int flags,
-                                             struct team *team,
-                                             struct team_port *changed_port)
+static int team_nl_fill_port_list_get(struct sk_buff *skb,
+                                     u32 pid, u32 seq, int flags,
+                                     struct team *team,
+                                     bool fillall)
 {
        struct nlattr *port_list;
        void *hdr;
@@ -1387,12 +1427,19 @@ static int team_nl_fill_port_list_get_changed(struct sk_buff *skb,
        list_for_each_entry(port, &team->port_list, list) {
                struct nlattr *port_item;
 
+               /* Include only changed ports if fill all mode is not on */
+               if (!fillall && !port->changed)
+                       continue;
                port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
                if (!port_item)
                        goto nla_put_failure;
                NLA_PUT_U32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex);
-               if (port == changed_port)
+               if (port->changed) {
                        NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_CHANGED);
+                       port->changed = false;
+               }
+               if (port->removed)
+                       NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_REMOVED);
                if (port->linkup)
                        NLA_PUT_FLAG(skb, TEAM_ATTR_PORT_LINKUP);
                NLA_PUT_U32(skb, TEAM_ATTR_PORT_SPEED, port->speed);
@@ -1408,13 +1455,13 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
-static int team_nl_fill_port_list_get(struct sk_buff *skb,
-                                     struct genl_info *info, int flags,
-                                     struct team *team)
+static int team_nl_fill_port_list_get_all(struct sk_buff *skb,
+                                         struct genl_info *info, int flags,
+                                         struct team *team)
 {
-       return team_nl_fill_port_list_get_changed(skb, info->snd_pid,
-                                                 info->snd_seq, NLM_F_ACK,
-                                                 team, NULL);
+       return team_nl_fill_port_list_get(skb, info->snd_pid,
+                                         info->snd_seq, NLM_F_ACK,
+                                         team, true);
 }
 
 static int team_nl_cmd_port_list_get(struct sk_buff *skb,
@@ -1427,7 +1474,7 @@ static int team_nl_cmd_port_list_get(struct sk_buff *skb,
        if (!team)
                return -EINVAL;
 
-       err = team_nl_send_generic(info, team, team_nl_fill_port_list_get);
+       err = team_nl_send_generic(info, team, team_nl_fill_port_list_get_all);
 
        team_nl_team_put(team);
 
@@ -1464,8 +1511,7 @@ static struct genl_multicast_group team_change_event_mcgrp = {
        .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME,
 };
 
-static int team_nl_send_event_options_get(struct team *team,
-                                         struct team_option *changed_option)
+static int team_nl_send_event_options_get(struct team *team)
 {
        struct sk_buff *skb;
        int err;
@@ -1475,8 +1521,7 @@ static int team_nl_send_event_options_get(struct team *team,
        if (!skb)
                return -ENOMEM;
 
-       err = team_nl_fill_options_get_changed(skb, 0, 0, 0, team,
-                                              changed_option);
+       err = team_nl_fill_options_get(skb, 0, 0, 0, team, false);
        if (err < 0)
                goto err_fill;
 
@@ -1489,18 +1534,17 @@ err_fill:
        return err;
 }
 
-static int team_nl_send_event_port_list_get(struct team_port *port)
+static int team_nl_send_event_port_list_get(struct team *team)
 {
        struct sk_buff *skb;
        int err;
-       struct net *net = dev_net(port->team->dev);
+       struct net *net = dev_net(team->dev);
 
        skb = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;
 
-       err = team_nl_fill_port_list_get_changed(skb, 0, 0, 0,
-                                                port->team, port);
+       err = team_nl_fill_port_list_get(skb, 0, 0, 0, team, false);
        if (err < 0)
                goto err_fill;
 
@@ -1544,12 +1588,11 @@ static void team_nl_fini(void)
  * Change checkers
  ******************/
 
-static void __team_options_change_check(struct team *team,
-                                       struct team_option *changed_option)
+static void __team_options_change_check(struct team *team)
 {
        int err;
 
-       err = team_nl_send_event_options_get(team, changed_option);
+       err = team_nl_send_event_options_get(team);
        if (err)
                netdev_warn(team->dev, "Failed to send options change via netlink\n");
 }
@@ -1559,9 +1602,10 @@ static void __team_port_change_check(struct team_port *port, bool linkup)
 {
        int err;
 
-       if (port->linkup == linkup)
+       if (!port->removed && port->linkup == linkup)
                return;
 
+       port->changed = true;
        port->linkup = linkup;
        if (linkup) {
                struct ethtool_cmd ecmd;
@@ -1577,7 +1621,7 @@ static void __team_port_change_check(struct team_port *port, bool linkup)
        port->duplex = 0;
 
 send_event:
-       err = team_nl_send_event_port_list_get(port);
+       err = team_nl_send_event_port_list_get(port->team);
        if (err)
                netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink\n",
                            port->dev->name);
index 88c81c5706b249a0e08a25c28217775463dd53dc..09b8c9dbf78f5e3267ebfadc2d4bed15f9562487 100644 (file)
@@ -557,10 +557,11 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
                        rxs->rs_status |= ATH9K_RXERR_DECRYPT;
                else if (rxsp->status11 & AR_MichaelErr)
                        rxs->rs_status |= ATH9K_RXERR_MIC;
-               if (rxsp->status11 & AR_KeyMiss)
-                       rxs->rs_status |= ATH9K_RXERR_KEYMISS;
        }
 
+       if (rxsp->status11 & AR_KeyMiss)
+               rxs->rs_status |= ATH9K_RXERR_KEYMISS;
+
        return 0;
 }
 EXPORT_SYMBOL(ath9k_hw_process_rxdesc_edma);
index fd3f19c2e550e4e9991c4adc9b430a91799c9bd4..e196aba77acf568387d7a4b2b5852e316d801e4f 100644 (file)
@@ -618,10 +618,11 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
                        rs->rs_status |= ATH9K_RXERR_DECRYPT;
                else if (ads.ds_rxstatus8 & AR_MichaelErr)
                        rs->rs_status |= ATH9K_RXERR_MIC;
-               if (ads.ds_rxstatus8 & AR_KeyMiss)
-                       rs->rs_status |= ATH9K_RXERR_KEYMISS;
        }
 
+       if (ads.ds_rxstatus8 & AR_KeyMiss)
+               rs->rs_status |= ATH9K_RXERR_KEYMISS;
+
        return 0;
 }
 EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
index b97a40ed5fff104864a9cfbe296a31c8a96bf170..3876c7ea54f49bd3a988d159f7f7bf66ab80fe69 100644 (file)
@@ -31,6 +31,12 @@ config B43_BCMA
        depends on B43 && BCMA
        default y
 
+config B43_BCMA_EXTRA
+       bool "Hardware support that overlaps with the brcmsmac driver"
+       depends on B43_BCMA
+       default n if BRCMSMAC || BRCMSMAC_MODULE
+       default y
+
 config B43_SSB
        bool
        depends on B43 && SSB
index 1c6f19393efa72037b7369993bf7af463a7bb51a..23ffb1b9a86f441f771c8f2ac200d823d76cf440 100644 (file)
@@ -116,8 +116,10 @@ MODULE_PARM_DESC(pio, "Use PIO accesses by default: 0=DMA, 1=PIO");
 #ifdef CONFIG_B43_BCMA
 static const struct bcma_device_id b43_bcma_tbl[] = {
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x11, BCMA_ANY_CLASS),
+#ifdef CONFIG_B43_BCMA_EXTRA
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x17, BCMA_ANY_CLASS),
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x18, BCMA_ANY_CLASS),
+#endif
        BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_80211, 0x1D, BCMA_ANY_CLASS),
        BCMA_CORETABLE_END
 };
@@ -4852,6 +4854,9 @@ static void b43_op_stop(struct ieee80211_hw *hw)
 
        cancel_work_sync(&(wl->beacon_update_trigger));
 
+       if (!dev)
+               goto out;
+
        mutex_lock(&wl->mutex);
        if (b43_status(dev) >= B43_STAT_STARTED) {
                dev = b43_wireless_core_stop(dev);
@@ -4863,7 +4868,7 @@ static void b43_op_stop(struct ieee80211_hw *hw)
 
 out_unlock:
        mutex_unlock(&wl->mutex);
-
+out:
        cancel_work_sync(&(wl->txpower_adjust_work));
 }
 
index f23b0c3e4ea3d94985cbdc4b61d6f2688ef52164..bf11850a20f11b1201179c7c7659a16397f2d4cc 100644 (file)
@@ -2475,7 +2475,7 @@ static s32 brcmf_init_iscan(struct brcmf_cfg80211_priv *cfg_priv)
        return err;
 }
 
-static void brcmf_delay(u32 ms)
+static __always_inline void brcmf_delay(u32 ms)
 {
        if (ms < 1000 / HZ) {
                cond_resched();
index d106576ce338980b4645ced6084139084f74fc21..448ab9c4eb47b00db5ff558db6a4ab978a4f4f77 100644 (file)
@@ -1128,14 +1128,7 @@ static int __devinit brcms_bcma_probe(struct bcma_device *pdev)
        return 0;
 }
 
-static int brcms_pci_suspend(struct pci_dev *pdev)
-{
-       pci_save_state(pdev);
-       pci_disable_device(pdev);
-       return pci_set_power_state(pdev, PCI_D3hot);
-}
-
-static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
+static int brcms_suspend(struct bcma_device *pdev)
 {
        struct brcms_info *wl;
        struct ieee80211_hw *hw;
@@ -1153,40 +1146,15 @@ static int brcms_suspend(struct bcma_device *pdev, pm_message_t state)
        wl->pub->hw_up = false;
        spin_unlock_bh(&wl->lock);
 
-       /* temporarily do suspend ourselves */
-       return brcms_pci_suspend(pdev->bus->host_pci);
-}
-
-static int brcms_pci_resume(struct pci_dev *pdev)
-{
-       int err = 0;
-       uint val;
-
-       err = pci_set_power_state(pdev, PCI_D0);
-       if (err)
-               return err;
-
-       pci_restore_state(pdev);
-
-       err = pci_enable_device(pdev);
-       if (err)
-               return err;
-
-       pci_set_master(pdev);
-
-       pci_read_config_dword(pdev, 0x40, &val);
-       if ((val & 0x0000ff00) != 0)
-               pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
+       pr_debug("brcms_suspend ok\n");
 
        return 0;
 }
 
 static int brcms_resume(struct bcma_device *pdev)
 {
-       /*
-       *  just do pci resume for now until bcma supports it.
-       */
-       return brcms_pci_resume(pdev->bus->host_pci);
+       pr_debug("brcms_resume ok\n");
+       return 0;
 }
 
 static struct bcma_driver brcms_bcma_driver = {
index f7ed34034f88cf12ed53694edd6621e14cf26a44..f6affc6fd12a511831d07d7333ea0b45f0bb8d5f 100644 (file)
@@ -7981,13 +7981,21 @@ int brcms_c_get_curband(struct brcms_c_info *wlc)
 
 void brcms_c_wait_for_tx_completion(struct brcms_c_info *wlc, bool drop)
 {
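+       /* Bound the wait: give the queues and DMA FIFOs roughly 20 ms to drain */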
+       int timeout = 20;
+
        /* flush packet queue when requested */
        if (drop)
                brcmu_pktq_flush(&wlc->pkt_queue->q, false, NULL, NULL);
 
        /* wait for queue and DMA fifos to run dry */
-       while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0)
+       while (!pktq_empty(&wlc->pkt_queue->q) || brcms_txpktpendtot(wlc) > 0) {
                brcms_msleep(wlc->wl, 1);
+
+               if (--timeout == 0)
+                       break;
+       }
+
+       WARN_ON_ONCE(timeout == 0);
 }
 
 void brcms_c_set_beacon_listen_interval(struct brcms_c_info *wlc, u8 interval)
index 018a8deb88a83b5b2ab2febcc2487b89eeb4ba4f..4fcdac63a3007aff3822e8dd17ccd5db57ce69a6 100644 (file)
@@ -7848,7 +7848,7 @@ static void ipw_handle_data_packet_monitor(struct ipw_priv *priv,
         * more efficiently than we can parse it. ORDER MATTERS HERE */
        struct ipw_rt_hdr *ipw_rt;
 
-       short len = le16_to_cpu(pkt->u.frame.length);
+       unsigned short len = le16_to_cpu(pkt->u.frame.length);
 
        /* We received data from the HW, so stop the watchdog */
        dev->trans_start = jiffies;
@@ -8023,7 +8023,7 @@ static void ipw_handle_promiscuous_rx(struct ipw_priv *priv,
        s8 signal = frame->rssi_dbm - IPW_RSSI_TO_DBM;
        s8 noise = (s8) le16_to_cpu(frame->noise);
        u8 rate = frame->rate;
-       short len = le16_to_cpu(pkt->u.frame.length);
+       unsigned short len = le16_to_cpu(pkt->u.frame.length);
        struct sk_buff *skb;
        int hdr_only = 0;
        u16 filter = priv->prom_priv->filter;
index 084aa2c4ccfb272dab28d57fbc9969644360f314..a6454726737e04b207d0c9ae710098f98cef6c63 100644 (file)
@@ -569,7 +569,7 @@ static int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
        struct iwl_scan_cmd *scan;
        struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
        u32 rate_flags = 0;
-       u16 cmd_len;
+       u16 cmd_len = 0;
        u16 rx_chain = 0;
        enum ieee80211_band band;
        u8 n_probes = 0;
index 752493f00406a0a808c0984d0ddc96b215ce3fb1..65d1f05007be0e0b6ba421e24b273b3bb77adc97 100644 (file)
@@ -972,11 +972,11 @@ void iwl_irq_tasklet(struct iwl_trans *trans)
        }
 #endif
 
-       spin_unlock_irqrestore(&trans->shrd->lock, flags);
-
        /* saved interrupt in inta variable now we can reset trans_pcie->inta */
        trans_pcie->inta = 0;
 
+       spin_unlock_irqrestore(&trans->shrd->lock, flags);
+
        /* Now service all interrupt bits discovered above. */
        if (inta & CSR_INT_BIT_HW_ERR) {
                IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
index 7becea3dec654de21ab5090fc55b8a5068b7703f..dd5aeaff44ba3483804df003bd5779cbf0138794 100644 (file)
@@ -2777,7 +2777,7 @@ static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
        else if (channel->band == IEEE80211_BAND_5GHZ)
                cmd->band = cpu_to_le16(0x4);
 
-       cmd->channel = channel->hw_value;
+       cmd->channel = cpu_to_le16(channel->hw_value);
 
        if (conf->channel_type == NL80211_CHAN_NO_HT ||
            conf->channel_type == NL80211_CHAN_HT20) {
@@ -4066,7 +4066,7 @@ static int mwl8k_cmd_encryption_remove_key(struct ieee80211_hw *hw,
                goto done;
 
        if (key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
-                       WLAN_CIPHER_SUITE_WEP104)
+                       key->cipher == WLAN_CIPHER_SUITE_WEP104)
                mwl8k_vif->wep_key_conf[key->keyidx].enabled = 0;
 
        cmd->action = cpu_to_le32(MWL8K_ENCR_REMOVE_KEY);
index 4941a1a2321907fb93cc8a2fe08a3ce87d8ecf16..dc88baefa72e88bd2b2f60b3ea2e8f7174ff34ed 100644 (file)
@@ -422,7 +422,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
 static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
                                 enum dev_state state)
 {
-       int mask = (state == STATE_RADIO_IRQ_ON);
        u32 reg;
        unsigned long flags;
 
@@ -436,25 +435,14 @@ static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
        }
 
        spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags);
-       rt2x00pci_register_read(rt2x00dev, INT_MASK_CSR, &reg);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, 0);
-       rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, 0);
+       reg = 0;
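+       /* Start with all sources disabled; when switching on, enable only
+        * the interrupts this driver actually services.
+        */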
+       if (state == STATE_RADIO_IRQ_ON) {
+               rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1);
+               rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1);
+               rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1);
+               rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1);
+               rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1);
+       }
        rt2x00pci_register_write(rt2x00dev, INT_MASK_CSR, reg);
        spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags);
 
index fa679057630f9152f418e9e2f9b60f549dded7e3..698b905058dd23435b3f13b40ab4e0b7c870b5c5 100644 (file)
@@ -68,7 +68,7 @@ struct netfront_cb {
 
 #define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
 #define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
-#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
+#define TX_MAX_TARGET min_t(int, NET_TX_RING_SIZE, 256)
 
 struct netfront_stats {
        u64                     rx_packets;
index 97fff785e97e7027f7d54563186db969075a2558..af295bb21d62890020cc2c0084f3570bd42babdc 100644 (file)
@@ -2802,7 +2802,7 @@ pci_intx(struct pci_dev *pdev, int enable)
 
 /**
  * pci_intx_mask_supported - probe for INTx masking support
- * @pdev: the PCI device to operate on
+ * @dev: the PCI device to operate on
  *
  * Check if the device dev supports INTx masking via the config space
  * command word.
@@ -2884,7 +2884,7 @@ done:
 
 /**
  * pci_check_and_mask_intx - mask INTx on pending interrupt
- * @pdev: the PCI device to operate on
+ * @dev: the PCI device to operate on
  *
  * Check if the device dev has its INTx line asserted, mask it and
  * return true in that case. False is returned if no interrupt was
@@ -2898,7 +2898,7 @@ EXPORT_SYMBOL_GPL(pci_check_and_mask_intx);
 
 /**
  * pci_check_and_mask_intx - unmask INTx if no interrupt is pending
- * @pdev: the PCI device to operate on
+ * @dev: the PCI device to operate on
  *
  * Check if the device dev has its INTx line asserted, unmask it if not
  * and return true. False is returned and the mask remains active if
index 749c2a16012c582bca165db93f14cf4c21ec5293..1932029de48d67cf7dd10ed18f2de11e40056c29 100644 (file)
@@ -1269,10 +1269,8 @@ static int pcmcia_bus_add(struct pcmcia_socket *skt)
 
 static int pcmcia_bus_early_resume(struct pcmcia_socket *skt)
 {
-       if (!verify_cis_cache(skt)) {
-               pcmcia_put_socket(skt);
+       if (!verify_cis_cache(skt))
                return 0;
-       }
 
        dev_dbg(&skt->dev, "cis mismatch - different card\n");
 
index 59866905ea37e5f2150221506cf9bac73b2eaa6d..27f2fe3b7fb4a5e41a44a03fa75e2becce0d63d7 100644 (file)
@@ -205,7 +205,8 @@ static int __devexit pcmcia_remove(struct sa1111_dev *dev)
 
        dev_set_drvdata(&dev->dev, NULL);
 
-       for (; next = s->next, s; s = next) {
+       for (; s; s = next) {
+               next = s->next;
                soc_pcmcia_remove_one(&s->soc);
                kfree(s);
        }
index 569bdb3ef1046155b021648390087a9010591d3b..8fe15cf15ac8f2687234db70509d40210672d3f8 100644 (file)
@@ -510,10 +510,12 @@ static struct dentry *debugfs_root;
 
 static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
 {
-       static struct dentry *device_root;
+       struct dentry *device_root;
 
        device_root = debugfs_create_dir(dev_name(pctldev->dev),
                                         debugfs_root);
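+       /* Remember the dentry so pinctrl_unregister() can remove the
+        * debugfs directory again.
+        */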
+       pctldev->device_root = device_root;
+
        if (IS_ERR(device_root) || !device_root) {
                pr_warn("failed to create debugfs directory for %s\n",
                        dev_name(pctldev->dev));
@@ -529,6 +531,11 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
        pinconf_init_device_debugfs(device_root, pctldev);
 }
 
+static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
+{
+       debugfs_remove_recursive(pctldev->device_root);
+}
+
 static void pinctrl_init_debugfs(void)
 {
        debugfs_root = debugfs_create_dir("pinctrl", NULL);
@@ -553,6 +560,10 @@ static void pinctrl_init_debugfs(void)
 {
 }
 
+static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
+{
+}
+
 #endif
 
 /**
@@ -572,40 +583,40 @@ struct pinctrl_dev *pinctrl_register(struct pinctrl_desc *pctldesc,
        if (pctldesc->name == NULL)
                return NULL;
 
+       pctldev = kzalloc(sizeof(struct pinctrl_dev), GFP_KERNEL);
+       if (pctldev == NULL)
+               return NULL;
+
+       /* Initialize pin control device struct */
+       pctldev->owner = pctldesc->owner;
+       pctldev->desc = pctldesc;
+       pctldev->driver_data = driver_data;
+       INIT_RADIX_TREE(&pctldev->pin_desc_tree, GFP_KERNEL);
+       spin_lock_init(&pctldev->pin_desc_tree_lock);
+       INIT_LIST_HEAD(&pctldev->gpio_ranges);
+       mutex_init(&pctldev->gpio_ranges_lock);
+       pctldev->dev = dev;
+
        /* If we're implementing pinmuxing, check the ops for sanity */
        if (pctldesc->pmxops) {
-               ret = pinmux_check_ops(pctldesc->pmxops);
+               ret = pinmux_check_ops(pctldev);
                if (ret) {
                        pr_err("%s pinmux ops lacks necessary functions\n",
                               pctldesc->name);
-                       return NULL;
+                       goto out_err;
                }
        }
 
        /* If we're implementing pinconfig, check the ops for sanity */
        if (pctldesc->confops) {
-               ret = pinconf_check_ops(pctldesc->confops);
+               ret = pinconf_check_ops(pctldev);
                if (ret) {
                        pr_err("%s pin config ops lacks necessary functions\n",
                               pctldesc->name);
-                       return NULL;
+                       goto out_err;
                }
        }
 
-       pctldev = kzalloc(sizeof(struct pinctrl_dev), GFP_KERNEL);
-       if (pctldev == NULL)
-               return NULL;
-
-       /* Initialize pin control device struct */
-       pctldev->owner = pctldesc->owner;
-       pctldev->desc = pctldesc;
-       pctldev->driver_data = driver_data;
-       INIT_RADIX_TREE(&pctldev->pin_desc_tree, GFP_KERNEL);
-       spin_lock_init(&pctldev->pin_desc_tree_lock);
-       INIT_LIST_HEAD(&pctldev->gpio_ranges);
-       mutex_init(&pctldev->gpio_ranges_lock);
-       pctldev->dev = dev;
-
        /* Register all the pins */
        pr_debug("try to register %d pins on %s...\n",
                 pctldesc->npins, pctldesc->name);
@@ -641,6 +652,7 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
        if (pctldev == NULL)
                return;
 
+       pinctrl_remove_device_debugfs(pctldev);
        pinmux_unhog_maps(pctldev);
        /* TODO: check that no pinmuxes are still active? */
        mutex_lock(&pinctrldev_list_mutex);
index 177a3310547f31ceb8a9cd1191cb55f7f253469d..cfa86da6b4b15b546cbf78de371247812c162a9c 100644 (file)
@@ -41,6 +41,9 @@ struct pinctrl_dev {
        struct device *dev;
        struct module *owner;
        void *driver_data;
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *device_root;
+#endif
 #ifdef CONFIG_PINMUX
        struct mutex pinmux_hogs_lock;
        struct list_head pinmux_hogs;
index 1259872b0a1d641e9da7f34cfd57a1f9e2ae1c91..9fb75456824c3b562fe11fa19f23c5f20d9dcc26 100644 (file)
@@ -205,8 +205,10 @@ int pin_config_group_set(const char *dev_name, const char *pin_group,
 }
 EXPORT_SYMBOL(pin_config_group_set);
 
-int pinconf_check_ops(const struct pinconf_ops *ops)
+int pinconf_check_ops(struct pinctrl_dev *pctldev)
 {
+       const struct pinconf_ops *ops = pctldev->desc->confops;
+
        /* We must be able to read out pin status */
        if (!ops->pin_config_get && !ops->pin_config_group_get)
                return -EINVAL;
@@ -236,7 +238,7 @@ static int pinconf_pins_show(struct seq_file *s, void *what)
        seq_puts(s, "Format: pin (name): pinmux setting array\n");
 
        /* The pin number can be retrieved from the pin controller descriptor */
-       for (i = 0; pin < pctldev->desc->npins; i++) {
+       for (i = 0; i < pctldev->desc->npins; i++) {
                struct pin_desc *desc;
 
                pin = pctldev->desc->pins[i].number;
index e7dc6165032a35e540216d870d6ef75b2acb78e6..006b77fa737e8841ff84713fe3ea24b74a82fd37 100644 (file)
@@ -13,7 +13,7 @@
 
 #ifdef CONFIG_PINCONF
 
-int pinconf_check_ops(const struct pinconf_ops *ops);
+int pinconf_check_ops(struct pinctrl_dev *pctldev);
 void pinconf_init_device_debugfs(struct dentry *devroot,
                                 struct pinctrl_dev *pctldev);
 int pin_config_get_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
@@ -23,7 +23,7 @@ int pin_config_set_for_pin(struct pinctrl_dev *pctldev, unsigned pin,
 
 #else
 
-static inline int pinconf_check_ops(const struct pinconf_ops *ops)
+static inline int pinconf_check_ops(struct pinctrl_dev *pctldev)
 {
        return 0;
 }
index a76a348321bb4284d6820279fed3bb679cd23fcf..7c3193f7a04430bcc8076dfd18aafc05983e915b 100644 (file)
@@ -53,11 +53,6 @@ struct pinmux_group {
  * @dev: the device using this pinmux
  * @usecount: the number of active users of this mux setting, used to keep
  *     track of nested use cases
- * @pins: an array of discrete physical pins used in this mapping, taken
- *     from the global pin enumeration space (copied from pinmux map)
- * @num_pins: the number of pins in this mapping array, i.e. the number of
- *     elements in .pins so we can iterate over that array (copied from
- *     pinmux map)
  * @pctldev: pin control device handling this pinmux
  * @func_selector: the function selector for the pinmux device handling
  *     this pinmux
@@ -152,8 +147,7 @@ static int pin_request(struct pinctrl_dev *pctldev,
                status = 0;
 
        if (status)
-               dev_err(pctldev->dev, "->request on device %s failed "
-                      "for pin %d\n",
+               dev_err(pctldev->dev, "->request on device %s failed for pin %d\n",
                       pctldev->desc->name, pin);
 out_free_pin:
        if (status) {
@@ -355,21 +349,20 @@ int __init pinmux_register_mappings(struct pinmux_map const *maps,
        /* First sanity check the new mapping */
        for (i = 0; i < num_maps; i++) {
                if (!maps[i].name) {
-                       pr_err("failed to register map %d: "
-                              "no map name given\n", i);
+                       pr_err("failed to register map %d: no map name given\n",
+                                       i);
                        return -EINVAL;
                }
 
                if (!maps[i].ctrl_dev && !maps[i].ctrl_dev_name) {
-                       pr_err("failed to register map %s (%d): "
-                              "no pin control device given\n",
+                       pr_err("failed to register map %s (%d): no pin control device given\n",
                               maps[i].name, i);
                        return -EINVAL;
                }
 
                if (!maps[i].function) {
-                       pr_err("failed to register map %s (%d): "
-                              "no function ID given\n", maps[i].name, i);
+                       pr_err("failed to register map %s (%d): no function ID given\n",
+                                       maps[i].name, i);
                        return -EINVAL;
                }
 
@@ -411,7 +404,7 @@ int __init pinmux_register_mappings(struct pinmux_map const *maps,
 }
 
 /**
- * acquire_pins() - acquire all the pins for a certain funcion on a pinmux
+ * acquire_pins() - acquire all the pins for a certain function on a pinmux
  * @pctldev: the device to take the pins on
  * @func_selector: the function selector to acquire the pins for
  * @group_selector: the group selector containing the pins to acquire
@@ -442,8 +435,7 @@ static int acquire_pins(struct pinctrl_dev *pctldev,
                ret = pin_request(pctldev, pins[i], func, NULL);
                if (ret) {
                        dev_err(pctldev->dev,
-                               "could not get pin %d for function %s "
-                               "on device %s - conflicting mux mappings?\n",
+                               "could not get pin %d for function %s on device %s - conflicting mux mappings?\n",
                                pins[i], func ? : "(undefined)",
                                pinctrl_dev_get_name(pctldev));
                        /* On error release all taken pins */
@@ -458,7 +450,7 @@ static int acquire_pins(struct pinctrl_dev *pctldev,
 
 /**
  * release_pins() - release pins taken by earlier acquirement
- * @pctldev: the device to free the pinx on
+ * @pctldev: the device to free the pins on
  * @group_selector: the group selector containing the pins to free
  */
 static void release_pins(struct pinctrl_dev *pctldev,
@@ -473,8 +465,7 @@ static void release_pins(struct pinctrl_dev *pctldev,
        ret = pctlops->get_group_pins(pctldev, group_selector,
                                      &pins, &num_pins);
        if (ret) {
-               dev_err(pctldev->dev, "could not get pins to release for "
-                       "group selector %d\n",
+               dev_err(pctldev->dev, "could not get pins to release for group selector %d\n",
                        group_selector);
                return;
        }
@@ -526,8 +517,7 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
                ret = pinctrl_get_group_selector(pctldev, groups[0]);
                if (ret < 0) {
                        dev_err(pctldev->dev,
-                               "function %s wants group %s but the pin "
-                               "controller does not seem to have that group\n",
+                               "function %s wants group %s but the pin controller does not seem to have that group\n",
                                pmxops->get_function_name(pctldev, func_selector),
                                groups[0]);
                        return ret;
@@ -535,8 +525,7 @@ static int pinmux_check_pin_group(struct pinctrl_dev *pctldev,
 
                if (num_groups > 1)
                        dev_dbg(pctldev->dev,
-                               "function %s support more than one group, "
-                               "default-selecting first group %s (%d)\n",
+                               "function %s support more than one group, default-selecting first group %s (%d)\n",
                                pmxops->get_function_name(pctldev, func_selector),
                                groups[0],
                                ret);
@@ -628,10 +617,8 @@ static int pinmux_enable_muxmap(struct pinctrl_dev *pctldev,
 
        if (pmx->pctldev && pmx->pctldev != pctldev) {
                dev_err(pctldev->dev,
-                       "different pin control devices given for device %s, "
-                       "function %s\n",
-                       devname,
-                       map->function);
+                       "different pin control devices given for device %s, function %s\n",
+                       devname, map->function);
                return -EINVAL;
        }
        pmx->dev = dev;
@@ -695,7 +682,6 @@ static void pinmux_free_groups(struct pinmux *pmx)
  */
 struct pinmux *pinmux_get(struct device *dev, const char *name)
 {
-
        struct pinmux_map const *map = NULL;
        struct pinctrl_dev *pctldev = NULL;
        const char *devname = NULL;
@@ -745,8 +731,7 @@ struct pinmux *pinmux_get(struct device *dev, const char *name)
                        else if (map->ctrl_dev_name)
                                devname = map->ctrl_dev_name;
 
-                       pr_warning("could not find a pinctrl device for pinmux "
-                                  "function %s, fishy, they shall all have one\n",
+                       pr_warning("could not find a pinctrl device for pinmux function %s, fishy, they shall all have one\n",
                                   map->function);
                        pr_warning("given pinctrl device name: %s",
                                   devname ? devname : "UNDEFINED");
@@ -904,8 +889,11 @@ void pinmux_disable(struct pinmux *pmx)
 }
 EXPORT_SYMBOL_GPL(pinmux_disable);
 
-int pinmux_check_ops(const struct pinmux_ops *ops)
+int pinmux_check_ops(struct pinctrl_dev *pctldev)
 {
+       const struct pinmux_ops *ops = pctldev->desc->pmxops;
+       unsigned selector = 0;
+
        /* Check that we implement required operations */
        if (!ops->list_functions ||
            !ops->get_function_name ||
@@ -914,6 +902,18 @@ int pinmux_check_ops(const struct pinmux_ops *ops)
            !ops->disable)
                return -EINVAL;
 
+       /* Check that all functions registered have names */
+       while (ops->list_functions(pctldev, selector) >= 0) {
+               const char *fname = ops->get_function_name(pctldev,
+                                                          selector);
+               if (!fname) {
+                       pr_err("pinmux ops has no name for function%u\n",
+                               selector);
+                       return -EINVAL;
+               }
+               selector++;
+       }
+
        return 0;
 }
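/*
 * Illustrative sketch (not part of this commit): with pinmux_check_ops()
 * now taking the pinctrl_dev, the core can walk the driver's function
 * table at registration time and reject drivers whose functions lack
 * names. A hypothetical driver passing the new check could look like the
 * following; foo_functions[], foo_list_funcs() and foo_get_fname() are
 * invented names used only for illustration.
 */
static const char * const foo_functions[] = { "i2c0", "spi0", "uart0" };

static int foo_list_funcs(struct pinctrl_dev *pctldev, unsigned selector)
{
        /* a negative return terminates the core's while() walk above */
        return selector < ARRAY_SIZE(foo_functions) ? 0 : -EINVAL;
}

static const char *foo_get_fname(struct pinctrl_dev *pctldev,
                                 unsigned selector)
{
        /* must never return NULL, or registration now fails with -EINVAL */
        return foo_functions[selector];
}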
 
@@ -932,8 +932,8 @@ static int pinmux_hog_map(struct pinctrl_dev *pctldev,
                 * without any problems, so then we can hog pinmuxes for
                 * all devices that just want a static pin mux at this point.
                 */
-               dev_err(pctldev->dev, "map %s wants to hog a non-system "
-                       "pinmux, this is not going to work\n", map->name);
+               dev_err(pctldev->dev, "map %s wants to hog a non-system pinmux, this is not going to work\n",
+                               map->name);
                return -EINVAL;
        }
 
@@ -993,9 +993,12 @@ int pinmux_hog_maps(struct pinctrl_dev *pctldev)
        for (i = 0; i < pinmux_maps_num; i++) {
                struct pinmux_map const *map = &pinmux_maps[i];
 
-               if (((map->ctrl_dev == dev) ||
-                    !strcmp(map->ctrl_dev_name, devname)) &&
-                   map->hog_on_boot) {
+               if (!map->hog_on_boot)
+                       continue;
+
+               if ((map->ctrl_dev == dev) ||
+                       (map->ctrl_dev_name &&
+                               !strcmp(map->ctrl_dev_name, devname))) {
                        /* OK time to hog! */
                        ret = pinmux_hog_map(pctldev, map);
                        if (ret)
@@ -1122,13 +1125,15 @@ static int pinmux_show(struct seq_file *s, void *what)
 
                seq_printf(s, "device: %s function: %s (%u),",
                           pinctrl_dev_get_name(pmx->pctldev),
-                          pmxops->get_function_name(pctldev, pmx->func_selector),
+                          pmxops->get_function_name(pctldev,
+                                  pmx->func_selector),
                           pmx->func_selector);
 
                seq_printf(s, " groups: [");
                list_for_each_entry(grp, &pmx->groups, node) {
                        seq_printf(s, " %s (%u)",
-                                  pctlops->get_group_name(pctldev, grp->group_selector),
+                                  pctlops->get_group_name(pctldev,
+                                          grp->group_selector),
                                   grp->group_selector);
                }
                seq_printf(s, " ]");
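/*
 * Illustrative sketch (not part of this commit): after the rework of
 * pinmux_hog_maps() above, ctrl_dev_name is only strcmp()ed when it is
 * non-NULL, so a board can hog a system mux purely by controller name.
 * Field names are those checked in this diff; the values, and the exact
 * field types, are assumptions made for the example.
 */
static struct pinmux_map board_pinmux_map[] = {
        {
                .name          = "spi0-mux",    /* required: map name */
                .ctrl_dev_name = "pinctrl.0",   /* matched by strcmp() at hog time */
                .function      = "spi0",        /* required: function ID */
                .hog_on_boot   = true,          /* picked up by pinmux_hog_maps() */
        },
};

/* registered once from board code, e.g.:
 *      pinmux_register_mappings(board_pinmux_map,
 *                               ARRAY_SIZE(board_pinmux_map));
 */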
index 844500b3331bf8ee53ce5a3ae95f806c494e9372..97f52223fbc2968a58d6bf57aea5519cff1e6500 100644 (file)
@@ -12,7 +12,7 @@
  */
 #ifdef CONFIG_PINMUX
 
-int pinmux_check_ops(const struct pinmux_ops *ops);
+int pinmux_check_ops(struct pinctrl_dev *pctldev);
 void pinmux_init_device_debugfs(struct dentry *devroot,
                                struct pinctrl_dev *pctldev);
 void pinmux_init_debugfs(struct dentry *subsys_root);
@@ -21,7 +21,7 @@ void pinmux_unhog_maps(struct pinctrl_dev *pctldev);
 
 #else
 
-static inline int pinmux_check_ops(const struct pinmux_ops *ops)
+static inline int pinmux_check_ops(struct pinctrl_dev *pctldev)
 {
        return 0;
 }
index ca86f39a0fdc824fba463f0eb30157ce933bbe9c..e9a83f84adaf53771d94bd3b3761deb7cad2ca32 100644 (file)
@@ -2731,6 +2731,8 @@ static void rdev_init_debugfs(struct regulator_dev *rdev)
  * @dev: struct device for the regulator
  * @init_data: platform provided init data, passed through by driver
  * @driver_data: private regulator data
+ * @of_node: OpenFirmware node to parse for device tree bindings (may be
+ *           NULL).
  *
  * Called by regulator drivers to register a regulator.
  * Returns 0 on success.
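/*
 * Hedged sketch (not part of this commit): the @of_node line added to the
 * kerneldoc above implies regulator_register() now takes the device-tree
 * node as an extra, optional argument. Assuming it is passed last (the
 * signature itself is not shown in this hunk), a DT-aware probe might do
 * the following; foo_reg_desc and foo_init_data are invented placeholders.
 */
static int foo_regulator_probe(struct platform_device *pdev)
{
        struct regulator_dev *rdev;

        rdev = regulator_register(&foo_reg_desc, &pdev->dev,
                                  foo_init_data, NULL,
                                  pdev->dev.of_node);   /* may be NULL on non-DT boots */
        return IS_ERR(rdev) ? PTR_ERR(rdev) : 0;
}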
index f1651eb6964807d9ae86c6ed88032199d5950199..679734d26a16961ef100f1731c66ba864f5057e3 100644 (file)
@@ -35,7 +35,7 @@ static void of_get_regulation_constraints(struct device_node *np,
        if (constraints->min_uV != constraints->max_uV)
                constraints->valid_ops_mask |= REGULATOR_CHANGE_VOLTAGE;
        /* Only one voltage?  Then make sure it's set. */
-       if (constraints->min_uV == constraints->max_uV)
+       if (min_uV && max_uV && constraints->min_uV == constraints->max_uV)
                constraints->apply_uV = true;
 
        uV_offset = of_get_property(np, "regulator-microvolt-offset", NULL);
index e19a4031f45e9b3ad3e1e677afa2dffd271bb4fc..3a125b835546e692e3a9c70cd25434fd54398f90 100644 (file)
@@ -774,7 +774,7 @@ config RTC_DRV_EP93XX
 
 config RTC_DRV_SA1100
        tristate "SA11x0/PXA2xx"
-       depends on ARCH_SA1100 || ARCH_PXA || ARCH_MMP
+       depends on ARCH_SA1100 || ARCH_PXA
        help
          If you say Y here you will get access to the real time clock
          built into your SA11x0 or PXA2xx CPU.
index 4595d3e645a7358676b5409b8ced918f306e07c2..cb9a585312cc765c3e08a6d60720c3f669c8ba7b 100644 (file)
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/interrupt.h>
+#include <linux/string.h>
 #include <linux/pm.h>
-#include <linux/slab.h>
-#include <linux/clk.h>
-#include <linux/io.h>
+#include <linux/bitops.h>
 
 #include <mach/hardware.h>
 #include <asm/irq.h>
 
+#ifdef CONFIG_ARCH_PXA
+#include <mach/regs-rtc.h>
+#endif
+
 #define RTC_DEF_DIVIDER                (32768 - 1)
 #define RTC_DEF_TRIM           0
-#define RTC_FREQ               1024
-
-#define RCNR           0x00    /* RTC Count Register */
-#define RTAR           0x04    /* RTC Alarm Register */
-#define RTSR           0x08    /* RTC Status Register */
-#define RTTR           0x0c    /* RTC Timer Trim Register */
-
-#define RTSR_HZE       (1 << 3)        /* HZ interrupt enable */
-#define RTSR_ALE       (1 << 2)        /* RTC alarm interrupt enable */
-#define RTSR_HZ                (1 << 1)        /* HZ rising-edge detected */
-#define RTSR_AL                (1 << 0)        /* RTC alarm detected */
-
-#define rtc_readl(sa1100_rtc, reg)     \
-       readl_relaxed((sa1100_rtc)->base + (reg))
-#define rtc_writel(sa1100_rtc, reg, value)     \
-       writel_relaxed((value), (sa1100_rtc)->base + (reg))
-
-struct sa1100_rtc {
-       struct resource         *ress;
-       void __iomem            *base;
-       struct clk              *clk;
-       int                     irq_1Hz;
-       int                     irq_Alrm;
-       struct rtc_device       *rtc;
-       spinlock_t              lock;           /* Protects this structure */
-};
+
+static const unsigned long RTC_FREQ = 1024;
+static struct rtc_time rtc_alarm;
+static DEFINE_SPINLOCK(sa1100_rtc_lock);
+
+static inline int rtc_periodic_alarm(struct rtc_time *tm)
+{
+       return  (tm->tm_year == -1) ||
+               ((unsigned)tm->tm_mon >= 12) ||
+               ((unsigned)(tm->tm_mday - 1) >= 31) ||
+               ((unsigned)tm->tm_hour > 23) ||
+               ((unsigned)tm->tm_min > 59) ||
+               ((unsigned)tm->tm_sec > 59);
+}
+
 /*
  * Calculate the next alarm time given the requested alarm time mask
  * and the current time.
@@ -90,26 +82,46 @@ static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
        }
 }
 
+static int rtc_update_alarm(struct rtc_time *alrm)
+{
+       struct rtc_time alarm_tm, now_tm;
+       unsigned long now, time;
+       int ret;
+
+       do {
+               now = RCNR;
+               rtc_time_to_tm(now, &now_tm);
+               rtc_next_alarm_time(&alarm_tm, &now_tm, alrm);
+               ret = rtc_tm_to_time(&alarm_tm, &time);
+               if (ret != 0)
+                       break;
+
+               RTSR = RTSR & (RTSR_HZE|RTSR_ALE|RTSR_AL);
+               RTAR = time;
+       } while (now != RCNR);
+
+       return ret;
+}
+
 static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
 {
        struct platform_device *pdev = to_platform_device(dev_id);
-       struct sa1100_rtc *sa1100_rtc = platform_get_drvdata(pdev);
+       struct rtc_device *rtc = platform_get_drvdata(pdev);
        unsigned int rtsr;
        unsigned long events = 0;
 
-       spin_lock(&sa1100_rtc->lock);
+       spin_lock(&sa1100_rtc_lock);
 
+       rtsr = RTSR;
        /* clear interrupt sources */
-       rtsr = rtc_readl(sa1100_rtc, RTSR);
-       rtc_writel(sa1100_rtc, RTSR, 0);
-
+       RTSR = 0;
        /* Fix for a nasty initialization problem in the SA11xx RTSR register.
         * See also the comments in sa1100_rtc_probe(). */
        if (rtsr & (RTSR_ALE | RTSR_HZE)) {
                /* This is the original code, before there was the if test
                 * above. This code does not clear interrupts that were not
                 * enabled. */
-               rtc_writel(sa1100_rtc, RTSR, (RTSR_AL | RTSR_HZ) & (rtsr >> 2));
+               RTSR = (RTSR_AL | RTSR_HZ) & (rtsr >> 2);
        } else {
                /* For some reason, it is possible to enter this routine
                 * without interruptions enabled, it has been tested with
@@ -118,13 +130,13 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
                 * This situation leads to an infinite "loop" of interrupt
                 * routine calling and as a result the processor seems to
                 * lock on its first call to open(). */
-               rtc_writel(sa1100_rtc, RTSR, (RTSR_AL | RTSR_HZ));
+               RTSR = RTSR_AL | RTSR_HZ;
        }
 
        /* clear alarm interrupt if it has occurred */
        if (rtsr & RTSR_AL)
                rtsr &= ~RTSR_ALE;
-       rtc_writel(sa1100_rtc, RTSR, rtsr & (RTSR_ALE | RTSR_HZE));
+       RTSR = rtsr & (RTSR_ALE | RTSR_HZE);
 
        /* update irq data & counter */
        if (rtsr & RTSR_AL)
@@ -132,100 +144,89 @@ static irqreturn_t sa1100_rtc_interrupt(int irq, void *dev_id)
        if (rtsr & RTSR_HZ)
                events |= RTC_UF | RTC_IRQF;
 
-       rtc_update_irq(sa1100_rtc->rtc, 1, events);
+       rtc_update_irq(rtc, 1, events);
 
-       spin_unlock(&sa1100_rtc->lock);
+       if (rtsr & RTSR_AL && rtc_periodic_alarm(&rtc_alarm))
+               rtc_update_alarm(&rtc_alarm);
+
+       spin_unlock(&sa1100_rtc_lock);
 
        return IRQ_HANDLED;
 }
 
 static int sa1100_rtc_open(struct device *dev)
 {
-       struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
        int ret;
+       struct platform_device *plat_dev = to_platform_device(dev);
+       struct rtc_device *rtc = platform_get_drvdata(plat_dev);
 
-       ret = request_irq(sa1100_rtc->irq_1Hz, sa1100_rtc_interrupt,
-                               IRQF_DISABLED, "rtc 1Hz", dev);
+       ret = request_irq(IRQ_RTC1Hz, sa1100_rtc_interrupt, IRQF_DISABLED,
+               "rtc 1Hz", dev);
        if (ret) {
-               dev_err(dev, "IRQ %d already in use.\n", sa1100_rtc->irq_1Hz);
+               dev_err(dev, "IRQ %d already in use.\n", IRQ_RTC1Hz);
                goto fail_ui;
        }
-       ret = request_irq(sa1100_rtc->irq_Alrm, sa1100_rtc_interrupt,
-                               IRQF_DISABLED, "rtc Alrm", dev);
+       ret = request_irq(IRQ_RTCAlrm, sa1100_rtc_interrupt, IRQF_DISABLED,
+               "rtc Alrm", dev);
        if (ret) {
-               dev_err(dev, "IRQ %d already in use.\n", sa1100_rtc->irq_Alrm);
+               dev_err(dev, "IRQ %d already in use.\n", IRQ_RTCAlrm);
                goto fail_ai;
        }
-       sa1100_rtc->rtc->max_user_freq = RTC_FREQ;
-       rtc_irq_set_freq(sa1100_rtc->rtc, NULL, RTC_FREQ);
+       rtc->max_user_freq = RTC_FREQ;
+       rtc_irq_set_freq(rtc, NULL, RTC_FREQ);
 
        return 0;
 
  fail_ai:
-       free_irq(sa1100_rtc->irq_1Hz, dev);
+       free_irq(IRQ_RTC1Hz, dev);
  fail_ui:
        return ret;
 }
 
 static void sa1100_rtc_release(struct device *dev)
 {
-       struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
-
-       spin_lock_irq(&sa1100_rtc->lock);
-       rtc_writel(sa1100_rtc, RTSR, 0);
-       spin_unlock_irq(&sa1100_rtc->lock);
+       spin_lock_irq(&sa1100_rtc_lock);
+       RTSR = 0;
+       spin_unlock_irq(&sa1100_rtc_lock);
 
-       free_irq(sa1100_rtc->irq_Alrm, dev);
-       free_irq(sa1100_rtc->irq_1Hz, dev);
+       free_irq(IRQ_RTCAlrm, dev);
+       free_irq(IRQ_RTC1Hz, dev);
 }
 
 static int sa1100_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
 {
-       struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
-       unsigned int rtsr;
-
-       spin_lock_irq(&sa1100_rtc->lock);
-
-       rtsr = rtc_readl(sa1100_rtc, RTSR);
+       spin_lock_irq(&sa1100_rtc_lock);
        if (enabled)
-               rtsr |= RTSR_ALE;
+               RTSR |= RTSR_ALE;
        else
-               rtsr &= ~RTSR_ALE;
-       rtc_writel(sa1100_rtc, RTSR, rtsr);
-
-       spin_unlock_irq(&sa1100_rtc->lock);
+               RTSR &= ~RTSR_ALE;
+       spin_unlock_irq(&sa1100_rtc_lock);
        return 0;
 }
 
 static int sa1100_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
-       struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
-
-       rtc_time_to_tm(rtc_readl(sa1100_rtc, RCNR), tm);
+       rtc_time_to_tm(RCNR, tm);
        return 0;
 }
 
 static int sa1100_rtc_set_time(struct device *dev, struct rtc_time *tm)
 {
-       struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
        unsigned long time;
        int ret;
 
        ret = rtc_tm_to_time(tm, &time);
        if (ret == 0)
-               rtc_writel(sa1100_rtc, RCNR, time);
+               RCNR = time;
        return ret;
 }
 
 static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
-       struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
-       unsigned long time;
-       unsigned int rtsr;
+       u32     rtsr;
 
-       time = rtc_readl(sa1100_rtc, RCNR);
-       rtc_time_to_tm(time, &alrm->time);
-       rtsr = rtc_readl(sa1100_rtc, RTSR);
+       memcpy(&alrm->time, &rtc_alarm, sizeof(struct rtc_time));
+       rtsr = RTSR;
        alrm->enabled = (rtsr & RTSR_ALE) ? 1 : 0;
        alrm->pending = (rtsr & RTSR_AL) ? 1 : 0;
        return 0;
@@ -233,39 +234,26 @@ static int sa1100_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 
 static int sa1100_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
 {
-       struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
-       struct rtc_time now_tm, alarm_tm;
-       unsigned long time, alarm;
-       unsigned int rtsr;
-
-       spin_lock_irq(&sa1100_rtc->lock);
-
-       time = rtc_readl(sa1100_rtc, RCNR);
-       rtc_time_to_tm(time, &now_tm);
-       rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time);
-       rtc_tm_to_time(&alarm_tm, &alarm);
-       rtc_writel(sa1100_rtc, RTAR, alarm);
-
-       rtsr = rtc_readl(sa1100_rtc, RTSR);
-       if (alrm->enabled)
-               rtsr |= RTSR_ALE;
-       else
-               rtsr &= ~RTSR_ALE;
-       rtc_writel(sa1100_rtc, RTSR, rtsr);
+       int ret;
 
-       spin_unlock_irq(&sa1100_rtc->lock);
+       spin_lock_irq(&sa1100_rtc_lock);
+       ret = rtc_update_alarm(&alrm->time);
+       if (ret == 0) {
+               if (alrm->enabled)
+                       RTSR |= RTSR_ALE;
+               else
+                       RTSR &= ~RTSR_ALE;
+       }
+       spin_unlock_irq(&sa1100_rtc_lock);
 
-       return 0;
+       return ret;
 }
 
 static int sa1100_rtc_proc(struct device *dev, struct seq_file *seq)
 {
-       struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
+       seq_printf(seq, "trim/divider\t\t: 0x%08x\n", (u32) RTTR);
+       seq_printf(seq, "RTSR\t\t\t: 0x%08x\n", (u32)RTSR);
 
-       seq_printf(seq, "trim/divider\t\t: 0x%08x\n",
-                       rtc_readl(sa1100_rtc, RTTR));
-       seq_printf(seq, "RTSR\t\t\t: 0x%08x\n",
-                       rtc_readl(sa1100_rtc, RTSR));
        return 0;
 }
 
@@ -282,51 +270,7 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
 
 static int sa1100_rtc_probe(struct platform_device *pdev)
 {
-       struct sa1100_rtc *sa1100_rtc;
-       unsigned int rttr;
-       int ret;
-
-       sa1100_rtc = kzalloc(sizeof(struct sa1100_rtc), GFP_KERNEL);
-       if (!sa1100_rtc)
-               return -ENOMEM;
-
-       spin_lock_init(&sa1100_rtc->lock);
-       platform_set_drvdata(pdev, sa1100_rtc);
-
-       ret = -ENXIO;
-       sa1100_rtc->ress = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!sa1100_rtc->ress) {
-               dev_err(&pdev->dev, "No I/O memory resource defined\n");
-               goto err_ress;
-       }
-
-       sa1100_rtc->irq_1Hz = platform_get_irq(pdev, 0);
-       if (sa1100_rtc->irq_1Hz < 0) {
-               dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n");
-               goto err_ress;
-       }
-       sa1100_rtc->irq_Alrm = platform_get_irq(pdev, 1);
-       if (sa1100_rtc->irq_Alrm < 0) {
-               dev_err(&pdev->dev, "No alarm IRQ resource defined\n");
-               goto err_ress;
-       }
-
-       ret = -ENOMEM;
-       sa1100_rtc->base = ioremap(sa1100_rtc->ress->start,
-                               resource_size(sa1100_rtc->ress));
-       if (!sa1100_rtc->base) {
-               dev_err(&pdev->dev, "Unable to map pxa RTC I/O memory\n");
-               goto err_map;
-       }
-
-       sa1100_rtc->clk = clk_get(&pdev->dev, NULL);
-       if (IS_ERR(sa1100_rtc->clk)) {
-               dev_err(&pdev->dev, "failed to find rtc clock source\n");
-               ret = PTR_ERR(sa1100_rtc->clk);
-               goto err_clk;
-       }
-       clk_prepare(sa1100_rtc->clk);
-       clk_enable(sa1100_rtc->clk);
+       struct rtc_device *rtc;
 
        /*
         * According to the manual we should be able to let RTTR be zero
@@ -335,24 +279,24 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
         * If the clock divider is uninitialized then reset it to the
         * default value to get the 1Hz clock.
         */
-       if (rtc_readl(sa1100_rtc, RTTR) == 0) {
-               rttr = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
-               rtc_writel(sa1100_rtc, RTTR, rttr);
-               dev_warn(&pdev->dev, "warning: initializing default clock"
-                        " divider/trim value\n");
+       if (RTTR == 0) {
+               RTTR = RTC_DEF_DIVIDER + (RTC_DEF_TRIM << 16);
+               dev_warn(&pdev->dev, "warning: "
+                       "initializing default clock divider/trim value\n");
                /* The current RTC value probably doesn't make sense either */
-               rtc_writel(sa1100_rtc, RCNR, 0);
+               RCNR = 0;
        }
 
        device_init_wakeup(&pdev->dev, 1);
 
-       sa1100_rtc->rtc = rtc_device_register(pdev->name, &pdev->dev,
-                                               &sa1100_rtc_ops, THIS_MODULE);
-       if (IS_ERR(sa1100_rtc->rtc)) {
-               dev_err(&pdev->dev, "Failed to register RTC device -> %d\n",
-                       ret);
-               goto err_rtc_reg;
-       }
+       rtc = rtc_device_register(pdev->name, &pdev->dev, &sa1100_rtc_ops,
+               THIS_MODULE);
+
+       if (IS_ERR(rtc))
+               return PTR_ERR(rtc);
+
+       platform_set_drvdata(pdev, rtc);
+
        /* Fix for a nasty initialization problem in the SA11xx RTSR register.
         * See also the comments in sa1100_rtc_interrupt().
         *
@@ -375,46 +319,33 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
         *
         * Notice that clearing bits 1 and 0 is accomplished by writing ONES to
         * the corresponding bits in RTSR. */
-       rtc_writel(sa1100_rtc, RTSR, (RTSR_AL | RTSR_HZ));
+       RTSR = RTSR_AL | RTSR_HZ;
 
        return 0;
-
-err_rtc_reg:
-err_clk:
-       iounmap(sa1100_rtc->base);
-err_ress:
-err_map:
-       kfree(sa1100_rtc);
-       return ret;
 }
 
 static int sa1100_rtc_remove(struct platform_device *pdev)
 {
-       struct sa1100_rtc *sa1100_rtc = platform_get_drvdata(pdev);
+       struct rtc_device *rtc = platform_get_drvdata(pdev);
+
+       if (rtc)
+               rtc_device_unregister(rtc);
 
-       rtc_device_unregister(sa1100_rtc->rtc);
-       clk_disable(sa1100_rtc->clk);
-       clk_unprepare(sa1100_rtc->clk);
-       iounmap(sa1100_rtc->base);
        return 0;
 }
 
 #ifdef CONFIG_PM
 static int sa1100_rtc_suspend(struct device *dev)
 {
-       struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
-
        if (device_may_wakeup(dev))
-               enable_irq_wake(sa1100_rtc->irq_Alrm);
+               enable_irq_wake(IRQ_RTCAlrm);
        return 0;
 }
 
 static int sa1100_rtc_resume(struct device *dev)
 {
-       struct sa1100_rtc *sa1100_rtc = dev_get_drvdata(dev);
-
        if (device_may_wakeup(dev))
-               disable_irq_wake(sa1100_rtc->irq_Alrm);
+               disable_irq_wake(IRQ_RTCAlrm);
        return 0;
 }
 
index eef27a197c00f9d38bb529660e1cb6d3e80f47a3..110137e7ec81caae83eee0d914f61b73e38c746a 100644 (file)
@@ -3261,6 +3261,12 @@ void dasd_generic_path_event(struct ccw_device *cdev, int *path_event)
                        device->path_data.tbvpm |= eventlpm;
                        dasd_schedule_device_bh(device);
                }
+               if (path_event[chp] & PE_PATHGROUP_ESTABLISHED) {
+                       DBF_DEV_EVENT(DBF_WARNING, device, "%s",
+                                     "Pathgroup re-established\n");
+                       if (device->discipline->kick_validate)
+                               device->discipline->kick_validate(device);
+               }
        }
        dasd_put_device(device);
 }
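/*
 * Reduced sketch (not part of this commit): the ->kick_validate hook
 * called above is wired up later in this merge (dasd_eckd.c) as a
 * deferred work item, so the channel-path event handler never runs the
 * slow server validation itself. The shape of that pattern, including
 * the reference count that keeps the device alive until the worker runs:
 */
static void do_validate_worker(struct work_struct *work) /* mirrors dasd_eckd_do_validate_server() */
{
        struct dasd_device *device =
                container_of(work, struct dasd_device, kick_validate);

        /* slow revalidation happens here, in process context */
        dasd_put_device(device);        /* drop the reference taken below */
}

static void kick_validate(struct dasd_device *device)    /* mirrors dasd_eckd_kick_validate_server() */
{
        dasd_get_device(device);                /* pin the device for the worker */
        schedule_work(&device->kick_validate);  /* INIT_WORK() was done at probe time */
}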
index 553b3c5abb0abf37faa0507f28512e1d3d7ac363..b3beed5434e43d8064cbf48659a4ff9426ea3c36 100644 (file)
@@ -189,14 +189,12 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
        unsigned long flags;
        struct alias_server *server, *newserver;
        struct alias_lcu *lcu, *newlcu;
-       int is_lcu_known;
        struct dasd_uid uid;
 
        private = (struct dasd_eckd_private *) device->private;
 
        device->discipline->get_uid(device, &uid);
        spin_lock_irqsave(&aliastree.lock, flags);
-       is_lcu_known = 1;
        server = _find_server(&uid);
        if (!server) {
                spin_unlock_irqrestore(&aliastree.lock, flags);
@@ -208,7 +206,6 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
                if (!server) {
                        list_add(&newserver->server, &aliastree.serverlist);
                        server = newserver;
-                       is_lcu_known = 0;
                } else {
                        /* someone was faster */
                        _free_server(newserver);
@@ -226,12 +223,10 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
                if (!lcu) {
                        list_add(&newlcu->lcu, &server->lculist);
                        lcu = newlcu;
-                       is_lcu_known = 0;
                } else {
                        /* someone was faster */
                        _free_lcu(newlcu);
                }
-               is_lcu_known = 0;
        }
        spin_lock(&lcu->lock);
        list_add(&device->alias_list, &lcu->inactive_devices);
@@ -239,64 +234,7 @@ int dasd_alias_make_device_known_to_lcu(struct dasd_device *device)
        spin_unlock(&lcu->lock);
        spin_unlock_irqrestore(&aliastree.lock, flags);
 
-       return is_lcu_known;
-}
-
-/*
- * The first device to be registered on an LCU will have to do
- * some additional setup steps to configure that LCU on the
- * storage server. All further devices should wait with their
- * initialization until the first device is done.
- * To synchronize this work, the first device will call
- * dasd_alias_lcu_setup_complete when it is done, and all
- * other devices will wait for it with dasd_alias_wait_for_lcu_setup.
- */
-void dasd_alias_lcu_setup_complete(struct dasd_device *device)
-{
-       unsigned long flags;
-       struct alias_server *server;
-       struct alias_lcu *lcu;
-       struct dasd_uid uid;
-
-       device->discipline->get_uid(device, &uid);
-       lcu = NULL;
-       spin_lock_irqsave(&aliastree.lock, flags);
-       server = _find_server(&uid);
-       if (server)
-               lcu = _find_lcu(server, &uid);
-       spin_unlock_irqrestore(&aliastree.lock, flags);
-       if (!lcu) {
-               DBF_EVENT_DEVID(DBF_ERR, device->cdev,
-                               "could not find lcu for %04x %02x",
-                               uid.ssid, uid.real_unit_addr);
-               WARN_ON(1);
-               return;
-       }
-       complete_all(&lcu->lcu_setup);
-}
-
-void dasd_alias_wait_for_lcu_setup(struct dasd_device *device)
-{
-       unsigned long flags;
-       struct alias_server *server;
-       struct alias_lcu *lcu;
-       struct dasd_uid uid;
-
-       device->discipline->get_uid(device, &uid);
-       lcu = NULL;
-       spin_lock_irqsave(&aliastree.lock, flags);
-       server = _find_server(&uid);
-       if (server)
-               lcu = _find_lcu(server, &uid);
-       spin_unlock_irqrestore(&aliastree.lock, flags);
-       if (!lcu) {
-               DBF_EVENT_DEVID(DBF_ERR, device->cdev,
-                               "could not find lcu for %04x %02x",
-                               uid.ssid, uid.real_unit_addr);
-               WARN_ON(1);
-               return;
-       }
-       wait_for_completion(&lcu->lcu_setup);
+       return 0;
 }
 
 /*
index bbcd5e9206ee27dff5c85fc78ef09353135e3656..70880be260151b62dce8ff5724a47bf8a28be7ac 100644 (file)
@@ -1534,6 +1534,10 @@ static void dasd_eckd_validate_server(struct dasd_device *device)
        struct dasd_eckd_private *private;
        int enable_pav;
 
+       private = (struct dasd_eckd_private *) device->private;
+       if (private->uid.type == UA_BASE_PAV_ALIAS ||
+           private->uid.type == UA_HYPER_PAV_ALIAS)
+               return;
        if (dasd_nopav || MACHINE_IS_VM)
                enable_pav = 0;
        else
@@ -1542,11 +1546,28 @@ static void dasd_eckd_validate_server(struct dasd_device *device)
 
        /* maybe the requested feature is not available on the server,
         * therefore just report the error and go ahead */
-       private = (struct dasd_eckd_private *) device->private;
        DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "PSF-SSC for SSID %04x "
                        "returned rc=%d", private->uid.ssid, rc);
 }
 
+/*
+ * worker to do a validate server in case of a lost pathgroup
+ */
+static void dasd_eckd_do_validate_server(struct work_struct *work)
+{
+       struct dasd_device *device = container_of(work, struct dasd_device,
+                                                 kick_validate);
+       dasd_eckd_validate_server(device);
+       dasd_put_device(device);
+}
+
+static void dasd_eckd_kick_validate_server(struct dasd_device *device)
+{
+       dasd_get_device(device);
+       /* queue call to do_validate_server to the kernel event daemon. */
+       schedule_work(&device->kick_validate);
+}
+
 static u32 get_fcx_max_data(struct dasd_device *device)
 {
 #if defined(CONFIG_64BIT)
@@ -1588,10 +1609,13 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
        struct dasd_eckd_private *private;
        struct dasd_block *block;
        struct dasd_uid temp_uid;
-       int is_known, rc, i;
+       int rc, i;
        int readonly;
        unsigned long value;
 
+       /* setup work queue for validate server*/
+       INIT_WORK(&device->kick_validate, dasd_eckd_do_validate_server);
+
        if (!ccw_device_is_pathgroup(device->cdev)) {
                dev_warn(&device->cdev->dev,
                         "A channel path group could not be established\n");
@@ -1651,22 +1675,12 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
                block->base = device;
        }
 
-       /* register lcu with alias handling, enable PAV if this is a new lcu */
-       is_known = dasd_alias_make_device_known_to_lcu(device);
-       if (is_known < 0) {
-               rc = is_known;
+       /* register lcu with alias handling, enable PAV */
+       rc = dasd_alias_make_device_known_to_lcu(device);
+       if (rc)
                goto out_err2;
-       }
-       /*
-        * dasd_eckd_validate_server is done on the first device that
-        * is found for an LCU. All later other devices have to wait
-        * for it, so they will read the correct feature codes.
-        */
-       if (!is_known) {
-               dasd_eckd_validate_server(device);
-               dasd_alias_lcu_setup_complete(device);
-       } else
-               dasd_alias_wait_for_lcu_setup(device);
+
+       dasd_eckd_validate_server(device);
 
        /* device may report different configuration data after LCU setup */
        rc = dasd_eckd_read_conf(device);
@@ -4098,7 +4112,7 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
 {
        struct dasd_eckd_private *private;
        struct dasd_eckd_characteristics temp_rdc_data;
-       int is_known, rc;
+       int rc;
        struct dasd_uid temp_uid;
        unsigned long flags;
 
@@ -4121,14 +4135,10 @@ static int dasd_eckd_restore_device(struct dasd_device *device)
                goto out_err;
 
        /* register lcu with alias handling, enable PAV if this is a new lcu */
-       is_known = dasd_alias_make_device_known_to_lcu(device);
-       if (is_known < 0)
-               return is_known;
-       if (!is_known) {
-               dasd_eckd_validate_server(device);
-               dasd_alias_lcu_setup_complete(device);
-       } else
-               dasd_alias_wait_for_lcu_setup(device);
+       rc = dasd_alias_make_device_known_to_lcu(device);
+       if (rc)
+               return rc;
+       dasd_eckd_validate_server(device);
 
        /* RE-Read Configuration Data */
        rc = dasd_eckd_read_conf(device);
@@ -4270,6 +4280,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
        .restore = dasd_eckd_restore_device,
        .reload = dasd_eckd_reload_device,
        .get_uid = dasd_eckd_get_uid,
+       .kick_validate = dasd_eckd_kick_validate_server,
 };
 
 static int __init
index afe8c33422edae5f6e90ae8afa597f54f0346959..33a6743ddc558c11e9b07e09e230efb40e80da28 100644 (file)
@@ -355,6 +355,7 @@ struct dasd_discipline {
        int (*reload) (struct dasd_device *);
 
        int (*get_uid) (struct dasd_device *, struct dasd_uid *);
+       void (*kick_validate) (struct dasd_device *);
 };
 
 extern struct dasd_discipline *dasd_diag_discipline_pointer;
@@ -455,6 +456,7 @@ struct dasd_device {
        struct work_struct kick_work;
        struct work_struct restore_device;
        struct work_struct reload_device;
+       struct work_struct kick_validate;
        struct timer_list timer;
 
        debug_info_t *debug_area;
index 06ea3bcfdd2a3064f462592bbed67ca4b24e9c7e..16570aa84aac0e1e420a541d7224ecfd0e567bbc 100644 (file)
@@ -830,16 +830,11 @@ config SCSI_ISCI
        tristate "Intel(R) C600 Series Chipset SAS Controller"
        depends on PCI && SCSI
        depends on X86
-       # (temporary): known alpha quality driver
-       depends on EXPERIMENTAL
        select SCSI_SAS_LIBSAS
-       select SCSI_SAS_HOST_SMP
        ---help---
          This driver supports the 6Gb/s SAS capabilities of the storage
          control unit found in the Intel(R) C600 series chipset.
 
-         The experimental tag will be removed after the driver exits alpha
-
 config SCSI_GENERIC_NCR5380
        tristate "Generic NCR5380/53c400 SCSI PIO support"
        depends on ISA && SCSI
index 78963be2c4fb308f09df6f38b3cf5097b3bb5f53..cb07c628b2f1856a3a26dadba052e2587edd3ae7 100644 (file)
@@ -673,12 +673,7 @@ struct bfa_itnim_iostats_s {
        u32     tm_iocdowns;            /*  TM cleaned-up due to IOC down   */
        u32     tm_cleanups;            /*  TM cleanup requests */
        u32     tm_cleanup_comps;       /*  TM cleanup completions      */
-       u32     lm_lun_across_sg;       /*  LM lun is across sg data buf */
-       u32     lm_lun_not_sup;         /*  LM lun not supported */
-       u32     lm_rpl_data_changed;    /*  LM report-lun data changed */
-       u32     lm_wire_residue_changed; /* LM report-lun rsp residue changed */
-       u32     lm_small_buf_addresidue; /* LM buf smaller than reported cnt */
-       u32     lm_lun_not_rdy;         /* LM lun not ready */
+       u32     rsvd[6];
 };
 
 /* Modify char* port_stt[] in bfal_port.c if a new state was added */
index 50b6a1c86195ac6d6c394295fcdd8150dbd43852..8d0b88f67a382e3582c40382d7dfe3892a19f3c8 100644 (file)
@@ -56,161 +56,6 @@ struct scsi_cdb_s {
 
 #define SCSI_MAX_ALLOC_LEN      0xFF    /* maximum allocation length */
 
-#define SCSI_SENSE_CUR_ERR     0x70
-#define SCSI_SENSE_DEF_ERR     0x71
-
-/*
- * SCSI additional sense codes
- */
-#define SCSI_ASC_LUN_NOT_READY         0x04
-#define SCSI_ASC_LUN_NOT_SUPPORTED     0x25
-#define SCSI_ASC_TOCC                  0x3F
-
-/*
- * SCSI additional sense code qualifiers
- */
-#define SCSI_ASCQ_MAN_INTR_REQ         0x03    /* manual intervention req */
-#define SCSI_ASCQ_RL_DATA_CHANGED      0x0E    /* report luns data changed */
-
-/*
- * Methods of reporting informational exceptions
- */
-#define SCSI_MP_IEC_UNIT_ATTN          0x2     /* generate unit attention */
-
-struct scsi_report_luns_data_s {
-       u32             lun_list_length;        /* length of LUN list length */
-       u32             reserved;
-       struct scsi_lun lun[1];                 /* first LUN in lun list */
-};
-
-struct scsi_inquiry_vendor_s {
-       u8      vendor_id[8];
-};
-
-struct scsi_inquiry_prodid_s {
-       u8      product_id[16];
-};
-
-struct scsi_inquiry_prodrev_s {
-       u8      product_rev[4];
-};
-
-struct scsi_inquiry_data_s {
-#ifdef __BIG_ENDIAN
-       u8              peripheral_qual:3;      /* peripheral qualifier */
-       u8              device_type:5;          /* peripheral device type */
-       u8              rmb:1;                  /* removable medium bit */
-       u8              device_type_mod:7;      /* device type modifier */
-       u8              version;
-       u8              aenc:1;         /* async evt notification capability */
-       u8              trm_iop:1;      /* terminate I/O process */
-       u8              norm_aca:1;     /* normal ACA supported */
-       u8              hi_support:1;   /* SCSI-3: supports REPORT LUNS */
-       u8              rsp_data_format:4;
-       u8              additional_len;
-       u8              sccs:1;
-       u8              reserved1:7;
-       u8              reserved2:1;
-       u8              enc_serv:1;     /* enclosure service component */
-       u8              reserved3:1;
-       u8              multi_port:1;   /* multi-port device */
-       u8              m_chngr:1;      /* device in medium transport element */
-       u8              ack_req_q:1;    /* SIP specific bit */
-       u8              addr32:1;       /* SIP specific bit */
-       u8              addr16:1;       /* SIP specific bit */
-       u8              rel_adr:1;      /* relative address */
-       u8              w_bus32:1;
-       u8              w_bus16:1;
-       u8              synchronous:1;
-       u8              linked_commands:1;
-       u8              trans_dis:1;
-       u8              cmd_queue:1;    /* command queueing supported */
-       u8              soft_reset:1;   /* soft reset alternative (VS) */
-#else
-       u8              device_type:5;  /* peripheral device type */
-       u8              peripheral_qual:3; /* peripheral qualifier */
-       u8              device_type_mod:7; /* device type modifier */
-       u8              rmb:1;          /* removable medium bit */
-       u8              version;
-       u8              rsp_data_format:4;
-       u8              hi_support:1;   /* SCSI-3: supports REPORT LUNS */
-       u8              norm_aca:1;     /* normal ACA supported */
-       u8              terminate_iop:1;/* terminate I/O process */
-       u8              aenc:1;         /* async evt notification capability */
-       u8              additional_len;
-       u8              reserved1:7;
-       u8              sccs:1;
-       u8              addr16:1;       /* SIP specific bit */
-       u8              addr32:1;       /* SIP specific bit */
-       u8              ack_req_q:1;    /* SIP specific bit */
-       u8              m_chngr:1;      /* device in medium transport element */
-       u8              multi_port:1;   /* multi-port device */
-       u8              reserved3:1;    /* TBD - Vendor Specific */
-       u8              enc_serv:1;     /* enclosure service component */
-       u8              reserved2:1;
-       u8              soft_seset:1;   /* soft reset alternative (VS) */
-       u8              cmd_queue:1;    /* command queueing supported */
-       u8              trans_dis:1;
-       u8              linked_commands:1;
-       u8              synchronous:1;
-       u8              w_bus16:1;
-       u8              w_bus32:1;
-       u8              rel_adr:1;      /* relative address */
-#endif
-       struct scsi_inquiry_vendor_s    vendor_id;
-       struct scsi_inquiry_prodid_s    product_id;
-       struct scsi_inquiry_prodrev_s   product_rev;
-       u8              vendor_specific[20];
-       u8              reserved4[40];
-};
-
-/*
- *     SCSI sense data format
- */
-struct scsi_sense_s {
-#ifdef __BIG_ENDIAN
-       u8              valid:1;
-       u8              rsp_code:7;
-#else
-       u8              rsp_code:7;
-       u8              valid:1;
-#endif
-       u8              seg_num;
-#ifdef __BIG_ENDIAN
-       u8              file_mark:1;
-       u8              eom:1;          /* end of media */
-       u8              ili:1;          /* incorrect length indicator */
-       u8              reserved:1;
-       u8              sense_key:4;
-#else
-       u8              sense_key:4;
-       u8              reserved:1;
-       u8              ili:1;          /* incorrect length indicator */
-       u8              eom:1;          /* end of media */
-       u8              file_mark:1;
-#endif
-       u8              information[4]; /* device-type or cmd specific info */
-       u8              add_sense_length; /* additional sense length */
-       u8              command_info[4];/* command specific information */
-       u8              asc;            /* additional sense code */
-       u8              ascq;           /* additional sense code qualifier */
-       u8              fru_code;       /* field replaceable unit code */
-#ifdef __BIG_ENDIAN
-       u8              sksv:1;         /* sense key specific valid */
-       u8              c_d:1;          /* command/data bit */
-       u8              res1:2;
-       u8              bpv:1;          /* bit pointer valid */
-       u8              bpointer:3;     /* bit pointer */
-#else
-       u8              bpointer:3;     /* bit pointer */
-       u8              bpv:1;          /* bit pointer valid */
-       u8              res1:2;
-       u8              c_d:1;          /* command/data bit */
-       u8              sksv:1;         /* sense key specific valid */
-#endif
-       u8              fpointer[2];    /* field pointer */
-};
-
 /*
  * Fibre Channel Header Structure (FCHS) definition
  */
index e07bd4745d8ba5b968ded24e81785096c1535b84..f0f80e282e39cc023a72dacef7ee23b09c06a79e 100644 (file)
@@ -24,8 +24,6 @@ BFA_TRC_FILE(HAL, FCPIM);
  *  BFA ITNIM Related definitions
  */
 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
-static bfa_boolean_t bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim);
-static bfa_boolean_t bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim);
 static void bfa_ioim_lm_init(struct bfa_s *bfa);
 
 #define BFA_ITNIM_FROM_TAG(_fcpim, _tag)                                \
@@ -60,14 +58,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
        }                                                               \
 } while (0)
 
-#define bfa_ioim_rp_wwn(__ioim)                                                \
-       (((struct bfa_fcs_rport_s *)                                    \
-        (__ioim)->itnim->rport->rport_drv)->pwwn)
-
-#define bfa_ioim_lp_wwn(__ioim)                                                \
-       ((BFA_LPS_FROM_TAG(BFA_LPS_MOD((__ioim)->bfa),                  \
-       (__ioim)->itnim->rport->rport_info.lp_tag))->pwwn)              \
-
 #define bfa_itnim_sler_cb(__itnim) do {                                        \
        if ((__itnim)->bfa->fcs)                                        \
                bfa_cb_itnim_sler((__itnim)->ditn);      \
@@ -77,13 +67,6 @@ static void bfa_ioim_lm_init(struct bfa_s *bfa);
        }                                                               \
 } while (0)
 
-enum bfa_ioim_lm_status {
-       BFA_IOIM_LM_PRESENT = 1,
-       BFA_IOIM_LM_LUN_NOT_SUP = 2,
-       BFA_IOIM_LM_RPL_DATA_CHANGED = 3,
-       BFA_IOIM_LM_LUN_NOT_RDY = 4,
-};
-
 enum bfa_ioim_lm_ua_status {
        BFA_IOIM_LM_UA_RESET = 0,
        BFA_IOIM_LM_UA_SET = 1,
@@ -145,9 +128,6 @@ enum bfa_ioim_event {
        BFA_IOIM_SM_TMDONE      = 16,   /*  IO cleanup from tskim */
        BFA_IOIM_SM_HWFAIL      = 17,   /*  IOC h/w failure event */
        BFA_IOIM_SM_IOTOV       = 18,   /*  ITN offline TOV */
-       BFA_IOIM_SM_LM_LUN_NOT_SUP = 19,/*  lunmask lun not supported */
-       BFA_IOIM_SM_LM_RPL_DC = 20,     /*  lunmask report-lun data changed */
-       BFA_IOIM_SM_LM_LUN_NOT_RDY = 21,/*  lunmask lun not ready */
 };
 
 
@@ -245,9 +225,6 @@ static void __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete);
 static void __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete);
 static void __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete);
 static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
-static void __bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete);
-static void __bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete);
 
 /*
  * forward declaration of BFA IO state machine
@@ -445,12 +422,6 @@ bfa_fcpim_add_stats(struct bfa_itnim_iostats_s *lstats,
        bfa_fcpim_add_iostats(lstats, rstats, output_reqs);
        bfa_fcpim_add_iostats(lstats, rstats, rd_throughput);
        bfa_fcpim_add_iostats(lstats, rstats, wr_throughput);
-       bfa_fcpim_add_iostats(lstats, rstats, lm_lun_across_sg);
-       bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_sup);
-       bfa_fcpim_add_iostats(lstats, rstats, lm_rpl_data_changed);
-       bfa_fcpim_add_iostats(lstats, rstats, lm_wire_residue_changed);
-       bfa_fcpim_add_iostats(lstats, rstats, lm_small_buf_addresidue);
-       bfa_fcpim_add_iostats(lstats, rstats, lm_lun_not_rdy);
 }
 
 bfa_status_t
@@ -1580,27 +1551,6 @@ bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim, enum bfa_ioim_event event)
                        __bfa_cb_ioim_abort, ioim);
                break;
 
-       case BFA_IOIM_SM_LM_LUN_NOT_SUP:
-               bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-               bfa_ioim_move_to_comp_q(ioim);
-               bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-                       __bfa_cb_ioim_lm_lun_not_sup, ioim);
-               break;
-
-       case BFA_IOIM_SM_LM_RPL_DC:
-               bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-               bfa_ioim_move_to_comp_q(ioim);
-               bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-                               __bfa_cb_ioim_lm_rpl_dc, ioim);
-               break;
-
-       case BFA_IOIM_SM_LM_LUN_NOT_RDY:
-               bfa_sm_set_state(ioim, bfa_ioim_sm_hcb);
-               bfa_ioim_move_to_comp_q(ioim);
-               bfa_cb_queue(ioim->bfa, &ioim->hcb_qe,
-                       __bfa_cb_ioim_lm_lun_not_rdy, ioim);
-               break;
-
        default:
                bfa_sm_fault(ioim->bfa, event);
        }
@@ -2160,243 +2110,6 @@ bfa_ioim_lm_init(struct bfa_s *bfa)
        }
 }
 
-/*
- * Validate LUN for LUN masking
- */
-static enum bfa_ioim_lm_status
-bfa_ioim_lm_check(struct bfa_ioim_s *ioim, struct bfa_lps_s *lps,
-               struct bfa_rport_s *rp, struct scsi_lun lun)
-{
-       u8 i;
-       struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
-       struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-       struct scsi_cdb_s *cdb = (struct scsi_cdb_s *)cmnd->cmnd;
-
-       if ((cdb->scsi_cdb[0] == REPORT_LUNS) &&
-           (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
-               ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
-               return BFA_IOIM_LM_PRESENT;
-       }
-
-       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
-
-               if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
-                       continue;
-
-               if ((scsilun_to_int((struct scsi_lun *)&lun_list[i].lun) ==
-                   scsilun_to_int((struct scsi_lun *)&lun))
-                   && (rp->rport_tag == lun_list[i].rp_tag)
-                   && ((u8)ioim->itnim->rport->rport_info.lp_tag ==
-                                               lun_list[i].lp_tag)) {
-                       bfa_trc(ioim->bfa, lun_list[i].rp_tag);
-                       bfa_trc(ioim->bfa, lun_list[i].lp_tag);
-                       bfa_trc(ioim->bfa, scsilun_to_int(
-                               (struct scsi_lun *)&lun_list[i].lun));
-
-                       if ((lun_list[i].ua == BFA_IOIM_LM_UA_SET) &&
-                           ((cdb->scsi_cdb[0] != INQUIRY) ||
-                           (cdb->scsi_cdb[0] != REPORT_LUNS))) {
-                               lun_list[i].ua = BFA_IOIM_LM_UA_RESET;
-                               return BFA_IOIM_LM_RPL_DATA_CHANGED;
-                       }
-
-                       if (cdb->scsi_cdb[0] == REPORT_LUNS)
-                               ioim->proc_rsp_data = bfa_ioim_lm_proc_rpl_data;
-
-                       return BFA_IOIM_LM_PRESENT;
-               }
-       }
-
-       if ((cdb->scsi_cdb[0] == INQUIRY) &&
-           (scsilun_to_int((struct scsi_lun *)&lun) == 0)) {
-               ioim->proc_rsp_data = bfa_ioim_lm_proc_inq_data;
-               return BFA_IOIM_LM_PRESENT;
-       }
-
-       if (cdb->scsi_cdb[0] == TEST_UNIT_READY)
-               return BFA_IOIM_LM_LUN_NOT_RDY;
-
-       return BFA_IOIM_LM_LUN_NOT_SUP;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rsp_data_dummy(struct bfa_ioim_s *ioim)
-{
-       return BFA_TRUE;
-}
-
-static void
-bfa_ioim_lm_fetch_lun(struct bfa_ioim_s *ioim, u8 *rl_data, int offset,
-               int buf_lun_cnt)
-{
-       struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
-       struct scsi_lun *lun_data = (struct scsi_lun *)(rl_data + offset);
-       struct scsi_lun lun;
-       int i, j;
-
-       bfa_trc(ioim->bfa, buf_lun_cnt);
-       for (j = 0; j < buf_lun_cnt; j++) {
-               lun = *((struct scsi_lun *)(lun_data + j));
-               for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
-                       if (lun_list[i].state != BFA_IOIM_LUN_MASK_ACTIVE)
-                               continue;
-                       if ((lun_list[i].rp_wwn == bfa_ioim_rp_wwn(ioim)) &&
-                           (lun_list[i].lp_wwn == bfa_ioim_lp_wwn(ioim)) &&
-                           (scsilun_to_int((struct scsi_lun *)&lun_list[i].lun)
-                               == scsilun_to_int((struct scsi_lun *)&lun))) {
-                               lun_list[i].state = BFA_IOIM_LUN_MASK_FETCHED;
-                               break;
-                       }
-               } /* next lun in mask DB */
-       } /* next lun in buf */
-}
-
-static int
-bfa_ioim_lm_update_lun_sg(struct bfa_ioim_s *ioim, u32 *pgdlen,
-               struct scsi_report_luns_data_s *rl)
-{
-       struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-       struct scatterlist *sg = scsi_sglist(cmnd);
-       struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(ioim->bfa);
-       struct scsi_lun *prev_rl_data = NULL, *base_rl_data;
-       int i, j, sgeid, lun_fetched_cnt = 0, prev_sg_len = 0, base_count;
-       int lun_across_sg_bytes, bytes_from_next_buf;
-       u64     last_lun, temp_last_lun;
-
-       /* fetch luns from the first sg element */
-       bfa_ioim_lm_fetch_lun(ioim, (u8 *)(rl->lun), 0,
-                       (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1);
-
-       /* fetch luns from multiple sg elements */
-       scsi_for_each_sg(cmnd, sg, scsi_sg_count(cmnd), sgeid) {
-               if (sgeid == 0) {
-                       prev_sg_len = sg_dma_len(sg);
-                       prev_rl_data = (struct scsi_lun *)
-                                       phys_to_virt(sg_dma_address(sg));
-                       continue;
-               }
-
-               /* if the buf is having more data */
-               lun_across_sg_bytes = prev_sg_len % sizeof(struct scsi_lun);
-               if (lun_across_sg_bytes) {
-                       bfa_trc(ioim->bfa, lun_across_sg_bytes);
-                       bfa_stats(ioim->itnim, lm_lun_across_sg);
-                       bytes_from_next_buf = sizeof(struct scsi_lun) -
-                                             lun_across_sg_bytes;
-
-                       /* from next buf take higher bytes */
-                       temp_last_lun = *((u64 *)
-                                         phys_to_virt(sg_dma_address(sg)));
-                       last_lun |= temp_last_lun >>
-                                   (lun_across_sg_bytes * BITS_PER_BYTE);
-
-                       /* from prev buf take higher bytes */
-                       temp_last_lun = *((u64 *)(prev_rl_data +
-                                         (prev_sg_len - lun_across_sg_bytes)));
-                       temp_last_lun >>= bytes_from_next_buf * BITS_PER_BYTE;
-                       last_lun = last_lun | (temp_last_lun <<
-                                  (bytes_from_next_buf * BITS_PER_BYTE));
-
-                       bfa_ioim_lm_fetch_lun(ioim, (u8 *)&last_lun, 0, 1);
-               } else
-                       bytes_from_next_buf = 0;
-
-               *pgdlen += sg_dma_len(sg);
-               prev_sg_len = sg_dma_len(sg);
-               prev_rl_data = (struct scsi_lun *)
-                               phys_to_virt(sg_dma_address(sg));
-               bfa_ioim_lm_fetch_lun(ioim, (u8 *)prev_rl_data,
-                               bytes_from_next_buf,
-                               sg_dma_len(sg) / sizeof(struct scsi_lun));
-       }
-
-       /* update the report luns data - based on fetched luns */
-       sg = scsi_sglist(cmnd);
-       base_rl_data = (struct scsi_lun *)rl->lun;
-       base_count = (sg_dma_len(sg) / sizeof(struct scsi_lun)) - 1;
-       for (i = 0, j = 0; i < MAX_LUN_MASK_CFG; i++) {
-               if (lun_list[i].state == BFA_IOIM_LUN_MASK_FETCHED) {
-                       base_rl_data[j] = lun_list[i].lun;
-                       lun_list[i].state = BFA_IOIM_LUN_MASK_ACTIVE;
-                       j++;
-                       lun_fetched_cnt++;
-               }
-
-               if (j > base_count) {
-                       j = 0;
-                       sg = sg_next(sg);
-                       base_rl_data = (struct scsi_lun *)
-                                       phys_to_virt(sg_dma_address(sg));
-                       base_count = sg_dma_len(sg) / sizeof(struct scsi_lun);
-               }
-       }
-
-       bfa_trc(ioim->bfa, lun_fetched_cnt);
-       return lun_fetched_cnt;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_inq_data(struct bfa_ioim_s *ioim)
-{
-       struct scsi_inquiry_data_s *inq;
-       struct scatterlist *sg = scsi_sglist((struct scsi_cmnd *)ioim->dio);
-
-       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
-       inq = (struct scsi_inquiry_data_s *)phys_to_virt(sg_dma_address(sg));
-
-       bfa_trc(ioim->bfa, inq->device_type);
-       inq->peripheral_qual = SCSI_INQ_PQ_NOT_CON;
-       return 0;
-}
-
-static bfa_boolean_t
-bfa_ioim_lm_proc_rpl_data(struct bfa_ioim_s *ioim)
-{
-       struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-       struct scatterlist *sg = scsi_sglist(cmnd);
-       struct bfi_ioim_rsp_s *m;
-       struct scsi_report_luns_data_s *rl = NULL;
-       int lun_count = 0, lun_fetched_cnt = 0;
-       u32 residue, pgdlen = 0;
-
-       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
-       if (bfa_get_lun_mask_status(ioim->bfa) != BFA_LUNMASK_ENABLED)
-               return BFA_TRUE;
-
-       m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
-       if (m->scsi_status == SCSI_STATUS_CHECK_CONDITION)
-               return BFA_TRUE;
-
-       pgdlen = sg_dma_len(sg);
-       bfa_trc(ioim->bfa, pgdlen);
-       rl = (struct scsi_report_luns_data_s *)phys_to_virt(sg_dma_address(sg));
-       lun_count = cpu_to_be32(rl->lun_list_length) / sizeof(struct scsi_lun);
-       lun_fetched_cnt = bfa_ioim_lm_update_lun_sg(ioim, &pgdlen, rl);
-
-       if (lun_count == lun_fetched_cnt)
-               return BFA_TRUE;
-
-       bfa_trc(ioim->bfa, lun_count);
-       bfa_trc(ioim->bfa, lun_fetched_cnt);
-       bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
-
-       if (be32_to_cpu(rl->lun_list_length) <= pgdlen)
-               rl->lun_list_length = be32_to_cpu(lun_fetched_cnt) *
-                                     sizeof(struct scsi_lun);
-       else
-               bfa_stats(ioim->itnim, lm_small_buf_addresidue);
-
-       bfa_trc(ioim->bfa, be32_to_cpu(rl->lun_list_length));
-       bfa_trc(ioim->bfa, be32_to_cpu(m->residue));
-
-       residue = be32_to_cpu(m->residue);
-       residue += (lun_count - lun_fetched_cnt) * sizeof(struct scsi_lun);
-       bfa_stats(ioim->itnim, lm_wire_residue_changed);
-       m->residue = be32_to_cpu(residue);
-       bfa_trc(ioim->bfa, ioim->nsges);
-       return BFA_FALSE;
-}
-
 static void
 __bfa_cb_ioim_good_comp(void *cbarg, bfa_boolean_t complete)
 {
@@ -2454,83 +2167,6 @@ __bfa_cb_ioim_comp(void *cbarg, bfa_boolean_t complete)
                          m->scsi_status, sns_len, snsinfo, residue);
 }
 
-static void
-__bfa_cb_ioim_lm_lun_not_sup(void *cbarg, bfa_boolean_t complete)
-{
-       struct bfa_ioim_s *ioim = cbarg;
-       int sns_len = 0xD;
-       u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
-       struct scsi_sense_s *snsinfo;
-
-       if (!complete) {
-               bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-               return;
-       }
-
-       snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
-                                       ioim->fcpim->fcp, ioim->iotag);
-       snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
-       snsinfo->add_sense_length = 0xa;
-       snsinfo->asc = SCSI_ASC_LUN_NOT_SUPPORTED;
-       snsinfo->sense_key = ILLEGAL_REQUEST;
-       bfa_trc(ioim->bfa, residue);
-       bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
-                       SCSI_STATUS_CHECK_CONDITION, sns_len,
-                       (u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_rpl_dc(void *cbarg, bfa_boolean_t complete)
-{
-       struct bfa_ioim_s *ioim = cbarg;
-       int sns_len = 0xD;
-       u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
-       struct scsi_sense_s *snsinfo;
-
-       if (!complete) {
-               bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-               return;
-       }
-
-       snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(ioim->fcpim->fcp,
-                                                      ioim->iotag);
-       snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
-       snsinfo->sense_key = SCSI_MP_IEC_UNIT_ATTN;
-       snsinfo->asc = SCSI_ASC_TOCC;
-       snsinfo->add_sense_length = 0x6;
-       snsinfo->ascq = SCSI_ASCQ_RL_DATA_CHANGED;
-       bfa_trc(ioim->bfa, residue);
-       bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
-                       SCSI_STATUS_CHECK_CONDITION, sns_len,
-                       (u8 *)snsinfo, residue);
-}
-
-static void
-__bfa_cb_ioim_lm_lun_not_rdy(void *cbarg, bfa_boolean_t complete)
-{
-       struct bfa_ioim_s *ioim = cbarg;
-       int sns_len = 0xD;
-       u32 residue = scsi_bufflen((struct scsi_cmnd *)ioim->dio);
-       struct scsi_sense_s *snsinfo;
-
-       if (!complete) {
-               bfa_sm_send_event(ioim, BFA_IOIM_SM_HCB);
-               return;
-       }
-
-       snsinfo = (struct scsi_sense_s *)BFA_SNSINFO_FROM_TAG(
-                                       ioim->fcpim->fcp, ioim->iotag);
-       snsinfo->rsp_code = SCSI_SENSE_CUR_ERR;
-       snsinfo->add_sense_length = 0xa;
-       snsinfo->sense_key = NOT_READY;
-       snsinfo->asc = SCSI_ASC_LUN_NOT_READY;
-       snsinfo->ascq = SCSI_ASCQ_MAN_INTR_REQ;
-       bfa_trc(ioim->bfa, residue);
-       bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_OK,
-                       SCSI_STATUS_CHECK_CONDITION, sns_len,
-                       (u8 *)snsinfo, residue);
-}
-
 void
 bfa_fcpim_lunmask_rp_update(struct bfa_s *bfa, wwn_t lp_wwn, wwn_t rp_wwn,
                        u16 rp_tag, u8 lp_tag)
@@ -2647,7 +2283,8 @@ bfa_fcpim_lunmask_add(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
        if (port) {
                *pwwn = port->port_cfg.pwwn;
                rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
-               rp = rp_fcs->bfa_rport;
+               if (rp_fcs)
+                       rp = rp_fcs->bfa_rport;
        }
 
        lunm_list = bfa_get_lun_mask_list(bfa);
@@ -2715,7 +2352,8 @@ bfa_fcpim_lunmask_delete(struct bfa_s *bfa, u16 vf_id, wwn_t *pwwn,
                if (port) {
                        *pwwn = port->port_cfg.pwwn;
                        rp_fcs = bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
-                       rp = rp_fcs->bfa_rport;
+                       if (rp_fcs)
+                               rp = rp_fcs->bfa_rport;
                }
        }
 
@@ -2757,7 +2395,6 @@ __bfa_cb_ioim_failed(void *cbarg, bfa_boolean_t complete)
                return;
        }
 
-       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
        bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_ABORTED,
                          0, 0, NULL, 0);
 }
@@ -2773,7 +2410,6 @@ __bfa_cb_ioim_pathtov(void *cbarg, bfa_boolean_t complete)
                return;
        }
 
-       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
        bfa_cb_ioim_done(ioim->bfa->bfad, ioim->dio, BFI_IOIM_STS_PATHTOV,
                          0, 0, NULL, 0);
 }
@@ -2788,7 +2424,6 @@ __bfa_cb_ioim_abort(void *cbarg, bfa_boolean_t complete)
                return;
        }
 
-       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
        bfa_cb_ioim_abort(ioim->bfa->bfad, ioim->dio);
 }
 
@@ -3132,7 +2767,6 @@ bfa_ioim_attach(struct bfa_fcpim_s *fcpim)
                ioim->bfa     = fcpim->bfa;
                ioim->fcpim   = fcpim;
                ioim->iosp    = iosp;
-               ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
                INIT_LIST_HEAD(&ioim->sgpg_q);
                bfa_reqq_winit(&ioim->iosp->reqq_wait,
                                   bfa_ioim_qresume, ioim);
@@ -3170,7 +2804,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                        evt = BFA_IOIM_SM_DONE;
                else
                        evt = BFA_IOIM_SM_COMP;
-               ioim->proc_rsp_data(ioim);
                break;
 
        case BFI_IOIM_STS_TIMEDOUT:
@@ -3206,7 +2839,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                if (rsp->abort_tag != ioim->abort_tag) {
                        bfa_trc(ioim->bfa, rsp->abort_tag);
                        bfa_trc(ioim->bfa, ioim->abort_tag);
-                       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
                        return;
                }
 
@@ -3225,7 +2857,6 @@ bfa_ioim_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
                WARN_ON(1);
        }
 
-       ioim->proc_rsp_data = bfa_ioim_lm_proc_rsp_data_dummy;
        bfa_sm_send_event(ioim, evt);
 }
 
@@ -3244,15 +2875,7 @@ bfa_ioim_good_comp_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
 
        bfa_ioim_cb_profile_comp(fcpim, ioim);
 
-       if (bfa_get_lun_mask_status(bfa) != BFA_LUNMASK_ENABLED)  {
-               bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
-               return;
-       }
-
-       if (ioim->proc_rsp_data(ioim) == BFA_TRUE)
-               bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
-       else
-               bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP);
+       bfa_sm_send_event(ioim, BFA_IOIM_SM_COMP_GOOD);
 }
 
 /*
@@ -3364,35 +2987,6 @@ bfa_ioim_free(struct bfa_ioim_s *ioim)
 void
 bfa_ioim_start(struct bfa_ioim_s *ioim)
 {
-       struct scsi_cmnd *cmnd = (struct scsi_cmnd *)ioim->dio;
-       struct bfa_lps_s        *lps;
-       enum bfa_ioim_lm_status status;
-       struct scsi_lun scsilun;
-
-       if (bfa_get_lun_mask_status(ioim->bfa) == BFA_LUNMASK_ENABLED) {
-               lps = BFA_IOIM_TO_LPS(ioim);
-               int_to_scsilun(cmnd->device->lun, &scsilun);
-               status = bfa_ioim_lm_check(ioim, lps,
-                               ioim->itnim->rport, scsilun);
-               if (status == BFA_IOIM_LM_LUN_NOT_RDY) {
-                       bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_RDY);
-                       bfa_stats(ioim->itnim, lm_lun_not_rdy);
-                       return;
-               }
-
-               if (status == BFA_IOIM_LM_LUN_NOT_SUP) {
-                       bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_LUN_NOT_SUP);
-                       bfa_stats(ioim->itnim, lm_lun_not_sup);
-                       return;
-               }
-
-               if (status == BFA_IOIM_LM_RPL_DATA_CHANGED) {
-                       bfa_sm_send_event(ioim, BFA_IOIM_SM_LM_RPL_DC);
-                       bfa_stats(ioim->itnim, lm_rpl_data_changed);
-                       return;
-               }
-       }
-
        bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
 
        /*
index 1080bcb81cb73a2caf409d800da3b78deed8234f..36f26da80f76c1c496ec20bc1e42ae93e878df11 100644 (file)
@@ -110,7 +110,6 @@ struct bfad_ioim_s;
 struct bfad_tskim_s;
 
 typedef void    (*bfa_fcpim_profile_t) (struct bfa_ioim_s *ioim);
-typedef bfa_boolean_t (*bfa_ioim_lm_proc_rsp_data_t) (struct bfa_ioim_s *ioim);
 
 struct bfa_fcpim_s {
        struct bfa_s            *bfa;
@@ -124,7 +123,6 @@ struct bfa_fcpim_s {
        u32                     path_tov;
        u16                     q_depth;
        u8                      reqq;           /*  Request queue to be used */
-       u8                      lun_masking_pending;
        struct list_head        itnim_q;        /*  queue of active itnim */
        struct list_head        ioim_resfree_q; /*  IOs waiting for f/w */
        struct list_head        ioim_comp_q;    /*  IO global comp Q    */
@@ -181,7 +179,6 @@ struct bfa_ioim_s {
        u8                      reqq;           /*  Request queue for I/O */
        u8                      mode;           /*  IO is passthrough or not */
        u64                     start_time;     /*  IO's Profile start val */
-       bfa_ioim_lm_proc_rsp_data_t proc_rsp_data; /* RSP data adjust */
 };
 
 struct bfa_ioim_sp_s {
@@ -261,10 +258,6 @@ struct bfa_itnim_s {
        (__ioim)->iotag |= k << BFA_IOIM_RETRY_TAG_OFFSET;              \
 } while (0)
 
-#define BFA_IOIM_TO_LPS(__ioim)                \
-       BFA_LPS_FROM_TAG(BFA_LPS_MOD(__ioim->bfa),      \
-               __ioim->itnim->rport->rport_info.lp_tag)
-
 static inline bfa_boolean_t
 bfa_ioim_maxretry_reached(struct bfa_ioim_s *ioim)
 {
index 95adb86d3769d477bdbf0b652aa704621eca665b..b52cbb6bcd5a3b6b4c7623753df79630892686ea 100644 (file)
@@ -582,11 +582,6 @@ void bfa_cb_rport_qos_scn_prio(void *rport,
 #define BFA_LP_TAG_INVALID     0xff
 void   bfa_rport_set_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
 void   bfa_rport_unset_lunmask(struct bfa_s *bfa, struct bfa_rport_s *rp);
-bfa_boolean_t  bfa_rport_lunmask_active(struct bfa_rport_s *rp);
-wwn_t  bfa_rport_get_pwwn(struct bfa_s *bfa, struct bfa_rport_s *rp);
-struct bfa_rport_s *bfa_rport_get_by_wwn(struct bfa_s *bfa, u16 vf_id,
-                                        wwn_t *lpwwn, wwn_t rpwwn);
-void *bfa_cb_get_rp_by_wwn(void *arg, u16 vf_id, wwn_t *lpwwn, wwn_t rpwwn);
 
 /*
  * bfa fcxp API functions
index 66fb72531b34caab0323797761d68ac2ab0e6fbc..404fd10ddb21cd6b89821694807e402d30e3bf21 100644 (file)
@@ -674,6 +674,7 @@ bfad_vport_create(struct bfad_s *bfad, u16 vf_id,
 
        spin_lock_irqsave(&bfad->bfad_lock, flags);
        bfa_fcs_vport_start(&vport->fcs_vport);
+       list_add_tail(&vport->list_entry, &bfad->vport_list);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
        return BFA_STATUS_OK;
@@ -1404,6 +1405,7 @@ bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
        bfad->ref_count = 0;
        bfad->pport.bfad = bfad;
        INIT_LIST_HEAD(&bfad->pbc_vport_list);
+       INIT_LIST_HEAD(&bfad->vport_list);
 
        /* Setup the debugfs node for this bfad */
        if (bfa_debugfs_enable)
index 9d95844ab463ededc29b22e417d413e912b7cc48..1938fe0473e99b9aa24a5ee6e50a4e6fe9e9ac4d 100644 (file)
@@ -491,7 +491,7 @@ bfad_im_vport_delete(struct fc_vport *fc_vport)
 
 free_scsi_host:
        bfad_scsi_host_free(bfad, im_port);
-
+       list_del(&vport->list_entry);
        kfree(vport);
 
        return 0;
index 06fc00caeb41f725750a1ddafd44b5731e7eaa4a..530de2b1200a20c58b0e88a49c299ed97c2d1126 100644 (file)
@@ -2394,6 +2394,21 @@ out:
        return 0;
 }
 
+/* Function to reset the LUN SCAN mode */
+static void
+bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
+{
+       struct bfad_im_port_s *pport_im = bfad->pport.im_port;
+       struct bfad_vport_s *vport = NULL;
+
+       /* Set the scsi device LUN SCAN flags for base port */
+       bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
+
+       /* Set the scsi device LUN SCAN flags for the vports */
+       list_for_each_entry(vport, &bfad->vport_list, list_entry)
+               bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
+}
+
 int
 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
 {
@@ -2401,11 +2416,17 @@ bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
        unsigned long   flags;
 
        spin_lock_irqsave(&bfad->bfad_lock, flags);
-       if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE)
+       if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
                iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
-       else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE)
+               /* Set the LUN Scanning mode to be Sequential scan */
+               if (iocmd->status == BFA_STATUS_OK)
+                       bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
+       } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
                iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
-       else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
+               /* Set the LUN Scanning mode to default REPORT_LUNS scan */
+               if (iocmd->status == BFA_STATUS_OK)
+                       bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
+       } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
                iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
        spin_unlock_irqrestore(&bfad->bfad_lock, flags);
        return 0;
index 5e19a5f820ec6d515773ec942ff4ba1d9184608e..dc5b9d99c4505f1356f7e0bed16d05bd992285ef 100644 (file)
@@ -43,6 +43,7 @@
 #include <scsi/scsi_transport_fc.h>
 #include <scsi/scsi_transport.h>
 #include <scsi/scsi_bsg_fc.h>
+#include <scsi/scsi_devinfo.h>
 
 #include "bfa_modules.h"
 #include "bfa_fcs.h"
@@ -227,6 +228,7 @@ struct bfad_s {
        struct list_head        active_aen_q;
        struct bfa_aen_entry_s  aen_list[BFA_AEN_MAX_ENTRY];
        spinlock_t              bfad_aen_spinlock;
+       struct list_head        vport_list;
 };
 
 /* BFAD state machine events */
index e5db649e8eb757dbe79241645c024734d76dd21a..3153923f5b6027f1c16d806e14092e1df5356218 100644 (file)
@@ -917,6 +917,37 @@ bfad_get_itnim(struct bfad_im_port_s *im_port, int id)
        return NULL;
 }
 
+/*
+ * Function is invoked from the SCSI Host Template slave_alloc() entry point.
+ * Has the logic to query the LUN Mask database to check if this LUN needs to
+ * be made visible to the SCSI mid-layer or not.
+ *
+ * Returns BFA_STATUS_OK if this LUN needs to be added to the OS stack.
+ * Returns -ENXIO to tell the SCSI mid-layer not to add this LUN to the OS stack.
+ */
+static int
+bfad_im_check_if_make_lun_visible(struct scsi_device *sdev,
+                                 struct fc_rport *rport)
+{
+       struct bfad_itnim_data_s *itnim_data =
+                               (struct bfad_itnim_data_s *) rport->dd_data;
+       struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
+       struct bfa_rport_s *bfa_rport = itnim_data->itnim->bfa_itnim->rport;
+       struct bfa_lun_mask_s *lun_list = bfa_get_lun_mask_list(bfa);
+       int i = 0, ret = -ENXIO;
+
+       for (i = 0; i < MAX_LUN_MASK_CFG; i++) {
+               if (lun_list[i].state == BFA_IOIM_LUN_MASK_ACTIVE &&
+                   scsilun_to_int(&lun_list[i].lun) == sdev->lun &&
+                   lun_list[i].rp_tag == bfa_rport->rport_tag &&
+                   lun_list[i].lp_tag == (u8)bfa_rport->rport_info.lp_tag) {
+                       ret = BFA_STATUS_OK;
+                       break;
+               }
+       }
+       return ret;
+}
+
 /*
  * Scsi_Host template entry slave_alloc
  */
@@ -924,10 +955,33 @@ static int
 bfad_im_slave_alloc(struct scsi_device *sdev)
 {
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+       struct bfad_itnim_data_s *itnim_data =
+                               (struct bfad_itnim_data_s *) rport->dd_data;
+       struct bfa_s *bfa = itnim_data->itnim->bfa_itnim->bfa;
 
        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;
 
+       if (bfa_get_lun_mask_status(bfa) == BFA_LUNMASK_ENABLED) {
+               /*
+                * We should not mask LUN 0, since doing so would translate
+                * to no LUN / TARGET for the SCSI mid-layer, resulting in no scan.
+                */
+               if (sdev->lun == 0) {
+                       sdev->sdev_bflags |= BLIST_NOREPORTLUN |
+                                            BLIST_SPARSELUN;
+                       goto done;
+               }
+
+               /*
+                * Query LUN Mask configuration - to expose this LUN
+                * to the SCSI mid-layer or to mask it.
+                */
+               if (bfad_im_check_if_make_lun_visible(sdev, rport) !=
+                                                       BFA_STATUS_OK)
+                       return -ENXIO;
+       }
+done:
        sdev->hostdata = rport->dd_data;
 
        return 0;
@@ -1037,6 +1091,8 @@ bfad_im_fc_rport_add(struct bfad_im_port_s *im_port, struct bfad_itnim_s *itnim)
            && (fc_rport->scsi_target_id < MAX_FCP_TARGET))
                itnim->scsi_tgt_id = fc_rport->scsi_target_id;
 
+       itnim->channel = fc_rport->channel;
+
        return;
 }
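
For illustration only, here is a minimal userspace sketch of the visibility decision that the slave_alloc changes above implement when LUN masking is enabled: LUN 0 is always exposed (and switched to sparse, no-REPORT_LUNS scanning), while any other LUN is exposed only if an active entry in the mask table matches its LUN number, remote port tag and logical port tag. The struct layout, field names and sample tag values below are hypothetical stand-ins, not the driver's own types.

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the driver's LUN mask table entries. */
enum { LUN_MASK_INACTIVE = 0, LUN_MASK_ACTIVE = 1 };

struct lun_mask_entry {
	int      state;
	uint64_t lun;     /* LUN number, already converted from struct scsi_lun */
	uint16_t rp_tag;  /* remote port tag */
	uint8_t  lp_tag;  /* logical port tag */
};

/* Returns 1 if the LUN should be exposed to the SCSI mid-layer, 0 otherwise. */
static int lun_visible(const struct lun_mask_entry *tbl, int n,
		       uint64_t lun, uint16_t rp_tag, uint8_t lp_tag)
{
	int i;

	if (lun == 0)   /* LUN 0 must stay visible or the target is never scanned */
		return 1;

	for (i = 0; i < n; i++)
		if (tbl[i].state == LUN_MASK_ACTIVE && tbl[i].lun == lun &&
		    tbl[i].rp_tag == rp_tag && tbl[i].lp_tag == lp_tag)
			return 1;

	return 0;       /* masked */
}

int main(void)
{
	struct lun_mask_entry tbl[] = { { LUN_MASK_ACTIVE, 2, 0x10, 1 } };

	printf("LUN 0 visible: %d\n", lun_visible(tbl, 1, 0, 0x10, 1)); /* 1 */
	printf("LUN 2 visible: %d\n", lun_visible(tbl, 1, 2, 0x10, 1)); /* 1 */
	printf("LUN 3 visible: %d\n", lun_visible(tbl, 1, 3, 0x10, 1)); /* 0 */
	return 0;
}

Returning 0 here corresponds to slave_alloc returning -ENXIO, which keeps a masked LUN out of the OS device list without failing the scan of the whole target.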
 
index 004b6cf848d943288934452237c1cfa2ef8b8cfd..0814367ef101a1c075c0cfd4f5a52bc34dea920d 100644 (file)
@@ -91,6 +91,7 @@ struct bfad_itnim_s {
        struct fc_rport *fc_rport;
        struct bfa_itnim_s *bfa_itnim;
        u16        scsi_tgt_id;
+       u16        channel;
        u16        queue_work;
        unsigned long   last_ramp_up_time;
        unsigned long   last_queue_full_time;
@@ -166,4 +167,30 @@ irqreturn_t bfad_intx(int irq, void *dev_id);
 int bfad_im_bsg_request(struct fc_bsg_job *job);
 int bfad_im_bsg_timeout(struct fc_bsg_job *job);
 
+/*
+ * Macro to set the SCSI device sdev_bflags - sdev_bflags are used by the
+ * SCSI mid-layer to choose LUN Scanning mode REPORT_LUNS vs. Sequential Scan
+ *
+ * Internally iterates over all the ITNIMs that are part of the im_port and sets the
+ * sdev_bflags for the scsi_device associated with LUN #0.
+ */
+#define bfad_reset_sdev_bflags(__im_port, __lunmask_cfg) do {          \
+       struct scsi_device *__sdev = NULL;                              \
+       struct bfad_itnim_s *__itnim = NULL;                            \
+       u32 scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;           \
+       list_for_each_entry(__itnim, &((__im_port)->itnim_mapped_list), \
+                           list_entry) {                               \
+               __sdev = scsi_device_lookup((__im_port)->shost,         \
+                                           __itnim->channel,           \
+                                           __itnim->scsi_tgt_id, 0);   \
+               if (__sdev) {                                           \
+                       if ((__lunmask_cfg) == BFA_TRUE)                \
+                               __sdev->sdev_bflags |= scan_flags;      \
+                       else                                            \
+                               __sdev->sdev_bflags &= ~scan_flags;     \
+                       scsi_device_put(__sdev);                        \
+               }                                                       \
+       }                                                               \
+} while (0)
+
 #endif
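
In plain terms, the macro above walks the im_port's itnim_mapped_list, looks up the scsi_device at LUN 0 for each ITNIM, and either sets or clears BLIST_NOREPORTLUN | BLIST_SPARSELUN depending on whether LUN masking was just enabled or disabled. A minimal sketch of that flag toggle, using illustrative flag values rather than the real <scsi/scsi_devinfo.h> definitions:

#include <stdio.h>

/* Illustrative values only; the real flags come from <scsi/scsi_devinfo.h>. */
#define BLIST_NOREPORTLUN  (1u << 0)
#define BLIST_SPARSELUN    (1u << 1)

/* Mirror of the flag toggle done per scsi_device at LUN 0. */
static unsigned int reset_scan_flags(unsigned int bflags, int lunmask_enabled)
{
	const unsigned int scan_flags = BLIST_NOREPORTLUN | BLIST_SPARSELUN;

	if (lunmask_enabled)
		return bflags | scan_flags;   /* force sequential LUN scan */
	return bflags & ~scan_flags;          /* back to REPORT_LUNS scanning */
}

int main(void)
{
	unsigned int bflags = 0;

	bflags = reset_scan_flags(bflags, 1);
	printf("lunmask enabled:  0x%x\n", bflags);  /* 0x3 */
	bflags = reset_scan_flags(bflags, 0);
	printf("lunmask disabled: 0x%x\n", bflags);  /* 0x0 */
	return 0;
}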
index c5360ffb4bed35ae9bd06fa1a0195f2517b82778..d3ff9cd40234f5d5ea478020ecbf0a04bac1503e 100644 (file)
@@ -1868,8 +1868,9 @@ int cxgbi_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
 
        tdata->skb = alloc_skb(cdev->skb_tx_rsvd + headroom, GFP_ATOMIC);
        if (!tdata->skb) {
-               pr_warn("alloc skb %u+%u, opcode 0x%x failed.\n",
-                       cdev->skb_tx_rsvd, headroom, opcode);
+               struct cxgbi_sock *csk = cconn->cep->csk;
+               struct net_device *ndev = cdev->ports[csk->port_id];
+               ndev->stats.tx_dropped++;
                return -ENOMEM;
        }
 
index 4ef021291a4d06d2ecd4340f5ba1c35658a5c9d1..04c5cea47a2258a2156f692a6441253db31a8506 100644 (file)
@@ -466,6 +466,11 @@ static int alua_check_sense(struct scsi_device *sdev,
                         * Power On, Reset, or Bus Device Reset, just retry.
                         */
                        return ADD_TO_MLQUEUE;
+               if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01)
+                       /*
+                        * Mode Parameters Changed
+                        */
+                       return ADD_TO_MLQUEUE;
                if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06)
                        /*
                         * ALUA state changed
index 841ebf4a6788fc3895d5ddd31b8a67f7ce49ab55..53a31c753cb1e682ead92268901bc966845a1996 100644 (file)
@@ -953,6 +953,8 @@ static int __init rdac_init(void)
        if (!kmpath_rdacd) {
                scsi_unregister_device_handler(&rdac_dh);
                printk(KERN_ERR "kmpath_rdacd creation failed.\n");
+
+               r = -EINVAL;
        }
 done:
        return r;
index 8d67467dd9cec100f52b51803fbe943192421a58..e9599600aa230b8b6315c405730352f99e6a7d9a 100644 (file)
@@ -58,7 +58,11 @@ module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "     \
                 "Direct Data Placement (DDP).");
 
-DEFINE_MUTEX(fcoe_config_mutex);
+unsigned int fcoe_debug_logging;
+module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+
+static DEFINE_MUTEX(fcoe_config_mutex);
 
 static struct workqueue_struct *fcoe_wq;
 
@@ -67,8 +71,8 @@ static DECLARE_COMPLETION(fcoe_flush_completion);
 
 /* fcoe host list */
 /* must only by accessed under the RTNL mutex */
-LIST_HEAD(fcoe_hostlist);
-DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
+static LIST_HEAD(fcoe_hostlist);
+static DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
 
 /* Function Prototypes */
 static int fcoe_reset(struct Scsi_Host *);
@@ -157,7 +161,7 @@ static struct libfc_function_template fcoe_libfc_fcn_templ = {
        .lport_set_port_id = fcoe_set_port_id,
 };
 
-struct fc_function_template fcoe_nport_fc_functions = {
+static struct fc_function_template fcoe_nport_fc_functions = {
        .show_host_node_name = 1,
        .show_host_port_name = 1,
        .show_host_supported_classes = 1,
@@ -197,7 +201,7 @@ struct fc_function_template fcoe_nport_fc_functions = {
        .bsg_request = fc_lport_bsg_request,
 };
 
-struct fc_function_template fcoe_vport_fc_functions = {
+static struct fc_function_template fcoe_vport_fc_functions = {
        .show_host_node_name = 1,
        .show_host_port_name = 1,
        .show_host_supported_classes = 1,
@@ -433,7 +437,7 @@ static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
  *
  * Caller must be holding the RTNL mutex
  */
-void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
 {
        struct net_device *netdev = fcoe->netdev;
        struct fcoe_ctlr *fip = &fcoe->ctlr;
@@ -748,7 +752,7 @@ static int fcoe_shost_config(struct fc_lport *lport, struct device *dev)
  *
  * Returns: True for read types I/O, otherwise returns false.
  */
-bool fcoe_oem_match(struct fc_frame *fp)
+static bool fcoe_oem_match(struct fc_frame *fp)
 {
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        struct fcp_cmnd *fcp;
@@ -756,11 +760,12 @@ bool fcoe_oem_match(struct fc_frame *fp)
        if (fc_fcp_is_read(fr_fsp(fp)) &&
            (fr_fsp(fp)->data_len > fcoe_ddp_min))
                return true;
-       else if (!(ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)) {
+       else if ((fr_fsp(fp) == NULL) &&
+                (fh->fh_r_ctl == FC_RCTL_DD_UNSOL_CMD) &&
+                (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN)) {
                fcp = fc_frame_payload_get(fp, sizeof(*fcp));
-               if (ntohs(fh->fh_rx_id) == FC_XID_UNKNOWN &&
-                   fcp && (ntohl(fcp->fc_dl) > fcoe_ddp_min) &&
-                   (fcp->fc_flags & FCP_CFL_WRDATA))
+               if ((fcp->fc_flags & FCP_CFL_WRDATA) &&
+                   (ntohl(fcp->fc_dl) > fcoe_ddp_min))
                        return true;
        }
        return false;
@@ -1106,7 +1111,7 @@ static int __init fcoe_if_init(void)
  *
  * Returns: 0 on success
  */
-int __exit fcoe_if_exit(void)
+static int __exit fcoe_if_exit(void)
 {
        fc_release_transport(fcoe_nport_scsi_transport);
        fc_release_transport(fcoe_vport_scsi_transport);
@@ -1295,7 +1300,7 @@ static inline unsigned int fcoe_select_cpu(void)
  *
  * Returns: 0 for success
  */
-int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
+static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
             struct packet_type *ptype, struct net_device *olddev)
 {
        struct fc_lport *lport;
@@ -1451,7 +1456,7 @@ static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
  *
  * Return: 0 for success
  */
-int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
+static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
 {
        int wlen;
        u32 crc;
@@ -1671,8 +1676,7 @@ static void fcoe_recv_frame(struct sk_buff *skb)
                        skb->dev ? skb->dev->name : "<NULL>");
 
        port = lport_priv(lport);
-       if (skb_is_nonlinear(skb))
-               skb_linearize(skb);     /* not ideal */
+       skb_linearize(skb); /* the skb_is_nonlinear() check is done inside skb_linearize() */
 
        /*
         * Frame length checks and setting up the header pointers
@@ -1728,7 +1732,7 @@ drop:
  *
  * Return: 0 for success
  */
-int fcoe_percpu_receive_thread(void *arg)
+static int fcoe_percpu_receive_thread(void *arg)
 {
        struct fcoe_percpu_s *p = arg;
        struct sk_buff *skb;
@@ -2146,7 +2150,7 @@ out_nortnl:
  * Returns: 0 if the ethtool query was successful
  *          -1 if the ethtool query failed
  */
-int fcoe_link_speed_update(struct fc_lport *lport)
+static int fcoe_link_speed_update(struct fc_lport *lport)
 {
        struct net_device *netdev = fcoe_netdev(lport);
        struct ethtool_cmd ecmd;
@@ -2180,7 +2184,7 @@ int fcoe_link_speed_update(struct fc_lport *lport)
  * Returns: 0 if link is UP and OK, -1 if not
  *
  */
-int fcoe_link_ok(struct fc_lport *lport)
+static int fcoe_link_ok(struct fc_lport *lport)
 {
        struct net_device *netdev = fcoe_netdev(lport);
 
@@ -2200,7 +2204,7 @@ int fcoe_link_ok(struct fc_lport *lport)
  * there no packets that will be handled by the lport, but also that any
  * threads already handling packet have returned.
  */
-void fcoe_percpu_clean(struct fc_lport *lport)
+static void fcoe_percpu_clean(struct fc_lport *lport)
 {
        struct fcoe_percpu_s *pp;
        struct fcoe_rcv_info *fr;
@@ -2251,7 +2255,7 @@ void fcoe_percpu_clean(struct fc_lport *lport)
  *
  * Returns: Always 0 (return value required by FC transport template)
  */
-int fcoe_reset(struct Scsi_Host *shost)
+static int fcoe_reset(struct Scsi_Host *shost)
 {
        struct fc_lport *lport = shost_priv(shost);
        struct fcoe_port *port = lport_priv(lport);
index 6c6884bcf84004e7f792ccccc71bf9728cc24095..bcc89e63949573173e20fac339765d72633b96de 100644 (file)
@@ -40,9 +40,7 @@
 #define FCOE_MIN_XID           0x0000  /* the min xid supported by fcoe_sw */
 #define FCOE_MAX_XID           0x0FFF  /* the max xid supported by fcoe_sw */
 
-unsigned int fcoe_debug_logging;
-module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
-MODULE_PARM_DESC(debug_logging, "a bit mask of logging levels");
+extern unsigned int fcoe_debug_logging;
 
 #define FCOE_LOGGING       0x01 /* General logging, not categorized */
 #define FCOE_NETDEV_LOGGING 0x02 /* Netdevice logging */
index 5140f5d0fd6be610f5038368fdec6627be4e9cea..b96962c394492604cd2fc2a73ce80731db28882d 100644 (file)
@@ -4271,7 +4271,9 @@ static void stop_controller_lockup_detector(struct ctlr_info *h)
        remove_ctlr_from_lockup_detector_list(h);
        /* If the list of ctlr's to monitor is empty, stop the thread */
        if (list_empty(&hpsa_ctlr_list)) {
+               spin_unlock_irqrestore(&lockup_detector_lock, flags);
                kthread_stop(hpsa_lockup_detector);
+               spin_lock_irqsave(&lockup_detector_lock, flags);
                hpsa_lockup_detector = NULL;
        }
        spin_unlock_irqrestore(&lockup_detector_lock, flags);
diff --git a/drivers/scsi/isci/firmware/Makefile b/drivers/scsi/isci/firmware/Makefile
deleted file mode 100644 (file)
index 5f54461..0000000
+++ /dev/null
@@ -1,19 +0,0 @@
-# Makefile for create_fw
-#
-CC=gcc
-CFLAGS=-c -Wall -O2 -g
-LDFLAGS=
-SOURCES=create_fw.c
-OBJECTS=$(SOURCES:.cpp=.o)
-EXECUTABLE=create_fw
-
-all: $(SOURCES) $(EXECUTABLE)
-
-$(EXECUTABLE): $(OBJECTS)
-       $(CC) $(LDFLAGS) $(OBJECTS) -o $@
-
-.c.o:
-       $(CC) $(CFLAGS) $< -O $@
-
-clean:
-       rm -f *.o $(EXECUTABLE)
diff --git a/drivers/scsi/isci/firmware/README b/drivers/scsi/isci/firmware/README
deleted file mode 100644 (file)
index 8056d2b..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-This defines the temporary binary blow we are to pass to the SCU
-driver to emulate the binary firmware that we will eventually be
-able to access via NVRAM on the SCU controller.
-
-The current size of the binary blob is expected to be 149 bytes or larger
-
-Header Types:
-0x1: Phy Masks
-0x2: Phy Gens
-0x3: SAS Addrs
-0xff: End of Data
-
-ID string - u8[12]: "#SCU MAGIC#\0"
-Version - u8: 1
-SubVersion - u8: 0
-
-Header Type - u8: 0x1
-Size - u8: 8
-Phy Mask - u32[8]
-
-Header Type - u8: 0x2
-Size - u8: 8
-Phy Gen - u32[8]
-
-Header Type - u8: 0x3
-Size - u8: 8
-Sas Addr - u64[8]
-
-Header Type - u8: 0xf
-
-
-==============================================================================
-
-Place isci_firmware.bin in /lib/firmware
-Be sure to recreate the initramfs image to include the firmware.
-
diff --git a/drivers/scsi/isci/firmware/create_fw.c b/drivers/scsi/isci/firmware/create_fw.c
deleted file mode 100644 (file)
index c7a2887..0000000
+++ /dev/null
@@ -1,99 +0,0 @@
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <string.h>
-#include <errno.h>
-#include <asm/types.h>
-#include <strings.h>
-#include <stdint.h>
-
-#include "create_fw.h"
-#include "../probe_roms.h"
-
-int write_blob(struct isci_orom *isci_orom)
-{
-       FILE *fd;
-       int err;
-       size_t count;
-
-       fd = fopen(blob_name, "w+");
-       if (!fd) {
-               perror("Open file for write failed");
-               fclose(fd);
-               return -EIO;
-       }
-
-       count = fwrite(isci_orom, sizeof(struct isci_orom), 1, fd);
-       if (count != 1) {
-               perror("Write data failed");
-               fclose(fd);
-               return -EIO;
-       }
-
-       fclose(fd);
-
-       return 0;
-}
-
-void set_binary_values(struct isci_orom *isci_orom)
-{
-       int ctrl_idx, phy_idx, port_idx;
-
-       /* setting OROM signature */
-       strncpy(isci_orom->hdr.signature, sig, strlen(sig));
-       isci_orom->hdr.version = version;
-       isci_orom->hdr.total_block_length = sizeof(struct isci_orom);
-       isci_orom->hdr.hdr_length = sizeof(struct sci_bios_oem_param_block_hdr);
-       isci_orom->hdr.num_elements = num_elements;
-
-       for (ctrl_idx = 0; ctrl_idx < 2; ctrl_idx++) {
-               isci_orom->ctrl[ctrl_idx].controller.mode_type = mode_type;
-               isci_orom->ctrl[ctrl_idx].controller.max_concurrent_dev_spin_up =
-                       max_num_concurrent_dev_spin_up;
-               isci_orom->ctrl[ctrl_idx].controller.do_enable_ssc =
-                       enable_ssc;
-
-               for (port_idx = 0; port_idx < 4; port_idx++)
-                       isci_orom->ctrl[ctrl_idx].ports[port_idx].phy_mask =
-                               phy_mask[ctrl_idx][port_idx];
-
-               for (phy_idx = 0; phy_idx < 4; phy_idx++) {
-                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.high =
-                               (__u32)(sas_addr[ctrl_idx][phy_idx] >> 32);
-                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].sas_address.low =
-                               (__u32)(sas_addr[ctrl_idx][phy_idx]);
-
-                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control0 =
-                               afe_tx_amp_control0;
-                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control1 =
-                               afe_tx_amp_control1;
-                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control2 =
-                               afe_tx_amp_control2;
-                       isci_orom->ctrl[ctrl_idx].phys[phy_idx].afe_tx_amp_control3 =
-                               afe_tx_amp_control3;
-               }
-       }
-}
-
-int main(void)
-{
-       int err;
-       struct isci_orom *isci_orom;
-
-       isci_orom = malloc(sizeof(struct isci_orom));
-       memset(isci_orom, 0, sizeof(struct isci_orom));
-
-       set_binary_values(isci_orom);
-
-       err = write_blob(isci_orom);
-       if (err < 0) {
-               free(isci_orom);
-               return err;
-       }
-
-       free(isci_orom);
-       return 0;
-}
diff --git a/drivers/scsi/isci/firmware/create_fw.h b/drivers/scsi/isci/firmware/create_fw.h
deleted file mode 100644 (file)
index 5f29882..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-#ifndef _CREATE_FW_H_
-#define _CREATE_FW_H_
-#include "../probe_roms.h"
-
-
-/* we are configuring for 2 SCUs */
-static const int num_elements = 2;
-
-/*
- * For all defined arrays:
- * elements 0-3 are for SCU0, ports 0-3
- * elements 4-7 are for SCU1, ports 0-3
- *
- * valid configurations for one SCU are:
- *  P0  P1  P2  P3
- * ----------------
- * 0xF,0x0,0x0,0x0 # 1 x4 port
- * 0x3,0x0,0x4,0x8 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are each x1
- *                 # ports
- * 0x1,0x2,0xC,0x0 # Phys 0 and 1 are each x1 ports, phy 2 and phy 3 are a x2
- *                 # port
- * 0x3,0x0,0xC,0x0 # Phys 0 and 1 are a x2 port, phy 2 and phy 3 are a x2 port
- * 0x1,0x2,0x4,0x8 # Each phy is a x1 port (this is the default configuration)
- *
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value assigned to UNINIT_PARAM (255).
- */
-
-/* discovery mode type (port auto config mode by default ) */
-
-/*
- * if there is a port/phy on which you do not wish to override the default
- * values, use the value "0000000000000000". SAS address of zero's is
- * considered invalid and will not be used.
- */
-#ifdef MPC
-static const int mode_type = SCIC_PORT_MANUAL_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4] = { {1, 2, 4, 8},
-                                    {1, 2, 4, 8} };
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFFF0000001ULL,
-                                                    0x5FCFFFFFF0000002ULL,
-                                                    0x5FCFFFFFF0000003ULL,
-                                                    0x5FCFFFFFF0000004ULL },
-                                                  { 0x5FCFFFFFF0000005ULL,
-                                                    0x5FCFFFFFF0000006ULL,
-                                                    0x5FCFFFFFF0000007ULL,
-                                                    0x5FCFFFFFF0000008ULL } };
-#else  /* APC (default) */
-static const int mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;
-static const __u8 phy_mask[2][4];
-static const unsigned long long sas_addr[2][4] = { { 0x5FCFFFFF00000001ULL,
-                                                    0x5FCFFFFF00000001ULL,
-                                                    0x5FCFFFFF00000001ULL,
-                                                    0x5FCFFFFF00000001ULL },
-                                                  { 0x5FCFFFFF00000002ULL,
-                                                    0x5FCFFFFF00000002ULL,
-                                                    0x5FCFFFFF00000002ULL,
-                                                    0x5FCFFFFF00000002ULL } };
-#endif
-
-/* Maximum number of concurrent device spin up */
-static const int max_num_concurrent_dev_spin_up = 1;
-
-/* enable of ssc operation */
-static const int enable_ssc;
-
-/* AFE_TX_AMP_CONTROL */
-static const unsigned int afe_tx_amp_control0 = 0x000bdd08;
-static const unsigned int afe_tx_amp_control1 = 0x000ffc00;
-static const unsigned int afe_tx_amp_control2 = 0x000b7c09;
-static const unsigned int afe_tx_amp_control3 = 0x000afc6e;
-
-static const char blob_name[] = "isci_firmware.bin";
-static const char sig[] = "ISCUOEMB";
-static const unsigned char version = 0x10;
-
-#endif
index e7fe9c4c85b84d7098850eb16288caaae8b20578..1a65d6514237dd59ac24c8deb89f1afce27a4e3d 100644 (file)
@@ -899,7 +899,8 @@ static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
                         */
                        if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
                            (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
-                           (iphy->is_in_link_training == true && is_phy_starting(iphy))) {
+                           (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
+                           (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
                                is_controller_start_complete = false;
                                break;
                        }
@@ -1666,6 +1667,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
        /* Default to no SSC operation. */
        ihost->oem_parameters.controller.do_enable_ssc = false;
 
+       /* Default to short cables on all phys. */
+       ihost->oem_parameters.controller.cable_selection_mask = 0;
+
        /* Initialize all of the port parameter information to narrow ports. */
        for (index = 0; index < SCI_MAX_PORTS; index++) {
                ihost->oem_parameters.ports[index].phy_mask = 0;
@@ -1673,8 +1677,9 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
 
        /* Initialize all of the phy parameter information. */
        for (index = 0; index < SCI_MAX_PHYS; index++) {
-               /* Default to 6G (i.e. Gen 3) for now. */
-               ihost->user_parameters.phys[index].max_speed_generation = 3;
+               /* Default to 3G (i.e. Gen 2). */
+               ihost->user_parameters.phys[index].max_speed_generation =
+                       SCIC_SDS_PARM_GEN2_SPEED;
 
                /* the frequencies cannot be 0 */
                ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
@@ -1694,7 +1699,7 @@ static void sci_controller_set_default_config_parameters(struct isci_host *ihost
        ihost->user_parameters.ssp_inactivity_timeout = 5;
        ihost->user_parameters.stp_max_occupancy_timeout = 5;
        ihost->user_parameters.ssp_max_occupancy_timeout = 20;
-       ihost->user_parameters.no_outbound_task_timeout = 20;
+       ihost->user_parameters.no_outbound_task_timeout = 2;
 }
 
 static void controller_timeout(unsigned long data)
@@ -1759,7 +1764,7 @@ static enum sci_status sci_controller_construct(struct isci_host *ihost,
        return sci_controller_reset(ihost);
 }
 
-int sci_oem_parameters_validate(struct sci_oem_params *oem)
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
 {
        int i;
 
@@ -1791,18 +1796,61 @@ int sci_oem_parameters_validate(struct sci_oem_params *oem)
            oem->controller.max_concurr_spin_up < 1)
                return -EINVAL;
 
+       if (oem->controller.do_enable_ssc) {
+               if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
+                       return -EINVAL;
+
+               if (version >= ISCI_ROM_VER_1_1) {
+                       u8 test = oem->controller.ssc_sata_tx_spread_level;
+
+                       switch (test) {
+                       case 0:
+                       case 2:
+                       case 3:
+                       case 6:
+                       case 7:
+                               break;
+                       default:
+                               return -EINVAL;
+                       }
+
+                       test = oem->controller.ssc_sas_tx_spread_level;
+                       if (oem->controller.ssc_sas_tx_type == 0) {
+                               switch (test) {
+                               case 0:
+                               case 2:
+                               case 3:
+                                       break;
+                               default:
+                                       return -EINVAL;
+                               }
+                       } else if (oem->controller.ssc_sas_tx_type == 1) {
+                               switch (test) {
+                               case 0:
+                               case 3:
+                               case 6:
+                                       break;
+                               default:
+                                       return -EINVAL;
+                               }
+                       }
+               }
+       }
+
        return 0;
 }
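
As a worked example of the SSC checks added above: with an OEM parameter block version of 1.1 or later, the SATA TX spread level must be one of {0, 2, 3, 6, 7}, and the SAS TX spread level must be one of {0, 2, 3} for TX type 0 or {0, 3, 6} for TX type 1. A small standalone restatement of those acceptance rules (the function names are illustrative, not the driver's):

#include <stdio.h>

/* Illustrative restatement of the ROM >= 1.1 SSC spread-level checks. */
static int ssc_sata_level_ok(unsigned int level)
{
	switch (level) {
	case 0: case 2: case 3: case 6: case 7:
		return 1;
	default:
		return 0;
	}
}

static int ssc_sas_level_ok(unsigned int tx_type, unsigned int level)
{
	if (tx_type == 0)
		return level == 0 || level == 2 || level == 3;
	if (tx_type == 1)
		return level == 0 || level == 3 || level == 6;
	return 1;	/* other TX types get no extra constraint in this hunk */
}

int main(void)
{
	printf("sata level 5:        %d\n", ssc_sata_level_ok(5));    /* rejected */
	printf("sas type 0, level 2: %d\n", ssc_sas_level_ok(0, 2));  /* accepted */
	printf("sas type 1, level 2: %d\n", ssc_sas_level_ok(1, 2));  /* rejected */
	return 0;
}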
 
 static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
 {
        u32 state = ihost->sm.current_state_id;
+       struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
 
        if (state == SCIC_RESET ||
            state == SCIC_INITIALIZING ||
            state == SCIC_INITIALIZED) {
 
-               if (sci_oem_parameters_validate(&ihost->oem_parameters))
+               if (sci_oem_parameters_validate(&ihost->oem_parameters,
+                                               pci_info->orom->hdr.version))
                        return SCI_FAILURE_INVALID_PARAMETER_VALUE;
 
                return SCI_SUCCESS;
@@ -1857,6 +1905,31 @@ static void power_control_timeout(unsigned long data)
                ihost->power_control.phys_waiting--;
                ihost->power_control.phys_granted_power++;
                sci_phy_consume_power_handler(iphy);
+
+               if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
+                       u8 j;
+
+                       for (j = 0; j < SCI_MAX_PHYS; j++) {
+                               struct isci_phy *requester = ihost->power_control.requesters[j];
+
+                               /*
+                                * Search the power_control queue to see if there are other phys
+                                * attached to the same remote device. If found, take all of
+                                * them out of await_sas_power state.
+                                */
+                               if (requester != NULL && requester != iphy) {
+                                       u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
+                                                         iphy->frame_rcvd.iaf.sas_addr,
+                                                         sizeof(requester->frame_rcvd.iaf.sas_addr));
+
+                                       if (other == 0) {
+                                               ihost->power_control.requesters[j] = NULL;
+                                               ihost->power_control.phys_waiting--;
+                                               sci_phy_consume_power_handler(requester);
+                                       }
+                               }
+                       }
+               }
        }
 
        /*
@@ -1891,9 +1964,34 @@ void sci_controller_power_control_queue_insert(struct isci_host *ihost,
                ihost->power_control.timer_started = true;
 
        } else {
-               /* Add the phy in the waiting list */
-               ihost->power_control.requesters[iphy->phy_index] = iphy;
-               ihost->power_control.phys_waiting++;
+               /*
+                * If another phy attached to the same SAS address as this phy is
+                * already in the READY state, this phy does not need to wait.
+                */
+               u8 i;
+               struct isci_phy *current_phy;
+
+               for (i = 0; i < SCI_MAX_PHYS; i++) {
+                       u8 other;
+                       current_phy = &ihost->phys[i];
+
+                       other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
+                                      iphy->frame_rcvd.iaf.sas_addr,
+                                      sizeof(current_phy->frame_rcvd.iaf.sas_addr));
+
+                       if (current_phy->sm.current_state_id == SCI_PHY_READY &&
+                           current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
+                           other == 0) {
+                               sci_phy_consume_power_handler(iphy);
+                               break;
+                       }
+               }
+
+               if (i == SCI_MAX_PHYS) {
+                       /* Add the phy in the waiting list */
+                       ihost->power_control.requesters[iphy->phy_index] = iphy;
+                       ihost->power_control.phys_waiting++;
+               }
        }
 }
 
@@ -1908,162 +2006,250 @@ void sci_controller_power_control_queue_remove(struct isci_host *ihost,
        ihost->power_control.requesters[iphy->phy_index] = NULL;
 }
 
+static int is_long_cable(int phy, unsigned char selection_byte)
+{
+       return !!(selection_byte & (1 << phy));
+}
+
+static int is_medium_cable(int phy, unsigned char selection_byte)
+{
+       return !!(selection_byte & (1 << (phy + 4)));
+}
+
+static enum cable_selections decode_selection_byte(
+       int phy,
+       unsigned char selection_byte)
+{
+       return ((selection_byte & (1 << phy)) ? 1 : 0)
+               + (selection_byte & (1 << (phy + 4)) ? 2 : 0);
+}
+
+static unsigned char *to_cable_select(struct isci_host *ihost)
+{
+       if (is_cable_select_overridden())
+               return ((unsigned char *)&cable_selection_override)
+                       + ihost->id;
+       else
+               return &ihost->oem_parameters.controller.cable_selection_mask;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
+{
+       return decode_selection_byte(phy, *to_cable_select(ihost));
+}
+
+char *lookup_cable_names(enum cable_selections selection)
+{
+       static char *cable_names[] = {
+               [short_cable]     = "short",
+               [long_cable]      = "long",
+               [medium_cable]    = "medium",
+               [undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
+       };
+       return (selection <= undefined_cable) ? cable_names[selection]
+                                             : cable_names[undefined_cable];
+}
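
The helpers above pack the per-phy cable choice into a single selection byte: bit <phy> marks a long cable, bit <phy + 4> a medium one, and both bits set decode to the "undefined, assumed long" bucket. A standalone decode of a sample selection byte that mirrors decode_selection_byte (the program itself is only illustrative):

#include <stdio.h>

enum cable_selections { short_cable = 0, long_cable = 1,
			medium_cable = 2, undefined_cable = 3 };

/* bit <phy> contributes 1 (long), bit <phy + 4> contributes 2 (medium). */
static enum cable_selections decode(int phy, unsigned char sel)
{
	return ((sel >> phy) & 1) + (((sel >> (phy + 4)) & 1) << 1);
}

static const char *cable_name(enum cable_selections s)
{
	static const char *names[] = { "short", "long", "medium",
				       "<undefined, assumed long>" };
	return names[s <= undefined_cable ? s : undefined_cable];
}

int main(void)
{
	unsigned char sel = 0x21;	/* phy 0: long, phy 1: medium, phys 2-3: short */
	int phy;

	for (phy = 0; phy < 4; phy++)
		printf("phy %d: %s\n", phy, cable_name(decode(phy, sel)));
	return 0;
}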
+
 #define AFE_REGISTER_WRITE_DELAY 10
 
-/* Initialize the AFE for this phy index. We need to read the AFE setup from
- * the OEM parameters
- */
 static void sci_controller_afe_initialization(struct isci_host *ihost)
 {
+       struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
        const struct sci_oem_params *oem = &ihost->oem_parameters;
        struct pci_dev *pdev = ihost->pdev;
        u32 afe_status;
        u32 phy_id;
+       unsigned char cable_selection_mask = *to_cable_select(ihost);
 
        /* Clear DFX Status registers */
-       writel(0x0081000f, &ihost->scu_registers->afe.afe_dfx_master_control0);
+       writel(0x0081000f, &afe->afe_dfx_master_control0);
        udelay(AFE_REGISTER_WRITE_DELAY);
 
-       if (is_b0(pdev)) {
+       if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
                /* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
-                * Timer, PM Stagger Timer */
-               writel(0x0007BFFF, &ihost->scu_registers->afe.afe_pmsn_master_control2);
+                * Timer, PM Stagger Timer
+                */
+               writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
                udelay(AFE_REGISTER_WRITE_DELAY);
        }
 
        /* Configure bias currents to normal */
        if (is_a2(pdev))
-               writel(0x00005A00, &ihost->scu_registers->afe.afe_bias_control);
+               writel(0x00005A00, &afe->afe_bias_control);
        else if (is_b0(pdev) || is_c0(pdev))
-               writel(0x00005F00, &ihost->scu_registers->afe.afe_bias_control);
+               writel(0x00005F00, &afe->afe_bias_control);
+       else if (is_c1(pdev))
+               writel(0x00005500, &afe->afe_bias_control);
 
        udelay(AFE_REGISTER_WRITE_DELAY);
 
        /* Enable PLL */
-       if (is_b0(pdev) || is_c0(pdev))
-               writel(0x80040A08, &ihost->scu_registers->afe.afe_pll_control0);
-       else
-               writel(0x80040908, &ihost->scu_registers->afe.afe_pll_control0);
+       if (is_a2(pdev))
+               writel(0x80040908, &afe->afe_pll_control0);
+       else if (is_b0(pdev) || is_c0(pdev))
+               writel(0x80040A08, &afe->afe_pll_control0);
+       else if (is_c1(pdev)) {
+               writel(0x80000B08, &afe->afe_pll_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+               writel(0x00000B08, &afe->afe_pll_control0);
+               udelay(AFE_REGISTER_WRITE_DELAY);
+               writel(0x80000B08, &afe->afe_pll_control0);
+       }
 
        udelay(AFE_REGISTER_WRITE_DELAY);
 
        /* Wait for the PLL to lock */
        do {
-               afe_status = readl(&ihost->scu_registers->afe.afe_common_block_status);
+               afe_status = readl(&afe->afe_common_block_status);
                udelay(AFE_REGISTER_WRITE_DELAY);
        } while ((afe_status & 0x00001000) == 0);
 
        if (is_a2(pdev)) {
-               /* Shorten SAS SNW lock time (RxLock timer value from 76 us to 50 us) */
-               writel(0x7bcc96ad, &ihost->scu_registers->afe.afe_pmsn_master_control0);
+               /* Shorten SAS SNW lock time (RxLock timer value from 76
+                * us to 50 us)
+                */
+               writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
                udelay(AFE_REGISTER_WRITE_DELAY);
        }
 
        for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
+               struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
                const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
+               int cable_length_long =
+                       is_long_cable(phy_id, cable_selection_mask);
+               int cable_length_medium =
+                       is_medium_cable(phy_id, cable_selection_mask);
 
-               if (is_b0(pdev)) {
-                        /* Configure transmitter SSC parameters */
-                       writel(0x00030000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+               if (is_a2(pdev)) {
+                       /* All defaults, except the Receive Word
+                        * Alignment/Comma Detect Enable....(0xe800)
+                        */
+                       writel(0x00004512, &xcvr->afe_xcvr_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(0x0050100F, &xcvr->afe_xcvr_control1);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+               } else if (is_b0(pdev)) {
+                       /* Configure transmitter SSC parameters */
+                       writel(0x00030000, &xcvr->afe_tx_ssc_control);
                        udelay(AFE_REGISTER_WRITE_DELAY);
                } else if (is_c0(pdev)) {
-                        /* Configure transmitter SSC parameters */
-                       writel(0x0003000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_ssc_control);
+                       /* Configure transmitter SSC parameters */
+                       writel(0x00010202, &xcvr->afe_tx_ssc_control);
                        udelay(AFE_REGISTER_WRITE_DELAY);
 
-                       /*
-                        * All defaults, except the Receive Word Alignament/Comma Detect
-                        * Enable....(0xe800) */
-                       writel(0x00004500, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+                       /* All defaults, except the Receive Word
+                        * Alignment/Comma Detect Enable....(0xe800)
+                        */
+                       writel(0x00014500, &xcvr->afe_xcvr_control0);
                        udelay(AFE_REGISTER_WRITE_DELAY);
-               } else {
-                       /*
-                        * All defaults, except the Receive Word Alignament/Comma Detect
-                        * Enable....(0xe800) */
-                       writel(0x00004512, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+               } else if (is_c1(pdev)) {
+                       /* Configure transmitter SSC parameters */
+                       writel(0x00010202, &xcvr->afe_tx_ssc_control);
                        udelay(AFE_REGISTER_WRITE_DELAY);
 
-                       writel(0x0050100F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control1);
+                       /* All defaults, except the Receive Word
+                        * Alignment/Comma Detect Enable....(0xe800)
+                        */
+                       writel(0x0001C500, &xcvr->afe_xcvr_control0);
                        udelay(AFE_REGISTER_WRITE_DELAY);
                }
 
-               /*
-                * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
-                * & increase TX int & ext bias 20%....(0xe85c) */
+               /* Power up TX and RX out from power down (PWRDNTX and
+                * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c)
+                */
                if (is_a2(pdev))
-                       writel(0x000003F0, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       writel(0x000003F0, &xcvr->afe_channel_control);
                else if (is_b0(pdev)) {
-                        /* Power down TX and RX (PWRDNTX and PWRDNRX) */
-                       writel(0x000003D7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       writel(0x000003D7, &xcvr->afe_channel_control);
                        udelay(AFE_REGISTER_WRITE_DELAY);
 
-                       /*
-                        * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
-                        * & increase TX int & ext bias 20%....(0xe85c) */
-                       writel(0x000003D4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
-               } else {
-                       writel(0x000001E7, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       writel(0x000003D4, &xcvr->afe_channel_control);
+               } else if (is_c0(pdev)) {
+                       writel(0x000001E7, &xcvr->afe_channel_control);
                        udelay(AFE_REGISTER_WRITE_DELAY);
 
-                       /*
-                        * Power up TX and RX out from power down (PWRDNTX and PWRDNRX)
-                        * & increase TX int & ext bias 20%....(0xe85c) */
-                       writel(0x000001E4, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_channel_control);
+                       writel(0x000001E4, &xcvr->afe_channel_control);
+               } else if (is_c1(pdev)) {
+                       writel(cable_length_long ? 0x000002F7 : 0x000001F7,
+                              &xcvr->afe_channel_control);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(cable_length_long ? 0x000002F4 : 0x000001F4,
+                              &xcvr->afe_channel_control);
                }
                udelay(AFE_REGISTER_WRITE_DELAY);
 
                if (is_a2(pdev)) {
                        /* Enable TX equalization (0xe824) */
-                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+                       writel(0x00040000, &xcvr->afe_tx_control);
                        udelay(AFE_REGISTER_WRITE_DELAY);
                }
 
-               /*
-                * RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, TPD=0x0(TX Power On),
-                * RDD=0x0(RX Detect Enabled) ....(0xe800) */
-               writel(0x00004100, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_xcvr_control0);
+               if (is_a2(pdev) || is_b0(pdev))
+                       /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0,
+                        * TPD=0x0(TX Power On), RDD=0x0(RX Detect
+                        * Enabled) ....(0xe800)
+                        */
+                       writel(0x00004100, &xcvr->afe_xcvr_control0);
+               else if (is_c0(pdev))
+                       writel(0x00014100, &xcvr->afe_xcvr_control0);
+               else if (is_c1(pdev))
+                       writel(0x0001C100, &xcvr->afe_xcvr_control0);
                udelay(AFE_REGISTER_WRITE_DELAY);
 
                /* Leave DFE/FFE on */
                if (is_a2(pdev))
-                       writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+                       writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
                else if (is_b0(pdev)) {
-                       writel(0x3F11103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+                       writel(0x3F11103F, &xcvr->afe_rx_ssc_control0);
                        udelay(AFE_REGISTER_WRITE_DELAY);
                        /* Enable TX equalization (0xe824) */
-                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
-               } else {
-                       writel(0x0140DF0F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control1);
+                       writel(0x00040000, &xcvr->afe_tx_control);
+               } else if (is_c0(pdev)) {
+                       writel(0x01400C0F, &xcvr->afe_rx_ssc_control1);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       /* Enable TX equalization (0xe824) */
+                       writel(0x00040000, &xcvr->afe_tx_control);
+               } else if (is_c1(pdev)) {
+                       writel(cable_length_long ? 0x01500C0C :
+                              cable_length_medium ? 0x01400C0D : 0x02400C0D,
+                              &xcvr->afe_xcvr_control1);
+                       udelay(AFE_REGISTER_WRITE_DELAY);
+
+                       writel(0x000003E0, &xcvr->afe_dfx_rx_control1);
                        udelay(AFE_REGISTER_WRITE_DELAY);
 
-                       writel(0x3F6F103F, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_rx_ssc_control0);
+                       writel(cable_length_long ? 0x33091C1F :
+                              cable_length_medium ? 0x3315181F : 0x2B17161F,
+                              &xcvr->afe_rx_ssc_control0);
                        udelay(AFE_REGISTER_WRITE_DELAY);
 
                        /* Enable TX equalization (0xe824) */
-                       writel(0x00040000, &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_control);
+                       writel(0x00040000, &xcvr->afe_tx_control);
                }
 
                udelay(AFE_REGISTER_WRITE_DELAY);
 
-               writel(oem_phy->afe_tx_amp_control0,
-                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control0);
+               writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0);
                udelay(AFE_REGISTER_WRITE_DELAY);
 
-               writel(oem_phy->afe_tx_amp_control1,
-                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control1);
+               writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1);
                udelay(AFE_REGISTER_WRITE_DELAY);
 
-               writel(oem_phy->afe_tx_amp_control2,
-                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control2);
+               writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2);
                udelay(AFE_REGISTER_WRITE_DELAY);
 
-               writel(oem_phy->afe_tx_amp_control3,
-                       &ihost->scu_registers->afe.scu_afe_xcvr[phy_id].afe_tx_amp_control3);
+               writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3);
                udelay(AFE_REGISTER_WRITE_DELAY);
        }
 
        /* Transfer control to the PEs */
-       writel(0x00010f00, &ihost->scu_registers->afe.afe_dfx_master_control0);
+       writel(0x00010f00, &afe->afe_dfx_master_control0);
        udelay(AFE_REGISTER_WRITE_DELAY);
 }
 
index 646051afd3cbd07e2ab6761a18edef7355cf39f6..5477f0fa8233198e691945a08c14eba074be2f21 100644 (file)
@@ -435,11 +435,36 @@ static inline bool is_b0(struct pci_dev *pdev)
 
 static inline bool is_c0(struct pci_dev *pdev)
 {
-       if (pdev->revision >= 5)
+       if (pdev->revision == 5)
                return true;
        return false;
 }
 
+static inline bool is_c1(struct pci_dev *pdev)
+{
+       if (pdev->revision >= 6)
+               return true;
+       return false;
+}
+
+enum cable_selections {
+       short_cable     = 0,
+       long_cable      = 1,
+       medium_cable    = 2,
+       undefined_cable = 3
+};
+
+#define CABLE_OVERRIDE_DISABLED (0x10000)
+
+static inline int is_cable_select_overridden(void)
+{
+       return cable_selection_override < CABLE_OVERRIDE_DISABLED;
+}
+
+enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy);
+void validate_cable_selections(struct isci_host *ihost);
+char *lookup_cable_names(enum cable_selections);
+
 /* set hw control for 'activity', even though active enclosures seem to drive
  * the activity led on their own.  Skip setting FSENG control on 'status' due
  * to unexpected operation and 'error' due to not being a supported automatic
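
The hunk above adds declarations for decode_cable_selection() and friends; the
implementations are not part of this hunk (presumably they live in host.c).  As
a minimal sketch (an illustration only, using the bit layout documented for
cable_selection_mask in probe_roms.h further down: bits 7-4 mark a phy's cable
as medium, bits 3-0 as long, both clear means short), the per-phy decode could
look like:

	/* Hypothetical helper, not the driver's implementation.  The real
	 * decode_cable_selection() presumably also honors the new
	 * cable_selection_override module parameter whenever it is below
	 * CABLE_OVERRIDE_DISABLED (which is what is_cable_select_overridden()
	 * tests).
	 */
	static enum cable_selections example_decode(u8 mask, int phy)
	{
		bool medium = mask & (1 << (phy + 4));	/* bits 7-4 */
		bool lng = mask & (1 << phy);		/* bits 3-0 */

		if (medium && lng)
			return undefined_cable;	/* both bits set is undefined */
		if (medium)
			return medium_cable;
		if (lng)
			return long_cable;
		return short_cable;
	}

For the 0x84 example given in probe_roms.h this yields medium for phy 3, long
for phy 2 and short for phys 1 and 0, which is presumably what the
is_long_cable()/is_medium_cable() helpers used in the AFE setup above boil
down to.
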
index a97edabcb85a29e96e1741171fcce2ea4c7cb4c0..17c4c2c89c2e5d9fe56f7e3b08010fc5ded3d71b 100644 (file)
@@ -65,7 +65,7 @@
 #include "probe_roms.h"
 
 #define MAJ 1
-#define MIN 0
+#define MIN 1
 #define BUILD 0
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
        __stringify(BUILD)
@@ -94,7 +94,7 @@ MODULE_DEVICE_TABLE(pci, isci_id_table);
 
 /* linux isci specific settings */
 
-unsigned char no_outbound_task_to = 20;
+unsigned char no_outbound_task_to = 2;
 module_param(no_outbound_task_to, byte, 0);
 MODULE_PARM_DESC(no_outbound_task_to, "No Outbound Task Timeout (1us incr)");
 
@@ -114,7 +114,7 @@ u16 stp_inactive_to = 5;
 module_param(stp_inactive_to, ushort, 0);
 MODULE_PARM_DESC(stp_inactive_to, "STP inactivity timeout (100us incr)");
 
-unsigned char phy_gen = 3;
+unsigned char phy_gen = SCIC_SDS_PARM_GEN2_SPEED;
 module_param(phy_gen, byte, 0);
 MODULE_PARM_DESC(phy_gen, "PHY generation (1: 1.5Gbps 2: 3.0Gbps 3: 6.0Gbps)");
 
@@ -122,6 +122,14 @@ unsigned char max_concurr_spinup;
 module_param(max_concurr_spinup, byte, 0);
 MODULE_PARM_DESC(max_concurr_spinup, "Max concurrent device spinup");
 
+uint cable_selection_override = CABLE_OVERRIDE_DISABLED;
+module_param(cable_selection_override, uint, 0);
+
+MODULE_PARM_DESC(cable_selection_override,
+                "This field indicates the length of the SAS/SATA cable "
+                "between host and device. If any bits > 15 are set (the "
+                "default), platform defaults are used");
+
 static ssize_t isci_show_id(struct device *dev, struct device_attribute *attr, char *buf)
 {
        struct Scsi_Host *shost = container_of(dev, typeof(*shost), shost_dev);
@@ -412,6 +420,14 @@ static struct isci_host *isci_host_alloc(struct pci_dev *pdev, int id)
                return NULL;
        isci_host->shost = shost;
 
+       dev_info(&pdev->dev, "%sSCU controller %d: phy 3-0 cables: "
+                "{%s, %s, %s, %s}\n",
+                (is_cable_select_overridden() ? "* " : ""), isci_host->id,
+                lookup_cable_names(decode_cable_selection(isci_host, 3)),
+                lookup_cable_names(decode_cable_selection(isci_host, 2)),
+                lookup_cable_names(decode_cable_selection(isci_host, 1)),
+                lookup_cable_names(decode_cable_selection(isci_host, 0)));
+
        err = isci_host_init(isci_host);
        if (err)
                goto err_shost;
@@ -466,7 +482,8 @@ static int __devinit isci_pci_probe(struct pci_dev *pdev, const struct pci_devic
                orom = isci_request_oprom(pdev);
 
        for (i = 0; orom && i < ARRAY_SIZE(orom->ctrl); i++) {
-               if (sci_oem_parameters_validate(&orom->ctrl[i])) {
+               if (sci_oem_parameters_validate(&orom->ctrl[i],
+                                               orom->hdr.version)) {
                        dev_warn(&pdev->dev,
                                 "[%d]: invalid oem parameters detected, falling back to firmware\n", i);
                        devm_kfree(&pdev->dev, orom);
index 8efeb6b083213bb20d15ca3a9ca0a954515ec23f..234ab46fce3346948300d9ae95364e33103792ee 100644 (file)
@@ -480,6 +480,7 @@ extern u16 ssp_inactive_to;
 extern u16 stp_inactive_to;
 extern unsigned char phy_gen;
 extern unsigned char max_concurr_spinup;
+extern uint cable_selection_override;
 
 irqreturn_t isci_msix_isr(int vec, void *data);
 irqreturn_t isci_intx_isr(int vec, void *data);
index 35f50c2183e18a4c6460b07db081bb7d82ff6beb..fe18acfd6eb3ad9b62a775665a1dbedecb32e011 100644 (file)
@@ -91,22 +91,23 @@ sci_phy_transport_layer_initialization(struct isci_phy *iphy,
 
 static enum sci_status
 sci_phy_link_layer_initialization(struct isci_phy *iphy,
-                                 struct scu_link_layer_registers __iomem *reg)
+                                 struct scu_link_layer_registers __iomem *llr)
 {
        struct isci_host *ihost = iphy->owning_port->owning_controller;
+       struct sci_phy_user_params *phy_user;
+       struct sci_phy_oem_params *phy_oem;
        int phy_idx = iphy->phy_index;
-       struct sci_phy_user_params *phy_user = &ihost->user_parameters.phys[phy_idx];
-       struct sci_phy_oem_params *phy_oem =
-               &ihost->oem_parameters.phys[phy_idx];
-       u32 phy_configuration;
        struct sci_phy_cap phy_cap;
+       u32 phy_configuration;
        u32 parity_check = 0;
        u32 parity_count = 0;
        u32 llctl, link_rate;
        u32 clksm_value = 0;
        u32 sp_timeouts = 0;
 
-       iphy->link_layer_registers = reg;
+       phy_user = &ihost->user_parameters.phys[phy_idx];
+       phy_oem = &ihost->oem_parameters.phys[phy_idx];
+       iphy->link_layer_registers = llr;
 
        /* Set our IDENTIFY frame data */
        #define SCI_END_DEVICE 0x01
@@ -116,32 +117,26 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
               SCU_SAS_TIID_GEN_BIT(STP_INITIATOR) |
               SCU_SAS_TIID_GEN_BIT(DA_SATA_HOST) |
               SCU_SAS_TIID_GEN_VAL(DEVICE_TYPE, SCI_END_DEVICE),
-              &iphy->link_layer_registers->transmit_identification);
+              &llr->transmit_identification);
 
        /* Write the device SAS Address */
-       writel(0xFEDCBA98,
-              &iphy->link_layer_registers->sas_device_name_high);
-       writel(phy_idx, &iphy->link_layer_registers->sas_device_name_low);
+       writel(0xFEDCBA98, &llr->sas_device_name_high);
+       writel(phy_idx, &llr->sas_device_name_low);
 
        /* Write the source SAS Address */
-       writel(phy_oem->sas_address.high,
-               &iphy->link_layer_registers->source_sas_address_high);
-       writel(phy_oem->sas_address.low,
-               &iphy->link_layer_registers->source_sas_address_low);
+       writel(phy_oem->sas_address.high, &llr->source_sas_address_high);
+       writel(phy_oem->sas_address.low, &llr->source_sas_address_low);
 
        /* Clear and Set the PHY Identifier */
-       writel(0, &iphy->link_layer_registers->identify_frame_phy_id);
-       writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx),
-               &iphy->link_layer_registers->identify_frame_phy_id);
+       writel(0, &llr->identify_frame_phy_id);
+       writel(SCU_SAS_TIPID_GEN_VALUE(ID, phy_idx), &llr->identify_frame_phy_id);
 
        /* Change the initial state of the phy configuration register */
-       phy_configuration =
-               readl(&iphy->link_layer_registers->phy_configuration);
+       phy_configuration = readl(&llr->phy_configuration);
 
        /* Hold OOB state machine in reset */
        phy_configuration |=  SCU_SAS_PCFG_GEN_BIT(OOB_RESET);
-       writel(phy_configuration,
-               &iphy->link_layer_registers->phy_configuration);
+       writel(phy_configuration, &llr->phy_configuration);
 
        /* Configure the SNW capabilities */
        phy_cap.all = 0;
@@ -149,15 +144,64 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
        phy_cap.gen3_no_ssc = 1;
        phy_cap.gen2_no_ssc = 1;
        phy_cap.gen1_no_ssc = 1;
-       if (ihost->oem_parameters.controller.do_enable_ssc == true) {
+       if (ihost->oem_parameters.controller.do_enable_ssc) {
+               struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
+               struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_idx];
+               struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);
+               bool en_sas = false;
+               bool en_sata = false;
+               u32 sas_type = 0;
+               u32 sata_spread = 0x2;
+               u32 sas_spread = 0x2;
+
                phy_cap.gen3_ssc = 1;
                phy_cap.gen2_ssc = 1;
                phy_cap.gen1_ssc = 1;
+
+               if (pci_info->orom->hdr.version < ISCI_ROM_VER_1_1)
+                       en_sas = en_sata = true;
+               else {
+                       sata_spread = ihost->oem_parameters.controller.ssc_sata_tx_spread_level;
+                       sas_spread = ihost->oem_parameters.controller.ssc_sas_tx_spread_level;
+
+                       if (sata_spread)
+                               en_sata = true;
+
+                       if (sas_spread) {
+                               en_sas = true;
+                               sas_type = ihost->oem_parameters.controller.ssc_sas_tx_type;
+                       }
+
+               }
+
+               if (en_sas) {
+                       u32 reg;
+
+                       reg = readl(&xcvr->afe_xcvr_control0);
+                       reg |= (0x00100000 | (sas_type << 19));
+                       writel(reg, &xcvr->afe_xcvr_control0);
+
+                       reg = readl(&xcvr->afe_tx_ssc_control);
+                       reg |= sas_spread << 8;
+                       writel(reg, &xcvr->afe_tx_ssc_control);
+               }
+
+               if (en_sata) {
+                       u32 reg;
+
+                       reg = readl(&xcvr->afe_tx_ssc_control);
+                       reg |= sata_spread;
+                       writel(reg, &xcvr->afe_tx_ssc_control);
+
+                       reg = readl(&llr->stp_control);
+                       reg |= 1 << 12;
+                       writel(reg, &llr->stp_control);
+               }
        }
 
-       /*
-        * The SAS specification indicates that the phy_capabilities that
-        * are transmitted shall have an even parity.  Calculate the parity. */
+       /* The SAS specification indicates that the phy_capabilities that
+        * are transmitted shall have an even parity.  Calculate the parity.
+        */
        parity_check = phy_cap.all;
        while (parity_check != 0) {
                if (parity_check & 0x1)
@@ -165,20 +209,20 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
                parity_check >>= 1;
        }
 
-       /*
-        * If parity indicates there are an odd number of bits set, then
-        * set the parity bit to 1 in the phy capabilities. */
+       /* If parity indicates there are an odd number of bits set, then
+        * set the parity bit to 1 in the phy capabilities.
+        */
        if ((parity_count % 2) != 0)
                phy_cap.parity = 1;
 
-       writel(phy_cap.all, &iphy->link_layer_registers->phy_capabilities);
+       writel(phy_cap.all, &llr->phy_capabilities);
 
        /* Set the enable spinup period but disable the ability to send
         * notify enable spinup
         */
        writel(SCU_ENSPINUP_GEN_VAL(COUNT,
                        phy_user->notify_enable_spin_up_insertion_frequency),
-               &iphy->link_layer_registers->notify_enable_spinup_control);
+               &llr->notify_enable_spinup_control);
 
        /* Write the ALIGN Insertion Frequency for connected phy and
         * independent of connected state
@@ -189,11 +233,13 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
        clksm_value |= SCU_ALIGN_INSERTION_FREQUENCY_GEN_VAL(GENERAL,
                        phy_user->align_insertion_frequency);
 
-       writel(clksm_value, &iphy->link_layer_registers->clock_skew_management);
+       writel(clksm_value, &llr->clock_skew_management);
 
-       /* @todo Provide a way to write this register correctly */
-       writel(0x02108421,
-               &iphy->link_layer_registers->afe_lookup_table_control);
+       if (is_c0(ihost->pdev) || is_c1(ihost->pdev)) {
+               writel(0x04210400, &llr->afe_lookup_table_control);
+               writel(0x020A7C05, &llr->sas_primitive_timeout);
+       } else
+               writel(0x02108421, &llr->afe_lookup_table_control);
 
        llctl = SCU_SAS_LLCTL_GEN_VAL(NO_OUTBOUND_TASK_TIMEOUT,
                (u8)ihost->user_parameters.no_outbound_task_timeout);
@@ -210,9 +256,9 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
                break;
        }
        llctl |= SCU_SAS_LLCTL_GEN_VAL(MAX_LINK_RATE, link_rate);
-       writel(llctl, &iphy->link_layer_registers->link_layer_control);
+       writel(llctl, &llr->link_layer_control);
 
-       sp_timeouts = readl(&iphy->link_layer_registers->sas_phy_timeouts);
+       sp_timeouts = readl(&llr->sas_phy_timeouts);
 
        /* Clear the default 0x36 (54us) RATE_CHANGE timeout value. */
        sp_timeouts &= ~SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0xFF);
@@ -222,20 +268,23 @@ sci_phy_link_layer_initialization(struct isci_phy *iphy,
         */
        sp_timeouts |= SCU_SAS_PHYTOV_GEN_VAL(RATE_CHANGE, 0x3B);
 
-       writel(sp_timeouts, &iphy->link_layer_registers->sas_phy_timeouts);
+       writel(sp_timeouts, &llr->sas_phy_timeouts);
 
        if (is_a2(ihost->pdev)) {
-               /* Program the max ARB time for the PHY to 700us so we inter-operate with
-                * the PMC expander which shuts down PHYs if the expander PHY generates too
-                * many breaks.  This time value will guarantee that the initiator PHY will
-                * generate the break.
+               /* Program the max ARB time for the PHY to 700us so we
+                * inter-operate with the PMC expander which shuts down
+                * PHYs if the expander PHY generates too many breaks.
+                * This time value will guarantee that the initiator PHY
+                * will generate the break.
                 */
                writel(SCIC_SDS_PHY_MAX_ARBITRATION_WAIT_TIME,
-                       &iphy->link_layer_registers->maximum_arbitration_wait_timer_timeout);
+                      &llr->maximum_arbitration_wait_timer_timeout);
        }
 
-       /* Disable link layer hang detection, rely on the OS timeout for I/O timeouts. */
-       writel(0, &iphy->link_layer_registers->link_layer_hang_detection_timeout);
+       /* Disable link layer hang detection, rely on the OS timeout for
+        * I/O timeouts.
+        */
+       writel(0, &llr->link_layer_hang_detection_timeout);
 
        /* We can exit the initial state to the stopped state */
        sci_change_state(&iphy->sm, SCI_PHY_STOPPED);
@@ -1049,24 +1098,25 @@ static void scu_link_layer_stop_protocol_engine(
        writel(enable_spinup_value, &iphy->link_layer_registers->notify_enable_spinup_control);
 }
 
-/**
- *
- *
- * This method will start the OOB/SN state machine for this struct isci_phy object.
- */
-static void scu_link_layer_start_oob(
-       struct isci_phy *iphy)
+static void scu_link_layer_start_oob(struct isci_phy *iphy)
 {
-       u32 scu_sas_pcfg_value;
-
-       scu_sas_pcfg_value =
-               readl(&iphy->link_layer_registers->phy_configuration);
-       scu_sas_pcfg_value |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
-       scu_sas_pcfg_value &=
-               ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
-               SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
-       writel(scu_sas_pcfg_value,
-              &iphy->link_layer_registers->phy_configuration);
+       struct scu_link_layer_registers __iomem *ll = iphy->link_layer_registers;
+       u32 val;
+
+       /** Reset OOB sequence - start */
+       val = readl(&ll->phy_configuration);
+       val &= ~(SCU_SAS_PCFG_GEN_BIT(OOB_RESET) |
+                SCU_SAS_PCFG_GEN_BIT(HARD_RESET));
+       writel(val, &ll->phy_configuration);
+       readl(&ll->phy_configuration); /* flush */
+       /** Reset OOB sequence - end */
+
+       /** Start OOB sequence - start */
+       val = readl(&ll->phy_configuration);
+       val |= SCU_SAS_PCFG_GEN_BIT(OOB_ENABLE);
+       writel(val, &ll->phy_configuration);
+       readl(&ll->phy_configuration); /* flush */
+       /** Start OOB sequence - end */
 }
 
 /**
index ac7f27749f975761a1beefc7f1b1741a5c02e0bc..7c6ac58a5c4c45c37af504f8e10bfa84cb7df3ee 100644 (file)
@@ -114,7 +114,7 @@ static u32 sci_port_get_phys(struct isci_port *iport)
  * value is returned if the specified port is not valid.  When this value is
  * returned, no data is copied to the properties output parameter.
  */
-static enum sci_status sci_port_get_properties(struct isci_port *iport,
+enum sci_status sci_port_get_properties(struct isci_port *iport,
                                                struct sci_port_properties *prop)
 {
        if (!iport || iport->logical_port_index == SCIC_SDS_DUMMY_PORT)
@@ -647,19 +647,26 @@ void sci_port_setup_transports(struct isci_port *iport, u32 device_id)
        }
 }
 
-static void sci_port_activate_phy(struct isci_port *iport, struct isci_phy *iphy,
-                                 bool do_notify_user)
+static void sci_port_resume_phy(struct isci_port *iport, struct isci_phy *iphy)
+{
+       sci_phy_resume(iphy);
+       iport->enabled_phy_mask |= 1 << iphy->phy_index;
+}
+
+static void sci_port_activate_phy(struct isci_port *iport,
+                                 struct isci_phy *iphy,
+                                 u8 flags)
 {
        struct isci_host *ihost = iport->owning_controller;
 
-       if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA)
+       if (iphy->protocol != SCIC_SDS_PHY_PROTOCOL_SATA && (flags & PF_RESUME))
                sci_phy_resume(iphy);
 
        iport->active_phy_mask |= 1 << iphy->phy_index;
 
        sci_controller_clear_invalid_phy(ihost, iphy);
 
-       if (do_notify_user == true)
+       if (flags & PF_NOTIFY)
                isci_port_link_up(ihost, iport, iphy);
 }
 
@@ -669,14 +676,19 @@ void sci_port_deactivate_phy(struct isci_port *iport, struct isci_phy *iphy,
        struct isci_host *ihost = iport->owning_controller;
 
        iport->active_phy_mask &= ~(1 << iphy->phy_index);
+       iport->enabled_phy_mask &= ~(1 << iphy->phy_index);
        if (!iport->active_phy_mask)
                iport->last_active_phy = iphy->phy_index;
 
        iphy->max_negotiated_speed = SAS_LINK_RATE_UNKNOWN;
 
-       /* Re-assign the phy back to the LP as if it were a narrow port */
-       writel(iphy->phy_index,
-               &iport->port_pe_configuration_register[iphy->phy_index]);
+       /* Re-assign the phy back to the LP as if it were a narrow port for APC
+        * mode. For MPC mode, the phy will remain in the port.
+        */
+       if (iport->owning_controller->oem_parameters.controller.mode_type ==
+               SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE)
+               writel(iphy->phy_index,
+                       &iport->port_pe_configuration_register[iphy->phy_index]);
 
        if (do_notify_user == true)
                isci_port_link_down(ihost, iphy, iport);
@@ -701,18 +713,16 @@ static void sci_port_invalid_link_up(struct isci_port *iport, struct isci_phy *i
  * sci_port_general_link_up_handler - phy can be assigned to port?
  * @sci_port: sci_port object for which has a phy that has gone link up.
  * @sci_phy: This is the struct isci_phy object that has gone link up.
- * @do_notify_user: This parameter specifies whether to inform the user (via
- *    sci_port_link_up()) as to the fact that a new phy as become ready.
+ * @flags: PF_RESUME, PF_NOTIFY flags passed on to sci_port_activate_phy()
  *
- * Determine if this phy can be assigned to this
- * port . If the phy is not a valid PHY for
- * this port then the function will notify the user. A PHY can only be
- * part of a port if it's attached SAS ADDRESS is the same as all other PHYs in
- * the same port. none
+ * Determine if this phy can be assigned to this port.  If the phy is
+ * not a valid PHY for this port then the function will notify the user.
+ * A PHY can only be part of a port if its attached SAS ADDRESS is the
+ * same as all other PHYs in the same port.
  */
 static void sci_port_general_link_up_handler(struct isci_port *iport,
-                                                 struct isci_phy *iphy,
-                                                 bool do_notify_user)
+                                            struct isci_phy *iphy,
+                                            u8 flags)
 {
        struct sci_sas_address port_sas_address;
        struct sci_sas_address phy_sas_address;
@@ -730,7 +740,7 @@ static void sci_port_general_link_up_handler(struct isci_port *iport,
            iport->active_phy_mask == 0) {
                struct sci_base_state_machine *sm = &iport->sm;
 
-               sci_port_activate_phy(iport, iphy, do_notify_user);
+               sci_port_activate_phy(iport, iphy, flags);
                if (sm->current_state_id == SCI_PORT_RESETTING)
                        port_state_machine_change(iport, SCI_PORT_READY);
        } else
@@ -781,11 +791,16 @@ bool sci_port_link_detected(
        struct isci_phy *iphy)
 {
        if ((iport->logical_port_index != SCIC_SDS_DUMMY_PORT) &&
-           (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA) &&
-           sci_port_is_wide(iport)) {
-               sci_port_invalid_link_up(iport, iphy);
-
-               return false;
+           (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SATA)) {
+               if (sci_port_is_wide(iport)) {
+                       sci_port_invalid_link_up(iport, iphy);
+                       return false;
+               } else {
+                       struct isci_host *ihost = iport->owning_controller;
+                       struct isci_port *dst_port = &(ihost->ports[iphy->phy_index]);
+                       writel(iphy->phy_index,
+                              &dst_port->port_pe_configuration_register[iphy->phy_index]);
+               }
        }
 
        return true;
@@ -975,6 +990,13 @@ static void sci_port_ready_substate_waiting_enter(struct sci_base_state_machine
        }
 }
 
+static void scic_sds_port_ready_substate_waiting_exit(
+                                       struct sci_base_state_machine *sm)
+{
+       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
+       sci_port_resume_port_task_scheduler(iport);
+}
+
 static void sci_port_ready_substate_operational_enter(struct sci_base_state_machine *sm)
 {
        u32 index;
@@ -988,13 +1010,13 @@ static void sci_port_ready_substate_operational_enter(struct sci_base_state_mach
                        writel(iport->physical_port_index,
                                &iport->port_pe_configuration_register[
                                        iport->phy_table[index]->phy_index]);
+                       if (((iport->active_phy_mask ^ iport->enabled_phy_mask) & (1 << index)) != 0)
+                               sci_port_resume_phy(iport, iport->phy_table[index]);
                }
        }
 
        sci_port_update_viit_entry(iport);
 
-       sci_port_resume_port_task_scheduler(iport);
-
        /*
         * Post the dummy task for the port so the hardware can schedule
         * io correctly
@@ -1061,20 +1083,9 @@ static void sci_port_ready_substate_configuring_enter(struct sci_base_state_mach
        if (iport->active_phy_mask == 0) {
                isci_port_not_ready(ihost, iport);
 
-               port_state_machine_change(iport,
-                                         SCI_PORT_SUB_WAITING);
-       } else if (iport->started_request_count == 0)
-               port_state_machine_change(iport,
-                                         SCI_PORT_SUB_OPERATIONAL);
-}
-
-static void sci_port_ready_substate_configuring_exit(struct sci_base_state_machine *sm)
-{
-       struct isci_port *iport = container_of(sm, typeof(*iport), sm);
-
-       sci_port_suspend_port_task_scheduler(iport);
-       if (iport->ready_exit)
-               sci_port_invalidate_dummy_remote_node(iport);
+               port_state_machine_change(iport, SCI_PORT_SUB_WAITING);
+       } else
+               port_state_machine_change(iport, SCI_PORT_SUB_OPERATIONAL);
 }
 
 enum sci_status sci_port_start(struct isci_port *iport)
@@ -1252,7 +1263,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
                if (status != SCI_SUCCESS)
                        return status;
 
-               sci_port_general_link_up_handler(iport, iphy, true);
+               sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
                iport->not_ready_reason = SCIC_PORT_NOT_READY_RECONFIGURING;
                port_state_machine_change(iport, SCI_PORT_SUB_CONFIGURING);
 
@@ -1262,7 +1273,7 @@ enum sci_status sci_port_add_phy(struct isci_port *iport,
 
                if (status != SCI_SUCCESS)
                        return status;
-               sci_port_general_link_up_handler(iport, iphy, true);
+               sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY);
 
                /* Re-enter the configuring state since this may be the last phy in
                 * the port.
@@ -1338,13 +1349,13 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
                /* Since this is the first phy going link up for the port we
                 * can just enable it and continue
                 */
-               sci_port_activate_phy(iport, iphy, true);
+               sci_port_activate_phy(iport, iphy, PF_NOTIFY|PF_RESUME);
 
                port_state_machine_change(iport,
                                          SCI_PORT_SUB_OPERATIONAL);
                return SCI_SUCCESS;
        case SCI_PORT_SUB_OPERATIONAL:
-               sci_port_general_link_up_handler(iport, iphy, true);
+               sci_port_general_link_up_handler(iport, iphy, PF_NOTIFY|PF_RESUME);
                return SCI_SUCCESS;
        case SCI_PORT_RESETTING:
                /* TODO We should  make  sure  that  the phy  that  has gone
@@ -1361,7 +1372,7 @@ enum sci_status sci_port_link_up(struct isci_port *iport,
                /* In the resetting state we don't notify the user regarding
                 * link up and link down notifications.
                 */
-               sci_port_general_link_up_handler(iport, iphy, false);
+               sci_port_general_link_up_handler(iport, iphy, PF_RESUME);
                return SCI_SUCCESS;
        default:
                dev_warn(sciport_to_dev(iport),
@@ -1584,14 +1595,14 @@ static const struct sci_base_state sci_port_state_table[] = {
        },
        [SCI_PORT_SUB_WAITING] = {
                .enter_state = sci_port_ready_substate_waiting_enter,
+               .exit_state  = scic_sds_port_ready_substate_waiting_exit,
        },
        [SCI_PORT_SUB_OPERATIONAL] = {
                .enter_state = sci_port_ready_substate_operational_enter,
                .exit_state  = sci_port_ready_substate_operational_exit
        },
        [SCI_PORT_SUB_CONFIGURING] = {
-               .enter_state = sci_port_ready_substate_configuring_enter,
-               .exit_state  = sci_port_ready_substate_configuring_exit
+               .enter_state = sci_port_ready_substate_configuring_enter
        },
        [SCI_PORT_RESETTING] = {
                .exit_state  = sci_port_resetting_state_exit
@@ -1609,6 +1620,7 @@ void sci_port_construct(struct isci_port *iport, u8 index,
        iport->logical_port_index  = SCIC_SDS_DUMMY_PORT;
        iport->physical_port_index = index;
        iport->active_phy_mask     = 0;
+       iport->enabled_phy_mask    = 0;
        iport->last_active_phy     = 0;
        iport->ready_exit          = false;
 
index cb5ffbc386038136812da1e596ca5059ff391615..08116090eb7015dbe7c4b3862748a20601e6d765 100644 (file)
@@ -63,6 +63,9 @@
 
 #define SCIC_SDS_DUMMY_PORT   0xFF
 
+#define PF_NOTIFY (1 << 0)
+#define PF_RESUME (1 << 1)
+
 struct isci_phy;
 struct isci_host;
 
@@ -83,6 +86,8 @@ enum isci_status {
  * @logical_port_index: software port index
  * @physical_port_index: hardware port index
  * @active_phy_mask: identifies phy members
+ * @enabled_phy_mask: phy mask of the phys that have
+ *                    already been resumed as part of the port
  * @reserved_tag:
  * @reserved_rni: reserved for port task scheduler workaround
  * @started_request_count: reference count for outstanding commands
@@ -104,6 +109,7 @@ struct isci_port {
        u8 logical_port_index;
        u8 physical_port_index;
        u8 active_phy_mask;
+       u8 enabled_phy_mask;
        u8 last_active_phy;
        u16 reserved_rni;
        u16 reserved_tag;
@@ -250,6 +256,10 @@ bool sci_port_link_detected(
        struct isci_port *iport,
        struct isci_phy *iphy);
 
+enum sci_status sci_port_get_properties(
+       struct isci_port *iport,
+       struct sci_port_properties *prop);
+
 enum sci_status sci_port_link_up(struct isci_port *iport,
                                      struct isci_phy *iphy);
 enum sci_status sci_port_link_down(struct isci_port *iport,
index 38a99d2811411d102220a96bdf918579fb47546b..6d1e9544cbe5c03b7f84059b7651c1b9d560ffb2 100644 (file)
@@ -57,7 +57,7 @@
 
 #define SCIC_SDS_MPC_RECONFIGURATION_TIMEOUT    (10)
 #define SCIC_SDS_APC_RECONFIGURATION_TIMEOUT    (10)
-#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION  (100)
+#define SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION  (250)
 
 enum SCIC_SDS_APC_ACTIVITY {
        SCIC_SDS_APC_SKIP_PHY,
@@ -466,6 +466,23 @@ sci_apc_agent_validate_phy_configuration(struct isci_host *ihost,
        return sci_port_configuration_agent_validate_ports(ihost, port_agent);
 }
 
+/*
+ * This routine will restart the automatic port configuration timeout
+ * timer for the next time period. This could be caused by either a link
+ * down event or a link up event where we cannot yet tell to which port
+ * a phy belongs.
+ */
+static void sci_apc_agent_start_timer(
+       struct sci_port_configuration_agent *port_agent,
+       u32 timeout)
+{
+       if (port_agent->timer_pending)
+               sci_del_timer(&port_agent->timer);
+
+       port_agent->timer_pending = true;
+       sci_mod_timer(&port_agent->timer, timeout);
+}
+
 static void sci_apc_agent_configure_ports(struct isci_host *ihost,
                                               struct sci_port_configuration_agent *port_agent,
                                               struct isci_phy *iphy,
@@ -565,17 +582,8 @@ static void sci_apc_agent_configure_ports(struct isci_host *ihost,
                break;
 
        case SCIC_SDS_APC_START_TIMER:
-               /*
-                * This can occur for either a link down event, or a link
-                * up event where we cannot yet tell the port to which a
-                * phy belongs.
-                */
-               if (port_agent->timer_pending)
-                       sci_del_timer(&port_agent->timer);
-
-               port_agent->timer_pending = true;
-               sci_mod_timer(&port_agent->timer,
-                             SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
+               sci_apc_agent_start_timer(port_agent,
+                                         SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
                break;
 
        case SCIC_SDS_APC_SKIP_PHY:
@@ -607,7 +615,8 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
        if (!iport) {
                /* the phy is not the part of this port */
                port_agent->phy_ready_mask |= 1 << phy_index;
-               sci_apc_agent_configure_ports(ihost, port_agent, iphy, true);
+               sci_apc_agent_start_timer(port_agent,
+                                         SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
        } else {
                /* the phy is already the part of the port */
                u32 port_state = iport->sm.current_state_id;
index b5f4341de2434ead30763b4228e49ff5ee2edc67..9b8117b9d7569e7a8bda7623ec2aa3a07fcfffb6 100644 (file)
@@ -147,7 +147,7 @@ struct isci_orom *isci_request_firmware(struct pci_dev *pdev, const struct firmw
 
        memcpy(orom, fw->data, fw->size);
 
-       if (is_c0(pdev))
+       if (is_c0(pdev) || is_c1(pdev))
                goto out;
 
        /*
index 2c75248ca326ea38e3a9256da8e966fddd516d41..bb0e9d4d97c9a8f0437e51aee3440c8ff0060004 100644 (file)
@@ -152,7 +152,7 @@ struct sci_user_parameters {
 #define MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT 4
 
 struct sci_oem_params;
-int sci_oem_parameters_validate(struct sci_oem_params *oem);
+int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version);
 
 struct isci_orom;
 struct isci_orom *isci_request_oprom(struct pci_dev *pdev);
@@ -191,6 +191,11 @@ struct isci_oem_hdr {
                        0x1a, 0x04, 0xc6)
 #define ISCI_EFI_VAR_NAME      "RstScuO"
 
+#define ISCI_ROM_VER_1_0       0x10
+#define ISCI_ROM_VER_1_1       0x11
+#define ISCI_ROM_VER_1_3       0x13
+#define ISCI_ROM_VER_LATEST    ISCI_ROM_VER_1_3
+
 /* Allowed PORT configuration modes APC Automatic PORT configuration mode is
  * defined by the OEM configuration parameters providing no PHY_MASK parameters
  * for any PORT. i.e. There are no phys assigned to any of the ports at start.
@@ -220,8 +225,86 @@ struct sci_oem_params {
        struct {
                uint8_t mode_type;
                uint8_t max_concurr_spin_up;
-               uint8_t do_enable_ssc;
-               uint8_t reserved;
+               /*
+                * This bitfield indicates the OEM's desired default Tx
+                * Spread Spectrum Clocking (SSC) settings for SATA and SAS.
+                * NOTE: Default SSC Modulation Frequency is 31.5 kHz.
+                */
+               union {
+                       struct {
+                       /*
+                        * NOTE: Max spread for SATA is +0 / -5000 PPM.
+                        * Down-spreading SSC (only method allowed for SATA):
+                        *  SATA SSC Tx Disabled                    = 0x0
+                        *  SATA SSC Tx at +0 / -1419 PPM Spread    = 0x2
+                        *  SATA SSC Tx at +0 / -2129 PPM Spread    = 0x3
+                        *  SATA SSC Tx at +0 / -4257 PPM Spread    = 0x6
+                        *  SATA SSC Tx at +0 / -4967 PPM Spread    = 0x7
+                        */
+                               uint8_t ssc_sata_tx_spread_level:4;
+                       /*
+                        * SAS SSC Tx Disabled                     = 0x0
+                        *
+                        * NOTE: Max spread for SAS down-spreading +0 /
+                        *       -2300 PPM
+                        * Down-spreading SSC:
+                        *  SAS SSC Tx at +0 / -1419 PPM Spread     = 0x2
+                        *  SAS SSC Tx at +0 / -2129 PPM Spread     = 0x3
+                        *
+                        * NOTE: Max spread for SAS center-spreading +2300 /
+                        *       -2300 PPM
+                        * Center-spreading SSC:
+                        *  SAS SSC Tx at +1064 / -1064 PPM Spread  = 0x3
+                        *  SAS SSC Tx at +2129 / -2129 PPM Spread  = 0x6
+                        */
+                               uint8_t ssc_sas_tx_spread_level:3;
+                       /*
+                        * NOTE: Refer to the SSC section of the SAS 2.x
+                        * Specification for proper setting of this field.
+                        * For standard SAS Initiator SAS PHY operation it
+                        * should be 0 for Down-spreading.
+                        * SAS SSC Tx spread type:
+                        *  Down-spreading SSC      = 0
+                        *  Center-spreading SSC    = 1
+                        */
+                               uint8_t ssc_sas_tx_type:1;
+                       };
+                       uint8_t do_enable_ssc;
+               };
+               /*
+                * This field indicates the length of the SAS/SATA cable
+                * between host and device.
+                * It is used to relate the analog parameters of the phy in
+                * the silicon to the length of the cable.
+                * Supported cable attenuation levels:
+                * "short" - up to 3m, "medium" - 3m to 6m, and "long" -
+                * more than 6m.
+                *
+                * This is a bit mask field:
+                *
+                * BIT:      (MSB) 7     6     5     4
+                * ASSIGNMENT:   <phy3><phy2><phy1><phy0>  - Medium cable
+                *                                           length assignment
+                * BIT:            3     2     1     0  (LSB)
+                * ASSIGNMENT:   <phy3><phy2><phy1><phy0>  - Long cable length
+                *                                           assignment
+                *
+                * BITS 7-4 are set when the cable length is assigned to medium
+                * BITS 3-0 are set when the cable length is assigned to long
+                *
+                * The BIT positions are clear when the cable length is
+                * assigned to short.
+                *
+                * Setting the bits for both long and medium cable length is
+                * undefined.
+                *
+                * A value of 0x84 would assign
+                *    phy3 - medium
+                *    phy2 - long
+                *    phy1 - short
+                *    phy0 - short
+                */
+               uint8_t cable_selection_mask;
        } controller;
 
        struct {
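
A stand-alone sketch of the do_enable_ssc union above (hypothetical values; it
assumes the LSB-first bitfield allocation the driver gets on x86): the legacy
byte and the new spread-level fields alias each other, so any non-zero value
still reads as "SSC enabled", while a version 1.1+ ROM can encode per-protocol
spread levels in the same byte.

	#include <stdint.h>
	#include <assert.h>

	struct ssc_byte {
		union {
			struct {
				uint8_t ssc_sata_tx_spread_level:4;
				uint8_t ssc_sas_tx_spread_level:3;
				uint8_t ssc_sas_tx_type:1;
			};
			uint8_t do_enable_ssc;
		};
	};

	int main(void)
	{
		/* 0x37: SATA down-spread level 0x7, SAS down-spread level 0x3,
		 * down-spreading type (0).  An example value, not a ROM default.
		 */
		struct ssc_byte b = { .do_enable_ssc = 0x37 };

		assert(b.ssc_sata_tx_spread_level == 0x7);	/* bits 3:0 */
		assert(b.ssc_sas_tx_spread_level == 0x3);	/* bits 6:4 */
		assert(b.ssc_sas_tx_type == 0);			/* bit 7 */
		return 0;
	}
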
index b207cd3b15a0514da4579e9e869556d6b95dc961..dd74b6ceeb823df92bd69e69a0964069f1b5011a 100644 (file)
@@ -53,6 +53,7 @@
  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 #include <scsi/sas.h>
+#include <linux/bitops.h>
 #include "isci.h"
 #include "port.h"
 #include "remote_device.h"
@@ -1101,6 +1102,7 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
                                                       struct isci_remote_device *idev)
 {
        enum sci_status status;
+       struct sci_port_properties properties;
        struct domain_device *dev = idev->domain_dev;
 
        sci_remote_device_construct(iport, idev);
@@ -1110,6 +1112,11 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
         * entries will be needed to store the remote node.
         */
        idev->is_direct_attached = true;
+
+       sci_port_get_properties(iport, &properties);
+       /* Get accurate port width from port's phy mask for a DA device. */
+       idev->device_port_width = hweight32(properties.phy_mask);
+
        status = sci_controller_allocate_remote_node_context(iport->owning_controller,
                                                                  idev,
                                                                  &idev->rnc.remote_node_index);
@@ -1125,9 +1132,6 @@ static enum sci_status sci_remote_device_da_construct(struct isci_port *iport,
 
        idev->connection_rate = sci_port_get_max_allowed_speed(iport);
 
-       /* / @todo Should I assign the port width by reading all of the phys on the port? */
-       idev->device_port_width = 1;
-
        return SCI_SUCCESS;
 }
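
As a quick, hypothetical illustration of the width change above: the old code
pinned device_port_width to 1, while the new code counts the phys in the
port's mask, e.g.

	struct sci_port_properties properties = { .phy_mask = 0x3 };	/* phys 0 and 1 */
	int width = hweight32(properties.phy_mask);			/* == 2 */

so a direct-attached device behind a two-phy wide port now reports a width of
2 instead of 1.
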
 
index 66ad3dc89498a3ab305de95bc179ecaf42afa2f3..f5a3f7d2bdab29059af48d4b333d39b91c57d3d0 100644 (file)
@@ -496,7 +496,7 @@ static int isci_task_execute_tmf(struct isci_host *ihost,
                }
        }
 
-       isci_print_tmf(tmf);
+       isci_print_tmf(ihost, tmf);
 
        if (tmf->status == SCI_SUCCESS)
                ret =  TMF_RESP_FUNC_COMPLETE;
index bc78c0a41d5cac86df69d6a23af08e6dac949552..1b27b3797c6c9cce8c1160c11d43240ccc4a52fe 100644 (file)
@@ -106,7 +106,6 @@ struct isci_tmf {
        } resp;
        unsigned char lun[8];
        u16 io_tag;
-       struct isci_remote_device *device;
        enum isci_tmf_function_codes tmf_code;
        int status;
 
@@ -120,10 +119,10 @@ struct isci_tmf {
 
 };
 
-static inline void isci_print_tmf(struct isci_tmf *tmf)
+static inline void isci_print_tmf(struct isci_host *ihost, struct isci_tmf *tmf)
 {
        if (SAS_PROTOCOL_SATA == tmf->proto)
-               dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+               dev_dbg(&ihost->pdev->dev,
                        "%s: status = %x\n"
                        "tmf->resp.d2h_fis.status = %x\n"
                        "tmf->resp.d2h_fis.error = %x\n",
@@ -132,7 +131,7 @@ static inline void isci_print_tmf(struct isci_tmf *tmf)
                        tmf->resp.d2h_fis.status,
                        tmf->resp.d2h_fis.error);
        else
-               dev_dbg(&tmf->device->isci_port->isci_host->pdev->dev,
+               dev_dbg(&ihost->pdev->dev,
                        "%s: status = %x\n"
                        "tmf->resp.resp_iu.data_present = %x\n"
                        "tmf->resp.resp_iu.status = %x\n"
index 7269e928824a07f93efb1e570eb136f6cbedfbeb..1d1b0c9da29ba684100515b337431ccfb8be2b08 100644 (file)
@@ -61,7 +61,7 @@ static void fc_disc_restart(struct fc_disc *);
  * Locking Note: This function expects that the lport mutex is locked before
  * calling it.
  */
-void fc_disc_stop_rports(struct fc_disc *disc)
+static void fc_disc_stop_rports(struct fc_disc *disc)
 {
        struct fc_lport *lport;
        struct fc_rport_priv *rdata;
@@ -682,7 +682,7 @@ static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
  * fc_disc_stop() - Stop discovery for a given lport
  * @lport: The local port that discovery should stop on
  */
-void fc_disc_stop(struct fc_lport *lport)
+static void fc_disc_stop(struct fc_lport *lport)
 {
        struct fc_disc *disc = &lport->disc;
 
@@ -698,7 +698,7 @@ void fc_disc_stop(struct fc_lport *lport)
  * This function will block until discovery has been
  * completely stopped and all rports have been deleted.
  */
-void fc_disc_stop_final(struct fc_lport *lport)
+static void fc_disc_stop_final(struct fc_lport *lport)
 {
        fc_disc_stop(lport);
        lport->tt.rport_flush_queue();
index fb9161dc4ca67411f07413018562286fcaeff017..e17a28d324d04ccdf903043ae4549e04c94ccb85 100644 (file)
@@ -28,6 +28,7 @@
 #include <scsi/fc/fc_els.h>
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
+#include "fc_libfc.h"
 
 /**
  * fc_elsct_send() - Send an ELS or CT frame
index 9de9db27e87401b1f5cf244a15e9320011faa276..4d70d96fa5dc5730016a00a3c1753370ca5e980e 100644 (file)
@@ -91,7 +91,7 @@ struct fc_exch_pool {
  * It manages the allocation of exchange IDs.
  */
 struct fc_exch_mgr {
-       struct fc_exch_pool *pool;
+       struct fc_exch_pool __percpu *pool;
        mempool_t       *ep_pool;
        enum fc_class   class;
        struct kref     kref;
index 221875ec3d7c64de19c4f6404961be441fb16f34..f607314810accf03b38bee7b1f36fde6d4c2da9f 100644 (file)
@@ -155,6 +155,7 @@ static struct fc_fcp_pkt *fc_fcp_pkt_alloc(struct fc_lport *lport, gfp_t gfp)
                fsp->xfer_ddp = FC_XID_UNKNOWN;
                atomic_set(&fsp->ref_cnt, 1);
                init_timer(&fsp->timer);
+               fsp->timer.data = (unsigned long)fsp;
                INIT_LIST_HEAD(&fsp->list);
                spin_lock_init(&fsp->scsi_pkt_lock);
        }
@@ -1850,9 +1851,6 @@ int fc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *sc_cmd)
        }
        put_cpu();
 
-       init_timer(&fsp->timer);
-       fsp->timer.data = (unsigned long)fsp;
-
        /*
         * send it to the lower layer
         * if we get -1 return then put the request in the pending
index e77094a587ed62aa17e1c1479d6a1ba04ad2af06..83750ebb527f5e2841f26a06d70a063658d8603f 100644 (file)
@@ -677,7 +677,8 @@ EXPORT_SYMBOL(fc_set_mfs);
  * @lport: The local port receiving the event
  * @event: The discovery event
  */
-void fc_lport_disc_callback(struct fc_lport *lport, enum fc_disc_event event)
+static void fc_lport_disc_callback(struct fc_lport *lport,
+                                  enum fc_disc_event event)
 {
        switch (event) {
        case DISC_EV_SUCCESS:
@@ -1568,7 +1569,7 @@ EXPORT_SYMBOL(fc_lport_flogi_resp);
  * Locking Note: The lport lock is expected to be held before calling
  * this routine.
  */
-void fc_lport_enter_flogi(struct fc_lport *lport)
+static void fc_lport_enter_flogi(struct fc_lport *lport)
 {
        struct fc_frame *fp;
 
index b9e434844a69bb93fe12e153896973a1c8df2968..83aa1efec875999d5c980ab27dcb93cfc71e734c 100644 (file)
@@ -391,7 +391,7 @@ static void fc_rport_work(struct work_struct *work)
  * If it appears we are already logged in, ADISC is used to verify
  * the setup.
  */
-int fc_rport_login(struct fc_rport_priv *rdata)
+static int fc_rport_login(struct fc_rport_priv *rdata)
 {
        mutex_lock(&rdata->rp_mutex);
 
@@ -451,7 +451,7 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
  */
-int fc_rport_logoff(struct fc_rport_priv *rdata)
+static int fc_rport_logoff(struct fc_rport_priv *rdata)
 {
        mutex_lock(&rdata->rp_mutex);
 
@@ -653,8 +653,8 @@ static int fc_rport_login_complete(struct fc_rport_priv *rdata,
  * @fp:            The FLOGI response frame
  * @rp_arg: The remote port that received the FLOGI response
  */
-void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
-                        void *rp_arg)
+static void fc_rport_flogi_resp(struct fc_seq *sp, struct fc_frame *fp,
+                               void *rp_arg)
 {
        struct fc_rport_priv *rdata = rp_arg;
        struct fc_lport *lport = rdata->local_port;
@@ -1520,7 +1520,7 @@ reject:
  *
  * Locking Note: Called with the lport lock held.
  */
-void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
+static void fc_rport_recv_req(struct fc_lport *lport, struct fc_frame *fp)
 {
        struct fc_seq_els_data els_data;
 
index 4ceeace804533c42b287aa9c54a3f952176dab9f..70eb1f79b1ba2f9fd08568e190bfb9126ece962c 100644 (file)
@@ -565,8 +565,7 @@ static int __devinit esp_mac_probe(struct platform_device *dev)
        esp_chips[dev->id] = esp;
        mb();
        if (esp_chips[!dev->id] == NULL) {
-               err = request_irq(host->irq, mac_scsi_esp_intr, 0,
-                                 "Mac ESP", NULL);
+               err = request_irq(host->irq, mac_scsi_esp_intr, 0, "ESP", NULL);
                if (err < 0) {
                        esp_chips[dev->id] = NULL;
                        goto fail_free_priv;
index ea2bde206f7f951ee66abe3baa2b2abd94b3980e..2bccfbe5661e652bc24736cc572004698a7aecca 100644 (file)
@@ -339,9 +339,6 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance)
 
        printk(KERN_INFO "Macintosh SCSI: resetting the SCSI bus..." );
 
-       /* switch off SCSI IRQ - catch an interrupt without IRQ bit set else */
-       disable_irq(IRQ_MAC_SCSI);
-
        /* get in phase */
        NCR5380_write( TARGET_COMMAND_REG,
                      PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) ));
@@ -357,9 +354,6 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance)
        for( end = jiffies + AFTER_RESET_DELAY; time_before(jiffies, end); )
                barrier();
 
-       /* switch on SCSI IRQ again */
-       enable_irq(IRQ_MAC_SCSI);
-
        printk(KERN_INFO " done\n" );
 }
 #endif
index 5c1776406c963f996e16aac4781fe4540112d250..15eefa1d61fd8dfbd25d696bb519a28570005454 100644 (file)
@@ -306,19 +306,22 @@ mega_query_adapter(adapter_t *adapter)
        adapter->host->sg_tablesize = adapter->sglen;
 
 
-       /* use HP firmware and bios version encoding */
+       /* use HP firmware and bios version encoding
+          Note: fw_version[0|1] and bios_version[0|1] were originally shifted
+          right 8 bits, making them zero. This 0 value was hardcoded to fix
+          sparse warnings. */
        if (adapter->product_info.subsysvid == HP_SUBSYS_VID) {
                sprintf (adapter->fw_version, "%c%d%d.%d%d",
                         adapter->product_info.fw_version[2],
-                        adapter->product_info.fw_version[1] >> 8,
+                        0,
                         adapter->product_info.fw_version[1] & 0x0f,
-                        adapter->product_info.fw_version[0] >> 8,
+                        0,
                         adapter->product_info.fw_version[0] & 0x0f);
                sprintf (adapter->bios_version, "%c%d%d.%d%d",
                         adapter->product_info.bios_version[2],
-                        adapter->product_info.bios_version[1] >> 8,
+                        0,
                         adapter->product_info.bios_version[1] & 0x0f,
-                        adapter->product_info.bios_version[0] >> 8,
+                        0,
                         adapter->product_info.bios_version[0] & 0x0f);
        } else {
                memcpy(adapter->fw_version,
index dd94c7d574fb8b8027f574657cbdc43b46fa906f..e5f416f8042d45620cab8ced05fc56738cc8764a 100644 (file)
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION                                "00.00.06.12-rc1"
-#define MEGASAS_RELDATE                                "Oct. 5, 2011"
-#define MEGASAS_EXT_VERSION                    "Wed. Oct. 5 17:00:00 PDT 2011"
+#define MEGASAS_VERSION                                "00.00.06.14-rc1"
+#define MEGASAS_RELDATE                                "Jan. 6, 2012"
+#define MEGASAS_EXT_VERSION                    "Fri. Jan. 6 17:00:00 PDT 2012"
 
 /*
  * Device IDs
@@ -773,7 +773,6 @@ struct megasas_ctrl_info {
 
 #define MFI_OB_INTR_STATUS_MASK                        0x00000002
 #define MFI_POLL_TIMEOUT_SECS                  60
-#define MEGASAS_COMPLETION_TIMER_INTERVAL      (HZ/10)
 
 #define MFI_REPLY_1078_MESSAGE_INTERRUPT       0x80000000
 #define MFI_REPLY_GEN2_MESSAGE_INTERRUPT       0x00000001
@@ -1353,7 +1352,6 @@ struct megasas_instance {
        u32 mfiStatus;
        u32 last_seq_num;
 
-       struct timer_list io_completion_timer;
        struct list_head internal_reset_pending_q;
 
        /* Ptr to hba specific information */
index 29a994f9c4f1e6a9c0973ecd3cebe413f4937c7b..8b300be442849336d296768c69a44e126e24a40d 100644 (file)
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : v00.00.06.12-rc1
+ *  Version : v00.00.06.14-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
 #include "megaraid_sas_fusion.h"
 #include "megaraid_sas.h"
 
-/*
- * poll_mode_io:1- schedule complete completion from q cmd
- */
-static unsigned int poll_mode_io;
-module_param_named(poll_mode_io, poll_mode_io, int, 0);
-MODULE_PARM_DESC(poll_mode_io,
-       "Complete cmds from IO path, (default=0)");
-
 /*
  * Number of sectors per IO command
  * Will be set in megasas_init_mfi if user does not provide
@@ -1439,11 +1431,6 @@ megasas_build_and_issue_cmd(struct megasas_instance *instance,
 
        instance->instancet->fire_cmd(instance, cmd->frame_phys_addr,
                                cmd->frame_count-1, instance->reg_set);
-       /*
-        * Check if we have pend cmds to be completed
-        */
-       if (poll_mode_io && atomic_read(&instance->fw_outstanding))
-               tasklet_schedule(&instance->isr_tasklet);
 
        return 0;
 out_return_cmd:
@@ -3370,47 +3357,6 @@ fail_fw_init:
        return -EINVAL;
 }
 
-/**
- * megasas_start_timer - Initializes a timer object
- * @instance:          Adapter soft state
- * @timer:             timer object to be initialized
- * @fn:                        timer function
- * @interval:          time interval between timer function call
- */
-static inline void
-megasas_start_timer(struct megasas_instance *instance,
-                       struct timer_list *timer,
-                       void *fn, unsigned long interval)
-{
-       init_timer(timer);
-       timer->expires = jiffies + interval;
-       timer->data = (unsigned long)instance;
-       timer->function = fn;
-       add_timer(timer);
-}
-
-/**
- * megasas_io_completion_timer - Timer fn
- * @instance_addr:     Address of adapter soft state
- *
- * Schedules tasklet for cmd completion
- * if poll_mode_io is set
- */
-static void
-megasas_io_completion_timer(unsigned long instance_addr)
-{
-       struct megasas_instance *instance =
-                       (struct megasas_instance *)instance_addr;
-
-       if (atomic_read(&instance->fw_outstanding))
-               tasklet_schedule(&instance->isr_tasklet);
-
-       /* Restart timer */
-       if (poll_mode_io)
-               mod_timer(&instance->io_completion_timer,
-                       jiffies + MEGASAS_COMPLETION_TIMER_INTERVAL);
-}
-
 static u32
 megasas_init_adapter_mfi(struct megasas_instance *instance)
 {
@@ -3638,11 +3584,6 @@ static int megasas_init_fw(struct megasas_instance *instance)
        tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
                (unsigned long)instance);
 
-       /* Initialize the cmd completion timer */
-       if (poll_mode_io)
-               megasas_start_timer(instance, &instance->io_completion_timer,
-                               megasas_io_completion_timer,
-                               MEGASAS_COMPLETION_TIMER_INTERVAL);
        return 0;
 
 fail_init_adapter:
@@ -4369,9 +4310,6 @@ megasas_suspend(struct pci_dev *pdev, pm_message_t state)
        host = instance->host;
        instance->unload = 1;
 
-       if (poll_mode_io)
-               del_timer_sync(&instance->io_completion_timer);
-
        megasas_flush_cache(instance);
        megasas_shutdown_controller(instance, MR_DCMD_HIBERNATE_SHUTDOWN);
 
@@ -4511,12 +4449,6 @@ megasas_resume(struct pci_dev *pdev)
        }
 
        instance->instancet->enable_intr(instance->reg_set);
-
-       /* Initialize the cmd completion timer */
-       if (poll_mode_io)
-               megasas_start_timer(instance, &instance->io_completion_timer,
-                               megasas_io_completion_timer,
-                               MEGASAS_COMPLETION_TIMER_INTERVAL);
        instance->unload = 0;
 
        /*
@@ -4570,9 +4502,6 @@ static void __devexit megasas_detach_one(struct pci_dev *pdev)
        host = instance->host;
        fusion = instance->ctrl_context;
 
-       if (poll_mode_io)
-               del_timer_sync(&instance->io_completion_timer);
-
        scsi_remove_host(instance->host);
        megasas_flush_cache(instance);
        megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -4773,6 +4702,8 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance,
        memcpy(cmd->frame, ioc->frame.raw, 2 * MEGAMFI_FRAME_SIZE);
        cmd->frame->hdr.context = cmd->index;
        cmd->frame->hdr.pad_0 = 0;
+       cmd->frame->hdr.flags &= ~(MFI_FRAME_IEEE | MFI_FRAME_SGL64 |
+                                  MFI_FRAME_SENSE64);
 
        /*
         * The management interface between applications and the fw uses
@@ -5219,60 +5150,6 @@ megasas_sysfs_set_dbg_lvl(struct device_driver *dd, const char *buf, size_t coun
 static DRIVER_ATTR(dbg_lvl, S_IRUGO|S_IWUSR, megasas_sysfs_show_dbg_lvl,
                megasas_sysfs_set_dbg_lvl);
 
-static ssize_t
-megasas_sysfs_show_poll_mode_io(struct device_driver *dd, char *buf)
-{
-       return sprintf(buf, "%u\n", poll_mode_io);
-}
-
-static ssize_t
-megasas_sysfs_set_poll_mode_io(struct device_driver *dd,
-                               const char *buf, size_t count)
-{
-       int retval = count;
-       int tmp = poll_mode_io;
-       int i;
-       struct megasas_instance *instance;
-
-       if (sscanf(buf, "%u", &poll_mode_io) < 1) {
-               printk(KERN_ERR "megasas: could not set poll_mode_io\n");
-               retval = -EINVAL;
-       }
-
-       /*
-        * Check if poll_mode_io is already set or is same as previous value
-        */
-       if ((tmp && poll_mode_io) || (tmp == poll_mode_io))
-               goto out;
-
-       if (poll_mode_io) {
-               /*
-                * Start timers for all adapters
-                */
-               for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-                       instance = megasas_mgmt_info.instance[i];
-                       if (instance) {
-                               megasas_start_timer(instance,
-                                       &instance->io_completion_timer,
-                                       megasas_io_completion_timer,
-                                       MEGASAS_COMPLETION_TIMER_INTERVAL);
-                       }
-               }
-       } else {
-               /*
-                * Delete timers for all adapters
-                */
-               for (i = 0; i < megasas_mgmt_info.max_index; i++) {
-                       instance = megasas_mgmt_info.instance[i];
-                       if (instance)
-                               del_timer_sync(&instance->io_completion_timer);
-               }
-       }
-
-out:
-       return retval;
-}
-
 static void
 megasas_aen_polling(struct work_struct *work)
 {
@@ -5502,11 +5379,6 @@ megasas_aen_polling(struct work_struct *work)
        kfree(ev);
 }
 
-
-static DRIVER_ATTR(poll_mode_io, S_IRUGO|S_IWUSR,
-               megasas_sysfs_show_poll_mode_io,
-               megasas_sysfs_set_poll_mode_io);
-
 /**
  * megasas_init - Driver load entry point
  */
@@ -5565,11 +5437,6 @@ static int __init megasas_init(void)
                                  &driver_attr_dbg_lvl);
        if (rval)
                goto err_dcf_dbg_lvl;
-       rval = driver_create_file(&megasas_pci_driver.driver,
-                                 &driver_attr_poll_mode_io);
-       if (rval)
-               goto err_dcf_poll_mode_io;
-
        rval = driver_create_file(&megasas_pci_driver.driver,
                                &driver_attr_support_device_change);
        if (rval)
@@ -5578,10 +5445,6 @@ static int __init megasas_init(void)
        return rval;
 
 err_dcf_support_device_change:
-       driver_remove_file(&megasas_pci_driver.driver,
-                 &driver_attr_poll_mode_io);
-
-err_dcf_poll_mode_io:
        driver_remove_file(&megasas_pci_driver.driver,
                           &driver_attr_dbg_lvl);
 err_dcf_dbg_lvl:
@@ -5606,8 +5469,6 @@ err_pcidrv:
  */
 static void __exit megasas_exit(void)
 {
-       driver_remove_file(&megasas_pci_driver.driver,
-                          &driver_attr_poll_mode_io);
        driver_remove_file(&megasas_pci_driver.driver,
                           &driver_attr_dbg_lvl);
        driver_remove_file(&megasas_pci_driver.driver,
index 5255dd688aca49b807f430ffa26dd80a41fcf42e..294abb0defa66e4b34bb8eacf9d91d534bfe144f 100644 (file)
@@ -282,7 +282,9 @@ u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
        else {
                *pDevHandle = MR_PD_INVALID; /* set dev handle as invalid. */
                if ((raid->level >= 5) &&
-                   (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER))
+                   ((instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) ||
+                    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER &&
+                     raid->regTypeReqOnRead != REGION_TYPE_UNUSED)))
                        pRAID_Context->regLockFlags = REGION_TYPE_EXCLUSIVE;
                else if (raid->level == 1) {
                        /* Get alternate Pd. */
index 22a3ff02e48a419d0e9353d72e53f8d13a3e41f0..bfe68545203ff9123a58f1f9ae896b3b59293811 100644 (file)
 #define QL4_SESS_RECOVERY_TMO          120     /* iSCSI session */
                                                /* recovery timeout */
 
+#define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))
+#define LSW(x) ((uint16_t)(x))
 #define LSDW(x) ((u32)((u64)(x)))
 #define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
 
@@ -671,6 +673,7 @@ struct scsi_qla_host {
        uint16_t pri_ddb_idx;
        uint16_t sec_ddb_idx;
        int is_reset;
+       uint16_t temperature;
 };
 
 struct ql4_task_data {
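Aside: the MSB() and LSW() helpers added in the hunk above are combined later in this diff as LSW(MSB(halt_status)) to check whether bits 8-15 of the 32-bit PEG halt status equal 0x67, i.e. the 0x00006700 firmware-abort code reported in the new error message. A minimal user-space sketch of that extraction, reusing the macro definitions verbatim (the halt_status value is illustrative only):

        #include <stdint.h>
        #include <stdio.h>

        #define MSB(x) ((uint8_t)((uint16_t)(x) >> 8))  /* bits 8-15 of the low word */
        #define LSW(x) ((uint16_t)(x))                   /* low 16 bits */

        int main(void)
        {
                uint32_t halt_status = 0x00006700;       /* illustrative register value */

                if (LSW(MSB(halt_status)) == 0x67)
                        printf("Firmware aborted with error code 0x00006700\n");
                return 0;
        }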
index 1bdfa8120ac888c65c304c28dc3f3aba806ea403..90614f38b55d54541bba90d114d37e1ae4c553d7 100644 (file)
@@ -697,6 +697,9 @@ int qla4xxx_start_firmware(struct scsi_qla_host *ha)
                        writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
                               &ha->reg->ctrl_status);
                        readl(&ha->reg->ctrl_status);
+                       writel(set_rmask(CSR_SCSI_COMPLETION_INTR),
+                              &ha->reg->ctrl_status);
+                       readl(&ha->reg->ctrl_status);
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
                        if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) {
                                DEBUG2(printk("scsi%ld: %s: Get firmware "
index c2593782fbbef8c203148b1661c92a3e1dbe35f6..e1e66a45e4d06f59ab7e35a4b46999d3bba90106 100644 (file)
@@ -219,6 +219,13 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
                ha->mailbox_timeout_count++;
                mbx_sts[0] = (-1);
                set_bit(DPC_RESET_HA, &ha->dpc_flags);
+               if (is_qla8022(ha)) {
+                       ql4_printk(KERN_INFO, ha,
+                                  "disabling pause transmit on port 0 & 1.\n");
+                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                                       CRB_NIU_XG_PAUSE_CTL_P0 |
+                                       CRB_NIU_XG_PAUSE_CTL_P1);
+               }
                goto mbox_exit;
        }
 
index 8d6bc1b2ff17266a45d66dfb923f5c7757cf1493..78f1111158d75379d4d1c1ef04a139889c280d3d 100644 (file)
@@ -1875,6 +1875,11 @@ exit:
 int qla4_8xxx_load_risc(struct scsi_qla_host *ha)
 {
        int retval;
+
+       /* clear the interrupt */
+       writel(0, &ha->qla4_8xxx_reg->host_int);
+       readl(&ha->qla4_8xxx_reg->host_int);
+
        retval = qla4_8xxx_device_state_handler(ha);
 
        if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
index 35376a1c3f1bc109fb8bea9deb54cec9b7acf0c8..dc45ac92369150eea7821fac2bacc24c8fe11311 100644 (file)
 #define PHAN_PEG_RCV_INITIALIZED       0xff01
 
 /*CRB_RELATED*/
-#define QLA82XX_CRB_BASE       QLA82XX_CAM_RAM(0x200)
-#define QLA82XX_REG(X)         (QLA82XX_CRB_BASE+(X))
-
+#define QLA82XX_CRB_BASE               (QLA82XX_CAM_RAM(0x200))
+#define QLA82XX_REG(X)                 (QLA82XX_CRB_BASE+(X))
 #define CRB_CMDPEG_STATE               QLA82XX_REG(0x50)
 #define CRB_RCVPEG_STATE               QLA82XX_REG(0x13c)
 #define CRB_DMA_SHIFT                  QLA82XX_REG(0xcc)
+#define CRB_TEMP_STATE                 QLA82XX_REG(0x1b4)
+
+#define qla82xx_get_temp_val(x)                ((x) >> 16)
+#define qla82xx_get_temp_state(x)      ((x) & 0xffff)
+#define qla82xx_encode_temp(val, state)        (((val) << 16) | (state))
+
+/*
+ * Temperature control.
+ */
+enum {
+       QLA82XX_TEMP_NORMAL = 0x1,      /* Normal operating range */
+       QLA82XX_TEMP_WARN,      /* Sound alert, temperature getting high */
+       QLA82XX_TEMP_PANIC      /* Fatal error, hardware has shut down. */
+};
+
+#define CRB_NIU_XG_PAUSE_CTL_P0                0x1
+#define CRB_NIU_XG_PAUSE_CTL_P1                0x8
 
 #define QLA82XX_HW_H0_CH_HUB_ADR       0x05
 #define QLA82XX_HW_H1_CH_HUB_ADR       0x0E
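Aside: the new CRB_TEMP_STATE word packs the temperature reading and its state into one 32-bit value, with the degrees-Celsius value in the upper 16 bits and the QLA82XX_TEMP_* state in the lower 16 bits; that is exactly how qla4_8xxx_check_temp(), added later in this diff, takes it apart. A small stand-alone sketch using the macros above (the 58 degree reading is a made-up example):

        #include <stdint.h>
        #include <stdio.h>

        #define qla82xx_get_temp_val(x)         ((x) >> 16)
        #define qla82xx_get_temp_state(x)       ((x) & 0xffff)
        #define qla82xx_encode_temp(val, state) (((val) << 16) | (state))

        enum { QLA82XX_TEMP_NORMAL = 0x1, QLA82XX_TEMP_WARN, QLA82XX_TEMP_PANIC };

        int main(void)
        {
                /* Pack and unpack the same way the driver reads CRB_TEMP_STATE. */
                uint32_t temp = qla82xx_encode_temp(58, QLA82XX_TEMP_WARN);

                printf("temperature=%u C, state=%u\n",
                       (unsigned)qla82xx_get_temp_val(temp),
                       (unsigned)qla82xx_get_temp_state(temp));
                return 0;
        }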
index ec393a00c03816f52515fc7e294187a2527e2bf0..ce6d3b7f0c616a3adc14677c27996e99711d4f75 100644 (file)
@@ -35,43 +35,44 @@ static struct kmem_cache *srb_cachep;
 int ql4xdisablesysfsboot = 1;
 module_param(ql4xdisablesysfsboot, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdisablesysfsboot,
-               "Set to disable exporting boot targets to sysfs\n"
-               " 0 - Export boot targets\n"
-               " 1 - Do not export boot targets (Default)");
+                " Set to disable exporting boot targets to sysfs.\n"
+                "\t\t  0 - Export boot targets\n"
+                "\t\t  1 - Do not export boot targets (Default)");
 
 int ql4xdontresethba = 0;
 module_param(ql4xdontresethba, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xdontresethba,
-               "Don't reset the HBA for driver recovery \n"
-               " 0 - It will reset HBA (Default)\n"
-               " 1 - It will NOT reset HBA");
+                " Don't reset the HBA for driver recovery.\n"
+                "\t\t  0 - It will reset HBA (Default)\n"
+                "\t\t  1 - It will NOT reset HBA");
 
-int ql4xextended_error_logging = 0; /* 0 = off, 1 = log errors */
+int ql4xextended_error_logging;
 module_param(ql4xextended_error_logging, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xextended_error_logging,
-                "Option to enable extended error logging, "
-                "Default is 0 - no logging, 1 - debug logging");
+                " Option to enable extended error logging.\n"
+                "\t\t  0 - no logging (Default)\n"
+                "\t\t  2 - debug logging");
 
 int ql4xenablemsix = 1;
 module_param(ql4xenablemsix, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql4xenablemsix,
-               "Set to enable MSI or MSI-X interrupt mechanism.\n"
-               " 0 = enable INTx interrupt mechanism.\n"
-               " 1 = enable MSI-X interrupt mechanism (Default).\n"
-               " 2 = enable MSI interrupt mechanism.");
+                " Set to enable MSI or MSI-X interrupt mechanism.\n"
+                "\t\t  0 = enable INTx interrupt mechanism.\n"
+                "\t\t  1 = enable MSI-X interrupt mechanism (Default).\n"
+                "\t\t  2 = enable MSI interrupt mechanism.");
 
 #define QL4_DEF_QDEPTH 32
 static int ql4xmaxqdepth = QL4_DEF_QDEPTH;
 module_param(ql4xmaxqdepth, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(ql4xmaxqdepth,
-               "Maximum queue depth to report for target devices.\n"
-               " Default: 32.");
+                " Maximum queue depth to report for target devices.\n"
+                "\t\t  Default: 32.");
 
 static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
 module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
 MODULE_PARM_DESC(ql4xsess_recovery_tmo,
                "Target Session Recovery Timeout.\n"
-               " Default: 120 sec.");
+               "\t\t  Default: 120 sec.");
 
 static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
 /*
@@ -1630,7 +1631,9 @@ void qla4xxx_update_session_conn_param(struct scsi_qla_host *ha,
 
        /* Update timers after login */
        ddb_entry->default_relogin_timeout =
-                               le16_to_cpu(fw_ddb_entry->def_timeout);
+               (le16_to_cpu(fw_ddb_entry->def_timeout) > LOGIN_TOV) &&
+                (le16_to_cpu(fw_ddb_entry->def_timeout) < LOGIN_TOV * 10) ?
+                le16_to_cpu(fw_ddb_entry->def_timeout) : LOGIN_TOV;
        ddb_entry->default_time2wait =
                                le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
 
@@ -1969,6 +1972,42 @@ mem_alloc_error_exit:
        return QLA_ERROR;
 }
 
+/**
+ * qla4_8xxx_check_temp - Check the ISP82XX temperature.
+ * @ha: adapter block pointer.
+ *
+ * Note: The caller should not hold the idc lock.
+ **/
+static int qla4_8xxx_check_temp(struct scsi_qla_host *ha)
+{
+       uint32_t temp, temp_state, temp_val;
+       int status = QLA_SUCCESS;
+
+       temp = qla4_8xxx_rd_32(ha, CRB_TEMP_STATE);
+
+       temp_state = qla82xx_get_temp_state(temp);
+       temp_val = qla82xx_get_temp_val(temp);
+
+       if (temp_state == QLA82XX_TEMP_PANIC) {
+               ql4_printk(KERN_WARNING, ha, "Device temperature %d degrees C"
+                          " exceeds maximum allowed. Hardware has been shut"
+                          " down.\n", temp_val);
+               status = QLA_ERROR;
+       } else if (temp_state == QLA82XX_TEMP_WARN) {
+               if (ha->temperature == QLA82XX_TEMP_NORMAL)
+                       ql4_printk(KERN_WARNING, ha, "Device temperature %d"
+                                  " degrees C exceeds operating range."
+                                  " Immediate action needed.\n", temp_val);
+       } else {
+               if (ha->temperature == QLA82XX_TEMP_WARN)
+                       ql4_printk(KERN_INFO, ha, "Device temperature is"
+                                  " now %d degrees C in normal range.\n",
+                                  temp_val);
+       }
+       ha->temperature = temp_state;
+       return status;
+}
+
 /**
  * qla4_8xxx_check_fw_alive  - Check firmware health
  * @ha: Pointer to host adapter structure.
@@ -2040,7 +2079,16 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
            test_bit(DPC_RESET_HA, &ha->dpc_flags) ||
            test_bit(DPC_RETRY_RESET_HA, &ha->dpc_flags))) {
                dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
-               if (dev_state == QLA82XX_DEV_NEED_RESET &&
+
+               if (qla4_8xxx_check_temp(ha)) {
+                       ql4_printk(KERN_INFO, ha, "disabling pause"
+                                  " transmit on port 0 & 1.\n");
+                       qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                                       CRB_NIU_XG_PAUSE_CTL_P0 |
+                                       CRB_NIU_XG_PAUSE_CTL_P1);
+                       set_bit(DPC_HA_UNRECOVERABLE, &ha->dpc_flags);
+                       qla4xxx_wake_dpc(ha);
+               } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
                    !test_bit(DPC_RESET_HA, &ha->dpc_flags)) {
                        if (!ql4xdontresethba) {
                                ql4_printk(KERN_INFO, ha, "%s: HW State: "
@@ -2057,9 +2105,21 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
                } else  {
                        /* Check firmware health */
                        if (qla4_8xxx_check_fw_alive(ha)) {
+                               ql4_printk(KERN_INFO, ha, "disabling pause"
+                                          " transmit on port 0 & 1.\n");
+                               qla4_8xxx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
+                                               CRB_NIU_XG_PAUSE_CTL_P0 |
+                                               CRB_NIU_XG_PAUSE_CTL_P1);
                                halt_status = qla4_8xxx_rd_32(ha,
                                                QLA82XX_PEG_HALT_STATUS1);
 
+                               if (LSW(MSB(halt_status)) == 0x67)
+                                       ql4_printk(KERN_ERR, ha, "%s:"
+                                                  " Firmware aborted with"
+                                                  " error code 0x00006700."
+                                                  " Device is being reset\n",
+                                                  __func__);
+
                                /* Since we cannot change dev_state in interrupt
                                 * context, set appropriate DPC flag then wakeup
                                 * DPC */
@@ -2078,7 +2138,7 @@ void qla4_8xxx_watchdog(struct scsi_qla_host *ha)
        }
 }
 
-void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
+static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
 {
        struct iscsi_session *sess;
        struct ddb_entry *ddb_entry;
@@ -3826,16 +3886,14 @@ exit_check:
        return ret;
 }
 
-static void qla4xxx_free_nt_list(struct list_head *list_nt)
+static void qla4xxx_free_ddb_list(struct list_head *list_ddb)
 {
-       struct qla_ddb_index  *nt_ddb_idx, *nt_ddb_idx_tmp;
+       struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
 
-       /* Free up the normaltargets list */
-       list_for_each_entry_safe(nt_ddb_idx, nt_ddb_idx_tmp, list_nt, list) {
-               list_del_init(&nt_ddb_idx->list);
-               vfree(nt_ddb_idx);
+       list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+               list_del_init(&ddb_idx->list);
+               vfree(ddb_idx);
        }
-
 }
 
 static struct iscsi_endpoint *qla4xxx_get_ep_fwdb(struct scsi_qla_host *ha,
@@ -3884,6 +3942,8 @@ static int qla4xxx_verify_boot_idx(struct scsi_qla_host *ha, uint16_t idx)
 static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
                                          struct ddb_entry *ddb_entry)
 {
+       uint16_t def_timeout;
+
        ddb_entry->ddb_type = FLASH_DDB;
        ddb_entry->fw_ddb_index = INVALID_ENTRY;
        ddb_entry->fw_ddb_device_state = DDB_DS_NO_CONNECTION_ACTIVE;
@@ -3894,9 +3954,10 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
        atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
        atomic_set(&ddb_entry->relogin_timer, 0);
        atomic_set(&ddb_entry->relogin_retry_count, 0);
-
+       def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
        ddb_entry->default_relogin_timeout =
-               le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
+               (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
+               def_timeout : LOGIN_TOV;
        ddb_entry->default_time2wait =
                le16_to_cpu(ddb_entry->fw_ddb_entry.iscsi_def_time2wait);
 }
@@ -3934,7 +3995,6 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
                            ip_state == IP_ADDRSTATE_DEPRICATED ||
                            ip_state == IP_ADDRSTATE_DISABLING)
                                ip_idx[idx] = -1;
-
                }
 
                /* Break if all IP states checked */
@@ -3947,58 +4007,37 @@ static void qla4xxx_wait_for_ip_configuration(struct scsi_qla_host *ha)
        } while (time_after(wtime, jiffies));
 }
 
-void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+static void qla4xxx_build_st_list(struct scsi_qla_host *ha,
+                                 struct list_head *list_st)
 {
+       struct qla_ddb_index  *st_ddb_idx;
        int max_ddbs;
+       int fw_idx_size;
+       struct dev_db_entry *fw_ddb_entry;
+       dma_addr_t fw_ddb_dma;
        int ret;
        uint32_t idx = 0, next_idx = 0;
        uint32_t state = 0, conn_err = 0;
-       uint16_t conn_id;
-       struct dev_db_entry *fw_ddb_entry;
-       struct ddb_entry *ddb_entry = NULL;
-       dma_addr_t fw_ddb_dma;
-       struct iscsi_cls_session *cls_sess;
-       struct iscsi_session *sess;
-       struct iscsi_cls_conn *cls_conn;
-       struct iscsi_endpoint *ep;
-       uint16_t cmds_max = 32, tmo = 0;
-       uint32_t initial_cmdsn = 0;
-       struct list_head list_st, list_nt; /* List of sendtargets */
-       struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
-       int fw_idx_size;
-       unsigned long wtime;
-       struct qla_ddb_index  *nt_ddb_idx;
-
-       if (!test_bit(AF_LINK_UP, &ha->flags)) {
-               set_bit(AF_BUILD_DDB_LIST, &ha->flags);
-               ha->is_reset = is_reset;
-               return;
-       }
-       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
-                                    MAX_DEV_DB_ENTRIES;
+       uint16_t conn_id = 0;
 
        fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
                                      &fw_ddb_dma);
        if (fw_ddb_entry == NULL) {
                DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
-               goto exit_ddb_list;
+               goto exit_st_list;
        }
 
-       INIT_LIST_HEAD(&list_st);
-       INIT_LIST_HEAD(&list_nt);
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
        fw_idx_size = sizeof(struct qla_ddb_index);
 
        for (idx = 0; idx < max_ddbs; idx = next_idx) {
-               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
-                                             fw_ddb_dma, NULL,
-                                             &next_idx, &state, &conn_err,
-                                             NULL, &conn_id);
+               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+                                             NULL, &next_idx, &state,
+                                             &conn_err, NULL, &conn_id);
                if (ret == QLA_ERROR)
                        break;
 
-               if (qla4xxx_verify_boot_idx(ha, idx) != QLA_SUCCESS)
-                       goto continue_next_st;
-
                /* Check if ST, add to the list_st */
                if (strlen((char *) fw_ddb_entry->iscsi_name) != 0)
                        goto continue_next_st;
@@ -4009,59 +4048,155 @@ void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
 
                st_ddb_idx->fw_ddb_idx = idx;
 
-               list_add_tail(&st_ddb_idx->list, &list_st);
+               list_add_tail(&st_ddb_idx->list, list_st);
 continue_next_st:
                if (next_idx == 0)
                        break;
        }
 
-       /* Before issuing conn open mbox, ensure all IPs states are configured
-        * Note, conn open fails if IPs are not configured
+exit_st_list:
+       if (fw_ddb_entry)
+               dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_remove_failed_ddb - Remove inactive or failed ddb from list
+ * @ha: pointer to adapter structure
+ * @list_ddb: List from which failed ddbs are to be removed
+ *
+ * Iterate over the list of DDBs and remove any that are in the
+ * no-connection-active or failed state.
+ **/
+static void qla4xxx_remove_failed_ddb(struct scsi_qla_host *ha,
+                                     struct list_head *list_ddb)
+{
+       struct qla_ddb_index  *ddb_idx, *ddb_idx_tmp;
+       uint32_t next_idx = 0;
+       uint32_t state = 0, conn_err = 0;
+       int ret;
+
+       list_for_each_entry_safe(ddb_idx, ddb_idx_tmp, list_ddb, list) {
+               ret = qla4xxx_get_fwddb_entry(ha, ddb_idx->fw_ddb_idx,
+                                             NULL, 0, NULL, &next_idx, &state,
+                                             &conn_err, NULL, NULL);
+               if (ret == QLA_ERROR)
+                       continue;
+
+               if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
+                   state == DDB_DS_SESSION_FAILED) {
+                       list_del_init(&ddb_idx->list);
+                       vfree(ddb_idx);
+               }
+       }
+}
+
+static int qla4xxx_sess_conn_setup(struct scsi_qla_host *ha,
+                                  struct dev_db_entry *fw_ddb_entry,
+                                  int is_reset)
+{
+       struct iscsi_cls_session *cls_sess;
+       struct iscsi_session *sess;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_endpoint *ep;
+       uint16_t cmds_max = 32;
+       uint16_t conn_id = 0;
+       uint32_t initial_cmdsn = 0;
+       int ret = QLA_SUCCESS;
+
+       struct ddb_entry *ddb_entry = NULL;
+
+       /* Create session object, with INVALID_ENTRY,
+        * the target_id would get set when we issue the login
         */
-       qla4xxx_wait_for_ip_configuration(ha);
+       cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport, ha->host,
+                                      cmds_max, sizeof(struct ddb_entry),
+                                      sizeof(struct ql4_task_data),
+                                      initial_cmdsn, INVALID_ENTRY);
+       if (!cls_sess) {
+               ret = QLA_ERROR;
+               goto exit_setup;
+       }
 
-       /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
-       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
-               qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+       /*
+        * iscsi_session_setup increments the driver reference
+        * count, which would keep the driver from being unloaded,
+        * so call module_put to decrement the reference count.
+        **/
+       module_put(qla4xxx_iscsi_transport.owner);
+       sess = cls_sess->dd_data;
+       ddb_entry = sess->dd_data;
+       ddb_entry->sess = cls_sess;
+
+       cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
+       memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+              sizeof(struct dev_db_entry));
+
+       qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
+
+       cls_conn = iscsi_conn_setup(cls_sess, sizeof(struct qla_conn), conn_id);
+
+       if (!cls_conn) {
+               ret = QLA_ERROR;
+               goto exit_setup;
        }
 
-       /* Wait to ensure all sendtargets are done for min 12 sec wait */
-       tmo = ((ha->def_timeout < LOGIN_TOV) ? LOGIN_TOV : ha->def_timeout);
-       DEBUG2(ql4_printk(KERN_INFO, ha,
-                         "Default time to wait for build ddb %d\n", tmo));
+       ddb_entry->conn = cls_conn;
 
-       wtime = jiffies + (HZ * tmo);
-       do {
-               list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st,
-                                        list) {
-                       ret = qla4xxx_get_fwddb_entry(ha,
-                                                     st_ddb_idx->fw_ddb_idx,
-                                                     NULL, 0, NULL, &next_idx,
-                                                     &state, &conn_err, NULL,
-                                                     NULL);
-                       if (ret == QLA_ERROR)
-                               continue;
+       /* Setup ep, for displaying attributes in sysfs */
+       ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
+       if (ep) {
+               ep->conn = cls_conn;
+               cls_conn->ep = ep;
+       } else {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Unable to get ep\n"));
+               ret = QLA_ERROR;
+               goto exit_setup;
+       }
 
-                       if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
-                           state == DDB_DS_SESSION_FAILED) {
-                               list_del_init(&st_ddb_idx->list);
-                               vfree(st_ddb_idx);
-                       }
-               }
-               schedule_timeout_uninterruptible(HZ / 10);
-       } while (time_after(wtime, jiffies));
+       /* Update sess/conn params */
+       qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess, cls_conn);
 
-       /* Free up the sendtargets list */
-       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
-               list_del_init(&st_ddb_idx->list);
-               vfree(st_ddb_idx);
+       if (is_reset == RESET_ADAPTER) {
+               iscsi_block_session(cls_sess);
+               /* Use the relogin path to discover new devices
+                *  by short-circuiting the logic of setting
+                *  timer to relogin - instead set the flags
+                *  to initiate login right away.
+                */
+               set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
+               set_bit(DF_RELOGIN, &ddb_entry->flags);
        }
 
+exit_setup:
+       return ret;
+}
+
+static void qla4xxx_build_nt_list(struct scsi_qla_host *ha,
+                                 struct list_head *list_nt, int is_reset)
+{
+       struct dev_db_entry *fw_ddb_entry;
+       dma_addr_t fw_ddb_dma;
+       int max_ddbs;
+       int fw_idx_size;
+       int ret;
+       uint32_t idx = 0, next_idx = 0;
+       uint32_t state = 0, conn_err = 0;
+       uint16_t conn_id = 0;
+       struct qla_ddb_index  *nt_ddb_idx;
+
+       fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL,
+                                     &fw_ddb_dma);
+       if (fw_ddb_entry == NULL) {
+               DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n"));
+               goto exit_nt_list;
+       }
+       max_ddbs =  is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX :
+                                    MAX_DEV_DB_ENTRIES;
+       fw_idx_size = sizeof(struct qla_ddb_index);
+
        for (idx = 0; idx < max_ddbs; idx = next_idx) {
-               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry,
-                                             fw_ddb_dma, NULL,
-                                             &next_idx, &state, &conn_err,
-                                             NULL, &conn_id);
+               ret = qla4xxx_get_fwddb_entry(ha, idx, fw_ddb_entry, fw_ddb_dma,
+                                             NULL, &next_idx, &state,
+                                             &conn_err, NULL, &conn_id);
                if (ret == QLA_ERROR)
                        break;
 
@@ -4072,107 +4207,113 @@ continue_next_st:
                if (strlen((char *) fw_ddb_entry->iscsi_name) == 0)
                        goto continue_next_nt;
 
-               if (state == DDB_DS_NO_CONNECTION_ACTIVE ||
-                   state == DDB_DS_SESSION_FAILED) {
-                       DEBUG2(ql4_printk(KERN_INFO, ha,
-                                         "Adding  DDB to session = 0x%x\n",
-                                         idx));
-                       if (is_reset == INIT_ADAPTER) {
-                               nt_ddb_idx = vmalloc(fw_idx_size);
-                               if (!nt_ddb_idx)
-                                       break;
-
-                               nt_ddb_idx->fw_ddb_idx = idx;
-
-                               memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
-                                      sizeof(struct dev_db_entry));
-
-                               if (qla4xxx_is_flash_ddb_exists(ha, &list_nt,
-                                               fw_ddb_entry) == QLA_SUCCESS) {
-                                       vfree(nt_ddb_idx);
-                                       goto continue_next_nt;
-                               }
-                               list_add_tail(&nt_ddb_idx->list, &list_nt);
-                       } else if (is_reset == RESET_ADAPTER) {
-                               if (qla4xxx_is_session_exists(ha,
-                                                  fw_ddb_entry) == QLA_SUCCESS)
-                                       goto continue_next_nt;
-                       }
+               if (!(state == DDB_DS_NO_CONNECTION_ACTIVE ||
+                   state == DDB_DS_SESSION_FAILED))
+                       goto continue_next_nt;
 
-                       /* Create session object, with INVALID_ENTRY,
-                        * the targer_id would get set when we issue the login
-                        */
-                       cls_sess = iscsi_session_setup(&qla4xxx_iscsi_transport,
-                                               ha->host, cmds_max,
-                                               sizeof(struct ddb_entry),
-                                               sizeof(struct ql4_task_data),
-                                               initial_cmdsn, INVALID_ENTRY);
-                       if (!cls_sess)
-                               goto exit_ddb_list;
+               DEBUG2(ql4_printk(KERN_INFO, ha,
+                                 "Adding  DDB to session = 0x%x\n", idx));
+               if (is_reset == INIT_ADAPTER) {
+                       nt_ddb_idx = vmalloc(fw_idx_size);
+                       if (!nt_ddb_idx)
+                               break;
 
-                       /*
-                        * iscsi_session_setup increments the driver reference
-                        * count which wouldn't let the driver to be unloaded.
-                        * so calling module_put function to decrement the
-                        * reference count.
-                        **/
-                       module_put(qla4xxx_iscsi_transport.owner);
-                       sess = cls_sess->dd_data;
-                       ddb_entry = sess->dd_data;
-                       ddb_entry->sess = cls_sess;
+                       nt_ddb_idx->fw_ddb_idx = idx;
 
-                       cls_sess->recovery_tmo = ql4xsess_recovery_tmo;
-                       memcpy(&ddb_entry->fw_ddb_entry, fw_ddb_entry,
+                       memcpy(&nt_ddb_idx->fw_ddb, fw_ddb_entry,
                               sizeof(struct dev_db_entry));
 
-                       qla4xxx_setup_flash_ddb_entry(ha, ddb_entry);
-
-                       cls_conn = iscsi_conn_setup(cls_sess,
-                                                   sizeof(struct qla_conn),
-                                                   conn_id);
-                       if (!cls_conn)
-                               goto exit_ddb_list;
-
-                       ddb_entry->conn = cls_conn;
-
-                       /* Setup ep, for displaying attributes in sysfs */
-                       ep = qla4xxx_get_ep_fwdb(ha, fw_ddb_entry);
-                       if (ep) {
-                               ep->conn = cls_conn;
-                               cls_conn->ep = ep;
-                       } else {
-                               DEBUG2(ql4_printk(KERN_ERR, ha,
-                                                 "Unable to get ep\n"));
-                       }
-
-                       /* Update sess/conn params */
-                       qla4xxx_copy_fwddb_param(ha, fw_ddb_entry, cls_sess,
-                                                cls_conn);
-
-                       if (is_reset == RESET_ADAPTER) {
-                               iscsi_block_session(cls_sess);
-                               /* Use the relogin path to discover new devices
-                                *  by short-circuting the logic of setting
-                                *  timer to relogin - instead set the flags
-                                *  to initiate login right away.
-                                */
-                               set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
-                               set_bit(DF_RELOGIN, &ddb_entry->flags);
+                       if (qla4xxx_is_flash_ddb_exists(ha, list_nt,
+                                       fw_ddb_entry) == QLA_SUCCESS) {
+                               vfree(nt_ddb_idx);
+                               goto continue_next_nt;
                        }
+                       list_add_tail(&nt_ddb_idx->list, list_nt);
+               } else if (is_reset == RESET_ADAPTER) {
+                       if (qla4xxx_is_session_exists(ha, fw_ddb_entry) ==
+                                                               QLA_SUCCESS)
+                               goto continue_next_nt;
                }
+
+               ret = qla4xxx_sess_conn_setup(ha, fw_ddb_entry, is_reset);
+               if (ret == QLA_ERROR)
+                       goto exit_nt_list;
+
 continue_next_nt:
                if (next_idx == 0)
                        break;
        }
-exit_ddb_list:
-       qla4xxx_free_nt_list(&list_nt);
+
+exit_nt_list:
        if (fw_ddb_entry)
                dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma);
+}
+
+/**
+ * qla4xxx_build_ddb_list - Build ddb list and setup sessions
+ * @ha: pointer to adapter structure
+ * @is_reset: Is this init path or reset path
+ *
+ * Create a list of sendtargets (st) from firmware DDBs, issue sendtargets
+ * using connection open, then create the list of normal targets (nt)
+ * from firmware DDBs. Based on the nt list, set up session and connection
+ * objects.
+ **/
+void qla4xxx_build_ddb_list(struct scsi_qla_host *ha, int is_reset)
+{
+       uint16_t tmo = 0;
+       struct list_head list_st, list_nt;
+       struct qla_ddb_index  *st_ddb_idx, *st_ddb_idx_tmp;
+       unsigned long wtime;
+
+       if (!test_bit(AF_LINK_UP, &ha->flags)) {
+               set_bit(AF_BUILD_DDB_LIST, &ha->flags);
+               ha->is_reset = is_reset;
+               return;
+       }
+
+       INIT_LIST_HEAD(&list_st);
+       INIT_LIST_HEAD(&list_nt);
+
+       qla4xxx_build_st_list(ha, &list_st);
+
+       /* Before issuing conn open mbox, ensure all IP states are configured
+        * Note, conn open fails if IPs are not configured
+        */
+       qla4xxx_wait_for_ip_configuration(ha);
+
+       /* Go thru the STs and fire the sendtargets by issuing conn open mbx */
+       list_for_each_entry_safe(st_ddb_idx, st_ddb_idx_tmp, &list_st, list) {
+               qla4xxx_conn_open(ha, st_ddb_idx->fw_ddb_idx);
+       }
+
+       /* Wait to ensure all sendtargets are done; wait a minimum of 12 sec */
+       tmo = ((ha->def_timeout > LOGIN_TOV) &&
+              (ha->def_timeout < LOGIN_TOV * 10) ?
+              ha->def_timeout : LOGIN_TOV);
+
+       DEBUG2(ql4_printk(KERN_INFO, ha,
+                         "Default time to wait for build ddb %d\n", tmo));
+
+       wtime = jiffies + (HZ * tmo);
+       do {
+               if (list_empty(&list_st))
+                       break;
+
+               qla4xxx_remove_failed_ddb(ha, &list_st);
+               schedule_timeout_uninterruptible(HZ / 10);
+       } while (time_after(wtime, jiffies));
+
+       /* Free up the sendtargets list */
+       qla4xxx_free_ddb_list(&list_st);
+
+       qla4xxx_build_nt_list(ha, &list_nt, is_reset);
+
+       qla4xxx_free_ddb_list(&list_nt);
 
        qla4xxx_free_ddb_index(ha);
 }
 
-
 /**
  * qla4xxx_probe_adapter - callback function to probe HBA
  * @pdev: pointer to pci_dev structure
index 26a3fa34a33c0594c3dfd4fa78f73c75db0fb28c..133989b3a9f4da0d7deed83682c6924c3a654330 100644 (file)
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION "5.02.00-k10"
+#define QLA4XXX_DRIVER_VERSION "5.02.00-k12"
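Aside: several of the qla4xxx hunks above apply the same guard to the firmware-supplied def_timeout: the value is used only when it lies strictly between LOGIN_TOV and LOGIN_TOV * 10, otherwise it falls back to LOGIN_TOV. A hedged stand-alone sketch of that clamp (clamp_def_timeout is a hypothetical helper name, and LOGIN_TOV is assumed to be 12 seconds, matching the 12 sec minimum noted in the build-ddb comment):

        #include <stdint.h>
        #include <stdio.h>

        #define LOGIN_TOV 12    /* assumed value; the driver comments mention a 12 sec minimum */

        /* Mirrors the ternary used for default_relogin_timeout and the sendtargets wait. */
        static uint16_t clamp_def_timeout(uint16_t def_timeout)
        {
                return (def_timeout > LOGIN_TOV && def_timeout < LOGIN_TOV * 10) ?
                        def_timeout : LOGIN_TOV;
        }

        int main(void)
        {
                printf("%d %d %d\n",
                       clamp_def_timeout(5),    /* below range -> 12 */
                       clamp_def_timeout(60),   /* in range    -> 60 */
                       clamp_def_timeout(600)); /* above range -> 12 */
                return 0;
        }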
index f85cfa6c47b5212982bcbfcd9065936c27360f3d..b2c95dbe9d651d3a82b9e7a269ec3b5bd7e7c167 100644 (file)
@@ -1316,15 +1316,10 @@ static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
        }
 
        if (scsi_target_is_busy(starget)) {
-               if (list_empty(&sdev->starved_entry))
-                       list_add_tail(&sdev->starved_entry,
-                                     &shost->starved_list);
+               list_move_tail(&sdev->starved_entry, &shost->starved_list);
                return 0;
        }
 
-       /* We're OK to process the command, so we can't be starved */
-       if (!list_empty(&sdev->starved_entry))
-               list_del_init(&sdev->starved_entry);
        return 1;
 }
 
index 1b214910b71414804f6d0c6dee0932ead1f98156..f59d4a05ecd74168910790d2051673508d3a40e4 100644 (file)
@@ -3048,7 +3048,8 @@ fc_remote_port_rolechg(struct fc_rport  *rport, u32 roles)
 
                spin_lock_irqsave(shost->host_lock, flags);
                rport->flags &= ~(FC_RPORT_FAST_FAIL_TIMEDOUT |
-                                 FC_RPORT_DEVLOSS_PENDING);
+                                 FC_RPORT_DEVLOSS_PENDING |
+                                 FC_RPORT_DEVLOSS_CALLBK_DONE);
                spin_unlock_irqrestore(shost->host_lock, flags);
 
                /* ensure any stgt delete functions are done */
index 02d99982a74d9048edfb73359ed2e38d08b163f8..eacd46bb36b95fe2b5caf6e82f3940427a250910 100644 (file)
@@ -2368,16 +2368,15 @@ static ssize_t
 sg_proc_write_adio(struct file *filp, const char __user *buffer,
                   size_t count, loff_t *off)
 {
-       int num;
-       char buff[11];
+       int err;
+       unsigned long num;
 
        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
-       num = (count < 10) ? count : 10;
-       if (copy_from_user(buff, buffer, num))
-               return -EFAULT;
-       buff[num] = '\0';
-       sg_allow_dio = simple_strtoul(buff, NULL, 10) ? 1 : 0;
+       err = kstrtoul_from_user(buffer, count, 0, &num);
+       if (err)
+               return err;
+       sg_allow_dio = num ? 1 : 0;
        return count;
 }
 
@@ -2390,17 +2389,15 @@ static ssize_t
 sg_proc_write_dressz(struct file *filp, const char __user *buffer,
                     size_t count, loff_t *off)
 {
-       int num;
+       int err;
        unsigned long k = ULONG_MAX;
-       char buff[11];
 
        if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
                return -EACCES;
-       num = (count < 10) ? count : 10;
-       if (copy_from_user(buff, buffer, num))
-               return -EFAULT;
-       buff[num] = '\0';
-       k = simple_strtoul(buff, NULL, 10);
+
+       err = kstrtoul_from_user(buffer, count, 0, &k);
+       if (err)
+               return err;
        if (k <= 1048576) {     /* limit "big buff" to 1 MB */
                sg_big_buff = k;
                return count;
index b4543f575f466fc3c7fed5f3edad05be9595d0cd..36d1ed7817ebf9d52631c970023d31b31ff494e0 100644 (file)
@@ -839,6 +839,10 @@ static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
        struct sym_lcb *lp = sym_lp(tp, sdev->lun);
        unsigned long flags;
 
+       /* if slave_alloc returned before allocating a sym_lcb, return */
+       if (!lp)
+               return;
+
        spin_lock_irqsave(np->s.host->host_lock, flags);
 
        if (lp->busy_itlq || lp->busy_itl) {
index 3f9a47ec67dc814764bbee0d22bdd03f7680400a..8293658e7cf910d12942ff0581a7f50735256c92 100644 (file)
@@ -299,7 +299,7 @@ config SPI_S3C24XX_FIQ
 
 config SPI_S3C64XX
        tristate "Samsung S3C64XX series type SPI"
-       depends on (ARCH_S3C64XX || ARCH_S5P64X0)
+       depends on (ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
        select S3C64XX_DMA if ARCH_S3C64XX
        help
          SPI driver for Samsung S3C64XX and newer SoCs.
index e743a45ee92c265bd8a2e13ecec9f98cf3eb926c..8418eb03665121db4a6caf809ea8efc4f12b170c 100644 (file)
@@ -131,7 +131,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
        rxchan = dws->rxchan;
 
        /* 2. Prepare the TX dma transfer */
-       txconf.direction = DMA_TO_DEVICE;
+       txconf.direction = DMA_MEM_TO_DEV;
        txconf.dst_addr = dws->dma_addr;
        txconf.dst_maxburst = LNW_DMA_MSIZE_16;
        txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -147,13 +147,13 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
        txdesc = txchan->device->device_prep_slave_sg(txchan,
                                &dws->tx_sgl,
                                1,
-                               DMA_TO_DEVICE,
+                               DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
        txdesc->callback = dw_spi_dma_done;
        txdesc->callback_param = dws;
 
        /* 3. Prepare the RX dma transfer */
-       rxconf.direction = DMA_FROM_DEVICE;
+       rxconf.direction = DMA_DEV_TO_MEM;
        rxconf.src_addr = dws->dma_addr;
        rxconf.src_maxburst = LNW_DMA_MSIZE_16;
        rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
@@ -169,7 +169,7 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, int cs_change)
        rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
                                &dws->rx_sgl,
                                1,
-                               DMA_FROM_DEVICE,
+                               DMA_DEV_TO_MEM,
                                DMA_PREP_INTERRUPT | DMA_COMPL_SKIP_DEST_UNMAP);
        rxdesc->callback = dw_spi_dma_done;
        rxdesc->callback_param = dws;
index 0a282e5fcc9c3fe4931a871acd6d89708162bd0d..d46e55c720b7f71717d5772b034c5b7f3e159dc4 100644 (file)
@@ -551,6 +551,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
        struct dma_async_tx_descriptor *txd;
        enum dma_slave_buswidth buswidth;
        struct dma_slave_config conf;
+       enum dma_transfer_direction slave_dirn;
        struct scatterlist *sg;
        struct sg_table *sgt;
        struct dma_chan *chan;
@@ -573,6 +574,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
 
                conf.src_addr = espi->sspdr_phys;
                conf.src_addr_width = buswidth;
+               slave_dirn = DMA_DEV_TO_MEM;
        } else {
                chan = espi->dma_tx;
                buf = t->tx_buf;
@@ -580,6 +582,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
 
                conf.dst_addr = espi->sspdr_phys;
                conf.dst_addr_width = buswidth;
+               slave_dirn = DMA_MEM_TO_DEV;
        }
 
        ret = dmaengine_slave_config(chan, &conf);
@@ -631,7 +634,7 @@ ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_data_direction dir)
                return ERR_PTR(-ENOMEM);
 
        txd = chan->device->device_prep_slave_sg(chan, sgt->sgl, nents,
-                                                dir, DMA_CTRL_ACK);
+                                                slave_dirn, DMA_CTRL_ACK);
        if (!txd) {
                dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
                return ERR_PTR(-ENOMEM);
@@ -979,7 +982,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
        dma_cap_set(DMA_SLAVE, mask);
 
        espi->dma_rx_data.port = EP93XX_DMA_SSP;
-       espi->dma_rx_data.direction = DMA_FROM_DEVICE;
+       espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
        espi->dma_rx_data.name = "ep93xx-spi-rx";
 
        espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
@@ -990,7 +993,7 @@ static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
        }
 
        espi->dma_tx_data.port = EP93XX_DMA_SSP;
-       espi->dma_tx_data.direction = DMA_TO_DEVICE;
+       espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
        espi->dma_tx_data.name = "ep93xx-spi-tx";
 
        espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
index f1f5efbc3404aefc4aa68dba8ded5a4c72275bdc..2f9cb43a239870b6db396b8507fb6a67893f1f37 100644 (file)
@@ -900,11 +900,11 @@ static int configure_dma(struct pl022 *pl022)
 {
        struct dma_slave_config rx_conf = {
                .src_addr = SSP_DR(pl022->phybase),
-               .direction = DMA_FROM_DEVICE,
+               .direction = DMA_DEV_TO_MEM,
        };
        struct dma_slave_config tx_conf = {
                .dst_addr = SSP_DR(pl022->phybase),
-               .direction = DMA_TO_DEVICE,
+               .direction = DMA_MEM_TO_DEV,
        };
        unsigned int pages;
        int ret;
@@ -1041,7 +1041,7 @@ static int configure_dma(struct pl022 *pl022)
        rxdesc = rxchan->device->device_prep_slave_sg(rxchan,
                                      pl022->sgt_rx.sgl,
                                      rx_sglen,
-                                     DMA_FROM_DEVICE,
+                                     DMA_DEV_TO_MEM,
                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!rxdesc)
                goto err_rxdesc;
@@ -1049,7 +1049,7 @@ static int configure_dma(struct pl022 *pl022)
        txdesc = txchan->device->device_prep_slave_sg(txchan,
                                      pl022->sgt_tx.sgl,
                                      tx_sglen,
-                                     DMA_TO_DEVICE,
+                                     DMA_MEM_TO_DEV,
                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!txdesc)
                goto err_txdesc;
index 7086583b910708e80cf1b133187eb9be236ee4ff..10182eb500681719fc0f4db5e017c76d56f60370 100644 (file)
@@ -1079,7 +1079,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
        }
        sg = dma->sg_rx_p;
        desc_rx = dma->chan_rx->device->device_prep_slave_sg(dma->chan_rx, sg,
-                                       num, DMA_FROM_DEVICE,
+                                       num, DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_rx) {
                dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
@@ -1124,7 +1124,7 @@ static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
        }
        sg = dma->sg_tx_p;
        desc_tx = dma->chan_tx->device->device_prep_slave_sg(dma->chan_tx,
-                                       sg, num, DMA_TO_DEVICE,
+                                       sg, num, DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc_tx) {
                dev_err(&data->master->dev, "%s:device_prep_slave_sg Failed\n",
@@ -1720,7 +1720,7 @@ static int pch_spi_resume(struct pci_dev *pdev)
 
 #endif
 
-static struct pci_driver pch_spi_pcidev = {
+static struct pci_driver pch_spi_pcidev_driver = {
        .name = "pch_spi",
        .id_table = pch_spi_pcidev_id,
        .probe = pch_spi_probe,
@@ -1736,7 +1736,7 @@ static int __init pch_spi_init(void)
        if (ret)
                return ret;
 
-       ret = pci_register_driver(&pch_spi_pcidev);
+       ret = pci_register_driver(&pch_spi_pcidev_driver);
        if (ret)
                return ret;
 
@@ -1746,7 +1746,7 @@ module_init(pch_spi_init);
 
 static void __exit pch_spi_exit(void)
 {
-       pci_unregister_driver(&pch_spi_pcidev);
+       pci_unregister_driver(&pch_spi_pcidev_driver);
        platform_driver_unregister(&pch_spi_pd_driver);
 }
 module_exit(pch_spi_exit);
index 70e006b50f292259025b50a5b03a2aa26a9c2b5f..5443e25086e9f33b00980d314a874863653c82f8 100644 (file)
@@ -1279,3 +1279,4 @@ static struct usb_driver go7007_usb_driver = {
 };
 
 module_usb_driver(go7007_usb_driver);
+MODULE_LICENSE("GPL v2");
index 8599545cdf9e6d6c90a07510844f6290ab0a1be6..44262908def547557e5c3fe0a8a8cd34a00dfbd7 100644 (file)
@@ -27,8 +27,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_parameters.h"
@@ -284,8 +283,8 @@ static struct iscsi_np *iscsit_get_np(
                        sock_in6 = (struct sockaddr_in6 *)sockaddr;
                        sock_in6_e = (struct sockaddr_in6 *)&np->np_sockaddr;
 
-                       if (!memcmp((void *)&sock_in6->sin6_addr.in6_u,
-                                   (void *)&sock_in6_e->sin6_addr.in6_u,
+                       if (!memcmp(&sock_in6->sin6_addr.in6_u,
+                                   &sock_in6_e->sin6_addr.in6_u,
                                    sizeof(struct in6_addr)))
                                ip_match = 1;
 
@@ -1062,7 +1061,7 @@ attach_cmd:
        if (ret < 0)
                return iscsit_add_reject_from_cmd(
                                ISCSI_REASON_BOOKMARK_NO_RESOURCES,
-                               1, 1, buf, cmd);
+                               1, 0, buf, cmd);
        /*
         * Check the CmdSN against ExpCmdSN/MaxCmdSN here if
         * the Immediate Bit is not set, and no Immediate
@@ -1225,7 +1224,7 @@ static void iscsit_do_crypto_hash_buf(
 
        crypto_hash_init(hash);
 
-       sg_init_one(&sg, (u8 *)buf, payload_length);
+       sg_init_one(&sg, buf, payload_length);
        crypto_hash_update(hash, &sg, payload_length);
 
        if (padding) {
@@ -1603,7 +1602,7 @@ static int iscsit_handle_nop_out(
                /*
                 * Attach ping data to struct iscsi_cmd->buf_ptr.
                 */
-               cmd->buf_ptr = (void *)ping_data;
+               cmd->buf_ptr = ping_data;
                cmd->buf_ptr_size = payload_length;
 
                pr_debug("Got %u bytes of NOPOUT ping"
@@ -3165,6 +3164,30 @@ static int iscsit_send_task_mgt_rsp(
        return 0;
 }
 
+static bool iscsit_check_inaddr_any(struct iscsi_np *np)
+{
+       bool ret = false;
+
+       if (np->np_sockaddr.ss_family == AF_INET6) {
+               const struct sockaddr_in6 sin6 = {
+                       .sin6_addr = IN6ADDR_ANY_INIT };
+               struct sockaddr_in6 *sock_in6 =
+                        (struct sockaddr_in6 *)&np->np_sockaddr;
+
+               if (!memcmp(sock_in6->sin6_addr.s6_addr,
+                               sin6.sin6_addr.s6_addr, 16))
+                       ret = true;
+       } else {
+               struct sockaddr_in * sock_in =
+                       (struct sockaddr_in *)&np->np_sockaddr;
+
+               if (sock_in->sin_addr.s_addr == INADDR_ANY)
+                       ret = true;
+       }
+
+       return ret;
+}
+
 static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
 {
        char *payload = NULL;
@@ -3197,7 +3220,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
                        end_of_buf = 1;
                        goto eob;
                }
-               memcpy((void *)payload + payload_len, buf, len);
+               memcpy(payload + payload_len, buf, len);
                payload_len += len;
 
                spin_lock(&tiqn->tiqn_tpg_lock);
@@ -3214,12 +3237,17 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
                        spin_lock(&tpg->tpg_np_lock);
                        list_for_each_entry(tpg_np, &tpg->tpg_gnp_list,
                                                tpg_np_list) {
+                               struct iscsi_np *np = tpg_np->tpg_np;
+                               bool inaddr_any = iscsit_check_inaddr_any(np);
+
                                len = sprintf(buf, "TargetAddress="
                                        "%s%s%s:%hu,%hu",
-                                       (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
-                                       "[" : "", tpg_np->tpg_np->np_ip,
-                                       (tpg_np->tpg_np->np_sockaddr.ss_family == AF_INET6) ?
-                                       "]" : "", tpg_np->tpg_np->np_port,
+                                       (np->np_sockaddr.ss_family == AF_INET6) ?
+                                       "[" : "", (inaddr_any == false) ?
+                                               np->np_ip : conn->local_ip,
+                                       (np->np_sockaddr.ss_family == AF_INET6) ?
+                                       "]" : "", (inaddr_any == false) ?
+                                               np->np_port : conn->local_port,
                                        tpg->tpgt);
                                len += 1;
 
@@ -3229,7 +3257,7 @@ static int iscsit_build_sendtargets_response(struct iscsi_cmd *cmd)
                                        end_of_buf = 1;
                                        goto eob;
                                }
-                               memcpy((void *)payload + payload_len, buf, len);
+                               memcpy(payload + payload_len, buf, len);
                                payload_len += len;
                        }
                        spin_unlock(&tpg->tpg_np_lock);
@@ -3486,7 +3514,7 @@ int iscsi_target_tx_thread(void *arg)
        struct iscsi_conn *conn;
        struct iscsi_queue_req *qr = NULL;
        struct se_cmd *se_cmd;
-       struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+       struct iscsi_thread_set *ts = arg;
        /*
         * Allow ourselves to be interrupted by SIGINT so that a
         * connection recovery / failure event can be triggered externally.
@@ -3775,7 +3803,7 @@ int iscsi_target_rx_thread(void *arg)
        u8 buffer[ISCSI_HDR_LEN], opcode;
        u32 checksum = 0, digest = 0;
        struct iscsi_conn *conn = NULL;
-       struct iscsi_thread_set *ts = (struct iscsi_thread_set *)arg;
+       struct iscsi_thread_set *ts = arg;
        struct kvec iov;
        /*
         * Allow ourselves to be interrupted by SIGINT so that a
index 1cd6ce373b83508fd396f82b80290b91372c9e96..db0cf7c8adde04db2b272660a6047297fa848a78 100644 (file)
@@ -82,7 +82,7 @@ static void chap_gen_challenge(
        unsigned int *c_len)
 {
        unsigned char challenge_asciihex[CHAP_CHALLENGE_LENGTH * 2 + 1];
-       struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+       struct iscsi_chap *chap = conn->auth_protocol;
 
        memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
 
@@ -120,7 +120,7 @@ static struct iscsi_chap *chap_server_open(
        if (!conn->auth_protocol)
                return NULL;
 
-       chap = (struct iscsi_chap *) conn->auth_protocol;
+       chap = conn->auth_protocol;
        /*
         * We only support MD5 MDA presently.
         */
@@ -165,14 +165,15 @@ static int chap_server_compute_md5(
        unsigned int *nr_out_len)
 {
        char *endptr;
-       unsigned char id, digest[MD5_SIGNATURE_SIZE];
+       unsigned long id;
+       unsigned char digest[MD5_SIGNATURE_SIZE];
        unsigned char type, response[MD5_SIGNATURE_SIZE * 2 + 2];
        unsigned char identifier[10], *challenge = NULL;
        unsigned char *challenge_binhex = NULL;
        unsigned char client_digest[MD5_SIGNATURE_SIZE];
        unsigned char server_digest[MD5_SIGNATURE_SIZE];
        unsigned char chap_n[MAX_CHAP_N_SIZE], chap_r[MAX_RESPONSE_LENGTH];
-       struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+       struct iscsi_chap *chap = conn->auth_protocol;
        struct crypto_hash *tfm;
        struct hash_desc desc;
        struct scatterlist sg;
@@ -246,7 +247,7 @@ static int chap_server_compute_md5(
                goto out;
        }
 
-       sg_init_one(&sg, (void *)&chap->id, 1);
+       sg_init_one(&sg, &chap->id, 1);
        ret = crypto_hash_update(&desc, &sg, 1);
        if (ret < 0) {
                pr_err("crypto_hash_update() failed for id\n");
@@ -254,7 +255,7 @@ static int chap_server_compute_md5(
                goto out;
        }
 
-       sg_init_one(&sg, (void *)&auth->password, strlen(auth->password));
+       sg_init_one(&sg, &auth->password, strlen(auth->password));
        ret = crypto_hash_update(&desc, &sg, strlen(auth->password));
        if (ret < 0) {
                pr_err("crypto_hash_update() failed for password\n");
@@ -262,7 +263,7 @@ static int chap_server_compute_md5(
                goto out;
        }
 
-       sg_init_one(&sg, (void *)chap->challenge, CHAP_CHALLENGE_LENGTH);
+       sg_init_one(&sg, chap->challenge, CHAP_CHALLENGE_LENGTH);
        ret = crypto_hash_update(&desc, &sg, CHAP_CHALLENGE_LENGTH);
        if (ret < 0) {
                pr_err("crypto_hash_update() failed for challenge\n");
@@ -305,14 +306,17 @@ static int chap_server_compute_md5(
        }
 
        if (type == HEX)
-               id = (unsigned char)simple_strtoul((char *)&identifier[2],
-                                       &endptr, 0);
+               id = simple_strtoul(&identifier[2], &endptr, 0);
        else
-               id = (unsigned char)simple_strtoul(identifier, &endptr, 0);
+               id = simple_strtoul(identifier, &endptr, 0);
+       if (id > 255) {
+               pr_err("chap identifier: %lu greater than 255\n", id);
+               goto out;
+       }
        /*
         * RFC 1994 says Identifier is no more than octet (8 bits).
         */
-       pr_debug("[server] Got CHAP_I=%d\n", id);
+       pr_debug("[server] Got CHAP_I=%lu\n", id);
        /*
         * Get CHAP_C.
         */
@@ -351,7 +355,7 @@ static int chap_server_compute_md5(
                goto out;
        }
 
-       sg_init_one(&sg, (void *)&id, 1);
+       sg_init_one(&sg, &id, 1);
        ret = crypto_hash_update(&desc, &sg, 1);
        if (ret < 0) {
                pr_err("crypto_hash_update() failed for id\n");
@@ -359,7 +363,7 @@ static int chap_server_compute_md5(
                goto out;
        }
 
-       sg_init_one(&sg, (void *)auth->password_mutual,
+       sg_init_one(&sg, auth->password_mutual,
                                strlen(auth->password_mutual));
        ret = crypto_hash_update(&desc, &sg, strlen(auth->password_mutual));
        if (ret < 0) {
@@ -371,7 +375,7 @@ static int chap_server_compute_md5(
        /*
         * Convert received challenge to binary hex.
         */
-       sg_init_one(&sg, (void *)challenge_binhex, challenge_len);
+       sg_init_one(&sg, challenge_binhex, challenge_len);
        ret = crypto_hash_update(&desc, &sg, challenge_len);
        if (ret < 0) {
                pr_err("crypto_hash_update() failed for ma challenge\n");
@@ -414,7 +418,7 @@ static int chap_got_response(
        char *nr_out_ptr,
        unsigned int *nr_out_len)
 {
-       struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+       struct iscsi_chap *chap = conn->auth_protocol;
 
        switch (chap->digest_type) {
        case CHAP_DIGEST_MD5:
@@ -437,7 +441,7 @@ u32 chap_main_loop(
        int *in_len,
        int *out_len)
 {
-       struct iscsi_chap *chap = (struct iscsi_chap *) conn->auth_protocol;
+       struct iscsi_chap *chap = conn->auth_protocol;
 
        if (!chap) {
                chap = chap_server_open(conn, auth, in_text, out_text, out_len);
index db327845e46b46fc04d6f2be7a21d68d9aa53d53..6b35b37988edef2015df364158baad734a119e93 100644 (file)
 
 #include <linux/configfs.h>
 #include <linux/export.h>
+#include <linux/inet.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
@@ -56,8 +53,7 @@ struct iscsi_portal_group *lio_get_tpg_from_tpg_item(
 {
        struct se_portal_group *se_tpg = container_of(to_config_group(item),
                                        struct se_portal_group, tpg_group);
-       struct iscsi_portal_group *tpg =
-                       (struct iscsi_portal_group *)se_tpg->se_tpg_fabric_ptr;
+       struct iscsi_portal_group *tpg = se_tpg->se_tpg_fabric_ptr;
        int ret;
 
        if (!tpg) {
@@ -1225,7 +1221,7 @@ struct se_portal_group *lio_target_tiqn_addtpg(
 
        ret = core_tpg_register(
                        &lio_target_fabric_configfs->tf_ops,
-                       wwn, &tpg->tpg_se_tpg, (void *)tpg,
+                       wwn, &tpg->tpg_se_tpg, tpg,
                        TRANSPORT_TPG_TYPE_NORMAL);
        if (ret < 0)
                return NULL;
index f1a02dad05a02855b4ef59a6341e4bb61660ef30..0ec3b77a0c272e39066fdd8104771a88f59cce08 100644 (file)
@@ -508,6 +508,7 @@ struct iscsi_conn {
        u16                     cid;
        /* Remote TCP Port */
        u16                     login_port;
+       u16                     local_port;
        int                     net_size;
        u32                     auth_id;
 #define CONNFLAG_SCTP_STRUCT_FILE                      0x01
@@ -527,6 +528,7 @@ struct iscsi_conn {
        unsigned char           bad_hdr[ISCSI_HDR_LEN];
 #define IPV6_ADDRESS_SPACE                             48
        unsigned char           login_ip[IPV6_ADDRESS_SPACE];
+       unsigned char           local_ip[IPV6_ADDRESS_SPACE];
        int                     conn_usage_count;
        int                     conn_waiting_on_uc;
        atomic_t                check_immediate_queue;
@@ -561,8 +563,8 @@ struct iscsi_conn {
        struct hash_desc        conn_tx_hash;
        /* Used for scheduling TX and RX connection kthreads */
        cpumask_var_t           conn_cpumask;
-       int                     conn_rx_reset_cpumask:1;
-       int                     conn_tx_reset_cpumask:1;
+       unsigned int            conn_rx_reset_cpumask:1;
+       unsigned int            conn_tx_reset_cpumask:1;
        /* list_head of struct iscsi_cmd for this connection */
        struct list_head        conn_cmd_list;
        struct list_head        immed_queue_list;
index a19fa5eea88e8edcd3db9ab57bc399ad150dd15d..f63ea35bc4ae7737c7ad16b443b2a6ea9c017e2e 100644 (file)
@@ -21,8 +21,7 @@
 
 #include <scsi/scsi_device.h>
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_device.h"
index b7ffc3cd40cca0141ac02441894a1bd018fc2f0e..478451167b62b4c1b3b443e08b1bb6c06ff10cad 100644 (file)
@@ -21,7 +21,7 @@
 
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_seq_pdu_list.h"
index 101b1beb3bca205aed7611ec4424f54cc5b20671..27901e37c1256c9daad677824e5931cc41485da1 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/list.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_seq_pdu_list.h"
@@ -1238,7 +1238,7 @@ void iscsit_mod_dataout_timer(struct iscsi_cmd *cmd)
 {
        struct iscsi_conn *conn = cmd->conn;
        struct iscsi_session *sess = conn->sess;
-       struct iscsi_node_attrib *na = na = iscsit_tpg_get_node_attrib(sess);
+       struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
 
        spin_lock_bh(&cmd->dataout_timeout_lock);
        if (!(cmd->dataout_timer_flags & ISCSI_TF_RUNNING)) {
@@ -1261,7 +1261,7 @@ void iscsit_start_dataout_timer(
        struct iscsi_conn *conn)
 {
        struct iscsi_session *sess = conn->sess;
-       struct iscsi_node_attrib *na = na = iscsit_tpg_get_node_attrib(sess);
+       struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
 
        if (cmd->dataout_timer_flags & ISCSI_TF_RUNNING)
                return;
index 0b8404c30125256a99bff82082516c324ff16364..1af1f21af21fd01735eab9cec3f2369246e69bd4 100644 (file)
@@ -21,7 +21,7 @@
 
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_datain_values.h"
index d734bdec24f9cf2b451a7f905b07fab421a19b3b..38cb7ce8469ed36084eb739bd7336f4376f03cbd 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/crypto.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_tq.h"
@@ -143,7 +143,7 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
        list_for_each_entry_safe(se_sess, se_sess_tmp, &se_tpg->tpg_sess_list,
                        sess_list) {
 
-               sess_p = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+               sess_p = se_sess->fabric_sess_ptr;
                spin_lock(&sess_p->conn_lock);
                if (atomic_read(&sess_p->session_fall_back_to_erl0) ||
                    atomic_read(&sess_p->session_logout) ||
@@ -151,9 +151,9 @@ int iscsi_check_for_session_reinstatement(struct iscsi_conn *conn)
                        spin_unlock(&sess_p->conn_lock);
                        continue;
                }
-               if (!memcmp((void *)sess_p->isid, (void *)conn->sess->isid, 6) &&
-                  (!strcmp((void *)sess_p->sess_ops->InitiatorName,
-                           (void *)initiatorname_param->value) &&
+               if (!memcmp(sess_p->isid, conn->sess->isid, 6) &&
+                  (!strcmp(sess_p->sess_ops->InitiatorName,
+                           initiatorname_param->value) &&
                   (sess_p->sess_ops->SessionType == sessiontype))) {
                        atomic_set(&sess_p->session_reinstatement, 1);
                        spin_unlock(&sess_p->conn_lock);
@@ -229,7 +229,7 @@ static int iscsi_login_zero_tsih_s1(
 
        iscsi_login_set_conn_values(sess, conn, pdu->cid);
        sess->init_task_tag     = pdu->itt;
-       memcpy((void *)&sess->isid, (void *)pdu->isid, 6);
+       memcpy(&sess->isid, pdu->isid, 6);
        sess->exp_cmd_sn        = pdu->cmdsn;
        INIT_LIST_HEAD(&sess->sess_conn_list);
        INIT_LIST_HEAD(&sess->sess_ooo_cmdsn_list);
@@ -440,8 +440,7 @@ static int iscsi_login_non_zero_tsih_s2(
                    atomic_read(&sess_p->session_logout) ||
                   (sess_p->time2retain_timer_flags & ISCSI_TF_EXPIRED))
                        continue;
-               if (!memcmp((const void *)sess_p->isid,
-                    (const void *)pdu->isid, 6) &&
+               if (!memcmp(sess_p->isid, pdu->isid, 6) &&
                     (sess_p->tsih == pdu->tsih)) {
                        iscsit_inc_session_usage_count(sess_p);
                        iscsit_stop_time2retain_timer(sess_p);
@@ -616,8 +615,8 @@ static int iscsi_post_login_handler(
                }
 
                pr_debug("iSCSI Login successful on CID: %hu from %s to"
-                       " %s:%hu,%hu\n", conn->cid, conn->login_ip, np->np_ip,
-                               np->np_port, tpg->tpgt);
+                       " %s:%hu,%hu\n", conn->cid, conn->login_ip,
+                       conn->local_ip, conn->local_port, tpg->tpgt);
 
                list_add_tail(&conn->conn_list, &sess->sess_conn_list);
                atomic_inc(&sess->nconn);
@@ -654,12 +653,13 @@ static int iscsi_post_login_handler(
 
        spin_lock_bh(&se_tpg->session_lock);
        __transport_register_session(&sess->tpg->tpg_se_tpg,
-                       se_sess->se_node_acl, se_sess, (void *)sess);
+                       se_sess->se_node_acl, se_sess, sess);
        pr_debug("Moving to TARG_SESS_STATE_LOGGED_IN.\n");
        sess->session_state = TARG_SESS_STATE_LOGGED_IN;
 
        pr_debug("iSCSI Login successful on CID: %hu from %s to %s:%hu,%hu\n",
-               conn->cid, conn->login_ip, np->np_ip, np->np_port, tpg->tpgt);
+               conn->cid, conn->login_ip, conn->local_ip, conn->local_port,
+               tpg->tpgt);
 
        spin_lock_bh(&sess->conn_lock);
        list_add_tail(&conn->conn_list, &sess->sess_conn_list);
@@ -811,7 +811,7 @@ int iscsi_target_setup_login_socket(
         * Setup the np->np_sockaddr from the passed sockaddr setup
         * in iscsi_target_configfs.c code..
         */
-       memcpy((void *)&np->np_sockaddr, (void *)sockaddr,
+       memcpy(&np->np_sockaddr, sockaddr,
                        sizeof(struct __kernel_sockaddr_storage));
 
        if (sockaddr->ss_family == AF_INET6)
@@ -821,6 +821,7 @@ int iscsi_target_setup_login_socket(
        /*
         * Set SO_REUSEADDR, and disable Nagel Algorithm with TCP_NODELAY.
         */
+       /* FIXME: Someone please explain why this is endian-safe */
        opt = 1;
        if (np->np_network_transport == ISCSI_TCP) {
                ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_NODELAY,
@@ -832,6 +833,7 @@ int iscsi_target_setup_login_socket(
                }
        }
 
+       /* FIXME: Someone please explain why this is endian-safe */
        ret = kernel_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
                        (char *)&opt, sizeof(opt));
        if (ret < 0) {
@@ -840,6 +842,14 @@ int iscsi_target_setup_login_socket(
                goto fail;
        }
 
+       ret = kernel_setsockopt(sock, IPPROTO_IP, IP_FREEBIND,
+                       (char *)&opt, sizeof(opt));
+       if (ret < 0) {
+               pr_err("kernel_setsockopt() for IP_FREEBIND"
+                       " failed\n");
+               goto fail;
+       }
+
        ret = kernel_bind(sock, (struct sockaddr *)&np->np_sockaddr, len);
        if (ret < 0) {
                pr_err("kernel_bind() failed: %d\n", ret);
@@ -1019,6 +1029,18 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
                snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c",
                                &sock_in6.sin6_addr.in6_u);
                conn->login_port = ntohs(sock_in6.sin6_port);
+
+               if (conn->sock->ops->getname(conn->sock,
+                               (struct sockaddr *)&sock_in6, &err, 0) < 0) {
+                       pr_err("sock_ops->getname() failed.\n");
+                       iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                                       ISCSI_LOGIN_STATUS_TARGET_ERROR);
+                       goto new_sess_out;
+               }
+               snprintf(conn->local_ip, sizeof(conn->local_ip), "%pI6c",
+                               &sock_in6.sin6_addr.in6_u);
+               conn->local_port = ntohs(sock_in6.sin6_port);
+
        } else {
                memset(&sock_in, 0, sizeof(struct sockaddr_in));
 
@@ -1031,6 +1053,16 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
                }
                sprintf(conn->login_ip, "%pI4", &sock_in.sin_addr.s_addr);
                conn->login_port = ntohs(sock_in.sin_port);
+
+               if (conn->sock->ops->getname(conn->sock,
+                               (struct sockaddr *)&sock_in, &err, 0) < 0) {
+                       pr_err("sock_ops->getname() failed.\n");
+                       iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
+                                       ISCSI_LOGIN_STATUS_TARGET_ERROR);
+                       goto new_sess_out;
+               }
+               sprintf(conn->local_ip, "%pI4", &sock_in.sin_addr.s_addr);
+               conn->local_port = ntohs(sock_in.sin_port);
        }
 
        conn->network_transport = np->np_network_transport;
@@ -1038,7 +1070,7 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        pr_debug("Received iSCSI login request from %s on %s Network"
                        " Portal %s:%hu\n", conn->login_ip,
                (conn->network_transport == ISCSI_TCP) ? "TCP" : "SCTP",
-                       np->np_ip, np->np_port);
+                       conn->local_ip, conn->local_port);
 
        pr_debug("Moving to TARG_CONN_STATE_IN_LOGIN.\n");
        conn->conn_state        = TARG_CONN_STATE_IN_LOGIN;
@@ -1206,7 +1238,7 @@ out:
 
 int iscsi_target_login_thread(void *arg)
 {
-       struct iscsi_np *np = (struct iscsi_np *)arg;
+       struct iscsi_np *np = arg;
        int ret;
 
        allow_signal(SIGINT);
index 98936cb7c2947ceb0edbaa41dda91d6641a9b892..e89fa7457254d6f44fde426dceaab84de0a99fa2 100644 (file)
@@ -21,7 +21,7 @@
 #include <linux/ctype.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_parameters.h"
@@ -732,7 +732,7 @@ static void iscsi_initiatorname_tolower(
        u32 iqn_size = strlen(param_buf), i;
 
        for (i = 0; i < iqn_size; i++) {
-               c = (char *)&param_buf[i];
+               c = &param_buf[i];
                if (!isupper(*c))
                        continue;
 
index aeafbe0cd7d11b294053f1424e5d91cb3c7336ac..b3c699c4fe8ceb8b9dc2fe35a3eb0e94abfc1126 100644 (file)
@@ -19,7 +19,6 @@
  ******************************************************************************/
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_device.h"
@@ -135,7 +134,7 @@ extern int iscsit_na_nopin_timeout(
                spin_lock_bh(&se_nacl->nacl_sess_lock);
                se_sess = se_nacl->nacl_sess;
                if (se_sess) {
-                       sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+                       sess = se_sess->fabric_sess_ptr;
 
                        spin_lock(&sess->conn_lock);
                        list_for_each_entry(conn, &sess->sess_conn_list,
index f1db83077e0a8418d448a7d9b210cc63014b1d3c..421d6947dc64247c2bc02a1136f8bb2e0489c64a 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/export.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
 #include <target/configfs_macros.h>
 
 #include "iscsi_target_core.h"
@@ -746,7 +745,7 @@ static ssize_t iscsi_stat_sess_show_attr_node(
        spin_lock_bh(&se_nacl->nacl_sess_lock);
        se_sess = se_nacl->nacl_sess;
        if (se_sess) {
-               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+               sess = se_sess->fabric_sess_ptr;
                if (sess)
                        ret = snprintf(page, PAGE_SIZE, "%u\n",
                                sess->sess_ops->SessionType ? 0 : ISCSI_NODE_INDEX);
@@ -770,7 +769,7 @@ static ssize_t iscsi_stat_sess_show_attr_indx(
        spin_lock_bh(&se_nacl->nacl_sess_lock);
        se_sess = se_nacl->nacl_sess;
        if (se_sess) {
-               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+               sess = se_sess->fabric_sess_ptr;
                if (sess)
                        ret = snprintf(page, PAGE_SIZE, "%u\n",
                                        sess->session_index);
@@ -794,7 +793,7 @@ static ssize_t iscsi_stat_sess_show_attr_cmd_pdus(
        spin_lock_bh(&se_nacl->nacl_sess_lock);
        se_sess = se_nacl->nacl_sess;
        if (se_sess) {
-               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+               sess = se_sess->fabric_sess_ptr;
                if (sess)
                        ret = snprintf(page, PAGE_SIZE, "%u\n", sess->cmd_pdus);
        }
@@ -817,7 +816,7 @@ static ssize_t iscsi_stat_sess_show_attr_rsp_pdus(
        spin_lock_bh(&se_nacl->nacl_sess_lock);
        se_sess = se_nacl->nacl_sess;
        if (se_sess) {
-               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+               sess = se_sess->fabric_sess_ptr;
                if (sess)
                        ret = snprintf(page, PAGE_SIZE, "%u\n", sess->rsp_pdus);
        }
@@ -840,7 +839,7 @@ static ssize_t iscsi_stat_sess_show_attr_txdata_octs(
        spin_lock_bh(&se_nacl->nacl_sess_lock);
        se_sess = se_nacl->nacl_sess;
        if (se_sess) {
-               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+               sess = se_sess->fabric_sess_ptr;
                if (sess)
                        ret = snprintf(page, PAGE_SIZE, "%llu\n",
                                (unsigned long long)sess->tx_data_octets);
@@ -864,7 +863,7 @@ static ssize_t iscsi_stat_sess_show_attr_rxdata_octs(
        spin_lock_bh(&se_nacl->nacl_sess_lock);
        se_sess = se_nacl->nacl_sess;
        if (se_sess) {
-               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+               sess = se_sess->fabric_sess_ptr;
                if (sess)
                        ret = snprintf(page, PAGE_SIZE, "%llu\n",
                                (unsigned long long)sess->rx_data_octets);
@@ -888,7 +887,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_digest_errors(
        spin_lock_bh(&se_nacl->nacl_sess_lock);
        se_sess = se_nacl->nacl_sess;
        if (se_sess) {
-               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+               sess = se_sess->fabric_sess_ptr;
                if (sess)
                        ret = snprintf(page, PAGE_SIZE, "%u\n",
                                        sess->conn_digest_errors);
@@ -912,7 +911,7 @@ static ssize_t iscsi_stat_sess_show_attr_conn_timeout_errors(
        spin_lock_bh(&se_nacl->nacl_sess_lock);
        se_sess = se_nacl->nacl_sess;
        if (se_sess) {
-               sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
+               sess = se_sess->fabric_sess_ptr;
                if (sess)
                        ret = snprintf(page, PAGE_SIZE, "%u\n",
                                        sess->conn_timeout_errors);
index 490207eacde976a179f1197d21f3901afd1c78e0..255ed35da815e3059c1930683255575b51ff58d7 100644 (file)
@@ -21,7 +21,7 @@
 #include <asm/unaligned.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_fabric.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_seq_pdu_list.h"
index d4cf2cd25c447f8ac109f7dcdc20fca572d96abc..879d8d0fa3feb38e061c911887e50a91a286c30f 100644 (file)
  ******************************************************************************/
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
-#include <target/target_core_tpg.h>
 
 #include "iscsi_target_core.h"
 #include "iscsi_target_erl0.h"
@@ -72,7 +70,7 @@ int iscsit_load_discovery_tpg(void)
 
        ret = core_tpg_register(
                        &lio_target_fabric_configfs->tf_ops,
-                       NULL, &tpg->tpg_se_tpg, (void *)tpg,
+                       NULL, &tpg->tpg_se_tpg, tpg,
                        TRANSPORT_TPG_TYPE_DISCOVERY);
        if (ret < 0) {
                kfree(tpg);
index 02348f727bd4ddaaa4ef4b70e3499fc135001dad..11287e1ece134190b6541bdb2bea45a236e46515 100644 (file)
@@ -22,9 +22,7 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/iscsi_proto.h>
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
 #include "iscsi_target_core.h"
@@ -289,7 +287,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(
        }
 
        se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd,
-                               (void *)cmd->tmr_req, tcm_function,
+                               cmd->tmr_req, tcm_function,
                                GFP_KERNEL);
        if (!se_cmd->se_tmr_req)
                goto out;
@@ -851,6 +849,17 @@ void iscsit_free_cmd(struct iscsi_cmd *cmd)
        case ISCSI_OP_SCSI_TMFUNC:
                transport_generic_free_cmd(&cmd->se_cmd, 1);
                break;
+       case ISCSI_OP_REJECT:
+               /*
+                * Handle special case for REJECT when iscsi_add_reject*() has
+                * overwritten the original iscsi_opcode assignment, and the
+                * associated cmd->se_cmd needs to be released.
+                */
+               if (cmd->se_cmd.se_tfo != NULL) {
+                       transport_generic_free_cmd(&cmd->se_cmd, 1);
+                       break;
+               }
+               /* Fall-through */
        default:
                iscsit_release_cmd(cmd);
                break;
@@ -1066,7 +1075,7 @@ static void iscsit_handle_nopin_response_timeout(unsigned long data)
        if (tiqn) {
                spin_lock_bh(&tiqn->sess_err_stats.lock);
                strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
-                               (void *)conn->sess->sess_ops->InitiatorName);
+                               conn->sess->sess_ops->InitiatorName);
                tiqn->sess_err_stats.last_sess_failure_type =
                                ISCSI_SESS_ERR_CXN_TIMEOUT;
                tiqn->sess_err_stats.cxn_timeout_errors++;
index 81d5832fbbd537e7bbffe2c21b1792c1e7a2acde..c47ff7f59e5733226702004617327a28b566f7f1 100644 (file)
 #include <scsi/scsi_cmnd.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
 #include <target/target_core_configfs.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_tmr.h>
 
 #include "tcm_loop.h"
 
@@ -421,11 +416,11 @@ static struct scsi_host_template tcm_loop_driver_template = {
        .queuecommand           = tcm_loop_queuecommand,
        .change_queue_depth     = tcm_loop_change_queue_depth,
        .eh_device_reset_handler = tcm_loop_device_reset,
-       .can_queue              = TL_SCSI_CAN_QUEUE,
+       .can_queue              = 1024,
        .this_id                = -1,
-       .sg_tablesize           = TL_SCSI_SG_TABLESIZE,
-       .cmd_per_lun            = TL_SCSI_CMD_PER_LUN,
-       .max_sectors            = TL_SCSI_MAX_SECTORS,
+       .sg_tablesize           = 256,
+       .cmd_per_lun            = 1024,
+       .max_sectors            = 0xFFFF,
        .use_clustering         = DISABLE_CLUSTERING,
        .slave_alloc            = tcm_loop_slave_alloc,
        .slave_configure        = tcm_loop_slave_configure,
@@ -564,8 +559,7 @@ static char *tcm_loop_get_fabric_name(void)
 
 static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 {
-       struct tcm_loop_tpg *tl_tpg =
-                       (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+       struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
        /*
         * tl_proto_id is set at tcm_loop_configfs.c:tcm_loop_make_scsi_hba()
@@ -592,8 +586,7 @@ static u8 tcm_loop_get_fabric_proto_ident(struct se_portal_group *se_tpg)
 
 static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 {
-       struct tcm_loop_tpg *tl_tpg =
-               (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+       struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        /*
         * Return the passed NAA identifier for the SAS Target Port
         */
@@ -602,8 +595,7 @@ static char *tcm_loop_get_endpoint_wwn(struct se_portal_group *se_tpg)
 
 static u16 tcm_loop_get_tag(struct se_portal_group *se_tpg)
 {
-       struct tcm_loop_tpg *tl_tpg =
-               (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+       struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        /*
         * This Tag is used when forming SCSI Name identifier in EVPD=1 0x83
         * to represent the SCSI Target Port.
@@ -623,8 +615,7 @@ static u32 tcm_loop_get_pr_transport_id(
        int *format_code,
        unsigned char *buf)
 {
-       struct tcm_loop_tpg *tl_tpg =
-                       (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+       struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 
        switch (tl_hba->tl_proto_id) {
@@ -653,8 +644,7 @@ static u32 tcm_loop_get_pr_transport_id_len(
        struct t10_pr_registration *pr_reg,
        int *format_code)
 {
-       struct tcm_loop_tpg *tl_tpg =
-                       (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+       struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 
        switch (tl_hba->tl_proto_id) {
@@ -687,8 +677,7 @@ static char *tcm_loop_parse_pr_out_transport_id(
        u32 *out_tid_len,
        char **port_nexus_ptr)
 {
-       struct tcm_loop_tpg *tl_tpg =
-                       (struct tcm_loop_tpg *)se_tpg->se_tpg_fabric_ptr;
+       struct tcm_loop_tpg *tl_tpg = se_tpg->se_tpg_fabric_ptr;
        struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
 
        switch (tl_hba->tl_proto_id) {
index 6b76c7a22bb062fa692a89e89d903b747e67e157..15a036441471d3aca51624e903ea19f8b7ca8941 100644 (file)
@@ -1,16 +1,7 @@
 #define TCM_LOOP_VERSION               "v2.1-rc1"
 #define TL_WWN_ADDR_LEN                        256
 #define TL_TPGS_PER_HBA                        32
-/*
- * Defaults for struct scsi_host_template tcm_loop_driver_template
- *
- * We use large can_queue and cmd_per_lun here and let TCM enforce
- * the underlying se_device_t->queue_depth.
- */
-#define TL_SCSI_CAN_QUEUE              1024
-#define TL_SCSI_CMD_PER_LUN            1024
-#define TL_SCSI_MAX_SECTORS            1024
-#define TL_SCSI_SG_TABLESIZE           256
+
 /*
  * Used in tcm_loop_driver_probe() for struct Scsi_Host->max_cmd_len
  */
index 1dcbef499d6a09f2add951dee10c955f3541ccb8..01a2691dfb47c4f192f0b123a068339653e575f1 100644 (file)
 #include <scsi/scsi_cmnd.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_hba.h"
 #include "target_core_ua.h"
 
 static int core_alua_check_transition(int state, int *primary);
@@ -79,7 +78,7 @@ int target_emulate_report_target_port_groups(struct se_task *task)
                return -EINVAL;
        }
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
 
        spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
        list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
@@ -164,7 +163,7 @@ int target_emulate_report_target_port_groups(struct se_task *task)
        buf[2] = ((rd_len >> 8) & 0xff);
        buf[3] = (rd_len & 0xff);
 
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
 
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
@@ -195,7 +194,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
                cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                return -EINVAL;
        }
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
 
        /*
         * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed
@@ -352,7 +351,7 @@ int target_emulate_set_target_port_groups(struct se_task *task)
        }
 
 out:
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
        return 0;
index 831468b3163d777f3eb5c982fc05819d37dea3e6..f3d71fa88a2825dab89a2847685ddfeb54eb57d2 100644 (file)
 #include <scsi/scsi.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
+
+#include "target_core_internal.h"
 #include "target_core_ua.h"
-#include "target_core_cdb.h"
 
 static void
 target_fill_alua_data(struct se_port *port, unsigned char *buf)
@@ -82,7 +83,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
                return -EINVAL;
        }
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
 
        if (dev == tpg->tpg_virt_lun0.lun_se_dev) {
                buf[0] = 0x3f; /* Not connected */
@@ -93,6 +94,18 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
        }
        buf[2] = dev->transport->get_device_rev(dev);
 
+       /*
+        * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
+        *
+        * SPC4 says:
+        *   A RESPONSE DATA FORMAT field set to 2h indicates that the
+        *   standard INQUIRY data is in the format defined in this
+        *   standard. Response data format values less than 2h are
+        *   obsolete. Response data format values greater than 2h are
+        *   reserved.
+        */
+       buf[3] = 2;
+
        /*
         * Enable SCCS and TPGS fields for Emulated ALUA
         */
@@ -115,15 +128,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd)
                goto out;
        }
 
-       snprintf((unsigned char *)&buf[8], 8, "LIO-ORG");
-       snprintf((unsigned char *)&buf[16], 16, "%s",
-                &dev->se_sub_dev->t10_wwn.model[0]);
-       snprintf((unsigned char *)&buf[32], 4, "%s",
-                &dev->se_sub_dev->t10_wwn.revision[0]);
+       snprintf(&buf[8], 8, "LIO-ORG");
+       snprintf(&buf[16], 16, "%s", dev->se_sub_dev->t10_wwn.model);
+       snprintf(&buf[32], 4, "%s", dev->se_sub_dev->t10_wwn.revision);
        buf[4] = 31; /* Set additional length to 31 */
 
 out:
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
        return 0;
 }
 
@@ -138,8 +149,7 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
                        SDF_EMULATED_VPD_UNIT_SERIAL) {
                u32 unit_serial_len;
 
-               unit_serial_len =
-                       strlen(&dev->se_sub_dev->t10_wwn.unit_serial[0]);
+               unit_serial_len = strlen(dev->se_sub_dev->t10_wwn.unit_serial);
                unit_serial_len++; /* For NULL Terminator */
 
                if (((len + 4) + unit_serial_len) > cmd->data_length) {
@@ -148,8 +158,8 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
                        buf[3] = (len & 0xff);
                        return 0;
                }
-               len += sprintf((unsigned char *)&buf[4], "%s",
-                       &dev->se_sub_dev->t10_wwn.unit_serial[0]);
+               len += sprintf(&buf[4], "%s",
+                       dev->se_sub_dev->t10_wwn.unit_serial);
                len++; /* Extra Byte for NULL Terminator */
                buf[3] = len;
        }
@@ -279,14 +289,13 @@ check_t10_vend_desc:
                        len += (prod_len + unit_serial_len);
                        goto check_port;
                }
-               id_len += sprintf((unsigned char *)&buf[off+12],
-                               "%s:%s", prod,
+               id_len += sprintf(&buf[off+12], "%s:%s", prod,
                                &dev->se_sub_dev->t10_wwn.unit_serial[0]);
        }
        buf[off] = 0x2; /* ASCII */
        buf[off+1] = 0x1; /* T10 Vendor ID */
        buf[off+2] = 0x0;
-       memcpy((unsigned char *)&buf[off+4], "LIO-ORG", 8);
+       memcpy(&buf[off+4], "LIO-ORG", 8);
        /* Extra Byte for NULL Terminator */
        id_len++;
        /* Identifier Length */
@@ -689,6 +698,13 @@ int target_emulate_inquiry(struct se_task *task)
        int p, ret;
 
        if (!(cdb[1] & 0x1)) {
+               if (cdb[2]) {
+                       pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
+                              cdb[2]);
+                       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
+                       return -EINVAL;
+               }
+
                ret = target_emulate_inquiry_std(cmd);
                goto out;
        }
@@ -707,7 +723,7 @@ int target_emulate_inquiry(struct se_task *task)
                return -EINVAL;
        }
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
 
        buf[0] = dev->transport->get_device_type(dev);
 
@@ -720,11 +736,11 @@ int target_emulate_inquiry(struct se_task *task)
        }
 
        pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
-       cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
+       cmd->scsi_sense_reason = TCM_INVALID_CDB_FIELD;
        ret = -EINVAL;
 
 out_unmap:
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
 out:
        if (!ret) {
                task->task_scsi_status = GOOD;
@@ -746,7 +762,7 @@ int target_emulate_readcapacity(struct se_task *task)
        else
                blocks = (u32)blocks_long;
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
 
        buf[0] = (blocks >> 24) & 0xff;
        buf[1] = (blocks >> 16) & 0xff;
@@ -762,7 +778,7 @@ int target_emulate_readcapacity(struct se_task *task)
        if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
                put_unaligned_be32(0xFFFFFFFF, &buf[0]);
 
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
 
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
@@ -776,7 +792,7 @@ int target_emulate_readcapacity_16(struct se_task *task)
        unsigned char *buf;
        unsigned long long blocks = dev->transport->get_blocks(dev);
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
 
        buf[0] = (blocks >> 56) & 0xff;
        buf[1] = (blocks >> 48) & 0xff;
@@ -797,7 +813,7 @@ int target_emulate_readcapacity_16(struct se_task *task)
        if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws)
                buf[14] = 0x80;
 
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
 
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
@@ -1010,9 +1026,9 @@ int target_emulate_modesense(struct se_task *task)
                        offset = cmd->data_length;
        }
 
-       rbuf = transport_kmap_first_data_page(cmd);
+       rbuf = transport_kmap_data_sg(cmd);
        memcpy(rbuf, buf, offset);
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
 
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
@@ -1034,7 +1050,7 @@ int target_emulate_request_sense(struct se_task *task)
                return -ENOSYS;
        }
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
 
        if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
                /*
@@ -1042,11 +1058,8 @@ int target_emulate_request_sense(struct se_task *task)
                 */
                buf[0] = 0x70;
                buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
-               /*
-                * Make sure request data length is enough for additional
-                * sense data.
-                */
-               if (cmd->data_length <= 18) {
+
+               if (cmd->data_length < 18) {
                        buf[7] = 0x00;
                        err = -EINVAL;
                        goto end;
@@ -1063,11 +1076,8 @@ int target_emulate_request_sense(struct se_task *task)
                 */
                buf[0] = 0x70;
                buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;
-               /*
-                * Make sure request data length is enough for additional
-                * sense data.
-                */
-               if (cmd->data_length <= 18) {
+
+               if (cmd->data_length < 18) {
                        buf[7] = 0x00;
                        err = -EINVAL;
                        goto end;
@@ -1080,7 +1090,7 @@ int target_emulate_request_sense(struct se_task *task)
        }
 
 end:
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
        task->task_scsi_status = GOOD;
        transport_complete_task(task, 1);
        return 0;
@@ -1114,7 +1124,7 @@ int target_emulate_unmap(struct se_task *task)
        dl = get_unaligned_be16(&cdb[0]);
        bd_dl = get_unaligned_be16(&cdb[2]);
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
 
        ptr = &buf[offset];
        pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
@@ -1138,7 +1148,7 @@ int target_emulate_unmap(struct se_task *task)
        }
 
 err:
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
        if (!ret) {
                task->task_scsi_status = GOOD;
                transport_complete_task(task, 1);
diff --git a/drivers/target/target_core_cdb.h b/drivers/target/target_core_cdb.h
deleted file mode 100644 (file)
index ad6b1e3..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-#ifndef TARGET_CORE_CDB_H
-#define TARGET_CORE_CDB_H
-
-int target_emulate_inquiry(struct se_task *task);
-int target_emulate_readcapacity(struct se_task *task);
-int target_emulate_readcapacity_16(struct se_task *task);
-int target_emulate_modesense(struct se_task *task);
-int target_emulate_request_sense(struct se_task *task);
-int target_emulate_unmap(struct se_task *task);
-int target_emulate_write_same(struct se_task *task);
-int target_emulate_synchronize_cache(struct se_task *task);
-int target_emulate_noop(struct se_task *task);
-
-#endif /* TARGET_CORE_CDB_H */
index 93d4f6a1b7980c597c119ae7f7f0506d57846993..6e043eeb1db9c4449edfb4a4385184de1c0633fa 100644 (file)
 #include <linux/spinlock.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_hba.h"
 #include "target_core_pr.h"
 #include "target_core_rd.h"
-#include "target_core_stat.h"
 
 extern struct t10_alua_lu_gp *default_lu_gp;
 
@@ -1452,7 +1450,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
                return -ENOMEM;
 
        orig = opts;
-       while ((ptr = strsep(&opts, ",")) != NULL) {
+       while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;
 
@@ -1631,7 +1629,7 @@ static struct config_item_type target_core_dev_pr_cit = {
 
 static ssize_t target_core_show_dev_info(void *p, char *page)
 {
-       struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+       struct se_subsystem_dev *se_dev = p;
        struct se_hba *hba = se_dev->se_dev_hba;
        struct se_subsystem_api *t = hba->transport;
        int bl = 0;
@@ -1659,7 +1657,7 @@ static ssize_t target_core_store_dev_control(
        const char *page,
        size_t count)
 {
-       struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+       struct se_subsystem_dev *se_dev = p;
        struct se_hba *hba = se_dev->se_dev_hba;
        struct se_subsystem_api *t = hba->transport;
 
@@ -1682,7 +1680,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_control = {
 
 static ssize_t target_core_show_dev_alias(void *p, char *page)
 {
-       struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+       struct se_subsystem_dev *se_dev = p;
 
        if (!(se_dev->su_dev_flags & SDF_USING_ALIAS))
                return 0;
@@ -1695,7 +1693,7 @@ static ssize_t target_core_store_dev_alias(
        const char *page,
        size_t count)
 {
-       struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+       struct se_subsystem_dev *se_dev = p;
        struct se_hba *hba = se_dev->se_dev_hba;
        ssize_t read_bytes;
 
@@ -1706,9 +1704,14 @@ static ssize_t target_core_store_dev_alias(
                return -EINVAL;
        }
 
-       se_dev->su_dev_flags |= SDF_USING_ALIAS;
        read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
                        "%s", page);
+       if (!read_bytes)
+               return -EINVAL;
+       if (se_dev->se_dev_alias[read_bytes - 1] == '\n')
+               se_dev->se_dev_alias[read_bytes - 1] = '\0';
+
+       se_dev->su_dev_flags |= SDF_USING_ALIAS;
 
        pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
                config_item_name(&hba->hba_group.cg_item),
@@ -1728,7 +1731,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_alias = {
 
 static ssize_t target_core_show_dev_udev_path(void *p, char *page)
 {
-       struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+       struct se_subsystem_dev *se_dev = p;
 
        if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH))
                return 0;
@@ -1741,7 +1744,7 @@ static ssize_t target_core_store_dev_udev_path(
        const char *page,
        size_t count)
 {
-       struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+       struct se_subsystem_dev *se_dev = p;
        struct se_hba *hba = se_dev->se_dev_hba;
        ssize_t read_bytes;
 
@@ -1752,9 +1755,14 @@ static ssize_t target_core_store_dev_udev_path(
                return -EINVAL;
        }
 
-       se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
        read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
                        "%s", page);
+       if (!read_bytes)
+               return -EINVAL;
+       if (se_dev->se_dev_udev_path[read_bytes - 1] == '\n')
+               se_dev->se_dev_udev_path[read_bytes - 1] = '\0';
+
+       se_dev->su_dev_flags |= SDF_USING_UDEV_PATH;
 
        pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
                config_item_name(&hba->hba_group.cg_item),
@@ -1777,7 +1785,7 @@ static ssize_t target_core_store_dev_enable(
        const char *page,
        size_t count)
 {
-       struct se_subsystem_dev *se_dev = (struct se_subsystem_dev *)p;
+       struct se_subsystem_dev *se_dev = p;
        struct se_device *dev;
        struct se_hba *hba = se_dev->se_dev_hba;
        struct se_subsystem_api *t = hba->transport;
@@ -1822,7 +1830,7 @@ static struct target_core_configfs_attribute target_core_attr_dev_enable = {
 static ssize_t target_core_show_alua_lu_gp(void *p, char *page)
 {
        struct se_device *dev;
-       struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+       struct se_subsystem_dev *su_dev = p;
        struct config_item *lu_ci;
        struct t10_alua_lu_gp *lu_gp;
        struct t10_alua_lu_gp_member *lu_gp_mem;
@@ -1860,7 +1868,7 @@ static ssize_t target_core_store_alua_lu_gp(
        size_t count)
 {
        struct se_device *dev;
-       struct se_subsystem_dev *su_dev = (struct se_subsystem_dev *)p;
+       struct se_subsystem_dev *su_dev = p;
        struct se_hba *hba = su_dev->se_dev_hba;
        struct t10_alua_lu_gp *lu_gp = NULL, *lu_gp_new = NULL;
        struct t10_alua_lu_gp_member *lu_gp_mem;
index 9b8639425472d8322aab749c6ae03fb56ea2a377..edbcabbf85f7339d1eddef7d618b26181540cd5a 100644 (file)
 #include <scsi/scsi_device.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_hba.h"
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
@@ -322,11 +320,12 @@ int core_free_device_list_for_node(
 void core_dec_lacl_count(struct se_node_acl *se_nacl, struct se_cmd *se_cmd)
 {
        struct se_dev_entry *deve;
+       unsigned long flags;
 
-       spin_lock_irq(&se_nacl->device_list_lock);
+       spin_lock_irqsave(&se_nacl->device_list_lock, flags);
        deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
        deve->deve_cmds--;
-       spin_unlock_irq(&se_nacl->device_list_lock);
+       spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
 }
 
 void core_update_device_list_access(
@@ -658,7 +657,7 @@ int target_report_luns(struct se_task *se_task)
        unsigned char *buf;
        u32 cdb_offset = 0, lun_count = 0, offset = 8, i;
 
-       buf = transport_kmap_first_data_page(se_cmd);
+       buf = (unsigned char *) transport_kmap_data_sg(se_cmd);
 
        /*
         * If no struct se_session pointer is present, this struct se_cmd is
@@ -696,7 +695,7 @@ int target_report_luns(struct se_task *se_task)
         * See SPC3 r07, page 159.
         */
 done:
-       transport_kunmap_first_data_page(se_cmd);
+       transport_kunmap_data_sg(se_cmd);
        lun_count *= 8;
        buf[0] = ((lun_count >> 24) & 0xff);
        buf[1] = ((lun_count >> 16) & 0xff);
@@ -1134,8 +1133,6 @@ int se_dev_set_emulate_rest_reord(struct se_device *dev, int flag)
  */
 int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
 {
-       u32 orig_queue_depth = dev->queue_depth;
-
        if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
                pr_err("dev[%p]: Unable to change SE Device TCQ while"
                        " dev_export_obj: %d count exists\n", dev,
@@ -1169,11 +1166,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
        }
 
        dev->se_sub_dev->se_dev_attrib.queue_depth = dev->queue_depth = queue_depth;
-       if (queue_depth > orig_queue_depth)
-               atomic_add(queue_depth - orig_queue_depth, &dev->depth_left);
-       else if (queue_depth < orig_queue_depth)
-               atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
-
        pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
                        dev, queue_depth);
        return 0;
@@ -1303,24 +1295,26 @@ struct se_lun *core_dev_add_lun(
 {
        struct se_lun *lun_p;
        u32 lun_access = 0;
+       int rc;
 
        if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
                pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
                        atomic_read(&dev->dev_access_obj.obj_access_count));
-               return NULL;
+               return ERR_PTR(-EACCES);
        }
 
        lun_p = core_tpg_pre_addlun(tpg, lun);
-       if ((IS_ERR(lun_p)) || !lun_p)
-               return NULL;
+       if (IS_ERR(lun_p))
+               return lun_p;
 
        if (dev->dev_flags & DF_READ_ONLY)
                lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
        else
                lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
 
-       if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
-               return NULL;
+       rc = core_tpg_post_addlun(tpg, lun_p, lun_access, dev);
+       if (rc < 0)
+               return ERR_PTR(rc);
 
        pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
                " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
@@ -1357,11 +1351,10 @@ int core_dev_del_lun(
        u32 unpacked_lun)
 {
        struct se_lun *lun;
-       int ret = 0;
 
-       lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
-       if (!lun)
-               return ret;
+       lun = core_tpg_pre_dellun(tpg, unpacked_lun);
+       if (IS_ERR(lun))
+               return PTR_ERR(lun);
 
        core_tpg_post_dellun(tpg, lun);
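Both hunks above move the LUN add/del paths to the kernel's ERR_PTR() convention instead of returning NULL, so callers can propagate the real errno. A caller-side sketch of the pattern, mirroring the target_fabric_port_link() update further down (variable names for illustration only; IS_ERR()/PTR_ERR() come from <linux/err.h>):

        struct se_lun *lun;

        lun = core_dev_add_lun(se_tpg, dev->se_hba, dev, unpacked_lun);
        if (IS_ERR(lun))
                return PTR_ERR(lun);    /* e.g. -EACCES, instead of a hard-coded -EINVAL */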
 
index 09b6f8729f918edbcd913770ef73c16485594839..9a2ce11e1a6e4025d7400dd313ccec64028308b3 100644 (file)
 #include <linux/configfs.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_hba.h"
 #include "target_core_pr.h"
-#include "target_core_stat.h"
 
 #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs)             \
 static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \
@@ -770,9 +766,9 @@ static int target_fabric_port_link(
 
        lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
                                lun->unpacked_lun);
-       if (IS_ERR(lun_p) || !lun_p) {
+       if (IS_ERR(lun_p)) {
                pr_err("core_dev_add_lun() failed\n");
-               ret = -EINVAL;
+               ret = PTR_ERR(lun_p);
                goto out;
        }
 
index ec4249be617e348aaed580cfa027df1fc5174bbf..283a36e464e65ee5e55e9dded0037262a8203a9a 100644 (file)
 #include <scsi/scsi_cmnd.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "target_core_hba.h"
+#include "target_core_internal.h"
 #include "target_core_pr.h"
 
 /*
@@ -402,7 +399,7 @@ char *iscsi_parse_pr_out_transport_id(
                add_len = ((buf[2] >> 8) & 0xff);
                add_len |= (buf[3] & 0xff);
 
-               tid_len = strlen((char *)&buf[4]);
+               tid_len = strlen(&buf[4]);
                tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
                tid_len += 1; /* Add one byte for NULL terminator */
                padding = ((-tid_len) & 3);
@@ -423,11 +420,11 @@ char *iscsi_parse_pr_out_transport_id(
         * format.
         */
        if (format_code == 0x40) {
-               p = strstr((char *)&buf[4], ",i,0x");
+               p = strstr(&buf[4], ",i,0x");
                if (!p) {
                        pr_err("Unable to locate \",i,0x\" seperator"
                                " for Initiator port identifier: %s\n",
-                               (char *)&buf[4]);
+                               &buf[4]);
                        return NULL;
                }
                *p = '\0'; /* Terminate iSCSI Name */
index b4864fba4ef0d511758916a8debac60ee9f43674..7ed58e2df7914176cb8c9bd8e8bad187b5218131 100644 (file)
@@ -37,8 +37,7 @@
 #include <scsi/scsi_host.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
 
 #include "target_core_file.h"
 
@@ -86,7 +85,7 @@ static void fd_detach_hba(struct se_hba *hba)
 static void *fd_allocate_virtdevice(struct se_hba *hba, const char *name)
 {
        struct fd_dev *fd_dev;
-       struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+       struct fd_host *fd_host = hba->hba_ptr;
 
        fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
        if (!fd_dev) {
@@ -114,8 +113,8 @@ static struct se_device *fd_create_virtdevice(
        struct se_device *dev;
        struct se_dev_limits dev_limits;
        struct queue_limits *limits;
-       struct fd_dev *fd_dev = (struct fd_dev *) p;
-       struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
+       struct fd_dev *fd_dev = p;
+       struct fd_host *fd_host = hba->hba_ptr;
        mm_segment_t old_fs;
        struct file *file;
        struct inode *inode = NULL;
@@ -240,7 +239,7 @@ fail:
  */
 static void fd_free_device(void *p)
 {
-       struct fd_dev *fd_dev = (struct fd_dev *) p;
+       struct fd_dev *fd_dev = p;
 
        if (fd_dev->fd_file) {
                filp_close(fd_dev->fd_file, NULL);
@@ -498,7 +497,7 @@ static ssize_t fd_set_configfs_dev_params(
 
        orig = opts;
 
-       while ((ptr = strsep(&opts, ",")) != NULL) {
+       while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;
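Adding '\n' to the strsep() separator set here (and in the iblock, pscsi and rd backends below) strips the trailing newline that echo typically appends when writing the configfs attribute, so it no longer ends up glued to the last option value. A minimal illustration of the resulting token stream (sketch only, not driver code):

        char buf[] = "fd_dev_name=/tmp/backing.img\n";
        char *opts = buf, *ptr;

        while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;       /* the empty token after the '\n' is skipped */
                /* ptr is "fd_dev_name=/tmp/backing.img", with no stray newline */
        }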
 
@@ -559,7 +558,7 @@ out:
 
 static ssize_t fd_check_configfs_dev_params(struct se_hba *hba, struct se_subsystem_dev *se_dev)
 {
-       struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
+       struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
 
        if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
                pr_err("Missing fd_dev_name=\n");
index c68019d6c406292335522d536760fb8bb5904629..3dd1bd4b6f71c712eb10770ebf976a9a7fa67cfc 100644 (file)
 #include <net/tcp.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 
-#include "target_core_hba.h"
+#include "target_core_internal.h"
 
 static LIST_HEAD(subsystem_list);
 static DEFINE_MUTEX(subsystem_mutex);
diff --git a/drivers/target/target_core_hba.h b/drivers/target/target_core_hba.h
deleted file mode 100644 (file)
index bb0fea5..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-#ifndef TARGET_CORE_HBA_H
-#define TARGET_CORE_HBA_H
-
-extern struct se_hba *core_alloc_hba(const char *, u32, u32);
-extern int core_delete_hba(struct se_hba *);
-
-#endif /* TARGET_CORE_HBA_H */
index 4aa9922044382628fc21e12d8df3ab67840c23ae..8572eae62da7a20399216db03ea04fe9fa1dde3e 100644 (file)
@@ -42,8 +42,7 @@
 #include <scsi/scsi_host.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
 
 #include "target_core_iblock.h"
 
@@ -130,7 +129,7 @@ static struct se_device *iblock_create_virtdevice(
        /*
         * These settings need to be made tunable..
         */
-       ib_dev->ibd_bio_set = bioset_create(32, 64);
+       ib_dev->ibd_bio_set = bioset_create(32, 0);
        if (!ib_dev->ibd_bio_set) {
                pr_err("IBLOCK: Unable to create bioset()\n");
                return ERR_PTR(-ENOMEM);
@@ -182,7 +181,7 @@ static struct se_device *iblock_create_virtdevice(
                 */
                dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count = 1;
                dev->se_sub_dev->se_dev_attrib.unmap_granularity =
-                               q->limits.discard_granularity;
+                               q->limits.discard_granularity >> 9;
                dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
                                q->limits.discard_alignment;
 
@@ -391,7 +390,7 @@ static ssize_t iblock_set_configfs_dev_params(struct se_hba *hba,
 
        orig = opts;
 
-       while ((ptr = strsep(&opts, ",")) != NULL) {
+       while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;
 
@@ -465,7 +464,7 @@ static ssize_t iblock_show_configfs_dev_params(
        if (bd) {
                bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
                        MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
-                       "" : (bd->bd_holder == (struct iblock_dev *)ibd) ?
+                       "" : (bd->bd_holder == ibd) ?
                        "CLAIMED: IBLOCK" : "CLAIMED: OS");
        } else {
                bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
@@ -489,6 +488,13 @@ iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)
        struct iblock_req *ib_req = IBLOCK_REQ(task);
        struct bio *bio;
 
+       /*
+        * Only allocate as many vector entries as the bio code allows us to,
+        * we'll loop later on until we have handled the whole request.
+        */
+       if (sg_num > BIO_MAX_PAGES)
+               sg_num = BIO_MAX_PAGES;
+
        bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
        if (!bio) {
                pr_err("Unable to allocate memory for bio\n");
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
new file mode 100644 (file)
index 0000000..4500136
--- /dev/null
@@ -0,0 +1,123 @@
+#ifndef TARGET_CORE_INTERNAL_H
+#define TARGET_CORE_INTERNAL_H
+
+/* target_core_alua.c */
+extern struct t10_alua_lu_gp *default_lu_gp;
+
+/* target_core_cdb.c */
+int    target_emulate_inquiry(struct se_task *task);
+int    target_emulate_readcapacity(struct se_task *task);
+int    target_emulate_readcapacity_16(struct se_task *task);
+int    target_emulate_modesense(struct se_task *task);
+int    target_emulate_request_sense(struct se_task *task);
+int    target_emulate_unmap(struct se_task *task);
+int    target_emulate_write_same(struct se_task *task);
+int    target_emulate_synchronize_cache(struct se_task *task);
+int    target_emulate_noop(struct se_task *task);
+
+/* target_core_device.c */
+struct se_dev_entry *core_get_se_deve_from_rtpi(struct se_node_acl *, u16);
+int    core_free_device_list_for_node(struct se_node_acl *,
+               struct se_portal_group *);
+void   core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
+void   core_update_device_list_access(u32, u32, struct se_node_acl *);
+int    core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *,
+               u32, u32, struct se_node_acl *, struct se_portal_group *, int);
+void   core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
+int    core_dev_export(struct se_device *, struct se_portal_group *,
+               struct se_lun *);
+void   core_dev_unexport(struct se_device *, struct se_portal_group *,
+               struct se_lun *);
+int    target_report_luns(struct se_task *);
+void   se_release_device_for_hba(struct se_device *);
+void   se_release_vpd_for_dev(struct se_device *);
+int    se_free_virtual_device(struct se_device *, struct se_hba *);
+int    se_dev_check_online(struct se_device *);
+int    se_dev_check_shutdown(struct se_device *);
+void   se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
+int    se_dev_set_task_timeout(struct se_device *, u32);
+int    se_dev_set_max_unmap_lba_count(struct se_device *, u32);
+int    se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
+int    se_dev_set_unmap_granularity(struct se_device *, u32);
+int    se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
+int    se_dev_set_emulate_dpo(struct se_device *, int);
+int    se_dev_set_emulate_fua_write(struct se_device *, int);
+int    se_dev_set_emulate_fua_read(struct se_device *, int);
+int    se_dev_set_emulate_write_cache(struct se_device *, int);
+int    se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
+int    se_dev_set_emulate_tas(struct se_device *, int);
+int    se_dev_set_emulate_tpu(struct se_device *, int);
+int    se_dev_set_emulate_tpws(struct se_device *, int);
+int    se_dev_set_enforce_pr_isids(struct se_device *, int);
+int    se_dev_set_is_nonrot(struct se_device *, int);
+int    se_dev_set_emulate_rest_reord(struct se_device *dev, int);
+int    se_dev_set_queue_depth(struct se_device *, u32);
+int    se_dev_set_max_sectors(struct se_device *, u32);
+int    se_dev_set_optimal_sectors(struct se_device *, u32);
+int    se_dev_set_block_size(struct se_device *, u32);
+struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
+               struct se_device *, u32);
+int    core_dev_del_lun(struct se_portal_group *, u32);
+struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
+struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
+               u32, char *, int *);
+int    core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
+               struct se_lun_acl *, u32, u32);
+int    core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
+               struct se_lun *, struct se_lun_acl *);
+void   core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
+               struct se_lun_acl *lacl);
+int    core_dev_setup_virtual_lun0(void);
+void   core_dev_release_virtual_lun0(void);
+
+/* target_core_hba.c */
+struct se_hba *core_alloc_hba(const char *, u32, u32);
+int    core_delete_hba(struct se_hba *);
+
+/* target_core_tmr.c */
+int    core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
+               struct list_head *, struct se_cmd *);
+
+/* target_core_tpg.c */
+extern struct se_device *g_lun0_dev;
+
+struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+               const char *);
+struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
+               unsigned char *);
+void   core_tpg_add_node_to_devs(struct se_node_acl *, struct se_portal_group *);
+void   core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
+struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
+int    core_tpg_post_addlun(struct se_portal_group *, struct se_lun *,
+               u32, void *);
+struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun);
+int    core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
+
+/* target_core_transport.c */
+extern struct kmem_cache *se_tmr_req_cache;
+
+int    init_se_kmem_caches(void);
+void   release_se_kmem_caches(void);
+u32    scsi_get_new_index(scsi_index_t);
+void   transport_subsystem_check_init(void);
+void   transport_cmd_finish_abort(struct se_cmd *, int);
+void   __transport_remove_task_from_execute_queue(struct se_task *,
+               struct se_device *);
+unsigned char *transport_dump_cmd_direction(struct se_cmd *);
+void   transport_dump_dev_state(struct se_device *, char *, int *);
+void   transport_dump_dev_info(struct se_device *, struct se_lun *,
+               unsigned long long, char *, int *);
+void   transport_dump_vpd_proto_id(struct t10_vpd *, unsigned char *, int);
+int    transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
+int    transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
+int    transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
+bool   target_stop_task(struct se_task *task, unsigned long *flags);
+int    transport_clear_lun_from_sessions(struct se_lun *);
+void   transport_send_task_abort(struct se_cmd *);
+
+/* target_core_stat.c */
+void   target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
+void   target_stat_setup_port_default_groups(struct se_lun *);
+void   target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
+
+#endif /* TARGET_CORE_INTERNAL_H */
index 95dee7074aeb5eb05f630fdded543e948ac206b3..b7c779389eea6820c34c5f9d6a6dafc98638c932 100644 (file)
 #include <asm/unaligned.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
-#include "target_core_hba.h"
+#include "target_core_internal.h"
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
@@ -481,6 +478,7 @@ static int core_scsi3_pr_seq_non_holder(
        case READ_MEDIA_SERIAL_NUMBER:
        case REPORT_LUNS:
        case REQUEST_SENSE:
+       case PERSISTENT_RESERVE_IN:
                ret = 0; /* Allowed CDBs */
                break;
        default:
@@ -1537,7 +1535,7 @@ static int core_scsi3_decode_spec_i_port(
        tidh_new->dest_local_nexus = 1;
        list_add_tail(&tidh_new->dest_list, &tid_dest_list);
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
        /*
         * For a PERSISTENT RESERVE OUT specify initiator ports payload,
         * first extract TransportID Parameter Data Length, and make sure
@@ -1788,7 +1786,7 @@ static int core_scsi3_decode_spec_i_port(
 
        }
 
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
 
        /*
         * Go ahead and create registrations from tid_dest_list for the
@@ -1836,7 +1834,7 @@ static int core_scsi3_decode_spec_i_port(
 
        return 0;
 out:
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
        /*
         * For the failure case, release everything from tid_dest_list
         * including *dest_pr_reg and the configfs dependencies..
@@ -2984,21 +2982,6 @@ static void core_scsi3_release_preempt_and_abort(
        }
 }
 
-int core_scsi3_check_cdb_abort_and_preempt(
-       struct list_head *preempt_and_abort_list,
-       struct se_cmd *cmd)
-{
-       struct t10_pr_registration *pr_reg, *pr_reg_tmp;
-
-       list_for_each_entry_safe(pr_reg, pr_reg_tmp, preempt_and_abort_list,
-                               pr_reg_abort_list) {
-               if (pr_reg->pr_res_key == cmd->pr_res_key)
-                       return 0;
-       }
-
-       return 1;
-}
-
 static int core_scsi3_pro_preempt(
        struct se_cmd *cmd,
        int type,
@@ -3138,7 +3121,7 @@ static int core_scsi3_pro_preempt(
                        if (!calling_it_nexus)
                                core_scsi3_ua_allocate(pr_reg_nacl,
                                        pr_res_mapped_lun, 0x2A,
-                                       ASCQ_2AH_RESERVATIONS_PREEMPTED);
+                                       ASCQ_2AH_REGISTRATIONS_PREEMPTED);
                }
                spin_unlock(&pr_tmpl->registration_lock);
                /*
@@ -3251,7 +3234,7 @@ static int core_scsi3_pro_preempt(
                 *    additional sense code set to REGISTRATIONS PREEMPTED;
                 */
                core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun, 0x2A,
-                               ASCQ_2AH_RESERVATIONS_PREEMPTED);
+                               ASCQ_2AH_REGISTRATIONS_PREEMPTED);
        }
        spin_unlock(&pr_tmpl->registration_lock);
        /*
@@ -3428,14 +3411,14 @@ static int core_scsi3_emulate_pro_register_and_move(
         * will be moved to for the TransportID containing SCSI initiator WWN
         * information.
         */
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
        rtpi = (buf[18] & 0xff) << 8;
        rtpi |= buf[19] & 0xff;
        tid_len = (buf[20] & 0xff) << 24;
        tid_len |= (buf[21] & 0xff) << 16;
        tid_len |= (buf[22] & 0xff) << 8;
        tid_len |= buf[23] & 0xff;
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
        buf = NULL;
 
        if ((tid_len + 24) != cmd->data_length) {
@@ -3487,7 +3470,7 @@ static int core_scsi3_emulate_pro_register_and_move(
                return -EINVAL;
        }
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
        proto_ident = (buf[24] & 0x0f);
 #if 0
        pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
@@ -3521,7 +3504,7 @@ static int core_scsi3_emulate_pro_register_and_move(
                goto out;
        }
 
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
        buf = NULL;
 
        pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
@@ -3786,13 +3769,13 @@ after_iport_check:
                                        " REGISTER_AND_MOVE\n");
        }
 
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
 
        core_scsi3_put_pr_reg(dest_pr_reg);
        return 0;
 out:
        if (buf)
-               transport_kunmap_first_data_page(cmd);
+               transport_kunmap_data_sg(cmd);
        if (dest_se_deve)
                core_scsi3_lunacl_undepend_item(dest_se_deve);
        if (dest_node_acl)
@@ -3866,7 +3849,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
        scope = (cdb[2] & 0xf0);
        type = (cdb[2] & 0x0f);
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
        /*
         * From PERSISTENT_RESERVE_OUT parameter list (payload)
         */
@@ -3884,7 +3867,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task)
                aptpl = (buf[17] & 0x01);
                unreg = (buf[17] & 0x02);
        }
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
        buf = NULL;
 
        /*
@@ -3984,7 +3967,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
                return -EINVAL;
        }
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
        buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
        buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
        buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
@@ -4018,7 +4001,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd)
        buf[6] = ((add_len >> 8) & 0xff);
        buf[7] = (add_len & 0xff);
 
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
 
        return 0;
 }
@@ -4044,7 +4027,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
                return -EINVAL;
        }
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
        buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
        buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
        buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff);
@@ -4103,7 +4086,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd)
 
 err:
        spin_unlock(&se_dev->dev_reservation_lock);
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
 
        return 0;
 }
@@ -4127,7 +4110,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
                return -EINVAL;
        }
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
 
        buf[0] = ((add_len << 8) & 0xff);
        buf[1] = (add_len & 0xff);
@@ -4159,7 +4142,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
        buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */
        buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */
 
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
 
        return 0;
 }
@@ -4189,7 +4172,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
                return -EINVAL;
        }
 
-       buf = transport_kmap_first_data_page(cmd);
+       buf = transport_kmap_data_sg(cmd);
 
        buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff);
        buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff);
@@ -4310,7 +4293,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd)
        buf[6] = ((add_len >> 8) & 0xff);
        buf[7] = (add_len & 0xff);
 
-       transport_kunmap_first_data_page(cmd);
+       transport_kunmap_data_sg(cmd);
 
        return 0;
 }
index b97f6940dd051a9e6c18e1842d0626af3a60ebbc..7a233feb7e992a30502ea4b43368214eb8f269ff 100644 (file)
@@ -60,8 +60,6 @@ extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
                                             struct se_node_acl *);
 extern void core_scsi3_free_all_registrations(struct se_device *);
 extern unsigned char *core_scsi3_pr_dump_type(int);
-extern int core_scsi3_check_cdb_abort_and_preempt(struct list_head *,
-                                                 struct se_cmd *);
 
 extern int target_scsi3_emulate_pr_in(struct se_task *task);
 extern int target_scsi3_emulate_pr_out(struct se_task *task);
index 8b15e56b038461169872d964055316c0318e7e31..8d4def30e9e80f788e20c6f120ff107be9d07be0 100644 (file)
@@ -44,8 +44,7 @@
 #include <scsi/scsi_tcq.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
+#include <target/target_core_backend.h>
 
 #include "target_core_pscsi.h"
 
@@ -105,7 +104,7 @@ static void pscsi_detach_hba(struct se_hba *hba)
 
 static int pscsi_pmode_enable_hba(struct se_hba *hba, unsigned long mode_flag)
 {
-       struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+       struct pscsi_hba_virt *phv = hba->hba_ptr;
        struct Scsi_Host *sh = phv->phv_lld_host;
        /*
         * Release the struct Scsi_Host
@@ -351,7 +350,6 @@ static struct se_device *pscsi_add_device_to_list(
         * scsi_device_put() and the pdv->pdv_sd cleared.
         */
        pdv->pdv_sd = sd;
-
        dev = transport_add_device_to_core_hba(hba, &pscsi_template,
                                se_dev, dev_flags, pdv,
                                &dev_limits, NULL, NULL);
@@ -406,7 +404,7 @@ static struct se_device *pscsi_create_type_disk(
        __releases(sh->host_lock)
 {
        struct se_device *dev;
-       struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+       struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
        struct Scsi_Host *sh = sd->host;
        struct block_device *bd;
        u32 dev_flags = 0;
@@ -454,7 +452,7 @@ static struct se_device *pscsi_create_type_rom(
        __releases(sh->host_lock)
 {
        struct se_device *dev;
-       struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+       struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
        struct Scsi_Host *sh = sd->host;
        u32 dev_flags = 0;
 
@@ -489,7 +487,7 @@ static struct se_device *pscsi_create_type_other(
        __releases(sh->host_lock)
 {
        struct se_device *dev;
-       struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)pdv->pdv_se_hba->hba_ptr;
+       struct pscsi_hba_virt *phv = pdv->pdv_se_hba->hba_ptr;
        struct Scsi_Host *sh = sd->host;
        u32 dev_flags = 0;
 
@@ -510,10 +508,10 @@ static struct se_device *pscsi_create_virtdevice(
        struct se_subsystem_dev *se_dev,
        void *p)
 {
-       struct pscsi_dev_virt *pdv = (struct pscsi_dev_virt *)p;
+       struct pscsi_dev_virt *pdv = p;
        struct se_device *dev;
        struct scsi_device *sd;
-       struct pscsi_hba_virt *phv = (struct pscsi_hba_virt *)hba->hba_ptr;
+       struct pscsi_hba_virt *phv = hba->hba_ptr;
        struct Scsi_Host *sh = phv->phv_lld_host;
        int legacy_mode_enable = 0;
 
@@ -695,7 +693,7 @@ static int pscsi_transport_complete(struct se_task *task)
 
                if (task->task_se_cmd->se_deve->lun_flags &
                                TRANSPORT_LUNFLAGS_READ_ONLY) {
-                       unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd);
+                       unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd);
 
                        if (cdb[0] == MODE_SENSE_10) {
                                if (!(buf[3] & 0x80))
@@ -705,7 +703,7 @@ static int pscsi_transport_complete(struct se_task *task)
                                        buf[2] |= 0x80;
                        }
 
-                       transport_kunmap_first_data_page(task->task_se_cmd);
+                       transport_kunmap_data_sg(task->task_se_cmd);
                }
        }
 after_mode_sense:
@@ -818,7 +816,7 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_hba *hba,
 
        orig = opts;
 
-       while ((ptr = strsep(&opts, ",")) != NULL) {
+       while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;
 
@@ -1144,7 +1142,7 @@ static unsigned char *pscsi_get_sense_buffer(struct se_task *task)
 {
        struct pscsi_plugin_task *pt = PSCSI_TASK(task);
 
-       return (unsigned char *)&pt->pscsi_sense[0];
+       return pt->pscsi_sense;
 }
 
 /*     pscsi_get_device_rev():
index 02e51faa2f4ea168f0a6139c8e303fc9fca81c28..8b68f7b82631ea8e0993da09228882820366067b 100644 (file)
@@ -37,9 +37,7 @@
 #include <scsi/scsi_host.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
 
 #include "target_core_rd.h"
 
@@ -474,7 +472,7 @@ static ssize_t rd_set_configfs_dev_params(
 
        orig = opts;
 
-       while ((ptr = strsep(&opts, ",")) != NULL) {
+       while ((ptr = strsep(&opts, ",\n")) != NULL) {
                if (!*ptr)
                        continue;
 
index 874152aed94af013de0bc5762f0959ea25281e05..f8c2d2cc34310041323d2614c3dfb09902a1f1db 100644 (file)
 #include <scsi/scsi_host.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
-#include "target_core_hba.h"
+#include "target_core_internal.h"
 
 #ifndef INITIAL_JIFFIES
 #define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ))
@@ -1755,8 +1755,7 @@ static ssize_t target_stat_scsi_att_intr_port_show_attr_port_ident(
        /* scsiAttIntrPortName+scsiAttIntrPortIdentifier */
        memset(buf, 0, 64);
        if (tpg->se_tpg_tfo->sess_get_initiator_sid != NULL)
-               tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
-                               (unsigned char *)&buf[0], 64);
+               tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess, buf, 64);
 
        ret = snprintf(page, PAGE_SIZE, "%s+i+%s\n", nacl->initiatorname, buf);
        spin_unlock_irq(&nacl->nacl_sess_lock);
diff --git a/drivers/target/target_core_stat.h b/drivers/target/target_core_stat.h
deleted file mode 100644 (file)
index 86c252f..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef TARGET_CORE_STAT_H
-#define TARGET_CORE_STAT_H
-
-extern void target_stat_setup_dev_default_groups(struct se_subsystem_dev *);
-extern void target_stat_setup_port_default_groups(struct se_lun *);
-extern void target_stat_setup_mappedlun_default_groups(struct se_lun_acl *);
-
-#endif   /*** TARGET_CORE_STAT_H ***/
index 684522805a1f370a99a5745c2fa3815a9c722912..dcb0618c9388a9e41341b7abef159d18a2eed259 100644 (file)
 #include <scsi/scsi_cmnd.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
 #include "target_core_pr.h"
 
@@ -101,6 +100,21 @@ static void core_tmr_handle_tas_abort(
        transport_cmd_finish_abort(cmd, 0);
 }
 
+static int target_check_cdb_and_preempt(struct list_head *list,
+               struct se_cmd *cmd)
+{
+       struct t10_pr_registration *reg;
+
+       if (!list)
+               return 0;
+       list_for_each_entry(reg, list, pr_reg_abort_list) {
+               if (reg->pr_res_key == cmd->pr_res_key)
+                       return 0;
+       }
+
+       return 1;
+}
+
 static void core_tmr_drain_tmr_list(
        struct se_device *dev,
        struct se_tmr_req *tmr,
@@ -132,9 +146,7 @@ static void core_tmr_drain_tmr_list(
                 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
                 * skip non registration key matching TMRs.
                 */
-               if (preempt_and_abort_list &&
-                   (core_scsi3_check_cdb_abort_and_preempt(
-                                       preempt_and_abort_list, cmd) != 0))
+               if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
                        continue;
 
                spin_lock(&cmd->t_state_lock);
@@ -211,9 +223,7 @@ static void core_tmr_drain_task_list(
                 * For PREEMPT_AND_ABORT usage, only process commands
                 * with a matching reservation key.
                 */
-               if (preempt_and_abort_list &&
-                   (core_scsi3_check_cdb_abort_and_preempt(
-                                       preempt_and_abort_list, cmd) != 0))
+               if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
                        continue;
                /*
                 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
@@ -222,7 +232,7 @@ static void core_tmr_drain_task_list(
                        continue;
 
                list_move_tail(&task->t_state_list, &drain_task_list);
-               atomic_set(&task->task_state_active, 0);
+               task->t_state_active = false;
                /*
                 * Remove from task execute list before processing drain_task_list
                 */
@@ -321,9 +331,7 @@ static void core_tmr_drain_cmd_list(
                 * For PREEMPT_AND_ABORT usage, only process commands
                 * with a matching reservation key.
                 */
-               if (preempt_and_abort_list &&
-                   (core_scsi3_check_cdb_abort_and_preempt(
-                                       preempt_and_abort_list, cmd) != 0))
+               if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
                        continue;
                /*
                 * Not aborting PROUT PREEMPT_AND_ABORT CDB..
index 8ddd133025b932a98a5968bfeb4f30b0d396a3d2..06336ecd872df683e8d2ac08e61d2898d03cfcb9 100644 (file)
 #include <scsi/scsi_cmnd.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 
-#include "target_core_hba.h"
-#include "target_core_stat.h"
+#include "target_core_internal.h"
 
 extern struct se_device *g_lun0_dev;
 
@@ -810,8 +807,7 @@ static void core_tpg_shutdown_lun(
 
 struct se_lun *core_tpg_pre_dellun(
        struct se_portal_group *tpg,
-       u32 unpacked_lun,
-       int *ret)
+       u32 unpacked_lun)
 {
        struct se_lun *lun;
 
index 0257658e2e3ea8a75642ae0dcabc77547ac2379b..58cea07b12fbcaea6ae4f3990ed02cf2deba1995 100644 (file)
 #include <scsi/scsi_tcq.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tmr.h>
-#include <target/target_core_tpg.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_backend.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_cdb.h"
-#include "target_core_hba.h"
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
@@ -72,7 +68,7 @@ struct kmem_cache *t10_alua_tg_pt_gp_mem_cache;
 
 static int transport_generic_write_pending(struct se_cmd *);
 static int transport_processing_thread(void *param);
-static int __transport_execute_tasks(struct se_device *dev);
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *);
 static void transport_complete_task_attr(struct se_cmd *cmd);
 static void transport_handle_queue_full(struct se_cmd *cmd,
                struct se_device *dev);
@@ -212,14 +208,13 @@ u32 scsi_get_new_index(scsi_index_t type)
        return new_index;
 }
 
-void transport_init_queue_obj(struct se_queue_obj *qobj)
+static void transport_init_queue_obj(struct se_queue_obj *qobj)
 {
        atomic_set(&qobj->queue_cnt, 0);
        INIT_LIST_HEAD(&qobj->qobj_list);
        init_waitqueue_head(&qobj->thread_wq);
        spin_lock_init(&qobj->cmd_queue_lock);
 }
-EXPORT_SYMBOL(transport_init_queue_obj);
 
 void transport_subsystem_check_init(void)
 {
@@ -426,18 +421,18 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
                if (task->task_flags & TF_ACTIVE)
                        continue;
 
-               if (!atomic_read(&task->task_state_active))
-                       continue;
-
                spin_lock_irqsave(&dev->execute_task_lock, flags);
-               list_del(&task->t_state_list);
-               pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
-                       cmd->se_tfo->get_task_tag(cmd), dev, task);
-               spin_unlock_irqrestore(&dev->execute_task_lock, flags);
+               if (task->t_state_active) {
+                       pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
+                               cmd->se_tfo->get_task_tag(cmd), dev, task);
 
-               atomic_set(&task->task_state_active, 0);
-               atomic_dec(&cmd->t_task_cdbs_ex_left);
+                       list_del(&task->t_state_list);
+                       atomic_dec(&cmd->t_task_cdbs_ex_left);
+                       task->t_state_active = false;
+               }
+               spin_unlock_irqrestore(&dev->execute_task_lock, flags);
        }
+
 }
 
 /*     transport_cmd_check_stop():
@@ -696,12 +691,6 @@ void transport_complete_task(struct se_task *task, int success)
        struct se_cmd *cmd = task->task_se_cmd;
        struct se_device *dev = cmd->se_dev;
        unsigned long flags;
-#if 0
-       pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
-                       cmd->t_task_cdb[0], dev);
-#endif
-       if (dev)
-               atomic_inc(&dev->depth_left);
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        task->task_flags &= ~TF_ACTIVE;
@@ -714,7 +703,7 @@ void transport_complete_task(struct se_task *task, int success)
        if (dev && dev->transport->transport_complete) {
                if (dev->transport->transport_complete(task) != 0) {
                        cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
-                       task->task_sense = 1;
+                       task->task_flags |= TF_HAS_SENSE;
                        success = 1;
                }
        }
@@ -743,13 +732,7 @@ void transport_complete_task(struct se_task *task, int success)
        }
 
        if (cmd->t_tasks_failed) {
-               if (!task->task_error_status) {
-                       task->task_error_status =
-                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-                       cmd->scsi_sense_reason =
-                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               }
-
+               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
                INIT_WORK(&cmd->work, target_complete_failure_work);
        } else {
                atomic_set(&cmd->t_transport_complete, 1);
@@ -824,7 +807,7 @@ static void __transport_add_task_to_execute_queue(
        head_of_queue = transport_add_task_check_sam_attr(task, task_prev, dev);
        atomic_inc(&dev->execute_tasks);
 
-       if (atomic_read(&task->task_state_active))
+       if (task->t_state_active)
                return;
        /*
         * Determine if this task needs to go to HEAD_OF_QUEUE for the
@@ -838,7 +821,7 @@ static void __transport_add_task_to_execute_queue(
        else
                list_add_tail(&task->t_state_list, &dev->state_task_list);
 
-       atomic_set(&task->task_state_active, 1);
+       task->t_state_active = true;
 
        pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
                task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
@@ -853,29 +836,26 @@ static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)
 
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        list_for_each_entry(task, &cmd->t_task_list, t_list) {
-               if (atomic_read(&task->task_state_active))
-                       continue;
-
                spin_lock(&dev->execute_task_lock);
-               list_add_tail(&task->t_state_list, &dev->state_task_list);
-               atomic_set(&task->task_state_active, 1);
-
-               pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
-                       task->task_se_cmd->se_tfo->get_task_tag(
-                       task->task_se_cmd), task, dev);
-
+               if (!task->t_state_active) {
+                       list_add_tail(&task->t_state_list,
+                                     &dev->state_task_list);
+                       task->t_state_active = true;
+
+                       pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+                               task->task_se_cmd->se_tfo->get_task_tag(
+                               task->task_se_cmd), task, dev);
+               }
                spin_unlock(&dev->execute_task_lock);
        }
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
-static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+static void __transport_add_tasks_from_cmd(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_task *task, *task_prev = NULL;
-       unsigned long flags;
 
-       spin_lock_irqsave(&dev->execute_task_lock, flags);
        list_for_each_entry(task, &cmd->t_task_list, t_list) {
                if (!list_empty(&task->t_execute_list))
                        continue;
@@ -886,6 +866,15 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
                __transport_add_task_to_execute_queue(task, task_prev, dev);
                task_prev = task;
        }
+}
+
+static void transport_add_tasks_from_cmd(struct se_cmd *cmd)
+{
+       unsigned long flags;
+       struct se_device *dev = cmd->se_dev;
+
+       spin_lock_irqsave(&dev->execute_task_lock, flags);
+       __transport_add_tasks_from_cmd(cmd);
        spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 }
 
@@ -896,7 +885,7 @@ void __transport_remove_task_from_execute_queue(struct se_task *task,
        atomic_dec(&dev->execute_tasks);
 }
 
-void transport_remove_task_from_execute_queue(
+static void transport_remove_task_from_execute_queue(
        struct se_task *task,
        struct se_device *dev)
 {
@@ -983,9 +972,8 @@ void transport_dump_dev_state(
                break;
        }
 
-       *bl += sprintf(b + *bl, "  Execute/Left/Max Queue Depth: %d/%d/%d",
-               atomic_read(&dev->execute_tasks), atomic_read(&dev->depth_left),
-               dev->queue_depth);
+       *bl += sprintf(b + *bl, "  Execute/Max Queue Depth: %d/%d",
+               atomic_read(&dev->execute_tasks), dev->queue_depth);
        *bl += sprintf(b + *bl, "  SectorSize: %u  MaxSectors: %u\n",
                dev->se_sub_dev->se_dev_attrib.block_size, dev->se_sub_dev->se_dev_attrib.max_sectors);
        *bl += sprintf(b + *bl, "        ");
@@ -1267,32 +1255,34 @@ static void core_setup_task_attr_emulation(struct se_device *dev)
 static void scsi_dump_inquiry(struct se_device *dev)
 {
        struct t10_wwn *wwn = &dev->se_sub_dev->t10_wwn;
+       char buf[17];
        int i, device_type;
        /*
         * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
         */
-       pr_debug("  Vendor: ");
        for (i = 0; i < 8; i++)
                if (wwn->vendor[i] >= 0x20)
-                       pr_debug("%c", wwn->vendor[i]);
+                       buf[i] = wwn->vendor[i];
                else
-                       pr_debug(" ");
+                       buf[i] = ' ';
+       buf[i] = '\0';
+       pr_debug("  Vendor: %s\n", buf);
 
-       pr_debug("  Model: ");
        for (i = 0; i < 16; i++)
                if (wwn->model[i] >= 0x20)
-                       pr_debug("%c", wwn->model[i]);
+                       buf[i] = wwn->model[i];
                else
-                       pr_debug(" ");
+                       buf[i] = ' ';
+       buf[i] = '\0';
+       pr_debug("  Model: %s\n", buf);
 
-       pr_debug("  Revision: ");
        for (i = 0; i < 4; i++)
                if (wwn->revision[i] >= 0x20)
-                       pr_debug("%c", wwn->revision[i]);
+                       buf[i] = wwn->revision[i];
                else
-                       pr_debug(" ");
-
-       pr_debug("\n");
+                       buf[i] = ' ';
+       buf[i] = '\0';
+       pr_debug("  Revision: %s\n", buf);
 
        device_type = dev->transport->get_device_type(dev);
        pr_debug("  Type:   %s ", scsi_device_type(device_type));
@@ -1340,9 +1330,6 @@ struct se_device *transport_add_device_to_core_hba(
        spin_lock_init(&dev->se_port_lock);
        spin_lock_init(&dev->se_tmr_lock);
        spin_lock_init(&dev->qf_cmd_lock);
-
-       dev->queue_depth        = dev_limits->queue_depth;
-       atomic_set(&dev->depth_left, dev->queue_depth);
        atomic_set(&dev->dev_ordered_id, 0);
 
        se_dev_set_default_attribs(dev, dev_limits);
@@ -1654,6 +1641,81 @@ int transport_handle_cdb_direct(
 }
 EXPORT_SYMBOL(transport_handle_cdb_direct);
 
+/**
+ * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
+ *
+ * @se_cmd: command descriptor to submit
+ * @se_sess: associated se_sess for endpoint
+ * @cdb: pointer to SCSI CDB
+ * @sense: pointer to SCSI sense buffer
+ * @unpacked_lun: unpacked LUN to reference for struct se_lun
+ * @data_length: fabric expected data transfer length
+ * @task_attr: SAM task attribute
+ * @data_dir: DMA data direction
+ * @flags: flags for command submission from target_sc_flags_tables
+ *
+ * This may only be called from process context, and also currently
+ * assumes internal allocation of fabric payload buffer by target-core.
+ **/
+void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
+               unsigned char *cdb, unsigned char *sense, u32 unpacked_lun,
+               u32 data_length, int task_attr, int data_dir, int flags)
+{
+       struct se_portal_group *se_tpg;
+       int rc;
+
+       se_tpg = se_sess->se_tpg;
+       BUG_ON(!se_tpg);
+       BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
+       BUG_ON(in_interrupt());
+       /*
+        * Initialize se_cmd for target operation.  From this point
+        * exceptions are handled by sending exception status via
+        * target_core_fabric_ops->queue_status() callback
+        */
+       transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
+                               data_length, data_dir, task_attr, sense);
+       /*
+        * Obtain struct se_cmd->cmd_kref reference and add new cmd to
+        * se_sess->sess_cmd_list.  A second kref_get here is necessary
+        * for fabrics using TARGET_SCF_ACK_KREF that expect a second
+        * kref_put() to happen during fabric packet acknowledgement.
+        */
+       target_get_sess_cmd(se_sess, se_cmd, (flags & TARGET_SCF_ACK_KREF));
+       /*
+        * Signal bidirectional data payloads to target-core
+        */
+       if (flags & TARGET_SCF_BIDI_OP)
+               se_cmd->se_cmd_flags |= SCF_BIDI;
+       /*
+        * Locate se_lun pointer and attach it to struct se_cmd
+        */
+       if (transport_lookup_cmd_lun(se_cmd, unpacked_lun) < 0) {
+               transport_send_check_condition_and_sense(se_cmd,
+                               se_cmd->scsi_sense_reason, 0);
+               target_put_sess_cmd(se_sess, se_cmd);
+               return;
+       }
+       /*
+        * Sanitize CDBs via transport_generic_cmd_sequencer() and
+        * allocate the necessary tasks to complete the received CDB+data
+        */
+       rc = transport_generic_allocate_tasks(se_cmd, cdb);
+       if (rc != 0) {
+               transport_generic_request_failure(se_cmd);
+               return;
+       }
+       /*
+        * Dispatch se_cmd descriptor to se_lun->lun_se_dev backend
+        * for immediate execution of READs, otherwise wait for
+        * transport_generic_handle_data() to be called for WRITEs
+        * when fabric has filled the incoming buffer.
+        */
+       transport_handle_cdb_direct(se_cmd);
+       return;
+}
+EXPORT_SYMBOL(target_submit_cmd);
+
 /*
  * Used by fabric module frontends defining a TFO->new_cmd_map() caller
  * to  queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD_MAP in order to
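The new target_submit_cmd() export rolls se_cmd initialisation, session command-list tracking, LUN lookup, CDB sequencing and dispatch into a single call for fabric drivers. A minimal caller sketch (hypothetical fabric structure and field names, not part of this commit; MSG_SIMPLE_TAG is from <scsi/scsi_tcq.h>):

static void example_fabric_queue_cmd(struct example_fabric_cmd *fcmd)
{
        /* fcmd->se_sess was set up by the fabric module at login time */
        target_submit_cmd(&fcmd->se_cmd, fcmd->se_sess, fcmd->cdb,
                          fcmd->sense_buf, fcmd->unpacked_lun,
                          fcmd->data_length, MSG_SIMPLE_TAG,
                          fcmd->data_direction, 0 /* no TARGET_SCF_* flags */);
}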
@@ -1920,18 +1982,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)
        spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
 }
 
-static inline int transport_tcq_window_closed(struct se_device *dev)
-{
-       if (dev->dev_tcq_window_closed++ <
-                       PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD) {
-               msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT);
-       } else
-               msleep(PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG);
-
-       wake_up_interruptible(&dev->dev_queue_obj.thread_wq);
-       return 0;
-}
-
 /*
  * Called from Fabric Module context from transport_execute_tasks()
  *
@@ -2014,13 +2064,7 @@ static inline int transport_execute_task_attr(struct se_cmd *cmd)
 static int transport_execute_tasks(struct se_cmd *cmd)
 {
        int add_tasks;
-
-       if (se_dev_check_online(cmd->se_dev) != 0) {
-               cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-               transport_generic_request_failure(cmd);
-               return 0;
-       }
-
+       struct se_device *se_dev = cmd->se_dev;
        /*
         * Call transport_cmd_check_stop() to see if a fabric exception
         * has occurred that prevents execution.
@@ -2034,19 +2078,16 @@ static int transport_execute_tasks(struct se_cmd *cmd)
                if (!add_tasks)
                        goto execute_tasks;
                /*
-                * This calls transport_add_tasks_from_cmd() to handle
-                * HEAD_OF_QUEUE ordering for SAM Task Attribute emulation
-                * (if enabled) in __transport_add_task_to_execute_queue() and
-                * transport_add_task_check_sam_attr().
+                * __transport_execute_tasks() -> __transport_add_tasks_from_cmd()
+                * adds associated se_tasks while holding dev->execute_task_lock
+                * before I/O dispatch to avoid taking the spinlock twice.
                 */
-               transport_add_tasks_from_cmd(cmd);
+               __transport_execute_tasks(se_dev, cmd);
+               return 0;
        }
-       /*
-        * Kick the execution queue for the cmd associated struct se_device
-        * storage object.
-        */
+
 execute_tasks:
-       __transport_execute_tasks(cmd->se_dev);
+       __transport_execute_tasks(se_dev, NULL);
        return 0;
 }
 
@@ -2056,24 +2097,18 @@ execute_tasks:
  *
  * Called from transport_processing_thread()
  */
-static int __transport_execute_tasks(struct se_device *dev)
+static int __transport_execute_tasks(struct se_device *dev, struct se_cmd *new_cmd)
 {
        int error;
        struct se_cmd *cmd = NULL;
        struct se_task *task = NULL;
        unsigned long flags;
 
-       /*
-        * Check if there is enough room in the device and HBA queue to send
-        * struct se_tasks to the selected transport.
-        */
 check_depth:
-       if (!atomic_read(&dev->depth_left))
-               return transport_tcq_window_closed(dev);
-
-       dev->dev_tcq_window_closed = 0;
-
        spin_lock_irq(&dev->execute_task_lock);
+       if (new_cmd != NULL)
+               __transport_add_tasks_from_cmd(new_cmd);
+
        if (list_empty(&dev->execute_task_list)) {
                spin_unlock_irq(&dev->execute_task_lock);
                return 0;
@@ -2083,10 +2118,7 @@ check_depth:
        __transport_remove_task_from_execute_queue(task, dev);
        spin_unlock_irq(&dev->execute_task_lock);
 
-       atomic_dec(&dev->depth_left);
-
        cmd = task->task_se_cmd;
-
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        task->task_flags |= (TF_ACTIVE | TF_SENT);
        atomic_inc(&cmd->t_task_cdbs_sent);
@@ -2107,10 +2139,10 @@ check_depth:
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                atomic_set(&cmd->t_transport_sent, 0);
                transport_stop_tasks_for_cmd(cmd);
-               atomic_inc(&dev->depth_left);
                transport_generic_request_failure(cmd);
        }
 
+       new_cmd = NULL;
        goto check_depth;
 
        return 0;
@@ -2351,7 +2383,7 @@ static int transport_get_sense_data(struct se_cmd *cmd)
 
        list_for_each_entry_safe(task, task_tmp,
                                &cmd->t_task_list, t_list) {
-               if (!task->task_sense)
+               if (!(task->task_flags & TF_HAS_SENSE))
                        continue;
 
                if (!dev->transport->get_sense_buffer) {
@@ -2665,7 +2697,7 @@ static int transport_generic_cmd_sequencer(
                        cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 
                        if (target_check_write_same_discard(&cdb[10], dev) < 0)
-                               goto out_invalid_cdb_field;
+                               goto out_unsupported_cdb;
                        if (!passthrough)
                                cmd->execute_task = target_emulate_write_same;
                        break;
@@ -2948,7 +2980,7 @@ static int transport_generic_cmd_sequencer(
                cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 
                if (target_check_write_same_discard(&cdb[1], dev) < 0)
-                       goto out_invalid_cdb_field;
+                       goto out_unsupported_cdb;
                if (!passthrough)
                        cmd->execute_task = target_emulate_write_same;
                break;
@@ -2971,7 +3003,7 @@ static int transport_generic_cmd_sequencer(
                 * of byte 1 bit 3 UNMAP instead of original reserved field
                 */
                if (target_check_write_same_discard(&cdb[1], dev) < 0)
-                       goto out_invalid_cdb_field;
+                       goto out_unsupported_cdb;
                if (!passthrough)
                        cmd->execute_task = target_emulate_write_same;
                break;
@@ -3053,11 +3085,6 @@ static int transport_generic_cmd_sequencer(
             (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)))
                goto out_unsupported_cdb;
 
-       /* Let's limit control cdbs to a page, for simplicity's sake. */
-       if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
-           size > PAGE_SIZE)
-               goto out_invalid_cdb_field;
-
        transport_set_supported_SAM_opcode(cmd);
        return ret;
 
@@ -3345,6 +3372,32 @@ static inline void transport_free_pages(struct se_cmd *cmd)
        cmd->t_bidi_data_nents = 0;
 }
 
+/**
+ * transport_release_cmd - free a command
+ * @cmd:       command to free
+ *
+ * This routine unconditionally frees a command, and reference counting
+ * or list removal must be done in the caller.
+ */
+static void transport_release_cmd(struct se_cmd *cmd)
+{
+       BUG_ON(!cmd->se_tfo);
+
+       if (cmd->se_tmr_req)
+               core_tmr_release_req(cmd->se_tmr_req);
+       if (cmd->t_task_cdb != cmd->__t_task_cdb)
+               kfree(cmd->t_task_cdb);
+       /*
+        * If this cmd has been setup with target_get_sess_cmd(), drop
+        * the kref and call ->release_cmd() in kref callback.
+        */
+        if (cmd->check_release != 0) {
+               target_put_sess_cmd(cmd->se_sess, cmd);
+               return;
+       }
+       cmd->se_tfo->release_cmd(cmd);
+}
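
transport_release_cmd() moves here and becomes static: with command registration now based on se_cmd->cmd_kref, fabric drivers no longer call it directly, and the old check_release_cmd() callback indirection disappears. The release decision it implements, as a sketch:

	/*
	 * Registered via target_get_sess_cmd()  ->  drop the kref; the kref release
	 *                                            callback calls se_tfo->release_cmd()
	 * Never registered (check_release == 0) ->  call se_tfo->release_cmd() directly
	 */
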
+
 /**
  * transport_put_cmd - release a reference to a command
  * @cmd:       command to release
@@ -3435,9 +3488,11 @@ int transport_generic_map_mem_to_cmd(
 }
 EXPORT_SYMBOL(transport_generic_map_mem_to_cmd);
 
-void *transport_kmap_first_data_page(struct se_cmd *cmd)
+void *transport_kmap_data_sg(struct se_cmd *cmd)
 {
        struct scatterlist *sg = cmd->t_data_sg;
+       struct page **pages;
+       int i;
 
        BUG_ON(!sg);
        /*
@@ -3445,15 +3500,41 @@ void *transport_kmap_first_data_page(struct se_cmd *cmd)
         * tcm_loop who may be using a contig buffer from the SCSI midlayer for
         * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
         */
-       return kmap(sg_page(sg)) + sg->offset;
+       if (!cmd->t_data_nents)
+               return NULL;
+       else if (cmd->t_data_nents == 1)
+               return kmap(sg_page(sg)) + sg->offset;
+
+       /* >1 page. use vmap */
+       pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL);
+       if (!pages)
+               return NULL;
+
+       /* convert sg[] to pages[] */
+       for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
+               pages[i] = sg_page(sg);
+       }
+
+       cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
+       kfree(pages);
+       if (!cmd->t_data_vmap)
+               return NULL;
+
+       return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
 }
-EXPORT_SYMBOL(transport_kmap_first_data_page);
+EXPORT_SYMBOL(transport_kmap_data_sg);
 
-void transport_kunmap_first_data_page(struct se_cmd *cmd)
+void transport_kunmap_data_sg(struct se_cmd *cmd)
 {
-       kunmap(sg_page(cmd->t_data_sg));
+       if (!cmd->t_data_nents)
+               return;
+       else if (cmd->t_data_nents == 1)
+               kunmap(sg_page(cmd->t_data_sg));
+
+       vunmap(cmd->t_data_vmap);
+       cmd->t_data_vmap = NULL;
 }
-EXPORT_SYMBOL(transport_kunmap_first_data_page);
+EXPORT_SYMBOL(transport_kunmap_data_sg);
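
transport_kmap_data_sg() replaces the single-page kmap helper: one SG entry is still kmap()ed, while a multi-entry list is stitched into a single virtual range with vmap(), so callers can treat the payload as one flat buffer. A hedged usage sketch (fill_response() and the payload byte are illustrative only):

	static int fill_response(struct se_cmd *cmd)
	{
		unsigned char *buf;

		buf = transport_kmap_data_sg(cmd);	/* kmap for one entry, vmap otherwise */
		if (!buf)
			return -ENOMEM;

		memset(buf, 0, cmd->data_length);
		buf[0] = 0x00;				/* illustrative payload byte */

		transport_kunmap_data_sg(cmd);		/* kunmap or vunmap to match */
		return 0;
	}
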
 
 static int
 transport_generic_get_mem(struct se_cmd *cmd)
@@ -3461,6 +3542,7 @@ transport_generic_get_mem(struct se_cmd *cmd)
        u32 length = cmd->data_length;
        unsigned int nents;
        struct page *page;
+       gfp_t zero_flag;
        int i = 0;
 
        nents = DIV_ROUND_UP(length, PAGE_SIZE);
@@ -3471,9 +3553,11 @@ transport_generic_get_mem(struct se_cmd *cmd)
        cmd->t_data_nents = nents;
        sg_init_table(cmd->t_data_sg, nents);
 
+       zero_flag = cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB ? 0 : __GFP_ZERO;
+
        while (length) {
                u32 page_len = min_t(u32, length, PAGE_SIZE);
-               page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+               page = alloc_page(GFP_KERNEL | zero_flag);
                if (!page)
                        goto out;
 
@@ -3701,6 +3785,11 @@ transport_allocate_control_task(struct se_cmd *cmd)
        struct se_task *task;
        unsigned long flags;
 
+       /* Workaround for handling zero-length control CDBs */
+       if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) &&
+           !cmd->data_length)
+               return 0;
+
        task = transport_generic_get_task(cmd, cmd->data_direction);
        if (!task)
                return -ENOMEM;
@@ -3772,6 +3861,14 @@ int transport_generic_new_cmd(struct se_cmd *cmd)
        else if (!task_cdbs && (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)) {
                cmd->t_state = TRANSPORT_COMPLETE;
                atomic_set(&cmd->t_transport_active, 1);
+
+               if (cmd->t_task_cdb[0] == REQUEST_SENSE) {
+                       u8 ua_asc = 0, ua_ascq = 0;
+
+                       core_scsi3_ua_clear_for_request_sense(cmd,
+                                       &ua_asc, &ua_ascq);
+               }
+
                INIT_WORK(&cmd->work, target_complete_ok_work);
                queue_work(target_completion_wq, &cmd->work);
                return 0;
@@ -3870,33 +3967,6 @@ queue_full:
        return 0;
 }
 
-/**
- * transport_release_cmd - free a command
- * @cmd:       command to free
- *
- * This routine unconditionally frees a command, and reference counting
- * or list removal must be done in the caller.
- */
-void transport_release_cmd(struct se_cmd *cmd)
-{
-       BUG_ON(!cmd->se_tfo);
-
-       if (cmd->se_tmr_req)
-               core_tmr_release_req(cmd->se_tmr_req);
-       if (cmd->t_task_cdb != cmd->__t_task_cdb)
-               kfree(cmd->t_task_cdb);
-       /*
-        * Check if target_wait_for_sess_cmds() is expecting to
-        * release se_cmd directly here..
-        */
-       if (cmd->check_release != 0 && cmd->se_tfo->check_release_cmd)
-               if (cmd->se_tfo->check_release_cmd(cmd) != 0)
-                       return;
-
-       cmd->se_tfo->release_cmd(cmd);
-}
-EXPORT_SYMBOL(transport_release_cmd);
-
 void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
 {
        if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
@@ -3923,11 +3993,22 @@ EXPORT_SYMBOL(transport_generic_free_cmd);
 /* target_get_sess_cmd - Add command to active ->sess_cmd_list
  * @se_sess:   session to reference
  * @se_cmd:    command descriptor to add
+ * @ack_kref:  Signal that fabric will perform an ack target_put_sess_cmd()
  */
-void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd,
+                       bool ack_kref)
 {
        unsigned long flags;
 
+       kref_init(&se_cmd->cmd_kref);
+       /*
+        * Add a second kref if the fabric caller is expecting to handle
+        * fabric acknowledgement that requires two target_put_sess_cmd()
+        * invocations before se_cmd descriptor release.
+        */
+       if (ack_kref == true)
+               kref_get(&se_cmd->cmd_kref);
+
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        list_add_tail(&se_cmd->se_cmd_list, &se_sess->sess_cmd_list);
        se_cmd->check_release = 1;
@@ -3935,30 +4016,36 @@ void target_get_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
 }
 EXPORT_SYMBOL(target_get_sess_cmd);
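
target_get_sess_cmd() now initialises se_cmd->cmd_kref and, when ack_kref is true, takes a second reference for fabrics that acknowledge a command after the target core has finished with it. The intended pairing, sketched from the comments above (only the target core APIs are real names here):

	/* Submission path: one kref for the target core, one for the fabric ack. */
	target_get_sess_cmd(se_sess, se_cmd, true);

	/* The target core release path (transport_release_cmd() above) drops one ... */
	target_put_sess_cmd(se_sess, se_cmd);

	/* ... and the fabric drops the last one once its acknowledgement completes,
	 * which frees the descriptor via the kref release callback. */
	target_put_sess_cmd(se_sess, se_cmd);
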
 
-/* target_put_sess_cmd - Check for active I/O shutdown or list delete
- * @se_sess:   session to reference
- * @se_cmd:    command descriptor to drop
- */
-int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+static void target_release_cmd_kref(struct kref *kref)
 {
+       struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
+       struct se_session *se_sess = se_cmd->se_sess;
        unsigned long flags;
 
        spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
        if (list_empty(&se_cmd->se_cmd_list)) {
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
                WARN_ON(1);
-               return 0;
+               return;
        }
-
        if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
                spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
                complete(&se_cmd->cmd_wait_comp);
-               return 1;
+               return;
        }
        list_del(&se_cmd->se_cmd_list);
        spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
 
-       return 0;
+       se_cmd->se_tfo->release_cmd(se_cmd);
+}
+
+/* target_put_sess_cmd - Check for active I/O shutdown via kref_put
+ * @se_sess:   session to reference
+ * @se_cmd:    command descriptor to drop
+ */
+int target_put_sess_cmd(struct se_session *se_sess, struct se_cmd *se_cmd)
+{
+       return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
 }
 EXPORT_SYMBOL(target_put_sess_cmd);
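
target_put_sess_cmd() is now just a kref_put(): the list removal and the final se_tfo->release_cmd() call happen in the release callback, so the command is freed exactly once no matter how many paths drop references. The underlying pattern, sketched generically:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct foo {
		struct kref kref;
		/* ... payload ... */
	};

	static void foo_release(struct kref *kref)
	{
		struct foo *f = container_of(kref, struct foo, kref);
		kfree(f);			/* runs exactly once, on the final put */
	}

	/* each additional holder: */
	kref_get(&f->kref);
	/* each holder, when done (returns 1 iff this put released the object): */
	kref_put(&f->kref, foo_release);
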
 
@@ -4174,7 +4261,7 @@ check_cond:
 
 static int transport_clear_lun_thread(void *p)
 {
-       struct se_lun *lun = (struct se_lun *)p;
+       struct se_lun *lun = p;
 
        __transport_clear_lun_from_sessions(lun);
        complete(&lun->lun_shutdown_comp);
@@ -4353,6 +4440,7 @@ int transport_send_check_condition_and_sense(
        case TCM_NON_EXISTENT_LUN:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* LOGICAL UNIT NOT SUPPORTED */
@@ -4362,6 +4450,7 @@ int transport_send_check_condition_and_sense(
        case TCM_SECTOR_COUNT_TOO_MANY:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID COMMAND OPERATION CODE */
@@ -4370,6 +4459,7 @@ int transport_send_check_condition_and_sense(
        case TCM_UNKNOWN_MODE_PAGE:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN CDB */
@@ -4378,6 +4468,7 @@ int transport_send_check_condition_and_sense(
        case TCM_CHECK_CONDITION_ABORT_CMD:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* BUS DEVICE RESET FUNCTION OCCURRED */
@@ -4387,6 +4478,7 @@ int transport_send_check_condition_and_sense(
        case TCM_INCORRECT_AMOUNT_OF_DATA:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* WRITE ERROR */
@@ -4397,22 +4489,25 @@ int transport_send_check_condition_and_sense(
        case TCM_INVALID_CDB_FIELD:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
-               /* ABORTED COMMAND */
-               buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+               /* ILLEGAL REQUEST */
+               buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN CDB */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x24;
                break;
        case TCM_INVALID_PARAMETER_LIST:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
-               /* ABORTED COMMAND */
-               buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
+               /* ILLEGAL REQUEST */
+               buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* INVALID FIELD IN PARAMETER LIST */
                buffer[offset+SPC_ASC_KEY_OFFSET] = 0x26;
                break;
        case TCM_UNEXPECTED_UNSOLICITED_DATA:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* WRITE ERROR */
@@ -4423,6 +4518,7 @@ int transport_send_check_condition_and_sense(
        case TCM_SERVICE_CRC_ERROR:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* PROTOCOL SERVICE CRC ERROR */
@@ -4433,6 +4529,7 @@ int transport_send_check_condition_and_sense(
        case TCM_SNACK_REJECTED:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ABORTED COMMAND */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ABORTED_COMMAND;
                /* READ ERROR */
@@ -4443,6 +4540,7 @@ int transport_send_check_condition_and_sense(
        case TCM_WRITE_PROTECTED:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* DATA PROTECT */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = DATA_PROTECT;
                /* WRITE PROTECTED */
@@ -4451,6 +4549,7 @@ int transport_send_check_condition_and_sense(
        case TCM_CHECK_CONDITION_UNIT_ATTENTION:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* UNIT ATTENTION */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;
                core_scsi3_ua_for_check_condition(cmd, &asc, &ascq);
@@ -4460,6 +4559,7 @@ int transport_send_check_condition_and_sense(
        case TCM_CHECK_CONDITION_NOT_READY:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* Not Ready */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = NOT_READY;
                transport_get_sense_codes(cmd, &asc, &ascq);
@@ -4470,6 +4570,7 @@ int transport_send_check_condition_and_sense(
        default:
                /* CURRENT ERROR */
                buffer[offset] = 0x70;
+               buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
                /* ILLEGAL REQUEST */
                buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
                /* LOGICAL UNIT COMMUNICATION FAILURE */
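
Each CHECK CONDITION case above now also fills in the ADDITIONAL SENSE LENGTH byte. For the fixed-format sense data built here, the value 10 says that ten more bytes follow byte 7 (an 18-byte payload), so initiators that honour the length field actually read the ASC/ASCQ bytes. The layout being assembled, summarised as a sketch (offsets per the SPC_* constants used in this function):

	/*
	 * Fixed-format sense data, 18 bytes:
	 *   byte 0        0x70  (current error, fixed format)
	 *   byte 2        sense key                 (SPC_SENSE_KEY_OFFSET)
	 *   byte 7        additional sense length 10 (SPC_ADD_SENSE_LEN_OFFSET)
	 *   bytes 12, 13  ASC, ASCQ                 (SPC_ASC_KEY_OFFSET and the byte after it)
	 */
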
@@ -4545,11 +4646,7 @@ void transport_send_task_abort(struct se_cmd *cmd)
        cmd->se_tfo->queue_status(cmd);
 }
 
-/*     transport_generic_do_tmr():
- *
- *
- */
-int transport_generic_do_tmr(struct se_cmd *cmd)
+static int transport_generic_do_tmr(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
        struct se_tmr_req *tmr = cmd->se_tmr_req;
@@ -4597,7 +4694,7 @@ static int transport_processing_thread(void *param)
 {
        int ret;
        struct se_cmd *cmd;
-       struct se_device *dev = (struct se_device *) param;
+       struct se_device *dev = param;
 
        while (!kthread_should_stop()) {
                ret = wait_event_interruptible(dev->dev_queue_obj.thread_wq,
@@ -4607,8 +4704,6 @@ static int transport_processing_thread(void *param)
                        goto out;
 
 get_cmd:
-               __transport_execute_tasks(dev);
-
                cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj);
                if (!cmd)
                        continue;
index 50a480db7a66df3d619e103060ca6c1be301f9b4..3e12f6bcfa10ba6d502aab352a6ea474839a1dd0 100644 (file)
 #include <scsi/scsi_cmnd.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_device.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 
+#include "target_core_internal.h"
 #include "target_core_alua.h"
-#include "target_core_hba.h"
 #include "target_core_pr.h"
 #include "target_core_ua.h"
 
index 71fc9cea5dc9ba120b725a4e0f6bf94c879d1340..9e7e26c74c7944e4b20d93eaaf20f7490068df57 100644 (file)
 #include <scsi/fc_encode.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
-#include <target/target_core_tmr.h>
 #include <target/configfs_macros.h>
 
 #include "tcm_fc.h"
@@ -367,6 +363,11 @@ static void ft_send_tm(struct ft_cmd *cmd)
        struct ft_sess *sess;
        u8 tm_func;
 
+       transport_init_se_cmd(&cmd->se_cmd, &ft_configfs->tf_ops,
+                       cmd->sess->se_sess, 0, DMA_NONE, 0,
+                       &cmd->ft_sense_buffer[0]);
+       target_get_sess_cmd(cmd->sess->se_sess, &cmd->se_cmd, false);
+
        fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
 
        switch (fcp->fc_tm_flags) {
@@ -420,7 +421,6 @@ static void ft_send_tm(struct ft_cmd *cmd)
                        sess = cmd->sess;
                        transport_send_check_condition_and_sense(&cmd->se_cmd,
                                cmd->se_cmd.scsi_sense_reason, 0);
-                       transport_generic_free_cmd(&cmd->se_cmd, 0);
                        ft_sess_put(sess);
                        return;
                }
@@ -536,12 +536,10 @@ static void ft_send_work(struct work_struct *work)
 {
        struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);
        struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);
-       struct se_cmd *se_cmd;
        struct fcp_cmnd *fcp;
        int data_dir = 0;
        u32 data_len;
        int task_attr;
-       int ret;
 
        fcp = fc_frame_payload_get(cmd->req_frame, sizeof(*fcp));
        if (!fcp)
@@ -591,15 +589,6 @@ static void ft_send_work(struct work_struct *work)
                data_len = ntohl(fcp->fc_dl);
                cmd->cdb = fcp->fc_cdb;
        }
-
-       se_cmd = &cmd->se_cmd;
-       /*
-        * Initialize struct se_cmd descriptor from target_core_mod
-        * infrastructure
-        */
-       transport_init_se_cmd(se_cmd, &ft_configfs->tf_ops, cmd->sess->se_sess,
-                             data_len, data_dir, task_attr,
-                             &cmd->ft_sense_buffer[0]);
        /*
         * Check for FCP task management flags
         */
@@ -607,39 +596,16 @@ static void ft_send_work(struct work_struct *work)
                ft_send_tm(cmd);
                return;
        }
-
        fc_seq_exch(cmd->seq)->lp->tt.seq_set_resp(cmd->seq, ft_recv_seq, cmd);
-
        cmd->lun = scsilun_to_int((struct scsi_lun *)fcp->fc_lun);
-       ret = transport_lookup_cmd_lun(&cmd->se_cmd, cmd->lun);
-       if (ret < 0) {
-               ft_dump_cmd(cmd, __func__);
-               transport_send_check_condition_and_sense(&cmd->se_cmd,
-                       cmd->se_cmd.scsi_sense_reason, 0);
-               return;
-       }
-
-       ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
-
-       pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
-       ft_dump_cmd(cmd, __func__);
-
-       if (ret == -ENOMEM) {
-               transport_send_check_condition_and_sense(se_cmd,
-                               TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
-               transport_generic_free_cmd(se_cmd, 0);
-               return;
-       }
-       if (ret == -EINVAL) {
-               if (se_cmd->se_cmd_flags & SCF_SCSI_RESERVATION_CONFLICT)
-                       ft_queue_status(se_cmd);
-               else
-                       transport_send_check_condition_and_sense(se_cmd,
-                                       se_cmd->scsi_sense_reason, 0);
-               transport_generic_free_cmd(se_cmd, 0);
-               return;
-       }
-       transport_handle_cdb_direct(se_cmd);
+       /*
+        * Use a single se_cmd->cmd_kref as we expect to release se_cmd
+        * directly from ft_check_stop_free callback in response path.
+        */
+       target_submit_cmd(&cmd->se_cmd, cmd->sess->se_sess, cmd->cdb,
+                               &cmd->ft_sense_buffer[0], cmd->lun, data_len,
+                               task_attr, data_dir, 0);
+       pr_debug("r_ctl %x alloc target_submit_cmd\n", fh->fh_r_ctl);
        return;
 
 err:
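
ft_send_work() now hands the command to target_submit_cmd(), which bundles what the deleted block did by hand: se_cmd initialisation, session kref registration, LUN lookup, CDB/task allocation and the final hand-off, with setup failures handled inside the core rather than in each fabric. The call as a fabric makes it, sketched with a hypothetical my_cmd container (argument order as in the hunk above):

	/* my_cmd and its fields are illustrative; the API is the one used above. */
	target_submit_cmd(&my_cmd->se_cmd, my_cmd->sess->se_sess, my_cmd->cdb,
			  my_cmd->sense_buffer, my_cmd->lun, my_cmd->data_len,
			  my_cmd->task_attr, my_cmd->data_dir, 0);
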
index 9402b7387cac570d91ff6001a885daed3bbd8b6d..73852fbc857b4b5aee0622ee7b55fb5c0880291b 100644 (file)
 #include <scsi/libfc.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_fabric_configfs.h>
-#include <target/target_core_fabric_lib.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
index 1369b1cb103d7d1e0ad4053322e599713445e9b7..d8cabc21036d3ec1040168b9940ab013c55b4a0b 100644 (file)
 #include <scsi/fc_encode.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
index 326921385aff973c17ce4f8598ba791c0fa94fc2..4c0507cf808c08662609765b41488a35c461add7 100644 (file)
 #include <scsi/libfc.h>
 
 #include <target/target_core_base.h>
-#include <target/target_core_transport.h>
-#include <target/target_core_fabric_ops.h>
-#include <target/target_core_device.h>
-#include <target/target_core_tpg.h>
+#include <target/target_core_fabric.h>
 #include <target/target_core_configfs.h>
 #include <target/configfs_macros.h>
 
index dd9a5743fa991f3fb789d2e17d12563deaead0cd..220ce7e31cf50faa89fd35a46946111b523da0d5 100644 (file)
@@ -1304,7 +1304,7 @@ static struct genl_multicast_group thermal_event_mcgrp = {
        .name = THERMAL_GENL_MCAST_GROUP_NAME,
 };
 
-int generate_netlink_event(u32 orig, enum events event)
+int thermal_generate_netlink_event(u32 orig, enum events event)
 {
        struct sk_buff *skb;
        struct nlattr *attr;
@@ -1363,7 +1363,7 @@ int generate_netlink_event(u32 orig, enum events event)
 
        return result;
 }
-EXPORT_SYMBOL(generate_netlink_event);
+EXPORT_SYMBOL(thermal_generate_netlink_event);
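
The netlink event helper gains a thermal_ prefix so the exported symbol no longer claims a generic name in the global kernel namespace; the parameters are unchanged, so callers only need the rename. Sketch of an updated call site (orig and event stand for the caller's existing values):

	int ret = thermal_generate_netlink_event(orig, event);
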
 
 static int genetlink_init(void)
 {
diff --git a/drivers/tty/serial/8250/Kconfig b/drivers/tty/serial/8250/Kconfig
new file mode 100644 (file)
index 0000000..591f801
--- /dev/null
@@ -0,0 +1,280 @@
+#
+# The 8250/16550 serial drivers.  You shouldn't be in this list unless
+# you somehow have an implicit or explicit dependency on SERIAL_8250.
+#
+
+config SERIAL_8250
+       tristate "8250/16550 and compatible serial support"
+       select SERIAL_CORE
+       ---help---
+         This selects whether you want to include the driver for the standard
+         serial ports.  The standard answer is Y.  People who might say N
+         here are those that are setting up dedicated Ethernet WWW/FTP
+         servers, or users that have one of the various bus mice instead of a
+         serial mouse and don't intend to use their machine's standard serial
+         port for anything.  (Note that the Cyclades and Stallion multi
+         serial port drivers do not need this driver built in for them to
+         work.)
+
+         To compile this driver as a module, choose M here: the
+         module will be called 8250.
+         [WARNING: Do not compile this driver as a module if you are using
+         non-standard serial ports, since the configuration information will
+         be lost when the driver is unloaded.  This limitation may be lifted
+         in the future.]
+
+         BTW1: If you have a mouseman serial mouse which is not recognized by
+         the X window system, try running gpm first.
+
+         BTW2: If you intend to use a software modem (also called Winmodem)
+         under Linux, forget it.  These modems are crippled and require
+         proprietary drivers which are only available under Windows.
+
+         Most people will say Y or M here, so that they can use serial mice,
+         modems and similar devices connecting to the standard serial ports.
+
+config SERIAL_8250_CONSOLE
+       bool "Console on 8250/16550 and compatible serial port"
+       depends on SERIAL_8250=y
+       select SERIAL_CORE_CONSOLE
+       ---help---
+         If you say Y here, it will be possible to use a serial port as the
+         system console (the system console is the device which receives all
+         kernel messages and warnings and which allows logins in single user
+         mode). This could be useful if some terminal or printer is connected
+         to that serial port.
+
+         Even if you say Y here, the currently visible virtual console
+         (/dev/tty0) will still be used as the system console by default, but
+         you can alter that using a kernel command line option such as
+         "console=ttyS1". (Try "man bootparam" or see the documentation of
+         your boot loader (grub or lilo or loadlin) about how to pass options
+         to the kernel at boot time.)
+
+         If you don't have a VGA card installed and you say Y here, the
+         kernel will automatically use the first serial line, /dev/ttyS0, as
+         system console.
+
+         You can set that using a kernel command line option such as
+         "console=uart8250,io,0x3f8,9600n8"
+         "console=uart8250,mmio,0xff5e0000,115200n8".
+         and it will switch to normal serial console when the corresponding
+         port is ready.
+         "earlycon=uart8250,io,0x3f8,9600n8"
+         "earlycon=uart8250,mmio,0xff5e0000,115200n8".
+         it will not only setup early console.
+
+         If unsure, say N.
+
+config FIX_EARLYCON_MEM
+       bool
+       depends on X86
+       default y
+
+config SERIAL_8250_GSC
+       tristate
+       depends on SERIAL_8250 && GSC
+       default SERIAL_8250
+
+config SERIAL_8250_PCI
+       tristate "8250/16550 PCI device support" if EXPERT
+       depends on SERIAL_8250 && PCI
+       default SERIAL_8250
+       help
+         This builds standard PCI serial support. You may be able to
+         disable this feature if you only need legacy serial support.
+         Saves about 9K.
+
+config SERIAL_8250_PNP
+       tristate "8250/16550 PNP device support" if EXPERT
+       depends on SERIAL_8250 && PNP
+       default SERIAL_8250
+       help
+         This builds standard PNP serial support. You may be able to
+         disable this feature if you only need legacy serial support.
+
+config SERIAL_8250_HP300
+       tristate
+       depends on SERIAL_8250 && HP300
+       default SERIAL_8250
+
+config SERIAL_8250_CS
+       tristate "8250/16550 PCMCIA device support"
+       depends on PCMCIA && SERIAL_8250
+       ---help---
+         Say Y here to enable support for 16-bit PCMCIA serial devices,
+         including serial port cards, modems, and the modem functions of
+         multi-function Ethernet/modem cards. (PCMCIA- or PC-cards are
+         credit-card size devices often used with laptops.)
+
+         To compile this driver as a module, choose M here: the
+         module will be called serial_cs.
+
+         If unsure, say N.
+
+config SERIAL_8250_NR_UARTS
+       int "Maximum number of 8250/16550 serial ports"
+       depends on SERIAL_8250
+       default "4"
+       help
+         Set this to the number of serial ports you want the driver
+         to support.  This includes any ports discovered via ACPI or
+         PCI enumeration and any ports that may be added at run-time
+         via hot-plug, or any ISA multi-port serial cards.
+
+config SERIAL_8250_RUNTIME_UARTS
+       int "Number of 8250/16550 serial ports to register at runtime"
+       depends on SERIAL_8250
+       range 0 SERIAL_8250_NR_UARTS
+       default "4"
+       help
+         Set this to the maximum number of serial ports you want
+         the kernel to register at boot time.  This can be overridden
+         with the module parameter "nr_uarts", or boot-time parameter
+         8250.nr_uarts
+
+config SERIAL_8250_EXTENDED
+       bool "Extended 8250/16550 serial driver options"
+       depends on SERIAL_8250
+       help
+         If you wish to use any non-standard features of the standard "dumb"
+         driver, say Y here. This includes HUB6 support, shared serial
+         interrupts, special multiport support, support for more than the
+         four COM 1/2/3/4 boards, etc.
+
+         Note that the answer to this question won't directly affect the
+         kernel: saying N will just cause the configurator to skip all
+         the questions about serial driver options. If unsure, say N.
+
+config SERIAL_8250_MANY_PORTS
+       bool "Support more than 4 legacy serial ports"
+       depends on SERIAL_8250_EXTENDED && !IA64
+       help
+         Say Y here if you have dumb serial boards other than the four
+         standard COM 1/2/3/4 ports. This may happen if you have an AST
+         FourPort, Accent Async, Boca (read the Boca mini-HOWTO, available
+         from <http://www.tldp.org/docs.html#howto>), or other custom
+         serial port hardware which acts similar to standard serial port
+         hardware. If you only use the standard COM 1/2/3/4 ports, you can
+         say N here to save some memory. You can also say Y if you have an
+         "intelligent" multiport card such as Cyclades, Digiboards, etc.
+
+#
+# Multi-port serial cards
+#
+
+config SERIAL_8250_FOURPORT
+       tristate "Support Fourport cards"
+       depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+       help
+         Say Y here if you have an AST FourPort serial board.
+
+         To compile this driver as a module, choose M here: the module
+         will be called 8250_fourport.
+
+config SERIAL_8250_ACCENT
+       tristate "Support Accent cards"
+       depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+       help
+         Say Y here if you have an Accent Async serial board.
+
+         To compile this driver as a module, choose M here: the module
+         will be called 8250_accent.
+
+config SERIAL_8250_BOCA
+       tristate "Support Boca cards"
+       depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+       help
+         Say Y here if you have a Boca serial board.  Please read the Boca
+         mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>
+
+         To compile this driver as a module, choose M here: the module
+         will be called 8250_boca.
+
+config SERIAL_8250_EXAR_ST16C554
+       tristate "Support Exar ST16C554/554D Quad UART"
+       depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+       help
+         The Uplogix Envoy TU301 uses this Exar Quad UART.  If you are
+         tinkering with your Envoy TU301, or have a machine with this UART,
+         say Y here.
+
+         To compile this driver as a module, choose M here: the module
+         will be called 8250_exar_st16c554.
+
+config SERIAL_8250_HUB6
+       tristate "Support Hub6 cards"
+       depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
+       help
+         Say Y here if you have a HUB6 serial board.
+
+         To compile this driver as a module, choose M here: the module
+         will be called 8250_hub6.
+
+#
+# Misc. options/drivers.
+#
+
+config SERIAL_8250_SHARE_IRQ
+       bool "Support for sharing serial interrupts"
+       depends on SERIAL_8250_EXTENDED
+       help
+         Some serial boards have hardware support which allows multiple dumb
+         serial ports on the same board to share a single IRQ. To enable
+         support for this in the serial driver, say Y here.
+
+config SERIAL_8250_DETECT_IRQ
+       bool "Autodetect IRQ on standard ports (unsafe)"
+       depends on SERIAL_8250_EXTENDED
+       help
+         Say Y here if you want the kernel to try to guess which IRQ
+         to use for your serial port.
+
+         This is considered unsafe; it is far better to configure the IRQ in
+         a boot script using the setserial command.
+
+         If unsure, say N.
+
+config SERIAL_8250_RSA
+       bool "Support RSA serial ports"
+       depends on SERIAL_8250_EXTENDED
+       help
+         ::: To be written :::
+
+config SERIAL_8250_MCA
+       tristate "Support 8250-type ports on MCA buses"
+       depends on SERIAL_8250 != n && MCA
+       help
+         Say Y here if you have a MCA serial ports.
+
+         To compile this driver as a module, choose M here: the module
+         will be called 8250_mca.
+
+config SERIAL_8250_ACORN
+       tristate "Acorn expansion card serial port support"
+       depends on ARCH_ACORN && SERIAL_8250
+       help
+         If you have an Atomwide Serial card or Serial Port card for an Acorn
+         system, say Y to this option.  The driver can handle 1, 2, or 3 port
+         cards.  If unsure, say N.
+
+config SERIAL_8250_RM9K
+       bool "Support for MIPS RM9xxx integrated serial port"
+       depends on SERIAL_8250 != n && SERIAL_RM9000
+       select SERIAL_8250_SHARE_IRQ
+       help
+         Selecting this option will add support for the integrated serial
+         port hardware found on MIPS RM9122 and similar processors.
+         If unsure, say N.
+
+config SERIAL_8250_FSL
+       bool
+       depends on SERIAL_8250_CONSOLE && PPC_UDBG_16550
+       default PPC
+
+config SERIAL_8250_DW
+       tristate "Support for Synopsys DesignWare 8250 quirks"
+       depends on SERIAL_8250 && OF
+       help
+         Selecting this option will enable handling of the extra features
+         present in the Synopsys DesignWare APB UART.
diff --git a/drivers/tty/serial/8250/Makefile b/drivers/tty/serial/8250/Makefile
new file mode 100644 (file)
index 0000000..867bba7
--- /dev/null
@@ -0,0 +1,20 @@
+#
+# Makefile for the 8250 serial device drivers.
+#
+
+obj-$(CONFIG_SERIAL_8250)              += 8250.o
+obj-$(CONFIG_SERIAL_8250_PNP)          += 8250_pnp.o
+obj-$(CONFIG_SERIAL_8250_GSC)          += 8250_gsc.o
+obj-$(CONFIG_SERIAL_8250_PCI)          += 8250_pci.o
+obj-$(CONFIG_SERIAL_8250_HP300)                += 8250_hp300.o
+obj-$(CONFIG_SERIAL_8250_CS)           += serial_cs.o
+obj-$(CONFIG_SERIAL_8250_ACORN)                += 8250_acorn.o
+obj-$(CONFIG_SERIAL_8250_CONSOLE)      += 8250_early.o
+obj-$(CONFIG_SERIAL_8250_FOURPORT)     += 8250_fourport.o
+obj-$(CONFIG_SERIAL_8250_ACCENT)       += 8250_accent.o
+obj-$(CONFIG_SERIAL_8250_BOCA)         += 8250_boca.o
+obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554)        += 8250_exar_st16c554.o
+obj-$(CONFIG_SERIAL_8250_HUB6)         += 8250_hub6.o
+obj-$(CONFIG_SERIAL_8250_MCA)          += 8250_mca.o
+obj-$(CONFIG_SERIAL_8250_FSL)          += 8250_fsl.o
+obj-$(CONFIG_SERIAL_8250_DW)           += 8250_dw.o
index aca2386c5ef10261e74bfb66b2461546c8206150..2de99248dfaee06329f8a4f49505c9db66f7adf1 100644 (file)
@@ -5,279 +5,7 @@
 menu "Serial drivers"
        depends on HAS_IOMEM
 
-#
-# The new 8250/16550 serial drivers
-config SERIAL_8250
-       tristate "8250/16550 and compatible serial support"
-       select SERIAL_CORE
-       ---help---
-         This selects whether you want to include the driver for the standard
-         serial ports.  The standard answer is Y.  People who might say N
-         here are those that are setting up dedicated Ethernet WWW/FTP
-         servers, or users that have one of the various bus mice instead of a
-         serial mouse and don't intend to use their machine's standard serial
-         port for anything.  (Note that the Cyclades and Stallion multi
-         serial port drivers do not need this driver built in for them to
-         work.)
-
-         To compile this driver as a module, choose M here: the
-         module will be called 8250.
-         [WARNING: Do not compile this driver as a module if you are using
-         non-standard serial ports, since the configuration information will
-         be lost when the driver is unloaded.  This limitation may be lifted
-         in the future.]
-
-         BTW1: If you have a mouseman serial mouse which is not recognized by
-         the X window system, try running gpm first.
-
-         BTW2: If you intend to use a software modem (also called Winmodem)
-         under Linux, forget it.  These modems are crippled and require
-         proprietary drivers which are only available under Windows.
-
-         Most people will say Y or M here, so that they can use serial mice,
-         modems and similar devices connecting to the standard serial ports.
-
-config SERIAL_8250_CONSOLE
-       bool "Console on 8250/16550 and compatible serial port"
-       depends on SERIAL_8250=y
-       select SERIAL_CORE_CONSOLE
-       ---help---
-         If you say Y here, it will be possible to use a serial port as the
-         system console (the system console is the device which receives all
-         kernel messages and warnings and which allows logins in single user
-         mode). This could be useful if some terminal or printer is connected
-         to that serial port.
-
-         Even if you say Y here, the currently visible virtual console
-         (/dev/tty0) will still be used as the system console by default, but
-         you can alter that using a kernel command line option such as
-         "console=ttyS1". (Try "man bootparam" or see the documentation of
-         your boot loader (grub or lilo or loadlin) about how to pass options
-         to the kernel at boot time.)
-
-         If you don't have a VGA card installed and you say Y here, the
-         kernel will automatically use the first serial line, /dev/ttyS0, as
-         system console.
-
-         You can set that using a kernel command line option such as
-         "console=uart8250,io,0x3f8,9600n8"
-         "console=uart8250,mmio,0xff5e0000,115200n8".
-         and it will switch to normal serial console when the corresponding 
-         port is ready.
-         "earlycon=uart8250,io,0x3f8,9600n8"
-         "earlycon=uart8250,mmio,0xff5e0000,115200n8".
-         it will not only setup early console.
-
-         If unsure, say N.
-
-config FIX_EARLYCON_MEM
-       bool
-       depends on X86
-       default y
-
-config SERIAL_8250_GSC
-       tristate
-       depends on SERIAL_8250 && GSC
-       default SERIAL_8250
-
-config SERIAL_8250_PCI
-       tristate "8250/16550 PCI device support" if EXPERT
-       depends on SERIAL_8250 && PCI
-       default SERIAL_8250
-       help
-         This builds standard PCI serial support. You may be able to
-         disable this feature if you only need legacy serial support.
-         Saves about 9K.
-
-config SERIAL_8250_PNP
-       tristate "8250/16550 PNP device support" if EXPERT
-       depends on SERIAL_8250 && PNP
-       default SERIAL_8250
-       help
-         This builds standard PNP serial support. You may be able to
-         disable this feature if you only need legacy serial support.
-
-config SERIAL_8250_FSL
-       bool
-       depends on SERIAL_8250_CONSOLE && PPC_UDBG_16550
-       default PPC
-
-config SERIAL_8250_HP300
-       tristate
-       depends on SERIAL_8250 && HP300
-       default SERIAL_8250
-
-config SERIAL_8250_CS
-       tristate "8250/16550 PCMCIA device support"
-       depends on PCMCIA && SERIAL_8250
-       ---help---
-         Say Y here to enable support for 16-bit PCMCIA serial devices,
-         including serial port cards, modems, and the modem functions of
-         multi-function Ethernet/modem cards. (PCMCIA- or PC-cards are
-         credit-card size devices often used with laptops.)
-
-         To compile this driver as a module, choose M here: the
-         module will be called serial_cs.
-
-         If unsure, say N.
-
-config SERIAL_8250_NR_UARTS
-       int "Maximum number of 8250/16550 serial ports"
-       depends on SERIAL_8250
-       default "4"
-       help
-         Set this to the number of serial ports you want the driver
-         to support.  This includes any ports discovered via ACPI or
-         PCI enumeration and any ports that may be added at run-time
-         via hot-plug, or any ISA multi-port serial cards.
-
-config SERIAL_8250_RUNTIME_UARTS
-       int "Number of 8250/16550 serial ports to register at runtime"
-       depends on SERIAL_8250
-       range 0 SERIAL_8250_NR_UARTS
-       default "4"
-       help
-         Set this to the maximum number of serial ports you want
-         the kernel to register at boot time.  This can be overridden
-         with the module parameter "nr_uarts", or boot-time parameter
-         8250.nr_uarts
-
-config SERIAL_8250_EXTENDED
-       bool "Extended 8250/16550 serial driver options"
-       depends on SERIAL_8250
-       help
-         If you wish to use any non-standard features of the standard "dumb"
-         driver, say Y here. This includes HUB6 support, shared serial
-         interrupts, special multiport support, support for more than the
-         four COM 1/2/3/4 boards, etc.
-
-         Note that the answer to this question won't directly affect the
-         kernel: saying N will just cause the configurator to skip all
-         the questions about serial driver options. If unsure, say N.
-
-config SERIAL_8250_MANY_PORTS
-       bool "Support more than 4 legacy serial ports"
-       depends on SERIAL_8250_EXTENDED && !IA64
-       help
-         Say Y here if you have dumb serial boards other than the four
-         standard COM 1/2/3/4 ports. This may happen if you have an AST
-         FourPort, Accent Async, Boca (read the Boca mini-HOWTO, available
-         from <http://www.tldp.org/docs.html#howto>), or other custom
-         serial port hardware which acts similar to standard serial port
-         hardware. If you only use the standard COM 1/2/3/4 ports, you can
-         say N here to save some memory. You can also say Y if you have an
-         "intelligent" multiport card such as Cyclades, Digiboards, etc.
-
-#
-# Multi-port serial cards
-#
-
-config SERIAL_8250_FOURPORT
-       tristate "Support Fourport cards"
-       depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
-       help
-         Say Y here if you have an AST FourPort serial board.
-
-         To compile this driver as a module, choose M here: the module
-         will be called 8250_fourport.
-
-config SERIAL_8250_ACCENT
-       tristate "Support Accent cards"
-       depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
-       help
-         Say Y here if you have an Accent Async serial board.
-
-         To compile this driver as a module, choose M here: the module
-         will be called 8250_accent.
-
-config SERIAL_8250_BOCA
-       tristate "Support Boca cards"
-       depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
-       help
-         Say Y here if you have a Boca serial board.  Please read the Boca
-         mini-HOWTO, available from <http://www.tldp.org/docs.html#howto>
-
-         To compile this driver as a module, choose M here: the module
-         will be called 8250_boca.
-
-config SERIAL_8250_EXAR_ST16C554
-       tristate "Support Exar ST16C554/554D Quad UART"
-       depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
-       help
-         The Uplogix Envoy TU301 uses this Exar Quad UART.  If you are
-         tinkering with your Envoy TU301, or have a machine with this UART,
-         say Y here.
-
-         To compile this driver as a module, choose M here: the module
-         will be called 8250_exar_st16c554.
-
-config SERIAL_8250_HUB6
-       tristate "Support Hub6 cards"
-       depends on SERIAL_8250 != n && ISA && SERIAL_8250_MANY_PORTS
-       help
-         Say Y here if you have a HUB6 serial board.
-
-         To compile this driver as a module, choose M here: the module
-         will be called 8250_hub6.
-
-config SERIAL_8250_SHARE_IRQ
-       bool "Support for sharing serial interrupts"
-       depends on SERIAL_8250_EXTENDED
-       help
-         Some serial boards have hardware support which allows multiple dumb
-         serial ports on the same board to share a single IRQ. To enable
-         support for this in the serial driver, say Y here.
-
-config SERIAL_8250_DETECT_IRQ
-       bool "Autodetect IRQ on standard ports (unsafe)"
-       depends on SERIAL_8250_EXTENDED
-       help
-         Say Y here if you want the kernel to try to guess which IRQ
-         to use for your serial port.
-
-         This is considered unsafe; it is far better to configure the IRQ in
-         a boot script using the setserial command.
-
-         If unsure, say N.
-
-config SERIAL_8250_RSA
-       bool "Support RSA serial ports"
-       depends on SERIAL_8250_EXTENDED
-       help
-         ::: To be written :::
-
-config SERIAL_8250_MCA
-       tristate "Support 8250-type ports on MCA buses"
-       depends on SERIAL_8250 != n && MCA
-       help
-         Say Y here if you have a MCA serial ports.
-
-         To compile this driver as a module, choose M here: the module
-         will be called 8250_mca.
-
-config SERIAL_8250_ACORN
-       tristate "Acorn expansion card serial port support"
-       depends on ARCH_ACORN && SERIAL_8250
-       help
-         If you have an Atomwide Serial card or Serial Port card for an Acorn
-         system, say Y to this option.  The driver can handle 1, 2, or 3 port
-         cards.  If unsure, say N.
-
-config SERIAL_8250_RM9K
-       bool "Support for MIPS RM9xxx integrated serial port"
-       depends on SERIAL_8250 != n && SERIAL_RM9000
-       select SERIAL_8250_SHARE_IRQ
-       help
-         Selecting this option will add support for the integrated serial
-         port hardware found on MIPS RM9122 and similar processors.
-         If unsure, say N.
-
-config SERIAL_8250_DW
-       tristate "Support for Synopsys DesignWare 8250 quirks"
-       depends on SERIAL_8250 && OF
-       help
-         Selecting this option will enable handling of the extra features
-         present in the Synopsys DesignWare APB UART.
+source "drivers/tty/serial/8250/Kconfig"
 
 comment "Non-8250 serial port support"
 
@@ -536,15 +264,6 @@ config SERIAL_MAX3107
        help
          MAX3107 chip support
 
-config SERIAL_MAX3107_AAVA
-       tristate "MAX3107 AAVA platform support"
-       depends on X86_MRST && SERIAL_MAX3107 && GPIOLIB
-       select SERIAL_CORE
-       help
-         Support for the MAX3107 chip configuration found on the AAVA
-         platform. Includes the extra initialisation and GPIO support
-         neded for this device.
-
 config SERIAL_DZ
        bool "DECstation DZ serial driver"
        depends on MACH_DECSTATION && 32BIT
index f5b01f2ce525da8df92d1972529fefb70c98ae23..fef32e10c8515a9de39ceab267f6647882c64197 100644 (file)
@@ -14,22 +14,9 @@ obj-$(CONFIG_SERIAL_SUNZILOG) += sunzilog.o
 obj-$(CONFIG_SERIAL_SUNSU) += sunsu.o
 obj-$(CONFIG_SERIAL_SUNSAB) += sunsab.o
 
-obj-$(CONFIG_SERIAL_8250) += 8250.o
-obj-$(CONFIG_SERIAL_8250_PNP) += 8250_pnp.o
-obj-$(CONFIG_SERIAL_8250_GSC) += 8250_gsc.o
-obj-$(CONFIG_SERIAL_8250_PCI) += 8250_pci.o
-obj-$(CONFIG_SERIAL_8250_HP300) += 8250_hp300.o
-obj-$(CONFIG_SERIAL_8250_CS) += serial_cs.o
-obj-$(CONFIG_SERIAL_8250_ACORN) += 8250_acorn.o
-obj-$(CONFIG_SERIAL_8250_CONSOLE) += 8250_early.o
-obj-$(CONFIG_SERIAL_8250_FOURPORT) += 8250_fourport.o
-obj-$(CONFIG_SERIAL_8250_ACCENT) += 8250_accent.o
-obj-$(CONFIG_SERIAL_8250_BOCA) += 8250_boca.o
-obj-$(CONFIG_SERIAL_8250_EXAR_ST16C554) += 8250_exar_st16c554.o
-obj-$(CONFIG_SERIAL_8250_HUB6) += 8250_hub6.o
-obj-$(CONFIG_SERIAL_8250_MCA) += 8250_mca.o
-obj-$(CONFIG_SERIAL_8250_FSL) += 8250_fsl.o
-obj-$(CONFIG_SERIAL_8250_DW) += 8250_dw.o
+# Now bring in any enabled 8250/16450/16550 type drivers.
+obj-$(CONFIG_SERIAL_8250) += 8250/
+
 obj-$(CONFIG_SERIAL_AMBA_PL010) += amba-pl010.o
 obj-$(CONFIG_SERIAL_AMBA_PL011) += amba-pl011.o
 obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
@@ -42,7 +29,6 @@ obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
 obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
 obj-$(CONFIG_SERIAL_MAX3100) += max3100.o
 obj-$(CONFIG_SERIAL_MAX3107) += max3107.o
-obj-$(CONFIG_SERIAL_MAX3107_AAVA) += max3107-aava.o
 obj-$(CONFIG_SERIAL_IP22_ZILOG) += ip22zilog.o
 obj-$(CONFIG_SERIAL_MUX) += mux.o
 obj-$(CONFIG_SERIAL_68328) += 68328serial.o
index 6958594f2fc09a4c35eb65ce6037f937b323251e..6800f5f26241430789a92efea30d58920dca9453 100644 (file)
@@ -159,6 +159,7 @@ struct uart_amba_port {
        unsigned int            fifosize;       /* vendor-specific */
        unsigned int            lcrh_tx;        /* vendor-specific */
        unsigned int            lcrh_rx;        /* vendor-specific */
+       unsigned int            old_cr;         /* state during shutdown */
        bool                    autorts;
        char                    type[12];
        bool                    interrupt_may_hang; /* vendor-specific */
@@ -268,7 +269,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
        struct dma_slave_config tx_conf = {
                .dst_addr = uap->port.mapbase + UART01x_DR,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
-               .direction = DMA_TO_DEVICE,
+               .direction = DMA_MEM_TO_DEV,
                .dst_maxburst = uap->fifosize >> 1,
        };
        struct dma_chan *chan;
@@ -301,7 +302,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
                struct dma_slave_config rx_conf = {
                        .src_addr = uap->port.mapbase + UART01x_DR,
                        .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
-                       .direction = DMA_FROM_DEVICE,
+                       .direction = DMA_DEV_TO_MEM,
                        .src_maxburst = uap->fifosize >> 1,
                };
 
@@ -480,7 +481,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
                return -EBUSY;
        }
 
-       desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_TO_DEVICE,
+       desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
                                             DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
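
The pl011 DMA setup switches to the dmaengine transfer directions DMA_MEM_TO_DEV / DMA_DEV_TO_MEM, which are distinct from the dma_data_direction values used for buffer mapping; that is why the dma_unmap_sg() just above still passes DMA_TO_DEVICE. The two namespaces side by side, as a sketch:

	/* Mapping and unmapping use enum dma_data_direction ... */
	dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	/* ... while the slave descriptor uses enum dma_transfer_direction. */
	desc = chan->device->device_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
						  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
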
@@ -676,7 +677,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
                &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
        dma_dev = rxchan->device;
        desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
-                                       DMA_FROM_DEVICE,
+                                       DMA_DEV_TO_MEM,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        /*
         * If the DMA engine is busy and cannot prepare a
@@ -1411,7 +1412,9 @@ static int pl011_startup(struct uart_port *port)
        while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
                barrier();
 
-       cr = UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
+       /* restore RTS and DTR */
+       cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
+       cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
        writew(cr, uap->port.membase + UART011_CR);
 
        /* Clear pending error interrupts */
@@ -1469,6 +1472,7 @@ static void pl011_shutdown_channel(struct uart_amba_port *uap,
 static void pl011_shutdown(struct uart_port *port)
 {
        struct uart_amba_port *uap = (struct uart_amba_port *)port;
+       unsigned int cr;
 
        /*
         * disable all interrupts
@@ -1488,9 +1492,16 @@ static void pl011_shutdown(struct uart_port *port)
 
        /*
         * disable the port
+        * disable the port. It should not disable RTS and DTR.
+        * Also RTS and DTR state should be preserved to restore
+        * it during startup().
         */
        uap->autorts = false;
-       writew(UART01x_CR_UARTEN | UART011_CR_TXE, uap->port.membase + UART011_CR);
+       cr = readw(uap->port.membase + UART011_CR);
+       uap->old_cr = cr;
+       cr &= UART011_CR_RTS | UART011_CR_DTR;
+       cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
+       writew(cr, uap->port.membase + UART011_CR);
 
        /*
         * disable break condition and fifos
@@ -1740,9 +1751,19 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
 {
        struct uart_amba_port *uap = amba_ports[co->index];
        unsigned int status, old_cr, new_cr;
+       unsigned long flags;
+       int locked = 1;
 
        clk_enable(uap->clk);
 
+       local_irq_save(flags);
+       if (uap->port.sysrq)
+               locked = 0;
+       else if (oops_in_progress)
+               locked = spin_trylock(&uap->port.lock);
+       else
+               spin_lock(&uap->port.lock);
+
        /*
         *      First save the CR then disable the interrupts
         */
@@ -1762,6 +1783,10 @@ pl011_console_write(struct console *co, const char *s, unsigned int count)
        } while (status & UART01x_FR_BUSY);
        writew(old_cr, uap->port.membase + UART011_CR);
 
+       if (locked)
+               spin_unlock(&uap->port.lock);
+       local_irq_restore(flags);
+
        clk_disable(uap->clk);
 }
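
pl011_console_write() now serialises against the port's interrupt handler, but it must never deadlock when printing an oops from a context that may already hold the lock: under sysrq it skips locking, during an oops it only try-locks, and otherwise it locks normally. This is the usual console-write pattern; a minimal sketch of it, with port standing for the driver's uart_port:

	unsigned long flags;
	int locked = 1;

	local_irq_save(flags);
	if (port->sysrq)
		locked = 0;				/* sysrq path already owns the lock */
	else if (oops_in_progress)
		locked = spin_trylock(&port->lock);	/* never deadlock while oopsing */
	else
		spin_lock(&port->lock);

	/* ... emit characters ... */

	if (locked)
		spin_unlock(&port->lock);
	local_irq_restore(flags);
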
 
@@ -1905,6 +1930,7 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
        uap->vendor = vendor;
        uap->lcrh_rx = vendor->lcrh_rx;
        uap->lcrh_tx = vendor->lcrh_tx;
+       uap->old_cr = 0;
        uap->fifosize = vendor->fifosize;
        uap->interrupt_may_hang = vendor->interrupt_may_hang;
        uap->port.dev = &dev->dev;
index 7c867a046c9752214773ba5f2a1237893ed2ebfd..7545fe1b99257dad128f3ddd57356939ce8f8399 100644 (file)
@@ -251,6 +251,7 @@ static void jsm_io_resume(struct pci_dev *pdev)
        struct jsm_board *brd = pci_get_drvdata(pdev);
 
        pci_restore_state(pdev);
+       pci_save_state(pdev);
 
        jsm_uart_port_init(brd);
 }
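
jsm's PCI error-recovery resume re-saves the configuration space right after restoring it: pci_restore_state() consumes the previously saved snapshot, so without the extra pci_save_state() a later recovery pass would have nothing to restore. The idiom, sketched for a generic .resume handler (my_reinit_hw() is illustrative):

	static void my_io_resume(struct pci_dev *pdev)
	{
		pci_restore_state(pdev);	/* uses up the saved snapshot */
		pci_save_state(pdev);		/* re-arm for a possible further recovery pass */

		my_reinit_hw(pdev);		/* illustrative driver re-initialisation */
	}
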
diff --git a/drivers/tty/serial/max3107-aava.c b/drivers/tty/serial/max3107-aava.c
deleted file mode 100644 (file)
index aae772a..0000000
+++ /dev/null
@@ -1,344 +0,0 @@
-/*
- *  max3107.c - spi uart protocol driver for Maxim 3107
- *  Based on max3100.c
- *     by Christian Pellegrin <chripell@evolware.org>
- *  and        max3110.c
- *     by Feng Tang <feng.tang@intel.com>
- *
- *  Copyright (C) Aavamobile 2009
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License as published by
- *  the Free Software Foundation; either version 2 of the License, or
- *  (at your option) any later version.
- *
- *  This program is distributed in the hope that it will be useful,
- *  but WITHOUT ANY WARRANTY; without even the implied warranty of
- *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- *  GNU General Public License for more details.
- *
- *  You should have received a copy of the GNU General Public License
- *  along with this program; if not, write to the Free Software
- *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- */
-
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <linux/serial_core.h>
-#include <linux/serial.h>
-#include <linux/spi/spi.h>
-#include <linux/freezer.h>
-#include <linux/platform_device.h>
-#include <linux/gpio.h>
-#include <linux/sfi.h>
-#include <linux/module.h>
-#include <asm/mrst.h>
-#include "max3107.h"
-
-/* GPIO direction to input function */
-static int max3107_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
-{
-       struct max3107_port *s = container_of(chip, struct max3107_port, chip);
-       u16 buf[1];             /* Buffer for SPI transfer */
-
-       if (offset >= MAX3107_GPIO_COUNT) {
-               dev_err(&s->spi->dev, "Invalid GPIO\n");
-               return -EINVAL;
-       }
-
-       /* Read current GPIO configuration register */
-       buf[0] = MAX3107_GPIOCFG_REG;
-       /* Perform SPI transfer */
-       if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
-               dev_err(&s->spi->dev, "SPI transfer GPIO read failed\n");
-               return -EIO;
-       }
-       buf[0] &= MAX3107_SPI_RX_DATA_MASK;
-
-       /* Set GPIO to input */
-       buf[0] &= ~(0x0001 << offset);
-
-       /* Write new GPIO configuration register value */
-       buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG);
-       /* Perform SPI transfer */
-       if (max3107_rw(s, (u8 *)buf, NULL, 2)) {
-               dev_err(&s->spi->dev, "SPI transfer GPIO write failed\n");
-               return -EIO;
-       }
-       return 0;
-}
-
-/* GPIO direction to output function */
-static int max3107_gpio_direction_out(struct gpio_chip *chip, unsigned offset,
-                                       int value)
-{
-       struct max3107_port *s = container_of(chip, struct max3107_port, chip);
-       u16 buf[2];     /* Buffer for SPI transfers */
-
-       if (offset >= MAX3107_GPIO_COUNT) {
-               dev_err(&s->spi->dev, "Invalid GPIO\n");
-               return -EINVAL;
-       }
-
-       /* Read current GPIO configuration and data registers */
-       buf[0] = MAX3107_GPIOCFG_REG;
-       buf[1] = MAX3107_GPIODATA_REG;
-       /* Perform SPI transfer */
-       if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) {
-               dev_err(&s->spi->dev, "SPI transfer gpio failed\n");
-               return -EIO;
-       }
-       buf[0] &= MAX3107_SPI_RX_DATA_MASK;
-       buf[1] &= MAX3107_SPI_RX_DATA_MASK;
-
-       /* Set GPIO to output */
-       buf[0] |= (0x0001 << offset);
-       /* Set value */
-       if (value)
-               buf[1] |= (0x0001 << offset);
-       else
-               buf[1] &= ~(0x0001 << offset);
-
-       /* Write new GPIO configuration and data register values */
-       buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIOCFG_REG);
-       buf[1] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG);
-       /* Perform SPI transfer */
-       if (max3107_rw(s, (u8 *)buf, NULL, 4)) {
-               dev_err(&s->spi->dev,
-                       "SPI transfer for GPIO conf data w failed\n");
-               return -EIO;
-       }
-       return 0;
-}
-
-/* GPIO value query function */
-static int max3107_gpio_get(struct gpio_chip *chip, unsigned offset)
-{
-       struct max3107_port *s = container_of(chip, struct max3107_port, chip);
-       u16 buf[1];     /* Buffer for SPI transfer */
-
-       if (offset >= MAX3107_GPIO_COUNT) {
-               dev_err(&s->spi->dev, "Invalid GPIO\n");
-               return -EINVAL;
-       }
-
-       /* Read current GPIO data register */
-       buf[0] = MAX3107_GPIODATA_REG;
-       /* Perform SPI transfer */
-       if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 2)) {
-               dev_err(&s->spi->dev, "SPI transfer GPIO data r failed\n");
-               return -EIO;
-       }
-       buf[0] &= MAX3107_SPI_RX_DATA_MASK;
-
-       /* Return value */
-       return buf[0] & (0x0001 << offset);
-}
-
-/* GPIO value set function */
-static void max3107_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
-{
-       struct max3107_port *s = container_of(chip, struct max3107_port, chip);
-       u16 buf[2];     /* Buffer for SPI transfers */
-
-       if (offset >= MAX3107_GPIO_COUNT) {
-               dev_err(&s->spi->dev, "Invalid GPIO\n");
-               return;
-       }
-
-       /* Read current GPIO configuration registers*/
-       buf[0] = MAX3107_GPIODATA_REG;
-       buf[1] = MAX3107_GPIOCFG_REG;
-       /* Perform SPI transfer */
-       if (max3107_rw(s, (u8 *)buf, (u8 *)buf, 4)) {
-               dev_err(&s->spi->dev,
-                       "SPI transfer for GPIO data and config read failed\n");
-               return;
-       }
-       buf[0] &= MAX3107_SPI_RX_DATA_MASK;
-       buf[1] &= MAX3107_SPI_RX_DATA_MASK;
-
-       if (!(buf[1] & (0x0001 << offset))) {
-               /* Configured as input, can't set value */
-               dev_warn(&s->spi->dev,
-                               "Trying to set value for input GPIO\n");
-               return;
-       }
-
-       /* Set value */
-       if (value)
-               buf[0] |= (0x0001 << offset);
-       else
-               buf[0] &= ~(0x0001 << offset);
-
-       /* Write new GPIO data register value */
-       buf[0] |= (MAX3107_WRITE_BIT | MAX3107_GPIODATA_REG);
-       /* Perform SPI transfer */
-       if (max3107_rw(s, (u8 *)buf, NULL, 2))
-               dev_err(&s->spi->dev, "SPI transfer GPIO data w failed\n");
-}
-
-/* GPIO chip data */
-static struct gpio_chip max3107_gpio_chip = {
-       .owner                  = THIS_MODULE,
-       .direction_input        = max3107_gpio_direction_in,
-       .direction_output       = max3107_gpio_direction_out,
-       .get                    = max3107_gpio_get,
-       .set                    = max3107_gpio_set,
-       .can_sleep              = 1,
-       .base                   = MAX3107_GPIO_BASE,
-       .ngpio                  = MAX3107_GPIO_COUNT,
-};
-
-/**
- *     max3107_aava_reset      -       reset on AAVA systems
- *     @spi: The SPI device we are probing
- *
- *     Reset the device ready for probing.
- */
-
-static int max3107_aava_reset(struct spi_device *spi)
-{
-       /* Reset the chip */
-       if (gpio_request(MAX3107_RESET_GPIO, "max3107")) {
-               pr_err("Requesting RESET GPIO failed\n");
-               return -EIO;
-       }
-       if (gpio_direction_output(MAX3107_RESET_GPIO, 0)) {
-               pr_err("Setting RESET GPIO to 0 failed\n");
-               gpio_free(MAX3107_RESET_GPIO);
-               return -EIO;
-       }
-       msleep(MAX3107_RESET_DELAY);
-       if (gpio_direction_output(MAX3107_RESET_GPIO, 1)) {
-               pr_err("Setting RESET GPIO to 1 failed\n");
-               gpio_free(MAX3107_RESET_GPIO);
-               return -EIO;
-       }
-       gpio_free(MAX3107_RESET_GPIO);
-       msleep(MAX3107_WAKEUP_DELAY);
-       return 0;
-}
-
-static int max3107_aava_configure(struct max3107_port *s)
-{
-       int retval;
-
-       /* Initialize GPIO chip data */
-       s->chip = max3107_gpio_chip;
-       s->chip.label = s->spi->modalias;
-       s->chip.dev = &s->spi->dev;
-
-       /* Add GPIO chip */
-       retval = gpiochip_add(&s->chip);
-       if (retval) {
-               dev_err(&s->spi->dev, "Adding GPIO chip failed\n");
-               return retval;
-       }
-
-       /* Temporary fix for EV2 boot problems, set modem reset to 0 */
-       max3107_gpio_direction_out(&s->chip, 3, 0);
-       return 0;
-}
-
-#if 0
-/* This will get enabled once we have the board stuff merged for this
-   specific case */
-
-static const struct baud_table brg13_ext[] = {
-       { 300,    MAX3107_BRG13_B300 },
-       { 600,    MAX3107_BRG13_B600 },
-       { 1200,   MAX3107_BRG13_B1200 },
-       { 2400,   MAX3107_BRG13_B2400 },
-       { 4800,   MAX3107_BRG13_B4800 },
-       { 9600,   MAX3107_BRG13_B9600 },
-       { 19200,  MAX3107_BRG13_B19200 },
-       { 57600,  MAX3107_BRG13_B57600 },
-       { 115200, MAX3107_BRG13_B115200 },
-       { 230400, MAX3107_BRG13_B230400 },
-       { 460800, MAX3107_BRG13_B460800 },
-       { 921600, MAX3107_BRG13_B921600 },
-       { 0, 0 }
-};
-
-static void max3107_aava_init(struct max3107_port *s)
-{
-       /*override for AAVA SC specific*/
-       if (mrst_platform_id() == MRST_PLATFORM_AAVA_SC) {
-               if (get_koski_build_id() <= KOSKI_EV2)
-                       if (s->ext_clk) {
-                               s->brg_cfg = MAX3107_BRG13_B9600;
-                               s->baud_tbl = (struct baud_table *)brg13_ext;
-                       }
-       }
-}
-#endif
-
-static int __devexit max3107_aava_remove(struct spi_device *spi)
-{
-       struct max3107_port *s = dev_get_drvdata(&spi->dev);
-
-       /* Remove GPIO chip */
-       if (gpiochip_remove(&s->chip))
-               dev_warn(&spi->dev, "Removing GPIO chip failed\n");
-
-       /* Then do the default remove */
-       return max3107_remove(spi);
-}
-
-/* Platform data */
-static struct max3107_plat aava_plat_data = {
-       .loopback               = 0,
-       .ext_clk                = 1,
-/*     .init                   = max3107_aava_init, */
-       .configure              = max3107_aava_configure,
-       .hw_suspend             = max3107_hw_susp,
-       .polled_mode            = 0,
-       .poll_time              = 0,
-};
-
-
-static int __devinit max3107_probe_aava(struct spi_device *spi)
-{
-       int err = max3107_aava_reset(spi);
-       if (err < 0)
-               return err;
-       return max3107_probe(spi, &aava_plat_data);
-}
-
-/* Spi driver data */
-static struct spi_driver max3107_driver = {
-       .driver = {
-               .name           = "aava-max3107",
-               .owner          = THIS_MODULE,
-       },
-       .probe          = max3107_probe_aava,
-       .remove         = __devexit_p(max3107_aava_remove),
-       .suspend        = max3107_suspend,
-       .resume         = max3107_resume,
-};
-
-/* Driver init function */
-static int __init max3107_init(void)
-{
-       return spi_register_driver(&max3107_driver);
-}
-
-/* Driver exit function */
-static void __exit max3107_exit(void)
-{
-       spi_unregister_driver(&max3107_driver);
-}
-
-module_init(max3107_init);
-module_exit(max3107_exit);
-
-MODULE_DESCRIPTION("MAX3107 driver");
-MODULE_AUTHOR("Aavamobile");
-MODULE_ALIAS("spi:aava-max3107");
-MODULE_LICENSE("GPL v2");
index d192dcbb82f5e464f9ab6beb8805a2dcd1c6733b..1c2426931484fa271e0acc8f935f947ec5df97ad 100644 (file)
@@ -1160,7 +1160,7 @@ static struct uart_driver serial_omap_reg = {
        .cons           = OMAP_CONSOLE,
 };
 
-#ifdef CONFIG_SUSPEND
+#ifdef CONFIG_PM_SLEEP
 static int serial_omap_suspend(struct device *dev)
 {
        struct uart_omap_port *up = dev_get_drvdata(dev);
@@ -1521,6 +1521,7 @@ static void serial_omap_mdr1_errataset(struct uart_omap_port *up, u8 mdr1)
        }
 }
 
+#ifdef CONFIG_PM_RUNTIME
 static void serial_omap_restore_context(struct uart_omap_port *up)
 {
        if (up->errata & UART_ERRATA_i202_MDR1_ACCESS)
@@ -1550,7 +1551,6 @@ static void serial_omap_restore_context(struct uart_omap_port *up)
                serial_out(up, UART_OMAP_MDR1, up->mdr1);
 }
 
-#ifdef CONFIG_PM_RUNTIME
 static int serial_omap_runtime_suspend(struct device *dev)
 {
        struct uart_omap_port *up = dev_get_drvdata(dev);
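
CONFIG_PM_SLEEP covers both suspend and hibernation, and moving the CONFIG_PM_RUNTIME guard above serial_omap_restore_context() keeps that helper compiled only when the runtime-PM callbacks that use it are built. A minimal sketch of how such guarded callbacks are usually wired up; this is illustrative and not necessarily this driver's exact dev_pm_ops:

	/* Sketch, assuming the suspend/resume callbacks named in this diff */
	static const struct dev_pm_ops example_serial_pm_ops = {
		SET_SYSTEM_SLEEP_PM_OPS(serial_omap_suspend, serial_omap_resume)
		SET_RUNTIME_PM_OPS(serial_omap_runtime_suspend,
				   serial_omap_runtime_resume, NULL)
	};
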
index de0f613ed6f56051df309abc3ad25ea804081006..17ae65762d1a465f83ae92d9e64128419c2e077c 100644 (file)
@@ -764,7 +764,7 @@ static int dma_handle_rx(struct eg20t_port *priv)
        sg_dma_address(sg) = priv->rx_buf_dma;
 
        desc = priv->chan_rx->device->device_prep_slave_sg(priv->chan_rx,
-                       sg, 1, DMA_FROM_DEVICE,
+                       sg, 1, DMA_DEV_TO_MEM,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 
        if (!desc)
@@ -923,7 +923,7 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv)
        }
 
        desc = priv->chan_tx->device->device_prep_slave_sg(priv->chan_tx,
-                                       priv->sg_tx_p, nent, DMA_TO_DEVICE,
+                                       priv->sg_tx_p, nent, DMA_MEM_TO_DEV,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(priv->port.dev, "%s:device_prep_slave_sg Failed\n",
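
This hunk (and the sh-sci and ux500 hunks further down) converts slave-DMA users from enum dma_data_direction to enum dma_transfer_direction: receive paths use DMA_DEV_TO_MEM and transmit paths DMA_MEM_TO_DEV. A minimal sketch of an RX descriptor prepared with the new constants, assuming the channel and scatterlist are already configured elsewhere:

	/* Illustrative helper, not part of the patch */
	static struct dma_async_tx_descriptor *example_prep_rx(struct dma_chan *chan,
								struct scatterlist *sg,
								unsigned int nents)
	{
		return chan->device->device_prep_slave_sg(chan, sg, nents,
				DMA_DEV_TO_MEM,
				DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	}
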
index c7bf31a6a7e75f711b711cad125a1b874db2fa04..13056180adf5eff85b9af7a81d2cb12a93c5dae0 100644 (file)
@@ -2348,11 +2348,11 @@ int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
         */
        tty_dev = tty_register_device(drv->tty_driver, uport->line, uport->dev);
        if (likely(!IS_ERR(tty_dev))) {
-               device_init_wakeup(tty_dev, 1);
-               device_set_wakeup_enable(tty_dev, 0);
-       } else
+               device_set_wakeup_capable(tty_dev, 1);
+       } else {
                printk(KERN_ERR "Cannot register tty device on line %d\n",
                       uport->line);
+       }
 
        /*
         * Ensure UPF_DEAD is not set.
index 9e62349b3d9f22cfeb59d2fcea49da4b068aac34..75085795528edd172568dedd2a82f5b160246029 100644 (file)
@@ -1339,7 +1339,7 @@ static void sci_submit_rx(struct sci_port *s)
                struct dma_async_tx_descriptor *desc;
 
                desc = chan->device->device_prep_slave_sg(chan,
-                       sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT);
+                       sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 
                if (desc) {
                        s->desc_rx[i] = desc;
@@ -1454,7 +1454,7 @@ static void work_fn_tx(struct work_struct *work)
        BUG_ON(!sg_dma_len(sg));
 
        desc = chan->device->device_prep_slave_sg(chan,
-                       sg, s->sg_len_tx, DMA_TO_DEVICE,
+                       sg, s->sg_len_tx, DMA_MEM_TO_DEV,
                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                /* switch to PIO */
index ef9dd628ba0b9f00859a301a67e581f1bd60039c..bf6e238146ae40acd4ac8ea2f517574870366590 100644 (file)
@@ -227,7 +227,6 @@ int tty_port_block_til_ready(struct tty_port *port,
        int do_clocal = 0, retval;
        unsigned long flags;
        DEFINE_WAIT(wait);
-       int cd;
 
        /* block if port is in the process of being closed */
        if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
@@ -284,11 +283,14 @@ int tty_port_block_til_ready(struct tty_port *port,
                                retval = -ERESTARTSYS;
                        break;
                }
-               /* Probe the carrier. For devices with no carrier detect this
-                  will always return true */
-               cd = tty_port_carrier_raised(port);
+               /*
+                * Probe the carrier. For devices with no carrier detect
+                * tty_port_carrier_raised will always return true.
+                * Never ask drivers if CLOCAL is set, this causes troubles
+                * on some hardware.
+                */
                if (!(port->flags & ASYNC_CLOSING) &&
-                               (do_clocal || cd))
+                               (do_clocal || tty_port_carrier_raised(port)))
                        break;
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
index 5e096f43bceaceb056098d69227d937501ef27bf..65447c5f91d7e0c1108d5f4db5d6eaf69af53234 100644 (file)
@@ -1463,7 +1463,6 @@ compat_kdfontop_ioctl(struct compat_console_font_op __user *fontop,
        if (!perm && op->op != KD_FONT_OP_GET)
                return -EPERM;
        op->data = compat_ptr(((struct compat_console_font_op *)op)->data);
-       op->flags |= KD_FONT_FLAG_OLD;
        i = con_font_op(vc, op);
        if (i)
                return i;
index 1c50baff7725ec2d8e368b3c34f50925a76f3804..d2b3cffca3f786c9eb21c658604419e710a14985 100644 (file)
@@ -57,6 +57,8 @@ MODULE_DEVICE_TABLE (usb, wdm_ids);
 
 #define WDM_MAX                        16
 
+/* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */
+#define WDM_DEFAULT_BUFSIZE    256
 
 static DEFINE_MUTEX(wdm_mutex);
 
@@ -88,7 +90,8 @@ struct wdm_device {
        int                     count;
        dma_addr_t              shandle;
        dma_addr_t              ihandle;
-       struct mutex            lock;
+       struct mutex            wlock;
+       struct mutex            rlock;
        wait_queue_head_t       wait;
        struct work_struct      rxwork;
        int                     werr;
@@ -323,7 +326,7 @@ static ssize_t wdm_write
        }
 
        /* concurrent writes and disconnect */
-       r = mutex_lock_interruptible(&desc->lock);
+       r = mutex_lock_interruptible(&desc->wlock);
        rv = -ERESTARTSYS;
        if (r) {
                kfree(buf);
@@ -386,7 +389,7 @@ static ssize_t wdm_write
 out:
        usb_autopm_put_interface(desc->intf);
 outnp:
-       mutex_unlock(&desc->lock);
+       mutex_unlock(&desc->wlock);
 outnl:
        return rv < 0 ? rv : count;
 }
@@ -399,7 +402,7 @@ static ssize_t wdm_read
        struct wdm_device *desc = file->private_data;
 
 
-       rv = mutex_lock_interruptible(&desc->lock); /*concurrent reads */
+       rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */
        if (rv < 0)
                return -ERESTARTSYS;
 
@@ -467,14 +470,16 @@ retry:
        for (i = 0; i < desc->length - cntr; i++)
                desc->ubuf[i] = desc->ubuf[i + cntr];
 
+       spin_lock_irq(&desc->iuspin);
        desc->length -= cntr;
+       spin_unlock_irq(&desc->iuspin);
        /* in case we had outstanding data */
        if (!desc->length)
                clear_bit(WDM_READ, &desc->flags);
        rv = cntr;
 
 err:
-       mutex_unlock(&desc->lock);
+       mutex_unlock(&desc->rlock);
        return rv;
 }
 
@@ -540,7 +545,8 @@ static int wdm_open(struct inode *inode, struct file *file)
        }
        intf->needs_remote_wakeup = 1;
 
-       mutex_lock(&desc->lock);
+       /* using write lock to protect desc->count */
+       mutex_lock(&desc->wlock);
        if (!desc->count++) {
                desc->werr = 0;
                desc->rerr = 0;
@@ -553,7 +559,7 @@ static int wdm_open(struct inode *inode, struct file *file)
        } else {
                rv = 0;
        }
-       mutex_unlock(&desc->lock);
+       mutex_unlock(&desc->wlock);
        usb_autopm_put_interface(desc->intf);
 out:
        mutex_unlock(&wdm_mutex);
@@ -565,9 +571,11 @@ static int wdm_release(struct inode *inode, struct file *file)
        struct wdm_device *desc = file->private_data;
 
        mutex_lock(&wdm_mutex);
-       mutex_lock(&desc->lock);
+
+       /* using write lock to protect desc->count */
+       mutex_lock(&desc->wlock);
        desc->count--;
-       mutex_unlock(&desc->lock);
+       mutex_unlock(&desc->wlock);
 
        if (!desc->count) {
                dev_dbg(&desc->intf->dev, "wdm_release: cleanup");
@@ -630,7 +638,7 @@ static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
        struct usb_cdc_dmm_desc *dmhd;
        u8 *buffer = intf->altsetting->extra;
        int buflen = intf->altsetting->extralen;
-       u16 maxcom = 0;
+       u16 maxcom = WDM_DEFAULT_BUFSIZE;
 
        if (!buffer)
                goto out;
@@ -665,7 +673,8 @@ next_desc:
        desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL);
        if (!desc)
                goto out;
-       mutex_init(&desc->lock);
+       mutex_init(&desc->rlock);
+       mutex_init(&desc->wlock);
        spin_lock_init(&desc->iuspin);
        init_waitqueue_head(&desc->wait);
        desc->wMaxCommand = maxcom;
@@ -716,7 +725,7 @@ next_desc:
                goto err;
 
        desc->inbuf = usb_alloc_coherent(interface_to_usbdev(intf),
-                                        desc->bMaxPacketSize0,
+                                        desc->wMaxCommand,
                                         GFP_KERNEL,
                                         &desc->response->transfer_dma);
        if (!desc->inbuf)
@@ -779,11 +788,13 @@ static void wdm_disconnect(struct usb_interface *intf)
        /* to terminate pending flushes */
        clear_bit(WDM_IN_USE, &desc->flags);
        spin_unlock_irqrestore(&desc->iuspin, flags);
-       mutex_lock(&desc->lock);
+       wake_up_all(&desc->wait);
+       mutex_lock(&desc->rlock);
+       mutex_lock(&desc->wlock);
        kill_urbs(desc);
        cancel_work_sync(&desc->rxwork);
-       mutex_unlock(&desc->lock);
-       wake_up_all(&desc->wait);
+       mutex_unlock(&desc->wlock);
+       mutex_unlock(&desc->rlock);
        if (!desc->count)
                cleanup(desc);
        mutex_unlock(&wdm_mutex);
@@ -798,8 +809,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
        dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);
 
        /* if this is an autosuspend the caller does the locking */
-       if (!PMSG_IS_AUTO(message))
-               mutex_lock(&desc->lock);
+       if (!PMSG_IS_AUTO(message)) {
+               mutex_lock(&desc->rlock);
+               mutex_lock(&desc->wlock);
+       }
        spin_lock_irq(&desc->iuspin);
 
        if (PMSG_IS_AUTO(message) &&
@@ -815,8 +828,10 @@ static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
                kill_urbs(desc);
                cancel_work_sync(&desc->rxwork);
        }
-       if (!PMSG_IS_AUTO(message))
-               mutex_unlock(&desc->lock);
+       if (!PMSG_IS_AUTO(message)) {
+               mutex_unlock(&desc->wlock);
+               mutex_unlock(&desc->rlock);
+       }
 
        return rv;
 }
@@ -854,7 +869,8 @@ static int wdm_pre_reset(struct usb_interface *intf)
 {
        struct wdm_device *desc = usb_get_intfdata(intf);
 
-       mutex_lock(&desc->lock);
+       mutex_lock(&desc->rlock);
+       mutex_lock(&desc->wlock);
        kill_urbs(desc);
 
        /*
@@ -876,7 +892,8 @@ static int wdm_post_reset(struct usb_interface *intf)
        int rv;
 
        rv = recover_from_urb_loss(desc);
-       mutex_unlock(&desc->lock);
+       mutex_unlock(&desc->wlock);
+       mutex_unlock(&desc->rlock);
        return 0;
 }
 
index 2f51de57593a11ebc617085707d22e67735165de..c8df1dd967efe3e5160fbe8507a8b2bbff972aa5 100644 (file)
@@ -126,7 +126,6 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
                struct dwc3_request *req)
 {
        struct dwc3             *dwc = dep->dwc;
-       u32                     type;
        int                     ret = 0;
 
        req->request.actual     = 0;
@@ -149,20 +148,14 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
 
                direction = !!(dep->flags & DWC3_EP0_DIR_IN);
 
-               if (dwc->ep0state == EP0_STATUS_PHASE) {
-                       type = dwc->three_stage_setup
-                               ? DWC3_TRBCTL_CONTROL_STATUS3
-                               : DWC3_TRBCTL_CONTROL_STATUS2;
-               } else if (dwc->ep0state == EP0_DATA_PHASE) {
-                       type = DWC3_TRBCTL_CONTROL_DATA;
-               } else {
-                       /* should never happen */
-                       WARN_ON(1);
+               if (dwc->ep0state != EP0_DATA_PHASE) {
+                       dev_WARN(dwc->dev, "Unexpected pending request\n");
                        return 0;
                }
 
                ret = dwc3_ep0_start_trans(dwc, direction,
-                               req->request.dma, req->request.length, type);
+                               req->request.dma, req->request.length,
+                               DWC3_TRBCTL_CONTROL_DATA);
                dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
                                DWC3_EP0_DIR_IN);
        } else if (dwc->delayed_status) {
index a696bde5322268013819ac43d9059da653d9c045..064b6e2cd4118635508a1dcc4eddfd63e3d08189 100644 (file)
@@ -101,7 +101,7 @@ void dwc3_unmap_buffer_from_dma(struct dwc3_request *req)
        if (req->request.num_mapped_sgs) {
                req->request.dma = DMA_ADDR_INVALID;
                dma_unmap_sg(dwc->dev, req->request.sg,
-                               req->request.num_sgs,
+                               req->request.num_mapped_sgs,
                                req->direction ? DMA_TO_DEVICE
                                : DMA_FROM_DEVICE);
 
index a95de6a4a13447d3217d7495bcd94079bca04903..baaebf2830fce882022801f7a492a01c4b9f4c79 100644 (file)
@@ -175,13 +175,12 @@ ep_found:
        _ep->comp_desc = comp_desc;
        if (g->speed == USB_SPEED_SUPER) {
                switch (usb_endpoint_type(_ep->desc)) {
-               case USB_ENDPOINT_XFER_BULK:
-               case USB_ENDPOINT_XFER_INT:
-                       _ep->maxburst = comp_desc->bMaxBurst;
-                       break;
                case USB_ENDPOINT_XFER_ISOC:
                        /* mult: bits 1:0 of bmAttributes */
                        _ep->mult = comp_desc->bmAttributes & 0x3;
+               case USB_ENDPOINT_XFER_BULK:
+               case USB_ENDPOINT_XFER_INT:
+                       _ep->maxburst = comp_desc->bMaxBurst;
                        break;
                default:
                        /* Do nothing for control endpoints */
index 753aa0683ac12c483ed34e23543063182bc15536..e0e6375ef5dd7693cb89ff24397223e94f26aa7a 100644 (file)
@@ -126,7 +126,7 @@ ep_matches (
         * descriptor and see if the EP matches it
         */
        if (usb_endpoint_xfer_bulk(desc)) {
-               if (ep_comp) {
+               if (ep_comp && gadget->max_speed >= USB_SPEED_SUPER) {
                        num_req_streams = ep_comp->bmAttributes & 0x1f;
                        if (num_req_streams > ep->max_streams)
                                return 0;
index 6353eca1e852e5fa260d7e38562816ec6fd1338d..ee8ceec01560e5391a4f115c130002bd98917e41 100644 (file)
@@ -3123,15 +3123,15 @@ fsg_add(struct usb_composite_dev *cdev, struct usb_configuration *c,
 
 struct fsg_module_parameters {
        char            *file[FSG_MAX_LUNS];
-       int             ro[FSG_MAX_LUNS];
-       int             removable[FSG_MAX_LUNS];
-       int             cdrom[FSG_MAX_LUNS];
-       int             nofua[FSG_MAX_LUNS];
+       bool            ro[FSG_MAX_LUNS];
+       bool            removable[FSG_MAX_LUNS];
+       bool            cdrom[FSG_MAX_LUNS];
+       bool            nofua[FSG_MAX_LUNS];
 
        unsigned int    file_count, ro_count, removable_count, cdrom_count;
        unsigned int    nofua_count;
        unsigned int    luns;   /* nluns */
-       int             stall;  /* can_stall */
+       bool            stall;  /* can_stall */
 };
 
 #define _FSG_MODULE_PARAM_ARRAY(prefix, params, name, type, desc)      \
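
Switching these fields to bool matters because they back "bool"-typed module parameter arrays, and the module-param code expects such parameters to use real bool storage. A hedged example of the kind of definition involved; the parameter name and permissions here are illustrative, not copied from the driver:

	/* Illustrative only -- name and S_IRUGO permission are assumptions */
	static struct fsg_module_parameters mod_data;

	module_param_array_named(ro, mod_data.ro, bool, &mod_data.ro_count, S_IRUGO);
	MODULE_PARM_DESC(ro, "true to force read-only");
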
index d7ea6c076ce98e185ee43b557c524d2b29c67270..b04712f19f1efa613261c2216b08b31958ec4973 100644 (file)
@@ -1430,7 +1430,7 @@ static void setup_received_irq(struct fsl_udc *udc,
                        int pipe = get_pipe_by_windex(wIndex);
                        struct fsl_ep *ep;
 
-                       if (wValue != 0 || wLength != 0 || pipe > udc->max_ep)
+                       if (wValue != 0 || wLength != 0 || pipe >= udc->max_ep)
                                break;
                        ep = get_ep_by_pipe(udc, pipe);
 
@@ -1673,7 +1673,7 @@ static void dtd_complete_irq(struct fsl_udc *udc)
        if (!bit_pos)
                return;
 
-       for (i = 0; i < udc->max_ep * 2; i++) {
+       for (i = 0; i < udc->max_ep; i++) {
                ep_num = i >> 1;
                direction = i % 2;
 
index fa0fcc11263fb8b6a73d21bcdbe7ba3f168e19dc..e2293c1588eed80a2e0ba031677f918f774305f0 100644 (file)
 /* #undef      DEBUG */
 /* #undef      VERBOSE_DEBUG */
 
-#if defined(CONFIG_USB_LANGWELL_OTG)
-#define        OTG_TRANSCEIVER
-#endif
-
-
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
@@ -1522,8 +1517,7 @@ static void langwell_udc_stop(struct langwell_udc *dev)
 
 
 /* stop all USB activities */
-static void stop_activity(struct langwell_udc *dev,
-               struct usb_gadget_driver *driver)
+static void stop_activity(struct langwell_udc *dev)
 {
        struct langwell_ep      *ep;
        dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
@@ -1535,9 +1529,9 @@ static void stop_activity(struct langwell_udc *dev,
        }
 
        /* report disconnect; the driver is already quiesced */
-       if (driver) {
+       if (dev->driver) {
                spin_unlock(&dev->lock);
-               driver->disconnect(&dev->gadget);
+               dev->driver->disconnect(&dev->gadget);
                spin_lock(&dev->lock);
        }
 
@@ -1925,11 +1919,10 @@ static int langwell_stop(struct usb_gadget *g,
 
        /* stop all usb activities */
        dev->gadget.speed = USB_SPEED_UNKNOWN;
-       stop_activity(dev, driver);
-       spin_unlock_irqrestore(&dev->lock, flags);
-
        dev->gadget.dev.driver = NULL;
        dev->driver = NULL;
+       stop_activity(dev);
+       spin_unlock_irqrestore(&dev->lock, flags);
 
        device_remove_file(&dev->pdev->dev, &dev_attr_function);
 
@@ -2315,13 +2308,9 @@ static void handle_setup_packet(struct langwell_udc *dev,
 
                        if (!gadget_is_otg(&dev->gadget))
                                break;
-                       else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE) {
+                       else if (setup->bRequest == USB_DEVICE_B_HNP_ENABLE)
                                dev->gadget.b_hnp_enable = 1;
-#ifdef OTG_TRANSCEIVER
-                               if (!dev->lotg->otg.default_a)
-                                       dev->lotg->hsm.b_hnp_enable = 1;
-#endif
-                       } else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
+                       else if (setup->bRequest == USB_DEVICE_A_HNP_SUPPORT)
                                dev->gadget.a_hnp_support = 1;
                        else if (setup->bRequest ==
                                        USB_DEVICE_A_ALT_HNP_SUPPORT)
@@ -2733,7 +2722,7 @@ static void handle_usb_reset(struct langwell_udc *dev)
                dev->bus_reset = 1;
 
                /* reset all the queues, stop all USB activities */
-               stop_activity(dev, dev->driver);
+               stop_activity(dev);
                dev->usb_state = USB_STATE_DEFAULT;
        } else {
                dev_vdbg(&dev->pdev->dev, "device controller reset\n");
@@ -2741,7 +2730,7 @@ static void handle_usb_reset(struct langwell_udc *dev)
                langwell_udc_reset(dev);
 
                /* reset all the queues, stop all USB activities */
-               stop_activity(dev, dev->driver);
+               stop_activity(dev);
 
                /* reset ep0 dQH and endptctrl */
                ep0_reset(dev);
@@ -2752,12 +2741,6 @@ static void handle_usb_reset(struct langwell_udc *dev)
                dev->usb_state = USB_STATE_ATTACHED;
        }
 
-#ifdef OTG_TRANSCEIVER
-       /* refer to USB OTG 6.6.2.3 b_hnp_en is cleared */
-       if (!dev->lotg->otg.default_a)
-               dev->lotg->hsm.b_hnp_enable = 0;
-#endif
-
        dev_vdbg(&dev->pdev->dev, "<--- %s()\n", __func__);
 }
 
@@ -2770,29 +2753,6 @@ static void handle_bus_suspend(struct langwell_udc *dev)
        dev->resume_state = dev->usb_state;
        dev->usb_state = USB_STATE_SUSPENDED;
 
-#ifdef OTG_TRANSCEIVER
-       if (dev->lotg->otg.default_a) {
-               if (dev->lotg->hsm.b_bus_suspend_vld == 1) {
-                       dev->lotg->hsm.b_bus_suspend = 1;
-                       /* notify transceiver the state changes */
-                       if (spin_trylock(&dev->lotg->wq_lock)) {
-                               langwell_update_transceiver();
-                               spin_unlock(&dev->lotg->wq_lock);
-                       }
-               }
-               dev->lotg->hsm.b_bus_suspend_vld++;
-       } else {
-               if (!dev->lotg->hsm.a_bus_suspend) {
-                       dev->lotg->hsm.a_bus_suspend = 1;
-                       /* notify transceiver the state changes */
-                       if (spin_trylock(&dev->lotg->wq_lock)) {
-                               langwell_update_transceiver();
-                               spin_unlock(&dev->lotg->wq_lock);
-                       }
-               }
-       }
-#endif
-
        /* report suspend to the driver */
        if (dev->driver) {
                if (dev->driver->suspend) {
@@ -2823,11 +2783,6 @@ static void handle_bus_resume(struct langwell_udc *dev)
        if (dev->pdev->device != 0x0829)
                langwell_phy_low_power(dev, 0);
 
-#ifdef OTG_TRANSCEIVER
-       if (dev->lotg->otg.default_a == 0)
-               dev->lotg->hsm.a_bus_suspend = 0;
-#endif
-
        /* report resume to the driver */
        if (dev->driver) {
                if (dev->driver->resume) {
@@ -3020,7 +2975,6 @@ static void langwell_udc_remove(struct pci_dev *pdev)
 
        dev->done = &done;
 
-#ifndef        OTG_TRANSCEIVER
        /* free dTD dma_pool and dQH */
        if (dev->dtd_pool)
                dma_pool_destroy(dev->dtd_pool);
@@ -3032,7 +2986,6 @@ static void langwell_udc_remove(struct pci_dev *pdev)
        /* release SRAM caching */
        if (dev->has_sram && dev->got_sram)
                sram_deinit(dev);
-#endif
 
        if (dev->status_req) {
                kfree(dev->status_req->req.buf);
@@ -3045,7 +2998,6 @@ static void langwell_udc_remove(struct pci_dev *pdev)
        if (dev->got_irq)
                free_irq(pdev->irq, dev);
 
-#ifndef        OTG_TRANSCEIVER
        if (dev->cap_regs)
                iounmap(dev->cap_regs);
 
@@ -3055,13 +3007,6 @@ static void langwell_udc_remove(struct pci_dev *pdev)
 
        if (dev->enabled)
                pci_disable_device(pdev);
-#else
-       if (dev->transceiver) {
-               otg_put_transceiver(dev->transceiver);
-               dev->transceiver = NULL;
-               dev->lotg = NULL;
-       }
-#endif
 
        dev->cap_regs = NULL;
 
@@ -3072,9 +3017,7 @@ static void langwell_udc_remove(struct pci_dev *pdev)
        device_remove_file(&pdev->dev, &dev_attr_langwell_udc);
        device_remove_file(&pdev->dev, &dev_attr_remote_wakeup);
 
-#ifndef        OTG_TRANSCEIVER
        pci_set_drvdata(pdev, NULL);
-#endif
 
        /* free dev, wait for the release() finished */
        wait_for_completion(&done);
@@ -3089,9 +3032,7 @@ static int langwell_udc_probe(struct pci_dev *pdev,
                const struct pci_device_id *id)
 {
        struct langwell_udc     *dev;
-#ifndef        OTG_TRANSCEIVER
        unsigned long           resource, len;
-#endif
        void                    __iomem *base = NULL;
        size_t                  size;
        int                     retval;
@@ -3109,16 +3050,6 @@ static int langwell_udc_probe(struct pci_dev *pdev,
        dev->pdev = pdev;
        dev_dbg(&dev->pdev->dev, "---> %s()\n", __func__);
 
-#ifdef OTG_TRANSCEIVER
-       /* PCI device is already enabled by otg_transceiver driver */
-       dev->enabled = 1;
-
-       /* mem region and register base */
-       dev->region = 1;
-       dev->transceiver = otg_get_transceiver();
-       dev->lotg = otg_to_langwell(dev->transceiver);
-       base = dev->lotg->regs;
-#else
        pci_set_drvdata(pdev, dev);
 
        /* now all the pci goodies ... */
@@ -3139,7 +3070,6 @@ static int langwell_udc_probe(struct pci_dev *pdev,
        dev->region = 1;
 
        base = ioremap_nocache(resource, len);
-#endif
        if (base == NULL) {
                dev_err(&dev->pdev->dev, "can't map memory\n");
                retval = -EFAULT;
@@ -3163,7 +3093,6 @@ static int langwell_udc_probe(struct pci_dev *pdev,
        dev->got_sram = 0;
        dev_vdbg(&dev->pdev->dev, "dev->has_sram: %d\n", dev->has_sram);
 
-#ifndef        OTG_TRANSCEIVER
        /* enable SRAM caching if detected */
        if (dev->has_sram && !dev->got_sram)
                sram_init(dev);
@@ -3182,7 +3111,6 @@ static int langwell_udc_probe(struct pci_dev *pdev,
                goto error;
        }
        dev->got_irq = 1;
-#endif
 
        /* set stopped bit */
        dev->stopped = 1;
@@ -3257,10 +3185,8 @@ static int langwell_udc_probe(struct pci_dev *pdev,
        dev->remote_wakeup = 0;
        dev->dev_status = 1 << USB_DEVICE_SELF_POWERED;
 
-#ifndef        OTG_TRANSCEIVER
        /* reset device controller */
        langwell_udc_reset(dev);
-#endif
 
        /* initialize gadget structure */
        dev->gadget.ops = &langwell_ops;        /* usb_gadget_ops */
@@ -3268,9 +3194,6 @@ static int langwell_udc_probe(struct pci_dev *pdev,
        INIT_LIST_HEAD(&dev->gadget.ep_list);   /* ep_list */
        dev->gadget.speed = USB_SPEED_UNKNOWN;  /* speed */
        dev->gadget.max_speed = USB_SPEED_HIGH; /* support dual speed */
-#ifdef OTG_TRANSCEIVER
-       dev->gadget.is_otg = 1;                 /* support otg mode */
-#endif
 
        /* the "gadget" abstracts/virtualizes the controller */
        dev_set_name(&dev->gadget.dev, "gadget");
@@ -3282,10 +3205,8 @@ static int langwell_udc_probe(struct pci_dev *pdev,
        /* controller endpoints reinit */
        eps_reinit(dev);
 
-#ifndef        OTG_TRANSCEIVER
        /* reset ep0 dQH and endptctrl */
        ep0_reset(dev);
-#endif
 
        /* create dTD dma_pool resource */
        dev->dtd_pool = dma_pool_create("langwell_dtd",
@@ -3367,7 +3288,7 @@ static int langwell_udc_suspend(struct pci_dev *pdev, pm_message_t state)
 
        spin_lock_irq(&dev->lock);
        /* stop all usb activities */
-       stop_activity(dev, dev->driver);
+       stop_activity(dev);
        spin_unlock_irq(&dev->lock);
 
        /* free dTD dma_pool and dQH */
@@ -3525,22 +3446,14 @@ static struct pci_driver langwell_pci_driver = {
 
 static int __init init(void)
 {
-#ifdef OTG_TRANSCEIVER
-       return langwell_register_peripheral(&langwell_pci_driver);
-#else
        return pci_register_driver(&langwell_pci_driver);
-#endif
 }
 module_init(init);
 
 
 static void __exit cleanup(void)
 {
-#ifdef OTG_TRANSCEIVER
-       return langwell_unregister_peripheral(&langwell_pci_driver);
-#else
        pci_unregister_driver(&langwell_pci_driver);
-#endif
 }
 module_exit(cleanup);
 
index ef79e242b7b0debf6b9bbb169dd2b5cedf13c733..d6e78accaffee7d8fc68d26291c404e5846d5ac2 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/usb/langwell_udc.h>
-#include <linux/usb/langwell_otg.h>
 
 /*-------------------------------------------------------------------------*/
 
index c7f291a331dfc4454a4879b8e5361f6a3b95278c..85ea14e2545e8fa29d777ddbd945ed8df8476807 100644 (file)
@@ -598,16 +598,16 @@ static __maybe_unused struct usb_ss_cap_descriptor fsg_ss_cap_desc = {
                | USB_5GBPS_OPERATION),
        .bFunctionalitySupport = USB_LOW_SPEED_OPERATION,
        .bU1devExitLat =        USB_DEFAULT_U1_DEV_EXIT_LAT,
-       .bU2DevExitLat =        USB_DEFAULT_U2_DEV_EXIT_LAT,
+       .bU2DevExitLat =        cpu_to_le16(USB_DEFAULT_U2_DEV_EXIT_LAT),
 };
 
 static __maybe_unused struct usb_bos_descriptor fsg_bos_desc = {
        .bLength =              USB_DT_BOS_SIZE,
        .bDescriptorType =      USB_DT_BOS,
 
-       .wTotalLength =         USB_DT_BOS_SIZE
+       .wTotalLength =         cpu_to_le16(USB_DT_BOS_SIZE
                                + USB_DT_USB_EXT_CAP_SIZE
-                               + USB_DT_USB_SS_CAP_SIZE,
+                               + USB_DT_USB_SS_CAP_SIZE),
 
        .bNumDeviceCaps =       2,
 };
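
Multi-byte USB descriptor fields are little-endian on the wire, so static initializers for 16-bit members such as wTotalLength and bU2DevExitLat should go through cpu_to_le16(); without it the values come out wrong on big-endian hosts. The corrected BOS descriptor, condensed purely as an illustration:

	/* Condensed illustration of the fix above */
	static struct usb_bos_descriptor example_bos_desc = {
		.bLength =		USB_DT_BOS_SIZE,
		.bDescriptorType =	USB_DT_BOS,
		.wTotalLength =		cpu_to_le16(USB_DT_BOS_SIZE
						    + USB_DT_USB_EXT_CAP_SIZE
						    + USB_DT_USB_SS_CAP_SIZE),
		.bNumDeviceCaps =	2,
	};
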
index e90344a1763173e4c49c9099ecf65a7609aad358..b556a72264d1084b5e6809ea5b4a3f59c5583b82 100644 (file)
@@ -125,7 +125,7 @@ static int usb_hcd_fsl_probe(const struct hc_driver *driver,
         */
        if (pdata->init && pdata->init(pdev)) {
                retval = -ENODEV;
-               goto err3;
+               goto err4;
        }
 
        /* Enable USB controller, 83xx or 8536 */
index f4b627d343acdf39d1f33d20cea8da7dc6c0f27d..01bb7241d6efd53f3769d47e86d59a28c5cf9ce2 100644 (file)
@@ -276,6 +276,9 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
 
        /* Serial Bus Release Number is at PCI 0x60 offset */
        pci_read_config_byte(pdev, 0x60, &ehci->sbrn);
+       if (pdev->vendor == PCI_VENDOR_ID_STMICRO
+           && pdev->device == PCI_DEVICE_ID_STMICRO_USB_HOST)
+               ehci->sbrn = 0x20; /* ConneXT has no sbrn register */
 
        /* Keep this around for a while just in case some EHCI
         * implementation uses legacy PCI PM support.  This test
@@ -526,6 +529,9 @@ static const struct pci_device_id pci_ids [] = { {
        /* handle any USB 2.0 EHCI controller */
        PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0),
        .driver_data =  (unsigned long) &ehci_pci_hc_driver,
+       }, {
+       PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_HOST),
+       .driver_data = (unsigned long) &ehci_pci_hc_driver,
        },
        { /* end: all zeroes */ }
 };
index 32793ce3d9e9dd4a32588204d5a59680046374e6..9c2cc4633894b152047ed587b2cf9494d5fdc684 100644 (file)
@@ -183,7 +183,7 @@ static int __devinit ehci_hcd_xilinx_of_probe(struct platform_device *op)
        }
 
        irq = irq_of_parse_and_map(dn, 0);
-       if (irq == NO_IRQ) {
+       if (!irq) {
                printk(KERN_ERR "%s: irq_of_parse_and_map failed\n", __FILE__);
                rv = -EBUSY;
                goto err_irq;
index 5df0b0e3392bed244a414bcc58f4da104822b49c..77afabc77f9be8d71fd4502bd5f315d9c8bd2c40 100644 (file)
@@ -139,8 +139,23 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
        }
 
        iclk = clk_get(&pdev->dev, "ohci_clk");
+       if (IS_ERR(iclk)) {
+               dev_err(&pdev->dev, "failed to get ohci_clk\n");
+               retval = PTR_ERR(iclk);
+               goto err3;
+       }
        fclk = clk_get(&pdev->dev, "uhpck");
+       if (IS_ERR(fclk)) {
+               dev_err(&pdev->dev, "failed to get uhpck\n");
+               retval = PTR_ERR(fclk);
+               goto err4;
+       }
        hclk = clk_get(&pdev->dev, "hclk");
+       if (IS_ERR(hclk)) {
+               dev_err(&pdev->dev, "failed to get hclk\n");
+               retval = PTR_ERR(hclk);
+               goto err5;
+       }
 
        at91_start_hc(pdev);
        ohci_hcd_init(hcd_to_ohci(hcd));
@@ -153,9 +168,12 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
        at91_stop_hc(pdev);
 
        clk_put(hclk);
+ err5:
        clk_put(fclk);
+ err4:
        clk_put(iclk);
 
+ err3:
        iounmap(hcd->regs);
 
  err2:
@@ -226,7 +244,8 @@ static void ohci_at91_usb_set_power(struct at91_usbh_data *pdata, int port, int
        if (!gpio_is_valid(pdata->vbus_pin[port]))
                return;
 
-       gpio_set_value(pdata->vbus_pin[port], !pdata->vbus_pin_inverted ^ enable);
+       gpio_set_value(pdata->vbus_pin[port],
+                      !pdata->vbus_pin_active_low[port] ^ enable);
 }
 
 static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
@@ -237,7 +256,8 @@ static int ohci_at91_usb_get_power(struct at91_usbh_data *pdata, int port)
        if (!gpio_is_valid(pdata->vbus_pin[port]))
                return -EINVAL;
 
-       return gpio_get_value(pdata->vbus_pin[port]) ^ !pdata->vbus_pin_inverted;
+       return gpio_get_value(pdata->vbus_pin[port]) ^
+               !pdata->vbus_pin_active_low[port];
 }
 
 /*
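
clk_get() reports failure with ERR_PTR()-encoded values rather than NULL, which is why each lookup above gains an IS_ERR() check and the error path releases the clocks already taken, in reverse order. A compact sketch of the idiom with made-up names:

	/* Illustrative sketch -- function name and argument layout are assumptions */
	static int example_get_clocks(struct platform_device *pdev,
				      struct clk **iclk, struct clk **fclk)
	{
		*iclk = clk_get(&pdev->dev, "ohci_clk");
		if (IS_ERR(*iclk))
			return PTR_ERR(*iclk);

		*fclk = clk_get(&pdev->dev, "uhpck");
		if (IS_ERR(*fclk)) {
			clk_put(*iclk);
			return PTR_ERR(*fclk);
		}

		return 0;
	}
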
index 5179fcd73d8a391b165abaf1327dd944916dce1a..e4bcb62b930a9ef9e790044255b0d587c890e6c1 100644 (file)
@@ -82,6 +82,14 @@ urb_print(struct urb * urb, char * str, int small, int status)
                ohci_dbg(ohci,format, ## arg ); \
        } while (0);
 
+/* Version for use where "next" is the address of a local variable */
+#define ohci_dbg_nosw(ohci, next, size, format, arg...) \
+       do { \
+               unsigned s_len; \
+               s_len = scnprintf(*next, *size, format, ## arg); \
+               *size -= s_len; *next += s_len; \
+       } while (0);
+
 
 static void ohci_dump_intr_mask (
        struct ohci_hcd *ohci,
@@ -653,7 +661,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
 
        /* dump driver info, then registers in spec order */
 
-       ohci_dbg_sw (ohci, &next, &size,
+       ohci_dbg_nosw(ohci, &next, &size,
                "bus %s, device %s\n"
                "%s\n"
                "%s\n",
@@ -672,7 +680,7 @@ static ssize_t fill_registers_buffer(struct debug_buffer *buf)
 
        /* hcca */
        if (ohci->hcca)
-               ohci_dbg_sw (ohci, &next, &size,
+               ohci_dbg_nosw(ohci, &next, &size,
                        "hcca frame 0x%04x\n", ohci_frame_no(ohci));
 
        /* other registers mostly affect frame timings */
index 6109810cc2d3eba72c8eeb6ebb38eff17b93a399..1843bb68ac7ceefc73afcc2ed9f3a2f7fa8de1db 100644 (file)
@@ -397,6 +397,10 @@ static const struct pci_device_id pci_ids [] = { {
        /* handle any USB OHCI controller */
        PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_OHCI, ~0),
        .driver_data =  (unsigned long) &ohci_pci_hc_driver,
+       }, {
+       /* The device in the ConneXT I/O hub has no class reg */
+       PCI_VDEVICE(STMICRO, PCI_DEVICE_ID_STMICRO_USB_OHCI),
+       .driver_data =  (unsigned long) &ohci_pci_hc_driver,
        }, { /* end: all zeroes */ }
 };
 MODULE_DEVICE_TABLE (pci, pci_ids);
index b90e1386418b429f05eada3487117d4fbba50186..b62037bff688c07c38f0ad06e22ab8cb964bf62f 100644 (file)
@@ -1204,6 +1204,7 @@ static void handle_vendor_event(struct xhci_hcd *xhci,
  *
  * Returns a zero-based port number, which is suitable for indexing into each of
  * the split roothubs' port arrays and bus state arrays.
+ * Add one to it in order to call xhci_find_slot_id_by_port.
  */
 static unsigned int find_faked_portnum_from_hw_portnum(struct usb_hcd *hcd,
                struct xhci_hcd *xhci, u32 port_id)
@@ -1324,7 +1325,7 @@ static void handle_port_status(struct xhci_hcd *xhci,
                        xhci_set_link_state(xhci, port_array, faked_port_index,
                                                XDEV_U0);
                        slot_id = xhci_find_slot_id_by_port(hcd, xhci,
-                                       faked_port_index);
+                                       faked_port_index + 1);
                        if (!slot_id) {
                                xhci_dbg(xhci, "slot_id is zero\n");
                                goto cleanup;
@@ -3323,7 +3324,8 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
                /* Check TD length */
                if (running_total != td_len) {
                        xhci_err(xhci, "ISOC TD length unmatch\n");
-                       return -EINVAL;
+                       ret = -EINVAL;
+                       goto cleanup;
                }
        }
 
index d9b6a0355443c8457b16e3e660f7a0edb7324d25..da97dcec1f32c88ee95c49a097517b1e9390b41d 100644 (file)
@@ -37,9 +37,6 @@ static int emi26_set_reset(struct usb_device *dev, unsigned char reset_bit);
 static int emi26_load_firmware (struct usb_device *dev);
 static int emi26_probe(struct usb_interface *intf, const struct usb_device_id *id);
 static void emi26_disconnect(struct usb_interface *intf);
-static int __init emi26_init (void);
-static void __exit emi26_exit (void);
-
 
 /* thanks to drivers/usb/serial/keyspan_pda.c code */
 static int emi26_writememory (struct usb_device *dev, int address,
index 9f39062ebb080dfe57644fdcb52ea2e74542576d..4e0f167a6c4ef49832bd1357517e738b397980a0 100644 (file)
@@ -46,9 +46,6 @@ static int emi62_set_reset(struct usb_device *dev, unsigned char reset_bit);
 static int emi62_load_firmware (struct usb_device *dev);
 static int emi62_probe(struct usb_interface *intf, const struct usb_device_id *id);
 static void emi62_disconnect(struct usb_interface *intf);
-static int __init emi62_init (void);
-static void __exit emi62_exit (void);
-
 
 /* thanks to drivers/usb/serial/keyspan_pda.c code */
 static int emi62_writememory(struct usb_device *dev, int address,
index 107bf13b1cf14e64619a081b3f28c4d6f67ae94d..b2d82b937392eb9b0186416938d2485b2fd29f18 100644 (file)
@@ -24,7 +24,7 @@
 
 #define VENDOR_ID      0x0fc5
 #define PRODUCT_ID     0x1227
-#define MAXLEN         6
+#define MAXLEN         8
 
 /* table of devices that work with this driver */
 static const struct usb_device_id id_table[] = {
index f9a3f62a83b51d82d9192a4d9325eb0318df689e..7c569f51212abeeed36e504dde9b6a1e5460817c 100644 (file)
@@ -33,9 +33,6 @@
 #include <linux/platform_device.h>
 #include <linux/dma-mapping.h>
 
-#include <mach/hardware.h>
-#include <mach/memory.h>
-#include <asm/gpio.h>
 #include <mach/cputype.h>
 
 #include <asm/mach-types.h>
index 56cf0243979e504df48742f447dd1e6c977ab924..3d11cf64ebd17d6b318df2bd33f696a15029da8a 100644 (file)
@@ -981,6 +981,9 @@ static void musb_shutdown(struct platform_device *pdev)
        unsigned long   flags;
 
        pm_runtime_get_sync(musb->controller);
+
+       musb_gadget_cleanup(musb);
+
        spin_lock_irqsave(&musb->lock, flags);
        musb_platform_disable(musb);
        musb_generic_disable(musb);
@@ -1827,8 +1830,6 @@ static void musb_free(struct musb *musb)
        sysfs_remove_group(&musb->controller->kobj, &musb_attr_group);
 #endif
 
-       musb_gadget_cleanup(musb);
-
        if (musb->nIrq >= 0) {
                if (musb->irq_wake)
                        disable_irq_wake(musb->nIrq);
index c27bbbf32b522376d51c5b089d96508680c0e2bf..df719eae3b033a23431fbdb164b2e435b222487c 100644 (file)
@@ -222,7 +222,6 @@ static inline void omap2430_low_level_init(struct musb *musb)
        musb_writel(musb->mregs, OTG_FORCESTDBY, l);
 }
 
-/* blocking notifier support */
 static int musb_otg_notifications(struct notifier_block *nb,
                unsigned long event, void *unused)
 {
@@ -231,7 +230,7 @@ static int musb_otg_notifications(struct notifier_block *nb,
        musb->xceiv_event = event;
        schedule_work(&musb->otg_notifier_work);
 
-       return 0;
+       return NOTIFY_OK;
 }
 
 static void musb_otg_notifier_work(struct work_struct *data_notifier_work)
@@ -386,6 +385,7 @@ static void omap2430_musb_disable(struct musb *musb)
 static int omap2430_musb_exit(struct musb *musb)
 {
        del_timer_sync(&musb_idle_timer);
+       cancel_work_sync(&musb->otg_notifier_work);
 
        omap2430_low_level_exit(musb);
        otg_put_transceiver(musb->xceiv);
index a163632877afa5a0a68fdbeb297c721ef2308273..97cb45916c4351110e3a21fb2613c2954855ee9b 100644 (file)
@@ -84,7 +84,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
        struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
        struct dma_chan *dma_chan = ux500_channel->dma_chan;
        struct dma_async_tx_descriptor *dma_desc;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        struct scatterlist sg;
        struct dma_slave_config slave_conf;
        enum dma_slave_buswidth addr_width;
@@ -104,7 +104,7 @@ static bool ux500_configure_channel(struct dma_channel *channel,
        sg_dma_address(&sg) = dma_addr;
        sg_dma_len(&sg) = len;
 
-       direction = ux500_channel->is_tx ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+       direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
        addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
                                        DMA_SLAVE_BUSWIDTH_4_BYTES;
 
index 2a25955881fc77cb2ac97cb6d182b628aecedda7..76d6293454185134247929dc7cad69c367fd48fa 100644 (file)
@@ -86,20 +86,6 @@ config NOP_USB_XCEIV
          built-in with usb ip or which are autonomous and doesn't require any
          phy programming such as ISP1x04 etc.
 
-config USB_LANGWELL_OTG
-       tristate "Intel Langwell USB OTG dual-role support"
-       depends on USB && PCI && INTEL_SCU_IPC
-       select USB_OTG
-       select USB_OTG_UTILS
-       help
-         Say Y here if you want to build Intel Langwell USB OTG
-         transciever driver in kernel. This driver implements role
-         switch between EHCI host driver and Langwell USB OTG
-         client driver.
-
-         To compile this driver as a module, choose M here: the
-         module will be called langwell_otg.
-
 config USB_MSM_OTG
        tristate "OTG support for Qualcomm on-chip USB controller"
        depends on (USB || USB_GADGET) && ARCH_MSM
@@ -124,7 +110,7 @@ config AB8500_USB
 
 config FSL_USB2_OTG
        bool "Freescale USB OTG Transceiver Driver"
-       depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2
+       depends on USB_EHCI_FSL && USB_GADGET_FSL_USB2 && USB_SUSPEND
        select USB_OTG
        select USB_OTG_UTILS
        help
@@ -132,7 +118,7 @@ config FSL_USB2_OTG
 
 config USB_MV_OTG
        tristate "Marvell USB OTG support"
-       depends on USB_MV_UDC
+       depends on USB_MV_UDC && USB_SUSPEND
        select USB_OTG
        select USB_OTG_UTILS
        help
index b2c5a9598637d98bb5d7e40e74145d33b7a20bea..41aa5098b139973a4b7e961b7227d2718690eb05 100644 (file)
@@ -13,7 +13,6 @@ obj-$(CONFIG_USB_GPIO_VBUS)   += gpio_vbus.o
 obj-$(CONFIG_ISP1301_OMAP)     += isp1301_omap.o
 obj-$(CONFIG_TWL4030_USB)      += twl4030-usb.o
 obj-$(CONFIG_TWL6030_USB)      += twl6030-usb.o
-obj-$(CONFIG_USB_LANGWELL_OTG) += langwell_otg.o
 obj-$(CONFIG_NOP_USB_XCEIV)    += nop-usb-xceiv.o
 obj-$(CONFIG_USB_ULPI)         += ulpi.o
 obj-$(CONFIG_USB_ULPI_VIEWPORT)        += ulpi_viewport.o
diff --git a/drivers/usb/otg/langwell_otg.c b/drivers/usb/otg/langwell_otg.c
deleted file mode 100644 (file)
index f08f784..0000000
+++ /dev/null
@@ -1,2347 +0,0 @@
-/*
- * Intel Langwell USB OTG transceiver driver
- * Copyright (C) 2008 - 2010, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-/* This driver helps to switch Langwell OTG controller function between host
- * and peripheral. It works with EHCI driver and Langwell client controller
- * driver together.
- */
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/pci.h>
-#include <linux/errno.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/device.h>
-#include <linux/moduleparam.h>
-#include <linux/usb/ch9.h>
-#include <linux/usb/gadget.h>
-#include <linux/usb.h>
-#include <linux/usb/otg.h>
-#include <linux/usb/hcd.h>
-#include <linux/notifier.h>
-#include <linux/delay.h>
-#include <asm/intel_scu_ipc.h>
-
-#include <linux/usb/langwell_otg.h>
-
-#define        DRIVER_DESC             "Intel Langwell USB OTG transceiver driver"
-#define        DRIVER_VERSION          "July 10, 2010"
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_AUTHOR("Henry Yuan <hang.yuan@intel.com>, Hao Wu <hao.wu@intel.com>");
-MODULE_VERSION(DRIVER_VERSION);
-MODULE_LICENSE("GPL");
-
-static const char driver_name[] = "langwell_otg";
-
-static int langwell_otg_probe(struct pci_dev *pdev,
-                       const struct pci_device_id *id);
-static void langwell_otg_remove(struct pci_dev *pdev);
-static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message);
-static int langwell_otg_resume(struct pci_dev *pdev);
-
-static int langwell_otg_set_host(struct otg_transceiver *otg,
-                               struct usb_bus *host);
-static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
-                               struct usb_gadget *gadget);
-static int langwell_otg_start_srp(struct otg_transceiver *otg);
-
-static const struct pci_device_id pci_ids[] = {{
-       .class =        ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
-       .class_mask =   ~0,
-       .vendor =       0x8086,
-       .device =       0x0811,
-       .subvendor =    PCI_ANY_ID,
-       .subdevice =    PCI_ANY_ID,
-}, { /* end: all zeroes */ }
-};
-
-static struct pci_driver otg_pci_driver = {
-       .name =         (char *) driver_name,
-       .id_table =     pci_ids,
-
-       .probe =        langwell_otg_probe,
-       .remove =       langwell_otg_remove,
-
-       .suspend =      langwell_otg_suspend,
-       .resume =       langwell_otg_resume,
-};
-
-/* HSM timers */
-static inline struct langwell_otg_timer *otg_timer_initializer
-(void (*function)(unsigned long), unsigned long expires, unsigned long data)
-{
-       struct langwell_otg_timer *timer;
-       timer = kmalloc(sizeof(struct langwell_otg_timer), GFP_KERNEL);
-       if (timer == NULL)
-               return timer;
-
-       timer->function = function;
-       timer->expires = expires;
-       timer->data = data;
-       return timer;
-}
-
-static struct langwell_otg_timer *a_wait_vrise_tmr, *a_aidl_bdis_tmr,
-       *b_se0_srp_tmr, *b_srp_init_tmr;
-
-static struct list_head active_timers;
-
-static struct langwell_otg *the_transceiver;
-
-/* host/client notify transceiver when event affects HNP state */
-void langwell_update_transceiver(void)
-{
-       struct langwell_otg *lnw = the_transceiver;
-
-       dev_dbg(lnw->dev, "transceiver is updated\n");
-
-       if (!lnw->qwork)
-               return ;
-
-       queue_work(lnw->qwork, &lnw->work);
-}
-EXPORT_SYMBOL(langwell_update_transceiver);
-
-static int langwell_otg_set_host(struct otg_transceiver *otg,
-                                       struct usb_bus *host)
-{
-       otg->host = host;
-
-       return 0;
-}
-
-static int langwell_otg_set_peripheral(struct otg_transceiver *otg,
-                                       struct usb_gadget *gadget)
-{
-       otg->gadget = gadget;
-
-       return 0;
-}
-
-static int langwell_otg_set_power(struct otg_transceiver *otg,
-                               unsigned mA)
-{
-       return 0;
-}
-
-/* A-device drives vbus, controlled through IPC commands */
-static int langwell_otg_set_vbus(struct otg_transceiver *otg, bool enabled)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       u8                              sub_id;
-
-       dev_dbg(lnw->dev, "%s <--- %s\n", __func__, enabled ? "on" : "off");
-
-       if (enabled)
-               sub_id = 0x8; /* Turn on the VBus */
-       else
-               sub_id = 0x9; /* Turn off the VBus */
-
-       if (intel_scu_ipc_simple_command(0xef, sub_id)) {
-               dev_dbg(lnw->dev, "Failed to set Vbus via IPC commands\n");
-               return -EBUSY;
-       }
-
-       dev_dbg(lnw->dev, "%s <---\n", __func__);
-
-       return 0;
-}
-
-/* charge vbus or discharge vbus through a resistor to ground */
-static void langwell_otg_chrg_vbus(int on)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       u32     val;
-
-       val = readl(lnw->iotg.base + CI_OTGSC);
-
-       if (on)
-               writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VC,
-                               lnw->iotg.base + CI_OTGSC);
-       else
-               writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_VD,
-                               lnw->iotg.base + CI_OTGSC);
-}
-
-/* Start SRP */
-static int langwell_otg_start_srp(struct otg_transceiver *otg)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-       u32                             val;
-
-       dev_dbg(lnw->dev, "%s --->\n", __func__);
-
-       val = readl(iotg->base + CI_OTGSC);
-
-       writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HADP,
-                               iotg->base + CI_OTGSC);
-
-       /* Check if the data pulse has finished */
-       msleep(8);
-       val = readl(iotg->base + CI_OTGSC);
-       if (val & (OTGSC_HADP | OTGSC_DP))
-               dev_dbg(lnw->dev, "DataLine SRP Error\n");
-
-       /* Disable interrupt - b_sess_vld */
-       val = readl(iotg->base + CI_OTGSC);
-       val &= (~(OTGSC_BSVIE | OTGSC_BSEIE));
-       writel(val, iotg->base + CI_OTGSC);
-
-       /* Start VBus SRP, drive vbus to generate VBus pulse */
-       iotg->otg.set_vbus(&iotg->otg, true);
-       msleep(15);
-       iotg->otg.set_vbus(&iotg->otg, false);
-
-       /* Enable interrupt - b_sess_vld */
-       val = readl(iotg->base + CI_OTGSC);
-       dev_dbg(lnw->dev, "after VBUS pulse otgsc = %x\n", val);
-
-       val |= (OTGSC_BSVIE | OTGSC_BSEIE);
-       writel(val, iotg->base + CI_OTGSC);
-
-       /* If Vbus is valid, then update the hsm */
-       if (val & OTGSC_BSV) {
-               dev_dbg(lnw->dev, "no b_sess_vld interrupt\n");
-
-               lnw->iotg.hsm.b_sess_vld = 1;
-               langwell_update_transceiver();
-       }
-
-       dev_dbg(lnw->dev, "%s <---\n", __func__);
-       return 0;
-}
-
-/* start/stop SOF via bus_resume/bus_suspend */
-static void langwell_otg_loc_sof(int on)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       struct usb_hcd          *hcd;
-       int                     err;
-
-       dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "resume" : "suspend");
-
-       hcd = bus_to_hcd(lnw->iotg.otg.host);
-       if (on)
-               err = hcd->driver->bus_resume(hcd);
-       else
-               err = hcd->driver->bus_suspend(hcd);
-
-       if (err)
-               dev_dbg(lnw->dev, "Failed to resume/suspend USB bus - %d\n", err);
-
-       dev_dbg(lnw->dev, "%s <---\n", __func__);
-}
-
-static int langwell_otg_check_otgsc(void)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       u32                             otgsc, usbcfg;
-
-       dev_dbg(lnw->dev, "check whether OTGSC and USBCFG registers are in sync\n");
-
-       otgsc = readl(lnw->iotg.base + CI_OTGSC);
-       usbcfg = readl(lnw->usbcfg);
-
-       dev_dbg(lnw->dev, "OTGSC = %08x, USBCFG = %08x\n",
-                                       otgsc, usbcfg);
-       dev_dbg(lnw->dev, "OTGSC_AVV = %d\n", !!(otgsc & OTGSC_AVV));
-       dev_dbg(lnw->dev, "USBCFG.VBUSVAL = %d\n",
-                                       !!(usbcfg & USBCFG_VBUSVAL));
-       dev_dbg(lnw->dev, "OTGSC_ASV = %d\n", !!(otgsc & OTGSC_ASV));
-       dev_dbg(lnw->dev, "USBCFG.AVALID = %d\n",
-                                       !!(usbcfg & USBCFG_AVALID));
-       dev_dbg(lnw->dev, "OTGSC_BSV = %d\n", !!(otgsc & OTGSC_BSV));
-       dev_dbg(lnw->dev, "USBCFG.BVALID = %d\n",
-                                       !!(usbcfg & USBCFG_BVALID));
-       dev_dbg(lnw->dev, "OTGSC_BSE = %d\n", !!(otgsc & OTGSC_BSE));
-       dev_dbg(lnw->dev, "USBCFG.SESEND = %d\n",
-                                       !!(usbcfg & USBCFG_SESEND));
-
-       /* Check USBCFG VBusValid/AValid/BValid/SessEnd */
-       if (!!(otgsc & OTGSC_AVV) ^ !!(usbcfg & USBCFG_VBUSVAL)) {
-               dev_dbg(lnw->dev, "OTGSC.AVV != USBCFG.VBUSVAL\n");
-               goto err;
-       }
-       if (!!(otgsc & OTGSC_ASV) ^ !!(usbcfg & USBCFG_AVALID)) {
-               dev_dbg(lnw->dev, "OTGSC.ASV != USBCFG.AVALID\n");
-               goto err;
-       }
-       if (!!(otgsc & OTGSC_BSV) ^ !!(usbcfg & USBCFG_BVALID)) {
-               dev_dbg(lnw->dev, "OTGSC.BSV != USBCFG.BVALID\n");
-               goto err;
-       }
-       if (!!(otgsc & OTGSC_BSE) ^ !!(usbcfg & USBCFG_SESEND)) {
-               dev_dbg(lnw->dev, "OTGSC.BSE != USBCFG.SESEND\n");
-               goto err;
-       }
-
-       dev_dbg(lnw->dev, "OTGSC and USBCFG are synced\n");
-
-       return 0;
-
-err:
-       dev_warn(lnw->dev, "OTGSC isn't equal to USBCFG\n");
-       return -EPIPE;
-}
-
-
-static void langwell_otg_phy_low_power(int on)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-       u8                              val, phcd;
-       int                             retval;
-
-       dev_dbg(lnw->dev, "%s ---> %s mode\n",
-                       __func__, on ? "Low power" : "Normal");
-
-       phcd = 0x40;
-
-       val = readb(iotg->base + CI_HOSTPC1 + 2);
-
-       if (on) {
-               /* Due to a hardware issue, OTGSC and USBCFG fail to stay in
-                * sync after PHCD is set. Before setting PHCD, check whether
-                * a sync is in progress; if it is, do not touch the PHCD
-                * bit. */
-               retval = langwell_otg_check_otgsc();
-               if (retval) {
-                       dev_dbg(lnw->dev, "Skip PHCD programming..\n");
-                       return;
-               }
-
-               writeb(val | phcd, iotg->base + CI_HOSTPC1 + 2);
-       } else
-               writeb(val & ~phcd, iotg->base + CI_HOSTPC1 + 2);
-
-       dev_dbg(lnw->dev, "%s <--- done\n", __func__);
-}
-
-/* After driving VBus, add a 5 ms delay before setting PHCD */
-static void langwell_otg_phy_low_power_wait(int on)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-
-       dev_dbg(lnw->dev, "add 5 ms delay before programming PHCD\n");
-
-       mdelay(5);
-       langwell_otg_phy_low_power(on);
-}
-
-/* Enable/Disable OTG interrupt */
-static void langwell_otg_intr(int on)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-       u32                             val;
-
-       dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
-
-       val = readl(iotg->base + CI_OTGSC);
-
-       /* OTGSC_INT_MASK doesn't contain the 1ms interrupt */
-       if (on) {
-               val = val | (OTGSC_INT_MASK);
-               writel(val, iotg->base + CI_OTGSC);
-       } else {
-               val = val & ~(OTGSC_INT_MASK);
-               writel(val, iotg->base + CI_OTGSC);
-       }
-
-       dev_dbg(lnw->dev, "%s <---\n", __func__);
-}
-
-/* set HAAR: Hardware Assist Auto-Reset */
-static void langwell_otg_HAAR(int on)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-       u32                             val;
-
-       dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
-
-       val = readl(iotg->base + CI_OTGSC);
-       if (on)
-               writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HAAR,
-                                       iotg->base + CI_OTGSC);
-       else
-               writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HAAR,
-                                       iotg->base + CI_OTGSC);
-
-       dev_dbg(lnw->dev, "%s <---\n", __func__);
-}
-
-/* set HABA: Hardware Assist B-Disconnect to A-Connect */
-static void langwell_otg_HABA(int on)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-       u32                             val;
-
-       dev_dbg(lnw->dev, "%s ---> %s\n", __func__, on ? "on" : "off");
-
-       val = readl(iotg->base + CI_OTGSC);
-       if (on)
-               writel((val & ~OTGSC_INTSTS_MASK) | OTGSC_HABA,
-                                       iotg->base + CI_OTGSC);
-       else
-               writel((val & ~OTGSC_INTSTS_MASK) & ~OTGSC_HABA,
-                                       iotg->base + CI_OTGSC);
-
-       dev_dbg(lnw->dev, "%s <---\n", __func__);
-}
-
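-/* Poll the PORTSC line-state bits, in 100 us steps, for up to
- * TB_SE0_SRP * 10 iterations; returns non-zero if the data lines left SE0 */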
-static int langwell_otg_check_se0_srp(int on)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       int                     delay_time = TB_SE0_SRP * 10;
-       u32                     val;
-
-       dev_dbg(lnw->dev, "%s --->\n", __func__);
-
-       do {
-               udelay(100);
-               if (!delay_time--)
-                       break;
-               val = readl(lnw->iotg.base + CI_PORTSC1);
-               val &= PORTSC_LS;
-       } while (!val);
-
-       dev_dbg(lnw->dev, "%s <---\n", __func__);
-       return val;
-}
-
-/* Timeout callback to set the timeout bit */
-static void set_tmout(unsigned long indicator)
-{
-       *(int *)indicator = 1;
-}
-
-void langwell_otg_nsf_msg(unsigned long indicator)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-
-       switch (indicator) {
-       case 2:
-       case 4:
-       case 6:
-       case 7:
-               dev_warn(lnw->dev,
-                       "OTG:NSF-%lu - device not responding\n", indicator);
-               break;
-       case 3:
-               dev_warn(lnw->dev,
-                       "OTG:NSF-%lu - device not supported\n", indicator);
-               break;
-       default:
-               dev_warn(lnw->dev, "Unknown NSF message\n");
-               break;
-       }
-}
-
-/* Initialize timers */
-static int langwell_otg_init_timers(struct otg_hsm *hsm)
-{
-       /* HSM used timers */
-       a_wait_vrise_tmr = otg_timer_initializer(&set_tmout, TA_WAIT_VRISE,
-                               (unsigned long)&hsm->a_wait_vrise_tmout);
-       if (a_wait_vrise_tmr == NULL)
-               return -ENOMEM;
-       a_aidl_bdis_tmr = otg_timer_initializer(&set_tmout, TA_AIDL_BDIS,
-                               (unsigned long)&hsm->a_aidl_bdis_tmout);
-       if (a_aidl_bdis_tmr == NULL)
-               return -ENOMEM;
-       b_se0_srp_tmr = otg_timer_initializer(&set_tmout, TB_SE0_SRP,
-                               (unsigned long)&hsm->b_se0_srp);
-       if (b_se0_srp_tmr == NULL)
-               return -ENOMEM;
-       b_srp_init_tmr = otg_timer_initializer(&set_tmout, TB_SRP_INIT,
-                               (unsigned long)&hsm->b_srp_init_tmout);
-       if (b_srp_init_tmr == NULL)
-               return -ENOMEM;
-
-       return 0;
-}
-
-/* Free timers */
-static void langwell_otg_free_timers(void)
-{
-       kfree(a_wait_vrise_tmr);
-       kfree(a_aidl_bdis_tmr);
-       kfree(b_se0_srp_tmr);
-       kfree(b_srp_init_tmr);
-}
-
-/* Kernel timer callback: set the timeout bit and kick the state machine */
-static void langwell_otg_timer_fn(unsigned long indicator)
-{
-       struct langwell_otg *lnw = the_transceiver;
-
-       *(int *)indicator = 1;
-
-       dev_dbg(lnw->dev, "kernel timer - timeout\n");
-
-       langwell_update_transceiver();
-}
-
-/* kernel timer used instead of HW based interrupt */
-static void langwell_otg_add_ktimer(enum langwell_otg_timer_type timers)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-       unsigned long           j = jiffies;
-       unsigned long           data, time;
-
-       switch (timers) {
-       case TA_WAIT_VRISE_TMR:
-               iotg->hsm.a_wait_vrise_tmout = 0;
-               data = (unsigned long)&iotg->hsm.a_wait_vrise_tmout;
-               time = TA_WAIT_VRISE;
-               break;
-       case TA_WAIT_BCON_TMR:
-               iotg->hsm.a_wait_bcon_tmout = 0;
-               data = (unsigned long)&iotg->hsm.a_wait_bcon_tmout;
-               time = TA_WAIT_BCON;
-               break;
-       case TA_AIDL_BDIS_TMR:
-               iotg->hsm.a_aidl_bdis_tmout = 0;
-               data = (unsigned long)&iotg->hsm.a_aidl_bdis_tmout;
-               time = TA_AIDL_BDIS;
-               break;
-       case TB_ASE0_BRST_TMR:
-               iotg->hsm.b_ase0_brst_tmout = 0;
-               data = (unsigned long)&iotg->hsm.b_ase0_brst_tmout;
-               time = TB_ASE0_BRST;
-               break;
-       case TB_SRP_INIT_TMR:
-               iotg->hsm.b_srp_init_tmout = 0;
-               data = (unsigned long)&iotg->hsm.b_srp_init_tmout;
-               time = TB_SRP_INIT;
-               break;
-       case TB_SRP_FAIL_TMR:
-               iotg->hsm.b_srp_fail_tmout = 0;
-               data = (unsigned long)&iotg->hsm.b_srp_fail_tmout;
-               time = TB_SRP_FAIL;
-               break;
-       case TB_BUS_SUSPEND_TMR:
-               iotg->hsm.b_bus_suspend_tmout = 0;
-               data = (unsigned long)&iotg->hsm.b_bus_suspend_tmout;
-               time = TB_BUS_SUSPEND;
-               break;
-       default:
-               dev_dbg(lnw->dev, "unknown timer, cannot enable it\n");
-               return;
-       }
-
-       lnw->hsm_timer.data = data;
-       lnw->hsm_timer.function = langwell_otg_timer_fn;
-       lnw->hsm_timer.expires = j + time * HZ / 1000; /* milliseconds */
-
-       add_timer(&lnw->hsm_timer);
-
-       dev_dbg(lnw->dev, "add timer successfully\n");
-}
-
-/* Add timer to timer list */
-static void langwell_otg_add_timer(void *gtimer)
-{
-       struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
-       struct langwell_otg_timer *tmp_timer;
-       struct intel_mid_otg_xceiv *iotg = &the_transceiver->iotg;
-       u32     val32;
-
-       /* Check if the timer is already in the active list,
-        * if so update timer count
-        */
-       list_for_each_entry(tmp_timer, &active_timers, list)
-               if (tmp_timer == timer) {
-                       timer->count = timer->expires;
-                       return;
-               }
-       timer->count = timer->expires;
-
-       if (list_empty(&active_timers)) {
-               val32 = readl(iotg->base + CI_OTGSC);
-               writel(val32 | OTGSC_1MSE, iotg->base + CI_OTGSC);
-       }
-
-       list_add_tail(&timer->list, &active_timers);
-}
-
-/* Remove timer from the timer list; clear timeout status */
-static void langwell_otg_del_timer(void *gtimer)
-{
-       struct langwell_otg *lnw = the_transceiver;
-       struct langwell_otg_timer *timer = (struct langwell_otg_timer *)gtimer;
-       struct langwell_otg_timer *tmp_timer, *del_tmp;
-       u32 val32;
-
-       list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list)
-               if (tmp_timer == timer)
-                       list_del(&timer->list);
-
-       if (list_empty(&active_timers)) {
-               val32 = readl(lnw->iotg.base + CI_OTGSC);
-               writel(val32 & ~OTGSC_1MSE, lnw->iotg.base + CI_OTGSC);
-       }
-}
-
-/* Reduce each timer count by 1 and check for timeout conditions. */
-static int langwell_otg_tick_timer(u32 *int_sts)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       struct langwell_otg_timer *tmp_timer, *del_tmp;
-       int expired = 0;
-
-       list_for_each_entry_safe(tmp_timer, del_tmp, &active_timers, list) {
-               tmp_timer->count--;
-               /* check if timer expires */
-               if (!tmp_timer->count) {
-                       list_del(&tmp_timer->list);
-                       tmp_timer->function(tmp_timer->data);
-                       expired = 1;
-               }
-       }
-
-       if (list_empty(&active_timers)) {
-               dev_dbg(lnw->dev, "tick timer: disable 1ms int\n");
-               *int_sts = *int_sts & ~OTGSC_1MSE;
-       }
-       return expired;
-}
-
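-/* Reset the controller: set USBCMD.RST and poll until the bit clears */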
-static void reset_otg(void)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       int                     delay_time = 1000;
-       u32                     val;
-
-       dev_dbg(lnw->dev, "resetting OTG controller ...\n");
-       val = readl(lnw->iotg.base + CI_USBCMD);
-       writel(val | USBCMD_RST, lnw->iotg.base + CI_USBCMD);
-       do {
-               udelay(100);
-               if (!delay_time--)
-                       dev_dbg(lnw->dev, "reset timeout\n");
-               val = readl(lnw->iotg.base + CI_USBCMD);
-               val &= USBCMD_RST;
-       } while (val != 0);
-       dev_dbg(lnw->dev, "reset done.\n");
-}
-
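-/* The two helpers below reset the controller and then program USBMODE for
- * host or device (client) operation respectively */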
-static void set_host_mode(void)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       u32                     val;
-
-       reset_otg();
-       val = readl(lnw->iotg.base + CI_USBMODE);
-       val = (val & (~USBMODE_CM)) | USBMODE_HOST;
-       writel(val, lnw->iotg.base + CI_USBMODE);
-}
-
-static void set_client_mode(void)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       u32                     val;
-
-       reset_otg();
-       val = readl(lnw->iotg.base + CI_USBMODE);
-       val = (val & (~USBMODE_CM)) | USBMODE_DEVICE;
-       writel(val, lnw->iotg.base + CI_USBMODE);
-}
-
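-/* Initialize the HSM state and session flags from the OTGSC value read
- * after reset */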
-static void init_hsm(void)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-       u32                             val32;
-
-       /* read OTGSC after reset */
-       val32 = readl(lnw->iotg.base + CI_OTGSC);
-       dev_dbg(lnw->dev, "%s: OTGSC init value = 0x%x\n", __func__, val32);
-
-       /* set init state */
-       if (val32 & OTGSC_ID) {
-               iotg->hsm.id = 1;
-               iotg->otg.default_a = 0;
-               set_client_mode();
-               iotg->otg.state = OTG_STATE_B_IDLE;
-       } else {
-               iotg->hsm.id = 0;
-               iotg->otg.default_a = 1;
-               set_host_mode();
-               iotg->otg.state = OTG_STATE_A_IDLE;
-       }
-
-       /* set session indicator */
-       if (val32 & OTGSC_BSE)
-               iotg->hsm.b_sess_end = 1;
-       if (val32 & OTGSC_BSV)
-               iotg->hsm.b_sess_vld = 1;
-       if (val32 & OTGSC_ASV)
-               iotg->hsm.a_sess_vld = 1;
-       if (val32 & OTGSC_AVV)
-               iotg->hsm.a_vbus_vld = 1;
-
-       /* by default, power the bus */
-       iotg->hsm.a_bus_req = 1;
-       iotg->hsm.a_bus_drop = 0;
-       /* by default, don't request the bus as a B-device */
-       iotg->hsm.b_bus_req = 0;
-       /* no system error */
-       iotg->hsm.a_clr_err = 0;
-
-       langwell_otg_phy_low_power_wait(1);
-}
-
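-/* Refresh the ID and session HSM flags from the current OTGSC value */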
-static void update_hsm(void)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-       u32                             val32;
-
-       /* read OTGSC */
-       val32 = readl(lnw->iotg.base + CI_OTGSC);
-       dev_dbg(lnw->dev, "%s: OTGSC value = 0x%x\n", __func__, val32);
-
-       iotg->hsm.id = !!(val32 & OTGSC_ID);
-       iotg->hsm.b_sess_end = !!(val32 & OTGSC_BSE);
-       iotg->hsm.b_sess_vld = !!(val32 & OTGSC_BSV);
-       iotg->hsm.a_sess_vld = !!(val32 & OTGSC_ASV);
-       iotg->hsm.a_vbus_vld = !!(val32 & OTGSC_AVV);
-}
-
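-/* Dummy IRQ handler registered while hardware assistance (HABA) is active:
- * in device mode it clears hsm.b_conn and acknowledges the pending USB
- * status bits */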
-static irqreturn_t otg_dummy_irq(int irq, void *_dev)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       void __iomem            *reg_base = _dev;
-       u32                     val;
-       u32                     int_mask = 0;
-
-       val = readl(reg_base + CI_USBMODE);
-       if ((val & USBMODE_CM) != USBMODE_DEVICE)
-               return IRQ_NONE;
-
-       val = readl(reg_base + CI_USBSTS);
-       int_mask = val & INTR_DUMMY_MASK;
-
-       if (int_mask == 0)
-               return IRQ_NONE;
-
-       /* Clear hsm.b_conn here since the host driver can't detect it;
-        * otg_dummy_irq being called means a B-disconnect happened.
-        */
-       if (lnw->iotg.hsm.b_conn) {
-               lnw->iotg.hsm.b_conn = 0;
-               if (spin_trylock(&lnw->wq_lock)) {
-                       langwell_update_transceiver();
-                       spin_unlock(&lnw->wq_lock);
-               }
-       }
-
-       /* Clear interrupts */
-       writel(int_mask, reg_base + CI_USBSTS);
-       return IRQ_HANDLED;
-}
-
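-/* Main OTG interrupt handler: decode the OTGSC status bits into HSM flags
- * and queue the state-machine work when something relevant changed */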
-static irqreturn_t otg_irq(int irq, void *_dev)
-{
-       struct langwell_otg             *lnw = _dev;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-       u32                             int_sts, int_en;
-       u32                             int_mask = 0;
-       int                             flag = 0;
-
-       int_sts = readl(lnw->iotg.base + CI_OTGSC);
-       int_en = (int_sts & OTGSC_INTEN_MASK) >> 8;
-       int_mask = int_sts & int_en;
-       if (int_mask == 0)
-               return IRQ_NONE;
-
-       if (int_mask & OTGSC_IDIS) {
-               dev_dbg(lnw->dev, "%s: id change int\n", __func__);
-               iotg->hsm.id = (int_sts & OTGSC_ID) ? 1 : 0;
-               dev_dbg(lnw->dev, "id = %d\n", iotg->hsm.id);
-               flag = 1;
-       }
-       if (int_mask & OTGSC_DPIS) {
-               dev_dbg(lnw->dev, "%s: data pulse int\n", __func__);
-               iotg->hsm.a_srp_det = (int_sts & OTGSC_DPS) ? 1 : 0;
-               dev_dbg(lnw->dev, "data pulse = %d\n", iotg->hsm.a_srp_det);
-               flag = 1;
-       }
-       if (int_mask & OTGSC_BSEIS) {
-               dev_dbg(lnw->dev, "%s: b session end int\n", __func__);
-               iotg->hsm.b_sess_end = (int_sts & OTGSC_BSE) ? 1 : 0;
-               dev_dbg(lnw->dev, "b_sess_end = %d\n", iotg->hsm.b_sess_end);
-               flag = 1;
-       }
-       if (int_mask & OTGSC_BSVIS) {
-               dev_dbg(lnw->dev, "%s: b session valid int\n", __func__);
-               iotg->hsm.b_sess_vld = (int_sts & OTGSC_BSV) ? 1 : 0;
-               dev_dbg(lnw->dev, "b_sess_vld = %d\n", iotg->hsm.b_sess_vld);
-               flag = 1;
-       }
-       if (int_mask & OTGSC_ASVIS) {
-               dev_dbg(lnw->dev, "%s: a session valid int\n", __func__);
-               iotg->hsm.a_sess_vld = (int_sts & OTGSC_ASV) ? 1 : 0;
-               dev_dbg(lnw->dev, "a_sess_vld = %d\n", iotg->hsm.a_sess_vld);
-               flag = 1;
-       }
-       if (int_mask & OTGSC_AVVIS) {
-               dev_dbg(lnw->dev, "%s: a vbus valid int\n", __func__);
-               iotg->hsm.a_vbus_vld = (int_sts & OTGSC_AVV) ? 1 : 0;
-               dev_dbg(lnw->dev, "a_vbus_vld = %d\n", iotg->hsm.a_vbus_vld);
-               flag = 1;
-       }
-
-       if (int_mask & OTGSC_1MSS) {
-               /* need to schedule otg_work if any timer has expired */
-               if (langwell_otg_tick_timer(&int_sts))
-                       flag = 1;
-       }
-
-       writel((int_sts & ~OTGSC_INTSTS_MASK) | int_mask,
-                                       lnw->iotg.base + CI_OTGSC);
-       if (flag)
-               langwell_update_transceiver();
-
-       return IRQ_HANDLED;
-}
-
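-/* Notifier callback: translate host/client controller events into HSM flag
- * updates and, where needed, kick the state-machine work */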
-static int langwell_otg_iotg_notify(struct notifier_block *nb,
-                               unsigned long action, void *data)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = data;
-       int                             flag = 0;
-
-       if (iotg == NULL)
-               return NOTIFY_BAD;
-
-       if (lnw == NULL)
-               return NOTIFY_BAD;
-
-       switch (action) {
-       case MID_OTG_NOTIFY_CONNECT:
-               dev_dbg(lnw->dev, "Lnw OTG Notify Connect Event\n");
-               if (iotg->otg.default_a == 1)
-                       iotg->hsm.b_conn = 1;
-               else
-                       iotg->hsm.a_conn = 1;
-               flag = 1;
-               break;
-       case MID_OTG_NOTIFY_DISCONN:
-               dev_dbg(lnw->dev, "Lnw OTG Notify Disconnect Event\n");
-               if (iotg->otg.default_a == 1)
-                       iotg->hsm.b_conn = 0;
-               else
-                       iotg->hsm.a_conn = 0;
-               flag = 1;
-               break;
-       case MID_OTG_NOTIFY_HSUSPEND:
-               dev_dbg(lnw->dev, "Lnw OTG Notify Host Bus suspend Event\n");
-               if (iotg->otg.default_a == 1)
-                       iotg->hsm.a_suspend_req = 1;
-               else
-                       iotg->hsm.b_bus_req = 0;
-               flag = 1;
-               break;
-       case MID_OTG_NOTIFY_HRESUME:
-               dev_dbg(lnw->dev, "Lnw OTG Notify Host Bus resume Event\n");
-               if (iotg->otg.default_a == 1)
-                       iotg->hsm.b_bus_resume = 1;
-               flag = 1;
-               break;
-       case MID_OTG_NOTIFY_CSUSPEND:
-               dev_dbg(lnw->dev, "Lnw OTG Notify Client Bus suspend Event\n");
-               if (iotg->otg.default_a == 1) {
-                       if (iotg->hsm.b_bus_suspend_vld == 2) {
-                               iotg->hsm.b_bus_suspend = 1;
-                               iotg->hsm.b_bus_suspend_vld = 0;
-                               flag = 1;
-                       } else {
-                               iotg->hsm.b_bus_suspend_vld++;
-                               flag = 0;
-                       }
-               } else {
-                       if (iotg->hsm.a_bus_suspend == 0) {
-                               iotg->hsm.a_bus_suspend = 1;
-                               flag = 1;
-                       }
-               }
-               break;
-       case MID_OTG_NOTIFY_CRESUME:
-               dev_dbg(lnw->dev, "Lnw OTG Notify Client Bus resume Event\n");
-               if (iotg->otg.default_a == 0)
-                       iotg->hsm.a_bus_suspend = 0;
-               flag = 0;
-               break;
-       case MID_OTG_NOTIFY_HOSTADD:
-               dev_dbg(lnw->dev, "Lnw OTG Notify Host Driver Add\n");
-               flag = 1;
-               break;
-       case MID_OTG_NOTIFY_HOSTREMOVE:
-               dev_dbg(lnw->dev, "Lnw OTG Notify Host Driver Remove\n");
-               flag = 1;
-               break;
-       case MID_OTG_NOTIFY_CLIENTADD:
-               dev_dbg(lnw->dev, "Lnw OTG Notify Client Driver Add\n");
-               flag = 1;
-               break;
-       case MID_OTG_NOTIFY_CLIENTREMOVE:
-               dev_dbg(lnw->dev, "Lnw OTG Notify Client Driver Remove\n");
-               flag = 1;
-               break;
-       default:
-               dev_dbg(lnw->dev, "Lnw OTG Notify: unknown notify message\n");
-               return NOTIFY_DONE;
-       }
-
-       if (flag)
-               langwell_update_transceiver();
-
-       return NOTIFY_OK;
-}
-
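-/* OTG state-machine worker: dispatch on the current otg.state and carry out
- * the corresponding role switches and transitions */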
-static void langwell_otg_work(struct work_struct *work)
-{
-       struct langwell_otg             *lnw;
-       struct intel_mid_otg_xceiv      *iotg;
-       int                             retval;
-       struct pci_dev                  *pdev;
-
-       lnw = container_of(work, struct langwell_otg, work);
-       iotg = &lnw->iotg;
-       pdev = to_pci_dev(lnw->dev);
-
-       dev_dbg(lnw->dev, "%s: old state = %s\n", __func__,
-                       otg_state_string(iotg->otg.state));
-
-       switch (iotg->otg.state) {
-       case OTG_STATE_UNDEFINED:
-       case OTG_STATE_B_IDLE:
-               if (!iotg->hsm.id) {
-                       langwell_otg_del_timer(b_srp_init_tmr);
-                       del_timer_sync(&lnw->hsm_timer);
-
-                       iotg->otg.default_a = 1;
-                       iotg->hsm.a_srp_det = 0;
-
-                       langwell_otg_chrg_vbus(0);
-                       set_host_mode();
-                       langwell_otg_phy_low_power(1);
-
-                       iotg->otg.state = OTG_STATE_A_IDLE;
-                       langwell_update_transceiver();
-               } else if (iotg->hsm.b_sess_vld) {
-                       langwell_otg_del_timer(b_srp_init_tmr);
-                       del_timer_sync(&lnw->hsm_timer);
-                       iotg->hsm.b_sess_end = 0;
-                       iotg->hsm.a_bus_suspend = 0;
-                       langwell_otg_chrg_vbus(0);
-
-                       if (lnw->iotg.start_peripheral) {
-                               lnw->iotg.start_peripheral(&lnw->iotg);
-                               iotg->otg.state = OTG_STATE_B_PERIPHERAL;
-                       } else
-                               dev_dbg(lnw->dev, "client driver not loaded\n");
-
-               } else if (iotg->hsm.b_srp_init_tmout) {
-                       iotg->hsm.b_srp_init_tmout = 0;
-                       dev_warn(lnw->dev, "SRP init timeout\n");
-               } else if (iotg->hsm.b_srp_fail_tmout) {
-                       iotg->hsm.b_srp_fail_tmout = 0;
-                       iotg->hsm.b_bus_req = 0;
-
-                       /* No-silent-failure (NSF) message */
-                       langwell_otg_nsf_msg(6);
-               } else if (iotg->hsm.b_bus_req && iotg->hsm.b_sess_end) {
-                       del_timer_sync(&lnw->hsm_timer);
-                       /* workaround for b_se0_srp detection */
-                       retval = langwell_otg_check_se0_srp(0);
-                       if (retval) {
-                               iotg->hsm.b_bus_req = 0;
-                               dev_dbg(lnw->dev, "LS isn't SE0, try later\n");
-                       } else {
-                               /* clear PHCD before starting SRP */
-                               langwell_otg_phy_low_power(0);
-
-                               /* Start SRP */
-                               langwell_otg_add_timer(b_srp_init_tmr);
-                               iotg->otg.start_srp(&iotg->otg);
-                               langwell_otg_del_timer(b_srp_init_tmr);
-                               langwell_otg_add_ktimer(TB_SRP_FAIL_TMR);
-
-                               /* reset PHY low power mode here */
-                               langwell_otg_phy_low_power_wait(1);
-                       }
-               }
-               break;
-       case OTG_STATE_B_SRP_INIT:
-               if (!iotg->hsm.id) {
-                       iotg->otg.default_a = 1;
-                       iotg->hsm.a_srp_det = 0;
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       langwell_otg_chrg_vbus(0);
-                       set_host_mode();
-                       langwell_otg_phy_low_power(1);
-                       iotg->otg.state = OTG_STATE_A_IDLE;
-                       langwell_update_transceiver();
-               } else if (iotg->hsm.b_sess_vld) {
-                       langwell_otg_chrg_vbus(0);
-                       if (lnw->iotg.start_peripheral) {
-                               lnw->iotg.start_peripheral(&lnw->iotg);
-                               iotg->otg.state = OTG_STATE_B_PERIPHERAL;
-                       } else
-                               dev_dbg(lnw->dev, "client driver not loaded\n");
-               }
-               break;
-       case OTG_STATE_B_PERIPHERAL:
-               if (!iotg->hsm.id) {
-                       iotg->otg.default_a = 1;
-                       iotg->hsm.a_srp_det = 0;
-
-                       langwell_otg_chrg_vbus(0);
-
-                       if (lnw->iotg.stop_peripheral)
-                               lnw->iotg.stop_peripheral(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "client driver has been removed.\n");
-
-                       set_host_mode();
-                       langwell_otg_phy_low_power(1);
-                       iotg->otg.state = OTG_STATE_A_IDLE;
-                       langwell_update_transceiver();
-               } else if (!iotg->hsm.b_sess_vld) {
-                       iotg->hsm.b_hnp_enable = 0;
-
-                       if (lnw->iotg.stop_peripheral)
-                               lnw->iotg.stop_peripheral(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "client driver has been removed.\n");
-
-                       iotg->otg.state = OTG_STATE_B_IDLE;
-               } else if (iotg->hsm.b_bus_req && iotg->otg.gadget &&
-                                       iotg->otg.gadget->b_hnp_enable &&
-                                       iotg->hsm.a_bus_suspend) {
-
-                       if (lnw->iotg.stop_peripheral)
-                               lnw->iotg.stop_peripheral(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "client driver has been removed.\n");
-
-                       langwell_otg_HAAR(1);
-                       iotg->hsm.a_conn = 0;
-
-                       if (lnw->iotg.start_host) {
-                               lnw->iotg.start_host(&lnw->iotg);
-                               iotg->otg.state = OTG_STATE_B_WAIT_ACON;
-                       } else
-                               dev_dbg(lnw->dev,
-                                               "host driver not loaded.\n");
-
-                       iotg->hsm.a_bus_resume = 0;
-                       langwell_otg_add_ktimer(TB_ASE0_BRST_TMR);
-               }
-               break;
-
-       case OTG_STATE_B_WAIT_ACON:
-               if (!iotg->hsm.id) {
-                       /* delete hsm timer for b_ase0_brst_tmr */
-                       del_timer_sync(&lnw->hsm_timer);
-
-                       iotg->otg.default_a = 1;
-                       iotg->hsm.a_srp_det = 0;
-
-                       langwell_otg_chrg_vbus(0);
-
-                       langwell_otg_HAAR(0);
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       set_host_mode();
-                       langwell_otg_phy_low_power(1);
-                       iotg->otg.state = OTG_STATE_A_IDLE;
-                       langwell_update_transceiver();
-               } else if (!iotg->hsm.b_sess_vld) {
-                       /* delete hsm timer for b_ase0_brst_tmr */
-                       del_timer_sync(&lnw->hsm_timer);
-
-                       iotg->hsm.b_hnp_enable = 0;
-                       iotg->hsm.b_bus_req = 0;
-
-                       langwell_otg_chrg_vbus(0);
-                       langwell_otg_HAAR(0);
-
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       set_client_mode();
-                       langwell_otg_phy_low_power(1);
-                       iotg->otg.state = OTG_STATE_B_IDLE;
-               } else if (iotg->hsm.a_conn) {
-                       /* delete hsm timer for b_ase0_brst_tmr */
-                       del_timer_sync(&lnw->hsm_timer);
-
-                       langwell_otg_HAAR(0);
-                       iotg->otg.state = OTG_STATE_B_HOST;
-                       langwell_update_transceiver();
-               } else if (iotg->hsm.a_bus_resume ||
-                               iotg->hsm.b_ase0_brst_tmout) {
-                       /* delete hsm timer for b_ase0_brst_tmr */
-                       del_timer_sync(&lnw->hsm_timer);
-
-                       langwell_otg_HAAR(0);
-                       langwell_otg_nsf_msg(7);
-
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       iotg->hsm.a_bus_suspend = 0;
-                       iotg->hsm.b_bus_req = 0;
-
-                       if (lnw->iotg.start_peripheral)
-                               lnw->iotg.start_peripheral(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "client driver not loaded.\n");
-
-                       iotg->otg.state = OTG_STATE_B_PERIPHERAL;
-               }
-               break;
-
-       case OTG_STATE_B_HOST:
-               if (!iotg->hsm.id) {
-                       iotg->otg.default_a = 1;
-                       iotg->hsm.a_srp_det = 0;
-
-                       langwell_otg_chrg_vbus(0);
-
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       set_host_mode();
-                       langwell_otg_phy_low_power(1);
-                       iotg->otg.state = OTG_STATE_A_IDLE;
-                       langwell_update_transceiver();
-               } else if (!iotg->hsm.b_sess_vld) {
-                       iotg->hsm.b_hnp_enable = 0;
-                       iotg->hsm.b_bus_req = 0;
-
-                       langwell_otg_chrg_vbus(0);
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       set_client_mode();
-                       langwell_otg_phy_low_power(1);
-                       iotg->otg.state = OTG_STATE_B_IDLE;
-               } else if ((!iotg->hsm.b_bus_req) ||
-                               (!iotg->hsm.a_conn)) {
-                       iotg->hsm.b_bus_req = 0;
-                       langwell_otg_loc_sof(0);
-
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       iotg->hsm.a_bus_suspend = 0;
-
-                       if (lnw->iotg.start_peripheral)
-                               lnw->iotg.start_peripheral(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                               "client driver not loaded.\n");
-
-                       iotg->otg.state = OTG_STATE_B_PERIPHERAL;
-               }
-               break;
-
-       case OTG_STATE_A_IDLE:
-               iotg->otg.default_a = 1;
-               if (iotg->hsm.id) {
-                       iotg->otg.default_a = 0;
-                       iotg->hsm.b_bus_req = 0;
-                       iotg->hsm.vbus_srp_up = 0;
-
-                       langwell_otg_chrg_vbus(0);
-                       set_client_mode();
-                       langwell_otg_phy_low_power(1);
-                       iotg->otg.state = OTG_STATE_B_IDLE;
-                       langwell_update_transceiver();
-               } else if (!iotg->hsm.a_bus_drop &&
-                       (iotg->hsm.a_srp_det || iotg->hsm.a_bus_req)) {
-                       langwell_otg_phy_low_power(0);
-
-                       /* Turn on VBus */
-                       iotg->otg.set_vbus(&iotg->otg, true);
-
-                       iotg->hsm.vbus_srp_up = 0;
-                       iotg->hsm.a_wait_vrise_tmout = 0;
-                       langwell_otg_add_timer(a_wait_vrise_tmr);
-                       iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
-                       langwell_update_transceiver();
-               } else if (!iotg->hsm.a_bus_drop && iotg->hsm.a_sess_vld) {
-                       iotg->hsm.vbus_srp_up = 1;
-               } else if (!iotg->hsm.a_sess_vld && iotg->hsm.vbus_srp_up) {
-                       msleep(10);
-                       langwell_otg_phy_low_power(0);
-
-                       /* Turn on VBus */
-                       iotg->otg.set_vbus(&iotg->otg, true);
-                       iotg->hsm.a_srp_det = 1;
-                       iotg->hsm.vbus_srp_up = 0;
-                       iotg->hsm.a_wait_vrise_tmout = 0;
-                       langwell_otg_add_timer(a_wait_vrise_tmr);
-                       iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
-                       langwell_update_transceiver();
-               } else if (!iotg->hsm.a_sess_vld &&
-                               !iotg->hsm.vbus_srp_up) {
-                       langwell_otg_phy_low_power(1);
-               }
-               break;
-       case OTG_STATE_A_WAIT_VRISE:
-               if (iotg->hsm.id) {
-                       langwell_otg_del_timer(a_wait_vrise_tmr);
-                       iotg->hsm.b_bus_req = 0;
-                       iotg->otg.default_a = 0;
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       set_client_mode();
-                       langwell_otg_phy_low_power_wait(1);
-                       iotg->otg.state = OTG_STATE_B_IDLE;
-               } else if (iotg->hsm.a_vbus_vld) {
-                       langwell_otg_del_timer(a_wait_vrise_tmr);
-                       iotg->hsm.b_conn = 0;
-                       if (lnw->iotg.start_host)
-                               lnw->iotg.start_host(&lnw->iotg);
-                       else {
-                               dev_dbg(lnw->dev, "host driver not loaded.\n");
-                               break;
-                       }
-
-                       langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
-                       iotg->otg.state = OTG_STATE_A_WAIT_BCON;
-               } else if (iotg->hsm.a_wait_vrise_tmout) {
-                       iotg->hsm.b_conn = 0;
-                       if (iotg->hsm.a_vbus_vld) {
-                               if (lnw->iotg.start_host)
-                                       lnw->iotg.start_host(&lnw->iotg);
-                               else {
-                                       dev_dbg(lnw->dev,
-                                               "host driver not loaded.\n");
-                                       break;
-                               }
-                               langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
-                               iotg->otg.state = OTG_STATE_A_WAIT_BCON;
-                       } else {
-
-                               /* Turn off VBus */
-                               iotg->otg.set_vbus(&iotg->otg, false);
-                               langwell_otg_phy_low_power_wait(1);
-                               iotg->otg.state = OTG_STATE_A_VBUS_ERR;
-                       }
-               }
-               break;
-       case OTG_STATE_A_WAIT_BCON:
-               if (iotg->hsm.id) {
-                       /* delete hsm timer for a_wait_bcon_tmr */
-                       del_timer_sync(&lnw->hsm_timer);
-
-                       iotg->otg.default_a = 0;
-                       iotg->hsm.b_bus_req = 0;
-
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       set_client_mode();
-                       langwell_otg_phy_low_power_wait(1);
-                       iotg->otg.state = OTG_STATE_B_IDLE;
-                       langwell_update_transceiver();
-               } else if (!iotg->hsm.a_vbus_vld) {
-                       /* delete hsm timer for a_wait_bcon_tmr */
-                       del_timer_sync(&lnw->hsm_timer);
-
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       langwell_otg_phy_low_power_wait(1);
-                       iotg->otg.state = OTG_STATE_A_VBUS_ERR;
-               } else if (iotg->hsm.a_bus_drop ||
-                               (iotg->hsm.a_wait_bcon_tmout &&
-                               !iotg->hsm.a_bus_req)) {
-                       /* delete hsm timer for a_wait_bcon_tmr */
-                       del_timer_sync(&lnw->hsm_timer);
-
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
-               } else if (iotg->hsm.b_conn) {
-                       /* delete hsm timer for a_wait_bcon_tmr */
-                       del_timer_sync(&lnw->hsm_timer);
-
-                       iotg->hsm.a_suspend_req = 0;
-                       iotg->otg.state = OTG_STATE_A_HOST;
-                       if (iotg->hsm.a_srp_det && iotg->otg.host &&
-                                       !iotg->otg.host->b_hnp_enable) {
-                               /* SRP capable peripheral-only device */
-                               iotg->hsm.a_bus_req = 1;
-                               iotg->hsm.a_srp_det = 0;
-                       } else if (!iotg->hsm.a_bus_req && iotg->otg.host &&
-                                       iotg->otg.host->b_hnp_enable) {
-                               /* It is not safe enough to do a fast
-                                * transition from A_WAIT_BCON to
-                                * A_SUSPEND */
-                               msleep(10000);
-                               if (iotg->hsm.a_bus_req)
-                                       break;
-
-                               if (request_irq(pdev->irq,
-                                       otg_dummy_irq, IRQF_SHARED,
-                                       driver_name, iotg->base) != 0) {
-                                       dev_dbg(lnw->dev,
-                                               "request interrupt %d fail\n",
-                                               pdev->irq);
-                               }
-
-                               langwell_otg_HABA(1);
-                               iotg->hsm.b_bus_resume = 0;
-                               iotg->hsm.a_aidl_bdis_tmout = 0;
-
-                               langwell_otg_loc_sof(0);
-                               /* clear PHCD to enable HW timer */
-                               langwell_otg_phy_low_power(0);
-                               langwell_otg_add_timer(a_aidl_bdis_tmr);
-                               iotg->otg.state = OTG_STATE_A_SUSPEND;
-                       } else if (!iotg->hsm.a_bus_req && iotg->otg.host &&
-                               !iotg->otg.host->b_hnp_enable) {
-                               if (lnw->iotg.stop_host)
-                                       lnw->iotg.stop_host(&lnw->iotg);
-                               else
-                                       dev_dbg(lnw->dev,
-                                               "host driver removed.\n");
-
-                               /* Turn off VBus */
-                               iotg->otg.set_vbus(&iotg->otg, false);
-                               iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
-                       }
-               }
-               break;
-       case OTG_STATE_A_HOST:
-               if (iotg->hsm.id) {
-                       iotg->otg.default_a = 0;
-                       iotg->hsm.b_bus_req = 0;
-
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       set_client_mode();
-                       langwell_otg_phy_low_power_wait(1);
-                       iotg->otg.state = OTG_STATE_B_IDLE;
-                       langwell_update_transceiver();
-               } else if (iotg->hsm.a_bus_drop ||
-                               (iotg->otg.host &&
-                               !iotg->otg.host->b_hnp_enable &&
-                                       !iotg->hsm.a_bus_req)) {
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
-               } else if (!iotg->hsm.a_vbus_vld) {
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       langwell_otg_phy_low_power_wait(1);
-                       iotg->otg.state = OTG_STATE_A_VBUS_ERR;
-               } else if (iotg->otg.host &&
-                               iotg->otg.host->b_hnp_enable &&
-                               !iotg->hsm.a_bus_req) {
-                       /* Set HABA to enable hardware assistance to signal
-                        *  A-connect after receiving B-disconnect. Hardware
-                        *  will then set client mode and enable URE, SLE and
-                        *  PCE after the assistance. otg_dummy_irq is used to
-                        *  clear these interrupts when the client driver is
-                        *  not resumed.
-                        */
-                       if (request_irq(pdev->irq, otg_dummy_irq, IRQF_SHARED,
-                                       driver_name, iotg->base) != 0) {
-                               dev_dbg(lnw->dev,
-                                       "request interrupt %d failed\n",
-                                               pdev->irq);
-                       }
-
-                       /* set HABA */
-                       langwell_otg_HABA(1);
-                       iotg->hsm.b_bus_resume = 0;
-                       iotg->hsm.a_aidl_bdis_tmout = 0;
-                       langwell_otg_loc_sof(0);
-                       /* clear PHCD to enable HW timer */
-                       langwell_otg_phy_low_power(0);
-                       langwell_otg_add_timer(a_aidl_bdis_tmr);
-                       iotg->otg.state = OTG_STATE_A_SUSPEND;
-               } else if (!iotg->hsm.b_conn || !iotg->hsm.a_bus_req) {
-                       langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
-                       iotg->otg.state = OTG_STATE_A_WAIT_BCON;
-               }
-               break;
-       case OTG_STATE_A_SUSPEND:
-               if (iotg->hsm.id) {
-                       langwell_otg_del_timer(a_aidl_bdis_tmr);
-                       langwell_otg_HABA(0);
-                       free_irq(pdev->irq, iotg->base);
-                       iotg->otg.default_a = 0;
-                       iotg->hsm.b_bus_req = 0;
-
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       set_client_mode();
-                       langwell_otg_phy_low_power(1);
-                       iotg->otg.state = OTG_STATE_B_IDLE;
-                       langwell_update_transceiver();
-               } else if (iotg->hsm.a_bus_req ||
-                               iotg->hsm.b_bus_resume) {
-                       langwell_otg_del_timer(a_aidl_bdis_tmr);
-                       langwell_otg_HABA(0);
-                       free_irq(pdev->irq, iotg->base);
-                       iotg->hsm.a_suspend_req = 0;
-                       langwell_otg_loc_sof(1);
-                       iotg->otg.state = OTG_STATE_A_HOST;
-               } else if (iotg->hsm.a_aidl_bdis_tmout ||
-                               iotg->hsm.a_bus_drop) {
-                       langwell_otg_del_timer(a_aidl_bdis_tmr);
-                       langwell_otg_HABA(0);
-                       free_irq(pdev->irq, iotg->base);
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
-               } else if (!iotg->hsm.b_conn && iotg->otg.host &&
-                               iotg->otg.host->b_hnp_enable) {
-                       langwell_otg_del_timer(a_aidl_bdis_tmr);
-                       langwell_otg_HABA(0);
-                       free_irq(pdev->irq, iotg->base);
-
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       iotg->hsm.b_bus_suspend = 0;
-                       iotg->hsm.b_bus_suspend_vld = 0;
-
-                       /* msleep(200); */
-                       if (lnw->iotg.start_peripheral)
-                               lnw->iotg.start_peripheral(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "client driver not loaded.\n");
-
-                       langwell_otg_add_ktimer(TB_BUS_SUSPEND_TMR);
-                       iotg->otg.state = OTG_STATE_A_PERIPHERAL;
-                       break;
-               } else if (!iotg->hsm.a_vbus_vld) {
-                       langwell_otg_del_timer(a_aidl_bdis_tmr);
-                       langwell_otg_HABA(0);
-                       free_irq(pdev->irq, iotg->base);
-                       if (lnw->iotg.stop_host)
-                               lnw->iotg.stop_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "host driver has been removed.\n");
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       langwell_otg_phy_low_power_wait(1);
-                       iotg->otg.state = OTG_STATE_A_VBUS_ERR;
-               }
-               break;
-       case OTG_STATE_A_PERIPHERAL:
-               if (iotg->hsm.id) {
-                       /* delete hsm timer for b_bus_suspend_tmr */
-                       del_timer_sync(&lnw->hsm_timer);
-                       iotg->otg.default_a = 0;
-                       iotg->hsm.b_bus_req = 0;
-                       if (lnw->iotg.stop_peripheral)
-                               lnw->iotg.stop_peripheral(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "client driver has been removed.\n");
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       set_client_mode();
-                       langwell_otg_phy_low_power_wait(1);
-                       iotg->otg.state = OTG_STATE_B_IDLE;
-                       langwell_update_transceiver();
-               } else if (!iotg->hsm.a_vbus_vld) {
-                       /* delete hsm timer for b_bus_suspend_tmr */
-                       del_timer_sync(&lnw->hsm_timer);
-
-                       if (lnw->iotg.stop_peripheral)
-                               lnw->iotg.stop_peripheral(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "client driver has been removed.\n");
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       langwell_otg_phy_low_power_wait(1);
-                       iotg->otg.state = OTG_STATE_A_VBUS_ERR;
-               } else if (iotg->hsm.a_bus_drop) {
-                       /* delete hsm timer for b_bus_suspend_tmr */
-                       del_timer_sync(&lnw->hsm_timer);
-
-                       if (lnw->iotg.stop_peripheral)
-                               lnw->iotg.stop_peripheral(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "client driver has been removed.\n");
-
-                       /* Turn off VBus */
-                       iotg->otg.set_vbus(&iotg->otg, false);
-                       iotg->otg.state = OTG_STATE_A_WAIT_VFALL;
-               } else if (iotg->hsm.b_bus_suspend) {
-                       /* delete hsm timer for b_bus_suspend_tmr */
-                       del_timer_sync(&lnw->hsm_timer);
-
-                       if (lnw->iotg.stop_peripheral)
-                               lnw->iotg.stop_peripheral(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "client driver has been removed.\n");
-
-                       if (lnw->iotg.start_host)
-                               lnw->iotg.start_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                               "host driver not loaded.\n");
-                       langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
-                       iotg->otg.state = OTG_STATE_A_WAIT_BCON;
-               } else if (iotg->hsm.b_bus_suspend_tmout) {
-                       u32     val;
-                       val = readl(lnw->iotg.base + CI_PORTSC1);
-                       if (!(val & PORTSC_SUSP))
-                               break;
-
-                       if (lnw->iotg.stop_peripheral)
-                               lnw->iotg.stop_peripheral(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                       "client driver has been removed.\n");
-
-                       if (lnw->iotg.start_host)
-                               lnw->iotg.start_host(&lnw->iotg);
-                       else
-                               dev_dbg(lnw->dev,
-                                               "host driver not loaded.\n");
-                       langwell_otg_add_ktimer(TA_WAIT_BCON_TMR);
-                       iotg->otg.state = OTG_STATE_A_WAIT_BCON;
-               }
-               break;
-       case OTG_STATE_A_VBUS_ERR:
-               if (iotg->hsm.id) {
-                       iotg->otg.default_a = 0;
-                       iotg->hsm.a_clr_err = 0;
-                       iotg->hsm.a_srp_det = 0;
-                       set_client_mode();
-                       langwell_otg_phy_low_power(1);
-                       iotg->otg.state = OTG_STATE_B_IDLE;
-                       langwell_update_transceiver();
-               } else if (iotg->hsm.a_clr_err) {
-                       iotg->hsm.a_clr_err = 0;
-                       iotg->hsm.a_srp_det = 0;
-                       reset_otg();
-                       init_hsm();
-                       if (iotg->otg.state == OTG_STATE_A_IDLE)
-                               langwell_update_transceiver();
-               } else {
-                       /* FW will clear PHCD bit when any VBus
-                        * event detected. Reset PHCD to 1 again */
-                       langwell_otg_phy_low_power(1);
-               }
-               break;
-       case OTG_STATE_A_WAIT_VFALL:
-               if (iotg->hsm.id) {
-                       iotg->otg.default_a = 0;
-                       set_client_mode();
-                       langwell_otg_phy_low_power(1);
-                       iotg->otg.state = OTG_STATE_B_IDLE;
-                       langwell_update_transceiver();
-               } else if (iotg->hsm.a_bus_req) {
-
-                       /* Turn on VBus */
-                       iotg->otg.set_vbus(&iotg->otg, true);
-                       iotg->hsm.a_wait_vrise_tmout = 0;
-                       langwell_otg_add_timer(a_wait_vrise_tmr);
-                       iotg->otg.state = OTG_STATE_A_WAIT_VRISE;
-               } else if (!iotg->hsm.a_sess_vld) {
-                       iotg->hsm.a_srp_det = 0;
-                       set_host_mode();
-                       langwell_otg_phy_low_power(1);
-                       iotg->otg.state = OTG_STATE_A_IDLE;
-               }
-               break;
-       default:
-               ;
-       }
-
-       dev_dbg(lnw->dev, "%s: new state = %s\n", __func__,
-                       otg_state_string(iotg->otg.state));
-}
-
-static ssize_t
-show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       char                    *next;
-       unsigned                size, t;
-
-       next = buf;
-       size = PAGE_SIZE;
-
-       t = scnprintf(next, size,
-               "\n"
-               "USBCMD = 0x%08x\n"
-               "USBSTS = 0x%08x\n"
-               "USBINTR = 0x%08x\n"
-               "ASYNCLISTADDR = 0x%08x\n"
-               "PORTSC1 = 0x%08x\n"
-               "HOSTPC1 = 0x%08x\n"
-               "OTGSC = 0x%08x\n"
-               "USBMODE = 0x%08x\n",
-               readl(lnw->iotg.base + 0x30),
-               readl(lnw->iotg.base + 0x34),
-               readl(lnw->iotg.base + 0x38),
-               readl(lnw->iotg.base + 0x48),
-               readl(lnw->iotg.base + 0x74),
-               readl(lnw->iotg.base + 0xb4),
-               readl(lnw->iotg.base + 0xf4),
-               readl(lnw->iotg.base + 0xf8)
-            );
-       size -= t;
-       next += t;
-
-       return PAGE_SIZE - size;
-}
-static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL);
-
-static ssize_t
-show_hsm(struct device *_dev, struct device_attribute *attr, char *buf)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-       char                            *next;
-       unsigned                        size, t;
-
-       next = buf;
-       size = PAGE_SIZE;
-
-       if (iotg->otg.host)
-               iotg->hsm.a_set_b_hnp_en = iotg->otg.host->b_hnp_enable;
-
-       if (iotg->otg.gadget)
-               iotg->hsm.b_hnp_enable = iotg->otg.gadget->b_hnp_enable;
-
-       t = scnprintf(next, size,
-               "\n"
-               "current state = %s\n"
-               "a_bus_resume = \t%d\n"
-               "a_bus_suspend = \t%d\n"
-               "a_conn = \t%d\n"
-               "a_sess_vld = \t%d\n"
-               "a_srp_det = \t%d\n"
-               "a_vbus_vld = \t%d\n"
-               "b_bus_resume = \t%d\n"
-               "b_bus_suspend = \t%d\n"
-               "b_conn = \t%d\n"
-               "b_se0_srp = \t%d\n"
-               "b_sess_end = \t%d\n"
-               "b_sess_vld = \t%d\n"
-               "id = \t%d\n"
-               "a_set_b_hnp_en = \t%d\n"
-               "b_srp_done = \t%d\n"
-               "b_hnp_enable = \t%d\n"
-               "a_wait_vrise_tmout = \t%d\n"
-               "a_wait_bcon_tmout = \t%d\n"
-               "a_aidl_bdis_tmout = \t%d\n"
-               "b_ase0_brst_tmout = \t%d\n"
-               "a_bus_drop = \t%d\n"
-               "a_bus_req = \t%d\n"
-               "a_clr_err = \t%d\n"
-               "a_suspend_req = \t%d\n"
-               "b_bus_req = \t%d\n"
-               "b_bus_suspend_tmout = \t%d\n"
-               "b_bus_suspend_vld = \t%d\n",
-               otg_state_string(iotg->otg.state),
-               iotg->hsm.a_bus_resume,
-               iotg->hsm.a_bus_suspend,
-               iotg->hsm.a_conn,
-               iotg->hsm.a_sess_vld,
-               iotg->hsm.a_srp_det,
-               iotg->hsm.a_vbus_vld,
-               iotg->hsm.b_bus_resume,
-               iotg->hsm.b_bus_suspend,
-               iotg->hsm.b_conn,
-               iotg->hsm.b_se0_srp,
-               iotg->hsm.b_sess_end,
-               iotg->hsm.b_sess_vld,
-               iotg->hsm.id,
-               iotg->hsm.a_set_b_hnp_en,
-               iotg->hsm.b_srp_done,
-               iotg->hsm.b_hnp_enable,
-               iotg->hsm.a_wait_vrise_tmout,
-               iotg->hsm.a_wait_bcon_tmout,
-               iotg->hsm.a_aidl_bdis_tmout,
-               iotg->hsm.b_ase0_brst_tmout,
-               iotg->hsm.a_bus_drop,
-               iotg->hsm.a_bus_req,
-               iotg->hsm.a_clr_err,
-               iotg->hsm.a_suspend_req,
-               iotg->hsm.b_bus_req,
-               iotg->hsm.b_bus_suspend_tmout,
-               iotg->hsm.b_bus_suspend_vld
-               );
-       size -= t;
-       next += t;
-
-       return PAGE_SIZE - size;
-}
-static DEVICE_ATTR(hsm, S_IRUGO, show_hsm, NULL);
-
-static ssize_t
-get_a_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       char                    *next;
-       unsigned                size, t;
-
-       next = buf;
-       size = PAGE_SIZE;
-
-       t = scnprintf(next, size, "%d", lnw->iotg.hsm.a_bus_req);
-       size -= t;
-       next += t;
-
-       return PAGE_SIZE - size;
-}
-
-static ssize_t
-set_a_bus_req(struct device *dev, struct device_attribute *attr,
-               const char *buf, size_t count)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-
-       if (!iotg->otg.default_a)
-               return -1;
-       if (count > 2)
-               return -1;
-
-       if (buf[0] == '0') {
-               iotg->hsm.a_bus_req = 0;
-               dev_dbg(lnw->dev, "User request: a_bus_req = 0\n");
-       } else if (buf[0] == '1') {
-               /* If a_bus_drop is TRUE, a_bus_req can't be set */
-               if (iotg->hsm.a_bus_drop)
-                       return -1;
-               iotg->hsm.a_bus_req = 1;
-               dev_dbg(lnw->dev, "User request: a_bus_req = 1\n");
-       }
-       if (spin_trylock(&lnw->wq_lock)) {
-               langwell_update_transceiver();
-               spin_unlock(&lnw->wq_lock);
-       }
-       return count;
-}
-static DEVICE_ATTR(a_bus_req, S_IRUGO | S_IWUSR, get_a_bus_req, set_a_bus_req);
-
-static ssize_t
-get_a_bus_drop(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       char                    *next;
-       unsigned                size, t;
-
-       next = buf;
-       size = PAGE_SIZE;
-
-       t = scnprintf(next, size, "%d", lnw->iotg.hsm.a_bus_drop);
-       size -= t;
-       next += t;
-
-       return PAGE_SIZE - size;
-}
-
-static ssize_t
-set_a_bus_drop(struct device *dev, struct device_attribute *attr,
-               const char *buf, size_t count)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-
-       if (!iotg->otg.default_a)
-               return -1;
-       if (count > 2)
-               return -1;
-
-       if (buf[0] == '0') {
-               iotg->hsm.a_bus_drop = 0;
-               dev_dbg(lnw->dev, "User request: a_bus_drop = 0\n");
-       } else if (buf[0] == '1') {
-               iotg->hsm.a_bus_drop = 1;
-               iotg->hsm.a_bus_req = 0;
-               dev_dbg(lnw->dev, "User request: a_bus_drop = 1\n");
-               dev_dbg(lnw->dev, "User request: and a_bus_req = 0\n");
-       }
-       if (spin_trylock(&lnw->wq_lock)) {
-               langwell_update_transceiver();
-               spin_unlock(&lnw->wq_lock);
-       }
-       return count;
-}
-static DEVICE_ATTR(a_bus_drop, S_IRUGO | S_IWUSR, get_a_bus_drop, set_a_bus_drop);
-
-static ssize_t
-get_b_bus_req(struct device *dev, struct device_attribute *attr, char *buf)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       char                    *next;
-       unsigned                size, t;
-
-       next = buf;
-       size = PAGE_SIZE;
-
-       t = scnprintf(next, size, "%d", lnw->iotg.hsm.b_bus_req);
-       size -= t;
-       next += t;
-
-       return PAGE_SIZE - size;
-}
-
-static ssize_t
-set_b_bus_req(struct device *dev, struct device_attribute *attr,
-               const char *buf, size_t count)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-
-       if (iotg->otg.default_a)
-               return -1;
-
-       if (count > 2)
-               return -1;
-
-       if (buf[0] == '0') {
-               iotg->hsm.b_bus_req = 0;
-               dev_dbg(lnw->dev, "User request: b_bus_req = 0\n");
-       } else if (buf[0] == '1') {
-               iotg->hsm.b_bus_req = 1;
-               dev_dbg(lnw->dev, "User request: b_bus_req = 1\n");
-       }
-       if (spin_trylock(&lnw->wq_lock)) {
-               langwell_update_transceiver();
-               spin_unlock(&lnw->wq_lock);
-       }
-       return count;
-}
-static DEVICE_ATTR(b_bus_req, S_IRUGO | S_IWUSR, get_b_bus_req, set_b_bus_req);
-
-static ssize_t
-set_a_clr_err(struct device *dev, struct device_attribute *attr,
-               const char *buf, size_t count)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-
-       if (!iotg->otg.default_a)
-               return -1;
-       if (count > 2)
-               return -1;
-
-       if (buf[0] == '1') {
-               iotg->hsm.a_clr_err = 1;
-               dev_dbg(lnw->dev, "User request: a_clr_err = 1\n");
-       }
-       if (spin_trylock(&lnw->wq_lock)) {
-               langwell_update_transceiver();
-               spin_unlock(&lnw->wq_lock);
-       }
-       return count;
-}
-static DEVICE_ATTR(a_clr_err, S_IWUSR, NULL, set_a_clr_err);
-
-static struct attribute *inputs_attrs[] = {
-       &dev_attr_a_bus_req.attr,
-       &dev_attr_a_bus_drop.attr,
-       &dev_attr_b_bus_req.attr,
-       &dev_attr_a_clr_err.attr,
-       NULL,
-};
-
-static struct attribute_group debug_dev_attr_group = {
-       .name = "inputs",
-       .attrs = inputs_attrs,
-};
-
-static int langwell_otg_probe(struct pci_dev *pdev,
-               const struct pci_device_id *id)
-{
-       unsigned long           resource, len;
-       void __iomem            *base = NULL;
-       int                     retval;
-       u32                     val32;
-       struct langwell_otg     *lnw;
-       char                    qname[] = "langwell_otg_queue";
-
-       retval = 0;
-       dev_dbg(&pdev->dev, "\notg controller is detected.\n");
-       if (pci_enable_device(pdev) < 0) {
-               retval = -ENODEV;
-               goto done;
-       }
-
-       lnw = kzalloc(sizeof *lnw, GFP_KERNEL);
-       if (lnw == NULL) {
-               retval = -ENOMEM;
-               goto done;
-       }
-       the_transceiver = lnw;
-
-       /* control register: BAR 0 */
-       resource = pci_resource_start(pdev, 0);
-       len = pci_resource_len(pdev, 0);
-       if (!request_mem_region(resource, len, driver_name)) {
-               retval = -EBUSY;
-               goto err;
-       }
-       lnw->region = 1;
-
-       base = ioremap_nocache(resource, len);
-       if (base == NULL) {
-               retval = -EFAULT;
-               goto err;
-       }
-       lnw->iotg.base = base;
-
-       if (!request_mem_region(USBCFG_ADDR, USBCFG_LEN, driver_name)) {
-               retval = -EBUSY;
-               goto err;
-       }
-       lnw->cfg_region = 1;
-
-       /* For the SCCB.USBCFG register */
-       base = ioremap_nocache(USBCFG_ADDR, USBCFG_LEN);
-       if (base == NULL) {
-               retval = -EFAULT;
-               goto err;
-       }
-       lnw->usbcfg = base;
-
-       if (!pdev->irq) {
-               dev_dbg(&pdev->dev, "No IRQ.\n");
-               retval = -ENODEV;
-               goto err;
-       }
-
-       lnw->qwork = create_singlethread_workqueue(qname);
-       if (!lnw->qwork) {
-               dev_dbg(&pdev->dev, "cannot create workqueue %s\n", qname);
-               retval = -ENOMEM;
-               goto err;
-       }
-       INIT_WORK(&lnw->work, langwell_otg_work);
-
-       /* OTG common part */
-       lnw->dev = &pdev->dev;
-       lnw->iotg.otg.dev = lnw->dev;
-       lnw->iotg.otg.label = driver_name;
-       lnw->iotg.otg.set_host = langwell_otg_set_host;
-       lnw->iotg.otg.set_peripheral = langwell_otg_set_peripheral;
-       lnw->iotg.otg.set_power = langwell_otg_set_power;
-       lnw->iotg.otg.set_vbus = langwell_otg_set_vbus;
-       lnw->iotg.otg.start_srp = langwell_otg_start_srp;
-       lnw->iotg.otg.state = OTG_STATE_UNDEFINED;
-
-       if (otg_set_transceiver(&lnw->iotg.otg)) {
-               dev_dbg(lnw->dev, "can't set transceiver\n");
-               retval = -EBUSY;
-               goto err;
-       }
-
-       reset_otg();
-       init_hsm();
-
-       spin_lock_init(&lnw->lock);
-       spin_lock_init(&lnw->wq_lock);
-       INIT_LIST_HEAD(&active_timers);
-       retval = langwell_otg_init_timers(&lnw->iotg.hsm);
-       if (retval) {
-               dev_dbg(&pdev->dev, "Failed to init timers\n");
-               goto err;
-       }
-
-       init_timer(&lnw->hsm_timer);
-       ATOMIC_INIT_NOTIFIER_HEAD(&lnw->iotg.iotg_notifier);
-
-       lnw->iotg_notifier.notifier_call = langwell_otg_iotg_notify;
-
-       retval = intel_mid_otg_register_notifier(&lnw->iotg,
-                                               &lnw->iotg_notifier);
-       if (retval) {
-               dev_dbg(lnw->dev, "Failed to register notifier\n");
-               goto err;
-       }
-
-       if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
-                               driver_name, lnw) != 0) {
-               dev_dbg(lnw->dev, "request interrupt %d failed\n", pdev->irq);
-               retval = -EBUSY;
-               goto err;
-       }
-
-       /* enable OTGSC int */
-       val32 = OTGSC_DPIE | OTGSC_BSEIE | OTGSC_BSVIE |
-               OTGSC_ASVIE | OTGSC_AVVIE | OTGSC_IDIE | OTGSC_IDPU;
-       writel(val32, lnw->iotg.base + CI_OTGSC);
-
-       retval = device_create_file(&pdev->dev, &dev_attr_registers);
-       if (retval < 0) {
-               dev_dbg(lnw->dev,
-                       "Can't register sysfs attribute: %d\n", retval);
-               goto err;
-       }
-
-       retval = device_create_file(&pdev->dev, &dev_attr_hsm);
-       if (retval < 0) {
-               dev_dbg(lnw->dev, "Can't register hsm sysfs attribute: %d\n", retval);
-               goto err;
-       }
-
-       retval = sysfs_create_group(&pdev->dev.kobj, &debug_dev_attr_group);
-       if (retval < 0) {
-               dev_dbg(lnw->dev,
-                       "Can't register sysfs attr group: %d\n", retval);
-               goto err;
-       }
-
-       if (lnw->iotg.otg.state == OTG_STATE_A_IDLE)
-               langwell_update_transceiver();
-
-       return 0;
-
-err:
-       if (the_transceiver)
-               langwell_otg_remove(pdev);
-done:
-       return retval;
-}
-
-static void langwell_otg_remove(struct pci_dev *pdev)
-{
-       struct langwell_otg *lnw = the_transceiver;
-
-       if (lnw->qwork) {
-               flush_workqueue(lnw->qwork);
-               destroy_workqueue(lnw->qwork);
-       }
-       intel_mid_otg_unregister_notifier(&lnw->iotg, &lnw->iotg_notifier);
-       langwell_otg_free_timers();
-
-       /* disable OTGSC interrupt as OTGSC doesn't change in reset */
-       writel(0, lnw->iotg.base + CI_OTGSC);
-
-       if (pdev->irq)
-               free_irq(pdev->irq, lnw);
-       if (lnw->usbcfg)
-               iounmap(lnw->usbcfg);
-       if (lnw->cfg_region)
-               release_mem_region(USBCFG_ADDR, USBCFG_LEN);
-       if (lnw->iotg.base)
-               iounmap(lnw->iotg.base);
-       if (lnw->region)
-               release_mem_region(pci_resource_start(pdev, 0),
-                               pci_resource_len(pdev, 0));
-
-       otg_set_transceiver(NULL);
-       pci_disable_device(pdev);
-       sysfs_remove_group(&pdev->dev.kobj, &debug_dev_attr_group);
-       device_remove_file(&pdev->dev, &dev_attr_hsm);
-       device_remove_file(&pdev->dev, &dev_attr_registers);
-       kfree(lnw);
-       lnw = NULL;
-}
-
-static void transceiver_suspend(struct pci_dev *pdev)
-{
-       pci_save_state(pdev);
-       pci_set_power_state(pdev, PCI_D3hot);
-       langwell_otg_phy_low_power(1);
-}
-
-static int langwell_otg_suspend(struct pci_dev *pdev, pm_message_t message)
-{
-       struct langwell_otg             *lnw = the_transceiver;
-       struct intel_mid_otg_xceiv      *iotg = &lnw->iotg;
-       int                             ret = 0;
-
-       /* Disable OTG interrupts */
-       langwell_otg_intr(0);
-
-       if (pdev->irq)
-               free_irq(pdev->irq, lnw);
-
-       /* Prevent more otg_work */
-       flush_workqueue(lnw->qwork);
-       destroy_workqueue(lnw->qwork);
-       lnw->qwork = NULL;
-
-       /* start actions */
-       switch (iotg->otg.state) {
-       case OTG_STATE_A_WAIT_VFALL:
-               iotg->otg.state = OTG_STATE_A_IDLE;
-       case OTG_STATE_A_IDLE:
-       case OTG_STATE_B_IDLE:
-       case OTG_STATE_A_VBUS_ERR:
-               transceiver_suspend(pdev);
-               break;
-       case OTG_STATE_A_WAIT_VRISE:
-               langwell_otg_del_timer(a_wait_vrise_tmr);
-               iotg->hsm.a_srp_det = 0;
-
-               /* Turn off VBus */
-               iotg->otg.set_vbus(&iotg->otg, false);
-               iotg->otg.state = OTG_STATE_A_IDLE;
-               transceiver_suspend(pdev);
-               break;
-       case OTG_STATE_A_WAIT_BCON:
-               del_timer_sync(&lnw->hsm_timer);
-               if (lnw->iotg.stop_host)
-                       lnw->iotg.stop_host(&lnw->iotg);
-               else
-                       dev_dbg(&pdev->dev, "host driver has been removed.\n");
-
-               iotg->hsm.a_srp_det = 0;
-
-               /* Turn off VBus */
-               iotg->otg.set_vbus(&iotg->otg, false);
-               iotg->otg.state = OTG_STATE_A_IDLE;
-               transceiver_suspend(pdev);
-               break;
-       case OTG_STATE_A_HOST:
-               if (lnw->iotg.stop_host)
-                       lnw->iotg.stop_host(&lnw->iotg);
-               else
-                       dev_dbg(&pdev->dev, "host driver has been removed.\n");
-
-               iotg->hsm.a_srp_det = 0;
-
-               /* Turn off VBus */
-               iotg->otg.set_vbus(&iotg->otg, false);
-
-               iotg->otg.state = OTG_STATE_A_IDLE;
-               transceiver_suspend(pdev);
-               break;
-       case OTG_STATE_A_SUSPEND:
-               langwell_otg_del_timer(a_aidl_bdis_tmr);
-               langwell_otg_HABA(0);
-               if (lnw->iotg.stop_host)
-                       lnw->iotg.stop_host(&lnw->iotg);
-               else
-                       dev_dbg(lnw->dev, "host driver has been removed.\n");
-               iotg->hsm.a_srp_det = 0;
-
-               /* Turn off VBus */
-               iotg->otg.set_vbus(&iotg->otg, false);
-               iotg->otg.state = OTG_STATE_A_IDLE;
-               transceiver_suspend(pdev);
-               break;
-       case OTG_STATE_A_PERIPHERAL:
-               del_timer_sync(&lnw->hsm_timer);
-
-               if (lnw->iotg.stop_peripheral)
-                       lnw->iotg.stop_peripheral(&lnw->iotg);
-               else
-                       dev_dbg(&pdev->dev,
-                               "client driver has been removed.\n");
-               iotg->hsm.a_srp_det = 0;
-
-               /* Turn off VBus */
-               iotg->otg.set_vbus(&iotg->otg, false);
-               iotg->otg.state = OTG_STATE_A_IDLE;
-               transceiver_suspend(pdev);
-               break;
-       case OTG_STATE_B_HOST:
-               if (lnw->iotg.stop_host)
-                       lnw->iotg.stop_host(&lnw->iotg);
-               else
-                       dev_dbg(&pdev->dev, "host driver has been removed.\n");
-               iotg->hsm.b_bus_req = 0;
-               iotg->otg.state = OTG_STATE_B_IDLE;
-               transceiver_suspend(pdev);
-               break;
-       case OTG_STATE_B_PERIPHERAL:
-               if (lnw->iotg.stop_peripheral)
-                       lnw->iotg.stop_peripheral(&lnw->iotg);
-               else
-                       dev_dbg(&pdev->dev,
-                               "client driver has been removed.\n");
-               iotg->otg.state = OTG_STATE_B_IDLE;
-               transceiver_suspend(pdev);
-               break;
-       case OTG_STATE_B_WAIT_ACON:
-               /* delete hsm timer for b_ase0_brst_tmr */
-               del_timer_sync(&lnw->hsm_timer);
-
-               langwell_otg_HAAR(0);
-
-               if (lnw->iotg.stop_host)
-                       lnw->iotg.stop_host(&lnw->iotg);
-               else
-                       dev_dbg(&pdev->dev, "host driver has been removed.\n");
-               iotg->hsm.b_bus_req = 0;
-               iotg->otg.state = OTG_STATE_B_IDLE;
-               transceiver_suspend(pdev);
-               break;
-       default:
-               dev_dbg(lnw->dev, "error state before suspend\n");
-               break;
-       }
-
-       return ret;
-}
-
-static void transceiver_resume(struct pci_dev *pdev)
-{
-       pci_restore_state(pdev);
-       pci_set_power_state(pdev, PCI_D0);
-}
-
-static int langwell_otg_resume(struct pci_dev *pdev)
-{
-       struct langwell_otg     *lnw = the_transceiver;
-       int                     ret = 0;
-
-       transceiver_resume(pdev);
-
-       lnw->qwork = create_singlethread_workqueue("langwell_otg_queue");
-       if (!lnw->qwork) {
-               dev_dbg(&pdev->dev, "cannot create langwell otg workqueue\n");
-               ret = -ENOMEM;
-               goto error;
-       }
-
-       if (request_irq(pdev->irq, otg_irq, IRQF_SHARED,
-                               driver_name, lnw) != 0) {
-               dev_dbg(&pdev->dev, "request interrupt %d failed\n", pdev->irq);
-               ret = -EBUSY;
-               goto error;
-       }
-
-       /* enable OTG interrupts */
-       langwell_otg_intr(1);
-
-       update_hsm();
-
-       langwell_update_transceiver();
-
-       return ret;
-error:
-       langwell_otg_intr(0);
-       transceiver_suspend(pdev);
-       return ret;
-}
-
-static int __init langwell_otg_init(void)
-{
-       return pci_register_driver(&otg_pci_driver);
-}
-module_init(langwell_otg_init);
-
-static void __exit langwell_otg_cleanup(void)
-{
-       pci_unregister_driver(&otg_pci_driver);
-}
-module_exit(langwell_otg_cleanup);
index db0d4fcdc8e21902705343b1497743d5cc157845..b5fbe1452ab00e83b2a25dff027548a31b56ff29 100644 (file)
@@ -202,6 +202,7 @@ static void mv_otg_init_irq(struct mv_otg *mvotg)
 
 static void mv_otg_start_host(struct mv_otg *mvotg, int on)
 {
+#ifdef CONFIG_USB
        struct otg_transceiver *otg = &mvotg->otg;
        struct usb_hcd *hcd;
 
@@ -216,6 +217,7 @@ static void mv_otg_start_host(struct mv_otg *mvotg, int on)
                usb_add_hcd(hcd, hcd->irq, IRQF_SHARED);
        else
                usb_remove_hcd(hcd);
+#endif /* CONFIG_USB */
 }
 
 static void mv_otg_start_periphrals(struct mv_otg *mvotg, int on)
index b51fcd80d244b57ca75a540276ff45b1c1c3c8c0..72339bd6fcab862565ee222aa4ead450f1d2945d 100644 (file)
@@ -772,10 +772,10 @@ static void usbhsf_dma_prepare_tasklet(unsigned long data)
        struct dma_async_tx_descriptor *desc;
        struct dma_chan *chan = usbhsf_dma_chan_get(fifo, pkt);
        struct device *dev = usbhs_priv_to_dev(priv);
-       enum dma_data_direction dir;
+       enum dma_transfer_direction dir;
        dma_cookie_t cookie;
 
-       dir = usbhs_pipe_is_dir_in(pipe) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+       dir = usbhs_pipe_is_dir_in(pipe) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
 
        sg_init_table(&sg, 1);
        sg_set_page(&sg, virt_to_page(pkt->dma),
index 528691d5f3e261119282145e775536a50ea2dc9e..7542aa94a4622421219237598c8db20ff1f46a87 100644 (file)
@@ -425,7 +425,7 @@ static int usbhsg_recip_run_handle(struct usbhs_priv *priv,
        struct usbhs_pipe *pipe;
        int recip = ctrl->bRequestType & USB_RECIP_MASK;
        int nth = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK;
-       int ret;
+       int ret = 0;
        int (*func)(struct usbhs_priv *priv, struct usbhsg_uep *uep,
                    struct usb_ctrlrequest *ctrl);
        char *msg;
index fba1147ed9169c1711027a693fcb8df5788aa031..8dbf51a43c45d2fcaf61cf5f6d26d1b65a4e96cf 100644 (file)
@@ -39,6 +39,8 @@ static void cp210x_get_termios(struct tty_struct *,
        struct usb_serial_port *port);
 static void cp210x_get_termios_port(struct usb_serial_port *port,
        unsigned int *cflagp, unsigned int *baudp);
+static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *,
+                                                       struct ktermios *);
 static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *,
                                                        struct ktermios*);
 static int cp210x_tiocmget(struct tty_struct *);
@@ -138,6 +140,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
+       { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */
        { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */
        { } /* Terminating Entry */
 };
@@ -201,6 +204,8 @@ static struct usb_serial_driver cp210x_device = {
 #define CP210X_EMBED_EVENTS    0x15
 #define CP210X_GET_EVENTSTATE  0x16
 #define CP210X_SET_CHARS       0x19
+#define CP210X_GET_BAUDRATE    0x1D
+#define CP210X_SET_BAUDRATE    0x1E
 
 /* CP210X_IFC_ENABLE */
 #define UART_ENABLE            0x0001
@@ -360,8 +365,8 @@ static inline int cp210x_set_config_single(struct usb_serial_port *port,
  * Quantises the baud rate as per AN205 Table 1
  */
 static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
-       if      (baud <= 56)       baud = 0;
-       else if (baud <= 300)      baud = 300;
+       if (baud <= 300)
+               baud = 300;
        else if (baud <= 600)      baud = 600;
        else if (baud <= 1200)     baud = 1200;
        else if (baud <= 1800)     baud = 1800;
@@ -389,10 +394,10 @@ static unsigned int cp210x_quantise_baudrate(unsigned int baud) {
        else if (baud <= 491520)   baud = 460800;
        else if (baud <= 567138)   baud = 500000;
        else if (baud <= 670254)   baud = 576000;
-       else if (baud <= 1053257)  baud = 921600;
-       else if (baud <= 1474560)  baud = 1228800;
-       else if (baud <= 2457600)  baud = 1843200;
-       else                       baud = 3686400;
+       else if (baud < 1000000)
+               baud = 921600;
+       else if (baud > 2000000)
+               baud = 2000000;
        return baud;
 }
 
@@ -409,13 +414,14 @@ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port)
                return result;
        }
 
-       result = usb_serial_generic_open(tty, port);
-       if (result)
-               return result;
-
        /* Configure the termios structure */
        cp210x_get_termios(tty, port);
-       return 0;
+
+       /* The baud rate must be initialised on cp2104 */
+       if (tty)
+               cp210x_change_speed(tty, port, NULL);
+
+       return usb_serial_generic_open(tty, port);
 }
 
 static void cp210x_close(struct usb_serial_port *port)
@@ -467,10 +473,7 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
 
        dbg("%s - port %d", __func__, port->number);
 
-       cp210x_get_config(port, CP210X_GET_BAUDDIV, &baud, 2);
-       /* Convert to baudrate */
-       if (baud)
-               baud = cp210x_quantise_baudrate((BAUD_RATE_GEN_FREQ + baud/2)/ baud);
+       cp210x_get_config(port, CP210X_GET_BAUDRATE, &baud, 4);
 
        dbg("%s - baud rate = %d", __func__, baud);
        *baudp = baud;
@@ -579,11 +582,64 @@ static void cp210x_get_termios_port(struct usb_serial_port *port,
        *cflagp = cflag;
 }
 
+/*
+ * CP2101 supports the following baud rates:
+ *
+ *     300, 600, 1200, 1800, 2400, 4800, 7200, 9600, 14400, 19200, 28800,
+ *     38400, 56000, 57600, 115200, 128000, 230400, 460800, 921600
+ *
+ * CP2102 and CP2103 support the following additional rates:
+ *
+ *     4000, 16000, 51200, 64000, 76800, 153600, 250000, 256000, 500000,
+ *     576000
+ *
+ * The device will map a requested rate to a supported one, but the result
+ * of requests for rates greater than 1053257 is undefined (see AN205).
+ *
+ * CP2104, CP2105 and CP2110 support most rates up to 2M, 921k and 1M baud,
+ * respectively, with an error less than 1%. The actual rates are determined
+ * by
+ *
+ *     div = round(freq / (2 x prescale x request))
+ *     actual = freq / (2 x prescale x div)
+ *
+ * For CP2104 and CP2105 freq is 48Mhz and prescale is 4 for request <= 365bps
+ * or 1 otherwise.
+ * For CP2110 freq is 24Mhz and prescale is 4 for request <= 300bps or 1
+ * otherwise.
+ */
+static void cp210x_change_speed(struct tty_struct *tty,
+               struct usb_serial_port *port, struct ktermios *old_termios)
+{
+       u32 baud;
+
+       baud = tty->termios->c_ospeed;
+
+       /* This maps the requested rate to a rate valid on cp2102 or cp2103,
+        * or to an arbitrary rate in [1M,2M].
+        *
+        * NOTE: B0 is not implemented.
+        */
+       baud = cp210x_quantise_baudrate(baud);
+
+       dbg("%s - setting baud rate to %u", __func__, baud);
+       if (cp210x_set_config(port, CP210X_SET_BAUDRATE, &baud,
+                                                       sizeof(baud))) {
+               dev_warn(&port->dev, "failed to set baud rate to %u\n", baud);
+               if (old_termios)
+                       baud = old_termios->c_ospeed;
+               else
+                       baud = 9600;
+       }
+
+       tty_encode_baud_rate(tty, baud, baud);
+}
+
 static void cp210x_set_termios(struct tty_struct *tty,
                struct usb_serial_port *port, struct ktermios *old_termios)
 {
        unsigned int cflag, old_cflag;
-       unsigned int baud = 0, bits;
+       unsigned int bits;
        unsigned int modem_ctl[4];
 
        dbg("%s - port %d", __func__, port->number);
@@ -593,20 +649,9 @@ static void cp210x_set_termios(struct tty_struct *tty,
 
        cflag = tty->termios->c_cflag;
        old_cflag = old_termios->c_cflag;
-       baud = cp210x_quantise_baudrate(tty_get_baud_rate(tty));
-
-       /* If the baud rate is to be updated*/
-       if (baud != tty_termios_baud_rate(old_termios) && baud != 0) {
-               dbg("%s - Setting baud rate to %d baud", __func__,
-                               baud);
-               if (cp210x_set_config_single(port, CP210X_SET_BAUDDIV,
-                                       ((BAUD_RATE_GEN_FREQ + baud/2) / baud))) {
-                       dbg("Baud rate requested not supported by device");
-                       baud = tty_termios_baud_rate(old_termios);
-               }
-       }
-       /* Report back the resulting baud rate */
-       tty_encode_baud_rate(tty, baud, baud);
+
+       if (tty->termios->c_ospeed != old_termios->c_ospeed)
+               cp210x_change_speed(tty, port, old_termios);
 
        /* If the number of data bits is to be updated */
        if ((cflag & CSIZE) != (old_cflag & CSIZE)) {
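The comment block in the cp210x hunk above gives the divider formula div = round(freq / (2 x prescale x request)) and actual = freq / (2 x prescale x div). The following is a minimal standalone sketch of that arithmetic, assuming a CP2104-like part (freq = 48 MHz, prescale = 1 for requests above 365 bps) and a hypothetical request of 115200 baud; it is an illustration only, not part of the driver.

#include <math.h>
#include <stdio.h>

/*
 * Worked example of the divider formula quoted in the cp210x comment,
 * under assumed CP2104-like parameters: freq = 48 MHz, prescale = 1,
 * requested rate 115200 baud (hypothetical).
 */
int main(void)
{
	const double freq = 48000000.0;
	const double prescale = 1.0;
	const double request = 115200.0;

	double div = round(freq / (2.0 * prescale * request));	/* 208 */
	double actual = freq / (2.0 * prescale * div);		/* ~115385 baud */

	printf("div = %.0f, actual = %.1f baud, error = %.2f%%\n",
	       div, actual, 100.0 * (actual - request) / request);
	return 0;
}

For this request the resulting error is roughly 0.16%, consistent with the "error less than 1%" statement in the comment.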
index 01b6404df395bd3c2a042c2f8c92b15b0905f6bd..ad654f8208ef7596f57997ec606faa9f7e7e5892 100644 (file)
@@ -797,6 +797,7 @@ static struct usb_device_id id_table_combined [] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(ADI_VID, ADI_GNICEPLUS_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(HORNBY_VID, HORNBY_ELITE_PID) },
        { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
        { USB_DEVICE(MARVELL_VID, MARVELL_SHEEVAPLUG_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
@@ -805,6 +806,8 @@ static struct usb_device_id id_table_combined [] = {
        { USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
        { USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+       { USB_DEVICE(FTDI_VID, TI_XDS100V2_PID),
+               .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(FTDI_VID, HAMEG_HO820_PID) },
        { USB_DEVICE(FTDI_VID, HAMEG_HO720_PID) },
        { USB_DEVICE(FTDI_VID, HAMEG_HO730_PID) },
@@ -841,6 +844,7 @@ static struct usb_device_id id_table_combined [] = {
                .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
        { USB_DEVICE(ST_VID, ST_STMCLT1030_PID),
                .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk },
+       { USB_DEVICE(FTDI_VID, FTDI_RF_R106) },
        { },                                    /* Optional parameter entry */
        { }                                     /* Terminating entry */
 };
@@ -1333,8 +1337,7 @@ static int set_serial_info(struct tty_struct *tty,
                goto check_and_exit;
        }
 
-       if ((new_serial.baud_base != priv->baud_base) &&
-           (new_serial.baud_base < 9600)) {
+       if (new_serial.baud_base != priv->baud_base) {
                mutex_unlock(&priv->cfg_lock);
                return -EINVAL;
        }
@@ -1824,6 +1827,7 @@ static int ftdi_sio_port_remove(struct usb_serial_port *port)
 
 static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
 {
+       struct ktermios dummy;
        struct usb_device *dev = port->serial->dev;
        struct ftdi_private *priv = usb_get_serial_port_data(port);
        int result;
@@ -1842,8 +1846,10 @@ static int ftdi_open(struct tty_struct *tty, struct usb_serial_port *port)
           This is same behaviour as serial.c/rs_open() - Kuba */
 
        /* ftdi_set_termios  will send usb control messages */
-       if (tty)
-               ftdi_set_termios(tty, port, tty->termios);
+       if (tty) {
+               memset(&dummy, 0, sizeof(dummy));
+               ftdi_set_termios(tty, port, &dummy);
+       }
 
        /* Start reading from the device */
        result = usb_serial_generic_open(tty, port);
index df1d7da933ec6f45281f116f57128a0707b4c4d9..f994503df2dd91664146f4dd82e19cba41c87841 100644 (file)
 /* www.candapter.com Ewert Energy Systems CANdapter device */
 #define FTDI_CANDAPTER_PID 0x9F80 /* Product Id */
 
+/*
+ * Texas Instruments XDS100v2 JTAG / BeagleBone A3
+ * http://processors.wiki.ti.com/index.php/XDS100
+ * http://beagleboard.org/bone
+ */
+#define TI_XDS100V2_PID                0xa6d0
+
 #define FTDI_NXTCAM_PID                0xABB8 /* NXTCam for Mindstorms NXT */
 
 /* US Interface Navigator (http://www.usinterface.com/) */
 #define ADI_GNICE_PID          0xF000
 #define ADI_GNICEPLUS_PID      0xF001
 
+/*
+ * Hornby Elite
+ */
+#define HORNBY_VID             0x04D8
+#define HORNBY_ELITE_PID       0x000A
+
 /*
  * RATOC REX-USB60F
  */
  */
 /* TagTracer MIFARE*/
 #define FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID   0xF7C0
+
+/*
+ * Rainforest Automation
+ */
+/* ZigBee controller */
+#define FTDI_RF_R106           0x8A28
index 65bf06aa591a091616b084943388194209b913a6..5818bfc3261ebb2418a54406e768abbe76245ed9 100644 (file)
@@ -2657,15 +2657,7 @@ cleanup:
 
 static void edge_disconnect(struct usb_serial *serial)
 {
-       int i;
-       struct edgeport_port *edge_port;
-
        dbg("%s", __func__);
-
-       for (i = 0; i < serial->num_ports; ++i) {
-               edge_port = usb_get_serial_port_data(serial->port[i]);
-               edge_remove_sysfs_attrs(edge_port->port);
-       }
 }
 
 static void edge_release(struct usb_serial *serial)
@@ -2744,6 +2736,7 @@ static struct usb_serial_driver edgeport_1port_device = {
        .disconnect             = edge_disconnect,
        .release                = edge_release,
        .port_probe             = edge_create_sysfs_attrs,
+       .port_remove            = edge_remove_sysfs_attrs,
        .ioctl                  = edge_ioctl,
        .set_termios            = edge_set_termios,
        .tiocmget               = edge_tiocmget,
@@ -2775,6 +2768,7 @@ static struct usb_serial_driver edgeport_2port_device = {
        .disconnect             = edge_disconnect,
        .release                = edge_release,
        .port_probe             = edge_create_sysfs_attrs,
+       .port_remove            = edge_remove_sysfs_attrs,
        .ioctl                  = edge_ioctl,
        .set_termios            = edge_set_termios,
        .tiocmget               = edge_tiocmget,
index 5d3beeeb5fd9b527359190b3e0076b6b85fb2d43..a92a3efb507bdaa45f7134919b30c4bf8b495917 100644 (file)
@@ -38,7 +38,7 @@
 #include <linux/ioctl.h>
 #include "kobil_sct.h"
 
-static int debug;
+static bool debug;
 
 /* Version Information */
 #define DRIVER_VERSION "21/05/2004"
index 420d9857394a090a6bf1aae5d9ef86541585dd4d..ea126a4490cdea656c3ce94aa35294f4caa858c9 100644 (file)
@@ -480,6 +480,10 @@ static void option_instat_callback(struct urb *urb);
 #define ZD_VENDOR_ID                           0x0685
 #define ZD_PRODUCT_7000                                0x7000
 
+/* LG products */
+#define LG_VENDOR_ID                           0x1004
+#define LG_PRODUCT_L02C                                0x618f
+
 /* some devices interfaces need special handling due to a number of reasons */
 enum option_blacklist_reason {
                OPTION_BLACKLIST_NONE = 0,
@@ -1183,6 +1187,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
        { USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
+       { USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) }, /* docomo L-02C modem */
        { } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
index 30b73e68a904d65e7dadb3fdeee8acd73f65607c..a34819884c1ad6b82e732f069e7392f0272d766f 100644 (file)
@@ -36,6 +36,7 @@
 #define UTSTARCOM_PRODUCT_UM175_V1             0x3712
 #define UTSTARCOM_PRODUCT_UM175_V2             0x3714
 #define UTSTARCOM_PRODUCT_UM175_ALLTEL         0x3715
+#define PANTECH_PRODUCT_UML190_VZW             0x3716
 #define PANTECH_PRODUCT_UML290_VZW             0x3718
 
 /* CMOTECH devices */
@@ -67,7 +68,11 @@ static struct usb_device_id id_table[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(LG_VENDOR_ID, LG_PRODUCT_VX4400_6000, 0xff, 0xff, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(SANYO_VENDOR_ID, SANYO_PRODUCT_KATANA_LX, 0xff, 0xff, 0x00) },
        { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_U520, 0xff, 0x00, 0x00) },
-       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xff, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML190_VZW, 0xff, 0xfe, 0xff) },
+       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfd, 0xff) },  /* NMEA */
+       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xfe, 0xff) },  /* WMC */
+       { USB_DEVICE_AND_INTERFACE_INFO(UTSTARCOM_VENDOR_ID, PANTECH_PRODUCT_UML290_VZW, 0xff, 0xff, 0xff) },  /* DIAG */
        { },
 };
 MODULE_DEVICE_TABLE(usb, id_table);
index 1f62723ef1a86013a8f1dbd5d96356559ea0ceda..d32f72061c099ddae5aca09758bed77feddf7319 100644 (file)
@@ -789,7 +789,7 @@ static void rts51x_suspend_timer_fn(unsigned long data)
                        rts51x_set_stat(chip, RTS51X_STAT_SS);
                        /* ignore mass storage interface's children */
                        pm_suspend_ignore_children(&us->pusb_intf->dev, true);
-                       usb_autopm_put_interface(us->pusb_intf);
+                       usb_autopm_put_interface_async(us->pusb_intf);
                        US_DEBUGP("%s: RTS51X_STAT_SS 01,"
                                "intf->pm_usage_cnt:%d, power.usage:%d\n",
                                __func__,
index 8efeae24764f97dedfe016f7006520fa61b8a09f..b4a71679c933def94e684c0d68c3d6a0b1c315c0 100644 (file)
@@ -27,8 +27,6 @@
 #define USB_SKEL_VENDOR_ID     0xfff0
 #define USB_SKEL_PRODUCT_ID    0xfff0
 
-static DEFINE_MUTEX(skel_mutex);
-
 /* table of devices that work with this driver */
 static const struct usb_device_id skel_table[] = {
        { USB_DEVICE(USB_SKEL_VENDOR_ID, USB_SKEL_PRODUCT_ID) },
@@ -101,25 +99,18 @@ static int skel_open(struct inode *inode, struct file *file)
                goto exit;
        }
 
-       mutex_lock(&skel_mutex);
        dev = usb_get_intfdata(interface);
        if (!dev) {
-               mutex_unlock(&skel_mutex);
                retval = -ENODEV;
                goto exit;
        }
 
        /* increment our usage count for the device */
        kref_get(&dev->kref);
-       mutex_unlock(&skel_mutex);
 
        /* lock the device to allow correctly handling errors
         * in resumption */
        mutex_lock(&dev->io_mutex);
-       if (!dev->interface) {
-               retval = -ENODEV;
-               goto out_err;
-       }
 
        retval = usb_autopm_get_interface(interface);
        if (retval)
@@ -127,11 +118,7 @@ static int skel_open(struct inode *inode, struct file *file)
 
        /* save our object in the file's private structure */
        file->private_data = dev;
-
-out_err:
        mutex_unlock(&dev->io_mutex);
-       if (retval)
-               kref_put(&dev->kref, skel_delete);
 
 exit:
        return retval;
@@ -611,6 +598,7 @@ static void skel_disconnect(struct usb_interface *interface)
        int minor = interface->minor;
 
        dev = usb_get_intfdata(interface);
+       usb_set_intfdata(interface, NULL);
 
        /* give back our minor */
        usb_deregister_dev(interface, &skel_class);
@@ -622,12 +610,8 @@ static void skel_disconnect(struct usb_interface *interface)
 
        usb_kill_anchored_urbs(&dev->submitted);
 
-       mutex_lock(&skel_mutex);
-       usb_set_intfdata(interface, NULL);
-
        /* decrement our usage count */
        kref_put(&dev->kref, skel_delete);
-       mutex_unlock(&skel_mutex);
 
        dev_info(&interface->dev, "USB Skeleton #%d now disconnected", minor);
 }
index 0ead8826ec7947f8abf2721b8609094987055cef..f29fdd7f6d750b6849c8d0d57d8ca5c948efe2e6 100644 (file)
@@ -6,7 +6,7 @@ config USB_WUSB
        depends on EXPERIMENTAL
        depends on USB
        depends on PCI
-        select UWB
+       depends on UWB
         select CRYPTO
         select CRYPTO_BLKCIPHER
         select CRYPTO_CBC
index 882a51fe7b3c8cd000dddc5f6b2214536f7b7636..9dab1f51dd43b3bac6e275eac77f995fb245d48d 100644 (file)
@@ -856,9 +856,9 @@ static const struct file_operations vhost_net_fops = {
 };
 
 static struct miscdevice vhost_net_misc = {
-       MISC_DYNAMIC_MINOR,
-       "vhost-net",
-       &vhost_net_fops,
+       .minor = VHOST_NET_MINOR,
+       .name = "vhost-net",
+       .fops = &vhost_net_fops,
 };
 
 static int vhost_net_init(void)
@@ -879,3 +879,5 @@ MODULE_VERSION("0.0.1");
 MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Michael S. Tsirkin");
 MODULE_DESCRIPTION("Host kernel accelerator for virtio net");
+MODULE_ALIAS_MISCDEV(VHOST_NET_MINOR);
+MODULE_ALIAS("devname:vhost-net");
index 0d7b20d4285d3cb47fc1894c237321756512942a..e40c00f2c2ba8ecf7dfa6a1f35655ee3fa419ffc 100644 (file)
@@ -1108,7 +1108,7 @@ static int atmel_lcdfb_suspend(struct platform_device *pdev, pm_message_t mesg)
         */
        lcdc_writel(sinfo, ATMEL_LCDC_IDR, ~0UL);
 
-       sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
+       sinfo->saved_lcdcon = lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_CTR);
        lcdc_writel(sinfo, ATMEL_LCDC_CONTRAST_CTR, 0);
        if (sinfo->atmel_lcdfb_power_control)
                sinfo->atmel_lcdfb_power_control(0);
index 66bc74d9ce2af30889d026835d00442923cbc822..378276c9d3cfdaa2926740da3aa6faba5213a3ae 100644 (file)
@@ -146,7 +146,7 @@ static int adp8860_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask
 
        ret = adp8860_read(client, reg, &reg_val);
 
-       if (!ret && ((reg_val & bit_mask) == 0)) {
+       if (!ret && ((reg_val & bit_mask) != bit_mask)) {
                reg_val |= bit_mask;
                ret = adp8860_write(client, reg, reg_val);
        }
index 6c68a6899e8769c0f131c91860e3467240bcb87c..6735059376d63840d95067ee4564500cd70fe390 100644 (file)
@@ -160,7 +160,7 @@ static int adp8870_set_bits(struct i2c_client *client, int reg, uint8_t bit_mask
 
        ret = adp8870_read(client, reg, &reg_val);
 
-       if (!ret && ((reg_val & bit_mask) == 0)) {
+       if (!ret && ((reg_val & bit_mask) != bit_mask)) {
                reg_val |= bit_mask;
                ret = adp8870_write(client, reg, reg_val);
        }
index 4f5d1c4cb6aba0a53c7bb75c2605bf9c000241f3..27d1d7a29c77ef501c29a043b309ce7f4bb290db 100644 (file)
@@ -190,6 +190,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
 
        priv->io_reg = regulator_get(&spi->dev, "vdd");
        if (IS_ERR(priv->io_reg)) {
+               ret = PTR_ERR(priv->io_reg);
                dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
                       __func__);
                goto err3;
@@ -197,6 +198,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
 
        priv->core_reg = regulator_get(&spi->dev, "vcore");
        if (IS_ERR(priv->core_reg)) {
+               ret = PTR_ERR(priv->core_reg);
                dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
                       __func__);
                goto err4;
index acf292bfba021a93206de3b028e147138b5ced99..6af3f16754f0e2bd8e5062312164f05a80219a78 100644 (file)
@@ -1432,7 +1432,7 @@ static int fsl_diu_suspend(struct platform_device *ofdev, pm_message_t state)
        struct fsl_diu_data *data;
 
        data = dev_get_drvdata(&ofdev->dev);
-       disable_lcdc(data->fsl_diu_info[0]);
+       disable_lcdc(data->fsl_diu_info);
 
        return 0;
 }
@@ -1442,7 +1442,7 @@ static int fsl_diu_resume(struct platform_device *ofdev)
        struct fsl_diu_data *data;
 
        data = dev_get_drvdata(&ofdev->dev);
-       enable_lcdc(data->fsl_diu_info[0]);
+       enable_lcdc(data->fsl_diu_info);
 
        return 0;
 }
index c6afa33a45322b4f70c57e2672535ab8cabef715..02fd2263610c1c63d1cdb8cd8173373a3ce88200 100644 (file)
@@ -529,7 +529,6 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
        if (fb_alloc_cmap(&info->cmap, 256, 1) < 0) {
                ERR_MSG("Could not allocate cmap for intelfb_info.\n");
                goto err_out_cmap;
-               return -ENODEV;
        }
 
        dinfo = info->par;
index 43207cc6cc1953099b2804717e173e47ad7e27f1..fe01add3700e05769dc035cde599040b43460328 100644 (file)
@@ -592,12 +592,12 @@ static int __init macfb_init(void)
        if (!fb_info.screen_base)
                return -ENODEV;
 
-       printk("macfb: framebuffer at 0x%08lx, mapped to 0x%p, size %dk\n",
-              macfb_fix.smem_start, fb_info.screen_base,
-              macfb_fix.smem_len / 1024);
-       printk("macfb: mode is %dx%dx%d, linelength=%d\n",
-              macfb_defined.xres, macfb_defined.yres,
-              macfb_defined.bits_per_pixel, macfb_fix.line_length);
+       pr_info("macfb: framebuffer at 0x%08lx, mapped to 0x%p, size %dk\n",
+               macfb_fix.smem_start, fb_info.screen_base,
+               macfb_fix.smem_len / 1024);
+       pr_info("macfb: mode is %dx%dx%d, linelength=%d\n",
+               macfb_defined.xres, macfb_defined.yres,
+               macfb_defined.bits_per_pixel, macfb_fix.line_length);
 
        /* Fill in the available video resolution */
        macfb_defined.xres_virtual = macfb_defined.xres;
@@ -613,14 +613,10 @@ static int __init macfb_init(void)
 
        switch (macfb_defined.bits_per_pixel) {
        case 1:
-               /*
-                * XXX: I think this will catch any program that tries
-                * to do FBIO_PUTCMAP when the visual is monochrome.
-                */
                macfb_defined.red.length = macfb_defined.bits_per_pixel;
                macfb_defined.green.length = macfb_defined.bits_per_pixel;
                macfb_defined.blue.length = macfb_defined.bits_per_pixel;
-               video_cmap_len = 0;
+               video_cmap_len = 2;
                macfb_fix.visual = FB_VISUAL_MONO01;
                break;
        case 2:
@@ -660,11 +656,10 @@ static int __init macfb_init(void)
                macfb_fix.visual = FB_VISUAL_TRUECOLOR;
                break;
        default:
-               video_cmap_len = 0;
-               macfb_fix.visual = FB_VISUAL_MONO01;
-               printk("macfb: unknown or unsupported bit depth: %d\n",
+               pr_err("macfb: unknown or unsupported bit depth: %d\n",
                       macfb_defined.bits_per_pixel);
-               break;
+               err = -EINVAL;
+               goto fail_unmap;
        }
        
        /*
@@ -734,8 +729,8 @@ static int __init macfb_init(void)
                case MAC_MODEL_Q950:
                        strcpy(macfb_fix.id, "DAFB");
                        macfb_setpalette = dafb_setpalette;
-                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        dafb_cmap_regs = ioremap(DAFB_BASE, 0x1000);
+                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        break;
 
                /*
@@ -744,8 +739,8 @@ static int __init macfb_init(void)
                case MAC_MODEL_LCII:
                        strcpy(macfb_fix.id, "V8");
                        macfb_setpalette = v8_brazil_setpalette;
-                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
+                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        break;
 
                /*
@@ -758,8 +753,8 @@ static int __init macfb_init(void)
                case MAC_MODEL_P600:
                        strcpy(macfb_fix.id, "Brazil");
                        macfb_setpalette = v8_brazil_setpalette;
-                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
+                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        break;
 
                /*
@@ -773,10 +768,10 @@ static int __init macfb_init(void)
                case MAC_MODEL_P520:
                case MAC_MODEL_P550:
                case MAC_MODEL_P460:
-                       macfb_setpalette = v8_brazil_setpalette;
-                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        strcpy(macfb_fix.id, "Sonora");
+                       macfb_setpalette = v8_brazil_setpalette;
                        v8_brazil_cmap_regs = ioremap(DAC_BASE, 0x1000);
+                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        break;
 
                /*
@@ -786,10 +781,10 @@ static int __init macfb_init(void)
                 */
                case MAC_MODEL_IICI:
                case MAC_MODEL_IISI:
-                       macfb_setpalette = rbv_setpalette;
-                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        strcpy(macfb_fix.id, "RBV");
+                       macfb_setpalette = rbv_setpalette;
                        rbv_cmap_regs = ioremap(DAC_BASE, 0x1000);
+                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        break;
 
                /*
@@ -797,10 +792,10 @@ static int __init macfb_init(void)
                 */
                case MAC_MODEL_Q840:
                case MAC_MODEL_C660:
-                       macfb_setpalette = civic_setpalette;
-                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        strcpy(macfb_fix.id, "Civic");
+                       macfb_setpalette = civic_setpalette;
                        civic_cmap_regs = ioremap(CIVIC_BASE, 0x1000);
+                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        break;
 
                
@@ -809,26 +804,26 @@ static int __init macfb_init(void)
                 * We think this may be like the LC II
                 */
                case MAC_MODEL_LC:
+                       strcpy(macfb_fix.id, "LC");
                        if (vidtest) {
                                macfb_setpalette = v8_brazil_setpalette;
-                               macfb_defined.activate = FB_ACTIVATE_NOW;
                                v8_brazil_cmap_regs =
                                        ioremap(DAC_BASE, 0x1000);
+                               macfb_defined.activate = FB_ACTIVATE_NOW;
                        }
-                       strcpy(macfb_fix.id, "LC");
                        break;
 
                /*
                 * We think this may be like the LC II
                 */
                case MAC_MODEL_CCL:
+                       strcpy(macfb_fix.id, "Color Classic");
                        if (vidtest) {
                                macfb_setpalette = v8_brazil_setpalette;
-                               macfb_defined.activate = FB_ACTIVATE_NOW;
                                v8_brazil_cmap_regs =
                                        ioremap(DAC_BASE, 0x1000);
+                               macfb_defined.activate = FB_ACTIVATE_NOW;
                        }
-                       strcpy(macfb_fix.id, "Color Classic");
                        break;
 
                /*
@@ -893,10 +888,10 @@ static int __init macfb_init(void)
                case MAC_MODEL_PB270C:
                case MAC_MODEL_PB280:
                case MAC_MODEL_PB280C:
-                       macfb_setpalette = csc_setpalette;
-                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        strcpy(macfb_fix.id, "CSC");
+                       macfb_setpalette = csc_setpalette;
                        csc_cmap_regs = ioremap(CSC_BASE, 0x1000);
+                       macfb_defined.activate = FB_ACTIVATE_NOW;
                        break;
 
                default:
@@ -918,8 +913,9 @@ static int __init macfb_init(void)
        if (err)
                goto fail_dealloc;
 
-       printk("fb%d: %s frame buffer device\n",
-              fb_info.node, fb_info.fix.id);
+       pr_info("fb%d: %s frame buffer device\n",
+               fb_info.node, fb_info.fix.id);
+
        return 0;
 
 fail_dealloc:
index e3406ab313059fcde53187f7bd9ea3f6352aff11..727a5149d81806be71c7b902be7d4a20b3309a2e 100644 (file)
@@ -245,6 +245,7 @@ struct mx3fb_data {
 
        uint32_t                h_start_width;
        uint32_t                v_start_width;
+       enum disp_data_mapping  disp_data_fmt;
 };
 
 struct dma_chan_request {
@@ -287,11 +288,14 @@ static void mx3fb_write_reg(struct mx3fb_data *mx3fb, u32 value, unsigned long r
        __raw_writel(value, mx3fb->reg_base + reg);
 }
 
-static const uint32_t di_mappings[] = {
-       0x1600AAAA, 0x00E05555, 0x00070000, 3,  /* RGB888 */
-       0x0005000F, 0x000B000F, 0x0011000F, 1,  /* RGB666 */
-       0x0011000F, 0x000B000F, 0x0005000F, 1,  /* BGR666 */
-       0x0004003F, 0x000A000F, 0x000F003F, 1   /* RGB565 */
+struct di_mapping {
+       uint32_t b0, b1, b2;
+};
+
+static const struct di_mapping di_mappings[] = {
+       [IPU_DISP_DATA_MAPPING_RGB666] = { 0x0005000f, 0x000b000f, 0x0011000f },
+       [IPU_DISP_DATA_MAPPING_RGB565] = { 0x0004003f, 0x000a000f, 0x000f003f },
+       [IPU_DISP_DATA_MAPPING_RGB888] = { 0x00070000, 0x000f0000, 0x00170000 },
 };
 
 static void sdc_fb_init(struct mx3fb_info *fbi)
@@ -334,7 +338,7 @@ static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
        /* This enables the channel */
        if (mx3_fbi->cookie < 0) {
                mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
-                     &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+                     &mx3_fbi->sg[0], 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
                if (!mx3_fbi->txd) {
                        dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
                                dma_chan->chan_id);
@@ -425,7 +429,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
  * @pixel_clk:         desired pixel clock frequency in Hz.
  * @width:             width of panel in pixels.
  * @height:            height of panel in pixels.
- * @pixel_fmt:         pixel format of buffer as FOURCC ASCII code.
  * @h_start_width:     number of pixel clocks between the HSYNC signal pulse
  *                     and the start of valid data.
  * @h_sync_width:      width of the HSYNC signal in units of pixel clocks.
@@ -442,7 +445,6 @@ static int sdc_set_window_pos(struct mx3fb_data *mx3fb, enum ipu_channel channel
 static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
                          uint32_t pixel_clk,
                          uint16_t width, uint16_t height,
-                         enum pixel_fmt pixel_fmt,
                          uint16_t h_start_width, uint16_t h_sync_width,
                          uint16_t h_end_width, uint16_t v_start_width,
                          uint16_t v_sync_width, uint16_t v_end_width,
@@ -453,6 +455,7 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
        uint32_t old_conf;
        uint32_t div;
        struct clk *ipu_clk;
+       const struct di_mapping *map;
 
        dev_dbg(mx3fb->dev, "panel size = %d x %d", width, height);
 
@@ -540,36 +543,10 @@ static int sdc_init_panel(struct mx3fb_data *mx3fb, enum ipu_panel panel,
                sig.Vsync_pol << DI_D3_VSYNC_POL_SHIFT;
        mx3fb_write_reg(mx3fb, old_conf, DI_DISP_SIG_POL);
 
-       switch (pixel_fmt) {
-       case IPU_PIX_FMT_RGB24:
-               mx3fb_write_reg(mx3fb, di_mappings[0], DI_DISP3_B0_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[1], DI_DISP3_B1_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[2], DI_DISP3_B2_MAP);
-               mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-                            ((di_mappings[3] - 1) << 12), DI_DISP_ACC_CC);
-               break;
-       case IPU_PIX_FMT_RGB666:
-               mx3fb_write_reg(mx3fb, di_mappings[4], DI_DISP3_B0_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[5], DI_DISP3_B1_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[6], DI_DISP3_B2_MAP);
-               mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-                            ((di_mappings[7] - 1) << 12), DI_DISP_ACC_CC);
-               break;
-       case IPU_PIX_FMT_BGR666:
-               mx3fb_write_reg(mx3fb, di_mappings[8], DI_DISP3_B0_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[9], DI_DISP3_B1_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[10], DI_DISP3_B2_MAP);
-               mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-                            ((di_mappings[11] - 1) << 12), DI_DISP_ACC_CC);
-               break;
-       default:
-               mx3fb_write_reg(mx3fb, di_mappings[12], DI_DISP3_B0_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[13], DI_DISP3_B1_MAP);
-               mx3fb_write_reg(mx3fb, di_mappings[14], DI_DISP3_B2_MAP);
-               mx3fb_write_reg(mx3fb, mx3fb_read_reg(mx3fb, DI_DISP_ACC_CC) |
-                            ((di_mappings[15] - 1) << 12), DI_DISP_ACC_CC);
-               break;
-       }
+       map = &di_mappings[mx3fb->disp_data_fmt];
+       mx3fb_write_reg(mx3fb, map->b0, DI_DISP3_B0_MAP);
+       mx3fb_write_reg(mx3fb, map->b1, DI_DISP3_B1_MAP);
+       mx3fb_write_reg(mx3fb, map->b2, DI_DISP3_B2_MAP);
 
        spin_unlock_irqrestore(&mx3fb->lock, lock_flags);
 
@@ -780,8 +757,6 @@ static int __set_par(struct fb_info *fbi, bool lock)
                if (sdc_init_panel(mx3fb, mode,
                                   (PICOS2KHZ(fbi->var.pixclock)) * 1000UL,
                                   fbi->var.xres, fbi->var.yres,
-                                  (fbi->var.sync & FB_SYNC_SWAP_RGB) ?
-                                  IPU_PIX_FMT_BGR666 : IPU_PIX_FMT_RGB666,
                                   fbi->var.left_margin,
                                   fbi->var.hsync_len,
                                   fbi->var.right_margin +
@@ -1117,7 +1092,7 @@ static int mx3fb_pan_display(struct fb_var_screeninfo *var,
                async_tx_ack(mx3_fbi->txd);
 
        txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg +
-               mx3_fbi->cur_ipu_buf, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
+               mx3_fbi->cur_ipu_buf, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
        if (!txd) {
                dev_err(fbi->device,
                        "Error preparing a DMA transaction descriptor.\n");
@@ -1349,6 +1324,12 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
        const struct fb_videomode *mode;
        int ret, num_modes;
 
+       if (mx3fb_pdata->disp_data_fmt >= ARRAY_SIZE(di_mappings)) {
+               dev_err(dev, "Illegal display data format %d\n",
+                               mx3fb_pdata->disp_data_fmt);
+               return -EINVAL;
+       }
+
        ichan->client = mx3fb;
        irq = ichan->eof_irq;
 
@@ -1402,6 +1383,8 @@ static int init_fb_chan(struct mx3fb_data *mx3fb, struct idmac_channel *ichan)
        mx3fbi->mx3fb           = mx3fb;
        mx3fbi->blank           = FB_BLANK_NORMAL;
 
+       mx3fb->disp_data_fmt    = mx3fb_pdata->disp_data_fmt;
+
        init_completion(&mx3fbi->flip_cmpl);
        disable_irq(ichan->eof_irq);
        dev_dbg(mx3fb->dev, "disabling irq %d\n", ichan->eof_irq);
index a5ec7f37c1857f99fdd411e38e0384f05c3e1936..e1626a1d5c451ffe0d3b829326ddf462423f5274 100644 (file)
@@ -401,7 +401,7 @@ void dispc_runtime_put(void)
 
        DSSDBG("dispc_runtime_put\n");
 
-       r = pm_runtime_put(&dispc.pdev->dev);
+       r = pm_runtime_put_sync(&dispc.pdev->dev);
        WARN_ON(r < 0);
 }
 
index d4d676c82c12fbe9789edc354c85b5a14bd19cbb..52f36ec1c8bb3889e9e23cb2c6b6eca2181a454a 100644 (file)
@@ -1079,7 +1079,7 @@ void dsi_runtime_put(struct platform_device *dsidev)
 
        DSSDBG("dsi_runtime_put\n");
 
-       r = pm_runtime_put(&dsi->pdev->dev);
+       r = pm_runtime_put_sync(&dsi->pdev->dev);
        WARN_ON(r < 0);
 }
 
index 17033457ee89baa2e557b4dbd0d53215977ee6ce..77c2b5a32b5d639687e84b8faa1e0385f55b86f9 100644 (file)
@@ -720,7 +720,7 @@ void dss_runtime_put(void)
 
        DSSDBG("dss_runtime_put\n");
 
-       r = pm_runtime_put(&dss.pdev->dev);
+       r = pm_runtime_put_sync(&dss.pdev->dev);
        WARN_ON(r < 0);
 }
 
index b4c270edb915bde5ce721d8373950ee8fa2df667..d7aa3b056529e9469a8e3c0346ebbe421a2df1be 100644 (file)
@@ -176,7 +176,7 @@ static void hdmi_runtime_put(void)
 
        DSSDBG("hdmi_runtime_put\n");
 
-       r = pm_runtime_put(&hdmi.pdev->dev);
+       r = pm_runtime_put_sync(&hdmi.pdev->dev);
        WARN_ON(r < 0);
 }
 
@@ -497,6 +497,7 @@ bool omapdss_hdmi_detect(void)
 
 int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
 {
+       struct omap_dss_hdmi_data *priv = dssdev->data;
        int r = 0;
 
        DSSDBG("ENTER hdmi_display_enable\n");
@@ -509,6 +510,8 @@ int omapdss_hdmi_display_enable(struct omap_dss_device *dssdev)
                goto err0;
        }
 
+       hdmi.ip_data.hpd_gpio = priv->hpd_gpio;
+
        r = omap_dss_start_device(dssdev);
        if (r) {
                DSSERR("failed to start device\n");
index 814bb9500dca77a9b47c6216aedadd361ba5cdaf..55f398014f33b31863e0718a6600c25c368bac8e 100644 (file)
@@ -140,7 +140,7 @@ static void rfbi_runtime_put(void)
 
        DSSDBG("rfbi_runtime_put\n");
 
-       r = pm_runtime_put(&rfbi.pdev->dev);
+       r = pm_runtime_put_sync(&rfbi.pdev->dev);
        WARN_ON(r < 0);
 }
 
index 7503f7f619a7fe43b7be266f04e9bf4cd7c8579a..50dadba5070a6fc862cb514979e30e7faf8f9f34 100644 (file)
@@ -126,6 +126,10 @@ struct hdmi_ip_data {
        const struct ti_hdmi_ip_ops *ops;
        struct hdmi_config cfg;
        struct hdmi_pll_info pll_data;
+
+       /* ti_hdmi_4xxx_ip private data. These should be in a separate struct */
+       int hpd_gpio;
+       bool phy_tx_enabled;
 };
 int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
 void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
index 9af81f18f1633ffaaeae842d922f8800cd9d285c..2d72334ca3da9a7d3bac43fba0c7e1ce6c26621e 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/delay.h>
 #include <linux/string.h>
 #include <linux/seq_file.h>
+#include <linux/gpio.h>
 
 #include "ti_hdmi_4xxx_ip.h"
 #include "dss.h"
@@ -223,6 +224,49 @@ void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data)
        hdmi_set_pll_pwr(ip_data, HDMI_PLLPWRCMD_ALLOFF);
 }
 
+static int hdmi_check_hpd_state(struct hdmi_ip_data *ip_data)
+{
+       unsigned long flags;
+       bool hpd;
+       int r;
+       /* this should be in ti_hdmi_4xxx_ip private data */
+       static DEFINE_SPINLOCK(phy_tx_lock);
+
+       spin_lock_irqsave(&phy_tx_lock, flags);
+
+       hpd = gpio_get_value(ip_data->hpd_gpio);
+
+       if (hpd == ip_data->phy_tx_enabled) {
+               spin_unlock_irqrestore(&phy_tx_lock, flags);
+               return 0;
+       }
+
+       if (hpd)
+               r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
+       else
+               r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_LDOON);
+
+       if (r) {
+               DSSERR("Failed to %s PHY TX power\n",
+                               hpd ? "enable" : "disable");
+               goto err;
+       }
+
+       ip_data->phy_tx_enabled = hpd;
+err:
+       spin_unlock_irqrestore(&phy_tx_lock, flags);
+       return r;
+}
+
+static irqreturn_t hpd_irq_handler(int irq, void *data)
+{
+       struct hdmi_ip_data *ip_data = data;
+
+       hdmi_check_hpd_state(ip_data);
+
+       return IRQ_HANDLED;
+}
+
 int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
 {
        u16 r = 0;
@@ -232,10 +276,6 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
        if (r)
                return r;
 
-       r = hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_TXON);
-       if (r)
-               return r;
-
        /*
         * Read address 0 in order to get the SCP reset done completed
         * Dummy access performed to make sure reset is done
@@ -257,12 +297,32 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
        /* Write to phy address 3 to change the polarity control */
        REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
 
+       r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio),
+                       NULL, hpd_irq_handler,
+                       IRQF_DISABLED | IRQF_TRIGGER_RISING |
+                       IRQF_TRIGGER_FALLING, "hpd", ip_data);
+       if (r) {
+               DSSERR("HPD IRQ request failed\n");
+               hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
+               return r;
+       }
+
+       r = hdmi_check_hpd_state(ip_data);
+       if (r) {
+               free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data);
+               hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
+               return r;
+       }
+
        return 0;
 }
 
 void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data)
 {
+       free_irq(gpio_to_irq(ip_data->hpd_gpio), ip_data);
+
        hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
+       ip_data->phy_tx_enabled = false;
 }
 
 static int hdmi_core_ddc_init(struct hdmi_ip_data *ip_data)
index b3e9f90915815f4e82950743c72b6d36acad8913..5c3d0f9015105beb1d3a3946847097a181035ef1 100644 (file)
@@ -401,7 +401,7 @@ static void venc_runtime_put(void)
 
        DSSDBG("venc_runtime_put\n");
 
-       r = pm_runtime_put(&venc.pdev->dev);
+       r = pm_runtime_put_sync(&venc.pdev->dev);
        WARN_ON(r < 0);
 }
 
index 79e1b292c0309b8175a84d7e4353ff94b8e13eb0..5aa43c3392a2a061ed9fb35f3caa53605690e7fd 100644 (file)
@@ -35,7 +35,7 @@
 #define virtio_rmb(vq) \
        do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
 #define virtio_wmb(vq) \
-       do { if ((vq)->weak_barriers) smp_rmb(); else rmb(); } while(0)
+       do { if ((vq)->weak_barriers) smp_wmb(); else wmb(); } while(0)
 #else
 /* We must force memory ordering even if guest is UP since host could be
  * running on another CPU, but SMP barriers are defined to barrier() in that
@@ -308,9 +308,9 @@ bool virtqueue_kick_prepare(struct virtqueue *_vq)
        bool needs_kick;
 
        START_USE(vq);
-       /* Descriptors and available array need to be set before we expose the
-        * new available array entries. */
-       virtio_wmb(vq);
+       /* We need to expose available array entries before checking avail
+        * event. */
+       virtio_mb(vq);
 
        old = vq->vring.avail->idx - vq->num_added;
        new = vq->vring.avail->idx;
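
The switch from virtio_wmb() to virtio_mb() in the hunk above is needed because the kick decision is a store-then-load sequence: the entries published by virtqueue_add_buf() (stores to the avail ring) must be visible to the host before the driver loads the host-maintained event index, and a write barrier only orders stores against later stores. A minimal sketch of that ordering requirement, assuming the vring_avail_event()/vring_need_event() helpers from linux/virtio_ring.h, with publish_avail_entries() as a hypothetical stand-in for the stores done by virtqueue_add_buf():

	/* sketch only, not additional driver code */
	publish_avail_entries(vq);                 /* stores: expose new entries */
	virtio_mb(vq);                             /* full barrier: order those
	                                            * stores against the load
	                                            * below; virtio_wmb() would
	                                            * not */
	needs_kick = vring_need_event(vring_avail_event(&vq->vring), /* load */
	                              new, old);
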
index 1b0e3dd81c1a2751be3db157790f677d27eee5e9..63d7b58f1c7d35edb71baee715d6a18c930c9783 100644 (file)
@@ -300,11 +300,7 @@ static int __devinit dw_wdt_drv_probe(struct platform_device *pdev)
        if (!mem)
                return -EINVAL;
 
-       if (!devm_request_mem_region(&pdev->dev, mem->start, resource_size(mem),
-                                    "dw_wdt"))
-               return -ENOMEM;
-
-       dw_wdt.regs = devm_ioremap(&pdev->dev, mem->start, resource_size(mem));
+       dw_wdt.regs = devm_request_and_ioremap(&pdev->dev, mem);
        if (!dw_wdt.regs)
                return -ENOMEM;
 
index 99796c5d913db2c9f354dbd06b503d8d65580cdf..bdf401b240b547af6877e00b3286e358eb8f1184 100644 (file)
@@ -36,6 +36,7 @@
  *     document number TBD                   : Patsburg (PBG)
  *     document number TBD                   : DH89xxCC
  *     document number TBD                   : Panther Point
+ *     document number TBD                   : Lynx Point
  */
 
 /*
@@ -126,6 +127,7 @@ enum iTCO_chipsets {
        TCO_PBG,        /* Patsburg */
        TCO_DH89XXCC,   /* DH89xxCC */
        TCO_PPT,        /* Panther Point */
+       TCO_LPT,        /* Lynx Point */
 };
 
 static struct {
@@ -189,6 +191,7 @@ static struct {
        {"Patsburg", 2},
        {"DH89xxCC", 2},
        {"Panther Point", 2},
+       {"Lynx Point", 2},
        {NULL, 0}
 };
 
@@ -331,6 +334,38 @@ static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
        { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
        { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
        { PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
+       { PCI_VDEVICE(INTEL, 0x8c40), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c41), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c42), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c43), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c44), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c45), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c46), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c47), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c48), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c49), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c4a), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c4b), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c4c), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c4d), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c4e), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c4f), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c50), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c51), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c52), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c53), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c54), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c55), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c56), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c57), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c58), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c59), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c5a), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c5b), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c5c), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c5d), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c5e), TCO_LPT},
+       { PCI_VDEVICE(INTEL, 0x8c5f), TCO_LPT},
        { 0, },                 /* End of list */
 };
 MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
index b8ef2c6dca7ca900fdf6ae752b59ca1e49c1ad6a..c44c3334003a11aa3a8cb53e5099c0309c77830f 100644 (file)
@@ -247,7 +247,6 @@ static struct miscdevice imx2_wdt_miscdev = {
 static int __init imx2_wdt_probe(struct platform_device *pdev)
 {
        int ret;
-       int res_size;
        struct resource *res;
 
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -256,15 +255,7 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
                return -ENODEV;
        }
 
-       res_size = resource_size(res);
-       if (!devm_request_mem_region(&pdev->dev, res->start, res_size,
-               res->name)) {
-               dev_err(&pdev->dev, "can't allocate %d bytes at %d address\n",
-                       res_size, res->start);
-               return -ENOMEM;
-       }
-
-       imx2_wdt.base = devm_ioremap_nocache(&pdev->dev, res->start, res_size);
+       imx2_wdt.base = devm_request_and_ioremap(&pdev->dev, res);
        if (!imx2_wdt.base) {
                dev_err(&pdev->dev, "ioremap failed\n");
                return -ENOMEM;
index 50359bad91770d41d8b3dedbb959cc0dac1d4ad0..529085b8b8fb0358275b3cbbf65801b5eee74445 100644 (file)
@@ -72,7 +72,7 @@ struct nuc900_wdt {
 };
 
 static unsigned long nuc900wdt_busy;
-struct nuc900_wdt *nuc900_wdt;
+static struct nuc900_wdt *nuc900_wdt;
 
 static inline void nuc900_wdt_keepalive(void)
 {
@@ -287,7 +287,8 @@ static int __devinit nuc900wdt_probe(struct platform_device *pdev)
 
        setup_timer(&nuc900_wdt->timer, nuc900_wdt_timer_ping, 0);
 
-       if (misc_register(&nuc900wdt_miscdev)) {
+       ret = misc_register(&nuc900wdt_miscdev);
+       if (ret) {
                dev_err(&pdev->dev, "err register miscdev on minor=%d (%d)\n",
                        WATCHDOG_MINOR, ret);
                goto err_clk;
index 4b33e3fd726bb4b2664dbc3c8ff66d0cef71be3d..d19ff5145e8260fff0eb102bbbfcd7b058b85eeb 100644 (file)
@@ -339,6 +339,7 @@ static int __devinit omap_wdt_probe(struct platform_device *pdev)
        return 0;
 
 err_misc:
+       pm_runtime_disable(wdev->dev);
        platform_set_drvdata(pdev, NULL);
        iounmap(wdev->base);
 
@@ -371,6 +372,7 @@ static int __devexit omap_wdt_remove(struct platform_device *pdev)
        struct omap_wdt_dev *wdev = platform_get_drvdata(pdev);
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
+       pm_runtime_disable(wdev->dev);
        if (!res)
                return -ENOENT;
 
index bd143c9dd3e6d0084c08c40d972082e4b9f9db29..8e210aafdfd05396db165bd32c4f68a07e25e05f 100644 (file)
@@ -226,7 +226,7 @@ static long pnx4008_wdt_ioctl(struct file *file, unsigned int cmd,
 static int pnx4008_wdt_release(struct inode *inode, struct file *file)
 {
        if (!test_bit(WDT_OK_TO_CLOSE, &wdt_status))
-               printk(KERN_WARNING "WATCHDOG: Device closed unexpectdly\n");
+               printk(KERN_WARNING "WATCHDOG: Device closed unexpectedly\n");
 
        wdt_disable();
        clk_disable(wdt_clk);
index 4c2a4e8698f9922813f8d31b86bca583f9985239..e37d81178b9e7bd5326cd5dbbbfafe3f91a2d885 100644 (file)
@@ -174,7 +174,7 @@ static int stmp3xxx_wdt_release(struct inode *inode, struct file *file)
        if (!nowayout) {
                if (!test_bit(WDT_OK_TO_CLOSE, &wdt_status)) {
                        wdt_ping();
-                       pr_debug("%s: Device closed unexpectdly\n", __func__);
+                       pr_debug("%s: Device closed unexpectedly\n", __func__);
                        ret = -EINVAL;
                } else {
                        wdt_disable();
index 026b4bbfa0aa2a0fd783ad178a261f0f3744738a..8f07dd4bd94a67385d3c44093a8a9f5c75e8f960 100644 (file)
@@ -124,8 +124,6 @@ static int wdt_stop(struct watchdog_device *wdd)
 static int wdt_set_timeout(struct watchdog_device *wdd,
                           unsigned int new_timeout)
 {
-       if (new_timeout < 1 || new_timeout > WDT_TIMEOUT_MAX)
-               return -EINVAL;
        writel(new_timeout, wdt_mem + VIA_WDT_COUNT);
        timeout = new_timeout;
        return 0;
@@ -150,6 +148,8 @@ static const struct watchdog_ops wdt_ops = {
 static struct watchdog_device wdt_dev = {
        .info =         &wdt_info,
        .ops =          &wdt_ops,
+       .min_timeout =  1,
+       .max_timeout =  WDT_TIMEOUT_MAX,
 };
 
 static int __devinit wdt_probe(struct pci_dev *pdev,
@@ -233,7 +233,7 @@ static void __devexit wdt_remove(struct pci_dev *pdev)
        pci_disable_device(pdev);
 }
 
-DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = {
+static DEFINE_PCI_DEVICE_TABLE(wdt_pci_table) = {
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_CX700) },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX800) },
        { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VX855) },
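
Dropping the explicit range check from wdt_set_timeout() works because the driver now advertises .min_timeout/.max_timeout, letting the generic watchdog core reject out-of-range values before calling ->set_timeout(). A minimal sketch of the validation this relies on (an assumption about the core's behaviour, not code from this patch):

	/* hypothetical illustration of the core-side bounds check */
	static bool wdt_timeout_in_range(struct watchdog_device *wdd,
	                                 unsigned int timeout)
	{
	        /* max_timeout == 0 means the driver does its own checking */
	        return wdd->max_timeout == 0 ||
	               (timeout >= wdd->min_timeout &&
	                timeout <= wdd->max_timeout);
	}
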
index 42e940c238914172e199d5e232369ccc33ccf35e..c3c3188c34d744312cf8e1dbe3e2c30426e87248 100644 (file)
@@ -152,12 +152,12 @@ static long wafwdt_ioctl(struct file *file, unsigned int cmd,
                        return -EFAULT;
 
                if (options & WDIOS_DISABLECARD) {
-                       wafwdt_start();
+                       wafwdt_stop();
                        retval = 0;
                }
 
                if (options & WDIOS_ENABLECARD) {
-                       wafwdt_stop();
+                       wafwdt_start();
                        retval = 0;
                }
 
index 909c78650d3e8942e7b7c3bb70de90d7c208f9fc..5d7113c7e501d1b5b00f8f991bbe475a6504dd88 100644 (file)
@@ -212,10 +212,10 @@ static long wm8350_wdt_ioctl(struct file *file, unsigned int cmd,
 
                /* Setting both simultaneously means at least one must fail */
                if (options == WDIOS_DISABLECARD)
-                       ret = wm8350_wdt_start(wm8350);
+                       ret = wm8350_wdt_stop(wm8350);
 
                if (options == WDIOS_ENABLECARD)
-                       ret = wm8350_wdt_stop(wm8350);
+                       ret = wm8350_wdt_start(wm8350);
                break;
        }
 
index ba6eda4b51433e20e87f4ae977cdd565f7d3c300..0edb91c0de6bf2b69d6de3ec41d43d38b5d823b5 100644 (file)
@@ -1,5 +1,6 @@
 #include <linux/bio.h>
 #include <linux/io.h>
+#include <linux/export.h>
 #include <xen/page.h>
 
 bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
@@ -11,3 +12,4 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
        return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
                ((mfn1 == mfn2) || ((mfn1+1) == mfn2));
 }
+EXPORT_SYMBOL(xen_biovec_phys_mergeable);
index 1cd94daa71db8379443115dd052bc5705fc74432..b4d4eac761db6241042e60db3b604caa9150e91e 100644 (file)
@@ -948,9 +948,12 @@ static void gnttab_request_version(void)
        int rc;
        struct gnttab_set_version gsv;
 
-       gsv.version = 2;
+       if (xen_hvm_domain())
+               gsv.version = 1;
+       else
+               gsv.version = 2;
        rc = HYPERVISOR_grant_table_op(GNTTABOP_set_version, &gsv, 1);
-       if (rc == 0) {
+       if (rc == 0 && gsv.version == 2) {
                grant_table_version = 2;
                gnttab_interface = &gnttab_v2_ops;
        } else if (grant_table_version == 2) {
index 3832e303c33aca5ceabf0bb1540e02a28b7765bc..596e6a7b17d68bc3ebf424cc11344da1cc32f1e5 100644 (file)
@@ -221,7 +221,7 @@ static int register_balloon(struct device *dev)
 {
        int i, error;
 
-       error = bus_register(&balloon_subsys);
+       error = subsys_system_register(&balloon_subsys, NULL);
        if (error)
                return error;
 
index 5f43bfba3c7a76906244b47fce383ab310c08c5b..0d15a3d113a2c77bb119913f6fa2229a7c0605fa 100644 (file)
@@ -82,7 +82,6 @@ fw-shipped-$(CONFIG_SERIAL_8250_CS) += cis/MT5634ZLX.cis cis/RS-COM-2P.cis \
 fw-shipped-$(CONFIG_PCMCIA_SMC91C92) += ositech/Xilinx7OD.bin
 fw-shipped-$(CONFIG_SCSI_ADVANSYS) += advansys/mcode.bin advansys/38C1600.bin \
                                      advansys/3550.bin advansys/38C0800.bin
-fw-shipped-$(CONFIG_SCSI_ISCI) += isci/isci_firmware.bin
 fw-shipped-$(CONFIG_SCSI_QLOGIC_1280) += qlogic/1040.bin qlogic/1280.bin \
                                         qlogic/12160.bin
 fw-shipped-$(CONFIG_SCSI_QLOGICPTI) += qlogic/isp1000.bin
diff --git a/firmware/isci/isci_firmware.bin.ihex b/firmware/isci/isci_firmware.bin.ihex
deleted file mode 100644 (file)
index 2e66195..0000000
+++ /dev/null
@@ -1,16 +0,0 @@
-:10000000495343554F454D42E80018100002000087
-:1000100000000000000000000101000000000000DE
-:10002000FFFFCF5F0100000008DD0B0000FC0F00A8
-:10003000097C0B006EFC0A00FFFFCF5F010000008F
-:1000400008DD0B0000FC0F00097C0B006EFC0A00B1
-:10005000FFFFCF5F0100000008DD0B0000FC0F0078
-:10006000097C0B006EFC0A00FFFFCF5F010000005F
-:1000700008DD0B0000FC0F00097C0B006EFC0A0081
-:100080000101000000000000FFFFCF5F0200000040
-:1000900008DD0B0000FC0F00097C0B006EFC0A0061
-:1000A000FFFFCF5F0200000008DD0B0000FC0F0027
-:1000B000097C0B006EFC0A00FFFFCF5F020000000E
-:1000C00008DD0B0000FC0F00097C0B006EFC0A0031
-:1000D000FFFFCF5F0200000008DD0B0000FC0F00F7
-:0800E000097C0B006EFC0A0014
-:00000001FF
index ecb9fd3be1433838911f4c947627436d816cb136..d33f01c08b60b329247fe4d8729a387af7c263e4 100644 (file)
@@ -31,3 +31,22 @@ config BTRFS_FS_POSIX_ACL
          Linux website <http://acl.bestbits.at/>.
 
          If you don't know what Access Control Lists are, say N
+
+config BTRFS_FS_CHECK_INTEGRITY
+       bool "Btrfs with integrity check tool compiled in (DANGEROUS)"
+       depends on BTRFS_FS
+       help
+         Adds code that examines all block write requests (including
+         writes of the super block). The goal is to verify that the
+         state of the filesystem on disk is always consistent, i.e.,
+         after a power-loss or kernel panic event the filesystem is
+         in a consistent state.
+
+         If the integrity check tool is included and activated in
+         the mount options, plenty of kernel memory is used, and
+         plenty of additional CPU cycles are spent. Enabling this
+         functionality is not intended for normal use.
+
+         In most cases, unless you are a btrfs developer who needs
+         to verify the integrity of (super)-block write requests
+         during the run of a regression test, say N
index c0ddfd29c5e5a348464d5c3d15a77a7708fd8d79..0c4fa2befae793f1a6845322d7ba71aaa5da4374 100644 (file)
@@ -8,6 +8,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
           extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
           export.o tree-log.o free-space-cache.o zlib.o lzo.o \
           compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
-          reada.o backref.o
+          reada.o backref.o ulist.o
 
 btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
+btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
index 22c64fff1bd524b213ce8b13a233f861680d8424..633c701a287d4be0242d5b1706515aad4845ecfb 100644 (file)
 #include "ctree.h"
 #include "disk-io.h"
 #include "backref.h"
+#include "ulist.h"
+#include "transaction.h"
+#include "delayed-ref.h"
 
-struct __data_ref {
+/*
+ * this structure records all encountered refs on the way up to the root
+ */
+struct __prelim_ref {
        struct list_head list;
-       u64 inum;
-       u64 root;
-       u64 extent_data_item_offset;
+       u64 root_id;
+       struct btrfs_key key;
+       int level;
+       int count;
+       u64 parent;
+       u64 wanted_disk_byte;
 };
 
-struct __shared_ref {
-       struct list_head list;
+static int __add_prelim_ref(struct list_head *head, u64 root_id,
+                           struct btrfs_key *key, int level, u64 parent,
+                           u64 wanted_disk_byte, int count)
+{
+       struct __prelim_ref *ref;
+
+       /* in case we're adding delayed refs, we're holding the refs spinlock */
+       ref = kmalloc(sizeof(*ref), GFP_ATOMIC);
+       if (!ref)
+               return -ENOMEM;
+
+       ref->root_id = root_id;
+       if (key)
+               ref->key = *key;
+       else
+               memset(&ref->key, 0, sizeof(ref->key));
+
+       ref->level = level;
+       ref->count = count;
+       ref->parent = parent;
+       ref->wanted_disk_byte = wanted_disk_byte;
+       list_add_tail(&ref->list, head);
+
+       return 0;
+}
+
+static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
+                               struct ulist *parents,
+                               struct extent_buffer *eb, int level,
+                               u64 wanted_objectid, u64 wanted_disk_byte)
+{
+       int ret;
+       int slot;
+       struct btrfs_file_extent_item *fi;
+       struct btrfs_key key;
        u64 disk_byte;
-};
+
+add_parent:
+       ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
+       if (ret < 0)
+               return ret;
+
+       if (level != 0)
+               return 0;
+
+       /*
+        * if the current leaf is full with EXTENT_DATA items, we must
+        * check the next one if that holds a reference as well.
+        * ref->count cannot be used to skip this check.
+        * repeat this until we don't find any additional EXTENT_DATA items.
+        */
+       while (1) {
+               ret = btrfs_next_leaf(root, path);
+               if (ret < 0)
+                       return ret;
+               if (ret)
+                       return 0;
+
+               eb = path->nodes[0];
+               for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) {
+                       btrfs_item_key_to_cpu(eb, &key, slot);
+                       if (key.objectid != wanted_objectid ||
+                           key.type != BTRFS_EXTENT_DATA_KEY)
+                               return 0;
+                       fi = btrfs_item_ptr(eb, slot,
+                                               struct btrfs_file_extent_item);
+                       disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
+                       if (disk_byte == wanted_disk_byte)
+                               goto add_parent;
+               }
+       }
+
+       return 0;
+}
+
+/*
+ * resolve an indirect backref in the form (root_id, key, level)
+ * to a logical address
+ */
+static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
+                                       struct __prelim_ref *ref,
+                                       struct ulist *parents)
+{
+       struct btrfs_path *path;
+       struct btrfs_root *root;
+       struct btrfs_key root_key;
+       struct btrfs_key key = {0};
+       struct extent_buffer *eb;
+       int ret = 0;
+       int root_level;
+       int level = ref->level;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       root_key.objectid = ref->root_id;
+       root_key.type = BTRFS_ROOT_ITEM_KEY;
+       root_key.offset = (u64)-1;
+       root = btrfs_read_fs_root_no_name(fs_info, &root_key);
+       if (IS_ERR(root)) {
+               ret = PTR_ERR(root);
+               goto out;
+       }
+
+       rcu_read_lock();
+       root_level = btrfs_header_level(root->node);
+       rcu_read_unlock();
+
+       if (root_level + 1 == level)
+               goto out;
+
+       path->lowest_level = level;
+       ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0);
+       pr_debug("search slot in root %llu (level %d, ref count %d) returned "
+                "%d for key (%llu %u %llu)\n",
+                (unsigned long long)ref->root_id, level, ref->count, ret,
+                (unsigned long long)ref->key.objectid, ref->key.type,
+                (unsigned long long)ref->key.offset);
+       if (ret < 0)
+               goto out;
+
+       eb = path->nodes[level];
+       if (!eb) {
+               WARN_ON(1);
+               ret = 1;
+               goto out;
+       }
+
+       if (level == 0) {
+               if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) {
+                       ret = btrfs_next_leaf(root, path);
+                       if (ret)
+                               goto out;
+                       eb = path->nodes[0];
+               }
+
+               btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
+       }
+
+       /* the last two parameters will only be used for level == 0 */
+       ret = add_all_parents(root, path, parents, eb, level, key.objectid,
+                               ref->wanted_disk_byte);
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
+/*
+ * resolve all indirect backrefs from the list
+ */
+static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
+                                  struct list_head *head)
+{
+       int err;
+       int ret = 0;
+       struct __prelim_ref *ref;
+       struct __prelim_ref *ref_safe;
+       struct __prelim_ref *new_ref;
+       struct ulist *parents;
+       struct ulist_node *node;
+
+       parents = ulist_alloc(GFP_NOFS);
+       if (!parents)
+               return -ENOMEM;
+
+       /*
+        * _safe allows us to insert directly after the current item without
+        * iterating over the newly inserted items.
+        * we're also allowed to re-assign ref during iteration.
+        */
+       list_for_each_entry_safe(ref, ref_safe, head, list) {
+               if (ref->parent)        /* already direct */
+                       continue;
+               if (ref->count == 0)
+                       continue;
+               err = __resolve_indirect_ref(fs_info, ref, parents);
+               if (err) {
+                       if (ret == 0)
+                               ret = err;
+                       continue;
+               }
+
+               /* we put the first parent into the ref at hand */
+               node = ulist_next(parents, NULL);
+               ref->parent = node ? node->val : 0;
+
+               /* additional parents require new refs being added here */
+               while ((node = ulist_next(parents, node))) {
+                       new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
+                       if (!new_ref) {
+                               ret = -ENOMEM;
+                               break;
+                       }
+                       memcpy(new_ref, ref, sizeof(*ref));
+                       new_ref->parent = node->val;
+                       list_add(&new_ref->list, &ref->list);
+               }
+               ulist_reinit(parents);
+       }
+
+       ulist_free(parents);
+       return ret;
+}
+
+/*
+ * merge two lists of backrefs and adjust counts accordingly
+ *
+ * mode = 1: merge identical keys, if key is set
+ * mode = 2: merge identical parents
+ */
+static int __merge_refs(struct list_head *head, int mode)
+{
+       struct list_head *pos1;
+
+       list_for_each(pos1, head) {
+               struct list_head *n2;
+               struct list_head *pos2;
+               struct __prelim_ref *ref1;
+
+               ref1 = list_entry(pos1, struct __prelim_ref, list);
+
+               if (mode == 1 && ref1->key.type == 0)
+                       continue;
+               for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
+                    pos2 = n2, n2 = pos2->next) {
+                       struct __prelim_ref *ref2;
+
+                       ref2 = list_entry(pos2, struct __prelim_ref, list);
+
+                       if (mode == 1) {
+                               if (memcmp(&ref1->key, &ref2->key,
+                                          sizeof(ref1->key)) ||
+                                   ref1->level != ref2->level ||
+                                   ref1->root_id != ref2->root_id)
+                                       continue;
+                               ref1->count += ref2->count;
+                       } else {
+                               if (ref1->parent != ref2->parent)
+                                       continue;
+                               ref1->count += ref2->count;
+                       }
+                       list_del(&ref2->list);
+                       kfree(ref2);
+               }
+
+       }
+       return 0;
+}
+
+/*
+ * add all currently queued delayed refs from this head whose seq nr is
+ * smaller or equal that seq to the list
+ */
+static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
+                             struct btrfs_key *info_key,
+                             struct list_head *prefs)
+{
+       struct btrfs_delayed_extent_op *extent_op = head->extent_op;
+       struct rb_node *n = &head->node.rb_node;
+       int sgn;
+       int ret = 0;
+
+       if (extent_op && extent_op->update_key)
+               btrfs_disk_key_to_cpu(info_key, &extent_op->key);
+
+       while ((n = rb_prev(n))) {
+               struct btrfs_delayed_ref_node *node;
+               node = rb_entry(n, struct btrfs_delayed_ref_node,
+                               rb_node);
+               if (node->bytenr != head->node.bytenr)
+                       break;
+               WARN_ON(node->is_head);
+
+               if (node->seq > seq)
+                       continue;
+
+               switch (node->action) {
+               case BTRFS_ADD_DELAYED_EXTENT:
+               case BTRFS_UPDATE_DELAYED_HEAD:
+                       WARN_ON(1);
+                       continue;
+               case BTRFS_ADD_DELAYED_REF:
+                       sgn = 1;
+                       break;
+               case BTRFS_DROP_DELAYED_REF:
+                       sgn = -1;
+                       break;
+               default:
+                       BUG_ON(1);
+               }
+               switch (node->type) {
+               case BTRFS_TREE_BLOCK_REF_KEY: {
+                       struct btrfs_delayed_tree_ref *ref;
+
+                       ref = btrfs_delayed_node_to_tree_ref(node);
+                       ret = __add_prelim_ref(prefs, ref->root, info_key,
+                                              ref->level + 1, 0, node->bytenr,
+                                              node->ref_mod * sgn);
+                       break;
+               }
+               case BTRFS_SHARED_BLOCK_REF_KEY: {
+                       struct btrfs_delayed_tree_ref *ref;
+
+                       ref = btrfs_delayed_node_to_tree_ref(node);
+                       ret = __add_prelim_ref(prefs, ref->root, info_key,
+                                              ref->level + 1, ref->parent,
+                                              node->bytenr,
+                                              node->ref_mod * sgn);
+                       break;
+               }
+               case BTRFS_EXTENT_DATA_REF_KEY: {
+                       struct btrfs_delayed_data_ref *ref;
+                       struct btrfs_key key;
+
+                       ref = btrfs_delayed_node_to_data_ref(node);
+
+                       key.objectid = ref->objectid;
+                       key.type = BTRFS_EXTENT_DATA_KEY;
+                       key.offset = ref->offset;
+                       ret = __add_prelim_ref(prefs, ref->root, &key, 0, 0,
+                                              node->bytenr,
+                                              node->ref_mod * sgn);
+                       break;
+               }
+               case BTRFS_SHARED_DATA_REF_KEY: {
+                       struct btrfs_delayed_data_ref *ref;
+                       struct btrfs_key key;
+
+                       ref = btrfs_delayed_node_to_data_ref(node);
+
+                       key.objectid = ref->objectid;
+                       key.type = BTRFS_EXTENT_DATA_KEY;
+                       key.offset = ref->offset;
+                       ret = __add_prelim_ref(prefs, ref->root, &key, 0,
+                                              ref->parent, node->bytenr,
+                                              node->ref_mod * sgn);
+                       break;
+               }
+               default:
+                       WARN_ON(1);
+               }
+               BUG_ON(ret);
+       }
+
+       return 0;
+}
+
+/*
+ * add all inline backrefs for bytenr to the list
+ */
+static int __add_inline_refs(struct btrfs_fs_info *fs_info,
+                            struct btrfs_path *path, u64 bytenr,
+                            struct btrfs_key *info_key, int *info_level,
+                            struct list_head *prefs)
+{
+       int ret = 0;
+       int slot;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+       unsigned long ptr;
+       unsigned long end;
+       struct btrfs_extent_item *ei;
+       u64 flags;
+       u64 item_size;
+
+       /*
+        * enumerate all inline refs
+        */
+       leaf = path->nodes[0];
+       slot = path->slots[0] - 1;
+
+       item_size = btrfs_item_size_nr(leaf, slot);
+       BUG_ON(item_size < sizeof(*ei));
+
+       ei = btrfs_item_ptr(leaf, slot, struct btrfs_extent_item);
+       flags = btrfs_extent_flags(leaf, ei);
+
+       ptr = (unsigned long)(ei + 1);
+       end = (unsigned long)ei + item_size;
+
+       if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+               struct btrfs_tree_block_info *info;
+               struct btrfs_disk_key disk_key;
+
+               info = (struct btrfs_tree_block_info *)ptr;
+               *info_level = btrfs_tree_block_level(leaf, info);
+               btrfs_tree_block_key(leaf, info, &disk_key);
+               btrfs_disk_key_to_cpu(info_key, &disk_key);
+               ptr += sizeof(struct btrfs_tree_block_info);
+               BUG_ON(ptr > end);
+       } else {
+               BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
+       }
+
+       while (ptr < end) {
+               struct btrfs_extent_inline_ref *iref;
+               u64 offset;
+               int type;
+
+               iref = (struct btrfs_extent_inline_ref *)ptr;
+               type = btrfs_extent_inline_ref_type(leaf, iref);
+               offset = btrfs_extent_inline_ref_offset(leaf, iref);
+
+               switch (type) {
+               case BTRFS_SHARED_BLOCK_REF_KEY:
+                       ret = __add_prelim_ref(prefs, 0, info_key,
+                                               *info_level + 1, offset,
+                                               bytenr, 1);
+                       break;
+               case BTRFS_SHARED_DATA_REF_KEY: {
+                       struct btrfs_shared_data_ref *sdref;
+                       int count;
+
+                       sdref = (struct btrfs_shared_data_ref *)(iref + 1);
+                       count = btrfs_shared_data_ref_count(leaf, sdref);
+                       ret = __add_prelim_ref(prefs, 0, NULL, 0, offset,
+                                              bytenr, count);
+                       break;
+               }
+               case BTRFS_TREE_BLOCK_REF_KEY:
+                       ret = __add_prelim_ref(prefs, offset, info_key,
+                                              *info_level + 1, 0, bytenr, 1);
+                       break;
+               case BTRFS_EXTENT_DATA_REF_KEY: {
+                       struct btrfs_extent_data_ref *dref;
+                       int count;
+                       u64 root;
+
+                       dref = (struct btrfs_extent_data_ref *)(&iref->offset);
+                       count = btrfs_extent_data_ref_count(leaf, dref);
+                       key.objectid = btrfs_extent_data_ref_objectid(leaf,
+                                                                     dref);
+                       key.type = BTRFS_EXTENT_DATA_KEY;
+                       key.offset = btrfs_extent_data_ref_offset(leaf, dref);
+                       root = btrfs_extent_data_ref_root(leaf, dref);
+                       ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr,
+                                               count);
+                       break;
+               }
+               default:
+                       WARN_ON(1);
+               }
+               BUG_ON(ret);
+               ptr += btrfs_extent_inline_ref_size(type);
+       }
+
+       return 0;
+}
+
+/*
+ * add all non-inline backrefs for bytenr to the list
+ */
+static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
+                           struct btrfs_path *path, u64 bytenr,
+                           struct btrfs_key *info_key, int info_level,
+                           struct list_head *prefs)
+{
+       struct btrfs_root *extent_root = fs_info->extent_root;
+       int ret;
+       int slot;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+
+       while (1) {
+               ret = btrfs_next_item(extent_root, path);
+               if (ret < 0)
+                       break;
+               if (ret) {
+                       ret = 0;
+                       break;
+               }
+
+               slot = path->slots[0];
+               leaf = path->nodes[0];
+               btrfs_item_key_to_cpu(leaf, &key, slot);
+
+               if (key.objectid != bytenr)
+                       break;
+               if (key.type < BTRFS_TREE_BLOCK_REF_KEY)
+                       continue;
+               if (key.type > BTRFS_SHARED_DATA_REF_KEY)
+                       break;
+
+               switch (key.type) {
+               case BTRFS_SHARED_BLOCK_REF_KEY:
+                       ret = __add_prelim_ref(prefs, 0, info_key,
+                                               info_level + 1, key.offset,
+                                               bytenr, 1);
+                       break;
+               case BTRFS_SHARED_DATA_REF_KEY: {
+                       struct btrfs_shared_data_ref *sdref;
+                       int count;
+
+                       sdref = btrfs_item_ptr(leaf, slot,
+                                             struct btrfs_shared_data_ref);
+                       count = btrfs_shared_data_ref_count(leaf, sdref);
+                       ret = __add_prelim_ref(prefs, 0, NULL, 0, key.offset,
+                                               bytenr, count);
+                       break;
+               }
+               case BTRFS_TREE_BLOCK_REF_KEY:
+                       ret = __add_prelim_ref(prefs, key.offset, info_key,
+                                               info_level + 1, 0, bytenr, 1);
+                       break;
+               case BTRFS_EXTENT_DATA_REF_KEY: {
+                       struct btrfs_extent_data_ref *dref;
+                       int count;
+                       u64 root;
+
+                       dref = btrfs_item_ptr(leaf, slot,
+                                             struct btrfs_extent_data_ref);
+                       count = btrfs_extent_data_ref_count(leaf, dref);
+                       key.objectid = btrfs_extent_data_ref_objectid(leaf,
+                                                                     dref);
+                       key.type = BTRFS_EXTENT_DATA_KEY;
+                       key.offset = btrfs_extent_data_ref_offset(leaf, dref);
+                       root = btrfs_extent_data_ref_root(leaf, dref);
+                       ret = __add_prelim_ref(prefs, root, &key, 0, 0,
+                                               bytenr, count);
+                       break;
+               }
+               default:
+                       WARN_ON(1);
+               }
+               BUG_ON(ret);
+       }
+
+       return ret;
+}
+
+/*
+ * this adds all existing backrefs (inline backrefs, backrefs and delayed
+ * refs) for the given bytenr to the refs list, merges duplicates and resolves
+ * indirect refs to their parent bytenr.
+ * When roots are found, they're added to the roots list
+ *
+ * FIXME some caching might speed things up
+ */
+static int find_parent_nodes(struct btrfs_trans_handle *trans,
+                            struct btrfs_fs_info *fs_info, u64 bytenr,
+                            u64 seq, struct ulist *refs, struct ulist *roots)
+{
+       struct btrfs_key key;
+       struct btrfs_path *path;
+       struct btrfs_key info_key = { 0 };
+       struct btrfs_delayed_ref_root *delayed_refs = NULL;
+       struct btrfs_delayed_ref_head *head = NULL;
+       int info_level = 0;
+       int ret;
+       struct list_head prefs_delayed;
+       struct list_head prefs;
+       struct __prelim_ref *ref;
+
+       INIT_LIST_HEAD(&prefs);
+       INIT_LIST_HEAD(&prefs_delayed);
+
+       key.objectid = bytenr;
+       key.type = BTRFS_EXTENT_ITEM_KEY;
+       key.offset = (u64)-1;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       /*
+        * grab both a lock on the path and a lock on the delayed ref head.
+        * We need both to get a consistent picture of how the refs look
+        * at a specified point in time
+        */
+again:
+       ret = btrfs_search_slot(trans, fs_info->extent_root, &key, path, 0, 0);
+       if (ret < 0)
+               goto out;
+       BUG_ON(ret == 0);
+
+       /*
+        * look if there are updates for this ref queued and lock the head
+        */
+       delayed_refs = &trans->transaction->delayed_refs;
+       spin_lock(&delayed_refs->lock);
+       head = btrfs_find_delayed_ref_head(trans, bytenr);
+       if (head) {
+               if (!mutex_trylock(&head->mutex)) {
+                       atomic_inc(&head->node.refs);
+                       spin_unlock(&delayed_refs->lock);
+
+                       btrfs_release_path(path);
+
+                       /*
+                        * Mutex was contended, block until it's
+                        * released and try again
+                        */
+                       mutex_lock(&head->mutex);
+                       mutex_unlock(&head->mutex);
+                       btrfs_put_delayed_ref(&head->node);
+                       goto again;
+               }
+               ret = __add_delayed_refs(head, seq, &info_key, &prefs_delayed);
+               if (ret)
+                       goto out;
+       }
+       spin_unlock(&delayed_refs->lock);
+
+       if (path->slots[0]) {
+               struct extent_buffer *leaf;
+               int slot;
+
+               leaf = path->nodes[0];
+               slot = path->slots[0] - 1;
+               btrfs_item_key_to_cpu(leaf, &key, slot);
+               if (key.objectid == bytenr &&
+                   key.type == BTRFS_EXTENT_ITEM_KEY) {
+                       ret = __add_inline_refs(fs_info, path, bytenr,
+                                               &info_key, &info_level, &prefs);
+                       if (ret)
+                               goto out;
+                       ret = __add_keyed_refs(fs_info, path, bytenr, &info_key,
+                                              info_level, &prefs);
+                       if (ret)
+                               goto out;
+               }
+       }
+       btrfs_release_path(path);
+
+       /*
+        * when adding the delayed refs above, the info_key might not have
+        * been known yet. Go over the list and replace the missing keys
+        */
+       list_for_each_entry(ref, &prefs_delayed, list) {
+               if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
+                       memcpy(&ref->key, &info_key, sizeof(ref->key));
+       }
+       list_splice_init(&prefs_delayed, &prefs);
+
+       ret = __merge_refs(&prefs, 1);
+       if (ret)
+               goto out;
+
+       ret = __resolve_indirect_refs(fs_info, &prefs);
+       if (ret)
+               goto out;
+
+       ret = __merge_refs(&prefs, 2);
+       if (ret)
+               goto out;
+
+       while (!list_empty(&prefs)) {
+               ref = list_first_entry(&prefs, struct __prelim_ref, list);
+               list_del(&ref->list);
+               if (ref->count < 0)
+                       WARN_ON(1);
+               if (ref->count && ref->root_id && ref->parent == 0) {
+                       /* no parent == root of tree */
+                       ret = ulist_add(roots, ref->root_id, 0, GFP_NOFS);
+                       BUG_ON(ret < 0);
+               }
+               if (ref->count && ref->parent) {
+                       ret = ulist_add(refs, ref->parent, 0, GFP_NOFS);
+                       BUG_ON(ret < 0);
+               }
+               kfree(ref);
+       }
+
+out:
+       if (head)
+               mutex_unlock(&head->mutex);
+       btrfs_free_path(path);
+       while (!list_empty(&prefs)) {
+               ref = list_first_entry(&prefs, struct __prelim_ref, list);
+               list_del(&ref->list);
+               kfree(ref);
+       }
+       while (!list_empty(&prefs_delayed)) {
+               ref = list_first_entry(&prefs_delayed, struct __prelim_ref,
+                                      list);
+               list_del(&ref->list);
+               kfree(ref);
+       }
+
+       return ret;
+}
+
+/*
+ * Finds all leafs with a reference to the specified combination of bytenr and
+ * offset. The leafs will be stored in the leafs ulist, which must be freed
+ * with ulist_free.
+ *
+ * returns 0 on success, <0 on error
+ */
+static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
+                               struct btrfs_fs_info *fs_info, u64 bytenr,
+                               u64 num_bytes, u64 seq, struct ulist **leafs)
+{
+       struct ulist *tmp;
+       int ret;
+
+       tmp = ulist_alloc(GFP_NOFS);
+       if (!tmp)
+               return -ENOMEM;
+       *leafs = ulist_alloc(GFP_NOFS);
+       if (!*leafs) {
+               ulist_free(tmp);
+               return -ENOMEM;
+       }
+
+       ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp);
+       ulist_free(tmp);
+
+       if (ret < 0 && ret != -ENOENT) {
+               ulist_free(*leafs);
+               return ret;
+       }
+
+       return 0;
+}
+
+/*
+ * Walk all backrefs for a given extent to find all roots that reference this
+ * extent. Walking a backref means finding all extents that reference this
+ * extent and, in turn, walking the backrefs of those, too. Naturally this is a
+ * recursive process, but here it is implemented iteratively: we find all
+ * referencing extents for the extent in question and put them on a list. In
+ * turn, we find all referencing extents for those, further appending to the
+ * list. The way we iterate the list allows adding more elements after the
+ * current one while iterating. The process stops when we reach the end of the
+ * list. Found roots are added to the roots ulist.
+ *
+ * returns 0 on success, < 0 on error.
+ */
+int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+                               struct btrfs_fs_info *fs_info, u64 bytenr,
+                               u64 num_bytes, u64 seq, struct ulist **roots)
+{
+       struct ulist *tmp;
+       struct ulist_node *node = NULL;
+       int ret;
+
+       tmp = ulist_alloc(GFP_NOFS);
+       if (!tmp)
+               return -ENOMEM;
+       *roots = ulist_alloc(GFP_NOFS);
+       if (!*roots) {
+               ulist_free(tmp);
+               return -ENOMEM;
+       }
+
+       while (1) {
+               ret = find_parent_nodes(trans, fs_info, bytenr, seq,
+                                       tmp, *roots);
+               if (ret < 0 && ret != -ENOENT) {
+                       ulist_free(tmp);
+                       ulist_free(*roots);
+                       return ret;
+               }
+               node = ulist_next(tmp, node);
+               if (!node)
+                       break;
+               bytenr = node->val;
+       }
+
+       ulist_free(tmp);
+       return 0;
+}
+
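/*
 * Illustrative sketch, not part of this change: how a caller might use
 * btrfs_find_all_roots() and walk the resulting ulist, following the same
 * pattern as iterate_extent_inodes() further below. The helper name
 * example_print_roots and its pr_debug output are hypothetical; num_bytes
 * is passed as -1 just like the call in iterate_extent_inodes().
 */
static int example_print_roots(struct btrfs_trans_handle *trans,
                               struct btrfs_fs_info *fs_info,
                               u64 bytenr, u64 seq)
{
        struct ulist *roots;
        struct ulist_node *node = NULL;
        int ret;

        ret = btrfs_find_all_roots(trans, fs_info, bytenr, (u64)-1, seq, &roots);
        if (ret)
                return ret;

        /* ulist_next(ulist, NULL) starts the iteration, NULL marks the end */
        while ((node = ulist_next(roots, node)))
                pr_debug("extent %llu is referenced from root %llu\n",
                         (unsigned long long)bytenr,
                         (unsigned long long)node->val);

        ulist_free(roots);
        return 0;
}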
 
 static int __inode_info(u64 inum, u64 ioff, u8 key_type,
                        struct btrfs_root *fs_root, struct btrfs_path *path,
@@ -181,8 +952,11 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
        btrfs_item_key_to_cpu(path->nodes[0], found_key, path->slots[0]);
        if (found_key->type != BTRFS_EXTENT_ITEM_KEY ||
            found_key->objectid > logical ||
-           found_key->objectid + found_key->offset <= logical)
+           found_key->objectid + found_key->offset <= logical) {
+               pr_debug("logical %llu is not within any extent\n",
+                        (unsigned long long)logical);
                return -ENOENT;
+       }
 
        eb = path->nodes[0];
        item_size = btrfs_item_size_nr(eb, path->slots[0]);
@@ -191,6 +965,13 @@ int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        flags = btrfs_extent_flags(eb, ei);
 
+       pr_debug("logical %llu is at position %llu within the extent (%llu "
+                "EXTENT_ITEM %llu) flags %#llx size %u\n",
+                (unsigned long long)logical,
+                (unsigned long long)(logical - found_key->objectid),
+                (unsigned long long)found_key->objectid,
+                (unsigned long long)found_key->offset,
+                (unsigned long long)flags, item_size);
        if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
                return BTRFS_EXTENT_FLAG_TREE_BLOCK;
        if (flags & BTRFS_EXTENT_FLAG_DATA)
@@ -287,128 +1068,11 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
        return 0;
 }
 
-static int __data_list_add(struct list_head *head, u64 inum,
-                               u64 extent_data_item_offset, u64 root)
-{
-       struct __data_ref *ref;
-
-       ref = kmalloc(sizeof(*ref), GFP_NOFS);
-       if (!ref)
-               return -ENOMEM;
-
-       ref->inum = inum;
-       ref->extent_data_item_offset = extent_data_item_offset;
-       ref->root = root;
-       list_add_tail(&ref->list, head);
-
-       return 0;
-}
-
-static int __data_list_add_eb(struct list_head *head, struct extent_buffer *eb,
-                               struct btrfs_extent_data_ref *dref)
-{
-       return __data_list_add(head, btrfs_extent_data_ref_objectid(eb, dref),
-                               btrfs_extent_data_ref_offset(eb, dref),
-                               btrfs_extent_data_ref_root(eb, dref));
-}
-
-static int __shared_list_add(struct list_head *head, u64 disk_byte)
-{
-       struct __shared_ref *ref;
-
-       ref = kmalloc(sizeof(*ref), GFP_NOFS);
-       if (!ref)
-               return -ENOMEM;
-
-       ref->disk_byte = disk_byte;
-       list_add_tail(&ref->list, head);
-
-       return 0;
-}
-
-static int __iter_shared_inline_ref_inodes(struct btrfs_fs_info *fs_info,
-                                          u64 logical, u64 inum,
-                                          u64 extent_data_item_offset,
-                                          u64 extent_offset,
-                                          struct btrfs_path *path,
-                                          struct list_head *data_refs,
-                                          iterate_extent_inodes_t *iterate,
-                                          void *ctx)
-{
-       u64 ref_root;
-       u32 item_size;
-       struct btrfs_key key;
-       struct extent_buffer *eb;
-       struct btrfs_extent_item *ei;
-       struct btrfs_extent_inline_ref *eiref;
-       struct __data_ref *ref;
-       int ret;
-       int type;
-       int last;
-       unsigned long ptr = 0;
-
-       WARN_ON(!list_empty(data_refs));
-       ret = extent_from_logical(fs_info, logical, path, &key);
-       if (ret & BTRFS_EXTENT_FLAG_DATA)
-               ret = -EIO;
-       if (ret < 0)
-               goto out;
-
-       eb = path->nodes[0];
-       ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
-       item_size = btrfs_item_size_nr(eb, path->slots[0]);
-
-       ret = 0;
-       ref_root = 0;
-       /*
-        * as done in iterate_extent_inodes, we first build a list of refs to
-        * iterate, then free the path and then iterate them to avoid deadlocks.
-        */
-       do {
-               last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
-                                               &eiref, &type);
-               if (last < 0) {
-                       ret = last;
-                       goto out;
-               }
-               if (type == BTRFS_TREE_BLOCK_REF_KEY ||
-                   type == BTRFS_SHARED_BLOCK_REF_KEY) {
-                       ref_root = btrfs_extent_inline_ref_offset(eb, eiref);
-                       ret = __data_list_add(data_refs, inum,
-                                               extent_data_item_offset,
-                                               ref_root);
-               }
-       } while (!ret && !last);
-
-       btrfs_release_path(path);
-
-       if (ref_root == 0) {
-               printk(KERN_ERR "btrfs: failed to find tree block ref "
-                       "for shared data backref %llu\n", logical);
-               WARN_ON(1);
-               ret = -EIO;
-       }
-
-out:
-       while (!list_empty(data_refs)) {
-               ref = list_first_entry(data_refs, struct __data_ref, list);
-               list_del(&ref->list);
-               if (!ret)
-                       ret = iterate(ref->inum, extent_offset +
-                                       ref->extent_data_item_offset,
-                                       ref->root, ctx);
-               kfree(ref);
-       }
-
-       return ret;
-}
-
-static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
-                                   u64 logical, u64 orig_extent_item_objectid,
-                                   u64 extent_offset, struct btrfs_path *path,
-                                   struct list_head *data_refs,
-                                   iterate_extent_inodes_t *iterate,
-                                   void *ctx)
+static int iterate_leaf_refs(struct btrfs_fs_info *fs_info,
+                               struct btrfs_path *path, u64 logical,
+                               u64 orig_extent_item_objectid,
+                               u64 extent_item_pos, u64 root,
+                               iterate_extent_inodes_t *iterate, void *ctx)
 {
        u64 disk_byte;
        struct btrfs_key key;
@@ -416,8 +1080,10 @@ static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
        struct extent_buffer *eb;
        int slot;
        int nritems;
-       int ret;
-       int found = 0;
+       int ret = 0;
+       int extent_type;
+       u64 data_offset;
+       u64 data_len;
 
        eb = read_tree_block(fs_info->tree_root, logical,
                                fs_info->tree_root->leafsize, 0);
@@ -435,149 +1101,99 @@ static int __iter_shared_inline_ref(struct btrfs_fs_info *fs_info,
                if (key.type != BTRFS_EXTENT_DATA_KEY)
                        continue;
                fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
-               if (!fi) {
-                       free_extent_buffer(eb);
-                       return -EIO;
-               }
+               extent_type = btrfs_file_extent_type(eb, fi);
+               if (extent_type == BTRFS_FILE_EXTENT_INLINE)
+                       continue;
+               /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
                disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
-               if (disk_byte != orig_extent_item_objectid) {
-                       if (found)
-                               break;
-                       else
-                               continue;
-               }
-               ++found;
-               ret = __iter_shared_inline_ref_inodes(fs_info, logical,
-                                                       key.objectid,
-                                                       key.offset,
-                                                       extent_offset, path,
-                                                       data_refs,
-                                                       iterate, ctx);
-               if (ret)
-                       break;
-       }
+               if (disk_byte != orig_extent_item_objectid)
+                       continue;
 
-       if (!found) {
-               printk(KERN_ERR "btrfs: failed to follow shared data backref "
-                       "to parent %llu\n", logical);
-               WARN_ON(1);
-               ret = -EIO;
+               data_offset = btrfs_file_extent_offset(eb, fi);
+               data_len = btrfs_file_extent_num_bytes(eb, fi);
+
+               if (extent_item_pos < data_offset ||
+                   extent_item_pos >= data_offset + data_len)
+                       continue;
+
+               pr_debug("ref for %llu resolved, key (%llu EXTENT_DATA %llu), "
+                               "root %llu\n", orig_extent_item_objectid,
+                               key.objectid, key.offset, root);
+               ret = iterate(key.objectid,
+                               key.offset + (extent_item_pos - data_offset),
+                               root, ctx);
+               if (ret) {
+                       pr_debug("stopping iteration because ret=%d\n", ret);
+                       break;
+               }
        }
 
        free_extent_buffer(eb);
+
        return ret;
 }
 
 /*
  * calls iterate() for every inode that references the extent identified by
- * the given parameters. will use the path given as a parameter and return it
- * released.
+ * the given parameters.
  * when the iterator function returns a non-zero value, iteration stops.
+ * path is guaranteed to be in released state when iterate() is called.
  */
 int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
                                struct btrfs_path *path,
-                               u64 extent_item_objectid,
-                               u64 extent_offset,
+                               u64 extent_item_objectid, u64 extent_item_pos,
                                iterate_extent_inodes_t *iterate, void *ctx)
 {
-       unsigned long ptr = 0;
-       int last;
        int ret;
-       int type;
-       u64 logical;
-       u32 item_size;
-       struct btrfs_extent_inline_ref *eiref;
-       struct btrfs_extent_data_ref *dref;
-       struct extent_buffer *eb;
-       struct btrfs_extent_item *ei;
-       struct btrfs_key key;
        struct list_head data_refs = LIST_HEAD_INIT(data_refs);
        struct list_head shared_refs = LIST_HEAD_INIT(shared_refs);
-       struct __data_ref *ref_d;
-       struct __shared_ref *ref_s;
+       struct btrfs_trans_handle *trans;
+       struct ulist *refs;
+       struct ulist *roots;
+       struct ulist_node *ref_node = NULL;
+       struct ulist_node *root_node = NULL;
+       struct seq_list seq_elem;
+       struct btrfs_delayed_ref_root *delayed_refs;
+
+       trans = btrfs_join_transaction(fs_info->extent_root);
+       if (IS_ERR(trans))
+               return PTR_ERR(trans);
+
+       pr_debug("resolving all inodes for extent %llu\n",
+                       extent_item_objectid);
+
+       delayed_refs = &trans->transaction->delayed_refs;
+       spin_lock(&delayed_refs->lock);
+       btrfs_get_delayed_seq(delayed_refs, &seq_elem);
+       spin_unlock(&delayed_refs->lock);
+
+       ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
+                                  extent_item_pos, seq_elem.seq,
+                                  &refs);
 
-       eb = path->nodes[0];
-       ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
-       item_size = btrfs_item_size_nr(eb, path->slots[0]);
-
-       /* first we iterate the inline refs, ... */
-       do {
-               last = __get_extent_inline_ref(&ptr, eb, ei, item_size,
-                                               &eiref, &type);
-               if (last == -ENOENT) {
-                       ret = 0;
-                       break;
-               }
-               if (last < 0) {
-                       ret = last;
-                       break;
-               }
-
-               if (type == BTRFS_EXTENT_DATA_REF_KEY) {
-                       dref = (struct btrfs_extent_data_ref *)(&eiref->offset);
-                       ret = __data_list_add_eb(&data_refs, eb, dref);
-               } else if (type == BTRFS_SHARED_DATA_REF_KEY) {
-                       logical = btrfs_extent_inline_ref_offset(eb, eiref);
-                       ret = __shared_list_add(&shared_refs, logical);
-               }
-       } while (!ret && !last);
+       if (ret)
+               goto out;
 
-       /* ... then we proceed to in-tree references and ... */
-       while (!ret) {
-               ++path->slots[0];
-               if (path->slots[0] > btrfs_header_nritems(eb)) {
-                       ret = btrfs_next_leaf(fs_info->extent_root, path);
-                       if (ret) {
-                               if (ret == 1)
-                                       ret = 0; /* we're done */
-                               break;
-                       }
-                       eb = path->nodes[0];
-               }
-               btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
-               if (key.objectid != extent_item_objectid)
+       while (!ret && (ref_node = ulist_next(refs, ref_node))) {
+               ret = btrfs_find_all_roots(trans, fs_info, ref_node->val, -1,
+                                               seq_elem.seq, &roots);
+               if (ret)
                        break;
-               if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
-                       dref = btrfs_item_ptr(eb, path->slots[0],
-                                               struct btrfs_extent_data_ref);
-                       ret = __data_list_add_eb(&data_refs, eb, dref);
-               } else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
-                       ret = __shared_list_add(&shared_refs, key.offset);
+               while (!ret && (root_node = ulist_next(roots, root_node))) {
+                       pr_debug("root %llu references leaf %llu\n",
+                                       root_node->val, ref_node->val);
+                       ret = iterate_leaf_refs(fs_info, path, ref_node->val,
+                                               extent_item_objectid,
+                                               extent_item_pos, root_node->val,
+                                               iterate, ctx);
                }
        }
 
-       btrfs_release_path(path);
-
-       /*
-        * ... only at the very end we can process the refs we found. this is
-        * because the iterator function we call is allowed to make tree lookups
-        * and we have to avoid deadlocks. additionally, we need more tree
-        * lookups ourselves for shared data refs.
-        */
-       while (!list_empty(&data_refs)) {
-               ref_d = list_first_entry(&data_refs, struct __data_ref, list);
-               list_del(&ref_d->list);
-               if (!ret)
-                       ret = iterate(ref_d->inum, extent_offset +
-                                       ref_d->extent_data_item_offset,
-                                       ref_d->root, ctx);
-               kfree(ref_d);
-       }
-
-       while (!list_empty(&shared_refs)) {
-               ref_s = list_first_entry(&shared_refs, struct __shared_ref,
-                                       list);
-               list_del(&ref_s->list);
-               if (!ret)
-                       ret = __iter_shared_inline_ref(fs_info,
-                                                       ref_s->disk_byte,
-                                                       extent_item_objectid,
-                                                       extent_offset, path,
-                                                       &data_refs,
-                                                       iterate, ctx);
-               kfree(ref_s);
-       }
-
+       ulist_free(refs);
+       ulist_free(roots);
+out:
+       btrfs_put_delayed_seq(delayed_refs, &seq_elem);
+       btrfs_end_transaction(trans, fs_info->extent_root);
        return ret;
 }
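/*
 * Illustrative sketch, not part of this change: a minimal
 * iterate_extent_inodes_t callback, assuming the (inum, offset, root, ctx)
 * signature implied by the iterate() call sites above. Returning a non-zero
 * value stops the iteration. The name example_count_inode_refs and the
 * counter context are hypothetical.
 */
static int example_count_inode_refs(u64 inum, u64 offset, u64 root, void *ctx)
{
        u64 *counter = ctx;

        pr_debug("inode %llu, file offset %llu, root %llu\n",
                 (unsigned long long)inum,
                 (unsigned long long)offset,
                 (unsigned long long)root);
        ++*counter;
        return 0;       /* keep iterating */
}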
 
@@ -586,19 +1202,20 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
                                iterate_extent_inodes_t *iterate, void *ctx)
 {
        int ret;
-       u64 offset;
+       u64 extent_item_pos;
        struct btrfs_key found_key;
 
        ret = extent_from_logical(fs_info, logical, path,
                                        &found_key);
+       btrfs_release_path(path);
        if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
                ret = -EINVAL;
        if (ret < 0)
                return ret;
 
-       offset = logical - found_key.objectid;
+       extent_item_pos = logical - found_key.objectid;
        ret = iterate_extent_inodes(fs_info, path, found_key.objectid,
-                                       offset, iterate, ctx);
+                                       extent_item_pos, iterate, ctx);
 
        return ret;
 }
@@ -643,6 +1260,10 @@ static int iterate_irefs(u64 inum, struct btrfs_root *fs_root,
                for (cur = 0; cur < btrfs_item_size(eb, item); cur += len) {
                        name_len = btrfs_inode_ref_name_len(eb, iref);
                        /* path must be released before calling iterate()! */
+                       pr_debug("following ref at offset %u for inode %llu in "
+                                "tree %llu\n", cur,
+                                (unsigned long long)found_key.objectid,
+                                (unsigned long long)fs_root->objectid);
                        ret = iterate(parent, iref, eb, ctx);
                        if (ret) {
                                free_extent_buffer(eb);
@@ -683,10 +1304,14 @@ static int inode_to_path(u64 inum, struct btrfs_inode_ref *iref,
                return PTR_ERR(fspath);
 
        if (fspath > fspath_min) {
+               pr_debug("path resolved: %s\n", fspath);
                ipath->fspath->val[i] = (u64)(unsigned long)fspath;
                ++ipath->fspath->elem_cnt;
                ipath->fspath->bytes_left = fspath - fspath_min;
        } else {
+               pr_debug("missed path, not enough space. missing bytes: %lu, "
+                        "constructed so far: %s\n",
+                        (unsigned long)(fspath_min - fspath), fspath_min);
                ++ipath->fspath->elem_missed;
                ipath->fspath->bytes_missing += fspath_min - fspath;
                ipath->fspath->bytes_left = 0;
index 92618837cb8f94a3a799ea3d08093d9b33aab336..d00dfa9ca9342c96f5057af09fb06418cd943cb6 100644 (file)
@@ -20,6 +20,7 @@
 #define __BTRFS_BACKREF__
 
 #include "ioctl.h"
+#include "ulist.h"
 
 struct inode_fs_paths {
        struct btrfs_path               *btrfs_path;
@@ -54,6 +55,10 @@ int iterate_inodes_from_logical(u64 logical, struct btrfs_fs_info *fs_info,
 
 int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
 
+int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
+                               struct btrfs_fs_info *fs_info, u64 bytenr,
+                               u64 num_bytes, u64 seq, struct ulist **roots);
+
 struct btrfs_data_container *init_data_container(u32 total_bytes);
 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
                                        struct btrfs_path *path);
index 634608d2a6d03b5d8741e573de0b142ff8cb6310..9b9b15fd5204347c5ef2931fb186af679cb0d369 100644 (file)
@@ -51,6 +51,9 @@ struct btrfs_inode {
        /* held while logging the inode in tree-log.c */
        struct mutex log_mutex;
 
+       /* held while doing delalloc reservations */
+       struct mutex delalloc_mutex;
+
        /* used to order data wrt metadata */
        struct btrfs_ordered_inode_tree ordered_tree;
 
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
new file mode 100644 (file)
index 0000000..b669a7d
--- /dev/null
@@ -0,0 +1,3069 @@
+/*
+ * Copyright (C) STRATO AG 2011.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+/*
+ * This module can be used to catch cases when the btrfs kernel
+ * code executes write requests to the disk that bring the file
+ * system into an inconsistent state. In such a state, a power-loss
+ * or kernel panic event could leave the data on disk lost or at
+ * least damaged.
+ *
+ * Code is added that examines all block write requests during
+ * runtime (including writes of the super block). Three rules
+ * are verified and an error is printed on violation of the
+ * rules:
+ * 1. It is not allowed to write a disk block which is
+ *    currently referenced by the super block (either directly
+ *    or indirectly).
+ * 2. When a super block is written, it is verified that all
+ *    referenced (directly or indirectly) blocks fulfill the
+ *    following requirements:
+ *    2a. All referenced blocks have either been present when
+ *        the file system was mounted (i.e., they have been
+ *        referenced by the super block), or they have been
+ *        written since then, the write completion callback
+ *        was called, and a FLUSH request to the device where
+ *        these blocks are located was received and completed.
+ *    2b. All referenced blocks need to have a generation
+ *        number which is equal to the parent's number.
+ *
+ * One issue that was found using this module was that the log
+ * tree on disk became temporarily corrupted because disk blocks
+ * that had been in use for the log tree had been freed and
+ * reused too early, while being referenced by the written super
+ * block.
+ *
+ * The search term in the kernel log that can be used to filter
+ * on the existence of detected integrity issues is
+ * "btrfs: attempt".
+ *
+ * The integrity check is enabled via mount options. These
+ * mount options are only supported if the integrity check
+ * tool is compiled by defining BTRFS_FS_CHECK_INTEGRITY.
+ *
+ * Example #1, apply integrity checks to all metadata:
+ * mount /dev/sdb1 /mnt -o check_int
+ *
+ * Example #2, apply integrity checks to all metadata and
+ * to data extents:
+ * mount /dev/sdb1 /mnt -o check_int_data
+ *
+ * Example #3, apply integrity checks to all metadata and dump
+ * the tree that the super block references to kernel messages
+ * each time after a super block was written:
+ * mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263
+ *
+ * If the integrity check tool is included and activated in
+ * the mount options, plenty of kernel memory is used, and
+ * plenty of additional CPU cycles are spent. Enabling this
+ * functionality is not intended for normal use. Unless you are
+ * a btrfs developer who needs to verify the integrity of
+ * (super)block write requests, do not enable the config option
+ * BTRFS_FS_CHECK_INTEGRITY, which includes and compiles the
+ * integrity check tool.
+ */
+
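/*
 * Illustrative note, not part of this change: with one of the mount options
 * from the examples above enabled, detected problems can be filtered out of
 * the kernel log with the search term mentioned above, e.g.:
 *
 *   dmesg | grep "btrfs: attempt"
 */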
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/buffer_head.h>
+#include <linux/mutex.h>
+#include <linux/crc32c.h>
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include "ctree.h"
+#include "disk-io.h"
+#include "transaction.h"
+#include "extent_io.h"
+#include "disk-io.h"
+#include "volumes.h"
+#include "print-tree.h"
+#include "locking.h"
+#include "check-integrity.h"
+
+#define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
+#define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
+#define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100
+#define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051
+#define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807
+#define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530
+#define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300
+#define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6)   /* in characters,
+                                                        * excluding " [...]" */
+#define BTRFSIC_BLOCK_SIZE PAGE_SIZE
+
+#define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)
+
+/*
+ * The definition of the bitmask fields for the print_mask.
+ * They are specified with the mount option check_integrity_print_mask.
+ */
+#define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE                    0x00000001
+#define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION                0x00000002
+#define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE                 0x00000004
+#define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE                        0x00000008
+#define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH                       0x00000010
+#define BTRFSIC_PRINT_MASK_END_IO_BIO_BH                       0x00000020
+#define BTRFSIC_PRINT_MASK_VERBOSE                             0x00000040
+#define BTRFSIC_PRINT_MASK_VERY_VERBOSE                                0x00000080
+#define BTRFSIC_PRINT_MASK_INITIAL_TREE                                0x00000100
+#define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES                   0x00000200
+#define BTRFSIC_PRINT_MASK_INITIAL_DATABASE                    0x00000400
+#define BTRFSIC_PRINT_MASK_NUM_COPIES                          0x00000800
+#define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS               0x00001000
+
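/*
 * Illustrative note, not part of this change: the print mask 263 used in
 * Example #3 above is 0x107, i.e.
 *   BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE              (0x001)
 * | BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION  (0x002)
 * | BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE           (0x004)
 * | BTRFSIC_PRINT_MASK_INITIAL_TREE                  (0x100)
 */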
+struct btrfsic_dev_state;
+struct btrfsic_state;
+
+struct btrfsic_block {
+       u32 magic_num;          /* only used for debug purposes */
+       unsigned int is_metadata:1;     /* if it is meta-data, not data-data */
+       unsigned int is_superblock:1;   /* if it is one of the superblocks */
+       unsigned int is_iodone:1;       /* if is done by lower subsystem */
+       unsigned int iodone_w_error:1;  /* error was indicated to endio */
+       unsigned int never_written:1;   /* block was added because it was
+                                        * referenced, not because it was
+                                        * written */
+       unsigned int mirror_num:2;      /* large enough to hold
+                                        * BTRFS_SUPER_MIRROR_MAX */
+       struct btrfsic_dev_state *dev_state;
+       u64 dev_bytenr;         /* key, physical byte num on disk */
+       u64 logical_bytenr;     /* logical byte num on disk */
+       u64 generation;
+       struct btrfs_disk_key disk_key; /* extra info to print in case of
+                                        * issues, will not always be correct */
+       struct list_head collision_resolving_node;      /* list node */
+       struct list_head all_blocks_node;       /* list node */
+
+       /* the following two lists contain block_link items */
+       struct list_head ref_to_list;   /* list */
+       struct list_head ref_from_list; /* list */
+       struct btrfsic_block *next_in_same_bio;
+       void *orig_bio_bh_private;
+       union {
+               bio_end_io_t *bio;
+               bh_end_io_t *bh;
+       } orig_bio_bh_end_io;
+       int submit_bio_bh_rw;
+       u64 flush_gen; /* only valid if !never_written */
+};
+
+/*
+ * Elements of this type are allocated dynamically; they are required because
+ * each block object can refer to, and can be referenced from, multiple
+ * other blocks. The key used to look them up in the hashtable is the
+ * dev_bytenr of the block referred to plus that of the block referring to it.
+ * The fact that they are searchable via a hashtable and that a
+ * ref_cnt is maintained is not required for the btrfs integrity
+ * check algorithm itself; it is only used to make the output more
+ * helpful in case an error is detected (an error is defined
+ * as a write operation to a block while that block is still referenced).
+ */
+struct btrfsic_block_link {
+       u32 magic_num;          /* only used for debug purposes */
+       u32 ref_cnt;
+       struct list_head node_ref_to;   /* list node */
+       struct list_head node_ref_from; /* list node */
+       struct list_head collision_resolving_node;      /* list node */
+       struct btrfsic_block *block_ref_to;
+       struct btrfsic_block *block_ref_from;
+       u64 parent_generation;
+};
+
+struct btrfsic_dev_state {
+       u32 magic_num;          /* only used for debug purposes */
+       struct block_device *bdev;
+       struct btrfsic_state *state;
+       struct list_head collision_resolving_node;      /* list node */
+       struct btrfsic_block dummy_block_for_bio_bh_flush;
+       u64 last_flush_gen;
+       char name[BDEVNAME_SIZE];
+};
+
+struct btrfsic_block_hashtable {
+       struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE];
+};
+
+struct btrfsic_block_link_hashtable {
+       struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE];
+};
+
+struct btrfsic_dev_state_hashtable {
+       struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE];
+};
+
+struct btrfsic_block_data_ctx {
+       u64 start;              /* virtual bytenr */
+       u64 dev_bytenr;         /* physical bytenr on device */
+       u32 len;
+       struct btrfsic_dev_state *dev;
+       char *data;
+       struct buffer_head *bh; /* do not use if set to NULL */
+};
+
+/* This structure is used to implement recursion without occupying
+ * any stack space; refer to btrfsic_process_metablock() */
+struct btrfsic_stack_frame {
+       u32 magic;
+       u32 nr;
+       int error;
+       int i;
+       int limit_nesting;
+       int num_copies;
+       int mirror_num;
+       struct btrfsic_block *block;
+       struct btrfsic_block_data_ctx *block_ctx;
+       struct btrfsic_block *next_block;
+       struct btrfsic_block_data_ctx next_block_ctx;
+       struct btrfs_header *hdr;
+       struct btrfsic_stack_frame *prev;
+};
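/*
 * Illustrative sketch, not part of this change: the general pattern that the
 * stack frame structure above enables. Instead of calling itself, a walker
 * keeps an explicit chain of heap-allocated frames linked via ->prev, so an
 * arbitrarily deep tree can be processed with constant kernel stack usage.
 * The frame layout and node type below are simplified, hypothetical stand-ins
 * for what btrfsic_process_metablock() does.
 */
struct example_node {
        int nr_children;
        struct example_node **children;
};

struct example_frame {
        struct example_node *node;      /* node currently being visited */
        int i;                          /* index of the next child to visit */
        struct example_frame *prev;     /* the "caller's" frame */
};

static int example_walk(struct example_node *root)
{
        struct example_frame *sf;
        int error = 0;

        sf = kzalloc(sizeof(*sf), GFP_NOFS);
        if (!sf)
                return -ENOMEM;
        sf->node = root;

        while (sf) {
                if (sf->i < sf->node->nr_children) {
                        /* "recurse": push a frame for the next child */
                        struct example_frame *child;

                        child = kzalloc(sizeof(*child), GFP_NOFS);
                        if (!child) {
                                error = -ENOMEM;
                                break;
                        }
                        child->node = sf->node->children[sf->i++];
                        child->prev = sf;
                        sf = child;
                } else {
                        /* "return": pop back to the caller's frame */
                        struct example_frame *prev = sf->prev;

                        kfree(sf);
                        sf = prev;
                }
        }

        /* on error, free whatever frames are still allocated */
        while (sf) {
                struct example_frame *prev = sf->prev;

                kfree(sf);
                sf = prev;
        }
        return error;
}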
+
+/* Some state per mounted filesystem */
+struct btrfsic_state {
+       u32 print_mask;
+       int include_extent_data;
+       int csum_size;
+       struct list_head all_blocks_list;
+       struct btrfsic_block_hashtable block_hashtable;
+       struct btrfsic_block_link_hashtable block_link_hashtable;
+       struct btrfs_root *root;
+       u64 max_superblock_generation;
+       struct btrfsic_block *latest_superblock;
+};
+
+static void btrfsic_block_init(struct btrfsic_block *b);
+static struct btrfsic_block *btrfsic_block_alloc(void);
+static void btrfsic_block_free(struct btrfsic_block *b);
+static void btrfsic_block_link_init(struct btrfsic_block_link *n);
+static struct btrfsic_block_link *btrfsic_block_link_alloc(void);
+static void btrfsic_block_link_free(struct btrfsic_block_link *n);
+static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds);
+static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void);
+static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds);
+static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h);
+static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
+                                       struct btrfsic_block_hashtable *h);
+static void btrfsic_block_hashtable_remove(struct btrfsic_block *b);
+static struct btrfsic_block *btrfsic_block_hashtable_lookup(
+               struct block_device *bdev,
+               u64 dev_bytenr,
+               struct btrfsic_block_hashtable *h);
+static void btrfsic_block_link_hashtable_init(
+               struct btrfsic_block_link_hashtable *h);
+static void btrfsic_block_link_hashtable_add(
+               struct btrfsic_block_link *l,
+               struct btrfsic_block_link_hashtable *h);
+static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l);
+static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
+               struct block_device *bdev_ref_to,
+               u64 dev_bytenr_ref_to,
+               struct block_device *bdev_ref_from,
+               u64 dev_bytenr_ref_from,
+               struct btrfsic_block_link_hashtable *h);
+static void btrfsic_dev_state_hashtable_init(
+               struct btrfsic_dev_state_hashtable *h);
+static void btrfsic_dev_state_hashtable_add(
+               struct btrfsic_dev_state *ds,
+               struct btrfsic_dev_state_hashtable *h);
+static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds);
+static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
+               struct block_device *bdev,
+               struct btrfsic_dev_state_hashtable *h);
+static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void);
+static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf);
+static int btrfsic_process_superblock(struct btrfsic_state *state,
+                                     struct btrfs_fs_devices *fs_devices);
+static int btrfsic_process_metablock(struct btrfsic_state *state,
+                                    struct btrfsic_block *block,
+                                    struct btrfsic_block_data_ctx *block_ctx,
+                                    struct btrfs_header *hdr,
+                                    int limit_nesting, int force_iodone_flag);
+static int btrfsic_create_link_to_next_block(
+               struct btrfsic_state *state,
+               struct btrfsic_block *block,
+               struct btrfsic_block_data_ctx
+               *block_ctx, u64 next_bytenr,
+               int limit_nesting,
+               struct btrfsic_block_data_ctx *next_block_ctx,
+               struct btrfsic_block **next_blockp,
+               int force_iodone_flag,
+               int *num_copiesp, int *mirror_nump,
+               struct btrfs_disk_key *disk_key,
+               u64 parent_generation);
+static int btrfsic_handle_extent_data(struct btrfsic_state *state,
+                                     struct btrfsic_block *block,
+                                     struct btrfsic_block_data_ctx *block_ctx,
+                                     u32 item_offset, int force_iodone_flag);
+static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
+                            struct btrfsic_block_data_ctx *block_ctx_out,
+                            int mirror_num);
+static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
+                                 u32 len, struct block_device *bdev,
+                                 struct btrfsic_block_data_ctx *block_ctx_out);
+static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
+static int btrfsic_read_block(struct btrfsic_state *state,
+                             struct btrfsic_block_data_ctx *block_ctx);
+static void btrfsic_dump_database(struct btrfsic_state *state);
+static int btrfsic_test_for_metadata(struct btrfsic_state *state,
+                                    const u8 *data, unsigned int size);
+static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
+                                         u64 dev_bytenr, u8 *mapped_data,
+                                         unsigned int len, struct bio *bio,
+                                         int *bio_is_patched,
+                                         struct buffer_head *bh,
+                                         int submit_bio_bh_rw);
+static int btrfsic_process_written_superblock(
+               struct btrfsic_state *state,
+               struct btrfsic_block *const block,
+               struct btrfs_super_block *const super_hdr);
+static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status);
+static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate);
+static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state,
+                                             const struct btrfsic_block *block,
+                                             int recursion_level);
+static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
+                                       struct btrfsic_block *const block,
+                                       int recursion_level);
+static void btrfsic_print_add_link(const struct btrfsic_state *state,
+                                  const struct btrfsic_block_link *l);
+static void btrfsic_print_rem_link(const struct btrfsic_state *state,
+                                  const struct btrfsic_block_link *l);
+static char btrfsic_get_block_type(const struct btrfsic_state *state,
+                                  const struct btrfsic_block *block);
+static void btrfsic_dump_tree(const struct btrfsic_state *state);
+static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
+                                 const struct btrfsic_block *block,
+                                 int indent_level);
+static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
+               struct btrfsic_state *state,
+               struct btrfsic_block_data_ctx *next_block_ctx,
+               struct btrfsic_block *next_block,
+               struct btrfsic_block *from_block,
+               u64 parent_generation);
+static struct btrfsic_block *btrfsic_block_lookup_or_add(
+               struct btrfsic_state *state,
+               struct btrfsic_block_data_ctx *block_ctx,
+               const char *additional_string,
+               int is_metadata,
+               int is_iodone,
+               int never_written,
+               int mirror_num,
+               int *was_created);
+static int btrfsic_process_superblock_dev_mirror(
+               struct btrfsic_state *state,
+               struct btrfsic_dev_state *dev_state,
+               struct btrfs_device *device,
+               int superblock_mirror_num,
+               struct btrfsic_dev_state **selected_dev_state,
+               struct btrfs_super_block *selected_super);
+static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
+               struct block_device *bdev);
+static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
+                                          u64 bytenr,
+                                          struct btrfsic_dev_state *dev_state,
+                                          u64 dev_bytenr, char *data);
+
+static struct mutex btrfsic_mutex;
+static int btrfsic_is_initialized;
+static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable;
+
+
+static void btrfsic_block_init(struct btrfsic_block *b)
+{
+       b->magic_num = BTRFSIC_BLOCK_MAGIC_NUMBER;
+       b->dev_state = NULL;
+       b->dev_bytenr = 0;
+       b->logical_bytenr = 0;
+       b->generation = BTRFSIC_GENERATION_UNKNOWN;
+       b->disk_key.objectid = 0;
+       b->disk_key.type = 0;
+       b->disk_key.offset = 0;
+       b->is_metadata = 0;
+       b->is_superblock = 0;
+       b->is_iodone = 0;
+       b->iodone_w_error = 0;
+       b->never_written = 0;
+       b->mirror_num = 0;
+       b->next_in_same_bio = NULL;
+       b->orig_bio_bh_private = NULL;
+       b->orig_bio_bh_end_io.bio = NULL;
+       INIT_LIST_HEAD(&b->collision_resolving_node);
+       INIT_LIST_HEAD(&b->all_blocks_node);
+       INIT_LIST_HEAD(&b->ref_to_list);
+       INIT_LIST_HEAD(&b->ref_from_list);
+       b->submit_bio_bh_rw = 0;
+       b->flush_gen = 0;
+}
+
+static struct btrfsic_block *btrfsic_block_alloc(void)
+{
+       struct btrfsic_block *b;
+
+       b = kzalloc(sizeof(*b), GFP_NOFS);
+       if (NULL != b)
+               btrfsic_block_init(b);
+
+       return b;
+}
+
+static void btrfsic_block_free(struct btrfsic_block *b)
+{
+       BUG_ON(!(NULL == b || BTRFSIC_BLOCK_MAGIC_NUMBER == b->magic_num));
+       kfree(b);
+}
+
+static void btrfsic_block_link_init(struct btrfsic_block_link *l)
+{
+       l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER;
+       l->ref_cnt = 1;
+       INIT_LIST_HEAD(&l->node_ref_to);
+       INIT_LIST_HEAD(&l->node_ref_from);
+       INIT_LIST_HEAD(&l->collision_resolving_node);
+       l->block_ref_to = NULL;
+       l->block_ref_from = NULL;
+}
+
+static struct btrfsic_block_link *btrfsic_block_link_alloc(void)
+{
+       struct btrfsic_block_link *l;
+
+       l = kzalloc(sizeof(*l), GFP_NOFS);
+       if (NULL != l)
+               btrfsic_block_link_init(l);
+
+       return l;
+}
+
+static void btrfsic_block_link_free(struct btrfsic_block_link *l)
+{
+       BUG_ON(!(NULL == l || BTRFSIC_BLOCK_LINK_MAGIC_NUMBER == l->magic_num));
+       kfree(l);
+}
+
+static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds)
+{
+       ds->magic_num = BTRFSIC_DEV2STATE_MAGIC_NUMBER;
+       ds->bdev = NULL;
+       ds->state = NULL;
+       ds->name[0] = '\0';
+       INIT_LIST_HEAD(&ds->collision_resolving_node);
+       ds->last_flush_gen = 0;
+       btrfsic_block_init(&ds->dummy_block_for_bio_bh_flush);
+       ds->dummy_block_for_bio_bh_flush.is_iodone = 1;
+       ds->dummy_block_for_bio_bh_flush.dev_state = ds;
+}
+
+static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void)
+{
+       struct btrfsic_dev_state *ds;
+
+       ds = kzalloc(sizeof(*ds), GFP_NOFS);
+       if (NULL != ds)
+               btrfsic_dev_state_init(ds);
+
+       return ds;
+}
+
+static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds)
+{
+       BUG_ON(!(NULL == ds ||
+                BTRFSIC_DEV2STATE_MAGIC_NUMBER == ds->magic_num));
+       kfree(ds);
+}
+
+static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h)
+{
+       int i;
+
+       for (i = 0; i < BTRFSIC_BLOCK_HASHTABLE_SIZE; i++)
+               INIT_LIST_HEAD(h->table + i);
+}
+
+static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
+                                       struct btrfsic_block_hashtable *h)
+{
+       const unsigned int hashval =
+           (((unsigned int)(b->dev_bytenr >> 16)) ^
+            ((unsigned int)((uintptr_t)b->dev_state->bdev))) &
+            (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
+
+       list_add(&b->collision_resolving_node, h->table + hashval);
+}
+
+static void btrfsic_block_hashtable_remove(struct btrfsic_block *b)
+{
+       list_del(&b->collision_resolving_node);
+}
+
+static struct btrfsic_block *btrfsic_block_hashtable_lookup(
+               struct block_device *bdev,
+               u64 dev_bytenr,
+               struct btrfsic_block_hashtable *h)
+{
+       const unsigned int hashval =
+           (((unsigned int)(dev_bytenr >> 16)) ^
+            ((unsigned int)((uintptr_t)bdev))) &
+            (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
+       struct list_head *elem;
+
+       list_for_each(elem, h->table + hashval) {
+               struct btrfsic_block *const b =
+                   list_entry(elem, struct btrfsic_block,
+                              collision_resolving_node);
+
+               if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
+                       return b;
+       }
+
+       return NULL;
+}
+
+static void btrfsic_block_link_hashtable_init(
+               struct btrfsic_block_link_hashtable *h)
+{
+       int i;
+
+       for (i = 0; i < BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE; i++)
+               INIT_LIST_HEAD(h->table + i);
+}
+
+static void btrfsic_block_link_hashtable_add(
+               struct btrfsic_block_link *l,
+               struct btrfsic_block_link_hashtable *h)
+{
+       const unsigned int hashval =
+           (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^
+            ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^
+            ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^
+            ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev)))
+            & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
+
+       BUG_ON(NULL == l->block_ref_to);
+       BUG_ON(NULL == l->block_ref_from);
+       list_add(&l->collision_resolving_node, h->table + hashval);
+}
+
+static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l)
+{
+       list_del(&l->collision_resolving_node);
+}
+
+static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
+               struct block_device *bdev_ref_to,
+               u64 dev_bytenr_ref_to,
+               struct block_device *bdev_ref_from,
+               u64 dev_bytenr_ref_from,
+               struct btrfsic_block_link_hashtable *h)
+{
+       const unsigned int hashval =
+           (((unsigned int)(dev_bytenr_ref_to >> 16)) ^
+            ((unsigned int)(dev_bytenr_ref_from >> 16)) ^
+            ((unsigned int)((uintptr_t)bdev_ref_to)) ^
+            ((unsigned int)((uintptr_t)bdev_ref_from))) &
+            (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
+       struct list_head *elem;
+
+       list_for_each(elem, h->table + hashval) {
+               struct btrfsic_block_link *const l =
+                   list_entry(elem, struct btrfsic_block_link,
+                              collision_resolving_node);
+
+               BUG_ON(NULL == l->block_ref_to);
+               BUG_ON(NULL == l->block_ref_from);
+               if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
+                   l->block_ref_to->dev_bytenr == dev_bytenr_ref_to &&
+                   l->block_ref_from->dev_state->bdev == bdev_ref_from &&
+                   l->block_ref_from->dev_bytenr == dev_bytenr_ref_from)
+                       return l;
+       }
+
+       return NULL;
+}
+
+static void btrfsic_dev_state_hashtable_init(
+               struct btrfsic_dev_state_hashtable *h)
+{
+       int i;
+
+       for (i = 0; i < BTRFSIC_DEV2STATE_HASHTABLE_SIZE; i++)
+               INIT_LIST_HEAD(h->table + i);
+}
+
+static void btrfsic_dev_state_hashtable_add(
+               struct btrfsic_dev_state *ds,
+               struct btrfsic_dev_state_hashtable *h)
+{
+       const unsigned int hashval =
+           (((unsigned int)((uintptr_t)ds->bdev)) &
+            (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
+
+       list_add(&ds->collision_resolving_node, h->table + hashval);
+}
+
+static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds)
+{
+       list_del(&ds->collision_resolving_node);
+}
+
+static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
+               struct block_device *bdev,
+               struct btrfsic_dev_state_hashtable *h)
+{
+       const unsigned int hashval =
+           (((unsigned int)((uintptr_t)bdev)) &
+            (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
+       struct list_head *elem;
+
+       list_for_each(elem, h->table + hashval) {
+               struct btrfsic_dev_state *const ds =
+                   list_entry(elem, struct btrfsic_dev_state,
+                              collision_resolving_node);
+
+               if (ds->bdev == bdev)
+                       return ds;
+       }
+
+       return NULL;
+}
+
+static int btrfsic_process_superblock(struct btrfsic_state *state,
+                                     struct btrfs_fs_devices *fs_devices)
+{
+       int ret;
+       struct btrfs_super_block *selected_super;
+       struct list_head *dev_head = &fs_devices->devices;
+       struct btrfs_device *device;
+       struct btrfsic_dev_state *selected_dev_state = NULL;
+       int pass;
+
+       BUG_ON(NULL == state);
+       selected_super = kmalloc(sizeof(*selected_super), GFP_NOFS);
+       if (NULL == selected_super) {
+               printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+               return -1;
+       }
+
+       list_for_each_entry(device, dev_head, dev_list) {
+               int i;
+               struct btrfsic_dev_state *dev_state;
+
+               if (!device->bdev || !device->name)
+                       continue;
+
+               dev_state = btrfsic_dev_state_lookup(device->bdev);
+               BUG_ON(NULL == dev_state);
+               for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
+                       ret = btrfsic_process_superblock_dev_mirror(
+                                       state, dev_state, device, i,
+                                       &selected_dev_state, selected_super);
+                       if (0 != ret && 0 == i) {
+                               kfree(selected_super);
+                               return ret;
+                       }
+               }
+       }
+
+       if (NULL == state->latest_superblock) {
+               printk(KERN_INFO "btrfsic: no superblock found!\n");
+               kfree(selected_super);
+               return -1;
+       }
+
+       state->csum_size = btrfs_super_csum_size(selected_super);
+
+       for (pass = 0; pass < 3; pass++) {
+               int num_copies;
+               int mirror_num;
+               u64 next_bytenr;
+
+               switch (pass) {
+               case 0:
+                       next_bytenr = btrfs_super_root(selected_super);
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
+                               printk(KERN_INFO "root@%llu\n",
+                                      (unsigned long long)next_bytenr);
+                       break;
+               case 1:
+                       next_bytenr = btrfs_super_chunk_root(selected_super);
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
+                               printk(KERN_INFO "chunk@%llu\n",
+                                      (unsigned long long)next_bytenr);
+                       break;
+               case 2:
+                       next_bytenr = btrfs_super_log_root(selected_super);
+                       if (0 == next_bytenr)
+                               continue;
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
+                               printk(KERN_INFO "log@%llu\n",
+                                      (unsigned long long)next_bytenr);
+                       break;
+               }
+
+               num_copies =
+                   btrfs_num_copies(&state->root->fs_info->mapping_tree,
+                                    next_bytenr, PAGE_SIZE);
+               if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
+                       printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+                              (unsigned long long)next_bytenr, num_copies);
+
+               for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
+                       struct btrfsic_block *next_block;
+                       struct btrfsic_block_data_ctx tmp_next_block_ctx;
+                       struct btrfsic_block_link *l;
+                       struct btrfs_header *hdr;
+
+                       ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+                                               &tmp_next_block_ctx,
+                                               mirror_num);
+                       if (ret) {
+                               printk(KERN_INFO "btrfsic:"
+                                      " btrfsic_map_block(root @%llu,"
+                                      " mirror %d) failed!\n",
+                                      (unsigned long long)next_bytenr,
+                                      mirror_num);
+                               kfree(selected_super);
+                               return -1;
+                       }
+
+                       next_block = btrfsic_block_hashtable_lookup(
+                                       tmp_next_block_ctx.dev->bdev,
+                                       tmp_next_block_ctx.dev_bytenr,
+                                       &state->block_hashtable);
+                       BUG_ON(NULL == next_block);
+
+                       l = btrfsic_block_link_hashtable_lookup(
+                                       tmp_next_block_ctx.dev->bdev,
+                                       tmp_next_block_ctx.dev_bytenr,
+                                       state->latest_superblock->dev_state->
+                                       bdev,
+                                       state->latest_superblock->dev_bytenr,
+                                       &state->block_link_hashtable);
+                       BUG_ON(NULL == l);
+
+                       ret = btrfsic_read_block(state, &tmp_next_block_ctx);
+                       if (ret < (int)BTRFSIC_BLOCK_SIZE) {
+                               printk(KERN_INFO
+                                      "btrfsic: read @logical %llu failed!\n",
+                                      (unsigned long long)
+                                      tmp_next_block_ctx.start);
+                               btrfsic_release_block_ctx(&tmp_next_block_ctx);
+                               kfree(selected_super);
+                               return -1;
+                       }
+
+                       hdr = (struct btrfs_header *)tmp_next_block_ctx.data;
+                       ret = btrfsic_process_metablock(state,
+                                                       next_block,
+                                                       &tmp_next_block_ctx,
+                                                       hdr,
+                                                       BTRFS_MAX_LEVEL + 3, 1);
+                       btrfsic_release_block_ctx(&tmp_next_block_ctx);
+               }
+       }
+
+       kfree(selected_super);
+       return ret;
+}
+
+static int btrfsic_process_superblock_dev_mirror(
+               struct btrfsic_state *state,
+               struct btrfsic_dev_state *dev_state,
+               struct btrfs_device *device,
+               int superblock_mirror_num,
+               struct btrfsic_dev_state **selected_dev_state,
+               struct btrfs_super_block *selected_super)
+{
+       struct btrfs_super_block *super_tmp;
+       u64 dev_bytenr;
+       struct buffer_head *bh;
+       struct btrfsic_block *superblock_tmp;
+       int pass;
+       struct block_device *const superblock_bdev = device->bdev;
+
+       /* super block bytenr is always the unmapped device bytenr */
+       dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
+       bh = __bread(superblock_bdev, dev_bytenr / 4096, 4096);
+       if (NULL == bh)
+               return -1;
+       super_tmp = (struct btrfs_super_block *)
+           (bh->b_data + (dev_bytenr & 4095));
+
+       if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
+           strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
+                   sizeof(super_tmp->magic)) ||
+           memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE)) {
+               brelse(bh);
+               return 0;
+       }
+
+       superblock_tmp =
+           btrfsic_block_hashtable_lookup(superblock_bdev,
+                                          dev_bytenr,
+                                          &state->block_hashtable);
+       if (NULL == superblock_tmp) {
+               superblock_tmp = btrfsic_block_alloc();
+               if (NULL == superblock_tmp) {
+                       printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+                       brelse(bh);
+                       return -1;
+               }
+               /* for superblock, only the dev_bytenr makes sense */
+               superblock_tmp->dev_bytenr = dev_bytenr;
+               superblock_tmp->dev_state = dev_state;
+               superblock_tmp->logical_bytenr = dev_bytenr;
+               superblock_tmp->generation = btrfs_super_generation(super_tmp);
+               superblock_tmp->is_metadata = 1;
+               superblock_tmp->is_superblock = 1;
+               superblock_tmp->is_iodone = 1;
+               superblock_tmp->never_written = 0;
+               superblock_tmp->mirror_num = 1 + superblock_mirror_num;
+               if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
+                       printk(KERN_INFO "New initial S-block (bdev %p, %s)"
+                              " @%llu (%s/%llu/%d)\n",
+                              superblock_bdev, device->name,
+                              (unsigned long long)dev_bytenr,
+                              dev_state->name,
+                              (unsigned long long)dev_bytenr,
+                              superblock_mirror_num);
+               list_add(&superblock_tmp->all_blocks_node,
+                        &state->all_blocks_list);
+               btrfsic_block_hashtable_add(superblock_tmp,
+                                           &state->block_hashtable);
+       }
+
+       /* select the one with the highest generation field */
+       if (btrfs_super_generation(super_tmp) >
+           state->max_superblock_generation ||
+           0 == state->max_superblock_generation) {
+               memcpy(selected_super, super_tmp, sizeof(*selected_super));
+               *selected_dev_state = dev_state;
+               state->max_superblock_generation =
+                   btrfs_super_generation(super_tmp);
+               state->latest_superblock = superblock_tmp;
+       }
+
+       for (pass = 0; pass < 3; pass++) {
+               u64 next_bytenr;
+               int num_copies;
+               int mirror_num;
+               const char *additional_string = NULL;
+               struct btrfs_disk_key tmp_disk_key;
+
+               tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
+               tmp_disk_key.offset = 0;
+               switch (pass) {
+               case 0:
+                       tmp_disk_key.objectid =
+                           cpu_to_le64(BTRFS_ROOT_TREE_OBJECTID);
+                       additional_string = "initial root ";
+                       next_bytenr = btrfs_super_root(super_tmp);
+                       break;
+               case 1:
+                       tmp_disk_key.objectid =
+                           cpu_to_le64(BTRFS_CHUNK_TREE_OBJECTID);
+                       additional_string = "initial chunk ";
+                       next_bytenr = btrfs_super_chunk_root(super_tmp);
+                       break;
+               case 2:
+                       tmp_disk_key.objectid =
+                           cpu_to_le64(BTRFS_TREE_LOG_OBJECTID);
+                       additional_string = "initial log ";
+                       next_bytenr = btrfs_super_log_root(super_tmp);
+                       if (0 == next_bytenr)
+                               continue;
+                       break;
+               }
+
+               num_copies =
+                   btrfs_num_copies(&state->root->fs_info->mapping_tree,
+                                    next_bytenr, PAGE_SIZE);
+               if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
+                       printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+                              (unsigned long long)next_bytenr, num_copies);
+               for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
+                       struct btrfsic_block *next_block;
+                       struct btrfsic_block_data_ctx tmp_next_block_ctx;
+                       struct btrfsic_block_link *l;
+
+                       if (btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+                                             &tmp_next_block_ctx,
+                                             mirror_num)) {
+                               printk(KERN_INFO "btrfsic: btrfsic_map_block("
+                                      "bytenr @%llu, mirror %d) failed!\n",
+                                      (unsigned long long)next_bytenr,
+                                      mirror_num);
+                               brelse(bh);
+                               return -1;
+                       }
+
+                       next_block = btrfsic_block_lookup_or_add(
+                                       state, &tmp_next_block_ctx,
+                                       additional_string, 1, 1, 0,
+                                       mirror_num, NULL);
+                       if (NULL == next_block) {
+                               btrfsic_release_block_ctx(&tmp_next_block_ctx);
+                               brelse(bh);
+                               return -1;
+                       }
+
+                       next_block->disk_key = tmp_disk_key;
+                       next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
+                       l = btrfsic_block_link_lookup_or_add(
+                                       state, &tmp_next_block_ctx,
+                                       next_block, superblock_tmp,
+                                       BTRFSIC_GENERATION_UNKNOWN);
+                       btrfsic_release_block_ctx(&tmp_next_block_ctx);
+                       if (NULL == l) {
+                               brelse(bh);
+                               return -1;
+                       }
+               }
+       }
+       if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES)
+               btrfsic_dump_tree_sub(state, superblock_tmp, 0);
+
+       brelse(bh);
+       return 0;
+}
+
+static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void)
+{
+       struct btrfsic_stack_frame *sf;
+
+       sf = kzalloc(sizeof(*sf), GFP_NOFS);
+       if (NULL == sf)
+               printk(KERN_INFO "btrfsic: alloc memory failed!\n");
+       else
+               sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
+       return sf;
+}
+
+static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf)
+{
+       BUG_ON(!(NULL == sf ||
+                BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER == sf->magic));
+       kfree(sf);
+}
+
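+/*
+ * Descriptive summary of the function below (derived from its code):
+ * walk one metadata tree block (node or leaf) and every tree block that it
+ * refers to. The walk is iterative: instead of recursing, a chain of
+ * btrfsic_stack_frame structures is built, so the kernel stack usage stays
+ * bounded independently of the tree depth.
+ */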
+static int btrfsic_process_metablock(
+               struct btrfsic_state *state,
+               struct btrfsic_block *const first_block,
+               struct btrfsic_block_data_ctx *const first_block_ctx,
+               struct btrfs_header *const first_hdr,
+               int first_limit_nesting, int force_iodone_flag)
+{
+       struct btrfsic_stack_frame initial_stack_frame = { 0 };
+       struct btrfsic_stack_frame *sf;
+       struct btrfsic_stack_frame *next_stack;
+
+       sf = &initial_stack_frame;
+       sf->error = 0;
+       sf->i = -1;
+       sf->limit_nesting = first_limit_nesting;
+       sf->block = first_block;
+       sf->block_ctx = first_block_ctx;
+       sf->next_block = NULL;
+       sf->hdr = first_hdr;
+       sf->prev = NULL;
+
+continue_with_new_stack_frame:
+       sf->block->generation = le64_to_cpu(sf->hdr->generation);
+       if (0 == sf->hdr->level) {
+               struct btrfs_leaf *const leafhdr =
+                   (struct btrfs_leaf *)sf->hdr;
+
+               if (-1 == sf->i) {
+                       sf->nr = le32_to_cpu(leafhdr->header.nritems);
+
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO
+                                      "leaf %llu items %d generation %llu"
+                                      " owner %llu\n",
+                                      (unsigned long long)
+                                      sf->block_ctx->start,
+                                      sf->nr,
+                                      (unsigned long long)
+                                      le64_to_cpu(leafhdr->header.generation),
+                                      (unsigned long long)
+                                      le64_to_cpu(leafhdr->header.owner));
+               }
+
+continue_with_current_leaf_stack_frame:
+               if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
+                       sf->i++;
+                       sf->num_copies = 0;
+               }
+
+               if (sf->i < sf->nr) {
+                       struct btrfs_item *disk_item = leafhdr->items + sf->i;
+                       struct btrfs_disk_key *disk_key = &disk_item->key;
+                       u8 type;
+                       const u32 item_offset = le32_to_cpu(disk_item->offset);
+
+                       type = disk_key->type;
+
+                       if (BTRFS_ROOT_ITEM_KEY == type) {
+                               const struct btrfs_root_item *const root_item =
+                                   (struct btrfs_root_item *)
+                                   (sf->block_ctx->data +
+                                    offsetof(struct btrfs_leaf, items) +
+                                    item_offset);
+                               const u64 next_bytenr =
+                                   le64_to_cpu(root_item->bytenr);
+
+                               sf->error =
+                                   btrfsic_create_link_to_next_block(
+                                               state,
+                                               sf->block,
+                                               sf->block_ctx,
+                                               next_bytenr,
+                                               sf->limit_nesting,
+                                               &sf->next_block_ctx,
+                                               &sf->next_block,
+                                               force_iodone_flag,
+                                               &sf->num_copies,
+                                               &sf->mirror_num,
+                                               disk_key,
+                                               le64_to_cpu(root_item->
+                                               generation));
+                               if (sf->error)
+                                       goto one_stack_frame_backwards;
+
+                               if (NULL != sf->next_block) {
+                                       struct btrfs_header *const next_hdr =
+                                           (struct btrfs_header *)
+                                           sf->next_block_ctx.data;
+
+                                       next_stack =
+                                           btrfsic_stack_frame_alloc();
+                                       if (NULL == next_stack) {
+                                               btrfsic_release_block_ctx(
+                                                               &sf->
+                                                               next_block_ctx);
+                                               goto one_stack_frame_backwards;
+                                       }
+
+                                       next_stack->i = -1;
+                                       next_stack->block = sf->next_block;
+                                       next_stack->block_ctx =
+                                           &sf->next_block_ctx;
+                                       next_stack->next_block = NULL;
+                                       next_stack->hdr = next_hdr;
+                                       next_stack->limit_nesting =
+                                           sf->limit_nesting - 1;
+                                       next_stack->prev = sf;
+                                       sf = next_stack;
+                                       goto continue_with_new_stack_frame;
+                               }
+                       } else if (BTRFS_EXTENT_DATA_KEY == type &&
+                                  state->include_extent_data) {
+                               sf->error = btrfsic_handle_extent_data(
+                                               state,
+                                               sf->block,
+                                               sf->block_ctx,
+                                               item_offset,
+                                               force_iodone_flag);
+                               if (sf->error)
+                                       goto one_stack_frame_backwards;
+                       }
+
+                       goto continue_with_current_leaf_stack_frame;
+               }
+       } else {
+               struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr;
+
+               if (-1 == sf->i) {
+                       sf->nr = le32_to_cpu(nodehdr->header.nritems);
+
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO "node %llu level %d items %d"
+                                      " generation %llu owner %llu\n",
+                                      (unsigned long long)
+                                      sf->block_ctx->start,
+                                      nodehdr->header.level, sf->nr,
+                                      (unsigned long long)
+                                      le64_to_cpu(nodehdr->header.generation),
+                                      (unsigned long long)
+                                      le64_to_cpu(nodehdr->header.owner));
+               }
+
+continue_with_current_node_stack_frame:
+               if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
+                       sf->i++;
+                       sf->num_copies = 0;
+               }
+
+               if (sf->i < sf->nr) {
+                       struct btrfs_key_ptr *disk_key_ptr =
+                           nodehdr->ptrs + sf->i;
+                       const u64 next_bytenr =
+                           le64_to_cpu(disk_key_ptr->blockptr);
+
+                       sf->error = btrfsic_create_link_to_next_block(
+                                       state,
+                                       sf->block,
+                                       sf->block_ctx,
+                                       next_bytenr,
+                                       sf->limit_nesting,
+                                       &sf->next_block_ctx,
+                                       &sf->next_block,
+                                       force_iodone_flag,
+                                       &sf->num_copies,
+                                       &sf->mirror_num,
+                                       &disk_key_ptr->key,
+                                       le64_to_cpu(disk_key_ptr->generation));
+                       if (sf->error)
+                               goto one_stack_frame_backwards;
+
+                       if (NULL != sf->next_block) {
+                               struct btrfs_header *const next_hdr =
+                                   (struct btrfs_header *)
+                                   sf->next_block_ctx.data;
+
+                               next_stack = btrfsic_stack_frame_alloc();
+                               if (NULL == next_stack)
+                                       goto one_stack_frame_backwards;
+
+                               next_stack->i = -1;
+                               next_stack->block = sf->next_block;
+                               next_stack->block_ctx = &sf->next_block_ctx;
+                               next_stack->next_block = NULL;
+                               next_stack->hdr = next_hdr;
+                               next_stack->limit_nesting =
+                                   sf->limit_nesting - 1;
+                               next_stack->prev = sf;
+                               sf = next_stack;
+                               goto continue_with_new_stack_frame;
+                       }
+
+                       goto continue_with_current_node_stack_frame;
+               }
+       }
+
+one_stack_frame_backwards:
+       if (NULL != sf->prev) {
+               struct btrfsic_stack_frame *const prev = sf->prev;
+
+               /* the one for the initial block is freed in the caller */
+               btrfsic_release_block_ctx(sf->block_ctx);
+
+               if (sf->error) {
+                       prev->error = sf->error;
+                       btrfsic_stack_frame_free(sf);
+                       sf = prev;
+                       goto one_stack_frame_backwards;
+               }
+
+               btrfsic_stack_frame_free(sf);
+               sf = prev;
+               goto continue_with_new_stack_frame;
+       } else {
+               BUG_ON(&initial_stack_frame != sf);
+       }
+
+       return sf->error;
+}
+
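+/*
+ * Descriptive summary of the function below (derived from its code):
+ * look up or create the link from @block to the tree block at @next_bytenr
+ * for the mirror *mirror_nump. If the link was newly allocated and the
+ * nesting limit has not been reached yet, the referenced block is read and
+ * *next_blockp is set so that the caller descends into it; otherwise
+ * *next_blockp is left NULL.
+ */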
+static int btrfsic_create_link_to_next_block(
+               struct btrfsic_state *state,
+               struct btrfsic_block *block,
+               struct btrfsic_block_data_ctx *block_ctx,
+               u64 next_bytenr,
+               int limit_nesting,
+               struct btrfsic_block_data_ctx *next_block_ctx,
+               struct btrfsic_block **next_blockp,
+               int force_iodone_flag,
+               int *num_copiesp, int *mirror_nump,
+               struct btrfs_disk_key *disk_key,
+               u64 parent_generation)
+{
+       struct btrfsic_block *next_block = NULL;
+       int ret;
+       struct btrfsic_block_link *l;
+       int did_alloc_block_link;
+       int block_was_created;
+
+       *next_blockp = NULL;
+       if (0 == *num_copiesp) {
+               *num_copiesp =
+                   btrfs_num_copies(&state->root->fs_info->mapping_tree,
+                                    next_bytenr, PAGE_SIZE);
+               if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
+                       printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+                              (unsigned long long)next_bytenr, *num_copiesp);
+               *mirror_nump = 1;
+       }
+
+       if (*mirror_nump > *num_copiesp)
+               return 0;
+
+       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+               printk(KERN_INFO
+                      "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
+                      *mirror_nump);
+       ret = btrfsic_map_block(state, next_bytenr,
+                               BTRFSIC_BLOCK_SIZE,
+                               next_block_ctx, *mirror_nump);
+       if (ret) {
+               printk(KERN_INFO
+                      "btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
+                      (unsigned long long)next_bytenr, *mirror_nump);
+               btrfsic_release_block_ctx(next_block_ctx);
+               *next_blockp = NULL;
+               return -1;
+       }
+
+       next_block = btrfsic_block_lookup_or_add(state,
+                                                next_block_ctx, "referenced ",
+                                                1, force_iodone_flag,
+                                                !force_iodone_flag,
+                                                *mirror_nump,
+                                                &block_was_created);
+       if (NULL == next_block) {
+               btrfsic_release_block_ctx(next_block_ctx);
+               *next_blockp = NULL;
+               return -1;
+       }
+       if (block_was_created) {
+               l = NULL;
+               next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
+       } else {
+               if (next_block->logical_bytenr != next_bytenr &&
+                   !(!next_block->is_metadata &&
+                     0 == next_block->logical_bytenr)) {
+                       printk(KERN_INFO
+                              "Referenced block @%llu (%s/%llu/%d)"
+                              " found in hash table, %c,"
+                              " bytenr mismatch (!= stored %llu).\n",
+                              (unsigned long long)next_bytenr,
+                              next_block_ctx->dev->name,
+                              (unsigned long long)next_block_ctx->dev_bytenr,
+                              *mirror_nump,
+                              btrfsic_get_block_type(state, next_block),
+                              (unsigned long long)next_block->logical_bytenr);
+               } else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "Referenced block @%llu (%s/%llu/%d)"
+                              " found in hash table, %c.\n",
+                              (unsigned long long)next_bytenr,
+                              next_block_ctx->dev->name,
+                              (unsigned long long)next_block_ctx->dev_bytenr,
+                              *mirror_nump,
+                              btrfsic_get_block_type(state, next_block));
+               next_block->logical_bytenr = next_bytenr;
+
+               next_block->mirror_num = *mirror_nump;
+               l = btrfsic_block_link_hashtable_lookup(
+                               next_block_ctx->dev->bdev,
+                               next_block_ctx->dev_bytenr,
+                               block_ctx->dev->bdev,
+                               block_ctx->dev_bytenr,
+                               &state->block_link_hashtable);
+       }
+
+       next_block->disk_key = *disk_key;
+       if (NULL == l) {
+               l = btrfsic_block_link_alloc();
+               if (NULL == l) {
+                       printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+                       btrfsic_release_block_ctx(next_block_ctx);
+                       *next_blockp = NULL;
+                       return -1;
+               }
+
+               did_alloc_block_link = 1;
+               l->block_ref_to = next_block;
+               l->block_ref_from = block;
+               l->ref_cnt = 1;
+               l->parent_generation = parent_generation;
+
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       btrfsic_print_add_link(state, l);
+
+               list_add(&l->node_ref_to, &block->ref_to_list);
+               list_add(&l->node_ref_from, &next_block->ref_from_list);
+
+               btrfsic_block_link_hashtable_add(l,
+                                                &state->block_link_hashtable);
+       } else {
+               did_alloc_block_link = 0;
+               if (0 == limit_nesting) {
+                       l->ref_cnt++;
+                       l->parent_generation = parent_generation;
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               btrfsic_print_add_link(state, l);
+               }
+       }
+
+       if (limit_nesting > 0 && did_alloc_block_link) {
+               ret = btrfsic_read_block(state, next_block_ctx);
+               if (ret < (int)BTRFSIC_BLOCK_SIZE) {
+                       printk(KERN_INFO
+                              "btrfsic: read block @logical %llu failed!\n",
+                              (unsigned long long)next_bytenr);
+                       btrfsic_release_block_ctx(next_block_ctx);
+                       *next_blockp = NULL;
+                       return -1;
+               }
+
+               *next_blockp = next_block;
+       } else {
+               *next_blockp = NULL;
+       }
+       (*mirror_nump)++;
+
+       return 0;
+}
+
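+/*
+ * Descriptive summary of the function below (derived from its code):
+ * handle one BTRFS_EXTENT_DATA_KEY item of a leaf: split the referenced
+ * regular data extent into BTRFSIC_BLOCK_SIZE sized chunks and add a data
+ * block plus a block link for every chunk on every mirror.
+ */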
+static int btrfsic_handle_extent_data(
+               struct btrfsic_state *state,
+               struct btrfsic_block *block,
+               struct btrfsic_block_data_ctx *block_ctx,
+               u32 item_offset, int force_iodone_flag)
+{
+       int ret;
+       struct btrfs_file_extent_item *file_extent_item =
+           (struct btrfs_file_extent_item *)(block_ctx->data +
+                                             offsetof(struct btrfs_leaf,
+                                                      items) + item_offset);
+       u64 next_bytenr =
+           le64_to_cpu(file_extent_item->disk_bytenr) +
+           le64_to_cpu(file_extent_item->offset);
+       u64 num_bytes = le64_to_cpu(file_extent_item->num_bytes);
+       u64 generation = le64_to_cpu(file_extent_item->generation);
+       struct btrfsic_block_link *l;
+
+       if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
+               printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
+                      " offset = %llu, num_bytes = %llu\n",
+                      file_extent_item->type,
+                      (unsigned long long)
+                      le64_to_cpu(file_extent_item->disk_bytenr),
+                      (unsigned long long)
+                      le64_to_cpu(file_extent_item->offset),
+                      (unsigned long long)
+                      le64_to_cpu(file_extent_item->num_bytes));
+       if (BTRFS_FILE_EXTENT_REG != file_extent_item->type ||
+           ((u64)0) == le64_to_cpu(file_extent_item->disk_bytenr))
+               return 0;
+       while (num_bytes > 0) {
+               u32 chunk_len;
+               int num_copies;
+               int mirror_num;
+
+               if (num_bytes > BTRFSIC_BLOCK_SIZE)
+                       chunk_len = BTRFSIC_BLOCK_SIZE;
+               else
+                       chunk_len = num_bytes;
+
+               num_copies =
+                   btrfs_num_copies(&state->root->fs_info->mapping_tree,
+                                    next_bytenr, PAGE_SIZE);
+               if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
+                       printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+                              (unsigned long long)next_bytenr, num_copies);
+               for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
+                       struct btrfsic_block_data_ctx next_block_ctx;
+                       struct btrfsic_block *next_block;
+                       int block_was_created;
+
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO "btrfsic_handle_extent_data("
+                                      "mirror_num=%d)\n", mirror_num);
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
+                               printk(KERN_INFO
+                                      "\tdisk_bytenr = %llu, num_bytes %u\n",
+                                      (unsigned long long)next_bytenr,
+                                      chunk_len);
+                       ret = btrfsic_map_block(state, next_bytenr,
+                                               chunk_len, &next_block_ctx,
+                                               mirror_num);
+                       if (ret) {
+                               printk(KERN_INFO
+                                      "btrfsic: btrfsic_map_block(@%llu,"
+                                      " mirror=%d) failed!\n",
+                                      (unsigned long long)next_bytenr,
+                                      mirror_num);
+                               return -1;
+                       }
+
+                       next_block = btrfsic_block_lookup_or_add(
+                                       state,
+                                       &next_block_ctx,
+                                       "referenced ",
+                                       0,
+                                       force_iodone_flag,
+                                       !force_iodone_flag,
+                                       mirror_num,
+                                       &block_was_created);
+                       if (NULL == next_block) {
+                               printk(KERN_INFO
+                                      "btrfsic: error, kmalloc failed!\n");
+                               btrfsic_release_block_ctx(&next_block_ctx);
+                               return -1;
+                       }
+                       if (!block_was_created) {
+                               if (next_block->logical_bytenr != next_bytenr &&
+                                   !(!next_block->is_metadata &&
+                                     0 == next_block->logical_bytenr)) {
+                                       printk(KERN_INFO
+                                              "Referenced block"
+                                              " @%llu (%s/%llu/%d)"
+                                              " found in hash table, D,"
+                                              " bytenr mismatch"
+                                              " (!= stored %llu).\n",
+                                              (unsigned long long)next_bytenr,
+                                              next_block_ctx.dev->name,
+                                              (unsigned long long)
+                                              next_block_ctx.dev_bytenr,
+                                              mirror_num,
+                                              (unsigned long long)
+                                              next_block->logical_bytenr);
+                               }
+                               next_block->logical_bytenr = next_bytenr;
+                               next_block->mirror_num = mirror_num;
+                       }
+
+                       l = btrfsic_block_link_lookup_or_add(state,
+                                                            &next_block_ctx,
+                                                            next_block, block,
+                                                            generation);
+                       btrfsic_release_block_ctx(&next_block_ctx);
+                       if (NULL == l)
+                               return -1;
+               }
+
+               next_bytenr += chunk_len;
+               num_bytes -= chunk_len;
+       }
+
+       return 0;
+}
+
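+/*
+ * Descriptive summary of the function below (derived from its code):
+ * map a logical bytenr to the device and physical bytenr of the requested
+ * mirror and fill in the block data context. No data is read here; that is
+ * done later in btrfsic_read_block().
+ */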
+static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
+                            struct btrfsic_block_data_ctx *block_ctx_out,
+                            int mirror_num)
+{
+       int ret;
+       u64 length;
+       struct btrfs_bio *multi = NULL;
+       struct btrfs_device *device;
+
+       length = len;
+       ret = btrfs_map_block(&state->root->fs_info->mapping_tree, READ,
+                             bytenr, &length, &multi, mirror_num);
+       if (ret) {
+               printk(KERN_INFO
+                      "btrfsic: btrfs_map_block(logical @%llu) failed!\n",
+                      (unsigned long long)bytenr);
+               return ret;
+       }
+
+       device = multi->stripes[0].dev;
+       block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev);
+       block_ctx_out->dev_bytenr = multi->stripes[0].physical;
+       block_ctx_out->start = bytenr;
+       block_ctx_out->len = len;
+       block_ctx_out->data = NULL;
+       block_ctx_out->bh = NULL;
+
+       kfree(multi);
+       if (NULL == block_ctx_out->dev) {
+               ret = -ENXIO;
+               printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n");
+       }
+
+       return ret;
+}
+
+static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
+                                 u32 len, struct block_device *bdev,
+                                 struct btrfsic_block_data_ctx *block_ctx_out)
+{
+       block_ctx_out->dev = btrfsic_dev_state_lookup(bdev);
+       block_ctx_out->dev_bytenr = bytenr;
+       block_ctx_out->start = bytenr;
+       block_ctx_out->len = len;
+       block_ctx_out->data = NULL;
+       block_ctx_out->bh = NULL;
+       if (NULL != block_ctx_out->dev) {
+               return 0;
+       } else {
+               printk(KERN_INFO "btrfsic: error, cannot lookup dev (#2)!\n");
+               return -ENXIO;
+       }
+}
+
+static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
+{
+       if (NULL != block_ctx->bh) {
+               brelse(block_ctx->bh);
+               block_ctx->bh = NULL;
+       }
+}
+
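+/*
+ * Descriptive summary of the function below (derived from its code):
+ * read the single 4K block described by @block_ctx through the buffer
+ * cache. The device bytenr must be 4K aligned and the length must not
+ * exceed 4096 bytes.
+ */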
+static int btrfsic_read_block(struct btrfsic_state *state,
+                             struct btrfsic_block_data_ctx *block_ctx)
+{
+       block_ctx->bh = NULL;
+       if (block_ctx->dev_bytenr & 4095) {
+               printk(KERN_INFO
+                      "btrfsic: read_block() with unaligned bytenr %llu\n",
+                      (unsigned long long)block_ctx->dev_bytenr);
+               return -1;
+       }
+       if (block_ctx->len > 4096) {
+               printk(KERN_INFO
+                      "btrfsic: read_block() with too large size %d\n",
+                      block_ctx->len);
+               return -1;
+       }
+
+       block_ctx->bh = __bread(block_ctx->dev->bdev,
+                               block_ctx->dev_bytenr >> 12, 4096);
+       if (NULL == block_ctx->bh)
+               return -1;
+       block_ctx->data = block_ctx->bh->b_data;
+
+       return block_ctx->len;
+}
+
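+/*
+ * Descriptive summary of the function below (derived from its code):
+ * dump every block known to the integrity checker together with its
+ * ref-to and ref-from links (for debugging only).
+ */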
+static void btrfsic_dump_database(struct btrfsic_state *state)
+{
+       struct list_head *elem_all;
+
+       BUG_ON(NULL == state);
+
+       printk(KERN_INFO "all_blocks_list:\n");
+       list_for_each(elem_all, &state->all_blocks_list) {
+               const struct btrfsic_block *const b_all =
+                   list_entry(elem_all, struct btrfsic_block,
+                              all_blocks_node);
+               struct list_head *elem_ref_to;
+               struct list_head *elem_ref_from;
+
+               printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
+                      btrfsic_get_block_type(state, b_all),
+                      (unsigned long long)b_all->logical_bytenr,
+                      b_all->dev_state->name,
+                      (unsigned long long)b_all->dev_bytenr,
+                      b_all->mirror_num);
+
+               list_for_each(elem_ref_to, &b_all->ref_to_list) {
+                       const struct btrfsic_block_link *const l =
+                           list_entry(elem_ref_to,
+                                      struct btrfsic_block_link,
+                                      node_ref_to);
+
+                       printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
+                              " refers %u* to"
+                              " %c @%llu (%s/%llu/%d)\n",
+                              btrfsic_get_block_type(state, b_all),
+                              (unsigned long long)b_all->logical_bytenr,
+                              b_all->dev_state->name,
+                              (unsigned long long)b_all->dev_bytenr,
+                              b_all->mirror_num,
+                              l->ref_cnt,
+                              btrfsic_get_block_type(state, l->block_ref_to),
+                              (unsigned long long)
+                              l->block_ref_to->logical_bytenr,
+                              l->block_ref_to->dev_state->name,
+                              (unsigned long long)l->block_ref_to->dev_bytenr,
+                              l->block_ref_to->mirror_num);
+               }
+
+               list_for_each(elem_ref_from, &b_all->ref_from_list) {
+                       const struct btrfsic_block_link *const l =
+                           list_entry(elem_ref_from,
+                                      struct btrfsic_block_link,
+                                      node_ref_from);
+
+                       printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
+                              " is ref %u* from"
+                              " %c @%llu (%s/%llu/%d)\n",
+                              btrfsic_get_block_type(state, b_all),
+                              (unsigned long long)b_all->logical_bytenr,
+                              b_all->dev_state->name,
+                              (unsigned long long)b_all->dev_bytenr,
+                              b_all->mirror_num,
+                              l->ref_cnt,
+                              btrfsic_get_block_type(state, l->block_ref_from),
+                              (unsigned long long)
+                              l->block_ref_from->logical_bytenr,
+                              l->block_ref_from->dev_state->name,
+                              (unsigned long long)
+                              l->block_ref_from->dev_bytenr,
+                              l->block_ref_from->mirror_num);
+               }
+
+               printk(KERN_INFO "\n");
+       }
+}
+
+/*
+ * Test whether the disk block contains a tree block (leaf or node)
+ * (note that this test fails for the super block)
+ */
+static int btrfsic_test_for_metadata(struct btrfsic_state *state,
+                                    const u8 *data, unsigned int size)
+{
+       struct btrfs_header *h;
+       u8 csum[BTRFS_CSUM_SIZE];
+       u32 crc = ~(u32)0;
+       int fail = 0;
+       int crc_fail = 0;
+
+       h = (struct btrfs_header *)data;
+
+       if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
+               fail++;
+
+       crc = crc32c(crc, data + BTRFS_CSUM_SIZE, PAGE_SIZE - BTRFS_CSUM_SIZE);
+       btrfs_csum_final(crc, csum);
+       if (memcmp(csum, h->csum, state->csum_size))
+               crc_fail++;
+
+       return fail || crc_fail;
+}
+
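+/*
+ * Descriptive summary of the function below (derived from its code):
+ * called for every block that is about to be written, either via a bio or
+ * via a buffer head. The block is classified as metadata or data, checked
+ * against the recorded state (e.g. whether a block that the most recent
+ * superblock still refers to would be overwritten), and the bio/bh end_io
+ * callback is replaced so that the integrity checker is notified when the
+ * write completes.
+ */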
+static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
+                                         u64 dev_bytenr,
+                                         u8 *mapped_data, unsigned int len,
+                                         struct bio *bio,
+                                         int *bio_is_patched,
+                                         struct buffer_head *bh,
+                                         int submit_bio_bh_rw)
+{
+       int is_metadata;
+       struct btrfsic_block *block;
+       struct btrfsic_block_data_ctx block_ctx;
+       int ret;
+       struct btrfsic_state *state = dev_state->state;
+       struct block_device *bdev = dev_state->bdev;
+
+       WARN_ON(len > PAGE_SIZE);
+       is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_data, len));
+       if (NULL != bio_is_patched)
+               *bio_is_patched = 0;
+
+       block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
+                                              &state->block_hashtable);
+       if (NULL != block) {
+               u64 bytenr = 0;
+               struct list_head *elem_ref_to;
+               struct list_head *tmp_ref_to;
+
+               if (block->is_superblock) {
+                       bytenr = le64_to_cpu(((struct btrfs_super_block *)
+                                             mapped_data)->bytenr);
+                       is_metadata = 1;
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
+                               printk(KERN_INFO
+                                      "[before new superblock is written]:\n");
+                               btrfsic_dump_tree_sub(state, block, 0);
+                       }
+               }
+               if (is_metadata) {
+                       if (!block->is_superblock) {
+                               bytenr = le64_to_cpu(((struct btrfs_header *)
+                                                     mapped_data)->bytenr);
+                               btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
+                                                              dev_state,
+                                                              dev_bytenr,
+                                                              mapped_data);
+                       }
+                       if (block->logical_bytenr != bytenr) {
+                               printk(KERN_INFO
+                                      "Written block @%llu (%s/%llu/%d)"
+                                      " found in hash table, %c,"
+                                      " bytenr mismatch"
+                                      " (!= stored %llu).\n",
+                                      (unsigned long long)bytenr,
+                                      dev_state->name,
+                                      (unsigned long long)dev_bytenr,
+                                      block->mirror_num,
+                                      btrfsic_get_block_type(state, block),
+                                      (unsigned long long)
+                                      block->logical_bytenr);
+                               block->logical_bytenr = bytenr;
+                       } else if (state->print_mask &
+                                  BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO
+                                      "Written block @%llu (%s/%llu/%d)"
+                                      " found in hash table, %c.\n",
+                                      (unsigned long long)bytenr,
+                                      dev_state->name,
+                                      (unsigned long long)dev_bytenr,
+                                      block->mirror_num,
+                                      btrfsic_get_block_type(state, block));
+               } else {
+                       bytenr = block->logical_bytenr;
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO
+                                      "Written block @%llu (%s/%llu/%d)"
+                                      " found in hash table, %c.\n",
+                                      (unsigned long long)bytenr,
+                                      dev_state->name,
+                                      (unsigned long long)dev_bytenr,
+                                      block->mirror_num,
+                                      btrfsic_get_block_type(state, block));
+               }
+
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "ref_to_list: %cE, ref_from_list: %cE\n",
+                              list_empty(&block->ref_to_list) ? ' ' : '!',
+                              list_empty(&block->ref_from_list) ? ' ' : '!');
+               if (btrfsic_is_block_ref_by_superblock(state, block, 0)) {
+                       printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
+                              " @%llu (%s/%llu/%d), old(gen=%llu,"
+                              " objectid=%llu, type=%d, offset=%llu),"
+                              " new(gen=%llu),"
+                              " which is referenced by most recent superblock"
+                              " (superblockgen=%llu)!\n",
+                              btrfsic_get_block_type(state, block),
+                              (unsigned long long)bytenr,
+                              dev_state->name,
+                              (unsigned long long)dev_bytenr,
+                              block->mirror_num,
+                              (unsigned long long)block->generation,
+                              (unsigned long long)
+                              le64_to_cpu(block->disk_key.objectid),
+                              block->disk_key.type,
+                              (unsigned long long)
+                              le64_to_cpu(block->disk_key.offset),
+                              (unsigned long long)
+                              le64_to_cpu(((struct btrfs_header *)
+                                           mapped_data)->generation),
+                              (unsigned long long)
+                              state->max_superblock_generation);
+                       btrfsic_dump_tree(state);
+               }
+
+               if (!block->is_iodone && !block->never_written) {
+                       printk(KERN_INFO "btrfs: attempt to overwrite %c-block"
+                              " @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu,"
+                              " which is not yet iodone!\n",
+                              btrfsic_get_block_type(state, block),
+                              (unsigned long long)bytenr,
+                              dev_state->name,
+                              (unsigned long long)dev_bytenr,
+                              block->mirror_num,
+                              (unsigned long long)block->generation,
+                              (unsigned long long)
+                              le64_to_cpu(((struct btrfs_header *)
+                                           mapped_data)->generation));
+                       /* it would not be safe to go on */
+                       btrfsic_dump_tree(state);
+                       return;
+               }
+
+               /*
+                * Clear all references of this block. Do not free
+                * the block itself even if it is not referenced anymore,
+                * because it still carries valuable information such as
+                * whether it was ever written and whether the IO completed.
+                */
+               list_for_each_safe(elem_ref_to, tmp_ref_to,
+                                  &block->ref_to_list) {
+                       struct btrfsic_block_link *const l =
+                           list_entry(elem_ref_to,
+                                      struct btrfsic_block_link,
+                                      node_ref_to);
+
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               btrfsic_print_rem_link(state, l);
+                       l->ref_cnt--;
+                       if (0 == l->ref_cnt) {
+                               list_del(&l->node_ref_to);
+                               list_del(&l->node_ref_from);
+                               btrfsic_block_link_hashtable_remove(l);
+                               btrfsic_block_link_free(l);
+                       }
+               }
+
+               if (block->is_superblock)
+                       ret = btrfsic_map_superblock(state, bytenr, len,
+                                                    bdev, &block_ctx);
+               else
+                       ret = btrfsic_map_block(state, bytenr, len,
+                                               &block_ctx, 0);
+               if (ret) {
+                       printk(KERN_INFO
+                              "btrfsic: btrfsic_map_block(root @%llu)"
+                              " failed!\n", (unsigned long long)bytenr);
+                       return;
+               }
+               block_ctx.data = mapped_data;
+               /* the following is required in case of writes to mirrors:
+                * use the same device state that was used for the lookup */
+               block_ctx.dev = dev_state;
+               block_ctx.dev_bytenr = dev_bytenr;
+
+               if (is_metadata || state->include_extent_data) {
+                       block->never_written = 0;
+                       block->iodone_w_error = 0;
+                       if (NULL != bio) {
+                               block->is_iodone = 0;
+                               BUG_ON(NULL == bio_is_patched);
+                               if (!*bio_is_patched) {
+                                       block->orig_bio_bh_private =
+                                           bio->bi_private;
+                                       block->orig_bio_bh_end_io.bio =
+                                           bio->bi_end_io;
+                                       block->next_in_same_bio = NULL;
+                                       bio->bi_private = block;
+                                       bio->bi_end_io = btrfsic_bio_end_io;
+                                       *bio_is_patched = 1;
+                               } else {
+                                       struct btrfsic_block *chained_block =
+                                           (struct btrfsic_block *)
+                                           bio->bi_private;
+
+                                       BUG_ON(NULL == chained_block);
+                                       block->orig_bio_bh_private =
+                                           chained_block->orig_bio_bh_private;
+                                       block->orig_bio_bh_end_io.bio =
+                                           chained_block->orig_bio_bh_end_io.
+                                           bio;
+                                       block->next_in_same_bio = chained_block;
+                                       bio->bi_private = block;
+                               }
+                       } else if (NULL != bh) {
+                               block->is_iodone = 0;
+                               block->orig_bio_bh_private = bh->b_private;
+                               block->orig_bio_bh_end_io.bh = bh->b_end_io;
+                               block->next_in_same_bio = NULL;
+                               bh->b_private = block;
+                               bh->b_end_io = btrfsic_bh_end_io;
+                       } else {
+                               block->is_iodone = 1;
+                               block->orig_bio_bh_private = NULL;
+                               block->orig_bio_bh_end_io.bio = NULL;
+                               block->next_in_same_bio = NULL;
+                       }
+               }
+
+               block->flush_gen = dev_state->last_flush_gen + 1;
+               block->submit_bio_bh_rw = submit_bio_bh_rw;
+               if (is_metadata) {
+                       block->logical_bytenr = bytenr;
+                       block->is_metadata = 1;
+                       if (block->is_superblock) {
+                               ret = btrfsic_process_written_superblock(
+                                               state,
+                                               block,
+                                               (struct btrfs_super_block *)
+                                               mapped_data);
+                               if (state->print_mask &
+                                   BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
+                                       printk(KERN_INFO
+                                       "[after new superblock is written]:\n");
+                                       btrfsic_dump_tree_sub(state, block, 0);
+                               }
+                       } else {
+                               block->mirror_num = 0;  /* unknown */
+                               ret = btrfsic_process_metablock(
+                                               state,
+                                               block,
+                                               &block_ctx,
+                                               (struct btrfs_header *)
+                                               block_ctx.data,
+                                               0, 0);
+                       }
+                       if (ret)
+                               printk(KERN_INFO
+                                      "btrfsic: btrfsic_process_metablock"
+                                      "(root @%llu) failed!\n",
+                                      (unsigned long long)dev_bytenr);
+               } else {
+                       block->is_metadata = 0;
+                       block->mirror_num = 0;  /* unknown */
+                       block->generation = BTRFSIC_GENERATION_UNKNOWN;
+                       if (!state->include_extent_data
+                           && list_empty(&block->ref_from_list)) {
+                               /*
+                                * disk block is overwritten with extent
+                                * data (not meta data) and we are configured
+                                * to not include extent data: take the
+                                * chance and free the block's memory
+                                */
+                               btrfsic_block_hashtable_remove(block);
+                               list_del(&block->all_blocks_node);
+                               btrfsic_block_free(block);
+                       }
+               }
+               btrfsic_release_block_ctx(&block_ctx);
+       } else {
+               /* block has not been found in hash table */
+               u64 bytenr;
+
+               if (!is_metadata) {
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO "Written block (%s/%llu/?)"
+                                      " !found in hash table, D.\n",
+                                      dev_state->name,
+                                      (unsigned long long)dev_bytenr);
+                       if (!state->include_extent_data)
+                               return; /* ignore that written D block */
+
+                       /* this is getting ugly for the
+                        * include_extent_data case... */
+                       bytenr = 0;     /* unknown */
+                       block_ctx.start = bytenr;
+                       block_ctx.len = len;
+                       block_ctx.bh = NULL;
+               } else {
+                       bytenr = le64_to_cpu(((struct btrfs_header *)
+                                             mapped_data)->bytenr);
+                       btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
+                                                      dev_bytenr,
+                                                      mapped_data);
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO
+                                      "Written block @%llu (%s/%llu/?)"
+                                      " !found in hash table, M.\n",
+                                      (unsigned long long)bytenr,
+                                      dev_state->name,
+                                      (unsigned long long)dev_bytenr);
+
+                       ret = btrfsic_map_block(state, bytenr, len, &block_ctx,
+                                               0);
+                       if (ret) {
+                               printk(KERN_INFO
+                                      "btrfsic: btrfsic_map_block(root @%llu)"
+                                      " failed!\n",
+                                      (unsigned long long)dev_bytenr);
+                               return;
+                       }
+               }
+               block_ctx.data = mapped_data;
+               /* the following is required in case of writes to mirrors:
+                * use the same device state that was used for the lookup */
+               block_ctx.dev = dev_state;
+               block_ctx.dev_bytenr = dev_bytenr;
+
+               block = btrfsic_block_alloc();
+               if (NULL == block) {
+                       printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+                       btrfsic_release_block_ctx(&block_ctx);
+                       return;
+               }
+               block->dev_state = dev_state;
+               block->dev_bytenr = dev_bytenr;
+               block->logical_bytenr = bytenr;
+               block->is_metadata = is_metadata;
+               block->never_written = 0;
+               block->iodone_w_error = 0;
+               block->mirror_num = 0;  /* unknown */
+               block->flush_gen = dev_state->last_flush_gen + 1;
+               block->submit_bio_bh_rw = submit_bio_bh_rw;
+               if (NULL != bio) {
+                       block->is_iodone = 0;
+                       BUG_ON(NULL == bio_is_patched);
+                       if (!*bio_is_patched) {
+                               block->orig_bio_bh_private = bio->bi_private;
+                               block->orig_bio_bh_end_io.bio = bio->bi_end_io;
+                               block->next_in_same_bio = NULL;
+                               bio->bi_private = block;
+                               bio->bi_end_io = btrfsic_bio_end_io;
+                               *bio_is_patched = 1;
+                       } else {
+                               struct btrfsic_block *chained_block =
+                                   (struct btrfsic_block *)
+                                   bio->bi_private;
+
+                               BUG_ON(NULL == chained_block);
+                               block->orig_bio_bh_private =
+                                   chained_block->orig_bio_bh_private;
+                               block->orig_bio_bh_end_io.bio =
+                                   chained_block->orig_bio_bh_end_io.bio;
+                               block->next_in_same_bio = chained_block;
+                               bio->bi_private = block;
+                       }
+               } else if (NULL != bh) {
+                       block->is_iodone = 0;
+                       block->orig_bio_bh_private = bh->b_private;
+                       block->orig_bio_bh_end_io.bh = bh->b_end_io;
+                       block->next_in_same_bio = NULL;
+                       bh->b_private = block;
+                       bh->b_end_io = btrfsic_bh_end_io;
+               } else {
+                       block->is_iodone = 1;
+                       block->orig_bio_bh_private = NULL;
+                       block->orig_bio_bh_end_io.bio = NULL;
+                       block->next_in_same_bio = NULL;
+               }
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "New written %c-block @%llu (%s/%llu/%d)\n",
+                              is_metadata ? 'M' : 'D',
+                              (unsigned long long)block->logical_bytenr,
+                              block->dev_state->name,
+                              (unsigned long long)block->dev_bytenr,
+                              block->mirror_num);
+               list_add(&block->all_blocks_node, &state->all_blocks_list);
+               btrfsic_block_hashtable_add(block, &state->block_hashtable);
+
+               if (is_metadata) {
+                       ret = btrfsic_process_metablock(state, block,
+                                                       &block_ctx,
+                                                       (struct btrfs_header *)
+                                                       block_ctx.data, 0, 0);
+                       if (ret)
+                               printk(KERN_INFO
+                                      "btrfsic: process_metablock(root @%llu)"
+                                      " failed!\n",
+                                      (unsigned long long)dev_bytenr);
+               }
+               btrfsic_release_block_ctx(&block_ctx);
+       }
+}
+
+static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
+{
+       struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private;
+       int iodone_w_error;
+
+       /* mutex is not held! This is not safe if IO is not yet completed
+        * on umount */
+       iodone_w_error = 0;
+       if (bio_error_status)
+               iodone_w_error = 1;
+
+       BUG_ON(NULL == block);
+       bp->bi_private = block->orig_bio_bh_private;
+       bp->bi_end_io = block->orig_bio_bh_end_io.bio;
+
+       do {
+               struct btrfsic_block *next_block;
+               struct btrfsic_dev_state *const dev_state = block->dev_state;
+
+               if ((dev_state->state->print_mask &
+                    BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
+                       printk(KERN_INFO
+                              "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
+                              bio_error_status,
+                              btrfsic_get_block_type(dev_state->state, block),
+                              (unsigned long long)block->logical_bytenr,
+                              dev_state->name,
+                              (unsigned long long)block->dev_bytenr,
+                              block->mirror_num);
+               next_block = block->next_in_same_bio;
+               block->iodone_w_error = iodone_w_error;
+               if (block->submit_bio_bh_rw & REQ_FLUSH) {
+                       dev_state->last_flush_gen++;
+                       if ((dev_state->state->print_mask &
+                            BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
+                               printk(KERN_INFO
+                                      "bio_end_io() new %s flush_gen=%llu\n",
+                                      dev_state->name,
+                                      (unsigned long long)
+                                      dev_state->last_flush_gen);
+               }
+               if (block->submit_bio_bh_rw & REQ_FUA)
+                       block->flush_gen = 0; /* FUA completed means block is
+                                              * on disk */
+               block->is_iodone = 1; /* for FLUSH, this releases the block */
+               block = next_block;
+       } while (NULL != block);
+
+       bp->bi_end_io(bp, bio_error_status);
+}
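
The handler above works because every block patched into the same bio is chained through next_in_same_bio, with the original bi_end_io/bi_private saved so they can be restored and invoked once the chain has been walked. A minimal userspace sketch of that chain walk, using illustrative stand-in types rather than the kernel structures:

    #include <stdio.h>

    /* Illustrative stand-ins for the per-block bookkeeping used above. */
    struct tracked_block {
            struct tracked_block *next_in_same_bio; /* older block patched into the same bio */
            int iodone_w_error;
            int is_iodone;
    };

    /* Walk the whole chain once the shared I/O completes, as btrfsic_bio_end_io() does. */
    static void chain_end_io(struct tracked_block *head, int error)
    {
            struct tracked_block *block = head;

            while (block) {
                    struct tracked_block *next = block->next_in_same_bio;

                    block->iodone_w_error = error ? 1 : 0;
                    block->is_iodone = 1;
                    block = next;
            }
    }

    int main(void)
    {
            struct tracked_block first = { 0, 0, 0 };
            struct tracked_block second = { &first, 0, 0 }; /* patched in later, links back */

            chain_end_io(&second, 0);
            printf("%d %d\n", first.is_iodone, second.is_iodone); /* prints "1 1" */
            return 0;
    }

Chaining this way lets one bio carry any number of tracked blocks while the submit path only ever swaps a single private pointer and end_io callback.
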
+
+static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
+{
+       struct btrfsic_block *block = (struct btrfsic_block *)bh->b_private;
+       int iodone_w_error = !uptodate;
+       struct btrfsic_dev_state *dev_state;
+
+       BUG_ON(NULL == block);
+       dev_state = block->dev_state;
+       if ((dev_state->state->print_mask & BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
+               printk(KERN_INFO
+                      "bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
+                      iodone_w_error,
+                      btrfsic_get_block_type(dev_state->state, block),
+                      (unsigned long long)block->logical_bytenr,
+                      block->dev_state->name,
+                      (unsigned long long)block->dev_bytenr,
+                      block->mirror_num);
+
+       block->iodone_w_error = iodone_w_error;
+       if (block->submit_bio_bh_rw & REQ_FLUSH) {
+               dev_state->last_flush_gen++;
+               if ((dev_state->state->print_mask &
+                    BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
+                       printk(KERN_INFO
+                              "bh_end_io() new %s flush_gen=%llu\n",
+                              dev_state->name,
+                              (unsigned long long)dev_state->last_flush_gen);
+       }
+       if (block->submit_bio_bh_rw & REQ_FUA)
+               block->flush_gen = 0; /* FUA completed means block is on disk */
+
+       bh->b_private = block->orig_bio_bh_private;
+       bh->b_end_io = block->orig_bio_bh_end_io.bh;
+       block->is_iodone = 1; /* for FLUSH, this releases the block */
+       bh->b_end_io(bh, uptodate);
+}
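
Both completion handlers maintain the flush-generation counters that btrfsic_check_all_ref_blocks() later compares: a written block records last_flush_gen + 1 at submit time, a completed FLUSH bumps the device's last_flush_gen, and a completed FUA write sets the block's flush_gen to 0, so a block counts as durable once its flush_gen is no longer greater than the device's last_flush_gen. A small sketch of that bookkeeping under illustrative names:

    #include <stdio.h>

    struct dev_state { unsigned long long last_flush_gen; };
    struct blk_state { unsigned long long flush_gen; };

    /* At submit time the block waits for the next flush generation. */
    static void on_submit(const struct dev_state *d, struct blk_state *b)
    {
            b->flush_gen = d->last_flush_gen + 1;
    }

    /* A completed FLUSH advances the device generation; a completed FUA write
     * makes its block durable immediately. */
    static void on_complete(struct dev_state *d, struct blk_state *b,
                            int was_flush, int was_fua)
    {
            if (was_flush)
                    d->last_flush_gen++;
            if (was_fua)
                    b->flush_gen = 0;
    }

    /* A block still sitting in the write cache has flush_gen > last_flush_gen. */
    static int durable(const struct dev_state *d, const struct blk_state *b)
    {
            return b->flush_gen <= d->last_flush_gen;
    }

    int main(void)
    {
            struct dev_state d = { 0 };
            struct blk_state b;

            on_submit(&d, &b);
            printf("%d", durable(&d, &b));    /* 0: not yet flushed */
            on_complete(&d, &b, 1, 0);        /* a later FLUSH completes */
            printf(" %d\n", durable(&d, &b)); /* 1: now considered on disk */
            return 0;
    }
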
+
+static int btrfsic_process_written_superblock(
+               struct btrfsic_state *state,
+               struct btrfsic_block *const superblock,
+               struct btrfs_super_block *const super_hdr)
+{
+       int pass;
+
+       superblock->generation = btrfs_super_generation(super_hdr);
+       if (!(superblock->generation > state->max_superblock_generation ||
+             0 == state->max_superblock_generation)) {
+               if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
+                       printk(KERN_INFO
+                              "btrfsic: superblock @%llu (%s/%llu/%d)"
+                              " with old gen %llu <= %llu\n",
+                              (unsigned long long)superblock->logical_bytenr,
+                              superblock->dev_state->name,
+                              (unsigned long long)superblock->dev_bytenr,
+                              superblock->mirror_num,
+                              (unsigned long long)
+                              btrfs_super_generation(super_hdr),
+                              (unsigned long long)
+                              state->max_superblock_generation);
+       } else {
+               if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
+                       printk(KERN_INFO
+                              "btrfsic: got new superblock @%llu (%s/%llu/%d)"
+                              " with new gen %llu > %llu\n",
+                              (unsigned long long)superblock->logical_bytenr,
+                              superblock->dev_state->name,
+                              (unsigned long long)superblock->dev_bytenr,
+                              superblock->mirror_num,
+                              (unsigned long long)
+                              btrfs_super_generation(super_hdr),
+                              (unsigned long long)
+                              state->max_superblock_generation);
+
+               state->max_superblock_generation =
+                   btrfs_super_generation(super_hdr);
+               state->latest_superblock = superblock;
+       }
+
+       for (pass = 0; pass < 3; pass++) {
+               int ret;
+               u64 next_bytenr;
+               struct btrfsic_block *next_block;
+               struct btrfsic_block_data_ctx tmp_next_block_ctx;
+               struct btrfsic_block_link *l;
+               int num_copies;
+               int mirror_num;
+               const char *additional_string = NULL;
+               struct btrfs_disk_key tmp_disk_key;
+
+               tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
+               tmp_disk_key.offset = 0;
+
+               switch (pass) {
+               case 0:
+                       tmp_disk_key.objectid =
+                           cpu_to_le64(BTRFS_ROOT_TREE_OBJECTID);
+                       additional_string = "root ";
+                       next_bytenr = btrfs_super_root(super_hdr);
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
+                               printk(KERN_INFO "root@%llu\n",
+                                      (unsigned long long)next_bytenr);
+                       break;
+               case 1:
+                       tmp_disk_key.objectid =
+                           cpu_to_le64(BTRFS_CHUNK_TREE_OBJECTID);
+                       additional_string = "chunk ";
+                       next_bytenr = btrfs_super_chunk_root(super_hdr);
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
+                               printk(KERN_INFO "chunk@%llu\n",
+                                      (unsigned long long)next_bytenr);
+                       break;
+               case 2:
+                       tmp_disk_key.objectid =
+                           cpu_to_le64(BTRFS_TREE_LOG_OBJECTID);
+                       additional_string = "log ";
+                       next_bytenr = btrfs_super_log_root(super_hdr);
+                       if (0 == next_bytenr)
+                               continue;
+                       if (state->print_mask &
+                           BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
+                               printk(KERN_INFO "log@%llu\n",
+                                      (unsigned long long)next_bytenr);
+                       break;
+               }
+
+               num_copies =
+                   btrfs_num_copies(&state->root->fs_info->mapping_tree,
+                                    next_bytenr, PAGE_SIZE);
+               if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
+                       printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
+                              (unsigned long long)next_bytenr, num_copies);
+               for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
+                       int was_created;
+
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               printk(KERN_INFO
+                                      "btrfsic_process_written_superblock("
+                                      "mirror_num=%d)\n", mirror_num);
+                       ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
+                                               &tmp_next_block_ctx,
+                                               mirror_num);
+                       if (ret) {
+                               printk(KERN_INFO
+                                      "btrfsic: btrfsic_map_block(@%llu,"
+                                      " mirror=%d) failed!\n",
+                                      (unsigned long long)next_bytenr,
+                                      mirror_num);
+                               return -1;
+                       }
+
+                       next_block = btrfsic_block_lookup_or_add(
+                                       state,
+                                       &tmp_next_block_ctx,
+                                       additional_string,
+                                       1, 0, 1,
+                                       mirror_num,
+                                       &was_created);
+                       if (NULL == next_block) {
+                               printk(KERN_INFO
+                                      "btrfsic: error, kmalloc failed!\n");
+                               btrfsic_release_block_ctx(&tmp_next_block_ctx);
+                               return -1;
+                       }
+
+                       next_block->disk_key = tmp_disk_key;
+                       if (was_created)
+                               next_block->generation =
+                                   BTRFSIC_GENERATION_UNKNOWN;
+                       l = btrfsic_block_link_lookup_or_add(
+                                       state,
+                                       &tmp_next_block_ctx,
+                                       next_block,
+                                       superblock,
+                                       BTRFSIC_GENERATION_UNKNOWN);
+                       btrfsic_release_block_ctx(&tmp_next_block_ctx);
+                       if (NULL == l)
+                               return -1;
+               }
+       }
+
+       if (-1 == btrfsic_check_all_ref_blocks(state, superblock, 0)) {
+               WARN_ON(1);
+               btrfsic_dump_tree(state);
+       }
+
+       return 0;
+}
+
+static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
+                                       struct btrfsic_block *const block,
+                                       int recursion_level)
+{
+       struct list_head *elem_ref_to;
+       int ret = 0;
+
+       if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
+               /*
+                * Note that this situation can happen in regular
+                * operation and does not indicate an error. It happens
+                * when disk blocks are freed and later reused.
+                * The check-integrity module is not aware of any
+                * block free operations, it only recognizes block
+                * write operations. Therefore it keeps the linkage
+                * information for a block until that block is
+                * rewritten. This can temporarily cause incorrect
+                * and even circular linkage information, which
+                * causes no harm unless such blocks are referenced
+                * by the most recent superblock.
+                */
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "btrfsic: abort cyclic linkage (case 1).\n");
+
+               return ret;
+       }
+
+       /*
+        * This algorithm is implemented recursively because the amount of
+        * stack space used per level is very small and the maximum recursion
+        * depth is limited.
+        */
+       list_for_each(elem_ref_to, &block->ref_to_list) {
+               const struct btrfsic_block_link *const l =
+                   list_entry(elem_ref_to, struct btrfsic_block_link,
+                              node_ref_to);
+
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "rl=%d, %c @%llu (%s/%llu/%d)"
+                              " %u* refers to %c @%llu (%s/%llu/%d)\n",
+                              recursion_level,
+                              btrfsic_get_block_type(state, block),
+                              (unsigned long long)block->logical_bytenr,
+                              block->dev_state->name,
+                              (unsigned long long)block->dev_bytenr,
+                              block->mirror_num,
+                              l->ref_cnt,
+                              btrfsic_get_block_type(state, l->block_ref_to),
+                              (unsigned long long)
+                              l->block_ref_to->logical_bytenr,
+                              l->block_ref_to->dev_state->name,
+                              (unsigned long long)l->block_ref_to->dev_bytenr,
+                              l->block_ref_to->mirror_num);
+               if (l->block_ref_to->never_written) {
+                       printk(KERN_INFO "btrfs: attempt to write superblock"
+                              " which references block %c @%llu (%s/%llu/%d)"
+                              " which is never written!\n",
+                              btrfsic_get_block_type(state, l->block_ref_to),
+                              (unsigned long long)
+                              l->block_ref_to->logical_bytenr,
+                              l->block_ref_to->dev_state->name,
+                              (unsigned long long)l->block_ref_to->dev_bytenr,
+                              l->block_ref_to->mirror_num);
+                       ret = -1;
+               } else if (!l->block_ref_to->is_iodone) {
+                       printk(KERN_INFO "btrfs: attempt to write superblock"
+                              " which references block %c @%llu (%s/%llu/%d)"
+                              " which is not yet iodone!\n",
+                              btrfsic_get_block_type(state, l->block_ref_to),
+                              (unsigned long long)
+                              l->block_ref_to->logical_bytenr,
+                              l->block_ref_to->dev_state->name,
+                              (unsigned long long)l->block_ref_to->dev_bytenr,
+                              l->block_ref_to->mirror_num);
+                       ret = -1;
+               } else if (l->parent_generation !=
+                          l->block_ref_to->generation &&
+                          BTRFSIC_GENERATION_UNKNOWN !=
+                          l->parent_generation &&
+                          BTRFSIC_GENERATION_UNKNOWN !=
+                          l->block_ref_to->generation) {
+                       printk(KERN_INFO "btrfs: attempt to write superblock"
+                              " which references block %c @%llu (%s/%llu/%d)"
+                              " with generation %llu !="
+                              " parent generation %llu!\n",
+                              btrfsic_get_block_type(state, l->block_ref_to),
+                              (unsigned long long)
+                              l->block_ref_to->logical_bytenr,
+                              l->block_ref_to->dev_state->name,
+                              (unsigned long long)l->block_ref_to->dev_bytenr,
+                              l->block_ref_to->mirror_num,
+                              (unsigned long long)l->block_ref_to->generation,
+                              (unsigned long long)l->parent_generation);
+                       ret = -1;
+               } else if (l->block_ref_to->flush_gen >
+                          l->block_ref_to->dev_state->last_flush_gen) {
+                       printk(KERN_INFO "btrfs: attempt to write superblock"
+                              " which references block %c @%llu (%s/%llu/%d)"
+                              " which is not flushed out of disk's write cache"
+                              " (block flush_gen=%llu,"
+                              " dev->flush_gen=%llu)!\n",
+                              btrfsic_get_block_type(state, l->block_ref_to),
+                              (unsigned long long)
+                              l->block_ref_to->logical_bytenr,
+                              l->block_ref_to->dev_state->name,
+                              (unsigned long long)l->block_ref_to->dev_bytenr,
+                              l->block_ref_to->mirror_num,
+                              (unsigned long long)block->flush_gen,
+                              (unsigned long long)
+                              l->block_ref_to->dev_state->last_flush_gen);
+                       ret = -1;
+               } else if (-1 == btrfsic_check_all_ref_blocks(state,
+                                                             l->block_ref_to,
+                                                             recursion_level +
+                                                             1)) {
+                       ret = -1;
+               }
+       }
+
+       return ret;
+}
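
The recursion above is bounded: once recursion_level reaches 3 + BTRFS_MAX_LEVEL the walk stops, which is what keeps stale, possibly circular linkage left over from freed-and-reused blocks from looping forever. A stripped-down userspace sketch of such a depth-limited reference walk (the structures are illustrative only):

    #include <stdio.h>

    #define MAX_DEPTH 8 /* stands in for 3 + BTRFS_MAX_LEVEL */

    struct node {
            struct node *refs[2]; /* outgoing references, NULL-terminated */
            int bad;              /* e.g. "never written" or "not yet iodone" */
    };

    /* Return -1 if any node reachable within MAX_DEPTH levels is bad, else 0. */
    static int check_refs(const struct node *n, int depth)
    {
            int i, ret = 0;

            if (depth >= MAX_DEPTH)
                    return 0; /* abort cyclic linkage, as in "case 1" above */

            for (i = 0; i < 2 && n->refs[i]; i++) {
                    if (n->refs[i]->bad)
                            ret = -1;
                    else if (check_refs(n->refs[i], depth + 1) == -1)
                            ret = -1;
            }
            return ret;
    }

    int main(void)
    {
            struct node leaf = { { 0, 0 }, 1 };
            struct node root = { { &leaf, 0 }, 0 };

            root.refs[1] = &root; /* deliberate cycle; the depth cap keeps this finite */
            printf("%d\n", check_refs(&root, 0)); /* prints -1 because leaf is bad */
            return 0;
    }
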
+
+static int btrfsic_is_block_ref_by_superblock(
+               const struct btrfsic_state *state,
+               const struct btrfsic_block *block,
+               int recursion_level)
+{
+       struct list_head *elem_ref_from;
+
+       if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
+               /* refer to comment at "abort cyclic linkage (case 1)" */
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "btrfsic: abort cyclic linkage (case 2).\n");
+
+               return 0;
+       }
+
+       /*
+        * This algorithm is implemented recursively because the amount of
+        * stack space used per level is very small and the maximum recursion
+        * depth is limited.
+        */
+       list_for_each(elem_ref_from, &block->ref_from_list) {
+               const struct btrfsic_block_link *const l =
+                   list_entry(elem_ref_from, struct btrfsic_block_link,
+                              node_ref_from);
+
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "rl=%d, %c @%llu (%s/%llu/%d)"
+                              " is ref %u* from %c @%llu (%s/%llu/%d)\n",
+                              recursion_level,
+                              btrfsic_get_block_type(state, block),
+                              (unsigned long long)block->logical_bytenr,
+                              block->dev_state->name,
+                              (unsigned long long)block->dev_bytenr,
+                              block->mirror_num,
+                              l->ref_cnt,
+                              btrfsic_get_block_type(state, l->block_ref_from),
+                              (unsigned long long)
+                              l->block_ref_from->logical_bytenr,
+                              l->block_ref_from->dev_state->name,
+                              (unsigned long long)
+                              l->block_ref_from->dev_bytenr,
+                              l->block_ref_from->mirror_num);
+               if (l->block_ref_from->is_superblock &&
+                   state->latest_superblock->dev_bytenr ==
+                   l->block_ref_from->dev_bytenr &&
+                   state->latest_superblock->dev_state->bdev ==
+                   l->block_ref_from->dev_state->bdev)
+                       return 1;
+               else if (btrfsic_is_block_ref_by_superblock(state,
+                                                           l->block_ref_from,
+                                                           recursion_level +
+                                                           1))
+                       return 1;
+       }
+
+       return 0;
+}
+
+static void btrfsic_print_add_link(const struct btrfsic_state *state,
+                                  const struct btrfsic_block_link *l)
+{
+       printk(KERN_INFO
+              "Add %u* link from %c @%llu (%s/%llu/%d)"
+              " to %c @%llu (%s/%llu/%d).\n",
+              l->ref_cnt,
+              btrfsic_get_block_type(state, l->block_ref_from),
+              (unsigned long long)l->block_ref_from->logical_bytenr,
+              l->block_ref_from->dev_state->name,
+              (unsigned long long)l->block_ref_from->dev_bytenr,
+              l->block_ref_from->mirror_num,
+              btrfsic_get_block_type(state, l->block_ref_to),
+              (unsigned long long)l->block_ref_to->logical_bytenr,
+              l->block_ref_to->dev_state->name,
+              (unsigned long long)l->block_ref_to->dev_bytenr,
+              l->block_ref_to->mirror_num);
+}
+
+static void btrfsic_print_rem_link(const struct btrfsic_state *state,
+                                  const struct btrfsic_block_link *l)
+{
+       printk(KERN_INFO
+              "Rem %u* link from %c @%llu (%s/%llu/%d)"
+              " to %c @%llu (%s/%llu/%d).\n",
+              l->ref_cnt,
+              btrfsic_get_block_type(state, l->block_ref_from),
+              (unsigned long long)l->block_ref_from->logical_bytenr,
+              l->block_ref_from->dev_state->name,
+              (unsigned long long)l->block_ref_from->dev_bytenr,
+              l->block_ref_from->mirror_num,
+              btrfsic_get_block_type(state, l->block_ref_to),
+              (unsigned long long)l->block_ref_to->logical_bytenr,
+              l->block_ref_to->dev_state->name,
+              (unsigned long long)l->block_ref_to->dev_bytenr,
+              l->block_ref_to->mirror_num);
+}
+
+static char btrfsic_get_block_type(const struct btrfsic_state *state,
+                                  const struct btrfsic_block *block)
+{
+       if (block->is_superblock &&
+           state->latest_superblock->dev_bytenr == block->dev_bytenr &&
+           state->latest_superblock->dev_state->bdev == block->dev_state->bdev)
+               return 'S';
+       else if (block->is_superblock)
+               return 's';
+       else if (block->is_metadata)
+               return 'M';
+       else
+               return 'D';
+}
+
+static void btrfsic_dump_tree(const struct btrfsic_state *state)
+{
+       btrfsic_dump_tree_sub(state, state->latest_superblock, 0);
+}
+
+static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
+                                 const struct btrfsic_block *block,
+                                 int indent_level)
+{
+       struct list_head *elem_ref_to;
+       int indent_add;
+       static char buf[80];
+       int cursor_position;
+
+       /*
+        * It would be better to fill an on-stack buffer with a complete line
+        * and dump it all at once when it is time to print a newline
+        * character.
+        */
+
+       /*
+        * This algorithm is implemented recursively because the amount of
+        * stack space used per level is very small and the maximum recursion
+        * depth is limited.
+        */
+       indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)",
+                            btrfsic_get_block_type(state, block),
+                            (unsigned long long)block->logical_bytenr,
+                            block->dev_state->name,
+                            (unsigned long long)block->dev_bytenr,
+                            block->mirror_num);
+       if (indent_level + indent_add > BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
+               printk("[...]\n");
+               return;
+       }
+       printk(buf);
+       indent_level += indent_add;
+       if (list_empty(&block->ref_to_list)) {
+               printk("\n");
+               return;
+       }
+       if (block->mirror_num > 1 &&
+           !(state->print_mask & BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS)) {
+               printk(" [...]\n");
+               return;
+       }
+
+       cursor_position = indent_level;
+       list_for_each(elem_ref_to, &block->ref_to_list) {
+               const struct btrfsic_block_link *const l =
+                   list_entry(elem_ref_to, struct btrfsic_block_link,
+                              node_ref_to);
+
+               while (cursor_position < indent_level) {
+                       printk(" ");
+                       cursor_position++;
+               }
+               if (l->ref_cnt > 1)
+                       indent_add = sprintf(buf, " %d*--> ", l->ref_cnt);
+               else
+                       indent_add = sprintf(buf, " --> ");
+               if (indent_level + indent_add >
+                   BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
+                       printk("[...]\n");
+                       cursor_position = 0;
+                       continue;
+               }
+
+               printk(buf);
+
+               btrfsic_dump_tree_sub(state, l->block_ref_to,
+                                     indent_level + indent_add);
+               cursor_position = 0;
+       }
+}
+
+static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
+               struct btrfsic_state *state,
+               struct btrfsic_block_data_ctx *next_block_ctx,
+               struct btrfsic_block *next_block,
+               struct btrfsic_block *from_block,
+               u64 parent_generation)
+{
+       struct btrfsic_block_link *l;
+
+       l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev,
+                                               next_block_ctx->dev_bytenr,
+                                               from_block->dev_state->bdev,
+                                               from_block->dev_bytenr,
+                                               &state->block_link_hashtable);
+       if (NULL == l) {
+               l = btrfsic_block_link_alloc();
+               if (NULL == l) {
+                       printk(KERN_INFO
+                              "btrfsic: error, kmalloc failed!\n");
+                       return NULL;
+               }
+
+               l->block_ref_to = next_block;
+               l->block_ref_from = from_block;
+               l->ref_cnt = 1;
+               l->parent_generation = parent_generation;
+
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       btrfsic_print_add_link(state, l);
+
+               list_add(&l->node_ref_to, &from_block->ref_to_list);
+               list_add(&l->node_ref_from, &next_block->ref_from_list);
+
+               btrfsic_block_link_hashtable_add(l,
+                                                &state->block_link_hashtable);
+       } else {
+               l->ref_cnt++;
+               l->parent_generation = parent_generation;
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       btrfsic_print_add_link(state, l);
+       }
+
+       return l;
+}
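
The link table implements a simple lookup-or-add policy: an edge between the same two disk locations is stored only once, and repeated discovery merely bumps its ref_cnt. A minimal single-list sketch of that policy (a real hashtable is bypassed here; all names are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    /* One edge (from, to) of the reference graph, counted instead of duplicated. */
    struct link {
            struct link *next;
            unsigned long long from, to;
            unsigned int ref_cnt;
    };

    static struct link *table; /* trivially small stand-in for the hashtable */

    /* Allocate the edge on first sight, otherwise just bump ref_cnt. */
    static struct link *link_lookup_or_add(unsigned long long from,
                                           unsigned long long to)
    {
            struct link *l;

            for (l = table; l; l = l->next)
                    if (l->from == from && l->to == to) {
                            l->ref_cnt++;
                            return l;
                    }

            l = calloc(1, sizeof(*l));
            if (!l)
                    return NULL;
            l->from = from;
            l->to = to;
            l->ref_cnt = 1;
            l->next = table;
            table = l;
            return l;
    }

    int main(void)
    {
            struct link *l;

            link_lookup_or_add(100, 200);
            l = link_lookup_or_add(100, 200);
            if (l)
                    printf("%u\n", l->ref_cnt); /* prints 2 */
            return 0;
    }

The matching teardown in btrfsic_unmount() decrements ref_cnt and frees a link only when the count reaches zero.
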
+
+static struct btrfsic_block *btrfsic_block_lookup_or_add(
+               struct btrfsic_state *state,
+               struct btrfsic_block_data_ctx *block_ctx,
+               const char *additional_string,
+               int is_metadata,
+               int is_iodone,
+               int never_written,
+               int mirror_num,
+               int *was_created)
+{
+       struct btrfsic_block *block;
+
+       block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev,
+                                              block_ctx->dev_bytenr,
+                                              &state->block_hashtable);
+       if (NULL == block) {
+               struct btrfsic_dev_state *dev_state;
+
+               block = btrfsic_block_alloc();
+               if (NULL == block) {
+                       printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
+                       return NULL;
+               }
+               dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev);
+               if (NULL == dev_state) {
+                       printk(KERN_INFO
+                              "btrfsic: error, lookup dev_state failed!\n");
+                       btrfsic_block_free(block);
+                       return NULL;
+               }
+               block->dev_state = dev_state;
+               block->dev_bytenr = block_ctx->dev_bytenr;
+               block->logical_bytenr = block_ctx->start;
+               block->is_metadata = is_metadata;
+               block->is_iodone = is_iodone;
+               block->never_written = never_written;
+               block->mirror_num = mirror_num;
+               if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                       printk(KERN_INFO
+                              "New %s%c-block @%llu (%s/%llu/%d)\n",
+                              additional_string,
+                              btrfsic_get_block_type(state, block),
+                              (unsigned long long)block->logical_bytenr,
+                              dev_state->name,
+                              (unsigned long long)block->dev_bytenr,
+                              mirror_num);
+               list_add(&block->all_blocks_node, &state->all_blocks_list);
+               btrfsic_block_hashtable_add(block, &state->block_hashtable);
+               if (NULL != was_created)
+                       *was_created = 1;
+       } else {
+               if (NULL != was_created)
+                       *was_created = 0;
+       }
+
+       return block;
+}
+
+static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
+                                          u64 bytenr,
+                                          struct btrfsic_dev_state *dev_state,
+                                          u64 dev_bytenr, char *data)
+{
+       int num_copies;
+       int mirror_num;
+       int ret;
+       struct btrfsic_block_data_ctx block_ctx;
+       int match = 0;
+
+       num_copies = btrfs_num_copies(&state->root->fs_info->mapping_tree,
+                                     bytenr, PAGE_SIZE);
+
+       for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
+               ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
+                                       &block_ctx, mirror_num);
+               if (ret) {
+                       printk(KERN_INFO "btrfsic:"
+                              " btrfsic_map_block(logical @%llu,"
+                              " mirror %d) failed!\n",
+                              (unsigned long long)bytenr, mirror_num);
+                       continue;
+               }
+
+               if (dev_state->bdev == block_ctx.dev->bdev &&
+                   dev_bytenr == block_ctx.dev_bytenr) {
+                       match++;
+                       btrfsic_release_block_ctx(&block_ctx);
+                       break;
+               }
+               btrfsic_release_block_ctx(&block_ctx);
+       }
+
+       if (!match) {
+               printk(KERN_INFO "btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio,"
+                      " buffer->log_bytenr=%llu, submit_bio(bdev=%s,"
+                      " phys_bytenr=%llu)!\n",
+                      (unsigned long long)bytenr, dev_state->name,
+                      (unsigned long long)dev_bytenr);
+               for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
+                       ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
+                                               &block_ctx, mirror_num);
+                       if (ret)
+                               continue;
+
+                       printk(KERN_INFO "Read logical bytenr @%llu maps to"
+                              " (%s/%llu/%d)\n",
+                              (unsigned long long)bytenr,
+                              block_ctx.dev->name,
+                              (unsigned long long)block_ctx.dev_bytenr,
+                              mirror_num);
+               }
+               WARN_ON(1);
+       }
+}
+
+static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
+               struct block_device *bdev)
+{
+       struct btrfsic_dev_state *ds;
+
+       ds = btrfsic_dev_state_hashtable_lookup(bdev,
+                                               &btrfsic_dev_state_hashtable);
+       return ds;
+}
+
+int btrfsic_submit_bh(int rw, struct buffer_head *bh)
+{
+       struct btrfsic_dev_state *dev_state;
+
+       if (!btrfsic_is_initialized)
+               return submit_bh(rw, bh);
+
+       mutex_lock(&btrfsic_mutex);
+       /* since btrfsic_submit_bh() might also be called before
+        * btrfsic_mount(), this might return NULL */
+       dev_state = btrfsic_dev_state_lookup(bh->b_bdev);
+
+       /* Only called to write the superblock (incl. FLUSH/FUA) */
+       if (NULL != dev_state &&
+           (rw & WRITE) && bh->b_size > 0) {
+               u64 dev_bytenr;
+
+               dev_bytenr = 4096 * bh->b_blocknr;
+               if (dev_state->state->print_mask &
+                   BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
+                       printk(KERN_INFO
+                              "submit_bh(rw=0x%x, blocknr=%lu (bytenr %llu),"
+                              " size=%lu, data=%p, bdev=%p)\n",
+                              rw, (unsigned long)bh->b_blocknr,
+                              (unsigned long long)dev_bytenr,
+                              (unsigned long)bh->b_size, bh->b_data,
+                              bh->b_bdev);
+               btrfsic_process_written_block(dev_state, dev_bytenr,
+                                             bh->b_data, bh->b_size, NULL,
+                                             NULL, bh, rw);
+       } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
+               if (dev_state->state->print_mask &
+                   BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
+                       printk(KERN_INFO
+                              "submit_bh(rw=0x%x) FLUSH, bdev=%p)\n",
+                              rw, bh->b_bdev);
+               if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
+                       if ((dev_state->state->print_mask &
+                            (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
+                             BTRFSIC_PRINT_MASK_VERBOSE)))
+                               printk(KERN_INFO
+                                      "btrfsic_submit_bh(%s) with FLUSH"
+                                      " but dummy block already in use"
+                                      " (ignored)!\n",
+                                      dev_state->name);
+               } else {
+                       struct btrfsic_block *const block =
+                               &dev_state->dummy_block_for_bio_bh_flush;
+
+                       block->is_iodone = 0;
+                       block->never_written = 0;
+                       block->iodone_w_error = 0;
+                       block->flush_gen = dev_state->last_flush_gen + 1;
+                       block->submit_bio_bh_rw = rw;
+                       block->orig_bio_bh_private = bh->b_private;
+                       block->orig_bio_bh_end_io.bh = bh->b_end_io;
+                       block->next_in_same_bio = NULL;
+                       bh->b_private = block;
+                       bh->b_end_io = btrfsic_bh_end_io;
+               }
+       }
+       mutex_unlock(&btrfsic_mutex);
+       return submit_bh(rw, bh);
+}
+
+void btrfsic_submit_bio(int rw, struct bio *bio)
+{
+       struct btrfsic_dev_state *dev_state;
+
+       if (!btrfsic_is_initialized) {
+               submit_bio(rw, bio);
+               return;
+       }
+
+       mutex_lock(&btrfsic_mutex);
+       /* since btrfsic_submit_bio() is also called before
+        * btrfsic_mount(), this might return NULL */
+       dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
+       if (NULL != dev_state &&
+           (rw & WRITE) && NULL != bio->bi_io_vec) {
+               unsigned int i;
+               u64 dev_bytenr;
+               int bio_is_patched;
+
+               dev_bytenr = 512 * bio->bi_sector;
+               bio_is_patched = 0;
+               if (dev_state->state->print_mask &
+                   BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
+                       printk(KERN_INFO
+                              "submit_bio(rw=0x%x, bi_vcnt=%u,"
+                              " bi_sector=%lu (bytenr %llu), bi_bdev=%p)\n",
+                              rw, bio->bi_vcnt, (unsigned long)bio->bi_sector,
+                              (unsigned long long)dev_bytenr,
+                              bio->bi_bdev);
+
+               for (i = 0; i < bio->bi_vcnt; i++) {
+                       u8 *mapped_data;
+
+                       mapped_data = kmap(bio->bi_io_vec[i].bv_page);
+                       if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
+                            BTRFSIC_PRINT_MASK_VERBOSE) ==
+                           (dev_state->state->print_mask &
+                            (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
+                             BTRFSIC_PRINT_MASK_VERBOSE)))
+                               printk(KERN_INFO
+                                      "#%u: page=%p, mapped=%p, len=%u,"
+                                      " offset=%u\n",
+                                      i, bio->bi_io_vec[i].bv_page,
+                                      mapped_data,
+                                      bio->bi_io_vec[i].bv_len,
+                                      bio->bi_io_vec[i].bv_offset);
+                       btrfsic_process_written_block(dev_state, dev_bytenr,
+                                                     mapped_data,
+                                                     bio->bi_io_vec[i].bv_len,
+                                                     bio, &bio_is_patched,
+                                                     NULL, rw);
+                       kunmap(bio->bi_io_vec[i].bv_page);
+                       dev_bytenr += bio->bi_io_vec[i].bv_len;
+               }
+       } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
+               if (dev_state->state->print_mask &
+                   BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
+                       printk(KERN_INFO
+                              "submit_bio(rw=0x%x) FLUSH, bdev=%p)\n",
+                              rw, bio->bi_bdev);
+               if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
+                       if ((dev_state->state->print_mask &
+                            (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
+                             BTRFSIC_PRINT_MASK_VERBOSE)))
+                               printk(KERN_INFO
+                                      "btrfsic_submit_bio(%s) with FLUSH"
+                                      " but dummy block already in use"
+                                      " (ignored)!\n",
+                                      dev_state->name);
+               } else {
+                       struct btrfsic_block *const block =
+                               &dev_state->dummy_block_for_bio_bh_flush;
+
+                       block->is_iodone = 0;
+                       block->never_written = 0;
+                       block->iodone_w_error = 0;
+                       block->flush_gen = dev_state->last_flush_gen + 1;
+                       block->submit_bio_bh_rw = rw;
+                       block->orig_bio_bh_private = bio->bi_private;
+                       block->orig_bio_bh_end_io.bio = bio->bi_end_io;
+                       block->next_in_same_bio = NULL;
+                       bio->bi_private = block;
+                       bio->bi_end_io = btrfsic_bio_end_io;
+               }
+       }
+       mutex_unlock(&btrfsic_mutex);
+
+       submit_bio(rw, bio);
+}
+
+int btrfsic_mount(struct btrfs_root *root,
+                 struct btrfs_fs_devices *fs_devices,
+                 int including_extent_data, u32 print_mask)
+{
+       int ret;
+       struct btrfsic_state *state;
+       struct list_head *dev_head = &fs_devices->devices;
+       struct btrfs_device *device;
+
+       state = kzalloc(sizeof(*state), GFP_NOFS);
+       if (NULL == state) {
+               printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n");
+               return -1;
+       }
+
+       if (!btrfsic_is_initialized) {
+               mutex_init(&btrfsic_mutex);
+               btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable);
+               btrfsic_is_initialized = 1;
+       }
+       mutex_lock(&btrfsic_mutex);
+       state->root = root;
+       state->print_mask = print_mask;
+       state->include_extent_data = including_extent_data;
+       state->csum_size = 0;
+       INIT_LIST_HEAD(&state->all_blocks_list);
+       btrfsic_block_hashtable_init(&state->block_hashtable);
+       btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
+       state->max_superblock_generation = 0;
+       state->latest_superblock = NULL;
+
+       list_for_each_entry(device, dev_head, dev_list) {
+               struct btrfsic_dev_state *ds;
+               char *p;
+
+               if (!device->bdev || !device->name)
+                       continue;
+
+               ds = btrfsic_dev_state_alloc();
+               if (NULL == ds) {
+                       printk(KERN_INFO
+                              "btrfs check-integrity: kmalloc() failed!\n");
+                       mutex_unlock(&btrfsic_mutex);
+                       return -1;
+               }
+               ds->bdev = device->bdev;
+               ds->state = state;
+               bdevname(ds->bdev, ds->name);
+               ds->name[BDEVNAME_SIZE - 1] = '\0';
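+               /* keep only the basename of the bdevname() result */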
+               for (p = ds->name; *p != '\0'; p++);
+               while (p > ds->name && *p != '/')
+                       p--;
+               if (*p == '/')
+                       p++;
+               strlcpy(ds->name, p, sizeof(ds->name));
+               btrfsic_dev_state_hashtable_add(ds,
+                                               &btrfsic_dev_state_hashtable);
+       }
+
+       ret = btrfsic_process_superblock(state, fs_devices);
+       if (0 != ret) {
+               mutex_unlock(&btrfsic_mutex);
+               btrfsic_unmount(root, fs_devices);
+               return ret;
+       }
+
+       if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_DATABASE)
+               btrfsic_dump_database(state);
+       if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_TREE)
+               btrfsic_dump_tree(state);
+
+       mutex_unlock(&btrfsic_mutex);
+       return 0;
+}
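
The basename trimming in btrfsic_mount() scans to the end of the bdevname() string, walks back to the last '/', and copies the tail over the front of the buffer. The same effect, expressed with strrchr() in a standalone sketch (purely illustrative, not a drop-in for the code above):

    #include <stdio.h>
    #include <string.h>

    /* Keep only the basename of a device name such as "cciss/c0d0". */
    static void keep_basename(char *name)
    {
            const char *slash = strrchr(name, '/');

            if (slash)
                    memmove(name, slash + 1, strlen(slash + 1) + 1);
    }

    int main(void)
    {
            char name[32] = "cciss/c0d0";

            keep_basename(name);
            printf("%s\n", name); /* prints "c0d0" */
            return 0;
    }
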
+
+void btrfsic_unmount(struct btrfs_root *root,
+                    struct btrfs_fs_devices *fs_devices)
+{
+       struct list_head *elem_all;
+       struct list_head *tmp_all;
+       struct btrfsic_state *state;
+       struct list_head *dev_head = &fs_devices->devices;
+       struct btrfs_device *device;
+
+       if (!btrfsic_is_initialized)
+               return;
+
+       mutex_lock(&btrfsic_mutex);
+
+       state = NULL;
+       list_for_each_entry(device, dev_head, dev_list) {
+               struct btrfsic_dev_state *ds;
+
+               if (!device->bdev || !device->name)
+                       continue;
+
+               ds = btrfsic_dev_state_hashtable_lookup(
+                               device->bdev,
+                               &btrfsic_dev_state_hashtable);
+               if (NULL != ds) {
+                       state = ds->state;
+                       btrfsic_dev_state_hashtable_remove(ds);
+                       btrfsic_dev_state_free(ds);
+               }
+       }
+
+       if (NULL == state) {
+               printk(KERN_INFO
+                      "btrfsic: error, cannot find state information"
+                      " on umount!\n");
+               mutex_unlock(&btrfsic_mutex);
+               return;
+       }
+
+       /*
+        * Don't care about keeping the lists' state up to date,
+        * just free all memory that was allocated dynamically.
+        * Free the blocks and the block_links.
+        */
+       list_for_each_safe(elem_all, tmp_all, &state->all_blocks_list) {
+               struct btrfsic_block *const b_all =
+                   list_entry(elem_all, struct btrfsic_block,
+                              all_blocks_node);
+               struct list_head *elem_ref_to;
+               struct list_head *tmp_ref_to;
+
+               list_for_each_safe(elem_ref_to, tmp_ref_to,
+                                  &b_all->ref_to_list) {
+                       struct btrfsic_block_link *const l =
+                           list_entry(elem_ref_to,
+                                      struct btrfsic_block_link,
+                                      node_ref_to);
+
+                       if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
+                               btrfsic_print_rem_link(state, l);
+
+                       l->ref_cnt--;
+                       if (0 == l->ref_cnt)
+                               btrfsic_block_link_free(l);
+               }
+
+               if (b_all->is_iodone)
+                       btrfsic_block_free(b_all);
+               else
+                       printk(KERN_INFO "btrfs: attempt to free %c-block"
+                              " @%llu (%s/%llu/%d) on umount which is"
+                              " not yet iodone!\n",
+                              btrfsic_get_block_type(state, b_all),
+                              (unsigned long long)b_all->logical_bytenr,
+                              b_all->dev_state->name,
+                              (unsigned long long)b_all->dev_bytenr,
+                              b_all->mirror_num);
+       }
+
+       mutex_unlock(&btrfsic_mutex);
+
+       kfree(state);
+}
diff --git a/fs/btrfs/check-integrity.h b/fs/btrfs/check-integrity.h
new file mode 100644 (file)
index 0000000..8b59175
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) STRATO AG 2011.  All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+
+#if !defined(__BTRFS_CHECK_INTEGRITY__)
+#define __BTRFS_CHECK_INTEGRITY__
+
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+int btrfsic_submit_bh(int rw, struct buffer_head *bh);
+void btrfsic_submit_bio(int rw, struct bio *bio);
+#else
+#define btrfsic_submit_bh submit_bh
+#define btrfsic_submit_bio submit_bio
+#endif
+
+int btrfsic_mount(struct btrfs_root *root,
+                 struct btrfs_fs_devices *fs_devices,
+                 int including_extent_data, u32 print_mask);
+void btrfsic_unmount(struct btrfs_root *root,
+                    struct btrfs_fs_devices *fs_devices);
+
+#endif
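
The header keeps call sites free of #ifdefs: with CONFIG_BTRFS_FS_CHECK_INTEGRITY disabled the btrfsic_submit_* names are plain macro aliases for submit_bh()/submit_bio(), and with it enabled they resolve to the wrapper functions defined in check-integrity.c. A small userspace sketch of the same compile-time wrapper pattern (the names here are made up for illustration):

    #include <stdio.h>

    /* The "real" submit path that always exists. */
    static int real_submit(int rw)
    {
            printf("submitting rw=%d\n", rw);
            return 0;
    }

    #ifdef WITH_CHECKING
    /* With checking enabled, the wrapper inspects the request and then forwards. */
    static int checked_submit(int rw)
    {
            printf("checking rw=%d first\n", rw);
            return real_submit(rw);
    }
    #else
    /* With checking disabled, the wrapper name is just an alias. */
    #define checked_submit real_submit
    #endif

    int main(void)
    {
            return checked_submit(1); /* callers look identical either way */
    }

Callers in the btrfs write paths can therefore always use the btrfsic_* names without any conditional compilation of their own.
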
index dede441bdeee2678225187bece170710abe1a9b4..0639a555e16ed1975702ed5509dc9bc1c4dbf490 100644 (file)
@@ -240,7 +240,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 
        cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
                                     new_root_objectid, &disk_key, level,
-                                    buf->start, 0);
+                                    buf->start, 0, 1);
        if (IS_ERR(cow))
                return PTR_ERR(cow);
 
@@ -261,9 +261,9 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
 
        WARN_ON(btrfs_header_generation(buf) > trans->transid);
        if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
-               ret = btrfs_inc_ref(trans, root, cow, 1);
+               ret = btrfs_inc_ref(trans, root, cow, 1, 1);
        else
-               ret = btrfs_inc_ref(trans, root, cow, 0);
+               ret = btrfs_inc_ref(trans, root, cow, 0, 1);
 
        if (ret)
                return ret;
@@ -350,14 +350,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                if ((owner == root->root_key.objectid ||
                     root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
                    !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
-                       ret = btrfs_inc_ref(trans, root, buf, 1);
+                       ret = btrfs_inc_ref(trans, root, buf, 1, 1);
                        BUG_ON(ret);
 
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID) {
-                               ret = btrfs_dec_ref(trans, root, buf, 0);
+                               ret = btrfs_dec_ref(trans, root, buf, 0, 1);
                                BUG_ON(ret);
-                               ret = btrfs_inc_ref(trans, root, cow, 1);
+                               ret = btrfs_inc_ref(trans, root, cow, 1, 1);
                                BUG_ON(ret);
                        }
                        new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
@@ -365,9 +365,9 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
 
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID)
-                               ret = btrfs_inc_ref(trans, root, cow, 1);
+                               ret = btrfs_inc_ref(trans, root, cow, 1, 1);
                        else
-                               ret = btrfs_inc_ref(trans, root, cow, 0);
+                               ret = btrfs_inc_ref(trans, root, cow, 0, 1);
                        BUG_ON(ret);
                }
                if (new_flags != 0) {
@@ -381,11 +381,11 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
                        if (root->root_key.objectid ==
                            BTRFS_TREE_RELOC_OBJECTID)
-                               ret = btrfs_inc_ref(trans, root, cow, 1);
+                               ret = btrfs_inc_ref(trans, root, cow, 1, 1);
                        else
-                               ret = btrfs_inc_ref(trans, root, cow, 0);
+                               ret = btrfs_inc_ref(trans, root, cow, 0, 1);
                        BUG_ON(ret);
-                       ret = btrfs_dec_ref(trans, root, buf, 1);
+                       ret = btrfs_dec_ref(trans, root, buf, 1, 1);
                        BUG_ON(ret);
                }
                clean_tree_block(trans, root, buf);
@@ -446,7 +446,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 
        cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
                                     root->root_key.objectid, &disk_key,
-                                    level, search_start, empty_size);
+                                    level, search_start, empty_size, 1);
        if (IS_ERR(cow))
                return PTR_ERR(cow);
 
@@ -484,7 +484,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                rcu_assign_pointer(root->node, cow);
 
                btrfs_free_tree_block(trans, root, buf, parent_start,
-                                     last_ref);
+                                     last_ref, 1);
                free_extent_buffer(buf);
                add_root_to_dirty_list(root);
        } else {
@@ -500,7 +500,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
                                              trans->transid);
                btrfs_mark_buffer_dirty(parent);
                btrfs_free_tree_block(trans, root, buf, parent_start,
-                                     last_ref);
+                                     last_ref, 1);
        }
        if (unlock_orig)
                btrfs_tree_unlock(buf);
@@ -957,7 +957,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                free_extent_buffer(mid);
 
                root_sub_used(root, mid->len);
-               btrfs_free_tree_block(trans, root, mid, 0, 1);
+               btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
                /* once for the root ptr */
                free_extent_buffer(mid);
                return 0;
@@ -1015,7 +1015,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                        if (wret)
                                ret = wret;
                        root_sub_used(root, right->len);
-                       btrfs_free_tree_block(trans, root, right, 0, 1);
+                       btrfs_free_tree_block(trans, root, right, 0, 1, 0);
                        free_extent_buffer(right);
                        right = NULL;
                } else {
@@ -1055,7 +1055,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                if (wret)
                        ret = wret;
                root_sub_used(root, mid->len);
-               btrfs_free_tree_block(trans, root, mid, 0, 1);
+               btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
                free_extent_buffer(mid);
                mid = NULL;
        } else {
@@ -2089,7 +2089,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 
        c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
                                   root->root_key.objectid, &lower_key,
-                                  level, root->node->start, 0);
+                                  level, root->node->start, 0, 0);
        if (IS_ERR(c))
                return PTR_ERR(c);
 
@@ -2216,7 +2216,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
 
        split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
                                        root->root_key.objectid,
-                                       &disk_key, level, c->start, 0);
+                                       &disk_key, level, c->start, 0, 0);
        if (IS_ERR(split))
                return PTR_ERR(split);
 
@@ -2970,7 +2970,7 @@ again:
 
        right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
                                        root->root_key.objectid,
-                                       &disk_key, 0, l->start, 0);
+                                       &disk_key, 0, l->start, 0, 0);
        if (IS_ERR(right))
                return PTR_ERR(right);
 
@@ -3781,7 +3781,7 @@ static noinline int btrfs_del_leaf(struct btrfs_trans_handle *trans,
 
        root_sub_used(root, leaf->len);
 
-       btrfs_free_tree_block(trans, root, leaf, 0, 1);
+       btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
        return 0;
 }
 /*
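The ctree.c hunks above thread one extra trailing argument through btrfs_inc_ref(), btrfs_dec_ref(), btrfs_alloc_free_block() and btrfs_free_tree_block(). The ctree.h prototypes further down name it for_cow: judging from the call sites, it is 1 when the reference change is itself part of COWing a block (update_ref_for_cow, __btrfs_cow_block) and 0 for ordinary tree modifications (balance_level, insert_new_root, split_node, btrfs_del_leaf). The delayed-ref hunks later in this diff forward the flag to need_ref_seq(), which decides whether the resulting delayed ref is assigned a sequence number for backref walking. Two calls from the hunks above, annotated for contrast:

	/* __btrfs_cow_block(): the free is part of a COW, so for_cow == 1 */
	btrfs_free_tree_block(trans, root, buf, parent_start, last_ref, 1);

	/* balance_level(): an ordinary modification, for_cow == 0, so the
	 * delayed ref it queues may get a sequence number via need_ref_seq() */
	btrfs_free_tree_block(trans, root, mid, 0, 1, 0);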
index 67385033323d6e49817398a1df9b1596df07e839..27ebe61d3cccd0233518d743216dce4b7b8c933f 100644 (file)
@@ -86,6 +86,9 @@ struct btrfs_ordered_sum;
 /* holds checksums of all the data extents */
 #define BTRFS_CSUM_TREE_OBJECTID 7ULL
 
+/* for storing balance parameters in the root tree */
+#define BTRFS_BALANCE_OBJECTID -4ULL
+
 /* orphan objectid for tracking unlinked/truncated files */
 #define BTRFS_ORPHAN_OBJECTID -5ULL
 
@@ -692,6 +695,54 @@ struct btrfs_root_ref {
        __le16 name_len;
 } __attribute__ ((__packed__));
 
+struct btrfs_disk_balance_args {
+       /*
+        * profiles to operate on, single is denoted by
+        * BTRFS_AVAIL_ALLOC_BIT_SINGLE
+        */
+       __le64 profiles;
+
+       /* usage filter */
+       __le64 usage;
+
+       /* devid filter */
+       __le64 devid;
+
+       /* devid subset filter [pstart..pend) */
+       __le64 pstart;
+       __le64 pend;
+
+       /* btrfs virtual address space subset filter [vstart..vend) */
+       __le64 vstart;
+       __le64 vend;
+
+       /*
+        * profile to convert to, single is denoted by
+        * BTRFS_AVAIL_ALLOC_BIT_SINGLE
+        */
+       __le64 target;
+
+       /* BTRFS_BALANCE_ARGS_* */
+       __le64 flags;
+
+       __le64 unused[8];
+} __attribute__ ((__packed__));
+
+/*
+ * store balance parameters to disk so that balance can be properly
+ * resumed after crash or unmount
+ */
+struct btrfs_balance_item {
+       /* BTRFS_BALANCE_* */
+       __le64 flags;
+
+       struct btrfs_disk_balance_args data;
+       struct btrfs_disk_balance_args meta;
+       struct btrfs_disk_balance_args sys;
+
+       __le64 unused[4];
+} __attribute__ ((__packed__));
+
 #define BTRFS_FILE_EXTENT_INLINE 0
 #define BTRFS_FILE_EXTENT_REG 1
 #define BTRFS_FILE_EXTENT_PREALLOC 2
@@ -751,14 +802,32 @@ struct btrfs_csum_item {
 } __attribute__ ((__packed__));
 
 /* different types of block groups (and chunks) */
-#define BTRFS_BLOCK_GROUP_DATA     (1 << 0)
-#define BTRFS_BLOCK_GROUP_SYSTEM   (1 << 1)
-#define BTRFS_BLOCK_GROUP_METADATA (1 << 2)
-#define BTRFS_BLOCK_GROUP_RAID0    (1 << 3)
-#define BTRFS_BLOCK_GROUP_RAID1    (1 << 4)
-#define BTRFS_BLOCK_GROUP_DUP     (1 << 5)
-#define BTRFS_BLOCK_GROUP_RAID10   (1 << 6)
-#define BTRFS_NR_RAID_TYPES       5
+#define BTRFS_BLOCK_GROUP_DATA         (1ULL << 0)
+#define BTRFS_BLOCK_GROUP_SYSTEM       (1ULL << 1)
+#define BTRFS_BLOCK_GROUP_METADATA     (1ULL << 2)
+#define BTRFS_BLOCK_GROUP_RAID0                (1ULL << 3)
+#define BTRFS_BLOCK_GROUP_RAID1                (1ULL << 4)
+#define BTRFS_BLOCK_GROUP_DUP          (1ULL << 5)
+#define BTRFS_BLOCK_GROUP_RAID10       (1ULL << 6)
+#define BTRFS_BLOCK_GROUP_RESERVED     BTRFS_AVAIL_ALLOC_BIT_SINGLE
+#define BTRFS_NR_RAID_TYPES            5
+
+#define BTRFS_BLOCK_GROUP_TYPE_MASK    (BTRFS_BLOCK_GROUP_DATA |    \
+                                        BTRFS_BLOCK_GROUP_SYSTEM |  \
+                                        BTRFS_BLOCK_GROUP_METADATA)
+
+#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 |   \
+                                        BTRFS_BLOCK_GROUP_RAID1 |   \
+                                        BTRFS_BLOCK_GROUP_DUP |     \
+                                        BTRFS_BLOCK_GROUP_RAID10)
+/*
+ * We need a bit for restriper to be able to tell when chunks of type
+ * SINGLE are available.  This "extended" profile format is used in
+ * fs_info->avail_*_alloc_bits (in-memory) and balance item fields
+ * (on-disk).  The corresponding on-disk bit in chunk.type is reserved
+ * to avoid remappings between the two formats in the future.
+ */
+#define BTRFS_AVAIL_ALLOC_BIT_SINGLE   (1ULL << 48)
 
 struct btrfs_block_group_item {
        __le64 used;
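The comment above describes the extended profile format: a chunk type that carries none of the RAID0/RAID1/DUP/RAID10 bits is a plain single chunk, and in fs_info->avail_*_alloc_bits and the balance item it is represented explicitly by bit 48 (BTRFS_AVAIL_ALLOC_BIT_SINGLE) so the restriper can match on it. A small standalone illustration of that mapping; the helper name is made up and the in-kernel conversion is not part of this excerpt:

#include <stdio.h>
#include <stdint.h>

/* values mirrored from the defines above */
#define PROFILE_MASK   ((1ULL << 3) | (1ULL << 4) | (1ULL << 5) | (1ULL << 6))
#define BIT_SINGLE     (1ULL << 48)

/* hypothetical helper: turn an on-disk chunk type into the extended format */
static uint64_t extended_profile(uint64_t chunk_type)
{
        if (chunk_type & PROFILE_MASK)
                return chunk_type;              /* RAID0/1/10/DUP is already explicit */
        return chunk_type | BIT_SINGLE;         /* single chunks get their own bit */
}

int main(void)
{
        uint64_t data_raid1  = (1ULL << 0) | (1ULL << 4);   /* DATA | RAID1 */
        uint64_t meta_single = (1ULL << 2);                 /* METADATA, no profile bit */

        printf("%#llx\n", (unsigned long long)extended_profile(data_raid1));
        printf("%#llx\n", (unsigned long long)extended_profile(meta_single));
        return 0;
}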
@@ -916,6 +985,7 @@ struct btrfs_block_group_cache {
 struct reloc_control;
 struct btrfs_device;
 struct btrfs_fs_devices;
+struct btrfs_balance_control;
 struct btrfs_delayed_root;
 struct btrfs_fs_info {
        u8 fsid[BTRFS_FSID_SIZE];
@@ -971,7 +1041,7 @@ struct btrfs_fs_info {
         * is required instead of the faster short fsync log commits
         */
        u64 last_trans_log_full_commit;
-       unsigned long mount_opt:20;
+       unsigned long mount_opt:21;
        unsigned long compress_type:4;
        u64 max_inline;
        u64 alloc_start;
@@ -1132,12 +1202,23 @@ struct btrfs_fs_info {
        spinlock_t ref_cache_lock;
        u64 total_ref_cache_size;
 
+       /*
+        * these three are in extended format (availability of single
+        * chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
+        * types are denoted by corresponding BTRFS_BLOCK_GROUP_* bits)
+        */
        u64 avail_data_alloc_bits;
        u64 avail_metadata_alloc_bits;
        u64 avail_system_alloc_bits;
-       u64 data_alloc_profile;
-       u64 metadata_alloc_profile;
-       u64 system_alloc_profile;
+
+       /* restriper state */
+       spinlock_t balance_lock;
+       struct mutex balance_mutex;
+       atomic_t balance_running;
+       atomic_t balance_pause_req;
+       atomic_t balance_cancel_req;
+       struct btrfs_balance_control *balance_ctl;
+       wait_queue_head_t balance_wait_q;
 
        unsigned data_chunk_allocations;
        unsigned metadata_ratio;
@@ -1155,6 +1236,10 @@ struct btrfs_fs_info {
        int scrub_workers_refcnt;
        struct btrfs_workers scrub_workers;
 
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+       u32 check_integrity_print_mask;
+#endif
+
        /* filesystem state */
        u64 fs_state;
 
@@ -1383,6 +1468,8 @@ struct btrfs_ioctl_defrag_range_args {
 #define BTRFS_DEV_ITEM_KEY     216
 #define BTRFS_CHUNK_ITEM_KEY   228
 
+#define BTRFS_BALANCE_ITEM_KEY 248
+
 /*
  * string items are for debugging.  They just store a short string of
  * data in the FS
@@ -1413,6 +1500,9 @@ struct btrfs_ioctl_defrag_range_args {
 #define BTRFS_MOUNT_AUTO_DEFRAG                (1 << 16)
 #define BTRFS_MOUNT_INODE_MAP_CACHE    (1 << 17)
 #define BTRFS_MOUNT_RECOVERY           (1 << 18)
+#define BTRFS_MOUNT_SKIP_BALANCE       (1 << 19)
+#define BTRFS_MOUNT_CHECK_INTEGRITY    (1 << 20)
+#define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
 
 #define btrfs_clear_opt(o, opt)                ((o) &= ~BTRFS_MOUNT_##opt)
 #define btrfs_set_opt(o, opt)          ((o) |= BTRFS_MOUNT_##opt)
@@ -2077,8 +2167,86 @@ BTRFS_SETGET_STACK_FUNCS(backup_bytes_used, struct btrfs_root_backup,
 BTRFS_SETGET_STACK_FUNCS(backup_num_devices, struct btrfs_root_backup,
                   num_devices, 64);
 
-/* struct btrfs_super_block */
+/* struct btrfs_balance_item */
+BTRFS_SETGET_FUNCS(balance_flags, struct btrfs_balance_item, flags, 64);
 
+static inline void btrfs_balance_data(struct extent_buffer *eb,
+                                     struct btrfs_balance_item *bi,
+                                     struct btrfs_disk_balance_args *ba)
+{
+       read_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
+}
+
+static inline void btrfs_set_balance_data(struct extent_buffer *eb,
+                                         struct btrfs_balance_item *bi,
+                                         struct btrfs_disk_balance_args *ba)
+{
+       write_eb_member(eb, bi, struct btrfs_balance_item, data, ba);
+}
+
+static inline void btrfs_balance_meta(struct extent_buffer *eb,
+                                     struct btrfs_balance_item *bi,
+                                     struct btrfs_disk_balance_args *ba)
+{
+       read_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
+}
+
+static inline void btrfs_set_balance_meta(struct extent_buffer *eb,
+                                         struct btrfs_balance_item *bi,
+                                         struct btrfs_disk_balance_args *ba)
+{
+       write_eb_member(eb, bi, struct btrfs_balance_item, meta, ba);
+}
+
+static inline void btrfs_balance_sys(struct extent_buffer *eb,
+                                    struct btrfs_balance_item *bi,
+                                    struct btrfs_disk_balance_args *ba)
+{
+       read_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
+}
+
+static inline void btrfs_set_balance_sys(struct extent_buffer *eb,
+                                        struct btrfs_balance_item *bi,
+                                        struct btrfs_disk_balance_args *ba)
+{
+       write_eb_member(eb, bi, struct btrfs_balance_item, sys, ba);
+}
+
+static inline void
+btrfs_disk_balance_args_to_cpu(struct btrfs_balance_args *cpu,
+                              struct btrfs_disk_balance_args *disk)
+{
+       memset(cpu, 0, sizeof(*cpu));
+
+       cpu->profiles = le64_to_cpu(disk->profiles);
+       cpu->usage = le64_to_cpu(disk->usage);
+       cpu->devid = le64_to_cpu(disk->devid);
+       cpu->pstart = le64_to_cpu(disk->pstart);
+       cpu->pend = le64_to_cpu(disk->pend);
+       cpu->vstart = le64_to_cpu(disk->vstart);
+       cpu->vend = le64_to_cpu(disk->vend);
+       cpu->target = le64_to_cpu(disk->target);
+       cpu->flags = le64_to_cpu(disk->flags);
+}
+
+static inline void
+btrfs_cpu_balance_args_to_disk(struct btrfs_disk_balance_args *disk,
+                              struct btrfs_balance_args *cpu)
+{
+       memset(disk, 0, sizeof(*disk));
+
+       disk->profiles = cpu_to_le64(cpu->profiles);
+       disk->usage = cpu_to_le64(cpu->usage);
+       disk->devid = cpu_to_le64(cpu->devid);
+       disk->pstart = cpu_to_le64(cpu->pstart);
+       disk->pend = cpu_to_le64(cpu->pend);
+       disk->vstart = cpu_to_le64(cpu->vstart);
+       disk->vend = cpu_to_le64(cpu->vend);
+       disk->target = cpu_to_le64(cpu->target);
+       disk->flags = cpu_to_le64(cpu->flags);
+}
+
+/* struct btrfs_super_block */
 BTRFS_SETGET_STACK_FUNCS(super_bytenr, struct btrfs_super_block, bytenr, 64);
 BTRFS_SETGET_STACK_FUNCS(super_flags, struct btrfs_super_block, flags, 64);
 BTRFS_SETGET_STACK_FUNCS(super_generation, struct btrfs_super_block,
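With BTRFS_BALANCE_OBJECTID, BTRFS_BALANCE_ITEM_KEY and the accessors above, a pending balance can be written to and read back from the tree of tree roots, which is what lets close_ctree() pause it and btrfs_recover_balance() (called from open_ctree() later in this diff) resume it after a remount. The real lookup lives in code not shown in this excerpt; the following is only a sketch of how such an item could be located and decoded with the helpers defined here, assuming struct btrfs_balance_args is the CPU-side mirror declared elsewhere:

static int example_read_balance_item(struct btrfs_root *tree_root,
                                     struct btrfs_balance_args *data_args)
{
        struct btrfs_path *path;
        struct btrfs_balance_item *item;
        struct btrfs_disk_balance_args disk;
        struct btrfs_key key;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        key.objectid = BTRFS_BALANCE_OBJECTID;
        key.type = BTRFS_BALANCE_ITEM_KEY;
        key.offset = 0;

        ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
        if (ret > 0)
                ret = -ENOENT;          /* no balance was interrupted */
        if (ret)
                goto out;

        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                              struct btrfs_balance_item);
        btrfs_balance_data(path->nodes[0], item, &disk);
        btrfs_disk_balance_args_to_cpu(data_args, &disk);
out:
        btrfs_free_path(path);
        return ret;
}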
@@ -2196,7 +2364,7 @@ static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
        return btrfs_item_size(eb, e) - offset;
 }
 
-static inline struct btrfs_root *btrfs_sb(struct super_block *sb)
+static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
 {
        return sb->s_fs_info;
 }
@@ -2277,11 +2445,11 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root, u32 blocksize,
                                        u64 parent, u64 root_objectid,
                                        struct btrfs_disk_key *key, int level,
-                                       u64 hint, u64 empty_size);
+                                       u64 hint, u64 empty_size, int for_cow);
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
-                          u64 parent, int last_ref);
+                          u64 parent, int last_ref, int for_cow);
 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
                                            struct btrfs_root *root,
                                            u64 bytenr, u32 blocksize,
@@ -2301,17 +2469,17 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
                                  u64 search_end, struct btrfs_key *ins,
                                  u64 data);
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                 struct extent_buffer *buf, int full_backref);
+                 struct extent_buffer *buf, int full_backref, int for_cow);
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                 struct extent_buffer *buf, int full_backref);
+                 struct extent_buffer *buf, int full_backref, int for_cow);
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                u64 bytenr, u64 num_bytes, u64 flags,
                                int is_data);
 int btrfs_free_extent(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root,
-                     u64 bytenr, u64 num_bytes, u64 parent,
-                     u64 root_objectid, u64 owner, u64 offset);
+                     u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+                     u64 owner, u64 offset, int for_cow);
 
 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
@@ -2323,7 +2491,7 @@ int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes, u64 parent,
-                        u64 root_objectid, u64 owner, u64 offset);
+                        u64 root_objectid, u64 owner, u64 offset, int for_cow);
 
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root);
@@ -2482,10 +2650,18 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
 }
 
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
+static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
+{
+       ++p->slots[0];
+       if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
+               return btrfs_next_leaf(root, p);
+       return 0;
+}
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
 void btrfs_drop_snapshot(struct btrfs_root *root,
-                        struct btrfs_block_rsv *block_rsv, int update_ref);
+                        struct btrfs_block_rsv *block_rsv, int update_ref,
+                        int for_reloc);
 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root,
                        struct extent_buffer *node,
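btrfs_next_item() added above is a small convenience wrapper: bump the slot in the current leaf and only fall back to btrfs_next_leaf() once the leaf is exhausted. A rough sketch of the iteration pattern it enables (root, objectid and the item handling are assumed from a surrounding function; error handling is trimmed):

        struct btrfs_path *path = btrfs_alloc_path();
        struct btrfs_key key, found;
        int ret;

        key.objectid = objectid;
        key.type = 0;
        key.offset = 0;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
                if (btrfs_next_leaf(root, path))
                        goto out;               /* nothing at or after the key */
        }
        while (1) {
                btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
                if (found.objectid != objectid)
                        break;                  /* walked past the range of interest */
                /* ... process the item at path->slots[0] ... */
                if (btrfs_next_item(root, path))
                        break;                  /* no more items (or error) */
        }
out:
        btrfs_free_path(path);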
@@ -2500,6 +2676,7 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
 }
 static inline void free_fs_info(struct btrfs_fs_info *fs_info)
 {
+       kfree(fs_info->balance_ctl);
        kfree(fs_info->delayed_root);
        kfree(fs_info->extent_root);
        kfree(fs_info->tree_root);
@@ -2510,6 +2687,24 @@ static inline void free_fs_info(struct btrfs_fs_info *fs_info)
        kfree(fs_info->super_for_commit);
        kfree(fs_info);
 }
+/**
+ * profile_is_valid - tests whether a given profile is valid and reduced
+ * @flags: profile to validate
+ * @extended: if true @flags is treated as an extended profile
+ */
+static inline int profile_is_valid(u64 flags, int extended)
+{
+       u64 mask = ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+       flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
+       if (extended)
+               mask &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+       if (flags & mask)
+               return 0;
+       /* true if zero or exactly one bit set */
+       return (flags & (~flags + 1)) == flags;
+}
 
 /* root-item.c */
 int btrfs_find_root_ref(struct btrfs_root *tree_root,
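profile_is_valid() a few lines above reduces the question to: after masking out the type bits (and, for extended profiles, the SINGLE bit), is at most one bit left? The closing check works because ~flags + 1 is -flags in two's complement, and x & -x isolates the lowest set bit, so the expression equals flags only for zero or a power of two. A standalone illustration:

#include <stdio.h>
#include <stdint.h>

static int zero_or_one_bit(uint64_t flags)
{
        /* x & -x keeps only the lowest set bit of x (and 0 stays 0) */
        return (flags & (~flags + 1)) == flags;
}

int main(void)
{
        printf("%d\n", zero_or_one_bit(0));                          /* 1: no profile */
        printf("%d\n", zero_or_one_bit(1ULL << 3));                  /* 1: RAID0 only */
        printf("%d\n", zero_or_one_bit((1ULL << 3) | (1ULL << 4)));  /* 0: two profiles */
        return 0;
}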
index 9c1eccc2c503e5eec8bd20d3dfc057a417eef89a..fe4cd0f1cef188b8cf584c67c8ab0f58ffb62cbb 100644 (file)
@@ -595,8 +595,12 @@ static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans,
 
        num_bytes = btrfs_calc_trans_metadata_size(root, 1);
        ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes);
-       if (!ret)
+       if (!ret) {
+               trace_btrfs_space_reservation(root->fs_info, "delayed_item",
+                                             item->key.objectid,
+                                             num_bytes, 1);
                item->bytes_reserved = num_bytes;
+       }
 
        return ret;
 }
@@ -610,6 +614,9 @@ static void btrfs_delayed_item_release_metadata(struct btrfs_root *root,
                return;
 
        rsv = &root->fs_info->delayed_block_rsv;
+       trace_btrfs_space_reservation(root->fs_info, "delayed_item",
+                                     item->key.objectid, item->bytes_reserved,
+                                     0);
        btrfs_block_rsv_release(root, rsv,
                                item->bytes_reserved);
 }
@@ -624,7 +631,7 @@ static int btrfs_delayed_inode_reserve_metadata(
        struct btrfs_block_rsv *dst_rsv;
        u64 num_bytes;
        int ret;
-       int release = false;
+       bool release = false;
 
        src_rsv = trans->block_rsv;
        dst_rsv = &root->fs_info->delayed_block_rsv;
@@ -651,8 +658,13 @@ static int btrfs_delayed_inode_reserve_metadata(
                 */
                if (ret == -EAGAIN)
                        ret = -ENOSPC;
-               if (!ret)
+               if (!ret) {
                        node->bytes_reserved = num_bytes;
+                       trace_btrfs_space_reservation(root->fs_info,
+                                                     "delayed_inode",
+                                                     btrfs_ino(inode),
+                                                     num_bytes, 1);
+               }
                return ret;
        } else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
                spin_lock(&BTRFS_I(inode)->lock);
@@ -707,11 +719,17 @@ out:
         * reservation here.  I think it may be time for a documentation page on
         * how block rsvs. work.
         */
-       if (!ret)
+       if (!ret) {
+               trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
+                                             btrfs_ino(inode), num_bytes, 1);
                node->bytes_reserved = num_bytes;
+       }
 
-       if (release)
+       if (release) {
+               trace_btrfs_space_reservation(root->fs_info, "delalloc",
+                                             btrfs_ino(inode), num_bytes, 0);
                btrfs_block_rsv_release(root, src_rsv, num_bytes);
+       }
 
        return ret;
 }
@@ -725,6 +743,8 @@ static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root,
                return;
 
        rsv = &root->fs_info->delayed_block_rsv;
+       trace_btrfs_space_reservation(root->fs_info, "delayed_inode",
+                                     node->inode_id, node->bytes_reserved, 0);
        btrfs_block_rsv_release(root, rsv,
                                node->bytes_reserved);
        node->bytes_reserved = 0;
@@ -1372,13 +1392,6 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
                goto release_node;
        }
 
-       ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
-       /*
-        * we have reserved enough space when we start a new transaction,
-        * so reserving metadata failure is impossible
-        */
-       BUG_ON(ret);
-
        delayed_item->key.objectid = btrfs_ino(dir);
        btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY);
        delayed_item->key.offset = index;
@@ -1391,6 +1404,14 @@ int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans,
        dir_item->type = type;
        memcpy((char *)(dir_item + 1), name, name_len);
 
+       ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item);
+       /*
+        * we have reserved enough space when we start a new transaction,
+        * so reserving metadata failure is impossible
+        */
+       BUG_ON(ret);
+
+
        mutex_lock(&delayed_node->mutex);
        ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item);
        if (unlikely(ret)) {
index 125cf76fcd086803d35bd257bdb54168486e0e0b..66e4f29505a33dbecd45b5d6a80e878c87818bc0 100644 (file)
@@ -101,6 +101,11 @@ static int comp_entry(struct btrfs_delayed_ref_node *ref2,
                return -1;
        if (ref1->type > ref2->type)
                return 1;
+       /* merging of sequenced refs is not allowed */
+       if (ref1->seq < ref2->seq)
+               return -1;
+       if (ref1->seq > ref2->seq)
+               return 1;
        if (ref1->type == BTRFS_TREE_BLOCK_REF_KEY ||
            ref1->type == BTRFS_SHARED_BLOCK_REF_KEY) {
                return comp_tree_refs(btrfs_delayed_node_to_tree_ref(ref2),
@@ -150,16 +155,22 @@ static struct btrfs_delayed_ref_node *tree_insert(struct rb_root *root,
 
 /*
  * find a head entry based on bytenr. This returns the delayed ref
- * head if it was able to find one, or NULL if nothing was in that spot
+ * head if it was able to find one, or NULL if nothing was in that spot.
+ * If return_bigger is given, the next bigger entry is returned if no exact
+ * match is found.
  */
 static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
                                  u64 bytenr,
-                                 struct btrfs_delayed_ref_node **last)
+                                 struct btrfs_delayed_ref_node **last,
+                                 int return_bigger)
 {
-       struct rb_node *n = root->rb_node;
+       struct rb_node *n;
        struct btrfs_delayed_ref_node *entry;
-       int cmp;
+       int cmp = 0;
 
+again:
+       n = root->rb_node;
+       entry = NULL;
        while (n) {
                entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
                WARN_ON(!entry->in_tree);
@@ -182,6 +193,19 @@ static struct btrfs_delayed_ref_node *find_ref_head(struct rb_root *root,
                else
                        return entry;
        }
+       if (entry && return_bigger) {
+               if (cmp > 0) {
+                       n = rb_next(&entry->rb_node);
+                       if (!n)
+                               n = rb_first(root);
+                       entry = rb_entry(n, struct btrfs_delayed_ref_node,
+                                        rb_node);
+                       bytenr = entry->bytenr;
+                       return_bigger = 0;
+                       goto again;
+               }
+               return entry;
+       }
        return NULL;
 }
 
@@ -209,6 +233,24 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+                           u64 seq)
+{
+       struct seq_list *elem;
+
+       assert_spin_locked(&delayed_refs->lock);
+       if (list_empty(&delayed_refs->seq_head))
+               return 0;
+
+       elem = list_first_entry(&delayed_refs->seq_head, struct seq_list, list);
+       if (seq >= elem->seq) {
+               pr_debug("holding back delayed_ref %llu, lowest is %llu (%p)\n",
+                        seq, elem->seq, delayed_refs);
+               return 1;
+       }
+       return 0;
+}
+
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 start)
 {
@@ -223,20 +265,8 @@ int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                node = rb_first(&delayed_refs->root);
        } else {
                ref = NULL;
-               find_ref_head(&delayed_refs->root, start, &ref);
+               find_ref_head(&delayed_refs->root, start + 1, &ref, 1);
                if (ref) {
-                       struct btrfs_delayed_ref_node *tmp;
-
-                       node = rb_prev(&ref->rb_node);
-                       while (node) {
-                               tmp = rb_entry(node,
-                                              struct btrfs_delayed_ref_node,
-                                              rb_node);
-                               if (tmp->bytenr < start)
-                                       break;
-                               ref = tmp;
-                               node = rb_prev(&ref->rb_node);
-                       }
                        node = &ref->rb_node;
                } else
                        node = rb_first(&delayed_refs->root);
@@ -390,7 +420,8 @@ update_existing_head_ref(struct btrfs_delayed_ref_node *existing,
  * this does all the dirty work in terms of maintaining the correct
  * overall modification count.
  */
-static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
+static noinline int add_delayed_ref_head(struct btrfs_fs_info *fs_info,
+                                       struct btrfs_trans_handle *trans,
                                        struct btrfs_delayed_ref_node *ref,
                                        u64 bytenr, u64 num_bytes,
                                        int action, int is_data)
@@ -437,6 +468,7 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
        ref->action  = 0;
        ref->is_head = 1;
        ref->in_tree = 1;
+       ref->seq = 0;
 
        head_ref = btrfs_delayed_node_to_head(ref);
        head_ref->must_insert_reserved = must_insert_reserved;
@@ -468,14 +500,17 @@ static noinline int add_delayed_ref_head(struct btrfs_trans_handle *trans,
 /*
  * helper to insert a delayed tree ref into the rbtree.
  */
-static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+static noinline int add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+                                        struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
-                                        u64 ref_root, int level, int action)
+                                        u64 ref_root, int level, int action,
+                                        int for_cow)
 {
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_tree_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
+       u64 seq = 0;
 
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;
@@ -491,14 +526,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
        ref->is_head = 0;
        ref->in_tree = 1;
 
+       if (need_ref_seq(for_cow, ref_root))
+               seq = inc_delayed_seq(delayed_refs);
+       ref->seq = seq;
+
        full_ref = btrfs_delayed_node_to_tree_ref(ref);
-       if (parent) {
-               full_ref->parent = parent;
+       full_ref->parent = parent;
+       full_ref->root = ref_root;
+       if (parent)
                ref->type = BTRFS_SHARED_BLOCK_REF_KEY;
-       } else {
-               full_ref->root = ref_root;
+       else
                ref->type = BTRFS_TREE_BLOCK_REF_KEY;
-       }
        full_ref->level = level;
 
        trace_btrfs_delayed_tree_ref(ref, full_ref, action);
@@ -522,15 +560,17 @@ static noinline int add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 /*
  * helper to insert a delayed data ref into the rbtree.
  */
-static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
+static noinline int add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+                                        struct btrfs_trans_handle *trans,
                                         struct btrfs_delayed_ref_node *ref,
                                         u64 bytenr, u64 num_bytes, u64 parent,
                                         u64 ref_root, u64 owner, u64 offset,
-                                        int action)
+                                        int action, int for_cow)
 {
        struct btrfs_delayed_ref_node *existing;
        struct btrfs_delayed_data_ref *full_ref;
        struct btrfs_delayed_ref_root *delayed_refs;
+       u64 seq = 0;
 
        if (action == BTRFS_ADD_DELAYED_EXTENT)
                action = BTRFS_ADD_DELAYED_REF;
@@ -546,14 +586,18 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
        ref->is_head = 0;
        ref->in_tree = 1;
 
+       if (need_ref_seq(for_cow, ref_root))
+               seq = inc_delayed_seq(delayed_refs);
+       ref->seq = seq;
+
        full_ref = btrfs_delayed_node_to_data_ref(ref);
-       if (parent) {
-               full_ref->parent = parent;
+       full_ref->parent = parent;
+       full_ref->root = ref_root;
+       if (parent)
                ref->type = BTRFS_SHARED_DATA_REF_KEY;
-       } else {
-               full_ref->root = ref_root;
+       else
                ref->type = BTRFS_EXTENT_DATA_REF_KEY;
-       }
+
        full_ref->objectid = owner;
        full_ref->offset = offset;
 
@@ -580,10 +624,12 @@ static noinline int add_delayed_data_ref(struct btrfs_trans_handle *trans,
  * to make sure the delayed ref is eventually processed before this
  * transaction commits.
  */
-int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+                              struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root,  int level, int action,
-                              struct btrfs_delayed_extent_op *extent_op)
+                              struct btrfs_delayed_extent_op *extent_op,
+                              int for_cow)
 {
        struct btrfs_delayed_tree_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
@@ -610,13 +656,17 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
-       ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
-                                  action, 0);
+       ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+                                  num_bytes, action, 0);
        BUG_ON(ret);
 
-       ret = add_delayed_tree_ref(trans, &ref->node, bytenr, num_bytes,
-                                  parent, ref_root, level, action);
+       ret = add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
+                                  num_bytes, parent, ref_root, level, action,
+                                  for_cow);
        BUG_ON(ret);
+       if (!need_ref_seq(for_cow, ref_root) &&
+           waitqueue_active(&delayed_refs->seq_wait))
+               wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
        return 0;
 }
@@ -624,11 +674,13 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
 /*
  * add a delayed data ref. it's similar to btrfs_add_delayed_tree_ref.
  */
-int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+                              struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
-                              struct btrfs_delayed_extent_op *extent_op)
+                              struct btrfs_delayed_extent_op *extent_op,
+                              int for_cow)
 {
        struct btrfs_delayed_data_ref *ref;
        struct btrfs_delayed_ref_head *head_ref;
@@ -655,18 +707,23 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
         * insert both the head node and the new ref without dropping
         * the spin lock
         */
-       ret = add_delayed_ref_head(trans, &head_ref->node, bytenr, num_bytes,
-                                  action, 1);
+       ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
+                                  num_bytes, action, 1);
        BUG_ON(ret);
 
-       ret = add_delayed_data_ref(trans, &ref->node, bytenr, num_bytes,
-                                  parent, ref_root, owner, offset, action);
+       ret = add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
+                                  num_bytes, parent, ref_root, owner, offset,
+                                  action, for_cow);
        BUG_ON(ret);
+       if (!need_ref_seq(for_cow, ref_root) &&
+           waitqueue_active(&delayed_refs->seq_wait))
+               wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
        return 0;
 }
 
-int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+                               struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op)
 {
@@ -683,11 +740,13 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
        delayed_refs = &trans->transaction->delayed_refs;
        spin_lock(&delayed_refs->lock);
 
-       ret = add_delayed_ref_head(trans, &head_ref->node, bytenr,
+       ret = add_delayed_ref_head(fs_info, trans, &head_ref->node, bytenr,
                                   num_bytes, BTRFS_UPDATE_DELAYED_HEAD,
                                   extent_op->is_data);
        BUG_ON(ret);
 
+       if (waitqueue_active(&delayed_refs->seq_wait))
+               wake_up(&delayed_refs->seq_wait);
        spin_unlock(&delayed_refs->lock);
        return 0;
 }
@@ -704,7 +763,7 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr)
        struct btrfs_delayed_ref_root *delayed_refs;
 
        delayed_refs = &trans->transaction->delayed_refs;
-       ref = find_ref_head(&delayed_refs->root, bytenr, NULL);
+       ref = find_ref_head(&delayed_refs->root, bytenr, NULL, 0);
        if (ref)
                return btrfs_delayed_node_to_head(ref);
        return NULL;
index e287e3b0eab0d970d37f0f4c70fd688b22f276ac..d8f244d9492511e3b108b26bcf4da1bc9fbf6826 100644 (file)
@@ -33,6 +33,9 @@ struct btrfs_delayed_ref_node {
        /* the size of the extent */
        u64 num_bytes;
 
+       /* seq number to keep track of insertion order */
+       u64 seq;
+
        /* ref count on this data structure */
        atomic_t refs;
 
@@ -98,19 +101,15 @@ struct btrfs_delayed_ref_head {
 
 struct btrfs_delayed_tree_ref {
        struct btrfs_delayed_ref_node node;
-       union {
-               u64 root;
-               u64 parent;
-       };
+       u64 root;
+       u64 parent;
        int level;
 };
 
 struct btrfs_delayed_data_ref {
        struct btrfs_delayed_ref_node node;
-       union {
-               u64 root;
-               u64 parent;
-       };
+       u64 root;
+       u64 parent;
        u64 objectid;
        u64 offset;
 };
@@ -140,6 +139,26 @@ struct btrfs_delayed_ref_root {
        int flushing;
 
        u64 run_delayed_start;
+
+       /*
+        * seq number of delayed refs. We need to know if a backref was being
+        * added before the currently processed ref or afterwards.
+        */
+       u64 seq;
+
+       /*
+        * seq_list holds a list of all seq numbers that are currently being
+        * added to the list. While walking backrefs (btrfs_find_all_roots,
+        * qgroups), which might take some time, no newer ref must be processed,
+        * as it might influence the outcome of the walk.
+        */
+       struct list_head seq_head;
+
+       /*
+        * when the only refs we have in the list must not be processed, we want
+        * to wait for more refs to show up or for the end of backref walking.
+        */
+       wait_queue_head_t seq_wait;
 };
 
 static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
@@ -151,16 +170,21 @@ static inline void btrfs_put_delayed_ref(struct btrfs_delayed_ref_node *ref)
        }
 }
 
-int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
+                              struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes, u64 parent,
                               u64 ref_root, int level, int action,
-                              struct btrfs_delayed_extent_op *extent_op);
-int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+                              struct btrfs_delayed_extent_op *extent_op,
+                              int for_cow);
+int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
+                              struct btrfs_trans_handle *trans,
                               u64 bytenr, u64 num_bytes,
                               u64 parent, u64 ref_root,
                               u64 owner, u64 offset, int action,
-                              struct btrfs_delayed_extent_op *extent_op);
-int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans,
+                              struct btrfs_delayed_extent_op *extent_op,
+                              int for_cow);
+int btrfs_add_delayed_extent_op(struct btrfs_fs_info *fs_info,
+                               struct btrfs_trans_handle *trans,
                                u64 bytenr, u64 num_bytes,
                                struct btrfs_delayed_extent_op *extent_op);
 
@@ -170,6 +194,60 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
                           struct btrfs_delayed_ref_head *head);
 int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
                           struct list_head *cluster, u64 search_start);
+
+struct seq_list {
+       struct list_head list;
+       u64 seq;
+};
+
+static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
+{
+       assert_spin_locked(&delayed_refs->lock);
+       ++delayed_refs->seq;
+       return delayed_refs->seq;
+}
+
+static inline void
+btrfs_get_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+                     struct seq_list *elem)
+{
+       assert_spin_locked(&delayed_refs->lock);
+       elem->seq = delayed_refs->seq;
+       list_add_tail(&elem->list, &delayed_refs->seq_head);
+}
+
+static inline void
+btrfs_put_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+                     struct seq_list *elem)
+{
+       spin_lock(&delayed_refs->lock);
+       list_del(&elem->list);
+       wake_up(&delayed_refs->seq_wait);
+       spin_unlock(&delayed_refs->lock);
+}
+
+int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
+                           u64 seq);
+
+/*
+ * delayed refs with a ref_seq > 0 must be held back during backref walking.
+ * this only applies to items in one of the fs-trees. for_cow items never need
+ * to be held back, so they won't get a ref_seq number.
+ */
+static inline int need_ref_seq(int for_cow, u64 rootid)
+{
+       if (for_cow)
+               return 0;
+
+       if (rootid == BTRFS_FS_TREE_OBJECTID)
+               return 1;
+
+       if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
+               return 1;
+
+       return 0;
+}
+
 /*
  * a node might live in a head or a regular ref, this lets you
  * test for the proper type to use.
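The seq machinery above amounts to a simple blocking protocol: a backref walker pins the current sequence number with btrfs_get_delayed_seq() while holding delayed_refs->lock, performs its potentially long walk, and then drops the pin with btrfs_put_delayed_seq(), which also wakes seq_wait; run_clustered_refs() (see the extent-tree.c hunk near the end of this excerpt) asks btrfs_check_delayed_seq() and postpones any ref whose seq is not older than the oldest pinned one, while need_ref_seq() keeps for_cow refs and non-fs-tree refs out of the scheme entirely. A sketch of the walker side, with walk_backrefs() as a hypothetical stand-in for a walker such as the btrfs_find_all_roots mentioned in the comment above:

        struct seq_list elem;
        int ret;

        spin_lock(&delayed_refs->lock);
        btrfs_get_delayed_seq(delayed_refs, &elem);     /* pin the current seq */
        spin_unlock(&delayed_refs->lock);

        ret = walk_backrefs(fs_info, bytenr, elem.seq); /* hypothetical walker */

        /* takes delayed_refs->lock itself and wakes seq_wait */
        btrfs_put_delayed_seq(delayed_refs, &elem);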
index d8525662ca7a721b18e197191816ea16b095eb19..811d9f918b1c2923c1b35a5ae966808b0e3c2fe6 100644 (file)
@@ -43,6 +43,7 @@
 #include "tree-log.h"
 #include "free-space-cache.h"
 #include "inode-map.h"
+#include "check-integrity.h"
 
 static struct extent_io_ops btree_extent_io_ops;
 static void end_workqueue_fn(struct btrfs_work *work);
@@ -961,6 +962,13 @@ static int btree_releasepage(struct page *page, gfp_t gfp_flags)
        tree = &BTRFS_I(page->mapping->host)->io_tree;
        map = &BTRFS_I(page->mapping->host)->extent_tree;
 
+       /*
+        * We need to mask out eg. __GFP_HIGHMEM and __GFP_DMA32 as we're doing
+        * slab allocation from alloc_extent_state down the callchain where
+        * it'd hit a BUG_ON as those flags are not allowed.
+        */
+       gfp_flags &= ~GFP_SLAB_BUG_MASK;
+
        ret = try_release_extent_state(map, tree, page, gfp_flags);
        if (!ret)
                return 0;
@@ -1143,7 +1151,6 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
        root->orphan_item_inserted = 0;
        root->orphan_cleanup_state = 0;
 
-       root->fs_info = fs_info;
        root->objectid = objectid;
        root->last_trans = 0;
        root->highest_objectid = 0;
@@ -1217,6 +1224,14 @@ static int find_and_setup_root(struct btrfs_root *tree_root,
        return 0;
 }
 
+static struct btrfs_root *btrfs_alloc_root(struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_root *root = kzalloc(sizeof(*root), GFP_NOFS);
+       if (root)
+               root->fs_info = fs_info;
+       return root;
+}
+
 static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
                                         struct btrfs_fs_info *fs_info)
 {
@@ -1224,7 +1239,7 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct extent_buffer *leaf;
 
-       root = kzalloc(sizeof(*root), GFP_NOFS);
+       root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);
 
@@ -1244,7 +1259,8 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
        root->ref_cows = 0;
 
        leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
-                                     BTRFS_TREE_LOG_OBJECTID, NULL, 0, 0, 0);
+                                     BTRFS_TREE_LOG_OBJECTID, NULL,
+                                     0, 0, 0, 0);
        if (IS_ERR(leaf)) {
                kfree(root);
                return ERR_CAST(leaf);
@@ -1318,7 +1334,7 @@ struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root,
        u32 blocksize;
        int ret = 0;
 
-       root = kzalloc(sizeof(*root), GFP_NOFS);
+       root = btrfs_alloc_root(fs_info);
        if (!root)
                return ERR_PTR(-ENOMEM);
        if (location->offset == (u64)-1) {
@@ -1874,9 +1890,9 @@ static void free_root_pointers(struct btrfs_fs_info *info, int chunk_root)
 }
 
 
-struct btrfs_root *open_ctree(struct super_block *sb,
-                             struct btrfs_fs_devices *fs_devices,
-                             char *options)
+int open_ctree(struct super_block *sb,
+              struct btrfs_fs_devices *fs_devices,
+              char *options)
 {
        u32 sectorsize;
        u32 nodesize;
@@ -1888,8 +1904,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        struct btrfs_key location;
        struct buffer_head *bh;
        struct btrfs_super_block *disk_super;
-       struct btrfs_root *tree_root = btrfs_sb(sb);
-       struct btrfs_fs_info *fs_info = tree_root->fs_info;
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       struct btrfs_root *tree_root;
        struct btrfs_root *extent_root;
        struct btrfs_root *csum_root;
        struct btrfs_root *chunk_root;
@@ -1900,16 +1916,14 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        int num_backups_tried = 0;
        int backup_index = 0;
 
-       extent_root = fs_info->extent_root =
-               kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
-       csum_root = fs_info->csum_root =
-               kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
-       chunk_root = fs_info->chunk_root =
-               kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
-       dev_root = fs_info->dev_root =
-               kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+       tree_root = fs_info->tree_root = btrfs_alloc_root(fs_info);
+       extent_root = fs_info->extent_root = btrfs_alloc_root(fs_info);
+       csum_root = fs_info->csum_root = btrfs_alloc_root(fs_info);
+       chunk_root = fs_info->chunk_root = btrfs_alloc_root(fs_info);
+       dev_root = fs_info->dev_root = btrfs_alloc_root(fs_info);
 
-       if (!extent_root || !csum_root || !chunk_root || !dev_root) {
+       if (!tree_root || !extent_root || !csum_root ||
+           !chunk_root || !dev_root) {
                err = -ENOMEM;
                goto fail;
        }
@@ -1998,6 +2012,17 @@ struct btrfs_root *open_ctree(struct super_block *sb,
        init_waitqueue_head(&fs_info->scrub_pause_wait);
        init_rwsem(&fs_info->scrub_super_lock);
        fs_info->scrub_workers_refcnt = 0;
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+       fs_info->check_integrity_print_mask = 0;
+#endif
+
+       spin_lock_init(&fs_info->balance_lock);
+       mutex_init(&fs_info->balance_mutex);
+       atomic_set(&fs_info->balance_running, 0);
+       atomic_set(&fs_info->balance_pause_req, 0);
+       atomic_set(&fs_info->balance_cancel_req, 0);
+       fs_info->balance_ctl = NULL;
+       init_waitqueue_head(&fs_info->balance_wait_q);
 
        sb->s_blocksize = 4096;
        sb->s_blocksize_bits = blksize_bits(4096);
@@ -2267,9 +2292,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
           (unsigned long)btrfs_header_chunk_tree_uuid(chunk_root->node),
           BTRFS_UUID_SIZE);
 
-       mutex_lock(&fs_info->chunk_mutex);
        ret = btrfs_read_chunk_tree(chunk_root);
-       mutex_unlock(&fs_info->chunk_mutex);
        if (ret) {
                printk(KERN_WARNING "btrfs: failed to read chunk tree on %s\n",
                       sb->s_id);
@@ -2318,9 +2341,6 @@ retry_root_backup:
 
        fs_info->generation = generation;
        fs_info->last_trans_committed = generation;
-       fs_info->data_alloc_profile = (u64)-1;
-       fs_info->metadata_alloc_profile = (u64)-1;
-       fs_info->system_alloc_profile = fs_info->metadata_alloc_profile;
 
        ret = btrfs_init_space_info(fs_info);
        if (ret) {
@@ -2353,6 +2373,19 @@ retry_root_backup:
                btrfs_set_opt(fs_info->mount_opt, SSD);
        }
 
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+       if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
+               ret = btrfsic_mount(tree_root, fs_devices,
+                                   btrfs_test_opt(tree_root,
+                                       CHECK_INTEGRITY_INCLUDING_EXTENT_DATA) ?
+                                   1 : 0,
+                                   fs_info->check_integrity_print_mask);
+               if (ret)
+                       printk(KERN_WARNING "btrfs: failed to initialize"
+                              " integrity check module %s\n", sb->s_id);
+       }
+#endif
+
        /* do not make disk changes in broken FS */
        if (btrfs_super_log_root(disk_super) != 0 &&
            !(fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR)) {
@@ -2368,7 +2401,7 @@ retry_root_backup:
                     btrfs_level_size(tree_root,
                                      btrfs_super_log_root_level(disk_super));
 
-               log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+               log_tree_root = btrfs_alloc_root(fs_info);
                if (!log_tree_root) {
                        err = -ENOMEM;
                        goto fail_trans_kthread;
@@ -2423,13 +2456,17 @@ retry_root_backup:
                if (!err)
                        err = btrfs_orphan_cleanup(fs_info->tree_root);
                up_read(&fs_info->cleanup_work_sem);
+
+               if (!err)
+                       err = btrfs_recover_balance(fs_info->tree_root);
+
                if (err) {
                        close_ctree(tree_root);
-                       return ERR_PTR(err);
+                       return err;
                }
        }
 
-       return tree_root;
+       return 0;
 
 fail_trans_kthread:
        kthread_stop(fs_info->transaction_kthread);
@@ -2475,8 +2512,7 @@ fail_srcu:
        cleanup_srcu_struct(&fs_info->subvol_srcu);
 fail:
        btrfs_close_devices(fs_info->fs_devices);
-       free_fs_info(fs_info);
-       return ERR_PTR(err);
+       return err;
 
 recovery_tree_root:
        if (!btrfs_test_opt(tree_root, RECOVERY))
@@ -2631,7 +2667,7 @@ static int write_dev_supers(struct btrfs_device *device,
                 * we fua the first super.  The others we allow
                 * to go down lazy.
                 */
-               ret = submit_bh(WRITE_FUA, bh);
+               ret = btrfsic_submit_bh(WRITE_FUA, bh);
                if (ret)
                        errors++;
        }
@@ -2708,7 +2744,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
        device->flush_bio = bio;
 
        bio_get(bio);
-       submit_bio(WRITE_FLUSH, bio);
+       btrfsic_submit_bio(WRITE_FLUSH, bio);
 
        return 0;
 }
@@ -2972,6 +3008,9 @@ int close_ctree(struct btrfs_root *root)
        fs_info->closing = 1;
        smp_mb();
 
+       /* pause restriper - we want to resume on mount */
+       btrfs_pause_balance(root->fs_info);
+
        btrfs_scrub_cancel(root);
 
        /* wait for any defraggers to finish */
@@ -2979,7 +3018,7 @@ int close_ctree(struct btrfs_root *root)
                   (atomic_read(&fs_info->defrag_running) == 0));
 
        /* clear out the rbtree of defraggable inodes */
-       btrfs_run_defrag_inodes(root->fs_info);
+       btrfs_run_defrag_inodes(fs_info);
 
        /*
         * Here come 2 situations when btrfs is broken to flip readonly:
@@ -3008,8 +3047,8 @@ int close_ctree(struct btrfs_root *root)
 
        btrfs_put_block_group_cache(fs_info);
 
-       kthread_stop(root->fs_info->transaction_kthread);
-       kthread_stop(root->fs_info->cleaner_kthread);
+       kthread_stop(fs_info->transaction_kthread);
+       kthread_stop(fs_info->cleaner_kthread);
 
        fs_info->closing = 2;
        smp_mb();
@@ -3027,14 +3066,14 @@ int close_ctree(struct btrfs_root *root)
        free_extent_buffer(fs_info->extent_root->commit_root);
        free_extent_buffer(fs_info->tree_root->node);
        free_extent_buffer(fs_info->tree_root->commit_root);
-       free_extent_buffer(root->fs_info->chunk_root->node);
-       free_extent_buffer(root->fs_info->chunk_root->commit_root);
-       free_extent_buffer(root->fs_info->dev_root->node);
-       free_extent_buffer(root->fs_info->dev_root->commit_root);
-       free_extent_buffer(root->fs_info->csum_root->node);
-       free_extent_buffer(root->fs_info->csum_root->commit_root);
+       free_extent_buffer(fs_info->chunk_root->node);
+       free_extent_buffer(fs_info->chunk_root->commit_root);
+       free_extent_buffer(fs_info->dev_root->node);
+       free_extent_buffer(fs_info->dev_root->commit_root);
+       free_extent_buffer(fs_info->csum_root->node);
+       free_extent_buffer(fs_info->csum_root->commit_root);
 
-       btrfs_free_block_groups(root->fs_info);
+       btrfs_free_block_groups(fs_info);
 
        del_fs_roots(fs_info);
 
@@ -3054,14 +3093,17 @@ int close_ctree(struct btrfs_root *root)
        btrfs_stop_workers(&fs_info->caching_workers);
        btrfs_stop_workers(&fs_info->readahead_workers);
 
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+       if (btrfs_test_opt(root, CHECK_INTEGRITY))
+               btrfsic_unmount(root, fs_info->fs_devices);
+#endif
+
        btrfs_close_devices(fs_info->fs_devices);
        btrfs_mapping_tree_free(&fs_info->mapping_tree);
 
        bdi_destroy(&fs_info->bdi);
        cleanup_srcu_struct(&fs_info->subvol_srcu);
 
-       free_fs_info(fs_info);
-
        return 0;
 }
 
index c99d0a8f13fa2f38642f7826cb00c45070ac95ae..e4bc4741319bd3a1e094de03566feeb360d35e4d 100644 (file)
@@ -46,9 +46,9 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root,
                                                   u64 bytenr, u32 blocksize);
 int clean_tree_block(struct btrfs_trans_handle *trans,
                     struct btrfs_root *root, struct extent_buffer *buf);
-struct btrfs_root *open_ctree(struct super_block *sb,
-                             struct btrfs_fs_devices *fs_devices,
-                             char *options);
+int open_ctree(struct super_block *sb,
+              struct btrfs_fs_devices *fs_devices,
+              char *options);
 int close_ctree(struct btrfs_root *root);
 int write_ctree_super(struct btrfs_trans_handle *trans,
                      struct btrfs_root *root, int max_mirrors);
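Together with the btrfs_sb() change in the ctree.h hunk earlier, this moves ownership of struct btrfs_fs_info out of open_ctree(): the fs_info now hangs directly off sb->s_fs_info, open_ctree() reports success or failure as an int, and neither close_ctree() nor the open_ctree() error path calls free_fs_info() any more. The super.c side is not part of this excerpt; a rough sketch of what a caller presumably has to do after this change (names are illustrative only):

static int example_fill_super(struct super_block *sb,
                              struct btrfs_fs_devices *fs_devices,
                              char *options)
{
        struct btrfs_fs_info *fs_info;
        int err;

        fs_info = kzalloc(sizeof(*fs_info), GFP_NOFS);
        if (!fs_info)
                return -ENOMEM;

        sb->s_fs_info = fs_info;        /* this is what btrfs_sb() now returns */

        err = open_ctree(sb, fs_devices, options);
        if (err) {
                /* ownership stays with the caller now */
                free_fs_info(fs_info);
                sb->s_fs_info = NULL;
        }
        return err;
}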
index 1b8dc33778f9c411206cc6ad9467546412431947..5f77166fd01c7eb3d33cb78666e31822fc0a1083 100644 (file)
@@ -67,7 +67,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
                                       u64 root_objectid, u32 generation,
                                       int check_generation)
 {
-       struct btrfs_fs_info *fs_info = btrfs_sb(sb)->fs_info;
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
        struct btrfs_root *root;
        struct inode *inode;
        struct btrfs_key key;
index f5fbe576d2baf48519a01bd449344b49edffa070..283af7a676a39b4c2f31d7d9b7caf81607eee4ea 100644 (file)
 #include "locking.h"
 #include "free-space-cache.h"
 
-/* control flags for do_chunk_alloc's force field
+/*
+ * control flags for do_chunk_alloc's force field
  * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
  * if we really need one.
  *
- * CHUNK_ALLOC_FORCE means it must try to allocate one
- *
  * CHUNK_ALLOC_LIMITED means to only try and allocate one
  * if we have very few chunks already allocated.  This is
  * used as part of the clustering code to help make sure
  * we have a good pool of storage to cluster in, without
  * filling the FS with empty chunks
  *
+ * CHUNK_ALLOC_FORCE means it must try to allocate one
+ *
  */
 enum {
        CHUNK_ALLOC_NO_FORCE = 0,
-       CHUNK_ALLOC_FORCE = 1,
-       CHUNK_ALLOC_LIMITED = 2,
+       CHUNK_ALLOC_LIMITED = 1,
+       CHUNK_ALLOC_FORCE = 2,
 };
 
 /*
@@ -618,8 +619,7 @@ static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
        struct list_head *head = &info->space_info;
        struct btrfs_space_info *found;
 
-       flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
-                BTRFS_BLOCK_GROUP_METADATA;
+       flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;
 
        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
@@ -1872,20 +1872,24 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
                         struct btrfs_root *root,
                         u64 bytenr, u64 num_bytes, u64 parent,
-                        u64 root_objectid, u64 owner, u64 offset)
+                        u64 root_objectid, u64 owner, u64 offset, int for_cow)
 {
        int ret;
+       struct btrfs_fs_info *fs_info = root->fs_info;
+
        BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
               root_objectid == BTRFS_TREE_LOG_OBJECTID);
 
        if (owner < BTRFS_FIRST_FREE_OBJECTID) {
-               ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
+               ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+                                       num_bytes,
                                        parent, root_objectid, (int)owner,
-                                       BTRFS_ADD_DELAYED_REF, NULL);
+                                       BTRFS_ADD_DELAYED_REF, NULL, for_cow);
        } else {
-               ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
+               ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+                                       num_bytes,
                                        parent, root_objectid, owner, offset,
-                                       BTRFS_ADD_DELAYED_REF, NULL);
+                                       BTRFS_ADD_DELAYED_REF, NULL, for_cow);
        }
        return ret;
 }
@@ -2232,6 +2236,28 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                        }
                }
 
+               /*
+                * locked_ref is the head node, so we have to go one
+                * node back for any delayed ref updates
+                */
+               ref = select_delayed_ref(locked_ref);
+
+               if (ref && ref->seq &&
+                   btrfs_check_delayed_seq(delayed_refs, ref->seq)) {
+                       /*
+                        * there are still refs with lower seq numbers in the
+                        * process of being added. Don't run this ref yet.
+                        */
+                       list_del_init(&locked_ref->cluster);
+                       mutex_unlock(&locked_ref->mutex);
+                       locked_ref = NULL;
+                       delayed_refs->num_heads_ready++;
+                       spin_unlock(&delayed_refs->lock);
+                       cond_resched();
+                       spin_lock(&delayed_refs->lock);
+                       continue;
+               }
+
                /*
                 * record the must insert reserved flag before we
                 * drop the spin lock.
@@ -2242,11 +2268,6 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                extent_op = locked_ref->extent_op;
                locked_ref->extent_op = NULL;
 
-               /*
-                * locked_ref is the head node, so we have to go one
-                * node back for any delayed ref updates
-                */
-               ref = select_delayed_ref(locked_ref);
                if (!ref) {
                        /* All delayed refs have been processed, Go ahead
                         * and send the head node to run_one_delayed_ref,
@@ -2267,9 +2288,7 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                                BUG_ON(ret);
                                kfree(extent_op);
 
-                               cond_resched();
-                               spin_lock(&delayed_refs->lock);
-                               continue;
+                               goto next;
                        }
 
                        list_del_init(&locked_ref->cluster);
@@ -2279,7 +2298,12 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                ref->in_tree = 0;
                rb_erase(&ref->rb_node, &delayed_refs->root);
                delayed_refs->num_entries--;
-
+               /*
+                * we modified num_entries, but as we're currently running
+                * delayed refs, skip
+                *     wake_up(&delayed_refs->seq_wait);
+                * here.
+                */
                spin_unlock(&delayed_refs->lock);
 
                ret = run_one_delayed_ref(trans, root, ref, extent_op,
@@ -2289,13 +2313,34 @@ static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
                btrfs_put_delayed_ref(ref);
                kfree(extent_op);
                count++;
-
+next:
+               do_chunk_alloc(trans, root->fs_info->extent_root,
+                              2 * 1024 * 1024,
+                              btrfs_get_alloc_profile(root, 0),
+                              CHUNK_ALLOC_NO_FORCE);
                cond_resched();
                spin_lock(&delayed_refs->lock);
        }
        return count;
 }
 
+
+static void wait_for_more_refs(struct btrfs_delayed_ref_root *delayed_refs,
+                       unsigned long num_refs)
+{
+       struct list_head *first_seq = delayed_refs->seq_head.next;
+
+       spin_unlock(&delayed_refs->lock);
+       pr_debug("waiting for more refs (num %ld, first %p)\n",
+                num_refs, first_seq);
+       wait_event(delayed_refs->seq_wait,
+                  num_refs != delayed_refs->num_entries ||
+                  delayed_refs->seq_head.next != first_seq);
+       pr_debug("done waiting for more refs (num %ld, first %p)\n",
+                delayed_refs->num_entries, delayed_refs->seq_head.next);
+       spin_lock(&delayed_refs->lock);
+}
+
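wait_for_more_refs() above and the seq check added to run_clustered_refs() enforce one rule: a delayed ref that carries a sequence number must not run while refs with lower sequence numbers are still being built. A self-contained sketch of that gate; the struct and helper below are illustrative stand-ins, only the comparison mirrors the diff:

#include <stdbool.h>
#include <stdio.h>

/* illustrative stand-in for the delayed-ref sequence bookkeeping */
struct seq_state {
        bool have_pending;              /* refs still in the middle of being added */
        unsigned long lowest_pending;   /* lowest seq among those refs */
};

/*
 * mirrors the intent of btrfs_check_delayed_seq(): a ref with sequence
 * number 'seq' has to wait while any lower sequence is still in flight
 */
static bool must_defer(const struct seq_state *s, unsigned long seq)
{
        return seq && s->have_pending && s->lowest_pending < seq;
}

int main(void)
{
        struct seq_state s = { .have_pending = true, .lowest_pending = 5 };

        printf("seq 7: defer=%d\n", must_defer(&s, 7)); /* 1 - put the head back */
        printf("seq 3: defer=%d\n", must_defer(&s, 3)); /* 0 - safe to run */
        printf("seq 0: defer=%d\n", must_defer(&s, 0)); /* 0 - no seq, run */
        return 0;
}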
 /*
  * this starts processing the delayed reference count updates and
  * extent insertions we have queued up so far.  count can be
@@ -2311,15 +2356,23 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
        struct btrfs_delayed_ref_node *ref;
        struct list_head cluster;
        int ret;
+       u64 delayed_start;
        int run_all = count == (unsigned long)-1;
        int run_most = 0;
+       unsigned long num_refs = 0;
+       int consider_waiting;
 
        if (root == root->fs_info->extent_root)
                root = root->fs_info->tree_root;
 
+       do_chunk_alloc(trans, root->fs_info->extent_root,
+                      2 * 1024 * 1024, btrfs_get_alloc_profile(root, 0),
+                      CHUNK_ALLOC_NO_FORCE);
+
        delayed_refs = &trans->transaction->delayed_refs;
        INIT_LIST_HEAD(&cluster);
 again:
+       consider_waiting = 0;
        spin_lock(&delayed_refs->lock);
        if (count == 0) {
                count = delayed_refs->num_entries * 2;
@@ -2336,11 +2389,35 @@ again:
                 * of refs to process starting at the first one we are able to
                 * lock
                 */
+               delayed_start = delayed_refs->run_delayed_start;
                ret = btrfs_find_ref_cluster(trans, &cluster,
                                             delayed_refs->run_delayed_start);
                if (ret)
                        break;
 
+               if (delayed_start >= delayed_refs->run_delayed_start) {
+                       if (consider_waiting == 0) {
+                               /*
+                                * btrfs_find_ref_cluster looped. let's do one
+                                * more cycle. if we don't run any delayed ref
+                                * during that cycle (because we can't because
+                                * during that cycle (because we can't, since
+                                * all of them are blocked) and the number of
+                                */
+                               consider_waiting = 1;
+                               num_refs = delayed_refs->num_entries;
+                       } else {
+                               wait_for_more_refs(delayed_refs, num_refs);
+                               /*
+                                * after waiting, things have changed. we
+                                * dropped the lock and someone else might have
+                                * run some refs, built new clusters and so on.
+                                * therefore, we restart staleness detection.
+                                */
+                               consider_waiting = 0;
+                       }
+               }
+
                ret = run_clustered_refs(trans, root, &cluster);
                BUG_ON(ret < 0);
 
@@ -2348,6 +2425,11 @@ again:
 
                if (count == 0)
                        break;
+
+               if (ret || delayed_refs->run_delayed_start == 0) {
+                       /* refs were run, let's reset staleness detection */
+                       consider_waiting = 0;
+               }
        }
 
        if (run_all) {
@@ -2405,7 +2487,8 @@ int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
        extent_op->update_key = 0;
        extent_op->is_data = is_data ? 1 : 0;
 
-       ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
+       ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
+                                         num_bytes, extent_op);
        if (ret)
                kfree(extent_op);
        return ret;
@@ -2590,7 +2673,7 @@ out:
 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
-                          int full_backref, int inc)
+                          int full_backref, int inc, int for_cow)
 {
        u64 bytenr;
        u64 num_bytes;
@@ -2603,7 +2686,7 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
        int level;
        int ret = 0;
        int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
-                           u64, u64, u64, u64, u64, u64);
+                           u64, u64, u64, u64, u64, u64, int);
 
        ref_root = btrfs_header_owner(buf);
        nritems = btrfs_header_nritems(buf);
@@ -2640,14 +2723,15 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
                        key.offset -= btrfs_file_extent_offset(buf, fi);
                        ret = process_func(trans, root, bytenr, num_bytes,
                                           parent, ref_root, key.objectid,
-                                          key.offset);
+                                          key.offset, for_cow);
                        if (ret)
                                goto fail;
                } else {
                        bytenr = btrfs_node_blockptr(buf, i);
                        num_bytes = btrfs_level_size(root, level - 1);
                        ret = process_func(trans, root, bytenr, num_bytes,
-                                          parent, ref_root, level - 1, 0);
+                                          parent, ref_root, level - 1, 0,
+                                          for_cow);
                        if (ret)
                                goto fail;
                }
@@ -2659,15 +2743,15 @@ fail:
 }
 
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                 struct extent_buffer *buf, int full_backref)
+                 struct extent_buffer *buf, int full_backref, int for_cow)
 {
-       return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
+       return __btrfs_mod_ref(trans, root, buf, full_backref, 1, for_cow);
 }
 
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-                 struct extent_buffer *buf, int full_backref)
+                 struct extent_buffer *buf, int full_backref, int for_cow)
 {
-       return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
+       return __btrfs_mod_ref(trans, root, buf, full_backref, 0, for_cow);
 }
 
 static int write_one_cache_group(struct btrfs_trans_handle *trans,
@@ -2993,9 +3077,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
                INIT_LIST_HEAD(&found->block_groups[i]);
        init_rwsem(&found->groups_sem);
        spin_lock_init(&found->lock);
-       found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
-                               BTRFS_BLOCK_GROUP_SYSTEM |
-                               BTRFS_BLOCK_GROUP_METADATA);
+       found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
        found->total_bytes = total_bytes;
        found->disk_total = total_bytes * factor;
        found->bytes_used = bytes_used;
@@ -3016,20 +3098,27 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 
 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
 {
-       u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
-                                  BTRFS_BLOCK_GROUP_RAID1 |
-                                  BTRFS_BLOCK_GROUP_RAID10 |
-                                  BTRFS_BLOCK_GROUP_DUP);
-       if (extra_flags) {
-               if (flags & BTRFS_BLOCK_GROUP_DATA)
-                       fs_info->avail_data_alloc_bits |= extra_flags;
-               if (flags & BTRFS_BLOCK_GROUP_METADATA)
-                       fs_info->avail_metadata_alloc_bits |= extra_flags;
-               if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
-                       fs_info->avail_system_alloc_bits |= extra_flags;
-       }
+       u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+       /* chunk -> extended profile */
+       if (extra_flags == 0)
+               extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+       if (flags & BTRFS_BLOCK_GROUP_DATA)
+               fs_info->avail_data_alloc_bits |= extra_flags;
+       if (flags & BTRFS_BLOCK_GROUP_METADATA)
+               fs_info->avail_metadata_alloc_bits |= extra_flags;
+       if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+               fs_info->avail_system_alloc_bits |= extra_flags;
 }
 
+/*
+ * @flags: available profiles in extended format (see ctree.h)
+ *
+ * Returns reduced profile in chunk format.  If profile changing is in
+ * progress (either running or paused), picks the target profile (if it's
+ * already available), otherwise falls back to plain reducing.
+ */
 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
 {
        /*
@@ -3040,6 +3129,34 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
        u64 num_devices = root->fs_info->fs_devices->rw_devices +
                root->fs_info->fs_devices->missing_devices;
 
+       /* pick restriper's target profile if it's available */
+       spin_lock(&root->fs_info->balance_lock);
+       if (root->fs_info->balance_ctl) {
+               struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
+               u64 tgt = 0;
+
+               if ((flags & BTRFS_BLOCK_GROUP_DATA) &&
+                   (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+                   (flags & bctl->data.target)) {
+                       tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
+               } else if ((flags & BTRFS_BLOCK_GROUP_SYSTEM) &&
+                          (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+                          (flags & bctl->sys.target)) {
+                       tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
+               } else if ((flags & BTRFS_BLOCK_GROUP_METADATA) &&
+                          (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+                          (flags & bctl->meta.target)) {
+                       tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
+               }
+
+               if (tgt) {
+                       spin_unlock(&root->fs_info->balance_lock);
+                       flags = tgt;
+                       goto out;
+               }
+       }
+       spin_unlock(&root->fs_info->balance_lock);
+
        if (num_devices == 1)
                flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
        if (num_devices < 4)
@@ -3059,22 +3176,25 @@ u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
        if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
            ((flags & BTRFS_BLOCK_GROUP_RAID1) |
             (flags & BTRFS_BLOCK_GROUP_RAID10) |
-            (flags & BTRFS_BLOCK_GROUP_DUP)))
+            (flags & BTRFS_BLOCK_GROUP_DUP))) {
                flags &= ~BTRFS_BLOCK_GROUP_RAID0;
+       }
+
+out:
+       /* extended -> chunk profile */
+       flags &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
        return flags;
 }
 
 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
 {
        if (flags & BTRFS_BLOCK_GROUP_DATA)
-               flags |= root->fs_info->avail_data_alloc_bits &
-                        root->fs_info->data_alloc_profile;
+               flags |= root->fs_info->avail_data_alloc_bits;
        else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
-               flags |= root->fs_info->avail_system_alloc_bits &
-                        root->fs_info->system_alloc_profile;
+               flags |= root->fs_info->avail_system_alloc_bits;
        else if (flags & BTRFS_BLOCK_GROUP_METADATA)
-               flags |= root->fs_info->avail_metadata_alloc_bits &
-                        root->fs_info->metadata_alloc_profile;
+               flags |= root->fs_info->avail_metadata_alloc_bits;
+
        return btrfs_reduce_alloc_profile(root, flags);
 }
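The avail_*_alloc_bits fields now hold profiles in an extended format where "no RAID bit set" is represented by an explicit single bit, and btrfs_reduce_alloc_profile() strips that bit again before returning a chunk-format profile. The real mask and bit values are defined in ctree.h, outside this diff, so the ones in the sketch below are placeholders; only the two conversion steps mirror the code above:

#include <stdio.h>
#include <stdint.h>

/* placeholder bit values - the real ones are defined in ctree.h */
#define BG_RAID0                (1ULL << 0)
#define BG_RAID1                (1ULL << 1)
#define BG_DUP                  (1ULL << 2)
#define BG_RAID10               (1ULL << 3)
#define BG_PROFILE_MASK         (BG_RAID0 | BG_RAID1 | BG_DUP | BG_RAID10)
#define AVAIL_ALLOC_BIT_SINGLE  (1ULL << 4)     /* extended format only */

/* chunk -> extended: "no profile bit" becomes an explicit SINGLE bit,
 * the way set_avail_alloc_bits() now records it */
static uint64_t to_extended(uint64_t chunk_flags)
{
        uint64_t extra = chunk_flags & BG_PROFILE_MASK;

        return extra ? extra : AVAIL_ALLOC_BIT_SINGLE;
}

/* extended -> chunk: drop the SINGLE bit again, like the out: label in
 * btrfs_reduce_alloc_profile() */
static uint64_t to_chunk(uint64_t ext_flags)
{
        return ext_flags & ~AVAIL_ALLOC_BIT_SINGLE;
}

int main(void)
{
        uint64_t single = to_extended(0);               /* unstriped chunk */
        uint64_t raid1 = to_extended(BG_RAID1);

        printf("single: ext=%llx chunk=%llx\n",
               (unsigned long long)single, (unsigned long long)to_chunk(single));
        printf("raid1:  ext=%llx chunk=%llx\n",
               (unsigned long long)raid1, (unsigned long long)to_chunk(raid1));
        return 0;
}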
 
@@ -3191,6 +3311,8 @@ commit_trans:
                return -ENOSPC;
        }
        data_sinfo->bytes_may_use += bytes;
+       trace_btrfs_space_reservation(root->fs_info, "space_info",
+                                     (u64)data_sinfo, bytes, 1);
        spin_unlock(&data_sinfo->lock);
 
        return 0;
@@ -3210,6 +3332,8 @@ void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
        data_sinfo = BTRFS_I(inode)->space_info;
        spin_lock(&data_sinfo->lock);
        data_sinfo->bytes_may_use -= bytes;
+       trace_btrfs_space_reservation(root->fs_info, "space_info",
+                                     (u64)data_sinfo, bytes, 0);
        spin_unlock(&data_sinfo->lock);
 }
 
@@ -3257,27 +3381,15 @@ static int should_alloc_chunk(struct btrfs_root *root,
                if (num_bytes - num_allocated < thresh)
                        return 1;
        }
-
-       /*
-        * we have two similar checks here, one based on percentage
-        * and once based on a hard number of 256MB.  The idea
-        * is that if we have a good amount of free
-        * room, don't allocate a chunk.  A good mount is
-        * less than 80% utilized of the chunks we have allocated,
-        * or more than 256MB free
-        */
-       if (num_allocated + alloc_bytes + 256 * 1024 * 1024 < num_bytes)
-               return 0;
-
-       if (num_allocated + alloc_bytes < div_factor(num_bytes, 8))
-               return 0;
-
        thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
 
-       /* 256MB or 5% of the FS */
-       thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
+       /* 256MB or 2% of the FS */
+       thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 2));
+       /* system chunks need a much smaller threshold */
+       if (sinfo->flags & BTRFS_BLOCK_GROUP_SYSTEM)
+               thresh = 32 * 1024 * 1024;
 
-       if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
+       if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 8))
                return 0;
        return 1;
 }
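The rewritten tail of should_alloc_chunk() reduces to one decision: skip the allocation when the space_info is already larger than a threshold (256MB or 2% of the filesystem, 32MB for system chunks) and still under 80% used. A compact sketch of that check, with the div_factor helpers written out since their definitions are not part of this diff:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

typedef uint64_t u64;

#define SZ_1M   (1024ULL * 1024ULL)

/* in ctree.h these are div_factor_fine() and div_factor(); written out here */
static u64 percent(u64 num, unsigned pct)       { return num * pct / 100; }
static u64 tenths(u64 num, unsigned t)          { return num * t / 10; }

static bool should_alloc(u64 fs_total, u64 si_total, u64 si_used, bool is_system)
{
        u64 thresh;

        /* 256MB or 2% of the FS, whichever is bigger */
        thresh = percent(fs_total, 2);
        if (thresh < 256 * SZ_1M)
                thresh = 256 * SZ_1M;
        /* system chunks need a much smaller threshold */
        if (is_system)
                thresh = 32 * SZ_1M;

        /* already big enough and still mostly empty: don't grow it */
        if (si_total > thresh && si_used < tenths(si_total, 8))
                return false;
        return true;
}

int main(void)
{
        /* 1TB fs, 40GB metadata space_info that is only 10GB used */
        printf("%d\n", should_alloc(1024ULL * 1024 * SZ_1M, 40960 * SZ_1M,
                                    10240 * SZ_1M, false));     /* prints 0 */
        return 0;
}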
@@ -3291,7 +3403,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
        int wait_for_alloc = 0;
        int ret = 0;
 
-       flags = btrfs_reduce_alloc_profile(extent_root, flags);
+       BUG_ON(!profile_is_valid(flags, 0));
 
        space_info = __find_space_info(extent_root->fs_info, flags);
        if (!space_info) {
@@ -3303,7 +3415,7 @@ static int do_chunk_alloc(struct btrfs_trans_handle *trans,
 
 again:
        spin_lock(&space_info->lock);
-       if (space_info->force_alloc)
+       if (force < space_info->force_alloc)
                force = space_info->force_alloc;
        if (space_info->full) {
                spin_unlock(&space_info->lock);
@@ -3582,6 +3694,10 @@ again:
        if (used <= space_info->total_bytes) {
                if (used + orig_bytes <= space_info->total_bytes) {
                        space_info->bytes_may_use += orig_bytes;
+                       trace_btrfs_space_reservation(root->fs_info,
+                                                     "space_info",
+                                                     (u64)space_info,
+                                                     orig_bytes, 1);
                        ret = 0;
                } else {
                        /*
@@ -3649,6 +3765,10 @@ again:
 
                if (used + num_bytes < space_info->total_bytes + avail) {
                        space_info->bytes_may_use += orig_bytes;
+                       trace_btrfs_space_reservation(root->fs_info,
+                                                     "space_info",
+                                                     (u64)space_info,
+                                                     orig_bytes, 1);
                        ret = 0;
                } else {
                        wait_ordered = true;
@@ -3755,7 +3875,8 @@ static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
        spin_unlock(&block_rsv->lock);
 }
 
-static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
+static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
+                                   struct btrfs_block_rsv *block_rsv,
                                    struct btrfs_block_rsv *dest, u64 num_bytes)
 {
        struct btrfs_space_info *space_info = block_rsv->space_info;
@@ -3791,6 +3912,9 @@ static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
                if (num_bytes) {
                        spin_lock(&space_info->lock);
                        space_info->bytes_may_use -= num_bytes;
+                       trace_btrfs_space_reservation(fs_info, "space_info",
+                                                     (u64)space_info,
+                                                     num_bytes, 0);
                        space_info->reservation_progress++;
                        spin_unlock(&space_info->lock);
                }
@@ -3947,7 +4071,8 @@ void btrfs_block_rsv_release(struct btrfs_root *root,
        if (global_rsv->full || global_rsv == block_rsv ||
            block_rsv->space_info != global_rsv->space_info)
                global_rsv = NULL;
-       block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
+       block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
+                               num_bytes);
 }
 
 /*
@@ -4006,11 +4131,15 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
                num_bytes = sinfo->total_bytes - num_bytes;
                block_rsv->reserved += num_bytes;
                sinfo->bytes_may_use += num_bytes;
+               trace_btrfs_space_reservation(fs_info, "space_info",
+                                             (u64)sinfo, num_bytes, 1);
        }
 
        if (block_rsv->reserved >= block_rsv->size) {
                num_bytes = block_rsv->reserved - block_rsv->size;
                sinfo->bytes_may_use -= num_bytes;
+               trace_btrfs_space_reservation(fs_info, "space_info",
+                                             (u64)sinfo, num_bytes, 0);
                sinfo->reservation_progress++;
                block_rsv->reserved = block_rsv->size;
                block_rsv->full = 1;
@@ -4045,7 +4174,8 @@ static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
 
 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
 {
-       block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
+       block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
+                               (u64)-1);
        WARN_ON(fs_info->delalloc_block_rsv.size > 0);
        WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
        WARN_ON(fs_info->trans_block_rsv.size > 0);
@@ -4062,6 +4192,8 @@ void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
        if (!trans->bytes_reserved)
                return;
 
+       trace_btrfs_space_reservation(root->fs_info, "transaction", (u64)trans,
+                                     trans->bytes_reserved, 0);
        btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
        trans->bytes_reserved = 0;
 }
@@ -4079,6 +4211,8 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
         * when we are truly done with the orphan item.
         */
        u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+       trace_btrfs_space_reservation(root->fs_info, "orphan",
+                                     btrfs_ino(inode), num_bytes, 1);
        return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
 }
 
@@ -4086,6 +4220,8 @@ void btrfs_orphan_release_metadata(struct inode *inode)
 {
        struct btrfs_root *root = BTRFS_I(inode)->root;
        u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
+       trace_btrfs_space_reservation(root->fs_info, "orphan",
+                                     btrfs_ino(inode), num_bytes, 0);
        btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
 }
 
@@ -4213,12 +4349,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        /* Need to be holding the i_mutex here if we aren't free space cache */
        if (btrfs_is_free_space_inode(root, inode))
                flush = 0;
-       else
-               WARN_ON(!mutex_is_locked(&inode->i_mutex));
 
        if (flush && btrfs_transaction_in_commit(root->fs_info))
                schedule_timeout(1);
 
+       mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
        num_bytes = ALIGN(num_bytes, root->sectorsize);
 
        spin_lock(&BTRFS_I(inode)->lock);
@@ -4266,8 +4401,14 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
                if (dropped)
                        to_free += btrfs_calc_trans_metadata_size(root, dropped);
 
-               if (to_free)
+               if (to_free) {
                        btrfs_block_rsv_release(root, block_rsv, to_free);
+                       trace_btrfs_space_reservation(root->fs_info,
+                                                     "delalloc",
+                                                     btrfs_ino(inode),
+                                                     to_free, 0);
+               }
+               mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
                return ret;
        }
 
@@ -4278,7 +4419,11 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
        }
        BTRFS_I(inode)->reserved_extents += nr_extents;
        spin_unlock(&BTRFS_I(inode)->lock);
+       mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
 
+       if (to_reserve)
+               trace_btrfs_space_reservation(root->fs_info, "delalloc",
+                                             btrfs_ino(inode), to_reserve, 1);
        block_rsv_add_bytes(block_rsv, to_reserve, 1);
 
        return 0;
@@ -4308,6 +4453,8 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
        if (dropped > 0)
                to_free += btrfs_calc_trans_metadata_size(root, dropped);
 
+       trace_btrfs_space_reservation(root->fs_info, "delalloc",
+                                     btrfs_ino(inode), to_free, 0);
        btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
                                to_free);
 }
@@ -4562,7 +4709,10 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                        cache->reserved += num_bytes;
                        space_info->bytes_reserved += num_bytes;
                        if (reserve == RESERVE_ALLOC) {
-                               BUG_ON(space_info->bytes_may_use < num_bytes);
+                               trace_btrfs_space_reservation(cache->fs_info,
+                                                             "space_info",
+                                                             (u64)space_info,
+                                                             num_bytes, 0);
                                space_info->bytes_may_use -= num_bytes;
                        }
                }
@@ -4928,6 +5078,8 @@ static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
        rb_erase(&head->node.rb_node, &delayed_refs->root);
 
        delayed_refs->num_entries--;
+       if (waitqueue_active(&delayed_refs->seq_wait))
+               wake_up(&delayed_refs->seq_wait);
 
        /*
         * we don't take a ref on the node because we're removing it from the
@@ -4955,16 +5107,17 @@ out:
 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct extent_buffer *buf,
-                          u64 parent, int last_ref)
+                          u64 parent, int last_ref, int for_cow)
 {
        struct btrfs_block_group_cache *cache = NULL;
        int ret;
 
        if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
-               ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
-                                               parent, root->root_key.objectid,
-                                               btrfs_header_level(buf),
-                                               BTRFS_DROP_DELAYED_REF, NULL);
+               ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
+                                       buf->start, buf->len,
+                                       parent, root->root_key.objectid,
+                                       btrfs_header_level(buf),
+                                       BTRFS_DROP_DELAYED_REF, NULL, for_cow);
                BUG_ON(ret);
        }
 
@@ -4999,12 +5152,12 @@ out:
        btrfs_put_block_group(cache);
 }
 
-int btrfs_free_extent(struct btrfs_trans_handle *trans,
-                     struct btrfs_root *root,
-                     u64 bytenr, u64 num_bytes, u64 parent,
-                     u64 root_objectid, u64 owner, u64 offset)
+int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
+                     u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
+                     u64 owner, u64 offset, int for_cow)
 {
        int ret;
+       struct btrfs_fs_info *fs_info = root->fs_info;
 
        /*
         * tree log blocks never actually go into the extent allocation
@@ -5016,14 +5169,17 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
                btrfs_pin_extent(root, bytenr, num_bytes, 1);
                ret = 0;
        } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
-               ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
+               ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
+                                       num_bytes,
                                        parent, root_objectid, (int)owner,
-                                       BTRFS_DROP_DELAYED_REF, NULL);
+                                       BTRFS_DROP_DELAYED_REF, NULL, for_cow);
                BUG_ON(ret);
        } else {
-               ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
-                                       parent, root_objectid, owner,
-                                       offset, BTRFS_DROP_DELAYED_REF, NULL);
+               ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
+                                               num_bytes,
+                                               parent, root_objectid, owner,
+                                               offset, BTRFS_DROP_DELAYED_REF,
+                                               NULL, for_cow);
                BUG_ON(ret);
        }
        return ret;
@@ -5146,6 +5302,8 @@ static noinline int find_free_extent(struct btrfs_trans_handle *trans,
        ins->objectid = 0;
        ins->offset = 0;
 
+       trace_find_free_extent(orig_root, num_bytes, empty_size, data);
+
        space_info = __find_space_info(root->fs_info, data);
        if (!space_info) {
                printk(KERN_ERR "No space info for %llu\n", data);
@@ -5295,15 +5453,6 @@ alloc:
                if (unlikely(block_group->ro))
                        goto loop;
 
-               spin_lock(&block_group->free_space_ctl->tree_lock);
-               if (cached &&
-                   block_group->free_space_ctl->free_space <
-                   num_bytes + empty_cluster + empty_size) {
-                       spin_unlock(&block_group->free_space_ctl->tree_lock);
-                       goto loop;
-               }
-               spin_unlock(&block_group->free_space_ctl->tree_lock);
-
                /*
                 * Ok we want to try and use the cluster allocator, so
                 * lets look there
@@ -5331,6 +5480,8 @@ alloc:
                        if (offset) {
                                /* we have a block, we're done */
                                spin_unlock(&last_ptr->refill_lock);
+                               trace_btrfs_reserve_extent_cluster(root,
+                                       block_group, search_start, num_bytes);
                                goto checks;
                        }
 
@@ -5349,8 +5500,15 @@ refill_cluster:
                         * plenty of times and not have found
                         * anything, so we are likely way too
                         * fragmented for the clustering stuff to find
-                        * anything.  */
-                       if (loop >= LOOP_NO_EMPTY_SIZE) {
+                        * anything.
+                        *
+                        * However, if the cluster is taken from the
+                        * current block group, release the cluster
+                        * first, so that we stand a better chance of
+                        * succeeding in the unclustered
+                        * allocation.  */
+                       if (loop >= LOOP_NO_EMPTY_SIZE &&
+                           last_ptr->block_group != block_group) {
                                spin_unlock(&last_ptr->refill_lock);
                                goto unclustered_alloc;
                        }
@@ -5361,6 +5519,11 @@ refill_cluster:
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
+                       if (loop >= LOOP_NO_EMPTY_SIZE) {
+                               spin_unlock(&last_ptr->refill_lock);
+                               goto unclustered_alloc;
+                       }
+
                        /* allocate a cluster in this block group */
                        ret = btrfs_find_space_cluster(trans, root,
                                               block_group, last_ptr,
@@ -5377,6 +5540,9 @@ refill_cluster:
                                if (offset) {
                                        /* we found one, proceed */
                                        spin_unlock(&last_ptr->refill_lock);
+                                       trace_btrfs_reserve_extent_cluster(root,
+                                               block_group, search_start,
+                                               num_bytes);
                                        goto checks;
                                }
                        } else if (!cached && loop > LOOP_CACHING_NOWAIT
@@ -5401,6 +5567,15 @@ refill_cluster:
                }
 
 unclustered_alloc:
+               spin_lock(&block_group->free_space_ctl->tree_lock);
+               if (cached &&
+                   block_group->free_space_ctl->free_space <
+                   num_bytes + empty_cluster + empty_size) {
+                       spin_unlock(&block_group->free_space_ctl->tree_lock);
+                       goto loop;
+               }
+               spin_unlock(&block_group->free_space_ctl->tree_lock);
+
                offset = btrfs_find_space_for_alloc(block_group, search_start,
                                                    num_bytes, empty_size);
                /*
@@ -5438,9 +5613,6 @@ checks:
                        goto loop;
                }
 
-               ins->objectid = search_start;
-               ins->offset = num_bytes;
-
                if (offset < search_start)
                        btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
@@ -5457,6 +5629,8 @@ checks:
                ins->objectid = search_start;
                ins->offset = num_bytes;
 
+               trace_btrfs_reserve_extent(orig_root, block_group,
+                                          search_start, num_bytes);
                if (offset < search_start)
                        btrfs_add_free_space(used_block_group, offset,
                                             search_start - offset);
@@ -5621,6 +5795,7 @@ int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
                         u64 search_end, struct btrfs_key *ins,
                         u64 data)
 {
+       bool final_tried = false;
        int ret;
        u64 search_start = 0;
 
@@ -5640,22 +5815,25 @@ again:
                               search_start, search_end, hint_byte,
                               ins, data);
 
-       if (ret == -ENOSPC && num_bytes > min_alloc_size) {
-               num_bytes = num_bytes >> 1;
-               num_bytes = num_bytes & ~(root->sectorsize - 1);
-               num_bytes = max(num_bytes, min_alloc_size);
-               do_chunk_alloc(trans, root->fs_info->extent_root,
-                              num_bytes, data, CHUNK_ALLOC_FORCE);
-               goto again;
-       }
-       if (ret == -ENOSPC && btrfs_test_opt(root, ENOSPC_DEBUG)) {
-               struct btrfs_space_info *sinfo;
-
-               sinfo = __find_space_info(root->fs_info, data);
-               printk(KERN_ERR "btrfs allocation failed flags %llu, "
-                      "wanted %llu\n", (unsigned long long)data,
-                      (unsigned long long)num_bytes);
-               dump_space_info(sinfo, num_bytes, 1);
+       if (ret == -ENOSPC) {
+               if (!final_tried) {
+                       num_bytes = num_bytes >> 1;
+                       num_bytes = num_bytes & ~(root->sectorsize - 1);
+                       num_bytes = max(num_bytes, min_alloc_size);
+                       do_chunk_alloc(trans, root->fs_info->extent_root,
+                                      num_bytes, data, CHUNK_ALLOC_FORCE);
+                       if (num_bytes == min_alloc_size)
+                               final_tried = true;
+                       goto again;
+               } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
+                       struct btrfs_space_info *sinfo;
+
+                       sinfo = __find_space_info(root->fs_info, data);
+                       printk(KERN_ERR "btrfs allocation failed flags %llu, "
+                              "wanted %llu\n", (unsigned long long)data,
+                              (unsigned long long)num_bytes);
+                       dump_space_info(sinfo, num_bytes, 1);
+               }
        }
 
        trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
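btrfs_reserve_extent() now handles ENOSPC by halving the request, realigning it to the sector size and clamping it to min_alloc_size; only when the request has already shrunk to min_alloc_size is the failure treated as final and the debug dump printed. The shrink step in isolation, as a stand-alone sketch with made-up sizes:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t u64;

/* one ENOSPC back-off step: halve, align down to the sector size,
 * never go below min_alloc_size; report when this was the last try */
static u64 shrink_request(u64 num_bytes, u64 min_alloc_size,
                          u64 sectorsize, bool *final_tried)
{
        num_bytes >>= 1;
        num_bytes &= ~(sectorsize - 1);
        if (num_bytes < min_alloc_size)
                num_bytes = min_alloc_size;
        if (num_bytes == min_alloc_size)
                *final_tried = true;
        return num_bytes;
}

int main(void)
{
        u64 req = 8 * 1024 * 1024;      /* start with an 8MB request */
        bool final_tried = false;

        while (!final_tried) {          /* pretend every attempt hits ENOSPC */
                req = shrink_request(req, 64 * 1024, 4096, &final_tried);
                printf("retrying with %llu bytes\n", (unsigned long long)req);
        }
        return 0;
}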
@@ -5842,9 +6020,10 @@ int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
 
        BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
 
-       ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
-                                        0, root_objectid, owner, offset,
-                                        BTRFS_ADD_DELAYED_EXTENT, NULL);
+       ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
+                                        ins->offset, 0,
+                                        root_objectid, owner, offset,
+                                        BTRFS_ADD_DELAYED_EXTENT, NULL, 0);
        return ret;
 }
 
@@ -5997,10 +6176,11 @@ use_block_rsv(struct btrfs_trans_handle *trans,
        return ERR_PTR(-ENOSPC);
 }
 
-static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
+static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
+                           struct btrfs_block_rsv *block_rsv, u32 blocksize)
 {
        block_rsv_add_bytes(block_rsv, blocksize, 0);
-       block_rsv_release_bytes(block_rsv, NULL, 0);
+       block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
 }
 
 /*
@@ -6014,7 +6194,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                                        struct btrfs_root *root, u32 blocksize,
                                        u64 parent, u64 root_objectid,
                                        struct btrfs_disk_key *key, int level,
-                                       u64 hint, u64 empty_size)
+                                       u64 hint, u64 empty_size, int for_cow)
 {
        struct btrfs_key ins;
        struct btrfs_block_rsv *block_rsv;
@@ -6030,7 +6210,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
        ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
                                   empty_size, hint, (u64)-1, &ins, 0);
        if (ret) {
-               unuse_block_rsv(block_rsv, blocksize);
+               unuse_block_rsv(root->fs_info, block_rsv, blocksize);
                return ERR_PTR(ret);
        }
 
@@ -6058,10 +6238,11 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                extent_op->update_flags = 1;
                extent_op->is_data = 0;
 
-               ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
+               ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
+                                       ins.objectid,
                                        ins.offset, parent, root_objectid,
                                        level, BTRFS_ADD_DELAYED_EXTENT,
-                                       extent_op);
+                                       extent_op, for_cow);
                BUG_ON(ret);
        }
        return buf;
@@ -6078,6 +6259,7 @@ struct walk_control {
        int keep_locks;
        int reada_slot;
        int reada_count;
+       int for_reloc;
 };
 
 #define DROP_REFERENCE 1
@@ -6216,9 +6398,9 @@ static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
        /* wc->stage == UPDATE_BACKREF */
        if (!(wc->flags[level] & flag)) {
                BUG_ON(!path->locks[level]);
-               ret = btrfs_inc_ref(trans, root, eb, 1);
+               ret = btrfs_inc_ref(trans, root, eb, 1, wc->for_reloc);
                BUG_ON(ret);
-               ret = btrfs_dec_ref(trans, root, eb, 0);
+               ret = btrfs_dec_ref(trans, root, eb, 0, wc->for_reloc);
                BUG_ON(ret);
                ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
                                                  eb->len, flag, 0);
@@ -6362,7 +6544,7 @@ skip:
                }
 
                ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
-                                       root->root_key.objectid, level - 1, 0);
+                               root->root_key.objectid, level - 1, 0, 0);
                BUG_ON(ret);
        }
        btrfs_tree_unlock(next);
@@ -6436,9 +6618,11 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
        if (wc->refs[level] == 1) {
                if (level == 0) {
                        if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
-                               ret = btrfs_dec_ref(trans, root, eb, 1);
+                               ret = btrfs_dec_ref(trans, root, eb, 1,
+                                                   wc->for_reloc);
                        else
-                               ret = btrfs_dec_ref(trans, root, eb, 0);
+                               ret = btrfs_dec_ref(trans, root, eb, 0,
+                                                   wc->for_reloc);
                        BUG_ON(ret);
                }
                /* make block locked assertion in clean_tree_block happy */
@@ -6465,7 +6649,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
                               btrfs_header_owner(path->nodes[level + 1]));
        }
 
-       btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
+       btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0);
 out:
        wc->refs[level] = 0;
        wc->flags[level] = 0;
@@ -6549,7 +6733,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
  * blocks are properly updated.
  */
 void btrfs_drop_snapshot(struct btrfs_root *root,
-                        struct btrfs_block_rsv *block_rsv, int update_ref)
+                        struct btrfs_block_rsv *block_rsv, int update_ref,
+                        int for_reloc)
 {
        struct btrfs_path *path;
        struct btrfs_trans_handle *trans;
@@ -6637,6 +6822,7 @@ void btrfs_drop_snapshot(struct btrfs_root *root,
        wc->stage = DROP_REFERENCE;
        wc->update_ref = update_ref;
        wc->keep_locks = 0;
+       wc->for_reloc = for_reloc;
        wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
 
        while (1) {
@@ -6721,6 +6907,7 @@ out:
  * drop subtree rooted at tree block 'node'.
  *
  * NOTE: this function will unlock and release tree block 'node'
+ * only used by relocation code
  */
 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root,
@@ -6765,6 +6952,7 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
        wc->stage = DROP_REFERENCE;
        wc->update_ref = 0;
        wc->keep_locks = 1;
+       wc->for_reloc = 1;
        wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
 
        while (1) {
@@ -6792,6 +6980,29 @@ static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
        u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
                BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
 
+       if (root->fs_info->balance_ctl) {
+               struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
+               u64 tgt = 0;
+
+               /* pick restriper's target profile and return */
+               if (flags & BTRFS_BLOCK_GROUP_DATA &&
+                   bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+                       tgt = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
+               } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
+                          bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+                       tgt = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
+               } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
+                          bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
+                       tgt = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
+               }
+
+               if (tgt) {
+                       /* extended -> chunk profile */
+                       tgt &= ~BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+                       return tgt;
+               }
+       }
+
        /*
         * we add in the count of missing devices because we want
         * to make sure that any RAID levels on a degraded FS
@@ -7085,7 +7296,7 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
                 * space to fit our block group in.
                 */
                if (device->total_bytes > device->bytes_used + min_free) {
-                       ret = find_free_dev_extent(NULL, device, min_free,
+                       ret = find_free_dev_extent(device, min_free,
                                                   &dev_offset, NULL);
                        if (!ret)
                                dev_nr++;
@@ -7447,6 +7658,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
                                &cache->space_info);
        BUG_ON(ret);
+       update_global_block_rsv(root->fs_info);
 
        spin_lock(&cache->space_info->lock);
        cache->space_info->bytes_readonly += cache->bytes_super;
@@ -7466,6 +7678,22 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
        return 0;
 }
 
+static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
+{
+       u64 extra_flags = flags & BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+       /* chunk -> extended profile */
+       if (extra_flags == 0)
+               extra_flags = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+       if (flags & BTRFS_BLOCK_GROUP_DATA)
+               fs_info->avail_data_alloc_bits &= ~extra_flags;
+       if (flags & BTRFS_BLOCK_GROUP_METADATA)
+               fs_info->avail_metadata_alloc_bits &= ~extra_flags;
+       if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
+               fs_info->avail_system_alloc_bits &= ~extra_flags;
+}
+
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root, u64 group_start)
 {
@@ -7476,6 +7704,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        struct btrfs_key key;
        struct inode *inode;
        int ret;
+       int index;
        int factor;
 
        root = root->fs_info->extent_root;
@@ -7491,6 +7720,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
        free_excluded_extents(root, block_group);
 
        memcpy(&key, &block_group->key, sizeof(key));
+       index = get_block_group_index(block_group);
        if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
                                  BTRFS_BLOCK_GROUP_RAID1 |
                                  BTRFS_BLOCK_GROUP_RAID10))
@@ -7565,6 +7795,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
         * are still on the list after taking the semaphore
         */
        list_del_init(&block_group->list);
+       if (list_empty(&block_group->space_info->block_groups[index]))
+               clear_avail_alloc_bits(root->fs_info, block_group->flags);
        up_write(&block_group->space_info->groups_sem);
 
        if (block_group->cached == BTRFS_CACHE_STARTED)
index 49f3c9dc09f4c81902299fd81c62da1ed8423250..fcf77e1ded40e8d47ed8f64742d8139a61295023 100644 (file)
@@ -18,6 +18,7 @@
 #include "ctree.h"
 #include "btrfs_inode.h"
 #include "volumes.h"
+#include "check-integrity.h"
 
 static struct kmem_cache *extent_state_cache;
 static struct kmem_cache *extent_buffer_cache;
@@ -1895,7 +1896,7 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
        }
        bio->bi_bdev = dev->bdev;
        bio_add_page(bio, page, length, start-page_offset(page));
-       submit_bio(WRITE_SYNC, bio);
+       btrfsic_submit_bio(WRITE_SYNC, bio);
        wait_for_completion(&compl);
 
        if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
@@ -2393,7 +2394,7 @@ static int submit_one_bio(int rw, struct bio *bio, int mirror_num,
                ret = tree->ops->submit_bio_hook(page->mapping->host, rw, bio,
                                           mirror_num, bio_flags, start);
        else
-               submit_bio(rw, bio);
+               btrfsic_submit_bio(rw, bio);
 
        if (bio_flagged(bio, BIO_EOPNOTSUPP))
                ret = -EOPNOTSUPP;
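The submit_bio() call sites switch to btrfsic_submit_bio(), the entry point of the integrity checker pulled in via the new check-integrity.h include. Conceptually it is a checked pass-through: inspect the I/O when the checker is active, forward it unchanged otherwise. A generic user-space rendering of that wrapper shape; none of the names below are the kernel API:

#include <stdio.h>
#include <stdbool.h>

struct io_request { const char *what; };

static bool integrity_check_enabled;            /* mount-option analogue */

static void real_submit(int rw, struct io_request *req)
{
        printf("submitting %s (rw=%d)\n", req->what, rw);
}

/* checked pass-through: validate first when enabled, then forward */
static void checked_submit(int rw, struct io_request *req)
{
        if (integrity_check_enabled)
                printf("integrity checker inspecting %s\n", req->what);
        real_submit(rw, req);
}

int main(void)
{
        struct io_request req = { .what = "metadata block" };

        checked_submit(1, &req);
        integrity_check_enabled = true;
        checked_submit(1, &req);
        return 0;
}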
@@ -3579,6 +3580,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
        atomic_set(&eb->blocking_writers, 0);
        atomic_set(&eb->spinning_readers, 0);
        atomic_set(&eb->spinning_writers, 0);
+       eb->lock_nested = 0;
        init_waitqueue_head(&eb->write_lock_wq);
        init_waitqueue_head(&eb->read_lock_wq);
 
@@ -3907,6 +3909,8 @@ int extent_range_uptodate(struct extent_io_tree *tree,
        while (start <= end) {
                index = start >> PAGE_CACHE_SHIFT;
                page = find_get_page(tree->mapping, index);
+               if (!page)
+                       return 1;
                uptodate = PageUptodate(page);
                page_cache_release(page);
                if (!uptodate) {
index 7604c30013227fd823b1523503f8faaeccf283c4..bc6a042cb6fc496e6910d21fb3cdaec000f43b8e 100644 (file)
@@ -129,6 +129,7 @@ struct extent_buffer {
        struct list_head leak_list;
        struct rcu_head rcu_head;
        atomic_t refs;
+       pid_t lock_owner;
 
        /* count of read lock holders on the extent buffer */
        atomic_t write_locks;
@@ -137,6 +138,7 @@ struct extent_buffer {
        atomic_t blocking_readers;
        atomic_t spinning_readers;
        atomic_t spinning_writers;
+       int lock_nested;
 
        /* protects write locks */
        rwlock_t lock;
index 034d985032296cd4dfbc80e4d6717ac8b4ea77c4..859ba2dd88903ba207c7b0e448e0f5ce7f99e46e 100644 (file)
@@ -678,7 +678,7 @@ next_slot:
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                new_key.objectid,
-                                               start - extent_offset);
+                                               start - extent_offset, 0);
                                BUG_ON(ret);
                                *hint_byte = disk_bytenr;
                        }
@@ -753,7 +753,7 @@ next_slot:
                                                disk_bytenr, num_bytes, 0,
                                                root->root_key.objectid,
                                                key.objectid, key.offset -
-                                               extent_offset);
+                                               extent_offset, 0);
                                BUG_ON(ret);
                                inode_sub_bytes(inode,
                                                extent_end - key.offset);
@@ -962,7 +962,7 @@ again:
 
                ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
                                           root->root_key.objectid,
-                                          ino, orig_offset);
+                                          ino, orig_offset, 0);
                BUG_ON(ret);
 
                if (split == start) {
@@ -989,7 +989,7 @@ again:
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
-                                       ino, orig_offset);
+                                       ino, orig_offset, 0);
                BUG_ON(ret);
        }
        other_start = 0;
@@ -1006,7 +1006,7 @@ again:
                del_nr++;
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        0, root->root_key.objectid,
-                                       ino, orig_offset);
+                                       ino, orig_offset, 0);
                BUG_ON(ret);
        }
        if (del_nr == 0) {
@@ -1274,7 +1274,6 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
                                                   dirty_pages);
                if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
                        btrfs_btree_balance_dirty(root, 1);
-               btrfs_throttle(root);
 
                pos += copied;
                num_written += copied;
index 9a897bf795380808e728c8d074cb58c5bcd9cf89..c2f20594c9f74fde7e97cbdf4dea728b9c5a4bd8 100644 (file)
@@ -319,9 +319,11 @@ static void io_ctl_drop_pages(struct io_ctl *io_ctl)
        io_ctl_unmap_page(io_ctl);
 
        for (i = 0; i < io_ctl->num_pages; i++) {
-               ClearPageChecked(io_ctl->pages[i]);
-               unlock_page(io_ctl->pages[i]);
-               page_cache_release(io_ctl->pages[i]);
+               if (io_ctl->pages[i]) {
+                       ClearPageChecked(io_ctl->pages[i]);
+                       unlock_page(io_ctl->pages[i]);
+                       page_cache_release(io_ctl->pages[i]);
+               }
        }
 }
 
@@ -635,7 +637,10 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        if (!num_entries)
                return 0;
 
-       io_ctl_init(&io_ctl, inode, root);
+       ret = io_ctl_init(&io_ctl, inode, root);
+       if (ret)
+               return ret;
+
        ret = readahead_cache(inode);
        if (ret)
                goto out;
@@ -838,7 +843,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        struct io_ctl io_ctl;
        struct list_head bitmap_list;
        struct btrfs_key key;
-       u64 start, end, len;
+       u64 start, extent_start, extent_end, len;
        int entries = 0;
        int bitmaps = 0;
        int ret;
@@ -849,7 +854,9 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
        if (!i_size_read(inode))
                return -1;
 
-       io_ctl_init(&io_ctl, inode, root);
+       ret = io_ctl_init(&io_ctl, inode, root);
+       if (ret)
+               return -1;
 
        /* Get the cluster for this block_group if it exists */
        if (block_group && !list_empty(&block_group->cluster_list))
@@ -857,25 +864,12 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                                     struct btrfs_free_cluster,
                                     block_group_list);
 
-       /*
-        * We shouldn't have switched the pinned extents yet so this is the
-        * right one
-        */
-       unpin = root->fs_info->pinned_extents;
-
        /* Lock all pages first so we can lock the extent safely. */
        io_ctl_prepare_pages(&io_ctl, inode, 0);
 
        lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
                         0, &cached_state, GFP_NOFS);
 
-       /*
-        * When searching for pinned extents, we need to start at our start
-        * offset.
-        */
-       if (block_group)
-               start = block_group->key.objectid;
-
        node = rb_first(&ctl->free_space_offset);
        if (!node && cluster) {
                node = rb_first(&cluster->root);
@@ -918,9 +912,20 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
         * We want to add any pinned extents to our free space cache
         * so we don't leak the space
         */
+
+       /*
+        * We shouldn't have switched the pinned extents yet so this is the
+        * right one
+        */
+       unpin = root->fs_info->pinned_extents;
+
+       if (block_group)
+               start = block_group->key.objectid;
+
        while (block_group && (start < block_group->key.objectid +
                               block_group->key.offset)) {
-               ret = find_first_extent_bit(unpin, start, &start, &end,
+               ret = find_first_extent_bit(unpin, start,
+                                           &extent_start, &extent_end,
                                            EXTENT_DIRTY);
                if (ret) {
                        ret = 0;
@@ -928,20 +933,21 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
                }
 
                /* This pinned extent is out of our range */
-               if (start >= block_group->key.objectid +
+               if (extent_start >= block_group->key.objectid +
                    block_group->key.offset)
                        break;
 
-               len = block_group->key.objectid +
-                       block_group->key.offset - start;
-               len = min(len, end + 1 - start);
+               extent_start = max(extent_start, start);
+               extent_end = min(block_group->key.objectid +
+                                block_group->key.offset, extent_end + 1);
+               len = extent_end - extent_start;
 
                entries++;
-               ret = io_ctl_add_entry(&io_ctl, start, len, NULL);
+               ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
                if (ret)
                        goto out_nospc;
 
-               start = end + 1;
+               start = extent_end;
        }
 
        /* Write out the bitmaps */
@@ -2236,7 +2242,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                if (entry->bitmap) {
                        ret = btrfs_alloc_from_bitmap(block_group,
                                                      cluster, entry, bytes,
-                                                     min_start);
+                                                     cluster->window_start);
                        if (ret == 0) {
                                node = rb_next(&entry->offset_index);
                                if (!node)
@@ -2245,6 +2251,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                                                 offset_index);
                                continue;
                        }
+                       cluster->window_start += bytes;
                } else {
                        ret = entry->offset;
 
@@ -2283,23 +2290,23 @@ out:
 static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
                                struct btrfs_free_space *entry,
                                struct btrfs_free_cluster *cluster,
-                               u64 offset, u64 bytes, u64 min_bytes)
+                               u64 offset, u64 bytes,
+                               u64 cont1_bytes, u64 min_bytes)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        unsigned long next_zero;
        unsigned long i;
-       unsigned long search_bits;
-       unsigned long total_bits;
+       unsigned long want_bits;
+       unsigned long min_bits;
        unsigned long found_bits;
        unsigned long start = 0;
        unsigned long total_found = 0;
        int ret;
-       bool found = false;
 
        i = offset_to_bit(entry->offset, block_group->sectorsize,
                          max_t(u64, offset, entry->offset));
-       search_bits = bytes_to_bits(bytes, block_group->sectorsize);
-       total_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
+       want_bits = bytes_to_bits(bytes, block_group->sectorsize);
+       min_bits = bytes_to_bits(min_bytes, block_group->sectorsize);
 
 again:
        found_bits = 0;
@@ -2308,7 +2315,7 @@ again:
             i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
                next_zero = find_next_zero_bit(entry->bitmap,
                                               BITS_PER_BITMAP, i);
-               if (next_zero - i >= search_bits) {
+               if (next_zero - i >= min_bits) {
                        found_bits = next_zero - i;
                        break;
                }
@@ -2318,10 +2325,9 @@ again:
        if (!found_bits)
                return -ENOSPC;
 
-       if (!found) {
+       if (!total_found) {
                start = i;
                cluster->max_size = 0;
-               found = true;
        }
 
        total_found += found_bits;
@@ -2329,13 +2335,8 @@ again:
        if (cluster->max_size < found_bits * block_group->sectorsize)
                cluster->max_size = found_bits * block_group->sectorsize;
 
-       if (total_found < total_bits) {
-               i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, next_zero);
-               if (i - start > total_bits * 2) {
-                       total_found = 0;
-                       cluster->max_size = 0;
-                       found = false;
-               }
+       if (total_found < want_bits || cluster->max_size < cont1_bytes) {
+               i = next_zero + 1;
                goto again;
        }
 
@@ -2346,28 +2347,31 @@ again:
                                 &entry->offset_index, 1);
        BUG_ON(ret);
 
+       trace_btrfs_setup_cluster(block_group, cluster,
+                                 total_found * block_group->sectorsize, 1);
        return 0;
 }
 
 /*
  * This searches the block group for just extents to fill the cluster with.
+ * Try to find a cluster with at least bytes total bytes, at least one
+ * extent of cont1_bytes, and other extents of at least min_bytes.
  */
 static noinline int
 setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
                        struct btrfs_free_cluster *cluster,
                        struct list_head *bitmaps, u64 offset, u64 bytes,
-                       u64 min_bytes)
+                       u64 cont1_bytes, u64 min_bytes)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *first = NULL;
        struct btrfs_free_space *entry = NULL;
-       struct btrfs_free_space *prev = NULL;
        struct btrfs_free_space *last;
        struct rb_node *node;
        u64 window_start;
        u64 window_free;
        u64 max_extent;
-       u64 max_gap = 128 * 1024;
+       u64 total_size = 0;
 
        entry = tree_search_offset(ctl, offset, 0, 1);
        if (!entry)
@@ -2377,8 +2381,8 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
         * We don't want bitmaps, so just move along until we find a normal
         * extent entry.
         */
-       while (entry->bitmap) {
-               if (list_empty(&entry->list))
+       while (entry->bitmap || entry->bytes < min_bytes) {
+               if (entry->bitmap && list_empty(&entry->list))
                        list_add_tail(&entry->list, bitmaps);
                node = rb_next(&entry->offset_index);
                if (!node)
@@ -2391,12 +2395,9 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
        max_extent = entry->bytes;
        first = entry;
        last = entry;
-       prev = entry;
 
-       while (window_free <= min_bytes) {
-               node = rb_next(&entry->offset_index);
-               if (!node)
-                       return -ENOSPC;
+       for (node = rb_next(&entry->offset_index); node;
+            node = rb_next(&entry->offset_index)) {
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
 
                if (entry->bitmap) {
@@ -2405,26 +2406,18 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
                        continue;
                }
 
-               /*
-                * we haven't filled the empty size and the window is
-                * very large.  reset and try again
-                */
-               if (entry->offset - (prev->offset + prev->bytes) > max_gap ||
-                   entry->offset - window_start > (min_bytes * 2)) {
-                       first = entry;
-                       window_start = entry->offset;
-                       window_free = entry->bytes;
-                       last = entry;
+               if (entry->bytes < min_bytes)
+                       continue;
+
+               last = entry;
+               window_free += entry->bytes;
+               if (entry->bytes > max_extent)
                        max_extent = entry->bytes;
-               } else {
-                       last = entry;
-                       window_free += entry->bytes;
-                       if (entry->bytes > max_extent)
-                               max_extent = entry->bytes;
-               }
-               prev = entry;
        }
 
+       if (window_free < bytes || max_extent < cont1_bytes)
+               return -ENOSPC;
+
        cluster->window_start = first->offset;
 
        node = &first->offset_index;
@@ -2438,17 +2431,18 @@ setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
 
                entry = rb_entry(node, struct btrfs_free_space, offset_index);
                node = rb_next(&entry->offset_index);
-               if (entry->bitmap)
+               if (entry->bitmap || entry->bytes < min_bytes)
                        continue;
 
                rb_erase(&entry->offset_index, &ctl->free_space_offset);
                ret = tree_insert_offset(&cluster->root, entry->offset,
                                         &entry->offset_index, 0);
+               total_size += entry->bytes;
                BUG_ON(ret);
        } while (node && entry != last);
 
        cluster->max_size = max_extent;
-
+       trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
        return 0;
 }
 
@@ -2460,7 +2454,7 @@ static noinline int
 setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
                     struct btrfs_free_cluster *cluster,
                     struct list_head *bitmaps, u64 offset, u64 bytes,
-                    u64 min_bytes)
+                    u64 cont1_bytes, u64 min_bytes)
 {
        struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
        struct btrfs_free_space *entry;
@@ -2482,10 +2476,10 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
        }
 
        list_for_each_entry(entry, bitmaps, list) {
-               if (entry->bytes < min_bytes)
+               if (entry->bytes < bytes)
                        continue;
                ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
-                                          bytes, min_bytes);
+                                          bytes, cont1_bytes, min_bytes);
                if (!ret)
                        return 0;
        }
@@ -2499,7 +2493,7 @@ setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
 
 /*
  * here we try to find a cluster of blocks in a block group.  The goal
- * is to find at least bytes free and up to empty_size + bytes free.
+ * is to find at least bytes+empty_size.
  * We might not find them all in one contiguous area.
  *
  * returns zero and sets up cluster if things worked out, otherwise
@@ -2515,23 +2509,24 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
        struct btrfs_free_space *entry, *tmp;
        LIST_HEAD(bitmaps);
        u64 min_bytes;
+       u64 cont1_bytes;
        int ret;
 
-       /* for metadata, allow allocates with more holes */
+       /*
+        * Choose the minimum extent size we'll require for this
+        * cluster.  For SSD_SPREAD, don't allow any fragmentation.
+        * For metadata, allow allocations with smaller extents.  For
+        * data, keep it dense.
+        */
        if (btrfs_test_opt(root, SSD_SPREAD)) {
-               min_bytes = bytes + empty_size;
+               cont1_bytes = min_bytes = bytes + empty_size;
        } else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
-               /*
-                * we want to do larger allocations when we are
-                * flushing out the delayed refs, it helps prevent
-                * making more work as we go along.
-                */
-               if (trans->transaction->delayed_refs.flushing)
-                       min_bytes = max(bytes, (bytes + empty_size) >> 1);
-               else
-                       min_bytes = max(bytes, (bytes + empty_size) >> 4);
-       } else
-               min_bytes = max(bytes, (bytes + empty_size) >> 2);
+               cont1_bytes = bytes;
+               min_bytes = block_group->sectorsize;
+       } else {
+               cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
+               min_bytes = block_group->sectorsize;
+       }
 
        spin_lock(&ctl->tree_lock);
 
@@ -2539,7 +2534,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
         * If we know we don't have enough space to make a cluster don't even
         * bother doing all the work to try and find one.
         */
-       if (ctl->free_space < min_bytes) {
+       if (ctl->free_space < bytes) {
                spin_unlock(&ctl->tree_lock);
                return -ENOSPC;
        }
@@ -2552,11 +2547,17 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                goto out;
        }
 
+       trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
+                                min_bytes);
+
+       INIT_LIST_HEAD(&bitmaps);
        ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
-                                     bytes, min_bytes);
+                                     bytes + empty_size,
+                                     cont1_bytes, min_bytes);
        if (ret)
                ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
-                                          offset, bytes, min_bytes);
+                                          offset, bytes + empty_size,
+                                          cont1_bytes, min_bytes);
 
        /* Clear our temporary list */
        list_for_each_entry_safe(entry, tmp, &bitmaps, list)
@@ -2567,6 +2568,8 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
                list_add_tail(&cluster->block_group_list,
                              &block_group->cluster_list);
                cluster->block_group = block_group;
+       } else {
+               trace_btrfs_failed_cluster_setup(block_group);
        }
 out:
        spin_unlock(&cluster->lock);
@@ -2588,17 +2591,57 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
        cluster->block_group = NULL;
 }
 
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
-                          u64 *trimmed, u64 start, u64 end, u64 minlen)
+static int do_trimming(struct btrfs_block_group_cache *block_group,
+                      u64 *total_trimmed, u64 start, u64 bytes,
+                      u64 reserved_start, u64 reserved_bytes)
 {
-       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
-       struct btrfs_free_space *entry = NULL;
+       struct btrfs_space_info *space_info = block_group->space_info;
        struct btrfs_fs_info *fs_info = block_group->fs_info;
-       u64 bytes = 0;
-       u64 actually_trimmed;
-       int ret = 0;
+       int ret;
+       int update = 0;
+       u64 trimmed = 0;
 
-       *trimmed = 0;
+       spin_lock(&space_info->lock);
+       spin_lock(&block_group->lock);
+       if (!block_group->ro) {
+               block_group->reserved += reserved_bytes;
+               space_info->bytes_reserved += reserved_bytes;
+               update = 1;
+       }
+       spin_unlock(&block_group->lock);
+       spin_unlock(&space_info->lock);
+
+       ret = btrfs_error_discard_extent(fs_info->extent_root,
+                                        start, bytes, &trimmed);
+       if (!ret)
+               *total_trimmed += trimmed;
+
+       btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
+
+       if (update) {
+               spin_lock(&space_info->lock);
+               spin_lock(&block_group->lock);
+               if (block_group->ro)
+                       space_info->bytes_readonly += reserved_bytes;
+               block_group->reserved -= reserved_bytes;
+               space_info->bytes_reserved -= reserved_bytes;
+               spin_unlock(&space_info->lock);
+               spin_unlock(&block_group->lock);
+       }
+
+       return ret;
+}
+
+static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
+                         u64 *total_trimmed, u64 start, u64 end, u64 minlen)
+{
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+       struct btrfs_free_space *entry;
+       struct rb_node *node;
+       int ret = 0;
+       u64 extent_start;
+       u64 extent_bytes;
+       u64 bytes;
 
        while (start < end) {
                spin_lock(&ctl->tree_lock);
@@ -2609,81 +2652,118 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
                }
 
                entry = tree_search_offset(ctl, start, 0, 1);
-               if (!entry)
-                       entry = tree_search_offset(ctl,
-                                                  offset_to_bitmap(ctl, start),
-                                                  1, 1);
-
-               if (!entry || entry->offset >= end) {
+               if (!entry) {
                        spin_unlock(&ctl->tree_lock);
                        break;
                }
 
-               if (entry->bitmap) {
-                       ret = search_bitmap(ctl, entry, &start, &bytes);
-                       if (!ret) {
-                               if (start >= end) {
-                                       spin_unlock(&ctl->tree_lock);
-                                       break;
-                               }
-                               bytes = min(bytes, end - start);
-                               bitmap_clear_bits(ctl, entry, start, bytes);
-                               if (entry->bytes == 0)
-                                       free_bitmap(ctl, entry);
-                       } else {
-                               start = entry->offset + BITS_PER_BITMAP *
-                                       block_group->sectorsize;
+               /* skip bitmaps */
+               while (entry->bitmap) {
+                       node = rb_next(&entry->offset_index);
+                       if (!node) {
                                spin_unlock(&ctl->tree_lock);
-                               ret = 0;
-                               continue;
+                               goto out;
                        }
-               } else {
-                       start = entry->offset;
-                       bytes = min(entry->bytes, end - start);
-                       unlink_free_space(ctl, entry);
-                       kmem_cache_free(btrfs_free_space_cachep, entry);
+                       entry = rb_entry(node, struct btrfs_free_space,
+                                        offset_index);
                }
 
+               if (entry->offset >= end) {
+                       spin_unlock(&ctl->tree_lock);
+                       break;
+               }
+
+               extent_start = entry->offset;
+               extent_bytes = entry->bytes;
+               start = max(start, extent_start);
+               bytes = min(extent_start + extent_bytes, end) - start;
+               if (bytes < minlen) {
+                       spin_unlock(&ctl->tree_lock);
+                       goto next;
+               }
+
+               unlink_free_space(ctl, entry);
+               kmem_cache_free(btrfs_free_space_cachep, entry);
+
                spin_unlock(&ctl->tree_lock);
 
-               if (bytes >= minlen) {
-                       struct btrfs_space_info *space_info;
-                       int update = 0;
-
-                       space_info = block_group->space_info;
-                       spin_lock(&space_info->lock);
-                       spin_lock(&block_group->lock);
-                       if (!block_group->ro) {
-                               block_group->reserved += bytes;
-                               space_info->bytes_reserved += bytes;
-                               update = 1;
-                       }
-                       spin_unlock(&block_group->lock);
-                       spin_unlock(&space_info->lock);
-
-                       ret = btrfs_error_discard_extent(fs_info->extent_root,
-                                                        start,
-                                                        bytes,
-                                                        &actually_trimmed);
-
-                       btrfs_add_free_space(block_group, start, bytes);
-                       if (update) {
-                               spin_lock(&space_info->lock);
-                               spin_lock(&block_group->lock);
-                               if (block_group->ro)
-                                       space_info->bytes_readonly += bytes;
-                               block_group->reserved -= bytes;
-                               space_info->bytes_reserved -= bytes;
-                               spin_unlock(&space_info->lock);
-                               spin_unlock(&block_group->lock);
-                       }
+               ret = do_trimming(block_group, total_trimmed, start, bytes,
+                                 extent_start, extent_bytes);
+               if (ret)
+                       break;
+next:
+               start += bytes;
 
-                       if (ret)
-                               break;
-                       *trimmed += actually_trimmed;
+               if (fatal_signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+
+               cond_resched();
+       }
+out:
+       return ret;
+}
+
+static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
+                       u64 *total_trimmed, u64 start, u64 end, u64 minlen)
+{
+       struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
+       struct btrfs_free_space *entry;
+       int ret = 0;
+       int ret2;
+       u64 bytes;
+       u64 offset = offset_to_bitmap(ctl, start);
+
+       while (offset < end) {
+               bool next_bitmap = false;
+
+               spin_lock(&ctl->tree_lock);
+
+               if (ctl->free_space < minlen) {
+                       spin_unlock(&ctl->tree_lock);
+                       break;
+               }
+
+               entry = tree_search_offset(ctl, offset, 1, 0);
+               if (!entry) {
+                       spin_unlock(&ctl->tree_lock);
+                       next_bitmap = true;
+                       goto next;
+               }
+
+               bytes = minlen;
+               ret2 = search_bitmap(ctl, entry, &start, &bytes);
+               if (ret2 || start >= end) {
+                       spin_unlock(&ctl->tree_lock);
+                       next_bitmap = true;
+                       goto next;
+               }
+
+               bytes = min(bytes, end - start);
+               if (bytes < minlen) {
+                       spin_unlock(&ctl->tree_lock);
+                       goto next;
+               }
+
+               bitmap_clear_bits(ctl, entry, start, bytes);
+               if (entry->bytes == 0)
+                       free_bitmap(ctl, entry);
+
+               spin_unlock(&ctl->tree_lock);
+
+               ret = do_trimming(block_group, total_trimmed, start, bytes,
+                                 start, bytes);
+               if (ret)
+                       break;
+next:
+               if (next_bitmap) {
+                       offset += BITS_PER_BITMAP * ctl->unit;
+               } else {
+                       start += bytes;
+                       if (start >= offset + BITS_PER_BITMAP * ctl->unit)
+                               offset += BITS_PER_BITMAP * ctl->unit;
                }
-               start += bytes;
-               bytes = 0;
 
                if (fatal_signal_pending(current)) {
                        ret = -ERESTARTSYS;
@@ -2696,6 +2776,22 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
        return ret;
 }
 
+int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+                          u64 *trimmed, u64 start, u64 end, u64 minlen)
+{
+       int ret;
+
+       *trimmed = 0;
+
+       ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
+       if (ret)
+               return ret;
+
+       ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
+
+       return ret;
+}
+
 /*
  * Find the left-most item in the cache tree, and then return the
  * smallest inode number in the item.
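A minimal stand-alone sketch of the pinned-extent clamping added to __btrfs_write_out_cache() above, useful for checking the arithmetic in isolation. The helper clamp_pinned() and the surrounding program are names invented for this sketch; only the max/min/advance logic mirrors the hunk.

/* Hypothetical user-space sketch, not kernel code: clamp a pinned
 * extent [ext_start, ext_end] (inclusive, as returned by
 * find_first_extent_bit) into the block group [bg_start, bg_start +
 * bg_len) and return the length handed to io_ctl_add_entry(), while
 * advancing the search cursor the way the loop advances "start".
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

static u64 clamp_pinned(u64 bg_start, u64 bg_len,
			u64 ext_start, u64 ext_end, u64 *cursor)
{
	u64 bg_end = bg_start + bg_len;
	u64 start = ext_start > *cursor ? ext_start : *cursor;
	u64 end = bg_end < ext_end + 1 ? bg_end : ext_end + 1;

	*cursor = end;		/* next find_first_extent_bit() starts here */
	return end - start;	/* len recorded in the free space cache */
}

int main(void)
{
	u64 cursor = 0;		/* block_group->key.objectid */
	u64 len = clamp_pinned(0, 8192, 4096, 16383, &cursor);

	printf("len=%llu next=%llu\n",
	       (unsigned long long)len, (unsigned long long)cursor);
	return 0;
}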
index f8962a957d656b385d0f99d65f300598e419db4d..213ffa86ce1b81f30a5f4ad50753c6d2ac6435aa 100644 (file)
@@ -438,6 +438,8 @@ int btrfs_save_ino_cache(struct btrfs_root *root,
                                          trans->bytes_reserved);
        if (ret)
                goto out;
+       trace_btrfs_space_reservation(root->fs_info, "ino_cache", (u64)trans,
+                                     trans->bytes_reserved, 1);
 again:
        inode = lookup_free_ino_inode(root, path);
        if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
@@ -498,6 +500,8 @@ again:
 out_put:
        iput(inode);
 out_release:
+       trace_btrfs_space_reservation(root->fs_info, "ino_cache", (u64)trans,
+                                     trans->bytes_reserved, 0);
        btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
 out:
        trans->block_rsv = rsv;
index 81b235a61f8c4149dd880d1d7d1238ab690a4e09..32214fe0f7e32eda73449b0063aecd3874ade562 100644 (file)
@@ -1951,12 +1951,28 @@ enum btrfs_orphan_cleanup_state {
 void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root)
 {
+       struct btrfs_block_rsv *block_rsv;
        int ret;
 
        if (!list_empty(&root->orphan_list) ||
            root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
                return;
 
+       spin_lock(&root->orphan_lock);
+       if (!list_empty(&root->orphan_list)) {
+               spin_unlock(&root->orphan_lock);
+               return;
+       }
+
+       if (root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) {
+               spin_unlock(&root->orphan_lock);
+               return;
+       }
+
+       block_rsv = root->orphan_block_rsv;
+       root->orphan_block_rsv = NULL;
+       spin_unlock(&root->orphan_lock);
+
        if (root->orphan_item_inserted &&
            btrfs_root_refs(&root->root_item) > 0) {
                ret = btrfs_del_orphan_item(trans, root->fs_info->tree_root,
@@ -1965,10 +1981,9 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
                root->orphan_item_inserted = 0;
        }
 
-       if (root->orphan_block_rsv) {
-               WARN_ON(root->orphan_block_rsv->size > 0);
-               btrfs_free_block_rsv(root, root->orphan_block_rsv);
-               root->orphan_block_rsv = NULL;
+       if (block_rsv) {
+               WARN_ON(block_rsv->size > 0);
+               btrfs_free_block_rsv(root, block_rsv);
        }
 }
 
@@ -2224,14 +2239,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
                                continue;
                        }
                        nr_truncate++;
-                       /*
-                        * Need to hold the imutex for reservation purposes, not
-                        * a huge deal here but I have a WARN_ON in
-                        * btrfs_delalloc_reserve_space to catch offenders.
-                        */
-                       mutex_lock(&inode->i_mutex);
                        ret = btrfs_truncate(inode);
-                       mutex_unlock(&inode->i_mutex);
                } else {
                        nr_unlink++;
                }
@@ -2845,7 +2853,7 @@ static void __unlink_end_trans(struct btrfs_trans_handle *trans,
                BUG_ON(!root->fs_info->enospc_unlink);
                root->fs_info->enospc_unlink = 0;
        }
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
 }
 
 static int btrfs_unlink(struct inode *dir, struct dentry *dentry)
@@ -3009,7 +3017,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
        int pending_del_nr = 0;
        int pending_del_slot = 0;
        int extent_type = -1;
-       int encoding;
        int ret;
        int err = 0;
        u64 ino = btrfs_ino(inode);
@@ -3059,7 +3066,6 @@ search_again:
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                found_type = btrfs_key_type(&found_key);
-               encoding = 0;
 
                if (found_key.objectid != ino)
                        break;
@@ -3072,10 +3078,6 @@ search_again:
                        fi = btrfs_item_ptr(leaf, path->slots[0],
                                            struct btrfs_file_extent_item);
                        extent_type = btrfs_file_extent_type(leaf, fi);
-                       encoding = btrfs_file_extent_compression(leaf, fi);
-                       encoding |= btrfs_file_extent_encryption(leaf, fi);
-                       encoding |= btrfs_file_extent_other_encoding(leaf, fi);
-
                        if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
                                item_end +=
                                    btrfs_file_extent_num_bytes(leaf, fi);
@@ -3103,7 +3105,7 @@ search_again:
                if (extent_type != BTRFS_FILE_EXTENT_INLINE) {
                        u64 num_dec;
                        extent_start = btrfs_file_extent_disk_bytenr(leaf, fi);
-                       if (!del_item && !encoding) {
+                       if (!del_item) {
                                u64 orig_num_bytes =
                                        btrfs_file_extent_num_bytes(leaf, fi);
                                extent_num_bytes = new_size -
@@ -3179,7 +3181,7 @@ delete:
                        ret = btrfs_free_extent(trans, root, extent_start,
                                                extent_num_bytes, 0,
                                                btrfs_header_owner(leaf),
-                                               ino, extent_offset);
+                                               ino, extent_offset, 0);
                        BUG_ON(ret);
                }
 
@@ -3434,7 +3436,7 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
                i_size_write(inode, newsize);
                btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
                ret = btrfs_update_inode(trans, root, inode);
-               btrfs_end_transaction_throttle(trans, root);
+               btrfs_end_transaction(trans, root);
        } else {
 
                /*
@@ -4655,7 +4657,7 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry,
        }
 out_unlock:
        nr = trans->blocks_used;
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
        btrfs_btree_balance_dirty(root, nr);
        if (drop_inode) {
                inode_dec_link_count(inode);
@@ -4723,7 +4725,7 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry,
        }
 out_unlock:
        nr = trans->blocks_used;
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
@@ -4782,7 +4784,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
        }
 
        nr = trans->blocks_used;
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
 fail:
        if (drop_inode) {
                inode_dec_link_count(inode);
@@ -4848,7 +4850,7 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 
 out_fail:
        nr = trans->blocks_used;
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
        if (drop_on_err)
                iput(inode);
        btrfs_btree_balance_dirty(root, nr);
@@ -5121,7 +5123,7 @@ again:
                        }
                        flush_dcache_page(page);
                } else if (create && PageUptodate(page)) {
-                       WARN_ON(1);
+                       BUG();
                        if (!trans) {
                                kunmap(page);
                                free_extent_map(em);
@@ -6399,21 +6401,23 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
        unsigned long zero_start;
        loff_t size;
        int ret;
+       int reserved = 0;
        u64 page_start;
        u64 page_end;
 
-       /* Need this to keep space reservations serialized */
-       mutex_lock(&inode->i_mutex);
        ret  = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
-       mutex_unlock(&inode->i_mutex);
-       if (!ret)
+       if (!ret) {
                ret = btrfs_update_time(vma->vm_file);
+               reserved = 1;
+       }
        if (ret) {
                if (ret == -ENOMEM)
                        ret = VM_FAULT_OOM;
                else /* -ENOSPC, -EIO, etc */
                        ret = VM_FAULT_SIGBUS;
-               goto out;
+               if (reserved)
+                       goto out;
+               goto out_noreserve;
        }
 
        ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */
@@ -6494,8 +6498,9 @@ out_unlock:
        if (!ret)
                return VM_FAULT_LOCKED;
        unlock_page(page);
-       btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
 out:
+       btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE);
+out_noreserve:
        return ret;
 }
 
@@ -6668,7 +6673,7 @@ end_trans:
                        err = ret;
 
                nr = trans->blocks_used;
-               ret = btrfs_end_transaction_throttle(trans, root);
+               ret = btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(root, nr);
        }
 
@@ -6749,6 +6754,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
        extent_io_tree_init(&ei->io_tree, &inode->i_data);
        extent_io_tree_init(&ei->io_failure_tree, &inode->i_data);
        mutex_init(&ei->log_mutex);
+       mutex_init(&ei->delalloc_mutex);
        btrfs_ordered_inode_tree_init(&ei->ordered_tree);
        INIT_LIST_HEAD(&ei->i_orphan);
        INIT_LIST_HEAD(&ei->delalloc_inodes);
@@ -7074,7 +7080,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                btrfs_end_log_trans(root);
        }
 out_fail:
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
 out_notrans:
        if (old_ino == BTRFS_FIRST_FREE_OBJECTID)
                up_read(&root->fs_info->subvol_sem);
@@ -7246,7 +7252,7 @@ out_unlock:
        if (!err)
                d_instantiate(dentry, inode);
        nr = trans->blocks_used;
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
        if (drop_inode) {
                inode_dec_link_count(inode);
                iput(inode);
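btrfs_orphan_commit_root() above now snapshots and clears root->orphan_block_rsv while holding orphan_lock and only frees it after dropping the lock. A user-space analogue of that detach-under-the-lock pattern, with names (struct holder, release_rsv()) made up purely for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

struct rsv { size_t size; };

struct holder {
	pthread_mutex_t lock;		/* stands in for root->orphan_lock */
	struct rsv *orphan_rsv;		/* shared pointer, protected by lock */
};

static void release_rsv(struct rsv *r)
{
	free(r);			/* stands in for btrfs_free_block_rsv() */
}

static void commit_root(struct holder *h)
{
	struct rsv *r;

	pthread_mutex_lock(&h->lock);
	r = h->orphan_rsv;		/* snapshot ...                         */
	h->orphan_rsv = NULL;		/* ... and detach while still locked    */
	pthread_mutex_unlock(&h->lock);

	if (r)
		release_rsv(r);		/* expensive work done without the lock */
}

int main(void)
{
	struct holder h;

	pthread_mutex_init(&h.lock, NULL);
	h.orphan_rsv = malloc(sizeof(*h.orphan_rsv));

	commit_root(&h);
	printf("orphan_rsv after commit: %p\n", (void *)h.orphan_rsv);

	pthread_mutex_destroy(&h.lock);
	return 0;
}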
index 5441ff1480fdbbd9ce9fe4c9caa6fc0c62392a3c..03bb62a9ee24d3e84cef380cda53af10ff673590 100644 (file)
@@ -176,6 +176,8 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
        struct btrfs_trans_handle *trans;
        unsigned int flags, oldflags;
        int ret;
+       u64 ip_oldflags;
+       unsigned int i_oldflags;
 
        if (btrfs_root_readonly(root))
                return -EROFS;
@@ -192,6 +194,9 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
 
        mutex_lock(&inode->i_mutex);
 
+       ip_oldflags = ip->flags;
+       i_oldflags = inode->i_flags;
+
        flags = btrfs_mask_flags(inode->i_mode, flags);
        oldflags = btrfs_flags_to_ioctl(ip->flags);
        if ((flags ^ oldflags) & (FS_APPEND_FL | FS_IMMUTABLE_FL)) {
@@ -249,19 +254,24 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
                ip->flags &= ~(BTRFS_INODE_COMPRESS | BTRFS_INODE_NOCOMPRESS);
        }
 
-       trans = btrfs_join_transaction(root);
-       BUG_ON(IS_ERR(trans));
+       trans = btrfs_start_transaction(root, 1);
+       if (IS_ERR(trans)) {
+               ret = PTR_ERR(trans);
+               goto out_drop;
+       }
 
        btrfs_update_iflags(inode);
        inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode(trans, root, inode);
-       BUG_ON(ret);
 
        btrfs_end_transaction(trans, root);
+ out_drop:
+       if (ret) {
+               ip->flags = ip_oldflags;
+               inode->i_flags = i_oldflags;
+       }
 
        mnt_drop_write_file(file);
-
-       ret = 0;
  out_unlock:
        mutex_unlock(&inode->i_mutex);
        return ret;
@@ -276,14 +286,13 @@ static int btrfs_ioctl_getversion(struct file *file, int __user *arg)
 
 static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
 {
-       struct btrfs_root *root = fdentry(file)->d_sb->s_fs_info;
-       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_fs_info *fs_info = btrfs_sb(fdentry(file)->d_sb);
        struct btrfs_device *device;
        struct request_queue *q;
        struct fstrim_range range;
        u64 minlen = ULLONG_MAX;
        u64 num_devices = 0;
-       u64 total_bytes = btrfs_super_total_bytes(root->fs_info->super_copy);
+       u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
        int ret;
 
        if (!capable(CAP_SYS_ADMIN))
@@ -312,7 +321,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
 
        range.len = min(range.len, total_bytes - range.start);
        range.minlen = max(range.minlen, minlen);
-       ret = btrfs_trim_fs(root, &range);
+       ret = btrfs_trim_fs(fs_info->tree_root, &range);
        if (ret < 0)
                return ret;
 
@@ -358,7 +367,7 @@ static noinline int create_subvol(struct btrfs_root *root,
                return PTR_ERR(trans);
 
        leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
-                                     0, objectid, NULL, 0, 0, 0);
+                                     0, objectid, NULL, 0, 0, 0, 0);
        if (IS_ERR(leaf)) {
                ret = PTR_ERR(leaf);
                goto fail;
@@ -858,10 +867,8 @@ static int cluster_pages_for_defrag(struct inode *inode,
                return 0;
        file_end = (isize - 1) >> PAGE_CACHE_SHIFT;
 
-       mutex_lock(&inode->i_mutex);
        ret = btrfs_delalloc_reserve_space(inode,
                                           num_pages << PAGE_CACHE_SHIFT);
-       mutex_unlock(&inode->i_mutex);
        if (ret)
                return ret;
 again:
@@ -1058,7 +1065,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
                i = range->start >> PAGE_CACHE_SHIFT;
        }
        if (!max_to_defrag)
-               max_to_defrag = last_index;
+               max_to_defrag = last_index + 1;
 
        /*
         * make writeback starts from i, so the defrag range can be
@@ -1203,13 +1210,21 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       mutex_lock(&root->fs_info->volume_mutex);
+       if (root->fs_info->balance_ctl) {
+               printk(KERN_INFO "btrfs: balance in progress\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
-       if (IS_ERR(vol_args))
-               return PTR_ERR(vol_args);
+       if (IS_ERR(vol_args)) {
+               ret = PTR_ERR(vol_args);
+               goto out;
+       }
 
        vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
 
-       mutex_lock(&root->fs_info->volume_mutex);
        sizestr = vol_args->name;
        devstr = strchr(sizestr, ':');
        if (devstr) {
@@ -1226,7 +1241,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                printk(KERN_INFO "btrfs: resizer unable to find device %llu\n",
                       (unsigned long long)devid);
                ret = -EINVAL;
-               goto out_unlock;
+               goto out_free;
        }
        if (!strcmp(sizestr, "max"))
                new_size = device->bdev->bd_inode->i_size;
@@ -1241,7 +1256,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                new_size = memparse(sizestr, NULL);
                if (new_size == 0) {
                        ret = -EINVAL;
-                       goto out_unlock;
+                       goto out_free;
                }
        }
 
@@ -1250,7 +1265,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
        if (mod < 0) {
                if (new_size > old_size) {
                        ret = -EINVAL;
-                       goto out_unlock;
+                       goto out_free;
                }
                new_size = old_size - new_size;
        } else if (mod > 0) {
@@ -1259,11 +1274,11 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
 
        if (new_size < 256 * 1024 * 1024) {
                ret = -EINVAL;
-               goto out_unlock;
+               goto out_free;
        }
        if (new_size > device->bdev->bd_inode->i_size) {
                ret = -EFBIG;
-               goto out_unlock;
+               goto out_free;
        }
 
        do_div(new_size, root->sectorsize);
@@ -1276,7 +1291,7 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans)) {
                        ret = PTR_ERR(trans);
-                       goto out_unlock;
+                       goto out_free;
                }
                ret = btrfs_grow_device(trans, device, new_size);
                btrfs_commit_transaction(trans, root);
@@ -1284,9 +1299,10 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
                ret = btrfs_shrink_device(device, new_size);
        }
 
-out_unlock:
-       mutex_unlock(&root->fs_info->volume_mutex);
+out_free:
        kfree(vol_args);
+out:
+       mutex_unlock(&root->fs_info->volume_mutex);
        return ret;
 }
 
@@ -2052,14 +2068,25 @@ static long btrfs_ioctl_add_dev(struct btrfs_root *root, void __user *arg)
        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;
 
+       mutex_lock(&root->fs_info->volume_mutex);
+       if (root->fs_info->balance_ctl) {
+               printk(KERN_INFO "btrfs: balance in progress\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
-       if (IS_ERR(vol_args))
-               return PTR_ERR(vol_args);
+       if (IS_ERR(vol_args)) {
+               ret = PTR_ERR(vol_args);
+               goto out;
+       }
 
        vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
        ret = btrfs_init_new_device(root, vol_args->name);
 
        kfree(vol_args);
+out:
+       mutex_unlock(&root->fs_info->volume_mutex);
        return ret;
 }
 
@@ -2074,14 +2101,25 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg)
        if (root->fs_info->sb->s_flags & MS_RDONLY)
                return -EROFS;
 
+       mutex_lock(&root->fs_info->volume_mutex);
+       if (root->fs_info->balance_ctl) {
+               printk(KERN_INFO "btrfs: balance in progress\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
        vol_args = memdup_user(arg, sizeof(*vol_args));
-       if (IS_ERR(vol_args))
-               return PTR_ERR(vol_args);
+       if (IS_ERR(vol_args)) {
+               ret = PTR_ERR(vol_args);
+               goto out;
+       }
 
        vol_args->name[BTRFS_PATH_NAME_MAX] = '\0';
        ret = btrfs_rm_device(root, vol_args->name);
 
        kfree(vol_args);
+out:
+       mutex_unlock(&root->fs_info->volume_mutex);
        return ret;
 }
 
@@ -2427,7 +2465,8 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                                                        disko, diskl, 0,
                                                        root->root_key.objectid,
                                                        btrfs_ino(inode),
-                                                       new_key.offset - datao);
+                                                       new_key.offset - datao,
+                                                       0);
                                        BUG_ON(ret);
                                }
                        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
@@ -2977,7 +3016,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
 {
        int ret = 0;
        int size;
-       u64 extent_offset;
+       u64 extent_item_pos;
        struct btrfs_ioctl_logical_ino_args *loi;
        struct btrfs_data_container *inodes = NULL;
        struct btrfs_path *path = NULL;
@@ -3008,15 +3047,17 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
        }
 
        ret = extent_from_logical(root->fs_info, loi->logical, path, &key);
+       btrfs_release_path(path);
 
        if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
                ret = -ENOENT;
        if (ret < 0)
                goto out;
 
-       extent_offset = loi->logical - key.objectid;
+       extent_item_pos = loi->logical - key.objectid;
        ret = iterate_extent_inodes(root->fs_info, path, key.objectid,
-                                       extent_offset, build_ino_list, inodes);
+                                       extent_item_pos, build_ino_list,
+                                       inodes);
 
        if (ret < 0)
                goto out;
@@ -3034,6 +3075,163 @@ out:
        return ret;
 }
 
+void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
+                              struct btrfs_ioctl_balance_args *bargs)
+{
+       struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+
+       bargs->flags = bctl->flags;
+
+       if (atomic_read(&fs_info->balance_running))
+               bargs->state |= BTRFS_BALANCE_STATE_RUNNING;
+       if (atomic_read(&fs_info->balance_pause_req))
+               bargs->state |= BTRFS_BALANCE_STATE_PAUSE_REQ;
+       if (atomic_read(&fs_info->balance_cancel_req))
+               bargs->state |= BTRFS_BALANCE_STATE_CANCEL_REQ;
+
+       memcpy(&bargs->data, &bctl->data, sizeof(bargs->data));
+       memcpy(&bargs->meta, &bctl->meta, sizeof(bargs->meta));
+       memcpy(&bargs->sys, &bctl->sys, sizeof(bargs->sys));
+
+       if (lock) {
+               spin_lock(&fs_info->balance_lock);
+               memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
+               spin_unlock(&fs_info->balance_lock);
+       } else {
+               memcpy(&bargs->stat, &bctl->stat, sizeof(bargs->stat));
+       }
+}
+
+static long btrfs_ioctl_balance(struct btrfs_root *root, void __user *arg)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_ioctl_balance_args *bargs;
+       struct btrfs_balance_control *bctl;
+       int ret;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       if (fs_info->sb->s_flags & MS_RDONLY)
+               return -EROFS;
+
+       mutex_lock(&fs_info->volume_mutex);
+       mutex_lock(&fs_info->balance_mutex);
+
+       if (arg) {
+               bargs = memdup_user(arg, sizeof(*bargs));
+               if (IS_ERR(bargs)) {
+                       ret = PTR_ERR(bargs);
+                       goto out;
+               }
+
+               if (bargs->flags & BTRFS_BALANCE_RESUME) {
+                       if (!fs_info->balance_ctl) {
+                               ret = -ENOTCONN;
+                               goto out_bargs;
+                       }
+
+                       bctl = fs_info->balance_ctl;
+                       spin_lock(&fs_info->balance_lock);
+                       bctl->flags |= BTRFS_BALANCE_RESUME;
+                       spin_unlock(&fs_info->balance_lock);
+
+                       goto do_balance;
+               }
+       } else {
+               bargs = NULL;
+       }
+
+       if (fs_info->balance_ctl) {
+               ret = -EINPROGRESS;
+               goto out_bargs;
+       }
+
+       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
+       if (!bctl) {
+               ret = -ENOMEM;
+               goto out_bargs;
+       }
+
+       bctl->fs_info = fs_info;
+       if (arg) {
+               memcpy(&bctl->data, &bargs->data, sizeof(bctl->data));
+               memcpy(&bctl->meta, &bargs->meta, sizeof(bctl->meta));
+               memcpy(&bctl->sys, &bargs->sys, sizeof(bctl->sys));
+
+               bctl->flags = bargs->flags;
+       } else {
+               /* balance everything - no filters */
+               bctl->flags |= BTRFS_BALANCE_TYPE_MASK;
+       }
+
+do_balance:
+       ret = btrfs_balance(bctl, bargs);
+       /*
+        * bctl is freed in __cancel_balance or in free_fs_info if
+        * restriper was paused all the way until unmount
+        */
+       if (arg) {
+               if (copy_to_user(arg, bargs, sizeof(*bargs)))
+                       ret = -EFAULT;
+       }
+
+out_bargs:
+       kfree(bargs);
+out:
+       mutex_unlock(&fs_info->balance_mutex);
+       mutex_unlock(&fs_info->volume_mutex);
+       return ret;
+}
+
+static long btrfs_ioctl_balance_ctl(struct btrfs_root *root, int cmd)
+{
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       switch (cmd) {
+       case BTRFS_BALANCE_CTL_PAUSE:
+               return btrfs_pause_balance(root->fs_info);
+       case BTRFS_BALANCE_CTL_CANCEL:
+               return btrfs_cancel_balance(root->fs_info);
+       }
+
+       return -EINVAL;
+}
+
+static long btrfs_ioctl_balance_progress(struct btrfs_root *root,
+                                        void __user *arg)
+{
+       struct btrfs_fs_info *fs_info = root->fs_info;
+       struct btrfs_ioctl_balance_args *bargs;
+       int ret = 0;
+
+       if (!capable(CAP_SYS_ADMIN))
+               return -EPERM;
+
+       mutex_lock(&fs_info->balance_mutex);
+       if (!fs_info->balance_ctl) {
+               ret = -ENOTCONN;
+               goto out;
+       }
+
+       bargs = kzalloc(sizeof(*bargs), GFP_NOFS);
+       if (!bargs) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       update_ioctl_balance_args(fs_info, 1, bargs);
+
+       if (copy_to_user(arg, bargs, sizeof(*bargs)))
+               ret = -EFAULT;
+
+       kfree(bargs);
+out:
+       mutex_unlock(&fs_info->balance_mutex);
+       return ret;
+}
+
 long btrfs_ioctl(struct file *file, unsigned int
                cmd, unsigned long arg)
 {
@@ -3078,7 +3276,7 @@ long btrfs_ioctl(struct file *file, unsigned int
        case BTRFS_IOC_DEV_INFO:
                return btrfs_ioctl_dev_info(root, argp);
        case BTRFS_IOC_BALANCE:
-               return btrfs_balance(root->fs_info->dev_root);
+               return btrfs_ioctl_balance(root, NULL);
        case BTRFS_IOC_CLONE:
                return btrfs_ioctl_clone(file, arg, 0, 0, 0);
        case BTRFS_IOC_CLONE_RANGE:
@@ -3110,6 +3308,12 @@ long btrfs_ioctl(struct file *file, unsigned int
                return btrfs_ioctl_scrub_cancel(root, argp);
        case BTRFS_IOC_SCRUB_PROGRESS:
                return btrfs_ioctl_scrub_progress(root, argp);
+       case BTRFS_IOC_BALANCE_V2:
+               return btrfs_ioctl_balance(root, argp);
+       case BTRFS_IOC_BALANCE_CTL:
+               return btrfs_ioctl_balance_ctl(root, arg);
+       case BTRFS_IOC_BALANCE_PROGRESS:
+               return btrfs_ioctl_balance_progress(root, argp);
        }
 
        return -ENOTTY;
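The new balance ioctls dispatched above (BTRFS_IOC_BALANCE_V2, _CTL and _PROGRESS, declared in the ioctl.h hunk that follows) can be exercised from user space. Below is a hedged sketch of a progress query; it assumes the struct and BTRFS_IOC_* definitions are visible to the program, for example through a local copy of the patched ioctl.h, and is not part of the kernel patch itself.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "ioctl.h"	/* local copy of the btrfs ioctl header patched below */

int main(int argc, char **argv)
{
	struct btrfs_ioctl_balance_args args;
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <btrfs-mount-point>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&args, 0, sizeof(args));
	if (ioctl(fd, BTRFS_IOC_BALANCE_PROGRESS, &args) < 0) {
		perror("BTRFS_IOC_BALANCE_PROGRESS");	/* ENOTCONN: no balance running */
	} else {
		printf("running=%d considered=%llu completed=%llu expected=%llu\n",
		       !!(args.state & BTRFS_BALANCE_STATE_RUNNING),
		       (unsigned long long)args.stat.considered,
		       (unsigned long long)args.stat.completed,
		       (unsigned long long)args.stat.expected);
	}

	close(fd);
	return 0;
}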
index 252ae9915de8fcfa4b6b7a3a502735c28d1819f6..4f69028a68c486268bf5bcfc097a5412dbdd2ad0 100644 (file)
@@ -109,6 +109,55 @@ struct btrfs_ioctl_fs_info_args {
        __u64 reserved[124];                    /* pad to 1k */
 };
 
+/* balance control ioctl modes */
+#define BTRFS_BALANCE_CTL_PAUSE                1
+#define BTRFS_BALANCE_CTL_CANCEL       2
+
+/*
+ * this is packed, because it should be exactly the same as its disk
+ * byte order counterpart (struct btrfs_disk_balance_args)
+ */
+struct btrfs_balance_args {
+       __u64 profiles;
+       __u64 usage;
+       __u64 devid;
+       __u64 pstart;
+       __u64 pend;
+       __u64 vstart;
+       __u64 vend;
+
+       __u64 target;
+
+       __u64 flags;
+
+       __u64 unused[8];
+} __attribute__ ((__packed__));
+
+/* report balance progress to userspace */
+struct btrfs_balance_progress {
+       __u64 expected;         /* estimated # of chunks that will be
+                                * relocated to fulfill the request */
+       __u64 considered;       /* # of chunks we have considered so far */
+       __u64 completed;        /* # of chunks relocated so far */
+};
+
+#define BTRFS_BALANCE_STATE_RUNNING    (1ULL << 0)
+#define BTRFS_BALANCE_STATE_PAUSE_REQ  (1ULL << 1)
+#define BTRFS_BALANCE_STATE_CANCEL_REQ (1ULL << 2)
+
+struct btrfs_ioctl_balance_args {
+       __u64 flags;                            /* in/out */
+       __u64 state;                            /* out */
+
+       struct btrfs_balance_args data;         /* in/out */
+       struct btrfs_balance_args meta;         /* in/out */
+       struct btrfs_balance_args sys;          /* in/out */
+
+       struct btrfs_balance_progress stat;     /* out */
+
+       __u64 unused[72];                       /* pad to 1k */
+};
+
 #define BTRFS_INO_LOOKUP_PATH_MAX 4080
 struct btrfs_ioctl_ino_lookup_args {
        __u64 treeid;
@@ -272,6 +321,11 @@ struct btrfs_ioctl_logical_ino_args {
                                 struct btrfs_ioctl_dev_info_args)
 #define BTRFS_IOC_FS_INFO _IOR(BTRFS_IOCTL_MAGIC, 31, \
                               struct btrfs_ioctl_fs_info_args)
+#define BTRFS_IOC_BALANCE_V2 _IOWR(BTRFS_IOCTL_MAGIC, 32, \
+                                  struct btrfs_ioctl_balance_args)
+#define BTRFS_IOC_BALANCE_CTL _IOW(BTRFS_IOCTL_MAGIC, 33, int)
+#define BTRFS_IOC_BALANCE_PROGRESS _IOR(BTRFS_IOCTL_MAGIC, 34, \
+                                       struct btrfs_ioctl_balance_args)
 #define BTRFS_IOC_INO_PATHS _IOWR(BTRFS_IOCTL_MAGIC, 35, \
                                        struct btrfs_ioctl_ino_path_args)
 #define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
index d77b67c4b275731417c11e04ad38b3dc9c4d456b..5e178d8f7167f496e928613b6c1f0000c2ea242e 100644 (file)
@@ -33,6 +33,14 @@ void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
  */
 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
+       if (eb->lock_nested) {
+               read_lock(&eb->lock);
+               if (eb->lock_nested && current->pid == eb->lock_owner) {
+                       read_unlock(&eb->lock);
+                       return;
+               }
+               read_unlock(&eb->lock);
+       }
        if (rw == BTRFS_WRITE_LOCK) {
                if (atomic_read(&eb->blocking_writers) == 0) {
                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
@@ -57,6 +65,14 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
  */
 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
+       if (eb->lock_nested) {
+               read_lock(&eb->lock);
+               if (eb->lock_nested && current->pid == eb->lock_owner) {
+                       read_unlock(&eb->lock);
+                       return;
+               }
+               read_unlock(&eb->lock);
+       }
        if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                write_lock(&eb->lock);
@@ -81,12 +97,25 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
 again:
+       read_lock(&eb->lock);
+       if (atomic_read(&eb->blocking_writers) &&
+           current->pid == eb->lock_owner) {
+               /*
+                * This extent is already write-locked by our thread. We allow
+                * an additional read lock to be added because it's for the same
+                * thread. btrfs_find_all_roots() depends on this as it may be
+                * called on a partly (write-)locked tree.
+                */
+               BUG_ON(eb->lock_nested);
+               eb->lock_nested = 1;
+               read_unlock(&eb->lock);
+               return;
+       }
+       read_unlock(&eb->lock);
        wait_event(eb->write_lock_wq, atomic_read(&eb->blocking_writers) == 0);
        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
-               wait_event(eb->write_lock_wq,
-                          atomic_read(&eb->blocking_writers) == 0);
                goto again;
        }
        atomic_inc(&eb->read_locks);
@@ -129,6 +158,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
        }
        atomic_inc(&eb->write_locks);
        atomic_inc(&eb->spinning_writers);
+       eb->lock_owner = current->pid;
        return 1;
 }
 
@@ -137,6 +167,15 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
+       if (eb->lock_nested) {
+               read_lock(&eb->lock);
+               if (eb->lock_nested && current->pid == eb->lock_owner) {
+                       eb->lock_nested = 0;
+                       read_unlock(&eb->lock);
+                       return;
+               }
+               read_unlock(&eb->lock);
+       }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
        atomic_dec(&eb->spinning_readers);
@@ -149,6 +188,15 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
+       if (eb->lock_nested) {
+               read_lock(&eb->lock);
+               if (eb->lock_nested && current->pid == eb->lock_owner) {
+                       eb->lock_nested = 0;
+                       read_unlock(&eb->lock);
+                       return;
+               }
+               read_unlock(&eb->lock);
+       }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
        if (atomic_dec_and_test(&eb->blocking_readers))
@@ -181,6 +229,7 @@ again:
        WARN_ON(atomic_read(&eb->spinning_writers));
        atomic_inc(&eb->spinning_writers);
        atomic_inc(&eb->write_locks);
+       eb->lock_owner = current->pid;
        return 0;
 }
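
A rough sketch (not part of the patch) of the recursion the lock_nested handling above is meant to allow, assuming the existing btrfs_tree_lock()/btrfs_tree_unlock() helpers: a task holding a blocking write lock on an extent buffer can take and release a read lock on the same buffer without deadlocking, which btrfs_find_all_roots() relies on.

static void nested_read_lock_sketch(struct extent_buffer *eb)
{
	btrfs_tree_lock(eb);		/* write lock; records lock_owner */
	btrfs_set_lock_blocking_rw(eb, BTRFS_WRITE_LOCK);

	/* same task, write lock already held: only sets eb->lock_nested */
	btrfs_tree_read_lock(eb);
	/* ... read-only walk of the partially write-locked tree ... */
	btrfs_tree_read_unlock(eb);	/* clears eb->lock_nested */

	btrfs_clear_lock_blocking_rw(eb, BTRFS_WRITE_LOCK_BLOCKING);
	btrfs_tree_unlock(eb);
}
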
 
index cfb55434a46981fa64416e68fa3fd29cf58238f5..8c1aae2c845d49960fe352c809033f1bdf5ffb74 100644 (file)
@@ -1604,12 +1604,12 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
                ret = btrfs_inc_extent_ref(trans, root, new_bytenr,
                                           num_bytes, parent,
                                           btrfs_header_owner(leaf),
-                                          key.objectid, key.offset);
+                                          key.objectid, key.offset, 1);
                BUG_ON(ret);
 
                ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
                                        parent, btrfs_header_owner(leaf),
-                                       key.objectid, key.offset);
+                                       key.objectid, key.offset, 1);
                BUG_ON(ret);
        }
        if (dirty)
@@ -1778,21 +1778,23 @@ again:
 
                ret = btrfs_inc_extent_ref(trans, src, old_bytenr, blocksize,
                                        path->nodes[level]->start,
-                                       src->root_key.objectid, level - 1, 0);
+                                       src->root_key.objectid, level - 1, 0,
+                                       1);
                BUG_ON(ret);
                ret = btrfs_inc_extent_ref(trans, dest, new_bytenr, blocksize,
                                        0, dest->root_key.objectid, level - 1,
-                                       0);
+                                       0, 1);
                BUG_ON(ret);
 
                ret = btrfs_free_extent(trans, src, new_bytenr, blocksize,
                                        path->nodes[level]->start,
-                                       src->root_key.objectid, level - 1, 0);
+                                       src->root_key.objectid, level - 1, 0,
+                                       1);
                BUG_ON(ret);
 
                ret = btrfs_free_extent(trans, dest, old_bytenr, blocksize,
                                        0, dest->root_key.objectid, level - 1,
-                                       0);
+                                       0, 1);
                BUG_ON(ret);
 
                btrfs_unlock_up_safe(path, 0);
@@ -2244,7 +2246,7 @@ again:
                } else {
                        list_del_init(&reloc_root->root_list);
                }
-               btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0);
+               btrfs_drop_snapshot(reloc_root, rc->block_rsv, 0, 1);
        }
 
        if (found) {
@@ -2558,7 +2560,7 @@ static int do_relocation(struct btrfs_trans_handle *trans,
                                                node->eb->start, blocksize,
                                                upper->eb->start,
                                                btrfs_header_owner(upper->eb),
-                                               node->level, 0);
+                                               node->level, 0, 1);
                        BUG_ON(ret);
 
                        ret = btrfs_drop_subtree(trans, root, eb, upper->eb);
@@ -2947,9 +2949,7 @@ static int relocate_file_extent_cluster(struct inode *inode,
        index = (cluster->start - offset) >> PAGE_CACHE_SHIFT;
        last_index = (cluster->end - offset) >> PAGE_CACHE_SHIFT;
        while (index <= last_index) {
-               mutex_lock(&inode->i_mutex);
                ret = btrfs_delalloc_reserve_metadata(inode, PAGE_CACHE_SIZE);
-               mutex_unlock(&inode->i_mutex);
                if (ret)
                        goto out;
 
index ddf2c90d3fc0c475cbfabf6397c84f734abcc5e8..9770cc5bfb76c6829f96924bb82f9b3b564ca646 100644 (file)
@@ -25,6 +25,7 @@
 #include "transaction.h"
 #include "backref.h"
 #include "extent_io.h"
+#include "check-integrity.h"
 
 /*
  * This is only the first step towards a full-featured scrub. It reads all
@@ -309,7 +310,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
        u8 ref_level;
        unsigned long ptr = 0;
        const int bufsize = 4096;
-       u64 extent_offset;
+       u64 extent_item_pos;
 
        path = btrfs_alloc_path();
 
@@ -329,12 +330,13 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
        if (ret < 0)
                goto out;
 
-       extent_offset = swarn.logical - found_key.objectid;
+       extent_item_pos = swarn.logical - found_key.objectid;
        swarn.extent_item_size = found_key.offset;
 
        eb = path->nodes[0];
        ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
        item_size = btrfs_item_size_nr(eb, path->slots[0]);
+       btrfs_release_path(path);
 
        if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                do {
@@ -351,7 +353,7 @@ static void scrub_print_warning(const char *errstr, struct scrub_bio *sbio,
        } else {
                swarn.path = path;
                iterate_extent_inodes(fs_info, path, found_key.objectid,
-                                       extent_offset,
+                                       extent_item_pos,
                                        scrub_print_warning_inode, &swarn);
        }
 
@@ -732,7 +734,7 @@ static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector,
        bio_add_page(bio, page, PAGE_SIZE, 0);
        bio->bi_end_io = scrub_fixup_end_io;
        bio->bi_private = &complete;
-       submit_bio(rw, bio);
+       btrfsic_submit_bio(rw, bio);
 
        /* this will also unplug the queue */
        wait_for_completion(&complete);
@@ -958,7 +960,7 @@ static int scrub_submit(struct scrub_dev *sdev)
        sdev->curr = -1;
        atomic_inc(&sdev->in_flight);
 
-       submit_bio(READ, sbio->bio);
+       btrfsic_submit_bio(READ, sbio->bio);
 
        return 0;
 }
index ae488aa1966a1b32d6a3c4438424eb690740d4c9..3ce97b217cbeae21ef23e57fe2a605ed11572af1 100644 (file)
@@ -147,13 +147,13 @@ void __btrfs_std_error(struct btrfs_fs_info *fs_info, const char *function,
 
 static void btrfs_put_super(struct super_block *sb)
 {
-       struct btrfs_root *root = btrfs_sb(sb);
-       int ret;
-
-       ret = close_ctree(root);
-       sb->s_fs_info = NULL;
-
-       (void)ret; /* FIXME: need to fix VFS to return error? */
+       (void)close_ctree(btrfs_sb(sb)->tree_root);
+       /* FIXME: need to fix VFS to return error? */
+       /* AV: return it _where_?  ->put_super() can be triggered by any number
+        * of async events, up to and including delivery of SIGKILL to the
+        * last process that kept it busy.  Or segfault in the aforementioned
+        * process...  Whom would you report that to?
+        */
 }
 
 enum {
@@ -163,8 +163,11 @@ enum {
        Opt_compress_type, Opt_compress_force, Opt_compress_force_type,
        Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard,
        Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed,
-       Opt_enospc_debug, Opt_subvolrootid, Opt_defrag,
-       Opt_inode_cache, Opt_no_space_cache, Opt_recovery, Opt_err,
+       Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_inode_cache,
+       Opt_no_space_cache, Opt_recovery, Opt_skip_balance,
+       Opt_check_integrity, Opt_check_integrity_including_extent_data,
+       Opt_check_integrity_print_mask,
+       Opt_err,
 };
 
 static match_table_t tokens = {
@@ -199,6 +202,10 @@ static match_table_t tokens = {
        {Opt_inode_cache, "inode_cache"},
        {Opt_no_space_cache, "nospace_cache"},
        {Opt_recovery, "recovery"},
+       {Opt_skip_balance, "skip_balance"},
+       {Opt_check_integrity, "check_int"},
+       {Opt_check_integrity_including_extent_data, "check_int_data"},
+       {Opt_check_integrity_print_mask, "check_int_print_mask=%d"},
        {Opt_err, NULL},
 };
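
For illustration only, a minimal mount(2) sketch (not part of the patch) passing the new option strings; the device and mountpoint paths are placeholders, and the check_int* options require CONFIG_BTRFS_FS_CHECK_INTEGRITY, otherwise the mount fails with EINVAL as handled in the Opt_check_integrity* cases below:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* skip resuming an interrupted balance and enable the integrity
	 * checker with a decimal print mask */
	if (mount("/dev/sdb1", "/mnt/btrfs", "btrfs", 0,
		  "skip_balance,check_int,check_int_print_mask=6") != 0)
		perror("mount");
	return 0;
}
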
 
@@ -397,6 +404,40 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                        printk(KERN_INFO "btrfs: enabling auto recovery");
                        btrfs_set_opt(info->mount_opt, RECOVERY);
                        break;
+               case Opt_skip_balance:
+                       btrfs_set_opt(info->mount_opt, SKIP_BALANCE);
+                       break;
+#ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+               case Opt_check_integrity_including_extent_data:
+                       printk(KERN_INFO "btrfs: enabling check integrity"
+                              " including extent data\n");
+                       btrfs_set_opt(info->mount_opt,
+                                     CHECK_INTEGRITY_INCLUDING_EXTENT_DATA);
+                       btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
+                       break;
+               case Opt_check_integrity:
+                       printk(KERN_INFO "btrfs: enabling check integrity\n");
+                       btrfs_set_opt(info->mount_opt, CHECK_INTEGRITY);
+                       break;
+               case Opt_check_integrity_print_mask:
+                       intarg = 0;
+                       match_int(&args[0], &intarg);
+                       if (intarg) {
+                               info->check_integrity_print_mask = intarg;
+                               printk(KERN_INFO "btrfs:"
+                                      " check_integrity_print_mask 0x%x\n",
+                                      info->check_integrity_print_mask);
+                       }
+                       break;
+#else
+               case Opt_check_integrity_including_extent_data:
+               case Opt_check_integrity:
+               case Opt_check_integrity_print_mask:
+                       printk(KERN_ERR "btrfs: support for check_integrity*"
+                              " not compiled in!\n");
+                       ret = -EINVAL;
+                       goto out;
+#endif
                case Opt_err:
                        printk(KERN_INFO "btrfs: unrecognized mount option "
                               "'%s'\n", p);
@@ -500,7 +541,8 @@ out:
 static struct dentry *get_default_root(struct super_block *sb,
                                       u64 subvol_objectid)
 {
-       struct btrfs_root *root = sb->s_fs_info;
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       struct btrfs_root *root = fs_info->tree_root;
        struct btrfs_root *new_root;
        struct btrfs_dir_item *di;
        struct btrfs_path *path;
@@ -530,7 +572,7 @@ static struct dentry *get_default_root(struct super_block *sb,
         * will mount by default if we haven't been given a specific subvolume
         * to mount.
         */
-       dir_id = btrfs_super_root_dir(root->fs_info->super_copy);
+       dir_id = btrfs_super_root_dir(fs_info->super_copy);
        di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
        if (IS_ERR(di)) {
                btrfs_free_path(path);
@@ -544,7 +586,7 @@ static struct dentry *get_default_root(struct super_block *sb,
                 */
                btrfs_free_path(path);
                dir_id = BTRFS_FIRST_FREE_OBJECTID;
-               new_root = root->fs_info->fs_root;
+               new_root = fs_info->fs_root;
                goto setup_root;
        }
 
@@ -552,7 +594,7 @@ static struct dentry *get_default_root(struct super_block *sb,
        btrfs_free_path(path);
 
 find_root:
-       new_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
+       new_root = btrfs_read_fs_root_no_name(fs_info, &location);
        if (IS_ERR(new_root))
                return ERR_CAST(new_root);
 
@@ -588,7 +630,7 @@ static int btrfs_fill_super(struct super_block *sb,
 {
        struct inode *inode;
        struct dentry *root_dentry;
-       struct btrfs_root *tree_root;
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
        struct btrfs_key key;
        int err;
 
@@ -603,18 +645,16 @@ static int btrfs_fill_super(struct super_block *sb,
        sb->s_flags |= MS_POSIXACL;
 #endif
 
-       tree_root = open_ctree(sb, fs_devices, (char *)data);
-
-       if (IS_ERR(tree_root)) {
+       err = open_ctree(sb, fs_devices, (char *)data);
+       if (err) {
                printk("btrfs: open_ctree failed\n");
-               return PTR_ERR(tree_root);
+               return err;
        }
-       sb->s_fs_info = tree_root;
 
        key.objectid = BTRFS_FIRST_FREE_OBJECTID;
        key.type = BTRFS_INODE_ITEM_KEY;
        key.offset = 0;
-       inode = btrfs_iget(sb, &key, tree_root->fs_info->fs_root, NULL);
+       inode = btrfs_iget(sb, &key, fs_info->fs_root, NULL);
        if (IS_ERR(inode)) {
                err = PTR_ERR(inode);
                goto fail_close;
@@ -631,23 +671,25 @@ static int btrfs_fill_super(struct super_block *sb,
 
        save_mount_options(sb, data);
        cleancache_init_fs(sb);
+       sb->s_flags |= MS_ACTIVE;
        return 0;
 
 fail_close:
-       close_ctree(tree_root);
+       close_ctree(fs_info->tree_root);
        return err;
 }
 
 int btrfs_sync_fs(struct super_block *sb, int wait)
 {
        struct btrfs_trans_handle *trans;
-       struct btrfs_root *root = btrfs_sb(sb);
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       struct btrfs_root *root = fs_info->tree_root;
        int ret;
 
        trace_btrfs_sync_fs(wait);
 
        if (!wait) {
-               filemap_flush(root->fs_info->btree_inode->i_mapping);
+               filemap_flush(fs_info->btree_inode->i_mapping);
                return 0;
        }
 
@@ -663,8 +705,8 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
 
 static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
 {
-       struct btrfs_root *root = btrfs_sb(dentry->d_sb);
-       struct btrfs_fs_info *info = root->fs_info;
+       struct btrfs_fs_info *info = btrfs_sb(dentry->d_sb);
+       struct btrfs_root *root = info->tree_root;
        char *compress_type;
 
        if (btrfs_test_opt(root, DEGRADED))
@@ -722,28 +764,25 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
                seq_puts(seq, ",autodefrag");
        if (btrfs_test_opt(root, INODE_MAP_CACHE))
                seq_puts(seq, ",inode_cache");
+       if (btrfs_test_opt(root, SKIP_BALANCE))
+               seq_puts(seq, ",skip_balance");
        return 0;
 }
 
 static int btrfs_test_super(struct super_block *s, void *data)
 {
-       struct btrfs_root *test_root = data;
-       struct btrfs_root *root = btrfs_sb(s);
+       struct btrfs_fs_info *p = data;
+       struct btrfs_fs_info *fs_info = btrfs_sb(s);
 
-       /*
-        * If this super block is going away, return false as it
-        * can't match as an existing super block.
-        */
-       if (!atomic_read(&s->s_active))
-               return 0;
-       return root->fs_info->fs_devices == test_root->fs_info->fs_devices;
+       return fs_info->fs_devices == p->fs_devices;
 }
 
 static int btrfs_set_super(struct super_block *s, void *data)
 {
-       s->s_fs_info = data;
-
-       return set_anon_super(s, data);
+       int err = set_anon_super(s, data);
+       if (!err)
+               s->s_fs_info = data;
+       return err;
 }
 
 /*
@@ -903,12 +942,6 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        if (!fs_info)
                return ERR_PTR(-ENOMEM);
 
-       fs_info->tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
-       if (!fs_info->tree_root) {
-               error = -ENOMEM;
-               goto error_fs_info;
-       }
-       fs_info->tree_root->fs_info = fs_info;
        fs_info->fs_devices = fs_devices;
 
        fs_info->super_copy = kzalloc(BTRFS_SUPER_INFO_SIZE, GFP_NOFS);
@@ -928,43 +961,30 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags,
        }
 
        bdev = fs_devices->latest_bdev;
-       s = sget(fs_type, btrfs_test_super, btrfs_set_super,
-                fs_info->tree_root);
+       s = sget(fs_type, btrfs_test_super, btrfs_set_super, fs_info);
        if (IS_ERR(s)) {
                error = PTR_ERR(s);
                goto error_close_devices;
        }
 
        if (s->s_root) {
-               if ((flags ^ s->s_flags) & MS_RDONLY) {
-                       deactivate_locked_super(s);
-                       error = -EBUSY;
-                       goto error_close_devices;
-               }
-
                btrfs_close_devices(fs_devices);
                free_fs_info(fs_info);
+               if ((flags ^ s->s_flags) & MS_RDONLY)
+                       error = -EBUSY;
        } else {
                char b[BDEVNAME_SIZE];
 
                s->s_flags = flags | MS_NOSEC;
                strlcpy(s->s_id, bdevname(bdev, b), sizeof(s->s_id));
-               btrfs_sb(s)->fs_info->bdev_holder = fs_type;
+               btrfs_sb(s)->bdev_holder = fs_type;
                error = btrfs_fill_super(s, fs_devices, data,
                                         flags & MS_SILENT ? 1 : 0);
-               if (error) {
-                       deactivate_locked_super(s);
-                       return ERR_PTR(error);
-               }
-
-               s->s_flags |= MS_ACTIVE;
        }
 
-       root = get_default_root(s, subvol_objectid);
-       if (IS_ERR(root)) {
+       root = !error ? get_default_root(s, subvol_objectid) : ERR_PTR(error);
+       if (IS_ERR(root))
                deactivate_locked_super(s);
-               return root;
-       }
 
        return root;
 
@@ -977,7 +997,8 @@ error_fs_info:
 
 static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 {
-       struct btrfs_root *root = btrfs_sb(sb);
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       struct btrfs_root *root = fs_info->tree_root;
        int ret;
 
        ret = btrfs_parse_options(root, data);
@@ -993,13 +1014,13 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                ret =  btrfs_commit_super(root);
                WARN_ON(ret);
        } else {
-               if (root->fs_info->fs_devices->rw_devices == 0)
+               if (fs_info->fs_devices->rw_devices == 0)
                        return -EACCES;
 
-               if (btrfs_super_log_root(root->fs_info->super_copy) != 0)
+               if (btrfs_super_log_root(fs_info->super_copy) != 0)
                        return -EINVAL;
 
-               ret = btrfs_cleanup_fs_roots(root->fs_info);
+               ret = btrfs_cleanup_fs_roots(fs_info);
                WARN_ON(ret);
 
                /* recover relocation */
@@ -1168,18 +1189,18 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
 
 static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
-       struct btrfs_root *root = btrfs_sb(dentry->d_sb);
-       struct btrfs_super_block *disk_super = root->fs_info->super_copy;
-       struct list_head *head = &root->fs_info->space_info;
+       struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
+       struct btrfs_super_block *disk_super = fs_info->super_copy;
+       struct list_head *head = &fs_info->space_info;
        struct btrfs_space_info *found;
        u64 total_used = 0;
        u64 total_free_data = 0;
        int bits = dentry->d_sb->s_blocksize_bits;
-       __be32 *fsid = (__be32 *)root->fs_info->fsid;
+       __be32 *fsid = (__be32 *)fs_info->fsid;
        int ret;
 
        /* holding chunk_mutex to avoid allocating new chunks */
-       mutex_lock(&root->fs_info->chunk_mutex);
+       mutex_lock(&fs_info->chunk_mutex);
        rcu_read_lock();
        list_for_each_entry_rcu(found, head, list) {
                if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
@@ -1198,14 +1219,14 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_bsize = dentry->d_sb->s_blocksize;
        buf->f_type = BTRFS_SUPER_MAGIC;
        buf->f_bavail = total_free_data;
-       ret = btrfs_calc_avail_data_space(root, &total_free_data);
+       ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
        if (ret) {
-               mutex_unlock(&root->fs_info->chunk_mutex);
+               mutex_unlock(&fs_info->chunk_mutex);
                return ret;
        }
        buf->f_bavail += total_free_data;
        buf->f_bavail = buf->f_bavail >> bits;
-       mutex_unlock(&root->fs_info->chunk_mutex);
+       mutex_unlock(&fs_info->chunk_mutex);
 
        /* We treat it as constant endianness (it doesn't matter _which_)
           because we want the fsid to come out the same whether mounted
@@ -1219,11 +1240,18 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
        return 0;
 }
 
+static void btrfs_kill_super(struct super_block *sb)
+{
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       kill_anon_super(sb);
+       free_fs_info(fs_info);
+}
+
 static struct file_system_type btrfs_fs_type = {
        .owner          = THIS_MODULE,
        .name           = "btrfs",
        .mount          = btrfs_mount,
-       .kill_sb        = kill_anon_super,
+       .kill_sb        = btrfs_kill_super,
        .fs_flags       = FS_REQUIRES_DEV,
 };
 
@@ -1257,17 +1285,17 @@ static long btrfs_control_ioctl(struct file *file, unsigned int cmd,
 
 static int btrfs_freeze(struct super_block *sb)
 {
-       struct btrfs_root *root = btrfs_sb(sb);
-       mutex_lock(&root->fs_info->transaction_kthread_mutex);
-       mutex_lock(&root->fs_info->cleaner_mutex);
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       mutex_lock(&fs_info->transaction_kthread_mutex);
+       mutex_lock(&fs_info->cleaner_mutex);
        return 0;
 }
 
 static int btrfs_unfreeze(struct super_block *sb)
 {
-       struct btrfs_root *root = btrfs_sb(sb);
-       mutex_unlock(&root->fs_info->cleaner_mutex);
-       mutex_unlock(&root->fs_info->transaction_kthread_mutex);
+       struct btrfs_fs_info *fs_info = btrfs_sb(sb);
+       mutex_unlock(&fs_info->cleaner_mutex);
+       mutex_unlock(&fs_info->transaction_kthread_mutex);
        return 0;
 }
 
index 81376d94cd3c6a4639ebef35df501dbefbfb2435..287a6728b1ad6ddc726b64122223bfc71e7e165d 100644 (file)
@@ -36,6 +36,8 @@ static noinline void put_transaction(struct btrfs_transaction *transaction)
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
+               WARN_ON(transaction->delayed_refs.root.rb_node);
+               WARN_ON(!list_empty(&transaction->delayed_refs.seq_head));
                memset(transaction, 0, sizeof(*transaction));
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
@@ -108,8 +110,11 @@ loop:
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;
+       cur_trans->delayed_refs.seq = 1;
+       init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
        spin_lock_init(&cur_trans->commit_lock);
        spin_lock_init(&cur_trans->delayed_refs.lock);
+       INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
 
        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        list_add_tail(&cur_trans->list, &root->fs_info->trans_list);
@@ -321,6 +326,8 @@ again:
        }
 
        if (num_bytes) {
+               trace_btrfs_space_reservation(root->fs_info, "transaction",
+                                             (u64)h, num_bytes, 1);
                h->block_rsv = &root->fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
        }
@@ -467,19 +474,12 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 
        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;
-       while (count < 4) {
+       while (count < 2) {
                unsigned long cur = trans->delayed_ref_updates;
                trans->delayed_ref_updates = 0;
                if (cur &&
                    trans->transaction->delayed_refs.num_heads_ready > 64) {
                        trans->delayed_ref_updates = 0;
-
-                       /*
-                        * do a full flush if the transaction is trying
-                        * to close
-                        */
-                       if (trans->transaction->delayed_refs.flushing)
-                               cur = 0;
                        btrfs_run_delayed_refs(trans, root, cur);
                } else {
                        break;
@@ -1393,9 +1393,9 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root)
 
                if (btrfs_header_backref_rev(root->node) <
                    BTRFS_MIXED_BACKREF_REV)
-                       btrfs_drop_snapshot(root, NULL, 0);
+                       btrfs_drop_snapshot(root, NULL, 0, 0);
                else
-                       btrfs_drop_snapshot(root, NULL, 1);
+                       btrfs_drop_snapshot(root, NULL, 1, 0);
        }
        return 0;
 }
index 3568374d419da8ee50eb17f4af5319964750614d..966cc74f5d6c7303b06bb8bd067826f07510ffc8 100644 (file)
@@ -589,7 +589,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
                                ret = btrfs_inc_extent_ref(trans, root,
                                                ins.objectid, ins.offset,
                                                0, root->root_key.objectid,
-                                               key->objectid, offset);
+                                               key->objectid, offset, 0);
                                BUG_ON(ret);
                        } else {
                                /*
@@ -1957,7 +1957,8 @@ static int wait_log_commit(struct btrfs_trans_handle *trans,
 
                finish_wait(&root->log_commit_wait[index], &wait);
                mutex_lock(&root->log_mutex);
-       } while (root->log_transid < transid + 2 &&
+       } while (root->fs_info->last_trans_log_full_commit !=
+                trans->transid && root->log_transid < transid + 2 &&
                 atomic_read(&root->log_commit[index]));
        return 0;
 }
@@ -1966,7 +1967,8 @@ static int wait_for_writer(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root)
 {
        DEFINE_WAIT(wait);
-       while (atomic_read(&root->log_writers)) {
+       while (root->fs_info->last_trans_log_full_commit !=
+              trans->transid && atomic_read(&root->log_writers)) {
                prepare_to_wait(&root->log_writer_wait,
                                &wait, TASK_UNINTERRUPTIBLE);
                mutex_unlock(&root->log_mutex);
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
new file mode 100644 (file)
index 0000000..12f5147
--- /dev/null
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2011 STRATO AG
+ * written by Arne Jansen <sensille@gmx.net>
+ * Distributed under the GNU GPL license version 2.
+ */
+
+#include <linux/slab.h>
+#include <linux/module.h>
+#include "ulist.h"
+
+/*
+ * ulist is a generic data structure to hold a collection of unique u64
+ * values. The only operations it supports are adding to the list and
+ * enumerating it.
+ * It is possible to store an auxiliary value along with the key.
+ *
+ * The implementation is preliminary and can probably be sped up
+ * significantly. A first step would be to store the values in an rbtree
+ * as soon as ULIST_SIZE is exceeded.
+ *
+ * A sample usage for ulists is the enumeration of directed graphs without
+ * visiting a node twice. The pseudo-code could look like this:
+ *
+ * ulist = ulist_alloc();
+ * ulist_add(ulist, root);
+ * elem = NULL;
+ *
+ * while ((elem = ulist_next(ulist, elem))) {
+ *     for (all child nodes n in elem)
+ *             ulist_add(ulist, n);
+ *     do something useful with the node;
+ * }
+ * ulist_free(ulist);
+ *
+ * This assumes the graph nodes are addressable by u64. This stems from the
+ * usage for tree enumeration in btrfs, where the logical addresses are
+ * 64 bit.
+ *
+ * It is also useful for tree enumeration which could be done elegantly
+ * recursively, but is not possible due to kernel stack limitations. The
+ * loop would be similar to the above.
+ */
+
+/**
+ * ulist_init - freshly initialize a ulist
+ * @ulist:     the ulist to initialize
+ *
+ * Note: don't use this function to init an already used ulist, use
+ * ulist_reinit instead.
+ */
+void ulist_init(struct ulist *ulist)
+{
+       ulist->nnodes = 0;
+       ulist->nodes = ulist->int_nodes;
+       ulist->nodes_alloced = ULIST_SIZE;
+}
+EXPORT_SYMBOL(ulist_init);
+
+/**
+ * ulist_fini - free up additionally allocated memory for the ulist
+ * @ulist:     the ulist from which to free the additional memory
+ *
+ * This is useful in cases where the base 'struct ulist' has been statically
+ * allocated.
+ */
+void ulist_fini(struct ulist *ulist)
+{
+       /*
+        * The first ULIST_SIZE elements are stored inline in struct ulist.
+        * Only if more elements are allocated do they need to be freed.
+        */
+       if (ulist->nodes_alloced > ULIST_SIZE)
+               kfree(ulist->nodes);
+       ulist->nodes_alloced = 0;       /* in case ulist_fini is called twice */
+}
+EXPORT_SYMBOL(ulist_fini);
+
+/**
+ * ulist_reinit - prepare a ulist for reuse
+ * @ulist:     ulist to be reused
+ *
+ * Free up all additional memory allocated for the list elements and reinit
+ * the ulist.
+ */
+void ulist_reinit(struct ulist *ulist)
+{
+       ulist_fini(ulist);
+       ulist_init(ulist);
+}
+EXPORT_SYMBOL(ulist_reinit);
+
+/**
+ * ulist_alloc - dynamically allocate a ulist
+ * @gfp_mask:  allocation flags for the base allocation
+ *
+ * The allocated ulist will be returned in an initialized state.
+ */
+struct ulist *ulist_alloc(unsigned long gfp_mask)
+{
+       struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
+
+       if (!ulist)
+               return NULL;
+
+       ulist_init(ulist);
+
+       return ulist;
+}
+EXPORT_SYMBOL(ulist_alloc);
+
+/**
+ * ulist_free - free dynamically allocated ulist
+ * @ulist:     ulist to free
+ *
+ * It is not necessary to call ulist_fini before.
+ */
+void ulist_free(struct ulist *ulist)
+{
+       if (!ulist)
+               return;
+       ulist_fini(ulist);
+       kfree(ulist);
+}
+EXPORT_SYMBOL(ulist_free);
+
+/**
+ * ulist_add - add an element to the ulist
+ * @ulist:     ulist to add the element to
+ * @val:       value to add to ulist
+ * @aux:       auxiliary value to store along with val
+ * @gfp_mask:  flags to use for allocation
+ *
+ * Note: locking must be provided by the caller. In case of rwlocks write
+ *       locking is needed
+ *
+ * Add an element to a ulist. The @val will only be added if it doesn't
+ * already exist. If it is added, the auxiliary value @aux is stored along with
+ * it. In case @val already exists in the ulist, @aux is ignored, even if
+ * it differs from the already stored value.
+ *
+ * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
+ * inserted.
+ * In case of allocation failure -ENOMEM is returned and the ulist stays
+ * unaltered.
+ */
+int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
+             unsigned long gfp_mask)
+{
+       int i;
+
+       for (i = 0; i < ulist->nnodes; ++i) {
+               if (ulist->nodes[i].val == val)
+                       return 0;
+       }
+
+       if (ulist->nnodes >= ulist->nodes_alloced) {
+               u64 new_alloced = ulist->nodes_alloced + 128;
+               struct ulist_node *new_nodes;
+               void *old = NULL;
+
+               /*
+                * if nodes_alloced == ULIST_SIZE no memory has been allocated
+                * yet, so pass NULL to krealloc
+                */
+               if (ulist->nodes_alloced > ULIST_SIZE)
+                       old = ulist->nodes;
+
+               new_nodes = krealloc(old, sizeof(*new_nodes) * new_alloced,
+                                    gfp_mask);
+               if (!new_nodes)
+                       return -ENOMEM;
+
+               if (!old)
+                       memcpy(new_nodes, ulist->int_nodes,
+                              sizeof(ulist->int_nodes));
+
+               ulist->nodes = new_nodes;
+               ulist->nodes_alloced = new_alloced;
+       }
+       ulist->nodes[ulist->nnodes].val = val;
+       ulist->nodes[ulist->nnodes].aux = aux;
+       ++ulist->nnodes;
+
+       return 1;
+}
+EXPORT_SYMBOL(ulist_add);
+
+/**
+ * ulist_next - iterate ulist
+ * @ulist:     ulist to iterate
+ * @prev:      previously returned element or %NULL to start iteration
+ *
+ * Note: locking must be provided by the caller. In case of rwlocks only read
+ *       locking is needed
+ *
+ * This function is used to iterate a ulist. The iteration is started with
+ * @prev = %NULL. It returns the next element from the ulist or %NULL when the
+ * end is reached. No guarantee is made with respect to the order in which
+ * the elements are returned. They might neither be returned in order of
+ * addition nor in ascending order.
+ * It is allowed to call ulist_add during an enumeration. Newly added items
+ * are guaranteed to show up in the running enumeration.
+ */
+struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev)
+{
+       int next;
+
+       if (ulist->nnodes == 0)
+               return NULL;
+
+       if (!prev)
+               return &ulist->nodes[0];
+
+       next = (prev - ulist->nodes) + 1;
+       if (next < 0 || next >= ulist->nnodes)
+               return NULL;
+
+       return &ulist->nodes[next];
+}
+EXPORT_SYMBOL(ulist_next);
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
new file mode 100644 (file)
index 0000000..2e25dec
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011 STRATO AG
+ * written by Arne Jansen <sensille@gmx.net>
+ * Distributed under the GNU GPL license version 2.
+ *
+ */
+
+#ifndef __ULIST__
+#define __ULIST__
+
+/*
+ * ulist is a generic data structure to hold a collection of unique u64
+ * values. The only operations it supports are adding to the list and
+ * enumerating it.
+ * It is possible to store an auxiliary value along with the key.
+ *
+ * The implementation is preliminary and can probably be sped up
+ * significantly. A first step would be to store the values in an rbtree
+ * as soon as ULIST_SIZE is exceeded.
+ */
+
+/*
+ * number of elements statically allocated inside struct ulist
+ */
+#define ULIST_SIZE 16
+
+/*
+ * element of the list
+ */
+struct ulist_node {
+       u64 val;                /* value to store */
+       unsigned long aux;      /* auxiliary value saved along with the val */
+};
+
+struct ulist {
+       /*
+        * number of elements stored in list
+        */
+       unsigned long nnodes;
+
+       /*
+        * number of nodes we already have room for
+        */
+       unsigned long nodes_alloced;
+
+       /*
+        * pointer to the array storing the elements. The first ULIST_SIZE
+        * elements are stored inline. In this case it points to int_nodes.
+        * After exceeding ULIST_SIZE, dynamic memory is allocated.
+        */
+       struct ulist_node *nodes;
+
+       /*
+        * inline storage space for the first ULIST_SIZE entries
+        */
+       struct ulist_node int_nodes[ULIST_SIZE];
+};
+
+void ulist_init(struct ulist *ulist);
+void ulist_fini(struct ulist *ulist);
+void ulist_reinit(struct ulist *ulist);
+struct ulist *ulist_alloc(unsigned long gfp_mask);
+void ulist_free(struct ulist *ulist);
+int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
+             unsigned long gfp_mask);
+struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev);
+
+#endif
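
A hypothetical in-kernel caller sketch for the ulist API declared above (not part of the patch); the child-resolution step is left as a comment since it depends on the caller:

#include <linux/slab.h>
#include "ulist.h"

static int walk_unique(u64 start)
{
	struct ulist *ulist;
	struct ulist_node *node = NULL;
	int ret;

	ulist = ulist_alloc(GFP_NOFS);
	if (!ulist)
		return -ENOMEM;

	ret = ulist_add(ulist, start, 0, GFP_NOFS);
	if (ret < 0)
		goto out;

	/* each unique value is visited once, even if ulist_add() sees it again */
	while ((node = ulist_next(ulist, node))) {
		/* ... process node->val and ulist_add() any children ... */
	}
	ret = 0;
out:
	ulist_free(ulist);
	return ret;
}
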
index f4b839fd3c9dd5cd854cb7bada4e3831d8ea1713..0b4e2af7954d3c209d8f1e581d4ee26c0cb60c2f 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/random.h>
 #include <linux/iocontext.h>
 #include <linux/capability.h>
+#include <linux/kthread.h>
 #include <asm/div64.h>
 #include "compat.h"
 #include "ctree.h"
@@ -32,6 +33,7 @@
 #include "print-tree.h"
 #include "volumes.h"
 #include "async-thread.h"
+#include "check-integrity.h"
 
 static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
@@ -246,7 +248,7 @@ loop_lock:
                        sync_pending = 0;
                }
 
-               submit_bio(cur->bi_rw, cur);
+               btrfsic_submit_bio(cur->bi_rw, cur);
                num_run++;
                batch_run++;
                if (need_resched())
@@ -706,8 +708,6 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
        u64 devid;
        u64 transid;
 
-       mutex_lock(&uuid_mutex);
-
        flags |= FMODE_EXCL;
        bdev = blkdev_get_by_path(path, flags, holder);
 
@@ -716,6 +716,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
                goto error;
        }
 
+       mutex_lock(&uuid_mutex);
        ret = set_blocksize(bdev, 4096);
        if (ret)
                goto error_close;
@@ -737,9 +738,9 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder,
 
        brelse(bh);
 error_close:
+       mutex_unlock(&uuid_mutex);
        blkdev_put(bdev, flags);
 error:
-       mutex_unlock(&uuid_mutex);
        return ret;
 }
 
@@ -829,7 +830,6 @@ out:
 
 /*
  * find_free_dev_extent - find free space in the specified device
- * @trans:     transaction handler
  * @device:    the device which we search the free space in
  * @num_bytes: the size of the free space that we need
  * @start:     store the start of the free space.
@@ -848,8 +848,7 @@ out:
  * But if we don't find suitable free space, it is used to store the size of
  * the max free space.
  */
-int find_free_dev_extent(struct btrfs_trans_handle *trans,
-                        struct btrfs_device *device, u64 num_bytes,
+int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *len)
 {
        struct btrfs_key key;
@@ -893,7 +892,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
        key.offset = search_start;
        key.type = BTRFS_DEV_EXTENT_KEY;
 
-       ret = btrfs_search_slot(trans, root, &key, path, 0, 0);
+       ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto out;
        if (ret > 0) {
@@ -1282,7 +1281,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        bool clear_super = false;
 
        mutex_lock(&uuid_mutex);
-       mutex_lock(&root->fs_info->volume_mutex);
 
        all_avail = root->fs_info->avail_data_alloc_bits |
                root->fs_info->avail_system_alloc_bits |
@@ -1452,7 +1450,6 @@ error_close:
        if (bdev)
                blkdev_put(bdev, FMODE_READ | FMODE_EXCL);
 out:
-       mutex_unlock(&root->fs_info->volume_mutex);
        mutex_unlock(&uuid_mutex);
        return ret;
 error_undo:
@@ -1469,8 +1466,7 @@ error_undo:
 /*
  * does all the dirty work required for changing file system's UUID.
  */
-static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans,
-                               struct btrfs_root *root)
+static int btrfs_prepare_sprout(struct btrfs_root *root)
 {
        struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
        struct btrfs_fs_devices *old_devices;
@@ -1629,7 +1625,6 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        }
 
        filemap_write_and_wait(bdev->bd_inode->i_mapping);
-       mutex_lock(&root->fs_info->volume_mutex);
 
        devices = &root->fs_info->fs_devices->devices;
        /*
@@ -1695,7 +1690,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 
        if (seeding_dev) {
                sb->s_flags &= ~MS_RDONLY;
-               ret = btrfs_prepare_sprout(trans, root);
+               ret = btrfs_prepare_sprout(root);
                BUG_ON(ret);
        }
 
@@ -1757,8 +1752,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                ret = btrfs_relocate_sys_chunks(root);
                BUG_ON(ret);
        }
-out:
-       mutex_unlock(&root->fs_info->volume_mutex);
+
        return ret;
 error:
        blkdev_put(bdev, FMODE_EXCL);
@@ -1766,7 +1760,7 @@ error:
                mutex_unlock(&uuid_mutex);
                up_write(&sb->s_umount);
        }
-       goto out;
+       return ret;
 }
 
 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
@@ -2077,6 +2071,362 @@ error:
        return ret;
 }
 
+static int insert_balance_item(struct btrfs_root *root,
+                              struct btrfs_balance_control *bctl)
+{
+       struct btrfs_trans_handle *trans;
+       struct btrfs_balance_item *item;
+       struct btrfs_disk_balance_args disk_bargs;
+       struct btrfs_path *path;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+       int ret, err;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans)) {
+               btrfs_free_path(path);
+               return PTR_ERR(trans);
+       }
+
+       key.objectid = BTRFS_BALANCE_OBJECTID;
+       key.type = BTRFS_BALANCE_ITEM_KEY;
+       key.offset = 0;
+
+       ret = btrfs_insert_empty_item(trans, root, path, &key,
+                                     sizeof(*item));
+       if (ret)
+               goto out;
+
+       leaf = path->nodes[0];
+       item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
+
+       memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
+
+       btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
+       btrfs_set_balance_data(leaf, item, &disk_bargs);
+       btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
+       btrfs_set_balance_meta(leaf, item, &disk_bargs);
+       btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
+       btrfs_set_balance_sys(leaf, item, &disk_bargs);
+
+       btrfs_set_balance_flags(leaf, item, bctl->flags);
+
+       btrfs_mark_buffer_dirty(leaf);
+out:
+       btrfs_free_path(path);
+       err = btrfs_commit_transaction(trans, root);
+       if (err && !ret)
+               ret = err;
+       return ret;
+}
+
+static int del_balance_item(struct btrfs_root *root)
+{
+       struct btrfs_trans_handle *trans;
+       struct btrfs_path *path;
+       struct btrfs_key key;
+       int ret, err;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       trans = btrfs_start_transaction(root, 0);
+       if (IS_ERR(trans)) {
+               btrfs_free_path(path);
+               return PTR_ERR(trans);
+       }
+
+       key.objectid = BTRFS_BALANCE_OBJECTID;
+       key.type = BTRFS_BALANCE_ITEM_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
+       if (ret < 0)
+               goto out;
+       if (ret > 0) {
+               ret = -ENOENT;
+               goto out;
+       }
+
+       ret = btrfs_del_item(trans, root, path);
+out:
+       btrfs_free_path(path);
+       err = btrfs_commit_transaction(trans, root);
+       if (err && !ret)
+               ret = err;
+       return ret;
+}
+
+/*
+ * This is a heuristic used to reduce the number of chunks balanced on
+ * resume after balance was interrupted.
+ */
+static void update_balance_args(struct btrfs_balance_control *bctl)
+{
+       /*
+        * Turn on soft mode for chunk types that were being converted.
+        */
+       if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
+               bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
+       if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
+               bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
+       if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
+               bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
+
+       /*
+        * Turn on the usage filter if it is not already in use.  The idea is
+        * that chunks that we have already balanced should be
+        * reasonably full.  Don't do it for chunks that are being
+        * converted - that will keep us from relocating unconverted
+        * (albeit full) chunks.
+        */
+       if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
+           !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
+               bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
+               bctl->data.usage = 90;
+       }
+       if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
+           !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
+               bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
+               bctl->sys.usage = 90;
+       }
+       if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
+           !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
+               bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
+               bctl->meta.usage = 90;
+       }
+}
+
+/*
+ * Should be called with both balance and volume mutexes held to
+ * serialize other volume operations (add_dev/rm_dev/resize) with
+ * restriper.  Same goes for unset_balance_control.
+ */
+static void set_balance_control(struct btrfs_balance_control *bctl)
+{
+       struct btrfs_fs_info *fs_info = bctl->fs_info;
+
+       BUG_ON(fs_info->balance_ctl);
+
+       spin_lock(&fs_info->balance_lock);
+       fs_info->balance_ctl = bctl;
+       spin_unlock(&fs_info->balance_lock);
+}
+
+static void unset_balance_control(struct btrfs_fs_info *fs_info)
+{
+       struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+
+       BUG_ON(!fs_info->balance_ctl);
+
+       spin_lock(&fs_info->balance_lock);
+       fs_info->balance_ctl = NULL;
+       spin_unlock(&fs_info->balance_lock);
+
+       kfree(bctl);
+}
+
+/*
+ * Balance filters.  Return 1 if chunk should be filtered out
+ * (should not be balanced).
+ */
+static int chunk_profiles_filter(u64 chunk_profile,
+                                struct btrfs_balance_args *bargs)
+{
+       chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+       if (chunk_profile == 0)
+               chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+       if (bargs->profiles & chunk_profile)
+               return 0;
+
+       return 1;
+}
+
+static u64 div_factor_fine(u64 num, int factor)
+{
+       if (factor <= 0)
+               return 0;
+       if (factor >= 100)
+               return num;
+
+       num *= factor;
+       do_div(num, 100);
+       return num;
+}
+
+static int chunk_usage_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
+                             struct btrfs_balance_args *bargs)
+{
+       struct btrfs_block_group_cache *cache;
+       u64 chunk_used, user_thresh;
+       int ret = 1;
+
+       cache = btrfs_lookup_block_group(fs_info, chunk_offset);
+       chunk_used = btrfs_block_group_used(&cache->item);
+
+       user_thresh = div_factor_fine(cache->key.offset, bargs->usage);
+       if (chunk_used < user_thresh)
+               ret = 0;
+
+       btrfs_put_block_group(cache);
+       return ret;
+}
+
+static int chunk_devid_filter(struct extent_buffer *leaf,
+                             struct btrfs_chunk *chunk,
+                             struct btrfs_balance_args *bargs)
+{
+       struct btrfs_stripe *stripe;
+       int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+       int i;
+
+       for (i = 0; i < num_stripes; i++) {
+               stripe = btrfs_stripe_nr(chunk, i);
+               if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
+                       return 0;
+       }
+
+       return 1;
+}
+
+/* [pstart, pend) */
+static int chunk_drange_filter(struct extent_buffer *leaf,
+                              struct btrfs_chunk *chunk,
+                              u64 chunk_offset,
+                              struct btrfs_balance_args *bargs)
+{
+       struct btrfs_stripe *stripe;
+       int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
+       u64 stripe_offset;
+       u64 stripe_length;
+       int factor;
+       int i;
+
+       if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
+               return 0;
+
+       if (btrfs_chunk_type(leaf, chunk) & (BTRFS_BLOCK_GROUP_DUP |
+            BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10))
+               factor = 2;
+       else
+               factor = 1;
+       factor = num_stripes / factor;
+
+       for (i = 0; i < num_stripes; i++) {
+               stripe = btrfs_stripe_nr(chunk, i);
+               if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
+                       continue;
+
+               stripe_offset = btrfs_stripe_offset(leaf, stripe);
+               stripe_length = btrfs_chunk_length(leaf, chunk);
+               do_div(stripe_length, factor);
+
+               if (stripe_offset < bargs->pend &&
+                   stripe_offset + stripe_length > bargs->pstart)
+                       return 0;
+       }
+
+       return 1;
+}
+
+/* [vstart, vend) */
+static int chunk_vrange_filter(struct extent_buffer *leaf,
+                              struct btrfs_chunk *chunk,
+                              u64 chunk_offset,
+                              struct btrfs_balance_args *bargs)
+{
+       if (chunk_offset < bargs->vend &&
+           chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
+               /* at least part of the chunk is inside this vrange */
+               return 0;
+
+       return 1;
+}
+
+static int chunk_soft_convert_filter(u64 chunk_profile,
+                                    struct btrfs_balance_args *bargs)
+{
+       if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
+               return 0;
+
+       chunk_profile &= BTRFS_BLOCK_GROUP_PROFILE_MASK;
+
+       if (chunk_profile == 0)
+               chunk_profile = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+
+       if (bargs->target & chunk_profile)
+               return 1;
+
+       return 0;
+}
+
+static int should_balance_chunk(struct btrfs_root *root,
+                               struct extent_buffer *leaf,
+                               struct btrfs_chunk *chunk, u64 chunk_offset)
+{
+       struct btrfs_balance_control *bctl = root->fs_info->balance_ctl;
+       struct btrfs_balance_args *bargs = NULL;
+       u64 chunk_type = btrfs_chunk_type(leaf, chunk);
+
+       /* type filter */
+       if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
+             (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
+               return 0;
+       }
+
+       if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
+               bargs = &bctl->data;
+       else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
+               bargs = &bctl->sys;
+       else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
+               bargs = &bctl->meta;
+
+       /* profiles filter */
+       if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
+           chunk_profiles_filter(chunk_type, bargs)) {
+               return 0;
+       }
+
+       /* usage filter */
+       if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
+           chunk_usage_filter(bctl->fs_info, chunk_offset, bargs)) {
+               return 0;
+       }
+
+       /* devid filter */
+       if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
+           chunk_devid_filter(leaf, chunk, bargs)) {
+               return 0;
+       }
+
+       /* drange filter, makes sense only with devid filter */
+       if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
+           chunk_drange_filter(leaf, chunk, chunk_offset, bargs)) {
+               return 0;
+       }
+
+       /* vrange filter */
+       if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
+           chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
+               return 0;
+       }
+
+       /* soft profile changing mode */
+       if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
+           chunk_soft_convert_filter(chunk_type, bargs)) {
+               return 0;
+       }
+
+       return 1;
+}
+
 static u64 div_factor(u64 num, int factor)
 {
        if (factor == 10)
@@ -2086,29 +2436,28 @@ static u64 div_factor(u64 num, int factor)
        return num;
 }
 
-int btrfs_balance(struct btrfs_root *dev_root)
+static int __btrfs_balance(struct btrfs_fs_info *fs_info)
 {
-       int ret;
-       struct list_head *devices = &dev_root->fs_info->fs_devices->devices;
+       struct btrfs_balance_control *bctl = fs_info->balance_ctl;
+       struct btrfs_root *chunk_root = fs_info->chunk_root;
+       struct btrfs_root *dev_root = fs_info->dev_root;
+       struct list_head *devices;
        struct btrfs_device *device;
        u64 old_size;
        u64 size_to_free;
+       struct btrfs_chunk *chunk;
        struct btrfs_path *path;
        struct btrfs_key key;
-       struct btrfs_root *chunk_root = dev_root->fs_info->chunk_root;
-       struct btrfs_trans_handle *trans;
        struct btrfs_key found_key;
-
-       if (dev_root->fs_info->sb->s_flags & MS_RDONLY)
-               return -EROFS;
-
-       if (!capable(CAP_SYS_ADMIN))
-               return -EPERM;
-
-       mutex_lock(&dev_root->fs_info->volume_mutex);
-       dev_root = dev_root->fs_info->dev_root;
+       struct btrfs_trans_handle *trans;
+       struct extent_buffer *leaf;
+       int slot;
+       int ret;
+       int enospc_errors = 0;
+       bool counting = true;
 
        /* step one make some room on all the devices */
+       devices = &fs_info->fs_devices->devices;
        list_for_each_entry(device, devices, dev_list) {
                old_size = device->total_bytes;
                size_to_free = div_factor(old_size, 1);
@@ -2137,11 +2486,23 @@ int btrfs_balance(struct btrfs_root *dev_root)
                ret = -ENOMEM;
                goto error;
        }
+
+       /* zero out stat counters */
+       spin_lock(&fs_info->balance_lock);
+       memset(&bctl->stat, 0, sizeof(bctl->stat));
+       spin_unlock(&fs_info->balance_lock);
+again:
        key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
        key.offset = (u64)-1;
        key.type = BTRFS_CHUNK_ITEM_KEY;
 
        while (1) {
+               if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
+                   atomic_read(&fs_info->balance_cancel_req)) {
+                       ret = -ECANCELED;
+                       goto error;
+               }
+
                ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
                if (ret < 0)
                        goto error;
@@ -2151,15 +2512,19 @@ int btrfs_balance(struct btrfs_root *dev_root)
                 * failed
                 */
                if (ret == 0)
-                       break;
+                       BUG(); /* FIXME break ? */
 
                ret = btrfs_previous_item(chunk_root, path, 0,
                                          BTRFS_CHUNK_ITEM_KEY);
-               if (ret)
+               if (ret) {
+                       ret = 0;
                        break;
+               }
+
+               leaf = path->nodes[0];
+               slot = path->slots[0];
+               btrfs_item_key_to_cpu(leaf, &found_key, slot);
 
-               btrfs_item_key_to_cpu(path->nodes[0], &found_key,
-                                     path->slots[0]);
                if (found_key.objectid != key.objectid)
                        break;
 
@@ -2167,22 +2532,375 @@ int btrfs_balance(struct btrfs_root *dev_root)
                if (found_key.offset == 0)
                        break;
 
+               chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
+
+               if (!counting) {
+                       spin_lock(&fs_info->balance_lock);
+                       bctl->stat.considered++;
+                       spin_unlock(&fs_info->balance_lock);
+               }
+
+               ret = should_balance_chunk(chunk_root, leaf, chunk,
+                                          found_key.offset);
                btrfs_release_path(path);
+               if (!ret)
+                       goto loop;
+
+               if (counting) {
+                       spin_lock(&fs_info->balance_lock);
+                       bctl->stat.expected++;
+                       spin_unlock(&fs_info->balance_lock);
+                       goto loop;
+               }
+
                ret = btrfs_relocate_chunk(chunk_root,
                                           chunk_root->root_key.objectid,
                                           found_key.objectid,
                                           found_key.offset);
                if (ret && ret != -ENOSPC)
                        goto error;
+               if (ret == -ENOSPC) {
+                       enospc_errors++;
+               } else {
+                       spin_lock(&fs_info->balance_lock);
+                       bctl->stat.completed++;
+                       spin_unlock(&fs_info->balance_lock);
+               }
+loop:
                key.offset = found_key.offset - 1;
        }
-       ret = 0;
+
+       if (counting) {
+               btrfs_release_path(path);
+               counting = false;
+               goto again;
+       }
 error:
        btrfs_free_path(path);
-       mutex_unlock(&dev_root->fs_info->volume_mutex);
+       if (enospc_errors) {
+               printk(KERN_INFO "btrfs: %d enospc errors during balance\n",
+                      enospc_errors);
+               if (!ret)
+                       ret = -ENOSPC;
+       }
+
        return ret;
 }
 
+static inline int balance_need_close(struct btrfs_fs_info *fs_info)
+{
+       /* cancel requested || normal exit path */
+       return atomic_read(&fs_info->balance_cancel_req) ||
+               (atomic_read(&fs_info->balance_pause_req) == 0 &&
+                atomic_read(&fs_info->balance_cancel_req) == 0);
+}
+
+static void __cancel_balance(struct btrfs_fs_info *fs_info)
+{
+       int ret;
+
+       unset_balance_control(fs_info);
+       ret = del_balance_item(fs_info->tree_root);
+       BUG_ON(ret);
+}
+
+void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
+                              struct btrfs_ioctl_balance_args *bargs);
+
+/*
+ * Should be called with both balance and volume mutexes held
+ */
+int btrfs_balance(struct btrfs_balance_control *bctl,
+                 struct btrfs_ioctl_balance_args *bargs)
+{
+       struct btrfs_fs_info *fs_info = bctl->fs_info;
+       u64 allowed;
+       int ret;
+
+       if (btrfs_fs_closing(fs_info) ||
+           atomic_read(&fs_info->balance_pause_req) ||
+           atomic_read(&fs_info->balance_cancel_req)) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /*
+        * In case of mixed groups both data and meta should be picked,
+        * and identical options should be given for both of them.
+        */
+       allowed = btrfs_super_incompat_flags(fs_info->super_copy);
+       if ((allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS) &&
+           (bctl->flags & (BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA))) {
+               if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
+                   !(bctl->flags & BTRFS_BALANCE_METADATA) ||
+                   memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
+                       printk(KERN_ERR "btrfs: with mixed groups data and "
+                              "metadata balance options must be the same\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+       /*
+        * Profile changing sanity checks.  Skip them if a simple
+        * balance is requested.
+        */
+       if (!((bctl->data.flags | bctl->sys.flags | bctl->meta.flags) &
+             BTRFS_BALANCE_ARGS_CONVERT))
+               goto do_balance;
+
+       allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
+       if (fs_info->fs_devices->num_devices == 1)
+               allowed |= BTRFS_BLOCK_GROUP_DUP;
+       else if (fs_info->fs_devices->num_devices < 4)
+               allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1);
+       else
+               allowed |= (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID1 |
+                               BTRFS_BLOCK_GROUP_RAID10);
+
+       if (!profile_is_valid(bctl->data.target, 1) ||
+           bctl->data.target & ~allowed) {
+               printk(KERN_ERR "btrfs: unable to start balance with target "
+                      "data profile %llu\n",
+                      (unsigned long long)bctl->data.target);
+               ret = -EINVAL;
+               goto out;
+       }
+       if (!profile_is_valid(bctl->meta.target, 1) ||
+           bctl->meta.target & ~allowed) {
+               printk(KERN_ERR "btrfs: unable to start balance with target "
+                      "metadata profile %llu\n",
+                      (unsigned long long)bctl->meta.target);
+               ret = -EINVAL;
+               goto out;
+       }
+       if (!profile_is_valid(bctl->sys.target, 1) ||
+           bctl->sys.target & ~allowed) {
+               printk(KERN_ERR "btrfs: unable to start balance with target "
+                      "system profile %llu\n",
+                      (unsigned long long)bctl->sys.target);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       if (bctl->data.target & BTRFS_BLOCK_GROUP_DUP) {
+               printk(KERN_ERR "btrfs: dup for data is not allowed\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* allow to reduce meta or sys integrity only if force set */
+       allowed = BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
+                       BTRFS_BLOCK_GROUP_RAID10;
+       if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+            (fs_info->avail_system_alloc_bits & allowed) &&
+            !(bctl->sys.target & allowed)) ||
+           ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
+            (fs_info->avail_metadata_alloc_bits & allowed) &&
+            !(bctl->meta.target & allowed))) {
+               if (bctl->flags & BTRFS_BALANCE_FORCE) {
+                       printk(KERN_INFO "btrfs: force reducing metadata "
+                              "integrity\n");
+               } else {
+                       printk(KERN_ERR "btrfs: balance will reduce metadata "
+                              "integrity, use force if you want this\n");
+                       ret = -EINVAL;
+                       goto out;
+               }
+       }
+
+do_balance:
+       ret = insert_balance_item(fs_info->tree_root, bctl);
+       if (ret && ret != -EEXIST)
+               goto out;
+
+       if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
+               BUG_ON(ret == -EEXIST);
+               set_balance_control(bctl);
+       } else {
+               BUG_ON(ret != -EEXIST);
+               spin_lock(&fs_info->balance_lock);
+               update_balance_args(bctl);
+               spin_unlock(&fs_info->balance_lock);
+       }
+
+       atomic_inc(&fs_info->balance_running);
+       mutex_unlock(&fs_info->balance_mutex);
+
+       ret = __btrfs_balance(fs_info);
+
+       mutex_lock(&fs_info->balance_mutex);
+       atomic_dec(&fs_info->balance_running);
+
+       if (bargs) {
+               memset(bargs, 0, sizeof(*bargs));
+               update_ioctl_balance_args(fs_info, 0, bargs);
+       }
+
+       if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
+           balance_need_close(fs_info)) {
+               __cancel_balance(fs_info);
+       }
+
+       wake_up(&fs_info->balance_wait_q);
+
+       return ret;
+out:
+       if (bctl->flags & BTRFS_BALANCE_RESUME)
+               __cancel_balance(fs_info);
+       else
+               kfree(bctl);
+       return ret;
+}
+
+static int balance_kthread(void *data)
+{
+       struct btrfs_balance_control *bctl =
+                       (struct btrfs_balance_control *)data;
+       struct btrfs_fs_info *fs_info = bctl->fs_info;
+       int ret = 0;
+
+       mutex_lock(&fs_info->volume_mutex);
+       mutex_lock(&fs_info->balance_mutex);
+
+       set_balance_control(bctl);
+
+       if (btrfs_test_opt(fs_info->tree_root, SKIP_BALANCE)) {
+               printk(KERN_INFO "btrfs: force skipping balance\n");
+       } else {
+               printk(KERN_INFO "btrfs: continuing balance\n");
+               ret = btrfs_balance(bctl, NULL);
+       }
+
+       mutex_unlock(&fs_info->balance_mutex);
+       mutex_unlock(&fs_info->volume_mutex);
+       return ret;
+}
+
+int btrfs_recover_balance(struct btrfs_root *tree_root)
+{
+       struct task_struct *tsk;
+       struct btrfs_balance_control *bctl;
+       struct btrfs_balance_item *item;
+       struct btrfs_disk_balance_args disk_bargs;
+       struct btrfs_path *path;
+       struct extent_buffer *leaf;
+       struct btrfs_key key;
+       int ret;
+
+       path = btrfs_alloc_path();
+       if (!path)
+               return -ENOMEM;
+
+       bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
+       if (!bctl) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
+       key.objectid = BTRFS_BALANCE_OBJECTID;
+       key.type = BTRFS_BALANCE_ITEM_KEY;
+       key.offset = 0;
+
+       ret = btrfs_search_slot(NULL, tree_root, &key, path, 0, 0);
+       if (ret < 0)
+               goto out_bctl;
+       if (ret > 0) { /* ret = -ENOENT; */
+               ret = 0;
+               goto out_bctl;
+       }
+
+       leaf = path->nodes[0];
+       item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
+
+       bctl->fs_info = tree_root->fs_info;
+       bctl->flags = btrfs_balance_flags(leaf, item) | BTRFS_BALANCE_RESUME;
+
+       btrfs_balance_data(leaf, item, &disk_bargs);
+       btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
+       btrfs_balance_meta(leaf, item, &disk_bargs);
+       btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
+       btrfs_balance_sys(leaf, item, &disk_bargs);
+       btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
+
+       tsk = kthread_run(balance_kthread, bctl, "btrfs-balance");
+       if (IS_ERR(tsk))
+               ret = PTR_ERR(tsk);
+       else
+               goto out;
+
+out_bctl:
+       kfree(bctl);
+out:
+       btrfs_free_path(path);
+       return ret;
+}
+
+int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
+{
+       int ret = 0;
+
+       mutex_lock(&fs_info->balance_mutex);
+       if (!fs_info->balance_ctl) {
+               mutex_unlock(&fs_info->balance_mutex);
+               return -ENOTCONN;
+       }
+
+       if (atomic_read(&fs_info->balance_running)) {
+               atomic_inc(&fs_info->balance_pause_req);
+               mutex_unlock(&fs_info->balance_mutex);
+
+               wait_event(fs_info->balance_wait_q,
+                          atomic_read(&fs_info->balance_running) == 0);
+
+               mutex_lock(&fs_info->balance_mutex);
+               /* we are good with balance_ctl ripped off from under us */
+               BUG_ON(atomic_read(&fs_info->balance_running));
+               atomic_dec(&fs_info->balance_pause_req);
+       } else {
+               ret = -ENOTCONN;
+       }
+
+       mutex_unlock(&fs_info->balance_mutex);
+       return ret;
+}
+
+int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
+{
+       mutex_lock(&fs_info->balance_mutex);
+       if (!fs_info->balance_ctl) {
+               mutex_unlock(&fs_info->balance_mutex);
+               return -ENOTCONN;
+       }
+
+       atomic_inc(&fs_info->balance_cancel_req);
+       /*
+        * if we are running just wait and return, balance item is
+        * deleted in btrfs_balance in this case
+        */
+       if (atomic_read(&fs_info->balance_running)) {
+               mutex_unlock(&fs_info->balance_mutex);
+               wait_event(fs_info->balance_wait_q,
+                          atomic_read(&fs_info->balance_running) == 0);
+               mutex_lock(&fs_info->balance_mutex);
+       } else {
+               /* __cancel_balance needs volume_mutex */
+               mutex_unlock(&fs_info->balance_mutex);
+               mutex_lock(&fs_info->volume_mutex);
+               mutex_lock(&fs_info->balance_mutex);
+
+               if (fs_info->balance_ctl)
+                       __cancel_balance(fs_info);
+
+               mutex_unlock(&fs_info->volume_mutex);
+       }
+
+       BUG_ON(fs_info->balance_ctl || atomic_read(&fs_info->balance_running));
+       atomic_dec(&fs_info->balance_cancel_req);
+       mutex_unlock(&fs_info->balance_mutex);
+       return 0;
+}
+
 /*
  * shrinking a device means finding all of the device extents past
  * the new size, and then following the back refs to the chunks.
@@ -2323,8 +3041,7 @@ done:
        return ret;
 }
 
-static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans,
-                          struct btrfs_root *root,
+static int btrfs_add_system_chunk(struct btrfs_root *root,
                           struct btrfs_key *key,
                           struct btrfs_chunk *chunk, int item_size)
 {
@@ -2441,10 +3158,14 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                max_stripe_size = 1024 * 1024 * 1024;
                max_chunk_size = 10 * max_stripe_size;
        } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
-               max_stripe_size = 256 * 1024 * 1024;
+               /* for larger filesystems, use larger metadata chunks */
+               if (fs_devices->total_rw_bytes > 50ULL * 1024 * 1024 * 1024)
+                       max_stripe_size = 1024 * 1024 * 1024;
+               else
+                       max_stripe_size = 256 * 1024 * 1024;
                max_chunk_size = max_stripe_size;
        } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
-               max_stripe_size = 8 * 1024 * 1024;
+               max_stripe_size = 32 * 1024 * 1024;
                max_chunk_size = 2 * max_stripe_size;
        } else {
                printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n",
@@ -2496,7 +3217,7 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                if (total_avail == 0)
                        continue;
 
-               ret = find_free_dev_extent(trans, device,
+               ret = find_free_dev_extent(device,
                                           max_stripe_size * dev_stripes,
                                           &dev_offset, &max_avail);
                if (ret && ret != -ENOSPC)
@@ -2687,7 +3408,7 @@ static int __finish_chunk_alloc(struct btrfs_trans_handle *trans,
        BUG_ON(ret);
 
        if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
-               ret = btrfs_add_system_chunk(trans, chunk_root, &key, chunk,
+               ret = btrfs_add_system_chunk(chunk_root, &key, chunk,
                                             item_size);
                BUG_ON(ret);
        }
@@ -2752,8 +3473,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
                return ret;
 
        alloc_profile = BTRFS_BLOCK_GROUP_METADATA |
-                       (fs_info->metadata_alloc_profile &
-                        fs_info->avail_metadata_alloc_bits);
+                               fs_info->avail_metadata_alloc_bits;
        alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
 
        ret = __btrfs_alloc_chunk(trans, extent_root, &map, &chunk_size,
@@ -2763,8 +3483,7 @@ static noinline int init_first_rw_device(struct btrfs_trans_handle *trans,
        sys_chunk_offset = chunk_offset + chunk_size;
 
        alloc_profile = BTRFS_BLOCK_GROUP_SYSTEM |
-                       (fs_info->system_alloc_profile &
-                        fs_info->avail_system_alloc_bits);
+                               fs_info->avail_system_alloc_bits;
        alloc_profile = btrfs_reduce_alloc_profile(root, alloc_profile);
 
        ret = __btrfs_alloc_chunk(trans, extent_root, &sys_map,
@@ -2901,26 +3620,13 @@ static int __btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
        u64 stripe_nr;
        u64 stripe_nr_orig;
        u64 stripe_nr_end;
-       int stripes_allocated = 8;
-       int stripes_required = 1;
        int stripe_index;
        int i;
+       int ret = 0;
        int num_stripes;
        int max_errors = 0;
        struct btrfs_bio *bbio = NULL;
 
-       if (bbio_ret && !(rw & (REQ_WRITE | REQ_DISCARD)))
-               stripes_allocated = 1;
-again:
-       if (bbio_ret) {
-               bbio = kzalloc(btrfs_bio_size(stripes_allocated),
-                               GFP_NOFS);
-               if (!bbio)
-                       return -ENOMEM;
-
-               atomic_set(&bbio->error, 0);
-       }
-
        read_lock(&em_tree->lock);
        em = lookup_extent_mapping(em_tree, logical, *length);
        read_unlock(&em_tree->lock);
@@ -2939,32 +3645,6 @@ again:
        if (mirror_num > map->num_stripes)
                mirror_num = 0;
 
-       /* if our btrfs_bio struct is too small, back off and try again */
-       if (rw & REQ_WRITE) {
-               if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
-                                BTRFS_BLOCK_GROUP_DUP)) {
-                       stripes_required = map->num_stripes;
-                       max_errors = 1;
-               } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
-                       stripes_required = map->sub_stripes;
-                       max_errors = 1;
-               }
-       }
-       if (rw & REQ_DISCARD) {
-               if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
-                                BTRFS_BLOCK_GROUP_RAID1 |
-                                BTRFS_BLOCK_GROUP_DUP |
-                                BTRFS_BLOCK_GROUP_RAID10)) {
-                       stripes_required = map->num_stripes;
-               }
-       }
-       if (bbio_ret && (rw & (REQ_WRITE | REQ_DISCARD)) &&
-           stripes_allocated < stripes_required) {
-               stripes_allocated = map->num_stripes;
-               free_extent_map(em);
-               kfree(bbio);
-               goto again;
-       }
        stripe_nr = offset;
        /*
         * stripe_nr counts the total number of stripes we have to stride
@@ -2980,10 +3660,7 @@ again:
 
        if (rw & REQ_DISCARD)
                *length = min_t(u64, em->len - offset, *length);
-       else if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
-                             BTRFS_BLOCK_GROUP_RAID1 |
-                             BTRFS_BLOCK_GROUP_RAID10 |
-                             BTRFS_BLOCK_GROUP_DUP)) {
+       else if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
                /* we limit the length of each bio to what fits in a stripe */
                *length = min_t(u64, em->len - offset,
                                map->stripe_len - stripe_offset);
@@ -3059,81 +3736,55 @@ again:
        }
        BUG_ON(stripe_index >= map->num_stripes);
 
+       bbio = kzalloc(btrfs_bio_size(num_stripes), GFP_NOFS);
+       if (!bbio) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       atomic_set(&bbio->error, 0);
+
        if (rw & REQ_DISCARD) {
+               int factor = 0;
+               int sub_stripes = 0;
+               u64 stripes_per_dev = 0;
+               u32 remaining_stripes = 0;
+
+               if (map->type &
+                   (BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
+                       if (map->type & BTRFS_BLOCK_GROUP_RAID0)
+                               sub_stripes = 1;
+                       else
+                               sub_stripes = map->sub_stripes;
+
+                       factor = map->num_stripes / sub_stripes;
+                       stripes_per_dev = div_u64_rem(stripe_nr_end -
+                                                     stripe_nr_orig,
+                                                     factor,
+                                                     &remaining_stripes);
+               }
+
                for (i = 0; i < num_stripes; i++) {
                        bbio->stripes[i].physical =
                                map->stripes[stripe_index].physical +
                                stripe_offset + stripe_nr * map->stripe_len;
                        bbio->stripes[i].dev = map->stripes[stripe_index].dev;
 
-                       if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
-                               u64 stripes;
-                               u32 last_stripe = 0;
-                               int j;
-
-                               div_u64_rem(stripe_nr_end - 1,
-                                           map->num_stripes,
-                                           &last_stripe);
-
-                               for (j = 0; j < map->num_stripes; j++) {
-                                       u32 test;
-
-                                       div_u64_rem(stripe_nr_end - 1 - j,
-                                                   map->num_stripes, &test);
-                                       if (test == stripe_index)
-                                               break;
-                               }
-                               stripes = stripe_nr_end - 1 - j;
-                               do_div(stripes, map->num_stripes);
-                               bbio->stripes[i].length = map->stripe_len *
-                                       (stripes - stripe_nr + 1);
-
-                               if (i == 0) {
-                                       bbio->stripes[i].length -=
-                                               stripe_offset;
-                                       stripe_offset = 0;
-                               }
-                               if (stripe_index == last_stripe)
-                                       bbio->stripes[i].length -=
-                                               stripe_end_offset;
-                       } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
-                               u64 stripes;
-                               int j;
-                               int factor = map->num_stripes /
-                                            map->sub_stripes;
-                               u32 last_stripe = 0;
-
-                               div_u64_rem(stripe_nr_end - 1,
-                                           factor, &last_stripe);
-                               last_stripe *= map->sub_stripes;
-
-                               for (j = 0; j < factor; j++) {
-                                       u32 test;
-
-                                       div_u64_rem(stripe_nr_end - 1 - j,
-                                                   factor, &test);
-
-                                       if (test ==
-                                           stripe_index / map->sub_stripes)
-                                               break;
-                               }
-                               stripes = stripe_nr_end - 1 - j;
-                               do_div(stripes, factor);
-                               bbio->stripes[i].length = map->stripe_len *
-                                       (stripes - stripe_nr + 1);
-
-                               if (i < map->sub_stripes) {
+                       if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
+                                        BTRFS_BLOCK_GROUP_RAID10)) {
+                               bbio->stripes[i].length = stripes_per_dev *
+                                                         map->stripe_len;
+                               if (i / sub_stripes < remaining_stripes)
+                                       bbio->stripes[i].length +=
+                                               map->stripe_len;
+                               if (i < sub_stripes)
                                        bbio->stripes[i].length -=
                                                stripe_offset;
-                                       if (i == map->sub_stripes - 1)
-                                               stripe_offset = 0;
-                               }
-                               if (stripe_index >= last_stripe &&
-                                   stripe_index <= (last_stripe +
-                                                    map->sub_stripes - 1)) {
+                               if ((i / sub_stripes + 1) %
+                                   sub_stripes == remaining_stripes)
                                        bbio->stripes[i].length -=
                                                stripe_end_offset;
-                               }
+                               if (i == sub_stripes - 1)
+                                       stripe_offset = 0;
                        } else
                                bbio->stripes[i].length = *length;
 
@@ -3155,15 +3806,22 @@ again:
                        stripe_index++;
                }
        }
-       if (bbio_ret) {
-               *bbio_ret = bbio;
-               bbio->num_stripes = num_stripes;
-               bbio->max_errors = max_errors;
-               bbio->mirror_num = mirror_num;
+
+       if (rw & REQ_WRITE) {
+               if (map->type & (BTRFS_BLOCK_GROUP_RAID1 |
+                                BTRFS_BLOCK_GROUP_RAID10 |
+                                BTRFS_BLOCK_GROUP_DUP)) {
+                       max_errors = 1;
+               }
        }
+
+       *bbio_ret = bbio;
+       bbio->num_stripes = num_stripes;
+       bbio->max_errors = max_errors;
+       bbio->mirror_num = mirror_num;
 out:
        free_extent_map(em);
-       return 0;
+       return ret;
 }
 
 int btrfs_map_block(struct btrfs_mapping_tree *map_tree, int rw,
@@ -3304,7 +3962,7 @@ static noinline int schedule_bio(struct btrfs_root *root,
        /* don't bother with additional async steps for reads, right now */
        if (!(rw & REQ_WRITE)) {
                bio_get(bio);
-               submit_bio(rw, bio);
+               btrfsic_submit_bio(rw, bio);
                bio_put(bio);
                return 0;
        }
@@ -3399,7 +4057,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                        if (async_submit)
                                schedule_bio(root, dev, rw, bio);
                        else
-                               submit_bio(rw, bio);
+                               btrfsic_submit_bio(rw, bio);
                } else {
                        bio->bi_bdev = root->fs_info->fs_devices->latest_bdev;
                        bio->bi_sector = logical >> 9;
@@ -3568,7 +4226,7 @@ static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
        struct btrfs_fs_devices *fs_devices;
        int ret;
 
-       mutex_lock(&uuid_mutex);
+       BUG_ON(!mutex_is_locked(&uuid_mutex));
 
        fs_devices = root->fs_info->fs_devices->seed;
        while (fs_devices) {
@@ -3606,7 +4264,6 @@ static int open_seed_devices(struct btrfs_root *root, u8 *fsid)
        fs_devices->seed = root->fs_info->fs_devices->seed;
        root->fs_info->fs_devices->seed = fs_devices;
 out:
-       mutex_unlock(&uuid_mutex);
        return ret;
 }
 
@@ -3749,6 +4406,9 @@ int btrfs_read_chunk_tree(struct btrfs_root *root)
        if (!path)
                return -ENOMEM;
 
+       mutex_lock(&uuid_mutex);
+       lock_chunks(root);
+
        /* first we search for all of the device items, and then we
         * read in all of the chunk items.  This way we can create chunk
         * mappings that reference all of the devices that are afound
@@ -3799,6 +4459,9 @@ again:
        }
        ret = 0;
 error:
+       unlock_chunks(root);
+       mutex_unlock(&uuid_mutex);
+
        btrfs_free_path(path);
        return ret;
 }
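
Editor's note on the restriper logic added above: should_balance_chunk() is built as a chain of optional filters, where each filter enabled in the per-type balance args may veto a chunk and the chunk is relocated only if nothing vetoes it; the first pass of the loop (counting == true) only tallies bctl->stat.expected before any relocation starts. A minimal, userspace-only sketch of that chaining follows, with illustrative names and structures rather than the kernel API.

#include <stdint.h>
#include <stdio.h>

#define ARG_PROFILES	(1ULL << 0)
#define ARG_USAGE	(1ULL << 1)

struct args {
	uint64_t flags;		/* which filters are enabled */
	uint64_t profiles;	/* allowed profile bits */
	uint64_t usage;		/* relocate chunks used less than this percent */
};

struct chunk {
	uint64_t profile;
	uint64_t used_pct;
};

/* each filter returns 1 when the chunk should be skipped */
static int profiles_filter(const struct chunk *c, const struct args *a)
{
	return !(c->profile & a->profiles);
}

static int usage_filter(const struct chunk *c, const struct args *a)
{
	return c->used_pct >= a->usage;
}

static int should_balance(const struct chunk *c, const struct args *a)
{
	if ((a->flags & ARG_PROFILES) && profiles_filter(c, a))
		return 0;
	if ((a->flags & ARG_USAGE) && usage_filter(c, a))
		return 0;
	return 1;	/* no enabled filter vetoed the chunk */
}

int main(void)
{
	struct args a = { .flags = ARG_USAGE, .usage = 50 };
	struct chunk sparse = { .used_pct = 10 };
	struct chunk full = { .used_pct = 90 };

	printf("sparse chunk balanced: %d\n", should_balance(&sparse, &a));
	printf("full chunk balanced:   %d\n", should_balance(&full, &a));
	return 0;
}
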
index 78f2d4d4f37fe81317395688a8b090b71e53a612..19ac95048b88596e44b6dd667b050fb796ab20e7 100644 (file)
@@ -186,6 +186,51 @@ struct map_lookup {
 #define map_lookup_size(n) (sizeof(struct map_lookup) + \
                            (sizeof(struct btrfs_bio_stripe) * (n)))
 
+/*
+ * Restriper's general type filter
+ */
+#define BTRFS_BALANCE_DATA             (1ULL << 0)
+#define BTRFS_BALANCE_SYSTEM           (1ULL << 1)
+#define BTRFS_BALANCE_METADATA         (1ULL << 2)
+
+#define BTRFS_BALANCE_TYPE_MASK                (BTRFS_BALANCE_DATA |       \
+                                        BTRFS_BALANCE_SYSTEM |     \
+                                        BTRFS_BALANCE_METADATA)
+
+#define BTRFS_BALANCE_FORCE            (1ULL << 3)
+#define BTRFS_BALANCE_RESUME           (1ULL << 4)
+
+/*
+ * Balance filters
+ */
+#define BTRFS_BALANCE_ARGS_PROFILES    (1ULL << 0)
+#define BTRFS_BALANCE_ARGS_USAGE       (1ULL << 1)
+#define BTRFS_BALANCE_ARGS_DEVID       (1ULL << 2)
+#define BTRFS_BALANCE_ARGS_DRANGE      (1ULL << 3)
+#define BTRFS_BALANCE_ARGS_VRANGE      (1ULL << 4)
+
+/*
+ * Profile changing flags.  When SOFT is set we won't relocate chunk if
+ * it already has the target profile (even though it may be
+ * half-filled).
+ */
+#define BTRFS_BALANCE_ARGS_CONVERT     (1ULL << 8)
+#define BTRFS_BALANCE_ARGS_SOFT                (1ULL << 9)
+
+struct btrfs_balance_args;
+struct btrfs_balance_progress;
+struct btrfs_balance_control {
+       struct btrfs_fs_info *fs_info;
+
+       struct btrfs_balance_args data;
+       struct btrfs_balance_args meta;
+       struct btrfs_balance_args sys;
+
+       u64 flags;
+
+       struct btrfs_balance_progress stat;
+};
+
 int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
                                   u64 end, u64 *length);
 
@@ -228,9 +273,12 @@ struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid,
                                       u8 *uuid, u8 *fsid);
 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size);
 int btrfs_init_new_device(struct btrfs_root *root, char *path);
-int btrfs_balance(struct btrfs_root *dev_root);
+int btrfs_balance(struct btrfs_balance_control *bctl,
+                 struct btrfs_ioctl_balance_args *bargs);
+int btrfs_recover_balance(struct btrfs_root *tree_root);
+int btrfs_pause_balance(struct btrfs_fs_info *fs_info);
+int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
 int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
-int find_free_dev_extent(struct btrfs_trans_handle *trans,
-                        struct btrfs_device *device, u64 num_bytes,
+int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
                         u64 *start, u64 *max_avail);
 #endif
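
Editor's note: the balance flag bits declared above combine as plain bitmasks. The tiny standalone check below mirrors the type filter at the top of should_balance_chunk(); the constants are copied from the hunk, but reusing the same low bits for the chunk type is a simplification of this sketch, not a statement about the on-disk block-group layout.

#include <stdint.h>
#include <stdio.h>

#define BTRFS_BALANCE_DATA	(1ULL << 0)
#define BTRFS_BALANCE_SYSTEM	(1ULL << 1)
#define BTRFS_BALANCE_METADATA	(1ULL << 2)
#define BTRFS_BALANCE_TYPE_MASK	(BTRFS_BALANCE_DATA |	\
				 BTRFS_BALANCE_SYSTEM |	\
				 BTRFS_BALANCE_METADATA)

int main(void)
{
	/* a balance restricted to data and metadata chunks */
	uint64_t balance_flags = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;

	/* stand-in for the chunk's type bit (illustrative only) */
	uint64_t chunk_type = BTRFS_BALANCE_SYSTEM;

	if (!(chunk_type & (balance_flags & BTRFS_BALANCE_TYPE_MASK)))
		printf("chunk skipped by the type filter\n");
	else
		printf("chunk considered for relocation\n");
	return 0;
}
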
index 3848b04e310e4800f6768160c6ef5111734ad1d5..e7a5659087e66f93769bc750562d21294c9bd2b6 100644 (file)
@@ -200,7 +200,7 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
        ret = btrfs_update_inode(trans, root, inode);
        BUG_ON(ret);
 out:
-       btrfs_end_transaction_throttle(trans, root);
+       btrfs_end_transaction(trans, root);
        return ret;
 }
 
index b60fc8bfb3e9fdb71ffd1e7ded00ea2154958900..620daad201dbc8905f09359b8237a542be1f311e 100644 (file)
@@ -641,10 +641,10 @@ static int __cap_is_valid(struct ceph_cap *cap)
        unsigned long ttl;
        u32 gen;
 
-       spin_lock(&cap->session->s_cap_lock);
+       spin_lock(&cap->session->s_gen_ttl_lock);
        gen = cap->session->s_cap_gen;
        ttl = cap->session->s_cap_ttl;
-       spin_unlock(&cap->session->s_cap_lock);
+       spin_unlock(&cap->session->s_gen_ttl_lock);
 
        if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
                dout("__cap_is_valid %p cap %p issued %s "
index 618246bc2196eee020f6c53ccf789d784603e090..3e8094be4604a481999e2921fb0942441c9086a7 100644 (file)
@@ -975,10 +975,10 @@ static int dentry_lease_is_valid(struct dentry *dentry)
        di = ceph_dentry(dentry);
        if (di->lease_session) {
                s = di->lease_session;
-               spin_lock(&s->s_cap_lock);
+               spin_lock(&s->s_gen_ttl_lock);
                gen = s->s_cap_gen;
                ttl = s->s_cap_ttl;
-               spin_unlock(&s->s_cap_lock);
+               spin_unlock(&s->s_gen_ttl_lock);
 
                if (di->lease_gen == gen &&
                    time_before(jiffies, dentry->d_time) &&
index 23ab6a3f1825e85cb839f508535a56cdc0cc55fe..866e8d7ca37d7343fe7c30cc5e036cb5d6b9494e 100644 (file)
@@ -262,6 +262,7 @@ static int parse_reply_info(struct ceph_msg *msg,
        /* trace */
        ceph_decode_32_safe(&p, end, len, bad);
        if (len > 0) {
+               ceph_decode_need(&p, end, len, bad);
                err = parse_reply_info_trace(&p, p+len, info, features);
                if (err < 0)
                        goto out_bad;
@@ -270,6 +271,7 @@ static int parse_reply_info(struct ceph_msg *msg,
        /* extra */
        ceph_decode_32_safe(&p, end, len, bad);
        if (len > 0) {
+               ceph_decode_need(&p, end, len, bad);
                err = parse_reply_info_extra(&p, p+len, info, features);
                if (err < 0)
                        goto out_bad;
@@ -398,9 +400,11 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
        s->s_con.peer_name.type = CEPH_ENTITY_TYPE_MDS;
        s->s_con.peer_name.num = cpu_to_le64(mds);
 
-       spin_lock_init(&s->s_cap_lock);
+       spin_lock_init(&s->s_gen_ttl_lock);
        s->s_cap_gen = 0;
        s->s_cap_ttl = 0;
+
+       spin_lock_init(&s->s_cap_lock);
        s->s_renew_requested = 0;
        s->s_renew_seq = 0;
        INIT_LIST_HEAD(&s->s_caps);
@@ -2326,10 +2330,10 @@ static void handle_session(struct ceph_mds_session *session,
        case CEPH_SESSION_STALE:
                pr_info("mds%d caps went stale, renewing\n",
                        session->s_mds);
-               spin_lock(&session->s_cap_lock);
+               spin_lock(&session->s_gen_ttl_lock);
                session->s_cap_gen++;
                session->s_cap_ttl = 0;
-               spin_unlock(&session->s_cap_lock);
+               spin_unlock(&session->s_gen_ttl_lock);
                send_renew_caps(mdsc, session);
                break;
 
index a50ca0e39475794018c2350570547bff4f6a7df8..8c7c04ebb595a1a8bd2e9c1b177890f8ba234b9a 100644 (file)
@@ -117,10 +117,13 @@ struct ceph_mds_session {
        void             *s_authorizer_buf, *s_authorizer_reply_buf;
        size_t            s_authorizer_buf_len, s_authorizer_reply_buf_len;
 
-       /* protected by s_cap_lock */
-       spinlock_t        s_cap_lock;
+       /* protected by s_gen_ttl_lock */
+       spinlock_t        s_gen_ttl_lock;
        u32               s_cap_gen;  /* inc each time we get mds stale msg */
        unsigned long     s_cap_ttl;  /* when session caps expire */
+
+       /* protected by s_cap_lock */
+       spinlock_t        s_cap_lock;
        struct list_head  s_caps;     /* all caps issued by this session */
        int               s_nr_caps, s_trim_caps;
        int               s_num_cap_releases;
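
Editor's note: the fs/ceph hunks above split one lock into two, so the new s_gen_ttl_lock covers only the small, frequently read (s_cap_gen, s_cap_ttl) pair while the cap list stays under s_cap_lock. A rough pthread analogue of the reader side is sketched below; the names and the struct are made up for illustration and say nothing about the real ceph structures.

#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct session {
	pthread_spinlock_t gen_ttl_lock;	/* protects gen and ttl only */
	unsigned int gen;
	time_t ttl;

	pthread_spinlock_t cap_lock;		/* protects the (omitted) cap list */
};

static int cap_is_valid(struct session *s, unsigned int cap_gen)
{
	unsigned int gen;
	time_t ttl;

	/* short critical section: snapshot the pair, then check outside the lock */
	pthread_spin_lock(&s->gen_ttl_lock);
	gen = s->gen;
	ttl = s->ttl;
	pthread_spin_unlock(&s->gen_ttl_lock);

	return cap_gen >= gen && time(NULL) < ttl;
}

int main(void)
{
	struct session s = { .gen = 1, .ttl = time(NULL) + 60 };

	pthread_spin_init(&s.gen_ttl_lock, PTHREAD_PROCESS_PRIVATE);
	pthread_spin_init(&s.cap_lock, PTHREAD_PROCESS_PRIVATE);

	printf("cap valid: %d\n", cap_is_valid(&s, 1));
	return 0;
}
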
index 857214ae8c0893181c85c6fb081ef13ae171f0e7..a76f697303d9e5db700598fc817008494908bb99 100644 (file)
@@ -111,8 +111,10 @@ static size_t ceph_vxattrcb_layout(struct ceph_inode_info *ci, char *val,
 }
 
 static struct ceph_vxattr_cb ceph_file_vxattrs[] = {
+       { true, "ceph.file.layout", ceph_vxattrcb_layout},
+       /* The following extended attribute name is deprecated */
        { true, "ceph.layout", ceph_vxattrcb_layout},
-       { NULL, NULL }
+       { true, NULL, NULL }
 };
 
 static struct ceph_vxattr_cb *ceph_inode_vxattrs(struct inode *inode)
index f66cc1625150839244870bb6bbb3d27893b7ec82..2b243af70aa325b3c6049ded6121473f98cfbe8e 100644 (file)
@@ -139,8 +139,7 @@ config CIFS_DFS_UPCALL
            points. If unsure, say N.
 
 config CIFS_FSCACHE
-         bool "Provide CIFS client caching support (EXPERIMENTAL)"
-         depends on EXPERIMENTAL
+         bool "Provide CIFS client caching support"
          depends on CIFS=m && FSCACHE || CIFS=y && FSCACHE=y
          help
            Makes CIFS FS-Cache capable. Say Y here if you want your CIFS data
@@ -148,8 +147,8 @@ config CIFS_FSCACHE
            manager. If unsure, say N.
 
 config CIFS_ACL
-         bool "Provide CIFS ACL support (EXPERIMENTAL)"
-         depends on EXPERIMENTAL && CIFS_XATTR && KEYS
+         bool "Provide CIFS ACL support"
+         depends on CIFS_XATTR && KEYS
          help
            Allows to fetch CIFS/NTFS ACL from the server.  The DACL blob
            is handed over to the application/caller.
index 84e8c0724704173669659f899e7cc83c6741b857..24b3dfc05282e2214df5f3eb014eb124651d6ffe 100644 (file)
@@ -676,14 +676,23 @@ static ssize_t cifs_multiuser_mount_proc_write(struct file *file,
 {
        char c;
        int rc;
+       static bool warned;
 
        rc = get_user(c, buffer);
        if (rc)
                return rc;
        if (c == '0' || c == 'n' || c == 'N')
                multiuser_mount = 0;
-       else if (c == '1' || c == 'y' || c == 'Y')
+       else if (c == '1' || c == 'y' || c == 'Y') {
                multiuser_mount = 1;
+               if (!warned) {
+                       warned = true;
+                       printk(KERN_WARNING "CIFS VFS: The legacy multiuser "
+                               "mount code is scheduled to be deprecated in "
+                               "3.5. Please switch to using the multiuser "
+                               "mount option.");
+               }
+       }
 
        return count;
 }
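
Editor's note: the cifs_debug.c hunk above adds a warn-once deprecation message guarded by a function-local static flag (the kernel also offers printk_once() for the same purpose). A small out-of-kernel sketch of the idiom, with stand-in names:

#include <stdbool.h>
#include <stdio.h>

static void enable_legacy_mode(void)
{
	static bool warned;	/* zero-initialized, flips after the first call */

	if (!warned) {
		warned = true;
		fprintf(stderr, "legacy mode is deprecated; please migrate\n");
	}
	/* ... actually enable the mode here ... */
}

int main(void)
{
	enable_legacy_mode();
	enable_legacy_mode();	/* second call stays silent */
	return 0;
}
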
index 2272fd5fe5b74fcac62d001987ce3980a8a0b3e0..e622863b292f736fc8cc6e5d2ab8105986ad83bf 100644 (file)
@@ -113,9 +113,11 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
                   MAX_MECH_STR_LEN +
                   UID_KEY_LEN + (sizeof(uid_t) * 2) +
                   CREDUID_KEY_LEN + (sizeof(uid_t) * 2) +
-                  USER_KEY_LEN + strlen(sesInfo->user_name) +
                   PID_KEY_LEN + (sizeof(pid_t) * 2) + 1;
 
+       if (sesInfo->user_name)
+               desc_len += USER_KEY_LEN + strlen(sesInfo->user_name);
+
        spnego_key = ERR_PTR(-ENOMEM);
        description = kzalloc(desc_len, GFP_KERNEL);
        if (description == NULL)
@@ -152,8 +154,10 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo)
        dp = description + strlen(description);
        sprintf(dp, ";creduid=0x%x", sesInfo->cred_uid);
 
-       dp = description + strlen(description);
-       sprintf(dp, ";user=%s", sesInfo->user_name);
+       if (sesInfo->user_name) {
+               dp = description + strlen(description);
+               sprintf(dp, ";user=%s", sesInfo->user_name);
+       }
 
        dp = description + strlen(description);
        sprintf(dp, ";pid=0x%x", current->pid);
index 1b2e180b018dd01e9d65041c6eb8cc288974b2c3..fbb9da95184379bcec53499a7fcac55ff6078f17 100644 (file)
 #include "cifs_debug.h"
 
 /*
- * cifs_ucs2_bytes - how long will a string be after conversion?
- * @ucs - pointer to input string
+ * cifs_utf16_bytes - how long will a string be after conversion?
+ * @utf16 - pointer to input string
  * @maxbytes - don't go past this many bytes of input string
  * @codepage - destination codepage
  *
- * Walk a ucs2le string and return the number of bytes that the string will
+ * Walk a utf16le string and return the number of bytes that the string will
  * be after being converted to the given charset, not including any null
  * termination required. Don't walk past maxbytes in the source buffer.
  */
 int
-cifs_ucs2_bytes(const __le16 *from, int maxbytes,
+cifs_utf16_bytes(const __le16 *from, int maxbytes,
                const struct nls_table *codepage)
 {
        int i;
@@ -122,7 +122,7 @@ cp_convert:
 }
 
 /*
- * cifs_from_ucs2 - convert utf16le string to local charset
+ * cifs_from_utf16 - convert utf16le string to local charset
  * @to - destination buffer
  * @from - source buffer
  * @tolen - destination buffer size (in bytes)
@@ -130,7 +130,7 @@ cp_convert:
  * @codepage - codepage to which characters should be converted
  * @mapchar - should characters be remapped according to the mapchars option?
  *
- * Convert a little-endian ucs2le string (as sent by the server) to a string
+ * Convert a little-endian utf16le string (as sent by the server) to a string
  * in the provided codepage. The tolen and fromlen parameters are to ensure
  * that the code doesn't walk off of the end of the buffer (which is always
  * a danger if the alignment of the source buffer is off). The destination
@@ -139,12 +139,12 @@ cp_convert:
  * null terminator).
  *
  * Note that some windows versions actually send multiword UTF-16 characters
- * instead of straight UCS-2. The linux nls routines however aren't able to
+ * instead of straight UTF16-2. The linux nls routines however aren't able to
  * deal with those characters properly. In the event that we get some of
  * those characters, they won't be translated properly.
  */
 int
-cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen,
+cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
                 const struct nls_table *codepage, bool mapchar)
 {
        int i, charlen, safelen;
@@ -190,13 +190,13 @@ cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen,
 }
 
 /*
- * NAME:       cifs_strtoUCS()
+ * NAME:       cifs_strtoUTF16()
  *
  * FUNCTION:   Convert character string to unicode string
  *
  */
 int
-cifs_strtoUCS(__le16 *to, const char *from, int len,
+cifs_strtoUTF16(__le16 *to, const char *from, int len,
              const struct nls_table *codepage)
 {
        int charlen;
@@ -206,7 +206,7 @@ cifs_strtoUCS(__le16 *to, const char *from, int len,
        for (i = 0; len && *from; i++, from += charlen, len -= charlen) {
                charlen = codepage->char2uni(from, len, &wchar_to);
                if (charlen < 1) {
-                       cERROR(1, "strtoUCS: char2uni of 0x%x returned %d",
+                       cERROR(1, "strtoUTF16: char2uni of 0x%x returned %d",
                                *from, charlen);
                        /* A question mark */
                        wchar_to = 0x003f;
@@ -220,7 +220,8 @@ cifs_strtoUCS(__le16 *to, const char *from, int len,
 }
 
 /*
- * cifs_strndup_from_ucs - copy a string from wire format to the local codepage
+ * cifs_strndup_from_utf16 - copy a string from wire format to the local
+ * codepage
  * @src - source string
  * @maxlen - don't walk past this many bytes in the source string
  * @is_unicode - is this a unicode string?
@@ -231,19 +232,19 @@ cifs_strtoUCS(__le16 *to, const char *from, int len,
  * error.
  */
 char *
-cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode,
-            const struct nls_table *codepage)
+cifs_strndup_from_utf16(const char *src, const int maxlen,
+                       const bool is_unicode, const struct nls_table *codepage)
 {
        int len;
        char *dst;
 
        if (is_unicode) {
-               len = cifs_ucs2_bytes((__le16 *) src, maxlen, codepage);
+               len = cifs_utf16_bytes((__le16 *) src, maxlen, codepage);
                len += nls_nullsize(codepage);
                dst = kmalloc(len, GFP_KERNEL);
                if (!dst)
                        return NULL;
-               cifs_from_ucs2(dst, (__le16 *) src, len, maxlen, codepage,
+               cifs_from_utf16(dst, (__le16 *) src, len, maxlen, codepage,
                               false);
        } else {
                len = strnlen(src, maxlen);
@@ -264,7 +265,7 @@ cifs_strndup_from_ucs(const char *src, const int maxlen, const bool is_unicode,
  * names are little endian 16 bit Unicode on the wire
  */
 int
-cifsConvertToUCS(__le16 *target, const char *source, int srclen,
+cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
                 const struct nls_table *cp, int mapChars)
 {
        int i, j, charlen;
@@ -273,7 +274,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
        wchar_t tmp;
 
        if (!mapChars)
-               return cifs_strtoUCS(target, source, PATH_MAX, cp);
+               return cifs_strtoUTF16(target, source, PATH_MAX, cp);
 
        for (i = 0, j = 0; i < srclen; j++) {
                src_char = source[i];
@@ -281,7 +282,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
                switch (src_char) {
                case 0:
                        put_unaligned(0, &target[j]);
-                       goto ctoUCS_out;
+                       goto ctoUTF16_out;
                case ':':
                        dst_char = cpu_to_le16(UNI_COLON);
                        break;
@@ -326,7 +327,7 @@ cifsConvertToUCS(__le16 *target, const char *source, int srclen,
                put_unaligned(dst_char, &target[j]);
        }
 
-ctoUCS_out:
+ctoUTF16_out:
        return i;
 }
 
index 6d02fd560566b8184f004f9087dd7ea48ef590d7..a513a546700b5e1a9be653b77316b9487d04cde0 100644 (file)
@@ -74,16 +74,16 @@ extern const struct UniCaseRange CifsUniLowerRange[];
 #endif                         /* UNIUPR_NOLOWER */
 
 #ifdef __KERNEL__
-int cifs_from_ucs2(char *to, const __le16 *from, int tolen, int fromlen,
-                  const struct nls_table *codepage, bool mapchar);
-int cifs_ucs2_bytes(const __le16 *from, int maxbytes,
-                   const struct nls_table *codepage);
-int cifs_strtoUCS(__le16 *, const char *, int, const struct nls_table *);
-char *cifs_strndup_from_ucs(const char *src, const int maxlen,
-                           const bool is_unicode,
-                           const struct nls_table *codepage);
-extern int cifsConvertToUCS(__le16 *target, const char *source, int maxlen,
-                       const struct nls_table *cp, int mapChars);
+int cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
+                   const struct nls_table *codepage, bool mapchar);
+int cifs_utf16_bytes(const __le16 *from, int maxbytes,
+                    const struct nls_table *codepage);
+int cifs_strtoUTF16(__le16 *, const char *, int, const struct nls_table *);
+char *cifs_strndup_from_utf16(const char *src, const int maxlen,
+                             const bool is_unicode,
+                             const struct nls_table *codepage);
+extern int cifsConvertToUTF16(__le16 *target, const char *source, int maxlen,
+                             const struct nls_table *cp, int mapChars);
 
 #endif
 
index 854749d21bb1a6a28496e88b0061fbe98a2693b2..3cc1b251ca08eba0a39fc4180d5de360c580f25c 100644 (file)
@@ -910,6 +910,8 @@ static void parse_dacl(struct cifs_acl *pdacl, char *end_of_acl,
                umode_t group_mask = S_IRWXG;
                umode_t other_mask = S_IRWXU | S_IRWXG | S_IRWXO;
 
+               if (num_aces > ULONG_MAX / sizeof(struct cifs_ace *))
+                       return;
                ppace = kmalloc(num_aces * sizeof(struct cifs_ace *),
                                GFP_KERNEL);
                if (!ppace) {
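
Editor's note: the single added line in parse_dacl() above is an integer-overflow guard; an on-the-wire ACE count is validated against ULONG_MAX / sizeof(element) before being multiplied for kmalloc(). A minimal standalone version of the same idiom, using SIZE_MAX and hypothetical types:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct ace { uint32_t sid, mask; };

static struct ace **alloc_ace_ptrs(size_t num_aces)
{
	/* without this check, a huge num_aces wraps the multiplication below */
	if (num_aces > SIZE_MAX / sizeof(struct ace *))
		return NULL;

	return malloc(num_aces * sizeof(struct ace *));
}

int main(void)
{
	struct ace **ok = alloc_ace_ptrs(16);
	struct ace **bad = alloc_ace_ptrs(SIZE_MAX);	/* would overflow, rejected */

	printf("sane count allocated: %s\n", ok ? "yes" : "no");
	printf("huge count allocated: %s\n", bad ? "yes" : "no");
	free(ok);
	return 0;
}
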
index 5d9b9acc5fcebd1b9c9eff1ab3458acc19fbc72b..63c460e503b601b6bd71ee8185849e82aa04f1dd 100644 (file)
@@ -327,7 +327,7 @@ build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp)
        attrptr->type = cpu_to_le16(NTLMSSP_AV_NB_DOMAIN_NAME);
        attrptr->length = cpu_to_le16(2 * dlen);
        blobptr = (unsigned char *)attrptr + sizeof(struct ntlmssp2_name);
-       cifs_strtoUCS((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
+       cifs_strtoUTF16((__le16 *)blobptr, ses->domainName, dlen, nls_cp);
 
        return 0;
 }
@@ -376,7 +376,7 @@ find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp)
                                        kmalloc(attrsize + 1, GFP_KERNEL);
                                if (!ses->domainName)
                                                return -ENOMEM;
-                               cifs_from_ucs2(ses->domainName,
+                               cifs_from_utf16(ses->domainName,
                                        (__le16 *)blobptr, attrsize, attrsize,
                                        nls_cp, false);
                                break;
@@ -420,15 +420,20 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
        }
 
        /* convert ses->user_name to unicode and uppercase */
-       len = strlen(ses->user_name);
+       len = ses->user_name ? strlen(ses->user_name) : 0;
        user = kmalloc(2 + (len * 2), GFP_KERNEL);
        if (user == NULL) {
                cERROR(1, "calc_ntlmv2_hash: user mem alloc failure\n");
                rc = -ENOMEM;
                return rc;
        }
-       len = cifs_strtoUCS((__le16 *)user, ses->user_name, len, nls_cp);
-       UniStrupr(user);
+
+       if (len) {
+               len = cifs_strtoUTF16((__le16 *)user, ses->user_name, len, nls_cp);
+               UniStrupr(user);
+       } else {
+               memset(user, '\0', 2);
+       }
 
        rc = crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
                                (char *)user, 2 * len);
@@ -448,8 +453,8 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
                        rc = -ENOMEM;
                        return rc;
                }
-               len = cifs_strtoUCS((__le16 *)domain, ses->domainName, len,
-                                       nls_cp);
+               len = cifs_strtoUTF16((__le16 *)domain, ses->domainName, len,
+                                     nls_cp);
                rc =
                crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
                                        (char *)domain, 2 * len);
@@ -468,7 +473,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
                        rc = -ENOMEM;
                        return rc;
                }
-               len = cifs_strtoUCS((__le16 *)server, ses->serverName, len,
+               len = cifs_strtoUTF16((__le16 *)server, ses->serverName, len,
                                        nls_cp);
                rc =
                crypto_shash_update(&ses->server->secmech.sdeschmacmd5->shash,
index ba53c1c6c6cc091f045de3769d7fd8d295de8ce6..76e7d8b6da171c6af1d8cb8466c9bfbc77d00dbf 100644 (file)
@@ -879,6 +879,8 @@ require use of the stronger protocol */
 #define   CIFSSEC_MASK          0xB70B7 /* current flags supported if weak */
 #endif /* UPCALL */
 #else /* do not allow weak pw hash */
+#define   CIFSSEC_MUST_LANMAN  0
+#define   CIFSSEC_MUST_PLNTXT  0
 #ifdef CONFIG_CIFS_UPCALL
 #define   CIFSSEC_MASK          0x8F08F /* flags supported if no weak allowed */
 #else
index 6600aa2d2ef38a38c228a1cd26cbeae2ea2b6b06..8b7794c315919c5328a5bffbcb2aa5a9d7ad5b30 100644 (file)
@@ -821,8 +821,8 @@ PsxDelete:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
-                                    PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+                                      PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else { /* BB add path length overrun check */
@@ -893,8 +893,8 @@ DelFileRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->fileName, fileName,
-                                    PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->fileName, fileName,
+                                      PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {                /* BB improve check for buffer overruns BB */
@@ -938,8 +938,8 @@ RmDirRetry:
                return rc;
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
-               name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, dirName,
-                                        PATH_MAX, nls_codepage, remap);
+               name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, dirName,
+                                             PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {                /* BB improve check for buffer overruns BB */
@@ -981,8 +981,8 @@ MkDirRetry:
                return rc;
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
-               name_len = cifsConvertToUCS((__le16 *) pSMB->DirName, name,
-                                           PATH_MAX, nls_codepage, remap);
+               name_len = cifsConvertToUTF16((__le16 *) pSMB->DirName, name,
+                                             PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {                /* BB improve check for buffer overruns BB */
@@ -1030,8 +1030,8 @@ PsxCreat:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->FileName, name,
-                                    PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, name,
+                                      PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -1197,8 +1197,8 @@ OldOpenRetry:
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                count = 1;      /* account for one byte pad to word boundary */
                name_len =
-                  cifsConvertToUCS((__le16 *) (pSMB->fileName + 1),
-                                   fileName, PATH_MAX, nls_codepage, remap);
+                  cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1),
+                                     fileName, PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {                /* BB improve check for buffer overruns BB */
@@ -1304,8 +1304,8 @@ openRetry:
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                count = 1;      /* account for one byte pad to word boundary */
                name_len =
-                   cifsConvertToUCS((__le16 *) (pSMB->fileName + 1),
-                                    fileName, PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) (pSMB->fileName + 1),
+                                      fileName, PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
                pSMB->NameLength = cpu_to_le16(name_len);
@@ -2649,16 +2649,16 @@ renameRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName,
-                                    PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->OldFileName, fromName,
+                                      PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
                pSMB->OldFileName[name_len] = 0x04;     /* pad */
        /* protocol requires ASCII signature byte on Unicode string */
                pSMB->OldFileName[name_len + 1] = 0x00;
                name_len2 =
-                   cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
-                                    toName, PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
+                                      toName, PATH_MAX, nls_codepage, remap);
                name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
                name_len2 *= 2; /* convert to bytes */
        } else {        /* BB improve the check for buffer overruns BB */
@@ -2738,10 +2738,12 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon,
        /* unicode only call */
        if (target_name == NULL) {
                sprintf(dummy_string, "cifs%x", pSMB->hdr.Mid);
-               len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name,
+               len_of_str =
+                       cifsConvertToUTF16((__le16 *)rename_info->target_name,
                                        dummy_string, 24, nls_codepage, remap);
        } else {
-               len_of_str = cifsConvertToUCS((__le16 *)rename_info->target_name,
+               len_of_str =
+                       cifsConvertToUTF16((__le16 *)rename_info->target_name,
                                        target_name, PATH_MAX, nls_codepage,
                                        remap);
        }
@@ -2795,17 +2797,17 @@ copyRetry:
        pSMB->Flags = cpu_to_le16(flags & COPY_TREE);
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
-               name_len = cifsConvertToUCS((__le16 *) pSMB->OldFileName,
-                                           fromName, PATH_MAX, nls_codepage,
-                                           remap);
+               name_len = cifsConvertToUTF16((__le16 *) pSMB->OldFileName,
+                                             fromName, PATH_MAX, nls_codepage,
+                                             remap);
                name_len++;     /* trailing null */
                name_len *= 2;
                pSMB->OldFileName[name_len] = 0x04;     /* pad */
                /* protocol requires ASCII signature byte on Unicode string */
                pSMB->OldFileName[name_len + 1] = 0x00;
                name_len2 =
-                   cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
-                               toName, PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
+                                      toName, PATH_MAX, nls_codepage, remap);
                name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
                name_len2 *= 2; /* convert to bytes */
        } else {        /* BB improve the check for buffer overruns BB */
@@ -2861,9 +2863,9 @@ createSymLinkRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifs_strtoUCS((__le16 *) pSMB->FileName, fromName, PATH_MAX
-                                 /* find define for this maxpathcomponent */
-                                 , nls_codepage);
+                   cifs_strtoUTF16((__le16 *) pSMB->FileName, fromName,
+                                   /* find define for this maxpathcomponent */
+                                   PATH_MAX, nls_codepage);
                name_len++;     /* trailing null */
                name_len *= 2;
 
@@ -2885,9 +2887,9 @@ createSymLinkRetry:
        data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len_target =
-                   cifs_strtoUCS((__le16 *) data_offset, toName, PATH_MAX
-                                 /* find define for this maxpathcomponent */
-                                 , nls_codepage);
+                   cifs_strtoUTF16((__le16 *) data_offset, toName, PATH_MAX
+                                   /* find define for this maxpathcomponent */
+                                   , nls_codepage);
                name_len_target++;      /* trailing null */
                name_len_target *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -2949,8 +2951,8 @@ createHardLinkRetry:
                return rc;
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
-               name_len = cifsConvertToUCS((__le16 *) pSMB->FileName, toName,
-                                           PATH_MAX, nls_codepage, remap);
+               name_len = cifsConvertToUTF16((__le16 *) pSMB->FileName, toName,
+                                             PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
 
@@ -2972,8 +2974,8 @@ createHardLinkRetry:
        data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len_target =
-                   cifsConvertToUCS((__le16 *) data_offset, fromName, PATH_MAX,
-                                    nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) data_offset, fromName,
+                                      PATH_MAX, nls_codepage, remap);
                name_len_target++;      /* trailing null */
                name_len_target *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -3042,8 +3044,8 @@ winCreateHardLinkRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->OldFileName, fromName,
-                                    PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->OldFileName, fromName,
+                                      PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
 
@@ -3051,8 +3053,8 @@ winCreateHardLinkRetry:
                pSMB->OldFileName[name_len] = 0x04;
                pSMB->OldFileName[name_len + 1] = 0x00; /* pad */
                name_len2 =
-                   cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
-                                    toName, PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *)&pSMB->OldFileName[name_len+2],
+                                      toName, PATH_MAX, nls_codepage, remap);
                name_len2 += 1 /* trailing null */  + 1 /* Signature word */ ;
                name_len2 *= 2; /* convert to bytes */
        } else {        /* BB improve the check for buffer overruns BB */
@@ -3108,8 +3110,8 @@ querySymLinkRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifs_strtoUCS((__le16 *) pSMB->FileName, searchName,
-                                 PATH_MAX, nls_codepage);
+                       cifs_strtoUTF16((__le16 *) pSMB->FileName, searchName,
+                                       PATH_MAX, nls_codepage);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -3166,8 +3168,8 @@ querySymLinkRetry:
                                is_unicode = false;
 
                        /* BB FIXME investigate remapping reserved chars here */
-                       *symlinkinfo = cifs_strndup_from_ucs(data_start, count,
-                                                   is_unicode, nls_codepage);
+                       *symlinkinfo = cifs_strndup_from_utf16(data_start,
+                                       count, is_unicode, nls_codepage);
                        if (!*symlinkinfo)
                                rc = -ENOMEM;
                }
@@ -3450,8 +3452,9 @@ queryAclRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                       cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-                                        PATH_MAX, nls_codepage, remap);
+                       cifsConvertToUTF16((__le16 *) pSMB->FileName,
+                                          searchName, PATH_MAX, nls_codepage,
+                                          remap);
                name_len++;     /* trailing null */
                name_len *= 2;
                pSMB->FileName[name_len] = 0;
@@ -3537,8 +3540,8 @@ setAclRetry:
                return rc;
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                       cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
-                                     PATH_MAX, nls_codepage, remap);
+                       cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+                                          PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -3948,8 +3951,9 @@ QInfRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                       cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-                                       PATH_MAX, nls_codepage, remap);
+                       cifsConvertToUTF16((__le16 *) pSMB->FileName,
+                                          searchName, PATH_MAX, nls_codepage,
+                                          remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {
@@ -4086,8 +4090,8 @@ QPathInfoRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-                                    PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+                                      PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -4255,8 +4259,8 @@ UnixQPathInfoRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-                                 PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+                                      PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -4344,8 +4348,8 @@ findFirstRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-                                PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+                                      PATH_MAX, nls_codepage, remap);
                /* We cannot add the asterisk earlier in case
                it got remapped to 0xF03A as if it were part of the
                directory name instead of a wildcard */
@@ -4656,8 +4660,9 @@ GetInodeNumberRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                       cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-                                        PATH_MAX, nls_codepage, remap);
+                       cifsConvertToUTF16((__le16 *) pSMB->FileName,
+                                          searchName, PATH_MAX, nls_codepage,
+                                          remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -4794,9 +4799,9 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
                                rc = -ENOMEM;
                                goto parse_DFS_referrals_exit;
                        }
-                       cifsConvertToUCS((__le16 *) tmp, searchName,
-                                       PATH_MAX, nls_codepage, remap);
-                       node->path_consumed = cifs_ucs2_bytes(tmp,
+                       cifsConvertToUTF16((__le16 *) tmp, searchName,
+                                          PATH_MAX, nls_codepage, remap);
+                       node->path_consumed = cifs_utf16_bytes(tmp,
                                        le16_to_cpu(pSMBr->PathConsumed),
                                        nls_codepage);
                        kfree(tmp);
@@ -4809,8 +4814,8 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
                /* copy DfsPath */
                temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
                max_len = data_end - temp;
-               node->path_name = cifs_strndup_from_ucs(temp, max_len,
-                                                     is_unicode, nls_codepage);
+               node->path_name = cifs_strndup_from_utf16(temp, max_len,
+                                               is_unicode, nls_codepage);
                if (!node->path_name) {
                        rc = -ENOMEM;
                        goto parse_DFS_referrals_exit;
@@ -4819,8 +4824,8 @@ parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
                /* copy link target UNC */
                temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
                max_len = data_end - temp;
-               node->node_name = cifs_strndup_from_ucs(temp, max_len,
-                                                     is_unicode, nls_codepage);
+               node->node_name = cifs_strndup_from_utf16(temp, max_len,
+                                               is_unicode, nls_codepage);
                if (!node->node_name)
                        rc = -ENOMEM;
        }
@@ -4873,8 +4878,9 @@ getDFSRetry:
        if (ses->capabilities & CAP_UNICODE) {
                pSMB->hdr.Flags2 |= SMBFLG2_UNICODE;
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->RequestFileName,
-                                    searchName, PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->RequestFileName,
+                                      searchName, PATH_MAX, nls_codepage,
+                                      remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -5506,8 +5512,8 @@ SetEOFRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
-                                    PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+                                      PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -5796,8 +5802,8 @@ SetTimesRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
-                                    PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+                                      PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -5877,8 +5883,8 @@ SetAttrLgcyRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                       ConvertToUCS((__le16 *) pSMB->fileName, fileName,
-                               PATH_MAX, nls_codepage);
+                       ConvertToUTF16((__le16 *) pSMB->fileName, fileName,
+                                      PATH_MAX, nls_codepage);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -6030,8 +6036,8 @@ setPermsRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
-                                    PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+                                      PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -6123,8 +6129,8 @@ QAllEAsRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                list_len =
-                   cifsConvertToUCS((__le16 *) pSMB->FileName, searchName,
-                                    PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, searchName,
+                                      PATH_MAX, nls_codepage, remap);
                list_len++;     /* trailing null */
                list_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -6301,8 +6307,8 @@ SetEARetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifsConvertToUCS((__le16 *) pSMB->FileName, fileName,
-                                    PATH_MAX, nls_codepage, remap);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, fileName,
+                                      PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
index 4666780f315d22e1674abce6db61288871c9ad47..9c288653e6d6a6f6d6bdb813f41f23f1cd5552c4 100644 (file)
@@ -38,6 +38,7 @@
 #include <asm/processor.h>
 #include <linux/inet.h>
 #include <linux/module.h>
+#include <keys/user-type.h>
 #include <net/ipv6.h>
 #include "cifspdu.h"
 #include "cifsglob.h"
@@ -225,74 +226,90 @@ static int check2ndT2(struct smb_hdr *pSMB)
 
 static int coalesce_t2(struct smb_hdr *psecond, struct smb_hdr *pTargetSMB)
 {
-       struct smb_t2_rsp *pSMB2 = (struct smb_t2_rsp *)psecond;
+       struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)psecond;
        struct smb_t2_rsp *pSMBt  = (struct smb_t2_rsp *)pTargetSMB;
-       char *data_area_of_target;
-       char *data_area_of_buf2;
+       char *data_area_of_tgt;
+       char *data_area_of_src;
        int remaining;
-       unsigned int byte_count, total_in_buf;
-       __u16 total_data_size, total_in_buf2;
+       unsigned int byte_count, total_in_tgt;
+       __u16 tgt_total_cnt, src_total_cnt, total_in_src;
 
-       total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
+       src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount);
+       tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount);
 
-       if (total_data_size !=
-           get_unaligned_le16(&pSMB2->t2_rsp.TotalDataCount))
-               cFYI(1, "total data size of primary and secondary t2 differ");
+       if (tgt_total_cnt != src_total_cnt)
+               cFYI(1, "total data count of primary and secondary t2 differ "
+                       "source=%hu target=%hu", src_total_cnt, tgt_total_cnt);
 
-       total_in_buf = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
+       total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount);
 
-       remaining = total_data_size - total_in_buf;
+       remaining = tgt_total_cnt - total_in_tgt;
 
-       if (remaining < 0)
+       if (remaining < 0) {
+               cFYI(1, "Server sent too much data. tgt_total_cnt=%hu "
+                       "total_in_tgt=%hu", tgt_total_cnt, total_in_tgt);
                return -EPROTO;
+       }
 
-       if (remaining == 0) /* nothing to do, ignore */
+       if (remaining == 0) {
+               /* nothing to do, ignore */
+               cFYI(1, "no more data remains");
                return 0;
+       }
 
-       total_in_buf2 = get_unaligned_le16(&pSMB2->t2_rsp.DataCount);
-       if (remaining < total_in_buf2) {
+       total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount);
+       if (remaining < total_in_src)
                cFYI(1, "transact2 2nd response contains too much data");
-       }
 
        /* find end of first SMB data area */
-       data_area_of_target = (char *)&pSMBt->hdr.Protocol +
+       data_area_of_tgt = (char *)&pSMBt->hdr.Protocol +
                                get_unaligned_le16(&pSMBt->t2_rsp.DataOffset);
-       /* validate target area */
 
-       data_area_of_buf2 = (char *)&pSMB2->hdr.Protocol +
-                               get_unaligned_le16(&pSMB2->t2_rsp.DataOffset);
+       /* validate target area */
+       data_area_of_src = (char *)&pSMBs->hdr.Protocol +
+                               get_unaligned_le16(&pSMBs->t2_rsp.DataOffset);
 
-       data_area_of_target += total_in_buf;
+       data_area_of_tgt += total_in_tgt;
 
-       /* copy second buffer into end of first buffer */
-       total_in_buf += total_in_buf2;
+       total_in_tgt += total_in_src;
        /* is the result too big for the field? */
-       if (total_in_buf > USHRT_MAX)
+       if (total_in_tgt > USHRT_MAX) {
+               cFYI(1, "coalesced DataCount too large (%u)", total_in_tgt);
                return -EPROTO;
-       put_unaligned_le16(total_in_buf, &pSMBt->t2_rsp.DataCount);
+       }
+       put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount);
 
        /* fix up the BCC */
        byte_count = get_bcc(pTargetSMB);
-       byte_count += total_in_buf2;
+       byte_count += total_in_src;
        /* is the result too big for the field? */
-       if (byte_count > USHRT_MAX)
+       if (byte_count > USHRT_MAX) {
+               cFYI(1, "coalesced BCC too large (%u)", byte_count);
                return -EPROTO;
+       }
        put_bcc(byte_count, pTargetSMB);
 
        byte_count = be32_to_cpu(pTargetSMB->smb_buf_length);
-       byte_count += total_in_buf2;
+       byte_count += total_in_src;
        /* don't allow buffer to overflow */
-       if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4)
+       if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) {
+               cFYI(1, "coalesced BCC exceeds buffer size (%u)", byte_count);
                return -ENOBUFS;
+       }
        pTargetSMB->smb_buf_length = cpu_to_be32(byte_count);
 
-       memcpy(data_area_of_target, data_area_of_buf2, total_in_buf2);
+       /* copy second buffer into end of first buffer */
+       memcpy(data_area_of_tgt, data_area_of_src, total_in_src);
 
-       if (remaining == total_in_buf2) {
-               cFYI(1, "found the last secondary response");
-               return 0; /* we are done */
-       } else /* more responses to go */
+       if (remaining != total_in_src) {
+               /* more responses to go */
+               cFYI(1, "waiting for more secondary responses");
                return 1;
+       }
+
+       /* we are done */
+       cFYI(1, "found the last secondary response");
+       return 0;
 }
 
 static void
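
A minimal standalone sketch of the size checks the reworked coalesce_t2() above now enforces; plain counters stand in for the SMB response headers and BCC handling, and the figures in main() are invented:

#include <limits.h>
#include <stdio.h>

/* 0 = coalescing complete, 1 = more secondary responses expected, -1 = error */
static int coalesce_counts(unsigned int tgt_total_cnt, unsigned int total_in_tgt,
			   unsigned int total_in_src, unsigned int buf_limit)
{
	int remaining = (int)tgt_total_cnt - (int)total_in_tgt;

	if (remaining < 0)
		return -1;				/* server sent too much data */
	if (remaining == 0)
		return 0;				/* nothing left to merge */
	if (total_in_tgt + total_in_src > USHRT_MAX)	/* DataCount is a __u16 */
		return -1;
	if (total_in_tgt + total_in_src > buf_limit)	/* don't let the buffer overflow */
		return -1;
	return remaining == total_in_src ? 0 : 1;
}

int main(void)
{
	/* 5000-byte transaction, 2000 bytes already buffered, final 3000-byte piece */
	printf("%d\n", coalesce_counts(5000, 2000, 3000, 65536));	/* prints 0 */
	return 0;
}
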
@@ -1578,11 +1595,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
                }
        }
 
-       if (vol->multiuser && !(vol->secFlg & CIFSSEC_MAY_KRB5)) {
-               cERROR(1, "Multiuser mounts currently require krb5 "
-                         "authentication!");
+#ifndef CONFIG_KEYS
+       /* Multiuser mounts require CONFIG_KEYS support */
+       if (vol->multiuser) {
+               cERROR(1, "Multiuser mounts require kernels with "
+                         "CONFIG_KEYS enabled.");
                goto cifs_parse_mount_err;
        }
+#endif
 
        if (vol->UNCip == NULL)
                vol->UNCip = &vol->UNC[2];
@@ -1981,10 +2001,16 @@ static int match_session(struct cifs_ses *ses, struct smb_vol *vol)
                        return 0;
                break;
        default:
+               /* NULL username means anonymous session */
+               if (ses->user_name == NULL) {
+                       if (!vol->nullauth)
+                               return 0;
+                       break;
+               }
+
                /* anything else takes username/password */
-               if (ses->user_name == NULL)
-                       return 0;
-               if (strncmp(ses->user_name, vol->username,
+               if (strncmp(ses->user_name,
+                           vol->username ? vol->username : "",
                            MAX_USERNAME_SIZE))
                        return 0;
                if (strlen(vol->username) != 0 &&
@@ -2039,6 +2065,132 @@ cifs_put_smb_ses(struct cifs_ses *ses)
        cifs_put_tcp_session(server);
 }
 
+#ifdef CONFIG_KEYS
+
+/* strlen("cifs:a:") + INET6_ADDRSTRLEN + 1 */
+#define CIFSCREDS_DESC_SIZE (7 + INET6_ADDRSTRLEN + 1)
+
+/* Populate username and pw fields from keyring if possible */
+static int
+cifs_set_cifscreds(struct smb_vol *vol, struct cifs_ses *ses)
+{
+       int rc = 0;
+       char *desc, *delim, *payload;
+       ssize_t len;
+       struct key *key;
+       struct TCP_Server_Info *server = ses->server;
+       struct sockaddr_in *sa;
+       struct sockaddr_in6 *sa6;
+       struct user_key_payload *upayload;
+
+       desc = kmalloc(CIFSCREDS_DESC_SIZE, GFP_KERNEL);
+       if (!desc)
+               return -ENOMEM;
+
+       /* try to find an address key first */
+       switch (server->dstaddr.ss_family) {
+       case AF_INET:
+               sa = (struct sockaddr_in *)&server->dstaddr;
+               sprintf(desc, "cifs:a:%pI4", &sa->sin_addr.s_addr);
+               break;
+       case AF_INET6:
+               sa6 = (struct sockaddr_in6 *)&server->dstaddr;
+               sprintf(desc, "cifs:a:%pI6c", &sa6->sin6_addr.s6_addr);
+               break;
+       default:
+               cFYI(1, "Bad ss_family (%hu)", server->dstaddr.ss_family);
+               rc = -EINVAL;
+               goto out_err;
+       }
+
+       cFYI(1, "%s: desc=%s", __func__, desc);
+       key = request_key(&key_type_logon, desc, "");
+       if (IS_ERR(key)) {
+               if (!ses->domainName) {
+                       cFYI(1, "domainName is NULL");
+                       rc = PTR_ERR(key);
+                       goto out_err;
+               }
+
+               /* didn't work, try to find a domain key */
+               sprintf(desc, "cifs:d:%s", ses->domainName);
+               cFYI(1, "%s: desc=%s", __func__, desc);
+               key = request_key(&key_type_logon, desc, "");
+               if (IS_ERR(key)) {
+                       rc = PTR_ERR(key);
+                       goto out_err;
+               }
+       }
+
+       down_read(&key->sem);
+       upayload = key->payload.data;
+       if (IS_ERR_OR_NULL(upayload)) {
+               rc = PTR_ERR(key);
+               goto out_key_put;
+       }
+
+       /* find first : in payload */
+       payload = (char *)upayload->data;
+       delim = strnchr(payload, upayload->datalen, ':');
+       cFYI(1, "payload=%s", payload);
+       if (!delim) {
+               cFYI(1, "Unable to find ':' in payload (datalen=%d)",
+                               upayload->datalen);
+               rc = -EINVAL;
+               goto out_key_put;
+       }
+
+       len = delim - payload;
+       if (len > MAX_USERNAME_SIZE || len <= 0) {
+               cFYI(1, "Bad value from username search (len=%zd)", len);
+               rc = -EINVAL;
+               goto out_key_put;
+       }
+
+       vol->username = kstrndup(payload, len, GFP_KERNEL);
+       if (!vol->username) {
+               cFYI(1, "Unable to allocate %zd bytes for username", len);
+               rc = -ENOMEM;
+               goto out_key_put;
+       }
+       cFYI(1, "%s: username=%s", __func__, vol->username);
+
+       len = key->datalen - (len + 1);
+       if (len > MAX_PASSWORD_SIZE || len <= 0) {
+               cFYI(1, "Bad len for password search (len=%zd)", len);
+               rc = -EINVAL;
+               kfree(vol->username);
+               vol->username = NULL;
+               goto out_key_put;
+       }
+
+       ++delim;
+       vol->password = kstrndup(delim, len, GFP_KERNEL);
+       if (!vol->password) {
+               cFYI(1, "Unable to allocate %zd bytes for password", len);
+               rc = -ENOMEM;
+               kfree(vol->username);
+               vol->username = NULL;
+               goto out_key_put;
+       }
+
+out_key_put:
+       up_read(&key->sem);
+       key_put(key);
+out_err:
+       kfree(desc);
+       cFYI(1, "%s: returning %d", __func__, rc);
+       return rc;
+}
+#else /* ! CONFIG_KEYS */
+static inline int
+cifs_set_cifscreds(struct smb_vol *vol __attribute__((unused)),
+                  struct cifs_ses *ses __attribute__((unused)))
+{
+       return -ENOSYS;
+}
+#endif /* CONFIG_KEYS */
+
 static bool warned_on_ntlm;  /* globals init to false automatically */
 
 static struct cifs_ses *
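
The new cifs_set_cifscreds() above pulls mount credentials from the kernel keyring: it requests a "logon" key described as cifs:a:<server address>, falling back to cifs:d:<domain>, and expects the key payload to be username:password split at the first colon. A small userspace sketch of that payload convention follows; the names and buffer sizes are illustrative only, while the real code bounds them by MAX_USERNAME_SIZE and MAX_PASSWORD_SIZE:

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char payload[] = "bob:hunter2";		/* example "logon" key payload */
	const char *delim = strchr(payload, ':');	/* the kernel uses strnchr() */
	char user[64], pass[64];

	if (!delim)
		return 1;	/* cifs_set_cifscreds() fails with -EINVAL here */

	snprintf(user, sizeof(user), "%.*s", (int)(delim - payload), payload);
	snprintf(pass, sizeof(pass), "%s", delim + 1);
	printf("username=%s password=%s\n", user, pass);
	return 0;
}

Such a key would typically be loaded from userspace before mounting, for example with keyctl add logon cifs:a:192.168.1.1 bob:hunter2 @u (address and credentials invented).
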
@@ -2914,18 +3066,33 @@ void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
 #define CIFS_DEFAULT_IOSIZE (1024 * 1024)
 
 /*
- * Windows only supports a max of 60k reads. Default to that when posix
- * extensions aren't in force.
+ * Windows only supports a max of 60kb reads and 65535 byte writes. Default to
+ * those values when posix extensions aren't in force. In actuality here, we
+ * use 65536 to allow for a write that is a multiple of 4k. Most servers seem
+ * to be ok with the extra byte even though Windows doesn't send writes that
+ * are that large.
+ *
+ * Citation:
+ *
+ * http://blogs.msdn.com/b/openspecification/archive/2009/04/10/smb-maximum-transmit-buffer-size-and-performance-tuning.aspx
  */
 #define CIFS_DEFAULT_NON_POSIX_RSIZE (60 * 1024)
+#define CIFS_DEFAULT_NON_POSIX_WSIZE (65536)
 
 static unsigned int
 cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info)
 {
        __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability);
        struct TCP_Server_Info *server = tcon->ses->server;
-       unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize :
-                               CIFS_DEFAULT_IOSIZE;
+       unsigned int wsize;
+
+       /* start with specified wsize, or default */
+       if (pvolume_info->wsize)
+               wsize = pvolume_info->wsize;
+       else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
+               wsize = CIFS_DEFAULT_IOSIZE;
+       else
+               wsize = CIFS_DEFAULT_NON_POSIX_WSIZE;
 
        /* can server support 24-bit write sizes? (via UNIX extensions) */
        if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP))
@@ -3136,10 +3303,9 @@ cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
                return -EINVAL;
 
        if (volume_info->nullauth) {
-               cFYI(1, "null user");
-               volume_info->username = kzalloc(1, GFP_KERNEL);
-               if (volume_info->username == NULL)
-                       return -ENOMEM;
+               cFYI(1, "Anonymous login");
+               kfree(volume_info->username);
+               volume_info->username = NULL;
        } else if (volume_info->username) {
                /* BB fixme parse for domain name here */
                cFYI(1, "Username: %s", volume_info->username);
@@ -3478,7 +3644,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
        if (ses->capabilities & CAP_UNICODE) {
                smb_buffer->Flags2 |= SMBFLG2_UNICODE;
                length =
-                   cifs_strtoUCS((__le16 *) bcc_ptr, tree,
+                   cifs_strtoUTF16((__le16 *) bcc_ptr, tree,
                        6 /* max utf8 char length in bytes */ *
                        (/* server len*/ + 256 /* share len */), nls_codepage);
                bcc_ptr += 2 * length;  /* convert num 16 bit words to bytes */
@@ -3533,7 +3699,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
 
                /* mostly informational -- no need to fail on error here */
                kfree(tcon->nativeFileSystem);
-               tcon->nativeFileSystem = cifs_strndup_from_ucs(bcc_ptr,
+               tcon->nativeFileSystem = cifs_strndup_from_utf16(bcc_ptr,
                                                      bytes_left, is_unicode,
                                                      nls_codepage);
 
@@ -3657,25 +3823,43 @@ int cifs_setup_session(unsigned int xid, struct cifs_ses *ses,
        return rc;
 }
 
+static int
+cifs_set_vol_auth(struct smb_vol *vol, struct cifs_ses *ses)
+{
+       switch (ses->server->secType) {
+       case Kerberos:
+               vol->secFlg = CIFSSEC_MUST_KRB5;
+               return 0;
+       case NTLMv2:
+               vol->secFlg = CIFSSEC_MUST_NTLMV2;
+               break;
+       case NTLM:
+               vol->secFlg = CIFSSEC_MUST_NTLM;
+               break;
+       case RawNTLMSSP:
+               vol->secFlg = CIFSSEC_MUST_NTLMSSP;
+               break;
+       case LANMAN:
+               vol->secFlg = CIFSSEC_MUST_LANMAN;
+               break;
+       }
+
+       return cifs_set_cifscreds(vol, ses);
+}
+
 static struct cifs_tcon *
 cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
 {
+       int rc;
        struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb);
        struct cifs_ses *ses;
        struct cifs_tcon *tcon = NULL;
        struct smb_vol *vol_info;
-       char username[28]; /* big enough for "krb50x" + hex of ULONG_MAX 6+16 */
-                          /* We used to have this as MAX_USERNAME which is   */
-                          /* way too big now (256 instead of 32) */
 
        vol_info = kzalloc(sizeof(*vol_info), GFP_KERNEL);
-       if (vol_info == NULL) {
-               tcon = ERR_PTR(-ENOMEM);
-               goto out;
-       }
+       if (vol_info == NULL)
+               return ERR_PTR(-ENOMEM);
 
-       snprintf(username, sizeof(username), "krb50x%x", fsuid);
-       vol_info->username = username;
        vol_info->local_nls = cifs_sb->local_nls;
        vol_info->linux_uid = fsuid;
        vol_info->cred_uid = fsuid;
@@ -3685,8 +3869,11 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
        vol_info->local_lease = master_tcon->local_lease;
        vol_info->no_linux_ext = !master_tcon->unix_ext;
 
-       /* FIXME: allow for other secFlg settings */
-       vol_info->secFlg = CIFSSEC_MUST_KRB5;
+       rc = cifs_set_vol_auth(vol_info, master_tcon->ses);
+       if (rc) {
+               tcon = ERR_PTR(rc);
+               goto out;
+       }
 
        /* get a reference for the same TCP session */
        spin_lock(&cifs_tcp_ses_lock);
@@ -3709,6 +3896,8 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid)
        if (ses->capabilities & CAP_UNIX)
                reset_cifs_unix_caps(0, tcon, NULL, vol_info);
 out:
+       kfree(vol_info->username);
+       kfree(vol_info->password);
        kfree(vol_info);
 
        return tcon;
index a090bbe6ee29e196018867c9f5e4da3efe9d82b9..e2bbc683e0184a736509b7a41337b2fda401e4a3 100644 (file)
@@ -647,10 +647,11 @@ static int cifs_filldir(char *find_entry, struct file *file, filldir_t filldir,
 
                name.name = scratch_buf;
                name.len =
-                       cifs_from_ucs2((char *)name.name, (__le16 *)de.name,
-                                      UNICODE_NAME_MAX,
-                                      min(de.namelen, (size_t)max_len), nlt,
-                                      cifs_sb->mnt_cifs_flags &
+                       cifs_from_utf16((char *)name.name, (__le16 *)de.name,
+                                       UNICODE_NAME_MAX,
+                                       min_t(size_t, de.namelen,
+                                             (size_t)max_len), nlt,
+                                       cifs_sb->mnt_cifs_flags &
                                                CIFS_MOUNT_MAP_SPECIAL_CHR);
                name.len -= nls_nullsize(nlt);
        } else {
index 4ec3ee9d72ccc228b9fac929c23b80fc8f314fff..551d0c2b973699634bdd057d3eaf2e19d3b55b8a 100644 (file)
@@ -167,16 +167,16 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp)
        int bytes_ret = 0;
 
        /* Copy OS version */
-       bytes_ret = cifs_strtoUCS((__le16 *)bcc_ptr, "Linux version ", 32,
-                                 nls_cp);
+       bytes_ret = cifs_strtoUTF16((__le16 *)bcc_ptr, "Linux version ", 32,
+                                   nls_cp);
        bcc_ptr += 2 * bytes_ret;
-       bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, init_utsname()->release,
-                                 32, nls_cp);
+       bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, init_utsname()->release,
+                                   32, nls_cp);
        bcc_ptr += 2 * bytes_ret;
        bcc_ptr += 2; /* trailing null */
 
-       bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
-                                 32, nls_cp);
+       bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, CIFS_NETWORK_OPSYS,
+                                   32, nls_cp);
        bcc_ptr += 2 * bytes_ret;
        bcc_ptr += 2; /* trailing null */
 
@@ -197,8 +197,8 @@ static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses,
                *(bcc_ptr+1) = 0;
                bytes_ret = 0;
        } else
-               bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->domainName,
-                                         256, nls_cp);
+               bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->domainName,
+                                           256, nls_cp);
        bcc_ptr += 2 * bytes_ret;
        bcc_ptr += 2;  /* account for null terminator */
 
@@ -226,8 +226,8 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
                *bcc_ptr = 0;
                *(bcc_ptr+1) = 0;
        } else {
-               bytes_ret = cifs_strtoUCS((__le16 *) bcc_ptr, ses->user_name,
-                                         MAX_USERNAME_SIZE, nls_cp);
+               bytes_ret = cifs_strtoUTF16((__le16 *) bcc_ptr, ses->user_name,
+                                           MAX_USERNAME_SIZE, nls_cp);
        }
        bcc_ptr += 2 * bytes_ret;
        bcc_ptr += 2; /* account for null termination */
@@ -246,16 +246,15 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses,
        /* copy user */
        /* BB what about null user mounts - check that we do this BB */
        /* copy user */
-       if (ses->user_name != NULL)
+       if (ses->user_name != NULL) {
                strncpy(bcc_ptr, ses->user_name, MAX_USERNAME_SIZE);
+               bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
+       }
        /* else null user mount */
-
-       bcc_ptr += strnlen(ses->user_name, MAX_USERNAME_SIZE);
        *bcc_ptr = 0;
        bcc_ptr++; /* account for null termination */
 
        /* copy domain */
-
        if (ses->domainName != NULL) {
                strncpy(bcc_ptr, ses->domainName, 256);
                bcc_ptr += strnlen(ses->domainName, 256);
@@ -287,7 +286,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses,
        cFYI(1, "bleft %d", bleft);
 
        kfree(ses->serverOS);
-       ses->serverOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
+       ses->serverOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
        cFYI(1, "serverOS=%s", ses->serverOS);
        len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
        data += len;
@@ -296,7 +295,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses,
                return;
 
        kfree(ses->serverNOS);
-       ses->serverNOS = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
+       ses->serverNOS = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
        cFYI(1, "serverNOS=%s", ses->serverNOS);
        len = (UniStrnlen((wchar_t *) data, bleft / 2) * 2) + 2;
        data += len;
@@ -305,7 +304,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses,
                return;
 
        kfree(ses->serverDomain);
-       ses->serverDomain = cifs_strndup_from_ucs(data, bleft, true, nls_cp);
+       ses->serverDomain = cifs_strndup_from_utf16(data, bleft, true, nls_cp);
        cFYI(1, "serverDomain=%s", ses->serverDomain);
 
        return;
@@ -395,6 +394,10 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
        ses->ntlmssp->server_flags = le32_to_cpu(pblob->NegotiateFlags);
        tioffset = le32_to_cpu(pblob->TargetInfoArray.BufferOffset);
        tilen = le16_to_cpu(pblob->TargetInfoArray.Length);
+       if (tioffset > blob_len || tioffset + tilen > blob_len) {
+               cERROR(1, "tioffset + tilen too high %u + %u", tioffset, tilen);
+               return -EINVAL;
+       }
        if (tilen) {
                ses->auth_key.response = kmalloc(tilen, GFP_KERNEL);
                if (!ses->auth_key.response) {
@@ -502,8 +505,8 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                tmp += 2;
        } else {
                int len;
-               len = cifs_strtoUCS((__le16 *)tmp, ses->domainName,
-                                   MAX_USERNAME_SIZE, nls_cp);
+               len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
+                                     MAX_USERNAME_SIZE, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
                sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->DomainName.Length = cpu_to_le16(len);
@@ -518,8 +521,8 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer,
                tmp += 2;
        } else {
                int len;
-               len = cifs_strtoUCS((__le16 *)tmp, ses->user_name,
-                                   MAX_USERNAME_SIZE, nls_cp);
+               len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
+                                     MAX_USERNAME_SIZE, nls_cp);
                len *= 2; /* unicode is 2 bytes each */
                sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - pbuffer);
                sec_blob->UserName.Length = cpu_to_le16(len);
index 80d850881938d0c0950addc4d97ae4855dadfa4a..d5cd9aa7eacc1cc8f0413b401ae69cf7b959d690 100644 (file)
@@ -213,7 +213,7 @@ E_md4hash(const unsigned char *passwd, unsigned char *p16,
 
        /* Password cannot be longer than 128 characters */
        if (passwd) /* Password must be converted to NT unicode */
-               len = cifs_strtoUCS(wpwd, passwd, 128, codepage);
+               len = cifs_strtoUTF16(wpwd, passwd, 128, codepage);
        else {
                len = 0;
                *wpwd = 0; /* Ensure string is null terminated */
index f65d4455c5e521dbde1a41407f740a2371848046..ef023eef0464c90e2619de6b7978b537270a54e9 100644 (file)
@@ -540,7 +540,7 @@ EXPORT_SYMBOL_GPL(debugfs_create_blob);
  * debugfs_print_regs32 - use seq_print to describe a set of registers
  * @s: the seq_file structure being used to generate output
  * @regs: an array of struct debugfs_reg32 structures
- * @mregs: the length of the above array
+ * @nregs: the length of the above array
  * @base: the base address to be used in reading the registers
  * @prefix: a string to be prefixed to every output line
  *
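
A minimal sketch of how a driver might call the helper documented above; the register names, offsets, and the way the MMIO base reaches s->private are invented for illustration:

#include <linux/debugfs.h>
#include <linux/kernel.h>
#include <linux/seq_file.h>

static struct debugfs_reg32 mydev_regs[] = {
	{ .name = "ctrl",   .offset = 0x00 },
	{ .name = "status", .offset = 0x04 },
};

static int mydev_regs_show(struct seq_file *s, void *unused)
{
	void __iomem *base = s->private;	/* hypothetical: stored when the file was created */

	debugfs_print_regs32(s, mydev_regs, ARRAY_SIZE(mydev_regs), base, "");
	return 0;
}
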
index 2a834255c75de911b7e1f8eb10026972913e14b4..63ab24510649cc3a3604e9dd603904d9676a2794 100644 (file)
@@ -417,17 +417,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
                        (unsigned long long)(extent_base + extent_offset), rc);
                goto out;
        }
-       if (unlikely(ecryptfs_verbosity > 0)) {
-               ecryptfs_printk(KERN_DEBUG, "Encrypting extent "
-                               "with iv:\n");
-               ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
-               ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
-                               "encryption:\n");
-               ecryptfs_dump_hex((char *)
-                                 (page_address(page)
-                                  + (extent_offset * crypt_stat->extent_size)),
-                                 8);
-       }
        rc = ecryptfs_encrypt_page_offset(crypt_stat, enc_extent_page, 0,
                                          page, (extent_offset
                                                 * crypt_stat->extent_size),
@@ -440,14 +429,6 @@ static int ecryptfs_encrypt_extent(struct page *enc_extent_page,
                goto out;
        }
        rc = 0;
-       if (unlikely(ecryptfs_verbosity > 0)) {
-               ecryptfs_printk(KERN_DEBUG, "Encrypt extent [0x%.16llx]; "
-                       "rc = [%d]\n",
-                       (unsigned long long)(extent_base + extent_offset), rc);
-               ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
-                               "encryption:\n");
-               ecryptfs_dump_hex((char *)(page_address(enc_extent_page)), 8);
-       }
 out:
        return rc;
 }
@@ -543,17 +524,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
                        (unsigned long long)(extent_base + extent_offset), rc);
                goto out;
        }
-       if (unlikely(ecryptfs_verbosity > 0)) {
-               ecryptfs_printk(KERN_DEBUG, "Decrypting extent "
-                               "with iv:\n");
-               ecryptfs_dump_hex(extent_iv, crypt_stat->iv_bytes);
-               ecryptfs_printk(KERN_DEBUG, "First 8 bytes before "
-                               "decryption:\n");
-               ecryptfs_dump_hex((char *)
-                                 (page_address(enc_extent_page)
-                                  + (extent_offset * crypt_stat->extent_size)),
-                                 8);
-       }
        rc = ecryptfs_decrypt_page_offset(crypt_stat, page,
                                          (extent_offset
                                           * crypt_stat->extent_size),
@@ -567,16 +537,6 @@ static int ecryptfs_decrypt_extent(struct page *page,
                goto out;
        }
        rc = 0;
-       if (unlikely(ecryptfs_verbosity > 0)) {
-               ecryptfs_printk(KERN_DEBUG, "Decrypt extent [0x%.16llx]; "
-                       "rc = [%d]\n",
-                       (unsigned long long)(extent_base + extent_offset), rc);
-               ecryptfs_printk(KERN_DEBUG, "First 8 bytes after "
-                               "decryption:\n");
-               ecryptfs_dump_hex((char *)(page_address(page)
-                                          + (extent_offset
-                                             * crypt_stat->extent_size)), 8);
-       }
 out:
        return rc;
 }
@@ -1590,8 +1550,8 @@ int ecryptfs_read_and_validate_xattr_region(struct dentry *dentry,
  */
 int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
 {
-       int rc = 0;
-       char *page_virt = NULL;
+       int rc;
+       char *page_virt;
        struct inode *ecryptfs_inode = ecryptfs_dentry->d_inode;
        struct ecryptfs_crypt_stat *crypt_stat =
            &ecryptfs_inode_to_private(ecryptfs_inode)->crypt_stat;
@@ -1616,11 +1576,13 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
                                                ecryptfs_dentry,
                                                ECRYPTFS_VALIDATE_HEADER_SIZE);
        if (rc) {
+               /* metadata is not in the file header, so try xattrs */
                memset(page_virt, 0, PAGE_CACHE_SIZE);
                rc = ecryptfs_read_xattr_region(page_virt, ecryptfs_inode);
                if (rc) {
                        printk(KERN_DEBUG "Valid eCryptfs headers not found in "
-                              "file header region or xattr region\n");
+                              "file header region or xattr region, inode %lu\n",
+                               ecryptfs_inode->i_ino);
                        rc = -EINVAL;
                        goto out;
                }
@@ -1629,7 +1591,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
                                                ECRYPTFS_DONT_VALIDATE_HEADER_SIZE);
                if (rc) {
                        printk(KERN_DEBUG "Valid eCryptfs headers not found in "
-                              "file xattr region either\n");
+                              "file xattr region either, inode %lu\n",
+                               ecryptfs_inode->i_ino);
                        rc = -EINVAL;
                }
                if (crypt_stat->mount_crypt_stat->flags
@@ -1640,7 +1603,8 @@ int ecryptfs_read_metadata(struct dentry *ecryptfs_dentry)
                               "crypto metadata only in the extended attribute "
                               "region, but eCryptfs was mounted without "
                               "xattr support enabled. eCryptfs will not treat "
-                              "this like an encrypted file.\n");
+                              "this like an encrypted file, inode %lu\n",
+                               ecryptfs_inode->i_ino);
                        rc = -EINVAL;
                }
        }
index a9f29b12fbf290ba4987f778e582357d38ae1258..a2362df58ae8dfcebd95b78503bbb4f095506678 100644 (file)
@@ -151,6 +151,11 @@ ecryptfs_get_key_payload_data(struct key *key)
                                          * dentry name */
 #define ECRYPTFS_TAG_73_PACKET_TYPE 0x49 /* FEK-encrypted filename as
                                          * metadata */
+#define ECRYPTFS_MIN_PKT_LEN_SIZE 1 /* Min size to specify packet length */
+#define ECRYPTFS_MAX_PKT_LEN_SIZE 2 /* Pass at least this many bytes to
+                                    * ecryptfs_parse_packet_length() and
+                                    * ecryptfs_write_packet_length()
+                                    */
 /* Constraint: ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES >=
  * ECRYPTFS_MAX_IV_BYTES */
 #define ECRYPTFS_FILENAME_MIN_RANDOM_PREPEND_BYTES 16
index 19a8ca4ab1ddc54bf4c8f389ef0dfcf8ce52a824..19892d7d2ed1122547536d5bee21cff9293e7a8a 100644 (file)
@@ -822,18 +822,6 @@ static int truncate_upper(struct dentry *dentry, struct iattr *ia,
                size_t num_zeros = (PAGE_CACHE_SIZE
                                    - (ia->ia_size & ~PAGE_CACHE_MASK));
 
-
-               /*
-                * XXX(truncate) this should really happen at the begginning
-                * of ->setattr.  But the code is too messy to that as part
-                * of a larger patch.  ecryptfs is also totally missing out
-                * on the inode_change_ok check at the beginning of
-                * ->setattr while would include this.
-                */
-               rc = inode_newsize_ok(inode, ia->ia_size);
-               if (rc)
-                       goto out;
-
                if (!(crypt_stat->flags & ECRYPTFS_ENCRYPTED)) {
                        truncate_setsize(inode, ia->ia_size);
                        lower_ia->ia_size = ia->ia_size;
@@ -883,6 +871,28 @@ out:
        return rc;
 }
 
+static int ecryptfs_inode_newsize_ok(struct inode *inode, loff_t offset)
+{
+       struct ecryptfs_crypt_stat *crypt_stat;
+       loff_t lower_oldsize, lower_newsize;
+
+       crypt_stat = &ecryptfs_inode_to_private(inode)->crypt_stat;
+       lower_oldsize = upper_size_to_lower_size(crypt_stat,
+                                                i_size_read(inode));
+       lower_newsize = upper_size_to_lower_size(crypt_stat, offset);
+       if (lower_newsize > lower_oldsize) {
+               /*
+                * The eCryptfs inode and the new *lower* size are mixed here
+                * because we may not have the lower i_mutex held and/or it may
+                * not be appropriate to call inode_newsize_ok() with inodes
+                * from other filesystems.
+                */
+               return inode_newsize_ok(inode, lower_newsize);
+       }
+
+       return 0;
+}
+
 /**
  * ecryptfs_truncate
  * @dentry: The ecryptfs layer dentry
@@ -899,6 +909,10 @@ int ecryptfs_truncate(struct dentry *dentry, loff_t new_length)
        struct iattr lower_ia = { .ia_valid = 0 };
        int rc;
 
+       rc = ecryptfs_inode_newsize_ok(dentry->d_inode, new_length);
+       if (rc)
+               return rc;
+
        rc = truncate_upper(dentry, &ia, &lower_ia);
        if (!rc && lower_ia.ia_valid & ATTR_SIZE) {
                struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
@@ -978,6 +992,16 @@ static int ecryptfs_setattr(struct dentry *dentry, struct iattr *ia)
                }
        }
        mutex_unlock(&crypt_stat->cs_mutex);
+
+       rc = inode_change_ok(inode, ia);
+       if (rc)
+               goto out;
+       if (ia->ia_valid & ATTR_SIZE) {
+               rc = ecryptfs_inode_newsize_ok(inode, ia->ia_size);
+               if (rc)
+                       goto out;
+       }
+
        if (S_ISREG(inode->i_mode)) {
                rc = filemap_write_and_wait(inode->i_mapping);
                if (rc)
index ac1ad48c2376df4b3061e55a973b27b42388f510..8e3b943e330f5b1bd25953d07ba8e9c3d9baaf4c 100644 (file)
@@ -109,7 +109,7 @@ int ecryptfs_parse_packet_length(unsigned char *data, size_t *size,
                (*size) += ((unsigned char)(data[1]) + 192);
                (*length_size) = 2;
        } else if (data[0] == 255) {
-               /* Five-byte length; we're not supposed to see this */
+               /* If support is added, adjust ECRYPTFS_MAX_PKT_LEN_SIZE */
                ecryptfs_printk(KERN_ERR, "Five-byte packet length not "
                                "supported\n");
                rc = -EINVAL;
@@ -126,7 +126,7 @@ out:
 /**
  * ecryptfs_write_packet_length
  * @dest: The byte array target into which to write the length. Must
- *        have at least 5 bytes allocated.
+ *        have at least ECRYPTFS_MAX_PKT_LEN_SIZE bytes allocated.
  * @size: The length to write.
  * @packet_size_length: The number of bytes used to encode the packet
  *                      length is written to this address.
@@ -146,6 +146,7 @@ int ecryptfs_write_packet_length(char *dest, size_t size,
                dest[1] = ((size - 192) % 256);
                (*packet_size_length) = 2;
        } else {
+               /* If support is added, adjust ECRYPTFS_MAX_PKT_LEN_SIZE */
                rc = -EINVAL;
                ecryptfs_printk(KERN_WARNING,
                                "Unsupported packet size: [%zd]\n", size);
index 940a82e63dc3fcdc52797f9176842fdbf391d272..349209dc6a9162d18b7da50c891a389aa9c648d8 100644 (file)
@@ -218,6 +218,29 @@ out_unlock:
        return rc;
 }
 
+/*
+ * miscdevfs packet format:
+ *  Octet 0: Type
+ *  Octets 1-4: network byte order msg_ctx->counter
+ *  Octets 5-N0: Size of struct ecryptfs_message to follow
+ *  Octets N0-N1: struct ecryptfs_message (including data)
+ *
+ *  Octets 5-N1 not written if the packet type does not include a message
+ */
+#define PKT_TYPE_SIZE          1
+#define PKT_CTR_SIZE           4
+#define MIN_NON_MSG_PKT_SIZE   (PKT_TYPE_SIZE + PKT_CTR_SIZE)
+#define MIN_MSG_PKT_SIZE       (PKT_TYPE_SIZE + PKT_CTR_SIZE \
+                                + ECRYPTFS_MIN_PKT_LEN_SIZE)
+/* 4 + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES comes from tag 65 packet format */
+#define MAX_MSG_PKT_SIZE       (PKT_TYPE_SIZE + PKT_CTR_SIZE \
+                                + ECRYPTFS_MAX_PKT_LEN_SIZE \
+                                + sizeof(struct ecryptfs_message) \
+                                + 4 + ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES)
+#define PKT_TYPE_OFFSET                0
+#define PKT_CTR_OFFSET         PKT_TYPE_SIZE
+#define PKT_LEN_OFFSET         (PKT_TYPE_SIZE + PKT_CTR_SIZE)
+
 /**
  * ecryptfs_miscdev_read - format and send message from queue
  * @file: fs/ecryptfs/euid miscdevfs handle (ignored)
@@ -237,7 +260,7 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
        struct ecryptfs_daemon *daemon;
        struct ecryptfs_msg_ctx *msg_ctx;
        size_t packet_length_size;
-       char packet_length[3];
+       char packet_length[ECRYPTFS_MAX_PKT_LEN_SIZE];
        size_t i;
        size_t total_length;
        uid_t euid = current_euid();
@@ -305,15 +328,8 @@ check_list:
                packet_length_size = 0;
                msg_ctx->msg_size = 0;
        }
-       /* miscdevfs packet format:
-        *  Octet 0: Type
-        *  Octets 1-4: network byte order msg_ctx->counter
-        *  Octets 5-N0: Size of struct ecryptfs_message to follow
-        *  Octets N0-N1: struct ecryptfs_message (including data)
-        *
-        *  Octets 5-N1 not written if the packet type does not
-        *  include a message */
-       total_length = (1 + 4 + packet_length_size + msg_ctx->msg_size);
+       total_length = (PKT_TYPE_SIZE + PKT_CTR_SIZE + packet_length_size
+                       + msg_ctx->msg_size);
        if (count < total_length) {
                rc = 0;
                printk(KERN_WARNING "%s: Only given user buffer of "
@@ -324,9 +340,10 @@ check_list:
        rc = -EFAULT;
        if (put_user(msg_ctx->type, buf))
                goto out_unlock_msg_ctx;
-       if (put_user(cpu_to_be32(msg_ctx->counter), (__be32 __user *)(buf + 1)))
+       if (put_user(cpu_to_be32(msg_ctx->counter),
+                    (__be32 __user *)(&buf[PKT_CTR_OFFSET])))
                goto out_unlock_msg_ctx;
-       i = 5;
+       i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
        if (msg_ctx->msg) {
                if (copy_to_user(&buf[i], packet_length, packet_length_size))
                        goto out_unlock_msg_ctx;
@@ -391,12 +408,6 @@ out:
  * @count: Amount of data in @buf
  * @ppos: Pointer to offset in file (ignored)
  *
- * miscdevfs packet format:
- *  Octet 0: Type
- *  Octets 1-4: network byte order msg_ctx->counter (0's for non-response)
- *  Octets 5-N0: Size of struct ecryptfs_message to follow
- *  Octets N0-N1: struct ecryptfs_message (including data)
- *
  * Returns the number of bytes read from @buf
  */
 static ssize_t
@@ -405,60 +416,78 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
 {
        __be32 counter_nbo;
        u32 seq;
-       size_t packet_size, packet_size_length, i;
-       ssize_t sz = 0;
+       size_t packet_size, packet_size_length;
        char *data;
        uid_t euid = current_euid();
-       int rc;
+       unsigned char packet_size_peek[ECRYPTFS_MAX_PKT_LEN_SIZE];
+       ssize_t rc;
 
-       if (count == 0)
-               goto out;
+       if (count == 0) {
+               return 0;
+       } else if (count == MIN_NON_MSG_PKT_SIZE) {
+               /* Likely a harmless MSG_HELO or MSG_QUIT - no packet length */
+               goto memdup;
+       } else if (count < MIN_MSG_PKT_SIZE || count > MAX_MSG_PKT_SIZE) {
+               printk(KERN_WARNING "%s: Acceptable packet size range is "
+                      "[%d-%lu], but amount of data written is [%zu].",
+                      __func__, MIN_MSG_PKT_SIZE, MAX_MSG_PKT_SIZE, count);
+               return -EINVAL;
+       }
+
+       if (copy_from_user(packet_size_peek, &buf[PKT_LEN_OFFSET],
+                          sizeof(packet_size_peek))) {
+               printk(KERN_WARNING "%s: Error while inspecting packet size\n",
+                      __func__);
+               return -EFAULT;
+       }
 
+       rc = ecryptfs_parse_packet_length(packet_size_peek, &packet_size,
+                                         &packet_size_length);
+       if (rc) {
+               printk(KERN_WARNING "%s: Error parsing packet length; "
+                      "rc = [%zd]\n", __func__, rc);
+               return rc;
+       }
+
+       if ((PKT_TYPE_SIZE + PKT_CTR_SIZE + packet_size_length + packet_size)
+           != count) {
+               printk(KERN_WARNING "%s: Invalid packet size [%zu]\n", __func__,
+                      packet_size);
+               return -EINVAL;
+       }
+
+memdup:
        data = memdup_user(buf, count);
        if (IS_ERR(data)) {
                printk(KERN_ERR "%s: memdup_user returned error [%ld]\n",
                       __func__, PTR_ERR(data));
-               goto out;
+               return PTR_ERR(data);
        }
-       sz = count;
-       i = 0;
-       switch (data[i++]) {
+       switch (data[PKT_TYPE_OFFSET]) {
        case ECRYPTFS_MSG_RESPONSE:
-               if (count < (1 + 4 + 1 + sizeof(struct ecryptfs_message))) {
+               if (count < (MIN_MSG_PKT_SIZE
+                            + sizeof(struct ecryptfs_message))) {
                        printk(KERN_WARNING "%s: Minimum acceptable packet "
                               "size is [%zd], but amount of data written is "
                               "only [%zd]. Discarding response packet.\n",
                               __func__,
-                              (1 + 4 + 1 + sizeof(struct ecryptfs_message)),
-                              count);
+                              (MIN_MSG_PKT_SIZE
+                               + sizeof(struct ecryptfs_message)), count);
+                       rc = -EINVAL;
                        goto out_free;
                }
-               memcpy(&counter_nbo, &data[i], 4);
+               memcpy(&counter_nbo, &data[PKT_CTR_OFFSET], PKT_CTR_SIZE);
                seq = be32_to_cpu(counter_nbo);
-               i += 4;
-               rc = ecryptfs_parse_packet_length(&data[i], &packet_size,
-                                                 &packet_size_length);
+               rc = ecryptfs_miscdev_response(
+                               &data[PKT_LEN_OFFSET + packet_size_length],
+                               packet_size, euid, current_user_ns(),
+                               task_pid(current), seq);
                if (rc) {
-                       printk(KERN_WARNING "%s: Error parsing packet length; "
-                              "rc = [%d]\n", __func__, rc);
-                       goto out_free;
-               }
-               i += packet_size_length;
-               if ((1 + 4 + packet_size_length + packet_size) != count) {
-                       printk(KERN_WARNING "%s: (1 + packet_size_length([%zd])"
-                              " + packet_size([%zd]))([%zd]) != "
-                              "count([%zd]). Invalid packet format.\n",
-                              __func__, packet_size_length, packet_size,
-                              (1 + packet_size_length + packet_size), count);
-                       goto out_free;
-               }
-               rc = ecryptfs_miscdev_response(&data[i], packet_size,
-                                              euid, current_user_ns(),
-                                              task_pid(current), seq);
-               if (rc)
                        printk(KERN_WARNING "%s: Failed to deliver miscdev "
-                              "response to requesting operation; rc = [%d]\n",
+                              "response to requesting operation; rc = [%zd]\n",
                               __func__, rc);
+                       goto out_free;
+               }
                break;
        case ECRYPTFS_MSG_HELO:
        case ECRYPTFS_MSG_QUIT:
@@ -467,12 +496,13 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
                ecryptfs_printk(KERN_WARNING, "Dropping miscdev "
                                "message of unrecognized type [%d]\n",
                                data[0]);
-               break;
+               rc = -EINVAL;
+               goto out_free;
        }
+       rc = count;
 out_free:
        kfree(data);
-out:
-       return sz;
+       return rc;
 }
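
For the read direction, the comment block added near the top of this file fixes the header layout: a type octet, a four-byte big-endian counter, then the counted length and the message. A hypothetical userspace-side decoder for that header is sketched below; struct ecryptfs_message and the misc device node name are not shown in this diff, so the message body is treated as an opaque payload and all names here are illustrative, not part of the patch.

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>

struct miscdev_hdr {
	uint8_t  type;		/* octet 0 */
	uint32_t counter;	/* octets 1-4, converted to host order */
	size_t   msg_size;	/* 0 for HELO/QUIT-style packets */
	size_t   payload_off;	/* where the opaque message body starts */
};

static int decode_miscdev_hdr(const unsigned char *buf, size_t count,
			      struct miscdev_hdr *hdr)
{
	uint32_t ctr_nbo;

	if (count < 1 + 4)			/* PKT_TYPE_SIZE + PKT_CTR_SIZE */
		return -1;
	hdr->type = buf[0];
	memcpy(&ctr_nbo, &buf[1], 4);
	hdr->counter = ntohl(ctr_nbo);
	if (count == 1 + 4) {			/* no length, no message */
		hdr->msg_size = 0;
		hdr->payload_off = 5;
		return 0;
	}
	/* Counted length, same format as ecryptfs_parse_packet_length() */
	if (buf[5] < 192) {
		hdr->msg_size = buf[5];
		hdr->payload_off = 6;
	} else if (buf[5] < 255 && count >= 7) {
		hdr->msg_size = (size_t)(buf[5] - 192) * 256 + buf[6] + 192;
		hdr->payload_off = 7;
	} else {
		return -1;			/* five-byte lengths unsupported */
	}
	return count >= hdr->payload_off + hdr->msg_size ? 0 : -1;
}

A daemon polling the misc device would run this over each read() buffer before interpreting the message body.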
 
 
index 6a44148c5fb97e3cfb866a024de27dbea2fbff39..10ec695ccd6832fc1c2293b2e1e83ffe524e9855 100644 (file)
@@ -57,6 +57,10 @@ struct page *ecryptfs_get_locked_page(struct inode *inode, loff_t index)
  * @page: Page that is locked before this call is made
  *
  * Returns zero on success; non-zero otherwise
+ *
+ * This is where we encrypt the data and pass the encrypted data to
+ * the lower filesystem.  In OpenPGP-compatible mode, we operate on
+ * entire underlying packets.
  */
 static int ecryptfs_writepage(struct page *page, struct writeback_control *wbc)
 {
@@ -481,10 +485,6 @@ int ecryptfs_write_inode_size_to_metadata(struct inode *ecryptfs_inode)
  * @copied: The amount of data copied
  * @page: The eCryptfs page
  * @fsdata: The fsdata (unused)
- *
- * This is where we encrypt the data and pass the encrypted data to
- * the lower filesystem.  In OpenPGP-compatible mode, we operate on
- * entire underlying packets.
  */
 static int ecryptfs_write_end(struct file *file,
                        struct address_space *mapping,
index 3745f7c2b9c214756b778ab40a00af325c0e9e99..5c0106f757756e95d29fefba0f5d66c702200a29 100644 (file)
@@ -130,13 +130,18 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
                pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
                size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
                size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
-               size_t total_remaining_bytes = ((offset + size) - pos);
+               loff_t total_remaining_bytes = ((offset + size) - pos);
+
+               if (fatal_signal_pending(current)) {
+                       rc = -EINTR;
+                       break;
+               }
 
                if (num_bytes > total_remaining_bytes)
                        num_bytes = total_remaining_bytes;
                if (pos < offset) {
                        /* remaining zeros to write, up to destination offset */
-                       size_t total_remaining_zeros = (offset - pos);
+                       loff_t total_remaining_zeros = (offset - pos);
 
                        if (num_bytes > total_remaining_zeros)
                                num_bytes = total_remaining_zeros;
@@ -193,15 +198,19 @@ int ecryptfs_write(struct inode *ecryptfs_inode, char *data, loff_t offset,
                }
                pos += num_bytes;
        }
-       if ((offset + size) > ecryptfs_file_size) {
-               i_size_write(ecryptfs_inode, (offset + size));
+       if (pos > ecryptfs_file_size) {
+               i_size_write(ecryptfs_inode, pos);
                if (crypt_stat->flags & ECRYPTFS_ENCRYPTED) {
-                       rc = ecryptfs_write_inode_size_to_metadata(
+                       int rc2;
+
+                       rc2 = ecryptfs_write_inode_size_to_metadata(
                                                                ecryptfs_inode);
-                       if (rc) {
+                       if (rc2) {
                                printk(KERN_ERR "Problem with "
                                       "ecryptfs_write_inode_size_to_metadata; "
-                                      "rc = [%d]\n", rc);
+                                      "rc = [%d]\n", rc2);
+                               if (!rc)
+                                       rc = rc2;
                                goto out;
                        }
                }
@@ -273,76 +282,3 @@ int ecryptfs_read_lower_page_segment(struct page *page_for_ecryptfs,
        flush_dcache_page(page_for_ecryptfs);
        return rc;
 }
-
-#if 0
-/**
- * ecryptfs_read
- * @data: The virtual address into which to write the data read (and
- *        possibly decrypted) from the lower file
- * @offset: The offset in the decrypted view of the file from which to
- *          read into @data
- * @size: The number of bytes to read into @data
- * @ecryptfs_file: The eCryptfs file from which to read
- *
- * Read an arbitrary amount of data from an arbitrary location in the
- * eCryptfs page cache. This is done on an extent-by-extent basis;
- * individual extents are decrypted and read from the lower page
- * cache (via VFS reads). This function takes care of all the
- * address translation to locations in the lower filesystem.
- *
- * Returns zero on success; non-zero otherwise
- */
-int ecryptfs_read(char *data, loff_t offset, size_t size,
-                 struct file *ecryptfs_file)
-{
-       struct inode *ecryptfs_inode = ecryptfs_file->f_dentry->d_inode;
-       struct page *ecryptfs_page;
-       char *ecryptfs_page_virt;
-       loff_t ecryptfs_file_size = i_size_read(ecryptfs_inode);
-       loff_t data_offset = 0;
-       loff_t pos;
-       int rc = 0;
-
-       if ((offset + size) > ecryptfs_file_size) {
-               rc = -EINVAL;
-               printk(KERN_ERR "%s: Attempt to read data past the end of the "
-                       "file; offset = [%lld]; size = [%td]; "
-                      "ecryptfs_file_size = [%lld]\n",
-                      __func__, offset, size, ecryptfs_file_size);
-               goto out;
-       }
-       pos = offset;
-       while (pos < (offset + size)) {
-               pgoff_t ecryptfs_page_idx = (pos >> PAGE_CACHE_SHIFT);
-               size_t start_offset_in_page = (pos & ~PAGE_CACHE_MASK);
-               size_t num_bytes = (PAGE_CACHE_SIZE - start_offset_in_page);
-               size_t total_remaining_bytes = ((offset + size) - pos);
-
-               if (num_bytes > total_remaining_bytes)
-                       num_bytes = total_remaining_bytes;
-               ecryptfs_page = ecryptfs_get_locked_page(ecryptfs_inode,
-                                                        ecryptfs_page_idx);
-               if (IS_ERR(ecryptfs_page)) {
-                       rc = PTR_ERR(ecryptfs_page);
-                       printk(KERN_ERR "%s: Error getting page at "
-                              "index [%ld] from eCryptfs inode "
-                              "mapping; rc = [%d]\n", __func__,
-                              ecryptfs_page_idx, rc);
-                       goto out;
-               }
-               ecryptfs_page_virt = kmap_atomic(ecryptfs_page, KM_USER0);
-               memcpy((data + data_offset),
-                      ((char *)ecryptfs_page_virt + start_offset_in_page),
-                      num_bytes);
-               kunmap_atomic(ecryptfs_page_virt, KM_USER0);
-               flush_dcache_page(ecryptfs_page);
-               SetPageUptodate(ecryptfs_page);
-               unlock_page(ecryptfs_page);
-               page_cache_release(ecryptfs_page);
-               pos += num_bytes;
-               data_offset += num_bytes;
-       }
-out:
-       return rc;
-}
-#endif  /*  0  */
index aeb135c7ff5c0c1a6f1a1dfe3b5f2018dfbd6731..92ce83a11e90acbb0bceda45b682a83535089342 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1071,6 +1071,21 @@ void set_task_comm(struct task_struct *tsk, char *buf)
        perf_event_comm(tsk);
 }
 
+static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
+{
+       int i, ch;
+
+       /* Copies the binary name from after the last slash */
+       for (i = 0; (ch = *(fn++)) != '\0';) {
+               if (ch == '/')
+                       i = 0; /* overwrite what we wrote */
+               else
+                       if (i < len - 1)
+                               tcomm[i++] = ch;
+       }
+       tcomm[i] = '\0';
+}
+
 int flush_old_exec(struct linux_binprm * bprm)
 {
        int retval;
@@ -1085,6 +1100,7 @@ int flush_old_exec(struct linux_binprm * bprm)
 
        set_mm_exe_file(bprm->mm, bprm->file);
 
+       filename_to_taskname(bprm->tcomm, bprm->filename, sizeof(bprm->tcomm));
        /*
         * Release all of the old mmap stuff
         */
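
A minimal userspace illustration of the truncation behaviour of the filename_to_taskname() helper added above: only the component after the last '/' survives, clipped to len - 1 characters plus the terminating NUL. The main() and sample path are illustrative; the 16-byte buffer mirrors sizeof(current->comm) (TASK_COMM_LEN) in the kernel.

#include <stdio.h>

static void filename_to_taskname(char *tcomm, const char *fn, unsigned int len)
{
	int i, ch;

	/* Copies the binary name from after the last slash */
	for (i = 0; (ch = *(fn++)) != '\0';) {
		if (ch == '/')
			i = 0;			/* overwrite what we wrote */
		else if (i < len - 1)
			tcomm[i++] = ch;
	}
	tcomm[i] = '\0';
}

int main(void)
{
	char comm[16];		/* TASK_COMM_LEN is 16 in the kernel */

	filename_to_taskname(comm, "/usr/local/bin/very-long-program-name",
			     sizeof(comm));
	printf("%s\n", comm);	/* prints "very-long-progr" (15 chars + NUL) */
	return 0;
}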
@@ -1116,10 +1132,6 @@ EXPORT_SYMBOL(would_dump);
 
 void setup_new_exec(struct linux_binprm * bprm)
 {
-       int i, ch;
-       const char *name;
-       char tcomm[sizeof(current->comm)];
-
        arch_pick_mmap_layout(current->mm);
 
        /* This is the point of no return */
@@ -1130,18 +1142,7 @@ void setup_new_exec(struct linux_binprm * bprm)
        else
                set_dumpable(current->mm, suid_dumpable);
 
-       name = bprm->filename;
-
-       /* Copies the binary name from after last slash */
-       for (i=0; (ch = *(name++)) != '\0';) {
-               if (ch == '/')
-                       i = 0; /* overwrite what we wrote */
-               else
-                       if (i < (sizeof(tcomm) - 1))
-                               tcomm[i++] = ch;
-       }
-       tcomm[i] = '\0';
-       set_task_comm(current, tcomm);
+       set_task_comm(current, bprm->tcomm);
 
        /* Set the new mm task size. We have to do that late because it may
         * depend on TIF_32BIT which is only updated in flush_thread() on
index 1089f760c8470f25c0cac5744c880de9377775d5..2de655f5d625365a3aed996d00c0a73d4052559d 100644 (file)
@@ -77,10 +77,11 @@ long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
                flags = flags & EXT2_FL_USER_MODIFIABLE;
                flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
                ei->i_flags = flags;
-               mutex_unlock(&inode->i_mutex);
 
                ext2_set_inode_flags(inode);
                inode->i_ctime = CURRENT_TIME_SEC;
+               mutex_unlock(&inode->i_mutex);
+
                mark_inode_dirty(inode);
 setflags_out:
                mnt_drop_write_file(filp);
@@ -88,20 +89,29 @@ setflags_out:
        }
        case EXT2_IOC_GETVERSION:
                return put_user(inode->i_generation, (int __user *) arg);
-       case EXT2_IOC_SETVERSION:
+       case EXT2_IOC_SETVERSION: {
+               __u32 generation;
+
                if (!inode_owner_or_capable(inode))
                        return -EPERM;
                ret = mnt_want_write_file(filp);
                if (ret)
                        return ret;
-               if (get_user(inode->i_generation, (int __user *) arg)) {
+               if (get_user(generation, (int __user *) arg)) {
                        ret = -EFAULT;
-               } else {
-                       inode->i_ctime = CURRENT_TIME_SEC;
-                       mark_inode_dirty(inode);
+                       goto setversion_out;
                }
+
+               mutex_lock(&inode->i_mutex);
+               inode->i_ctime = CURRENT_TIME_SEC;
+               inode->i_generation = generation;
+               mutex_unlock(&inode->i_mutex);
+
+               mark_inode_dirty(inode);
+setversion_out:
                mnt_drop_write_file(filp);
                return ret;
+       }
        case EXT2_IOC_GETRSVSZ:
                if (test_opt(inode->i_sb, RESERVATION)
                        && S_ISREG(inode->i_mode)
index 4fa4f0916af9047ef57d8db43315244d92fef427..fb10d86ffad70f8c4ded56ff664470d71f06dc48 100644 (file)
@@ -322,9 +322,6 @@ EXPORT_SYMBOL(clear_nlink);
 void set_nlink(struct inode *inode, unsigned int nlink)
 {
        if (!nlink) {
-               printk_ratelimited(KERN_INFO
-                       "set_nlink() clearing i_nlink on %s inode %li\n",
-                       inode->i_sb->s_type->name, inode->i_ino);
                clear_nlink(inode);
        } else {
                /* Yes, some filesystems do change nlink from zero to one */
index 5d1a00a5041b35b34fced354b5bdd98f508d0dd9..05f0754f2b466f1446b8501331bc5b698f34f8a3 100644 (file)
@@ -453,8 +453,6 @@ out:
  *
  * Return <0 on error, 0 on success, 1 if there was nothing to clean up.
  *
- * Called with the journal lock held.
- *
  * This is the only part of the journaling code which really needs to be
  * aware of transaction aborts.  Checkpointing involves writing to the
  * main filesystem area rather than to the journal, so it can proceed
@@ -472,13 +470,14 @@ int cleanup_journal_tail(journal_t *journal)
        if (is_journal_aborted(journal))
                return 1;
 
-       /* OK, work out the oldest transaction remaining in the log, and
+       /*
+        * OK, work out the oldest transaction remaining in the log, and
         * the log block it starts at.
         *
         * If the log is now empty, we need to work out which is the
         * next transaction ID we will write, and where it will
-        * start. */
-
+        * start.
+        */
        spin_lock(&journal->j_state_lock);
        spin_lock(&journal->j_list_lock);
        transaction = journal->j_checkpoint_transactions;
@@ -504,7 +503,25 @@ int cleanup_journal_tail(journal_t *journal)
                spin_unlock(&journal->j_state_lock);
                return 1;
        }
+       spin_unlock(&journal->j_state_lock);
+
+       /*
+        * We need to make sure that any blocks that were recently written out
+        * --- perhaps by log_do_checkpoint() --- are flushed out before we
+        * drop the transactions from the journal. It's unlikely this will be
+        * necessary, especially with an appropriately sized journal, but we
+        * need this to guarantee correctness.  Fortunately
+        * cleanup_journal_tail() doesn't get called all that often.
+        */
+       if (journal->j_flags & JFS_BARRIER)
+               blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
 
+       spin_lock(&journal->j_state_lock);
+       if (!tid_gt(first_tid, journal->j_tail_sequence)) {
+               spin_unlock(&journal->j_state_lock);
+               /* Someone else cleaned up journal so return 0 */
+               return 0;
+       }
        /* OK, update the superblock to recover the freed space.
         * Physical blocks come first: have we wrapped beyond the end of
         * the log?  */
index 5b43e96788e6553ec0a716baa45e1a1ac509e838..008bf062fd26e1bdfe0de195159c66aeb9fa1aab 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/fs.h>
 #include <linux/jbd.h>
 #include <linux/errno.h>
+#include <linux/blkdev.h>
 #endif
 
 /*
@@ -263,6 +264,9 @@ int journal_recover(journal_t *journal)
        err2 = sync_blockdev(journal->j_fs_dev);
        if (!err)
                err = err2;
+       /* Flush disk caches to get replayed data on the permanent storage */
+       if (journal->j_flags & JFS_BARRIER)
+               blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL);
 
        return err;
 }
index a01cdad6aad1810f0323d6494865f7540d50421c..eafb8d37a6fb89173d26ecf527a3f69433a2565f 100644 (file)
@@ -335,7 +335,7 @@ static int jffs2_block_check_erase(struct jffs2_sb_info *c, struct jffs2_erasebl
        void *ebuf;
        uint32_t ofs;
        size_t retlen;
-       int ret = -EIO;
+       int ret;
        unsigned long *wordebuf;
 
        ret = mtd_point(c->mtd, jeb->offset, c->sector_size, &retlen,
index e97404d611e0abf4f627d6c080aea0c0739422f0..9c501449450dc9be6891e5d9c1a035ca31b5687b 100644 (file)
@@ -152,9 +152,6 @@ static struct page *logfs_mtd_find_first_sb(struct super_block *sb, u64 *ofs)
        filler_t *filler = logfs_mtd_readpage;
        struct mtd_info *mtd = super->s_mtd;
 
-       if (!mtd_can_have_bb(mtd))
-               return NULL;
-
        *ofs = 0;
        while (mtd_block_isbad(mtd, *ofs)) {
                *ofs += mtd->erasesize;
@@ -172,9 +169,6 @@ static struct page *logfs_mtd_find_last_sb(struct super_block *sb, u64 *ofs)
        filler_t *filler = logfs_mtd_readpage;
        struct mtd_info *mtd = super->s_mtd;
 
-       if (!mtd_can_have_bb(mtd))
-               return NULL;
-
        *ofs = mtd->size - mtd->erasesize;
        while (mtd_block_isbad(mtd, *ofs)) {
                *ofs -= mtd->erasesize;
index 501043e8966ce010bd77412c767461e99da411e8..3de7a32cadbe109fb7d270235f20df1314e663de 100644 (file)
@@ -71,7 +71,7 @@ static int write_dir(struct inode *dir, struct logfs_disk_dentry *dd,
 
 static int write_inode(struct inode *inode)
 {
-       return __logfs_write_inode(inode, WF_LOCK);
+       return __logfs_write_inode(inode, NULL, WF_LOCK);
 }
 
 static s64 dir_seek_data(struct inode *inode, s64 pos)
index b548c87a86f1dbe6ff3b15b185fa432697ea0096..3886cded283c4f355f1533712efc12c5e3798c61 100644 (file)
@@ -230,7 +230,9 @@ int logfs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
                return ret;
 
        mutex_lock(&inode->i_mutex);
+       logfs_get_wblocks(sb, NULL, WF_LOCK);
        logfs_write_anchor(sb);
+       logfs_put_wblocks(sb, NULL, WF_LOCK);
        mutex_unlock(&inode->i_mutex);
 
        return 0;
index caa4419285dcac78e1743a0e2e602eadd57b19bb..d4efb061bdc5d1dd62ecddcb7575cf891e3b792e 100644 (file)
@@ -367,7 +367,7 @@ static struct gc_candidate *get_candidate(struct super_block *sb)
        int i, max_dist;
        struct gc_candidate *cand = NULL, *this;
 
-       max_dist = min(no_free_segments(sb), LOGFS_NO_AREAS);
+       max_dist = min(no_free_segments(sb), LOGFS_NO_AREAS - 1);
 
        for (i = max_dist; i >= 0; i--) {
                this = first_in_list(&super->s_low_list[i]);
index 388df1aa35e583f728d0a085db9fc7b810635438..a422f42238b250764011fa421d24a1a0858dd153 100644 (file)
@@ -286,7 +286,7 @@ static int logfs_write_inode(struct inode *inode, struct writeback_control *wbc)
        if (logfs_inode(inode)->li_flags & LOGFS_IF_STILLBORN)
                return 0;
 
-       ret = __logfs_write_inode(inode, flags);
+       ret = __logfs_write_inode(inode, NULL, flags);
        LOGFS_BUG_ON(ret, inode->i_sb);
        return ret;
 }
@@ -363,7 +363,9 @@ static void logfs_init_once(void *_li)
 
 static int logfs_sync_fs(struct super_block *sb, int wait)
 {
+       logfs_get_wblocks(sb, NULL, WF_LOCK);
        logfs_write_anchor(sb);
+       logfs_put_wblocks(sb, NULL, WF_LOCK);
        return 0;
 }
 
index 9da29706f91cd74772169e6391333ae877a1fa05..1e1c369df22bb085f62519b1100eb300053aaae8 100644 (file)
@@ -612,7 +612,6 @@ static size_t __logfs_write_je(struct super_block *sb, void *buf, u16 type,
        if (len == 0)
                return logfs_write_header(super, header, 0, type);
 
-       BUG_ON(len > sb->s_blocksize);
        compr_len = logfs_compress(buf, data, len, sb->s_blocksize);
        if (compr_len < 0 || type == JE_ANCHOR) {
                memcpy(data, buf, len);
index 926373866a5510c310936c0d6cc52140a335f645..5f09376094651c76c7ded9f52535fe1a1351ed6b 100644 (file)
@@ -528,7 +528,7 @@ void logfs_destroy_inode_cache(void);
 void logfs_set_blocks(struct inode *inode, u64 no);
 /* these logically belong into inode.c but actually reside in readwrite.c */
 int logfs_read_inode(struct inode *inode);
-int __logfs_write_inode(struct inode *inode, long flags);
+int __logfs_write_inode(struct inode *inode, struct page *, long flags);
 void logfs_evict_inode(struct inode *inode);
 
 /* journal.c */
@@ -577,6 +577,8 @@ void initialize_block_counters(struct page *page, struct logfs_block *block,
                __be64 *array, int page_is_empty);
 int logfs_exist_block(struct inode *inode, u64 bix);
 int get_page_reserve(struct inode *inode, struct page *page);
+void logfs_get_wblocks(struct super_block *sb, struct page *page, int lock);
+void logfs_put_wblocks(struct super_block *sb, struct page *page, int lock);
 extern struct logfs_block_ops indirect_block_ops;
 
 /* segment.c */
@@ -594,6 +596,7 @@ int logfs_init_mapping(struct super_block *sb);
 void logfs_sync_area(struct logfs_area *area);
 void logfs_sync_segments(struct super_block *sb);
 void freeseg(struct super_block *sb, u32 segno);
+void free_areas(struct super_block *sb);
 
 /* area handling */
 int logfs_init_areas(struct super_block *sb);
index 2ac4217b7901cb60b726986fc964f29b3e09168f..4153e65b01488b55d6a5646cee01689b26afc6a3 100644 (file)
@@ -244,8 +244,7 @@ static void preunlock_page(struct super_block *sb, struct page *page, int lock)
  * is waiting for s_write_mutex.  We annotate this fact by setting PG_pre_locked
  * in addition to PG_locked.
  */
-static void logfs_get_wblocks(struct super_block *sb, struct page *page,
-               int lock)
+void logfs_get_wblocks(struct super_block *sb, struct page *page, int lock)
 {
        struct logfs_super *super = logfs_super(sb);
 
@@ -260,8 +259,7 @@ static void logfs_get_wblocks(struct super_block *sb, struct page *page,
        }
 }
 
-static void logfs_put_wblocks(struct super_block *sb, struct page *page,
-               int lock)
+void logfs_put_wblocks(struct super_block *sb, struct page *page, int lock)
 {
        struct logfs_super *super = logfs_super(sb);
 
@@ -424,7 +422,7 @@ static void inode_write_block(struct logfs_block *block)
        if (inode->i_ino == LOGFS_INO_MASTER)
                logfs_write_anchor(inode->i_sb);
        else {
-               ret = __logfs_write_inode(inode, 0);
+               ret = __logfs_write_inode(inode, NULL, 0);
                /* see indirect_write_block comment */
                BUG_ON(ret);
        }
@@ -560,8 +558,13 @@ static void inode_free_block(struct super_block *sb, struct logfs_block *block)
 static void indirect_free_block(struct super_block *sb,
                struct logfs_block *block)
 {
-       ClearPagePrivate(block->page);
-       block->page->private = 0;
+       struct page *page = block->page;
+
+       if (PagePrivate(page)) {
+               ClearPagePrivate(page);
+               page_cache_release(page);
+               set_page_private(page, 0);
+       }
        __free_block(sb, block);
 }
 
@@ -650,8 +653,11 @@ static void alloc_data_block(struct inode *inode, struct page *page)
        logfs_unpack_index(page->index, &bix, &level);
        block = __alloc_block(inode->i_sb, inode->i_ino, bix, level);
        block->page = page;
+
        SetPagePrivate(page);
-       page->private = (unsigned long)block;
+       page_cache_get(page);
+       set_page_private(page, (unsigned long) block);
+
        block->ops = &indirect_block_ops;
 }
 
@@ -1570,11 +1576,15 @@ int logfs_write_buf(struct inode *inode, struct page *page, long flags)
 static int __logfs_delete(struct inode *inode, struct page *page)
 {
        long flags = WF_DELETE;
+       int err;
 
        inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 
        if (page->index < I0_BLOCKS)
                return logfs_write_direct(inode, page, flags);
+       err = grow_inode(inode, page->index, 0);
+       if (err)
+               return err;
        return logfs_write_rec(inode, page, page->index, 0, flags);
 }
 
@@ -1623,7 +1633,7 @@ int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
                        if (inode->i_ino == LOGFS_INO_MASTER)
                                logfs_write_anchor(inode->i_sb);
                        else {
-                               err = __logfs_write_inode(inode, flags);
+                               err = __logfs_write_inode(inode, page, flags);
                        }
                }
        }
@@ -1873,7 +1883,7 @@ int logfs_truncate(struct inode *inode, u64 target)
                logfs_get_wblocks(sb, NULL, 1);
                err = __logfs_truncate(inode, size);
                if (!err)
-                       err = __logfs_write_inode(inode, 0);
+                       err = __logfs_write_inode(inode, NULL, 0);
                logfs_put_wblocks(sb, NULL, 1);
        }
 
@@ -1901,8 +1911,11 @@ static void move_page_to_inode(struct inode *inode, struct page *page)
        li->li_block = block;
 
        block->page = NULL;
-       page->private = 0;
-       ClearPagePrivate(page);
+       if (PagePrivate(page)) {
+               ClearPagePrivate(page);
+               page_cache_release(page);
+               set_page_private(page, 0);
+       }
 }
 
 static void move_inode_to_page(struct page *page, struct inode *inode)
@@ -1918,8 +1931,12 @@ static void move_inode_to_page(struct page *page, struct inode *inode)
        BUG_ON(PagePrivate(page));
        block->ops = &indirect_block_ops;
        block->page = page;
-       page->private = (unsigned long)block;
-       SetPagePrivate(page);
+
+       if (!PagePrivate(page)) {
+               SetPagePrivate(page);
+               page_cache_get(page);
+               set_page_private(page, (unsigned long) block);
+       }
 
        block->inode = NULL;
        li->li_block = NULL;
@@ -2106,14 +2123,14 @@ void logfs_set_segment_unreserved(struct super_block *sb, u32 segno, u32 ec)
                        ec_level);
 }
 
-int __logfs_write_inode(struct inode *inode, long flags)
+int __logfs_write_inode(struct inode *inode, struct page *page, long flags)
 {
        struct super_block *sb = inode->i_sb;
        int ret;
 
-       logfs_get_wblocks(sb, NULL, flags & WF_LOCK);
+       logfs_get_wblocks(sb, page, flags & WF_LOCK);
        ret = do_write_inode(inode);
-       logfs_put_wblocks(sb, NULL, flags & WF_LOCK);
+       logfs_put_wblocks(sb, page, flags & WF_LOCK);
        return ret;
 }
 
index 9d5187353255ddf630a44afcb5e7d020dc8dd25c..ab798ed1cc8839e2c9c97eb87d7bedff64693fa4 100644 (file)
@@ -86,7 +86,11 @@ int __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
                BUG_ON(!page); /* FIXME: reserve a pool */
                SetPageUptodate(page);
                memcpy(page_address(page) + offset, buf, copylen);
-               SetPagePrivate(page);
+
+               if (!PagePrivate(page)) {
+                       SetPagePrivate(page);
+                       page_cache_get(page);
+               }
                page_cache_release(page);
 
                buf += copylen;
@@ -110,7 +114,10 @@ static void pad_partial_page(struct logfs_area *area)
                page = get_mapping_page(sb, index, 0);
                BUG_ON(!page); /* FIXME: reserve a pool */
                memset(page_address(page) + offset, 0xff, len);
-               SetPagePrivate(page);
+               if (!PagePrivate(page)) {
+                       SetPagePrivate(page);
+                       page_cache_get(page);
+               }
                page_cache_release(page);
        }
 }
@@ -130,7 +137,10 @@ static void pad_full_pages(struct logfs_area *area)
                BUG_ON(!page); /* FIXME: reserve a pool */
                SetPageUptodate(page);
                memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
-               SetPagePrivate(page);
+               if (!PagePrivate(page)) {
+                       SetPagePrivate(page);
+                       page_cache_get(page);
+               }
                page_cache_release(page);
                index++;
                no_indizes--;
@@ -485,8 +495,12 @@ static void move_btree_to_page(struct inode *inode, struct page *page,
                mempool_free(item, super->s_alias_pool);
        }
        block->page = page;
-       SetPagePrivate(page);
-       page->private = (unsigned long)block;
+
+       if (!PagePrivate(page)) {
+               SetPagePrivate(page);
+               page_cache_get(page);
+               set_page_private(page, (unsigned long) block);
+       }
        block->ops = &indirect_block_ops;
        initialize_block_counters(page, block, data, 0);
 }
@@ -536,8 +550,12 @@ void move_page_to_btree(struct page *page)
                list_add(&item->list, &block->item_list);
        }
        block->page = NULL;
-       ClearPagePrivate(page);
-       page->private = 0;
+
+       if (PagePrivate(page)) {
+               ClearPagePrivate(page);
+               page_cache_release(page);
+               set_page_private(page, 0);
+       }
        block->ops = &btree_block_ops;
        err = alias_tree_insert(block->sb, block->ino, block->bix, block->level,
                        block);
@@ -702,7 +720,10 @@ void freeseg(struct super_block *sb, u32 segno)
                page = find_get_page(mapping, ofs >> PAGE_SHIFT);
                if (!page)
                        continue;
-               ClearPagePrivate(page);
+               if (PagePrivate(page)) {
+                       ClearPagePrivate(page);
+                       page_cache_release(page);
+               }
                page_cache_release(page);
        }
 }
@@ -841,6 +862,16 @@ static void free_area(struct logfs_area *area)
        kfree(area);
 }
 
+void free_areas(struct super_block *sb)
+{
+       struct logfs_super *super = logfs_super(sb);
+       int i;
+
+       for_each_area(i)
+               free_area(super->s_area[i]);
+       free_area(super->s_journal_area);
+}
+
 static struct logfs_area *alloc_area(struct super_block *sb)
 {
        struct logfs_area *area;
@@ -923,10 +954,6 @@ err:
 void logfs_cleanup_areas(struct super_block *sb)
 {
        struct logfs_super *super = logfs_super(sb);
-       int i;
 
        btree_grim_visitor128(&super->s_object_alias_tree, 0, kill_alias);
-       for_each_area(i)
-               free_area(super->s_area[i]);
-       free_area(super->s_journal_area);
 }
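
The readwrite.c and segment.c hunks above repeat one idiom in several places: a page that carries a logfs_block pointer in page->private must also hold an extra page reference, and that reference is dropped exactly when PG_private is cleared. A pair of hypothetical helpers, not part of the patch, capture the pairing (logfs_page_attach_block/logfs_page_detach_block are invented names):

#include <linux/pagemap.h>	/* PagePrivate, page_cache_get/release */
#include "logfs.h"		/* struct logfs_block, as in the files above */

static void logfs_page_attach_block(struct page *page,
				    struct logfs_block *block)
{
	if (!PagePrivate(page)) {
		SetPagePrivate(page);
		page_cache_get(page);		/* reference owned by ->private */
		set_page_private(page, (unsigned long)block);
	}
}

static void logfs_page_detach_block(struct page *page)
{
	if (PagePrivate(page)) {
		ClearPagePrivate(page);
		page_cache_release(page);	/* drop the ->private reference */
		set_page_private(page, 0);
	}
}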
index e795c234ea33592e12cb264c0c8c84d2146f29c3..c9ee7f5d1cafe2c66bd5edab9dc0240feb6f68ff 100644 (file)
@@ -486,14 +486,15 @@ static void logfs_kill_sb(struct super_block *sb)
        /* Alias entries slow down mount, so evict as many as possible */
        sync_filesystem(sb);
        logfs_write_anchor(sb);
+       free_areas(sb);
 
        /*
         * From this point on alias entries are simply dropped - and any
         * writes to the object store are considered bugs.
         */
-       super->s_flags |= LOGFS_SB_FLAG_SHUTDOWN;
        log_super("LogFS: Now in shutdown\n");
        generic_shutdown_super(sb);
+       super->s_flags |= LOGFS_SB_FLAG_SHUTDOWN;
 
        BUG_ON(super->s_dirty_used_bytes || super->s_dirty_free_bytes);
 
index c283a1ec008ee3cd40bb7a6bb106ac5df9f52f41..208c6aa4a989dade864a0ca6fcd3bd4b21ba8252 100644 (file)
@@ -140,21 +140,19 @@ static int do_getname(const char __user *filename, char *page)
 
 static char *getname_flags(const char __user *filename, int flags, int *empty)
 {
-       char *tmp, *result;
-
-       result = ERR_PTR(-ENOMEM);
-       tmp = __getname();
-       if (tmp)  {
-               int retval = do_getname(filename, tmp);
-
-               result = tmp;
-               if (retval < 0) {
-                       if (retval == -ENOENT && empty)
-                               *empty = 1;
-                       if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) {
-                               __putname(tmp);
-                               result = ERR_PTR(retval);
-                       }
+       char *result = __getname();
+       int retval;
+
+       if (!result)
+               return ERR_PTR(-ENOMEM);
+
+       retval = do_getname(filename, result);
+       if (retval < 0) {
+               if (retval == -ENOENT && empty)
+                       *empty = 1;
+               if (retval != -ENOENT || !(flags & LOOKUP_EMPTY)) {
+                       __putname(result);
+                       return ERR_PTR(retval);
                }
        }
        audit_getname(result);
index 281ae95932c923f3df37fbe8df53b10509974ef4..48cfac31f64ce2b3679362b91f324ff9afc4e262 100644 (file)
@@ -90,9 +90,9 @@ static int is_writable(struct pnfs_block_extent *be, sector_t isect)
  */
 struct parallel_io {
        struct kref refcnt;
-       struct rpc_call_ops call_ops;
-       void (*pnfs_callback) (void *data);
+       void (*pnfs_callback) (void *data, int num_se);
        void *data;
+       int bse_count;
 };
 
 static inline struct parallel_io *alloc_parallel(void *data)
@@ -103,6 +103,7 @@ static inline struct parallel_io *alloc_parallel(void *data)
        if (rv) {
                rv->data = data;
                kref_init(&rv->refcnt);
+               rv->bse_count = 0;
        }
        return rv;
 }
@@ -117,7 +118,7 @@ static void destroy_parallel(struct kref *kref)
        struct parallel_io *p = container_of(kref, struct parallel_io, refcnt);
 
        dprintk("%s enter\n", __func__);
-       p->pnfs_callback(p->data);
+       p->pnfs_callback(p->data, p->bse_count);
        kfree(p);
 }
 
@@ -146,14 +147,19 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
 {
        struct bio *bio;
 
+       npg = min(npg, BIO_MAX_PAGES);
        bio = bio_alloc(GFP_NOIO, npg);
-       if (!bio)
-               return NULL;
+       if (!bio && (current->flags & PF_MEMALLOC)) {
+               while (!bio && (npg /= 2))
+                       bio = bio_alloc(GFP_NOIO, npg);
+       }
 
-       bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
-       bio->bi_bdev = be->be_mdev;
-       bio->bi_end_io = end_io;
-       bio->bi_private = par;
+       if (bio) {
+               bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+               bio->bi_bdev = be->be_mdev;
+               bio->bi_end_io = end_io;
+               bio->bi_private = par;
+       }
        return bio;
 }
 
@@ -212,22 +218,15 @@ static void bl_read_cleanup(struct work_struct *work)
 }
 
 static void
-bl_end_par_io_read(void *data)
+bl_end_par_io_read(void *data, int unused)
 {
        struct nfs_read_data *rdata = data;
 
+       rdata->task.tk_status = rdata->pnfs_error;
        INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
        schedule_work(&rdata->task.u.tk_work);
 }
 
-/* We don't want normal .rpc_call_done callback used, so we replace it
- * with this stub.
- */
-static void bl_rpc_do_nothing(struct rpc_task *task, void *calldata)
-{
-       return;
-}
-
 static enum pnfs_try_status
 bl_read_pagelist(struct nfs_read_data *rdata)
 {
@@ -247,8 +246,6 @@ bl_read_pagelist(struct nfs_read_data *rdata)
        par = alloc_parallel(rdata);
        if (!par)
                goto use_mds;
-       par->call_ops = *rdata->mds_ops;
-       par->call_ops.rpc_call_done = bl_rpc_do_nothing;
        par->pnfs_callback = bl_end_par_io_read;
        /* At this point, we can no longer jump to use_mds */
 
@@ -322,6 +319,7 @@ static void mark_extents_written(struct pnfs_block_layout *bl,
 {
        sector_t isect, end;
        struct pnfs_block_extent *be;
+       struct pnfs_block_short_extent *se;
 
        dprintk("%s(%llu, %u)\n", __func__, offset, count);
        if (count == 0)
@@ -334,8 +332,11 @@ static void mark_extents_written(struct pnfs_block_layout *bl,
                be = bl_find_get_extent(bl, isect, NULL);
                BUG_ON(!be); /* FIXME */
                len = min(end, be->be_f_offset + be->be_length) - isect;
-               if (be->be_state == PNFS_BLOCK_INVALID_DATA)
-                       bl_mark_for_commit(be, isect, len); /* What if fails? */
+               if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+                       se = bl_pop_one_short_extent(be->be_inval);
+                       BUG_ON(!se);
+                       bl_mark_for_commit(be, isect, len, se);
+               }
                isect += len;
                bl_put_extent(be);
        }
@@ -357,7 +358,8 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
                end_page_writeback(page);
                page_cache_release(page);
        } while (bvec >= bio->bi_io_vec);
-       if (!uptodate) {
+
+       if (unlikely(!uptodate)) {
                if (!wdata->pnfs_error)
                        wdata->pnfs_error = -EIO;
                pnfs_set_lo_fail(wdata->lseg);
@@ -366,7 +368,6 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
        put_parallel(par);
 }
 
-/* This is basically copied from mpage_end_io_read */
 static void bl_end_io_write(struct bio *bio, int err)
 {
        struct parallel_io *par = bio->bi_private;
@@ -392,7 +393,7 @@ static void bl_write_cleanup(struct work_struct *work)
        dprintk("%s enter\n", __func__);
        task = container_of(work, struct rpc_task, u.tk_work);
        wdata = container_of(task, struct nfs_write_data, task);
-       if (!wdata->pnfs_error) {
+       if (likely(!wdata->pnfs_error)) {
                /* Marks for LAYOUTCOMMIT */
                mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
                                     wdata->args.offset, wdata->args.count);
@@ -401,11 +402,16 @@ static void bl_write_cleanup(struct work_struct *work)
 }
 
 /* Called when last of bios associated with a bl_write_pagelist call finishes */
-static void bl_end_par_io_write(void *data)
+static void bl_end_par_io_write(void *data, int num_se)
 {
        struct nfs_write_data *wdata = data;
 
-       wdata->task.tk_status = 0;
+       if (unlikely(wdata->pnfs_error)) {
+               bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval,
+                                       num_se);
+       }
+
+       wdata->task.tk_status = wdata->pnfs_error;
        wdata->verf.committed = NFS_FILE_SYNC;
        INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
        schedule_work(&wdata->task.u.tk_work);
@@ -484,6 +490,55 @@ cleanup:
        return ret;
 }
 
+/* Find or create a zeroing page and mark it as under writeback.
+ * Return ERR_PTR on error, NULL to indicate this page should be skipped,
+ * or the page itself to indicate it should be written out.

+ */
+static struct page *
+bl_find_get_zeroing_page(struct inode *inode, pgoff_t index,
+                       struct pnfs_block_extent *cow_read)
+{
+       struct page *page;
+       int locked = 0;
+       page = find_get_page(inode->i_mapping, index);
+       if (page)
+               goto check_page;
+
+       page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
+       if (unlikely(!page)) {
+               dprintk("%s oom\n", __func__);
+               return ERR_PTR(-ENOMEM);
+       }
+       locked = 1;
+
+check_page:
+       /* PageDirty: Other will write this out
+        * PageWriteback: Other is writing this out
+        * PageUptodate: It was read before
+        */
+       if (PageDirty(page) || PageWriteback(page)) {
+               print_page(page);
+               if (locked)
+                       unlock_page(page);
+               page_cache_release(page);
+               return NULL;
+       }
+
+       if (!locked) {
+               lock_page(page);
+               locked = 1;
+               goto check_page;
+       }
+       if (!PageUptodate(page)) {
+               /* New page, readin or zero it */
+               init_page_for_write(page, cow_read);
+       }
+       set_page_writeback(page);
+       unlock_page(page);
+
+       return page;
+}
+
 static enum pnfs_try_status
 bl_write_pagelist(struct nfs_write_data *wdata, int sync)
 {
@@ -508,9 +563,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
         */
        par = alloc_parallel(wdata);
        if (!par)
-               return PNFS_NOT_ATTEMPTED;
-       par->call_ops = *wdata->mds_ops;
-       par->call_ops.rpc_call_done = bl_rpc_do_nothing;
+               goto out_mds;
        par->pnfs_callback = bl_end_par_io_write;
        /* At this point, have to be more careful with error handling */
 
@@ -518,12 +571,15 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
        be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read);
        if (!be || !is_writable(be, isect)) {
                dprintk("%s no matching extents!\n", __func__);
-               wdata->pnfs_error = -EINVAL;
-               goto out;
+               goto out_mds;
        }
 
        /* First page inside INVALID extent */
        if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+               if (likely(!bl_push_one_short_extent(be->be_inval)))
+                       par->bse_count++;
+               else
+                       goto out_mds;
                temp = offset >> PAGE_CACHE_SHIFT;
                npg_zero = do_div(temp, npg_per_block);
                isect = (sector_t) (((offset - npg_zero * PAGE_CACHE_SIZE) &
@@ -543,36 +599,16 @@ fill_invalid_ext:
                        dprintk("%s zero %dth page: index %lu isect %llu\n",
                                __func__, npg_zero, index,
                                (unsigned long long)isect);
-                       page =
-                           find_or_create_page(wdata->inode->i_mapping, index,
-                                               GFP_NOFS);
-                       if (!page) {
-                               dprintk("%s oom\n", __func__);
-                               wdata->pnfs_error = -ENOMEM;
+                       page = bl_find_get_zeroing_page(wdata->inode, index,
+                                                       cow_read);
+                       if (unlikely(IS_ERR(page))) {
+                               wdata->pnfs_error = PTR_ERR(page);
                                goto out;
-                       }
-
-                       /* PageDirty: Other will write this out
-                        * PageWriteback: Other is writing this out
-                        * PageUptodate: It was read before
-                        * sector_initialized: already written out
-                        */
-                       if (PageDirty(page) || PageWriteback(page)) {
-                               print_page(page);
-                               unlock_page(page);
-                               page_cache_release(page);
+                       } else if (page == NULL)
                                goto next_page;
-                       }
-                       if (!PageUptodate(page)) {
-                               /* New page, readin or zero it */
-                               init_page_for_write(page, cow_read);
-                       }
-                       set_page_writeback(page);
-                       unlock_page(page);
 
                        ret = bl_mark_sectors_init(be->be_inval, isect,
-                                                      PAGE_CACHE_SECTORS,
-                                                      NULL);
+                                                      PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
@@ -581,6 +617,19 @@ fill_invalid_ext:
                                wdata->pnfs_error = ret;
                                goto out;
                        }
+                       if (likely(!bl_push_one_short_extent(be->be_inval)))
+                               par->bse_count++;
+                       else {
+                               end_page_writeback(page);
+                               page_cache_release(page);
+                               wdata->pnfs_error = -ENOMEM;
+                               goto out;
+                       }
+                       /* FIXME: This should be done in bi_end_io */
+                       mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
+                                            page->index << PAGE_CACHE_SHIFT,
+                                            PAGE_CACHE_SIZE);
+
                        bio = bl_add_page_to_bio(bio, npg_zero, WRITE,
                                                 isect, page, be,
                                                 bl_end_io_write_zero, par);
@@ -589,10 +638,6 @@ fill_invalid_ext:
                                bio = NULL;
                                goto out;
                        }
-                       /* FIXME: This should be done in bi_end_io */
-                       mark_extents_written(BLK_LSEG2EXT(wdata->lseg),
-                                            page->index << PAGE_CACHE_SHIFT,
-                                            PAGE_CACHE_SIZE);
 next_page:
                        isect += PAGE_CACHE_SECTORS;
                        extent_length -= PAGE_CACHE_SECTORS;
@@ -616,13 +661,21 @@ next_page:
                                wdata->pnfs_error = -EINVAL;
                                goto out;
                        }
+                       if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
+                               if (likely(!bl_push_one_short_extent(
+                                                               be->be_inval)))
+                                       par->bse_count++;
+                               else {
+                                       wdata->pnfs_error = -ENOMEM;
+                                       goto out;
+                               }
+                       }
                        extent_length = be->be_length -
                            (isect - be->be_f_offset);
                }
                if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
                        ret = bl_mark_sectors_init(be->be_inval, isect,
-                                                      PAGE_CACHE_SECTORS,
-                                                      NULL);
+                                                      PAGE_CACHE_SECTORS);
                        if (unlikely(ret)) {
                                dprintk("%s bl_mark_sectors_init fail %d\n",
                                        __func__, ret);
@@ -664,6 +717,10 @@ out:
        bl_submit_bio(WRITE, bio);
        put_parallel(par);
        return PNFS_ATTEMPTED;
+out_mds:
+       bl_put_extent(be);
+       kfree(par);
+       return PNFS_NOT_ATTEMPTED;
 }
 
 /* FIXME - range ignored */
@@ -690,11 +747,17 @@ static void
 release_inval_marks(struct pnfs_inval_markings *marks)
 {
        struct pnfs_inval_tracking *pos, *temp;
+       struct pnfs_block_short_extent *se, *stemp;
 
        list_for_each_entry_safe(pos, temp, &marks->im_tree.mtt_stub, it_link) {
                list_del(&pos->it_link);
                kfree(pos);
        }
+
+       list_for_each_entry_safe(se, stemp, &marks->im_extents, bse_node) {
+               list_del(&se->bse_node);
+               kfree(se);
+       }
        return;
 }
 
@@ -779,16 +842,13 @@ bl_cleanup_layoutcommit(struct nfs4_layoutcommit_data *lcdata)
 static void free_blk_mountid(struct block_mount_id *mid)
 {
        if (mid) {
-               struct pnfs_block_dev *dev;
-               spin_lock(&mid->bm_lock);
-               while (!list_empty(&mid->bm_devlist)) {
-                       dev = list_first_entry(&mid->bm_devlist,
-                                              struct pnfs_block_dev,
-                                              bm_node);
+               struct pnfs_block_dev *dev, *tmp;
+
+               /* No need to take bm_lock as we are last user freeing bm_devlist */
+               list_for_each_entry_safe(dev, tmp, &mid->bm_devlist, bm_node) {
                        list_del(&dev->bm_node);
                        bl_free_block_dev(dev);
                }
-               spin_unlock(&mid->bm_lock);
                kfree(mid);
        }
 }
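
The write-path changes above adopt a reservation discipline: each page aimed at an INVALID extent pushes one pre-allocated short extent (bl_push_one_short_extent, counted in par->bse_count), bio completion pops one per converted range for bl_mark_for_commit(), and error paths hand the unused count to bl_free_short_extents(). Below is a standalone userspace model of that lifecycle; the list stand-in and all names are illustrative, not the real pNFS structures.

#include <stdio.h>
#include <stdlib.h>

struct short_extent { struct short_extent *next; };

static struct short_extent *reserved;	/* models marks->im_extents */

static int push_one(void)		/* models bl_push_one_short_extent() */
{
	struct short_extent *se = malloc(sizeof(*se));

	if (!se)
		return -1;
	se->next = reserved;
	reserved = se;
	return 0;
}

static struct short_extent *pop_one(void) /* models bl_pop_one_short_extent() */
{
	struct short_extent *se = reserved;

	if (se)
		reserved = se->next;
	return se;
}

static void free_unused(int num)	/* models bl_free_short_extents() */
{
	while (num-- > 0)
		free(pop_one());
}

int main(void)
{
	int bse_count = 0;

	if (push_one() == 0)		/* reserve before issuing the bio */
		bse_count++;
	free(pop_one());		/* completion consumes one entry */
	free_unused(bse_count - 1);	/* error path: free what was unused */
	printf("outstanding reservations: %s\n", reserved ? "yes" : "none");
	return 0;
}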
index 42acf7ef59926d61ce0b4a12e208585d928a9aad..e31a2df28e70aca040560b8d94403d85d67cd170 100644 (file)
@@ -70,6 +70,7 @@ struct pnfs_inval_markings {
        spinlock_t      im_lock;
        struct my_tree  im_tree;        /* Sectors that need LAYOUTCOMMIT */
        sector_t        im_block_size;  /* Server blocksize in sectors */
+       struct list_head im_extents;    /* Short extents for INVAL->RW conversion */
 };
 
 struct pnfs_inval_tracking {
@@ -105,6 +106,7 @@ BL_INIT_INVAL_MARKS(struct pnfs_inval_markings *marks, sector_t blocksize)
 {
        spin_lock_init(&marks->im_lock);
        INIT_LIST_HEAD(&marks->im_tree.mtt_stub);
+       INIT_LIST_HEAD(&marks->im_extents);
        marks->im_block_size = blocksize;
        marks->im_tree.mtt_step_size = min((sector_t)PAGE_CACHE_SECTORS,
                                           blocksize);
@@ -186,8 +188,7 @@ struct pnfs_block_extent *
 bl_find_get_extent(struct pnfs_block_layout *bl, sector_t isect,
                struct pnfs_block_extent **cow_read);
 int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
-                            sector_t offset, sector_t length,
-                            sector_t **pages);
+                            sector_t offset, sector_t length);
 void bl_put_extent(struct pnfs_block_extent *be);
 struct pnfs_block_extent *bl_alloc_extent(void);
 int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect);
@@ -200,6 +201,11 @@ void clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
 int bl_add_merge_extent(struct pnfs_block_layout *bl,
                         struct pnfs_block_extent *new);
 int bl_mark_for_commit(struct pnfs_block_extent *be,
-                       sector_t offset, sector_t length);
+                       sector_t offset, sector_t length,
+                       struct pnfs_block_short_extent *new);
+int bl_push_one_short_extent(struct pnfs_inval_markings *marks);
+struct pnfs_block_short_extent *
+bl_pop_one_short_extent(struct pnfs_inval_markings *marks);
+void bl_free_short_extents(struct pnfs_inval_markings *marks, int num_to_free);
 
 #endif /* FS_NFS_NFS4BLOCKLAYOUT_H */
index 19fa7b0b8c00d8d2fdebef8b25e59a1b5b0598de..1abac09f7cd5f9fd46cc07401873067e49a4b7f7 100644 (file)
@@ -110,13 +110,7 @@ static int _add_entry(struct my_tree *tree, u64 s, int32_t tag,
                return 0;
        } else {
                struct pnfs_inval_tracking *new;
-               if (storage)
-                       new = storage;
-               else {
-                       new = kmalloc(sizeof(*new), GFP_NOFS);
-                       if (!new)
-                               return -ENOMEM;
-               }
+               new = storage;
                new->it_sector = s;
                new->it_tags = (1 << tag);
                list_add(&new->it_link, &pos->it_link);
@@ -139,11 +133,13 @@ static int _set_range(struct my_tree *tree, int32_t tag, u64 s, u64 length)
 }
 
 /* Ensure that future operations on given range of tree will not malloc */
-static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
+static int _preload_range(struct pnfs_inval_markings *marks,
+               u64 offset, u64 length)
 {
        u64 start, end, s;
        int count, i, used = 0, status = -ENOMEM;
        struct pnfs_inval_tracking **storage;
+       struct my_tree  *tree = &marks->im_tree;
 
        dprintk("%s(%llu, %llu) enter\n", __func__, offset, length);
        start = normalize(offset, tree->mtt_step_size);
@@ -161,12 +157,11 @@ static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
                        goto out_cleanup;
        }
 
-       /* Now need lock - HOW??? */
-
+       spin_lock_bh(&marks->im_lock);
        for (s = start; s < end; s += tree->mtt_step_size)
                used += _add_entry(tree, s, INTERNAL_EXISTS, storage[used]);
+       spin_unlock_bh(&marks->im_lock);
 
-       /* Unlock - HOW??? */
        status = 0;
 
  out_cleanup:
@@ -179,41 +174,14 @@ static int _preload_range(struct my_tree *tree, u64 offset, u64 length)
        return status;
 }
 
-static void set_needs_init(sector_t *array, sector_t offset)
-{
-       sector_t *p = array;
-
-       dprintk("%s enter\n", __func__);
-       if (!p)
-               return;
-       while (*p < offset)
-               p++;
-       if (*p == offset)
-               return;
-       else if (*p == ~0) {
-               *p++ = offset;
-               *p = ~0;
-               return;
-       } else {
-               sector_t *save = p;
-               dprintk("%s Adding %llu\n", __func__, (u64)offset);
-               while (*p != ~0)
-                       p++;
-               p++;
-               memmove(save + 1, save, (char *)p - (char *)save);
-               *save = offset;
-               return;
-       }
-}
-
 /* We are relying on page lock to serialize this */
 int bl_is_sector_init(struct pnfs_inval_markings *marks, sector_t isect)
 {
        int rv;
 
-       spin_lock(&marks->im_lock);
+       spin_lock_bh(&marks->im_lock);
        rv = _has_tag(&marks->im_tree, isect, EXTENT_INITIALIZED);
-       spin_unlock(&marks->im_lock);
+       spin_unlock_bh(&marks->im_lock);
        return rv;
 }
 
@@ -253,78 +221,39 @@ static int is_range_written(struct pnfs_inval_markings *marks,
 {
        int rv;
 
-       spin_lock(&marks->im_lock);
+       spin_lock_bh(&marks->im_lock);
        rv = _range_has_tag(&marks->im_tree, start, end, EXTENT_WRITTEN);
-       spin_unlock(&marks->im_lock);
+       spin_unlock_bh(&marks->im_lock);
        return rv;
 }
 
 /* Marks sectors in [offset, offset + length) as having been initialized.
  * All lengths are step-aligned, where step is min(pagesize, blocksize).
- * Notes where partial block is initialized, and helps prepare it for
- * complete initialization later.
+ * Currently assumes offset is page-aligned
  */
-/* Currently assumes offset is page-aligned */
 int bl_mark_sectors_init(struct pnfs_inval_markings *marks,
-                            sector_t offset, sector_t length,
-                            sector_t **pages)
+                            sector_t offset, sector_t length)
 {
-       sector_t s, start, end;
-       sector_t *array = NULL; /* Pages to mark */
+       sector_t start, end;
 
        dprintk("%s(offset=%llu,len=%llu) enter\n",
                __func__, (u64)offset, (u64)length);
-       s = max((sector_t) 3,
-               2 * (marks->im_block_size / (PAGE_CACHE_SECTORS)));
-       dprintk("%s set max=%llu\n", __func__, (u64)s);
-       if (pages) {
-               array = kmalloc(s * sizeof(sector_t), GFP_NOFS);
-               if (!array)
-                       goto outerr;
-               array[0] = ~0;
-       }
 
        start = normalize(offset, marks->im_block_size);
        end = normalize_up(offset + length, marks->im_block_size);
-       if (_preload_range(&marks->im_tree, start, end - start))
+       if (_preload_range(marks, start, end - start))
                goto outerr;
 
-       spin_lock(&marks->im_lock);
-
-       for (s = normalize_up(start, PAGE_CACHE_SECTORS);
-            s < offset; s += PAGE_CACHE_SECTORS) {
-               dprintk("%s pre-area pages\n", __func__);
-               /* Portion of used block is not initialized */
-               if (!_has_tag(&marks->im_tree, s, EXTENT_INITIALIZED))
-                       set_needs_init(array, s);
-       }
+       spin_lock_bh(&marks->im_lock);
        if (_set_range(&marks->im_tree, EXTENT_INITIALIZED, offset, length))
                goto out_unlock;
-       for (s = normalize_up(offset + length, PAGE_CACHE_SECTORS);
-            s < end; s += PAGE_CACHE_SECTORS) {
-               dprintk("%s post-area pages\n", __func__);
-               if (!_has_tag(&marks->im_tree, s, EXTENT_INITIALIZED))
-                       set_needs_init(array, s);
-       }
-
-       spin_unlock(&marks->im_lock);
+       spin_unlock_bh(&marks->im_lock);
 
-       if (pages) {
-               if (array[0] == ~0) {
-                       kfree(array);
-                       *pages = NULL;
-               } else
-                       *pages = array;
-       }
        return 0;
 
- out_unlock:
-       spin_unlock(&marks->im_lock);
- outerr:
-       if (pages) {
-               kfree(array);
-               *pages = NULL;
-       }
+out_unlock:
+       spin_unlock_bh(&marks->im_lock);
+outerr:
        return -ENOMEM;
 }
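
The _preload_range()/bl_mark_sectors_init() rework above moves every allocation outside the markings lock (now taken with spin_lock_bh(), presumably because the same data can also be reached from softirq context), so the insert loop under the lock can never fail or sleep. A hedged userspace sketch of that allocate-first, commit-under-the-lock shape; pthread_mutex_t and all names here are illustrative stand-ins, not the kernel API:

#include <pthread.h>
#include <stdlib.h>

struct entry { long sector; struct entry *next; };

static pthread_mutex_t tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct entry *tree;              /* toy stand-in for the markings tree */

/* returns 1 if storage was consumed, 0 if the caller should recycle it */
static int add_entry_locked(long s, struct entry *storage)
{
        for (struct entry *e = tree; e; e = e->next)
                if (e->sector == s)
                        return 0;       /* already present, storage unused */
        storage->sector = s;
        storage->next = tree;
        tree = storage;
        return 1;
}

int preload_range(long start, long end, long step)
{
        long count = (end - start) / step;
        long used = 0, i;
        int status = -1;
        struct entry **storage;

        if (count <= 0)
                return 0;
        storage = calloc(count, sizeof(*storage));
        if (!storage)
                return -1;
        for (i = 0; i < count; i++)             /* allocate before locking */
                if (!(storage[i] = malloc(sizeof(struct entry))))
                        goto cleanup;

        pthread_mutex_lock(&tree_lock);         /* insert path cannot fail */
        for (long s = start; s < end; s += step)
                used += add_entry_locked(s, storage[used]);
        pthread_mutex_unlock(&tree_lock);
        status = 0;
cleanup:
        for (i = used; i < count && storage[i]; i++)
                free(storage[i]);               /* recycle unused entries */
        free(storage);
        return status;
}
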
 
@@ -338,9 +267,9 @@ static int mark_written_sectors(struct pnfs_inval_markings *marks,
 
        dprintk("%s(offset=%llu,len=%llu) enter\n", __func__,
                (u64)offset, (u64)length);
-       spin_lock(&marks->im_lock);
+       spin_lock_bh(&marks->im_lock);
        status = _set_range(&marks->im_tree, EXTENT_WRITTEN, offset, length);
-       spin_unlock(&marks->im_lock);
+       spin_unlock_bh(&marks->im_lock);
        return status;
 }
 
@@ -440,20 +369,18 @@ static void add_to_commitlist(struct pnfs_block_layout *bl,
 
 /* Note the range described by offset, length is guaranteed to be contained
  * within be.
+ * new will be freed, either by this function or by add_to_commitlist if it
+ * decides not to use it, or after LAYOUTCOMMIT uses it in the commitlist.
  */
 int bl_mark_for_commit(struct pnfs_block_extent *be,
-                   sector_t offset, sector_t length)
+                   sector_t offset, sector_t length,
+                   struct pnfs_block_short_extent *new)
 {
        sector_t new_end, end = offset + length;
-       struct pnfs_block_short_extent *new;
        struct pnfs_block_layout *bl = container_of(be->be_inval,
                                                    struct pnfs_block_layout,
                                                    bl_inval);
 
-       new = kmalloc(sizeof(*new), GFP_NOFS);
-       if (!new)
-               return -ENOMEM;
-
        mark_written_sectors(be->be_inval, offset, length);
        /* We want to add the range to commit list, but it must be
         * block-normalized, and verified that the normalized range has
@@ -483,9 +410,6 @@ int bl_mark_for_commit(struct pnfs_block_extent *be,
        new->bse_mdev = be->be_mdev;
 
        spin_lock(&bl->bl_ext_lock);
-       /* new will be freed, either by add_to_commitlist if it decides not
-        * to use it, or after LAYOUTCOMMIT uses it in the commitlist.
-        */
        add_to_commitlist(bl, new);
        spin_unlock(&bl->bl_ext_lock);
        return 0;
@@ -933,3 +857,53 @@ clean_pnfs_block_layoutupdate(struct pnfs_block_layout *bl,
                }
        }
 }
+
+int bl_push_one_short_extent(struct pnfs_inval_markings *marks)
+{
+       struct pnfs_block_short_extent *new;
+
+       new = kmalloc(sizeof(*new), GFP_NOFS);
+       if (unlikely(!new))
+               return -ENOMEM;
+
+       spin_lock_bh(&marks->im_lock);
+       list_add(&new->bse_node, &marks->im_extents);
+       spin_unlock_bh(&marks->im_lock);
+
+       return 0;
+}
+
+struct pnfs_block_short_extent *
+bl_pop_one_short_extent(struct pnfs_inval_markings *marks)
+{
+       struct pnfs_block_short_extent *rv = NULL;
+
+       spin_lock_bh(&marks->im_lock);
+       if (!list_empty(&marks->im_extents)) {
+               rv = list_entry((&marks->im_extents)->next,
+                               struct pnfs_block_short_extent, bse_node);
+               list_del_init(&rv->bse_node);
+       }
+       spin_unlock_bh(&marks->im_lock);
+
+       return rv;
+}
+
+void bl_free_short_extents(struct pnfs_inval_markings *marks, int num_to_free)
+{
+       struct pnfs_block_short_extent *se = NULL, *tmp;
+
+       if (num_to_free <= 0)
+               return;
+
+       spin_lock(&marks->im_lock);
+       list_for_each_entry_safe(se, tmp, &marks->im_extents, bse_node) {
+               list_del(&se->bse_node);
+               kfree(se);
+               if (--num_to_free == 0)
+                       break;
+       }
+       spin_unlock(&marks->im_lock);
+
+       BUG_ON(num_to_free > 0);
+}
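
The new bl_push_one_short_extent()/bl_pop_one_short_extent()/bl_free_short_extents() trio above keeps a per-markings reserve of pre-allocated short extents, so bl_mark_for_commit() can be handed an extent that was allocated earlier instead of calling kmalloc() itself. A small userspace model of that reserve pool; the mutex and structure names are illustrative, not the kernel interfaces:

#include <pthread.h>
#include <stdlib.h>
#include <assert.h>

struct short_extent { struct short_extent *next; };

struct markings {
        pthread_mutex_t lock;           /* stands in for im_lock */
        struct short_extent *reserve;   /* stands in for im_extents */
};

int push_one_short_extent(struct markings *m)
{
        struct short_extent *n = malloc(sizeof(*n));

        if (!n)
                return -1;
        pthread_mutex_lock(&m->lock);
        n->next = m->reserve;
        m->reserve = n;
        pthread_mutex_unlock(&m->lock);
        return 0;
}

struct short_extent *pop_one_short_extent(struct markings *m)
{
        struct short_extent *n;

        pthread_mutex_lock(&m->lock);
        n = m->reserve;
        if (n)
                m->reserve = n->next;
        pthread_mutex_unlock(&m->lock);
        return n;
}

void free_short_extents(struct markings *m, int num_to_free)
{
        pthread_mutex_lock(&m->lock);
        while (num_to_free-- > 0) {
                struct short_extent *n = m->reserve;

                assert(n);              /* caller never over-frees */
                m->reserve = n->next;
                free(n);
        }
        pthread_mutex_unlock(&m->lock);
}
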
index 07df5f1d85e5188b16d51f13049dd925870b5238..c89d3b9e483c463cb1b9232e4b97520a7b7e1eaf 100644 (file)
@@ -162,7 +162,7 @@ struct cb_layoutrecallargs {
        };
 };
 
-extern unsigned nfs4_callback_layoutrecall(
+extern __be32 nfs4_callback_layoutrecall(
        struct cb_layoutrecallargs *args,
        void *dummy, struct cb_process_state *cps);
 
index 726e59a9e50f6ad20471895793efdb171aed7dd7..d50b2742f23baeb20d54c44d6919ed126faf74c8 100644 (file)
@@ -305,6 +305,10 @@ __be32 decode_devicenotify_args(struct svc_rqst *rqstp,
        n = ntohl(*p++);
        if (n <= 0)
                goto out;
+       if (n > ULONG_MAX / sizeof(*args->devs)) {
+               status = htonl(NFS4ERR_BADXDR);
+               goto out;
+       }
 
        args->devs = kmalloc(n * sizeof(*args->devs), GFP_KERNEL);
        if (!args->devs) {
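
The added check above rejects a count whose multiplication by the element size would wrap before it reaches kmalloc(); the nilfs_ioctl_clean_segments() hunk further down applies the same guard with UINT_MAX. A minimal userspace version of the pattern (calloc() would perform an equivalent check internally); the structure and function names are illustrative:

#include <limits.h>
#include <stdlib.h>

struct dev_entry { unsigned int notify_type; unsigned long long id; };

struct dev_entry *alloc_dev_array(unsigned long n)
{
        /* refuse counts that would overflow the size calculation */
        if (n == 0 || n > ULONG_MAX / sizeof(struct dev_entry))
                return NULL;
        return malloc(n * sizeof(struct dev_entry));
}
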
index ed388aae96893628dfcecb25743acac320b8a1b4..8ae91908f5aa6fa38c128348beb272ddb6226a3b 100644 (file)
@@ -382,7 +382,7 @@ decode_ds_addr(struct xdr_stream *streamp, gfp_t gfp_flags)
 {
        struct nfs4_pnfs_ds_addr *da = NULL;
        char *buf, *portstr;
-       u32 port;
+       __be16 port;
        int nlen, rlen;
        int tmp[2];
        __be32 *p;
index 75366dc89686d88655c6569f4840a1651ca98690..f0c849c98fe4bfb49208ed6a34f0714257431e75 100644 (file)
@@ -3587,7 +3587,7 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
                res.acl_flags |= NFS4_ACL_LEN_REQUEST;
        resp_buf = page_address(pages[0]);
 
-       dprintk("%s  buf %p buflen %ld npages %d args.acl_len %ld\n",
+       dprintk("%s  buf %p buflen %zu npages %d args.acl_len %zu\n",
                __func__, buf, buflen, npages, args.acl_len);
        ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
                             &msg, &args.seq_args, &res.seq_res, 0);
index 886649627c3d68d568914ee3d88983cbebcf08a5..2a70fce70c65be1151783e3aba3c221e39642ba7 100644 (file)
@@ -603,6 +603,8 @@ static int nilfs_ioctl_clean_segments(struct inode *inode, struct file *filp,
        nsegs = argv[4].v_nmembs;
        if (argv[4].v_size != argsz[4])
                goto out;
+       if (nsegs > UINT_MAX / sizeof(__u64))
+               goto out;
 
        /*
         * argv[4] points to segment numbers this ioctl cleans.  We
index 5485a5388ecb2919ecb5033cbf99d1d22916a72b..d4548dd49b028fbdc180ef679f20f0952319c4c3 100644 (file)
@@ -198,82 +198,9 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
        return result;
 }
 
-static struct mm_struct *__check_mem_permission(struct task_struct *task)
-{
-       struct mm_struct *mm;
-
-       mm = get_task_mm(task);
-       if (!mm)
-               return ERR_PTR(-EINVAL);
-
-       /*
-        * A task can always look at itself, in case it chooses
-        * to use system calls instead of load instructions.
-        */
-       if (task == current)
-               return mm;
-
-       /*
-        * If current is actively ptrace'ing, and would also be
-        * permitted to freshly attach with ptrace now, permit it.
-        */
-       if (task_is_stopped_or_traced(task)) {
-               int match;
-               rcu_read_lock();
-               match = (ptrace_parent(task) == current);
-               rcu_read_unlock();
-               if (match && ptrace_may_access(task, PTRACE_MODE_ATTACH))
-                       return mm;
-       }
-
-       /*
-        * No one else is allowed.
-        */
-       mmput(mm);
-       return ERR_PTR(-EPERM);
-}
-
-/*
- * If current may access user memory in @task return a reference to the
- * corresponding mm, otherwise ERR_PTR.
- */
-static struct mm_struct *check_mem_permission(struct task_struct *task)
-{
-       struct mm_struct *mm;
-       int err;
-
-       /*
-        * Avoid racing if task exec's as we might get a new mm but validate
-        * against old credentials.
-        */
-       err = mutex_lock_killable(&task->signal->cred_guard_mutex);
-       if (err)
-               return ERR_PTR(err);
-
-       mm = __check_mem_permission(task);
-       mutex_unlock(&task->signal->cred_guard_mutex);
-
-       return mm;
-}
-
 struct mm_struct *mm_for_maps(struct task_struct *task)
 {
-       struct mm_struct *mm;
-       int err;
-
-       err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
-       if (err)
-               return ERR_PTR(err);
-
-       mm = get_task_mm(task);
-       if (mm && mm != current->mm &&
-                       !ptrace_may_access(task, PTRACE_MODE_READ)) {
-               mmput(mm);
-               mm = ERR_PTR(-EACCES);
-       }
-       mutex_unlock(&task->signal->cred_guard_mutex);
-
-       return mm;
+       return mm_access(task, PTRACE_MODE_READ);
 }
 
 static int proc_pid_cmdline(struct task_struct *task, char * buffer)
@@ -751,134 +678,97 @@ static const struct file_operations proc_single_file_operations = {
 };
 
 static int mem_open(struct inode* inode, struct file* file)
-{
-       file->private_data = (void*)((long)current->self_exec_id);
-       /* OK to pass negative loff_t, we can catch out-of-range */
-       file->f_mode |= FMODE_UNSIGNED_OFFSET;
-       return 0;
-}
-
-static ssize_t mem_read(struct file * file, char __user * buf,
-                       size_t count, loff_t *ppos)
 {
        struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
-       char *page;
-       unsigned long src = *ppos;
-       int ret = -ESRCH;
        struct mm_struct *mm;
 
        if (!task)
-               goto out_no_task;
+               return -ESRCH;
 
-       ret = -ENOMEM;
-       page = (char *)__get_free_page(GFP_TEMPORARY);
-       if (!page)
-               goto out;
+       mm = mm_access(task, PTRACE_MODE_ATTACH);
+       put_task_struct(task);
 
-       mm = check_mem_permission(task);
-       ret = PTR_ERR(mm);
        if (IS_ERR(mm))
-               goto out_free;
-
-       ret = -EIO;
-       if (file->private_data != (void*)((long)current->self_exec_id))
-               goto out_put;
-
-       ret = 0;
-       while (count > 0) {
-               int this_len, retval;
-
-               this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
-               retval = access_remote_vm(mm, src, page, this_len, 0);
-               if (!retval) {
-                       if (!ret)
-                               ret = -EIO;
-                       break;
-               }
+               return PTR_ERR(mm);
 
-               if (copy_to_user(buf, page, retval)) {
-                       ret = -EFAULT;
-                       break;
-               }
-               ret += retval;
-               src += retval;
-               buf += retval;
-               count -= retval;
+       if (mm) {
+               /* ensure this mm_struct can't be freed */
+               atomic_inc(&mm->mm_count);
+               /* but do not pin its memory */
+               mmput(mm);
        }
-       *ppos = src;
 
-out_put:
-       mmput(mm);
-out_free:
-       free_page((unsigned long) page);
-out:
-       put_task_struct(task);
-out_no_task:
-       return ret;
+       /* OK to pass negative loff_t, we can catch out-of-range */
+       file->f_mode |= FMODE_UNSIGNED_OFFSET;
+       file->private_data = mm;
+
+       return 0;
 }
 
-static ssize_t mem_write(struct file * file, const char __user *buf,
-                        size_t count, loff_t *ppos)
+static ssize_t mem_rw(struct file *file, char __user *buf,
+                       size_t count, loff_t *ppos, int write)
 {
-       int copied;
+       struct mm_struct *mm = file->private_data;
+       unsigned long addr = *ppos;
+       ssize_t copied;
        char *page;
-       struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
-       unsigned long dst = *ppos;
-       struct mm_struct *mm;
 
-       copied = -ESRCH;
-       if (!task)
-               goto out_no_task;
+       if (!mm)
+               return 0;
 
-       copied = -ENOMEM;
        page = (char *)__get_free_page(GFP_TEMPORARY);
        if (!page)
-               goto out_task;
-
-       mm = check_mem_permission(task);
-       copied = PTR_ERR(mm);
-       if (IS_ERR(mm))
-               goto out_free;
-
-       copied = -EIO;
-       if (file->private_data != (void *)((long)current->self_exec_id))
-               goto out_mm;
+               return -ENOMEM;
 
        copied = 0;
+       if (!atomic_inc_not_zero(&mm->mm_users))
+               goto free;
+
        while (count > 0) {
-               int this_len, retval;
+               int this_len = min_t(int, count, PAGE_SIZE);
 
-               this_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
-               if (copy_from_user(page, buf, this_len)) {
+               if (write && copy_from_user(page, buf, this_len)) {
                        copied = -EFAULT;
                        break;
                }
-               retval = access_remote_vm(mm, dst, page, this_len, 1);
-               if (!retval) {
+
+               this_len = access_remote_vm(mm, addr, page, this_len, write);
+               if (!this_len) {
                        if (!copied)
                                copied = -EIO;
                        break;
                }
-               copied += retval;
-               buf += retval;
-               dst += retval;
-               count -= retval;                        
+
+               if (!write && copy_to_user(buf, page, this_len)) {
+                       copied = -EFAULT;
+                       break;
+               }
+
+               buf += this_len;
+               addr += this_len;
+               copied += this_len;
+               count -= this_len;
        }
-       *ppos = dst;
+       *ppos = addr;
 
-out_mm:
        mmput(mm);
-out_free:
+free:
        free_page((unsigned long) page);
-out_task:
-       put_task_struct(task);
-out_no_task:
        return copied;
 }
 
+static ssize_t mem_read(struct file *file, char __user *buf,
+                       size_t count, loff_t *ppos)
+{
+       return mem_rw(file, buf, count, ppos, 0);
+}
+
+static ssize_t mem_write(struct file *file, const char __user *buf,
+                        size_t count, loff_t *ppos)
+{
+       return mem_rw(file, (char __user*)buf, count, ppos, 1);
+}
+
 loff_t mem_lseek(struct file *file, loff_t offset, int orig)
 {
        switch (orig) {
@@ -895,11 +785,20 @@ loff_t mem_lseek(struct file *file, loff_t offset, int orig)
        return file->f_pos;
 }
 
+static int mem_release(struct inode *inode, struct file *file)
+{
+       struct mm_struct *mm = file->private_data;
+       if (mm)
+               mmdrop(mm);
+       return 0;
+}
+
 static const struct file_operations proc_mem_operations = {
        .llseek         = mem_lseek,
        .read           = mem_read,
        .write          = mem_write,
        .open           = mem_open,
+       .release        = mem_release,
 };
 
 static ssize_t environ_read(struct file *file, char __user *buf,
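
The /proc/<pid>/mem rework above resolves permissions once in mem_open() (keeping a reference to the mm_struct itself but not pinning its memory) and folds mem_read()/mem_write() into a single mem_rw() driven by a write flag, copying at most a page per iteration through a bounce buffer. A hedged userspace analogue of that single read/write helper, with pread()/pwrite() standing in for access_remote_vm(); everything here is illustrative:

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

static ssize_t fd_rw(int fd, char *buf, size_t count, off_t *ppos, int write)
{
        long page_size = sysconf(_SC_PAGESIZE);
        char *page = malloc(page_size);         /* one-page bounce buffer */
        ssize_t copied = 0;

        if (!page)
                return -1;

        while (count > 0) {
                size_t this_len = count > (size_t)page_size ?
                                        (size_t)page_size : count;
                ssize_t n;

                if (write) {
                        memcpy(page, buf, this_len);
                        n = pwrite(fd, page, this_len, *ppos);
                } else {
                        n = pread(fd, page, this_len, *ppos);
                        if (n > 0)
                                memcpy(buf, page, n);
                }
                if (n <= 0) {
                        if (!copied)
                                copied = n;     /* report first error/EOF */
                        break;
                }
                buf += n;
                *ppos += n;
                copied += n;
                count -= n;
        }
        free(page);
        return copied;
}
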
@@ -1199,9 +1098,6 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
        ssize_t length;
        uid_t loginuid;
 
-       if (!capable(CAP_AUDIT_CONTROL))
-               return -EPERM;
-
        rcu_read_lock();
        if (current != pid_task(proc_pid(inode), PIDTYPE_PID)) {
                rcu_read_unlock();
@@ -1230,7 +1126,7 @@ static ssize_t proc_loginuid_write(struct file * file, const char __user * buf,
                goto out_free_page;
 
        }
-       length = audit_set_loginuid(current, loginuid);
+       length = audit_set_loginuid(loginuid);
        if (likely(length == 0))
                length = count;
 
index d76ca6ae2b1b0bd91becd9a80b9a5480e2abb478..121f77cfef76ce0da7b96d8ae475db38943e4f8b 100644 (file)
@@ -77,6 +77,8 @@ static int show_stat(struct seq_file *p, void *v)
                steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL];
                guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
                guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
+               sum += kstat_cpu_irqs_sum(i);
+               sum += arch_irq_stat_cpu(i);
 
                for (j = 0; j < NR_SOFTIRQS; j++) {
                        unsigned int softirq_stat = kstat_softirqs_cpu(j, i);
index e418c5abdb0ef954eed21771ca0ff1fe9958077d..7dcd2a250495d9a1777e6d7992637fb0a4897a81 100644 (file)
@@ -518,6 +518,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
                if (!page)
                        continue;
 
+               if (PageReserved(page))
+                       continue;
+
                /* Clear accessed and referenced bits. */
                ptep_test_and_clear_young(vma, addr, pte);
                ClearPageReferenced(page);
index 2bfd987f4853407be1027ede89bc940460346bf8..6b009548d2e0380dbbbd8c061570f2805371bf4d 100644 (file)
@@ -179,47 +179,33 @@ static const char *qnx4_checkroot(struct super_block *sb)
        struct qnx4_inode_entry *rootdir;
        int rd, rl;
        int i, j;
-       int found = 0;
 
-       if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/') {
+       if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/')
                return "no qnx4 filesystem (no root dir).";
-       } else {
-               QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id));
-               rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
-               rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size);
-               for (j = 0; j < rl; j++) {
-                       bh = sb_bread(sb, rd + j);      /* root dir, first block */
-                       if (bh == NULL) {
-                               return "unable to read root entry.";
-                       }
-                       for (i = 0; i < QNX4_INODES_PER_BLOCK; i++) {
-                               rootdir = (struct qnx4_inode_entry *) (bh->b_data + i * QNX4_DIR_ENTRY_SIZE);
-                               if (rootdir->di_fname != NULL) {
-                                       QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname));
-                                       if (!strcmp(rootdir->di_fname,
-                                                   QNX4_BMNAME)) {
-                                               found = 1;
-                                               qnx4_sb(sb)->BitMap = kmemdup(rootdir,
-                                                                             sizeof(struct qnx4_inode_entry),
-                                                                             GFP_KERNEL);
-                                               if (!qnx4_sb(sb)->BitMap) {
-                                                       brelse (bh);
-                                                       return "not enough memory for bitmap inode";
-                                               }/* keep bitmap inode known */
-                                               break;
-                                       }
-                               }
-                       }
+       QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id));
+       rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
+       rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size);
+       for (j = 0; j < rl; j++) {
+               bh = sb_bread(sb, rd + j);      /* root dir, first block */
+               if (bh == NULL)
+                       return "unable to read root entry.";
+               rootdir = (struct qnx4_inode_entry *) bh->b_data;
+               for (i = 0; i < QNX4_INODES_PER_BLOCK; i++, rootdir++) {
+                       QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname));
+                       if (strcmp(rootdir->di_fname, QNX4_BMNAME) != 0)
+                               continue;
+                       qnx4_sb(sb)->BitMap = kmemdup(rootdir,
+                                                     sizeof(struct qnx4_inode_entry),
+                                                     GFP_KERNEL);
                        brelse(bh);
-                       if (found != 0) {
-                               break;
-                       }
-               }
-               if (found == 0) {
-                       return "bitmap file not found.";
+                       if (!qnx4_sb(sb)->BitMap)
+                               return "not enough memory for bitmap inode";
+                       /* keep bitmap inode known */
+                       return NULL;
                }
+               brelse(bh);
        }
-       return NULL;
+       return "bitmap file not found.";
 }
 
 static int qnx4_fill_super(struct super_block *s, void *data, int silent)
@@ -270,7 +256,7 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
        if (IS_ERR(root)) {
                printk(KERN_ERR "qnx4: get inode failed\n");
                ret = PTR_ERR(root);
-               goto out;
+               goto outb;
        }
 
        ret = -ENOMEM;
@@ -283,6 +269,8 @@ static int qnx4_fill_super(struct super_block *s, void *data, int silent)
 
       outi:
        iput(root);
+      outb:
+       kfree(qs->BitMap);
       out:
        brelse(bh);
       outnobh:
index 5ec59b20cf761732f57e5eb76231579ab7c03b97..46741970371b3c720d554a7da92e9fd39d2d0cbe 100644 (file)
@@ -2125,6 +2125,8 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
                mutex_unlock(&dqopt->dqio_mutex);
                goto out_file_init;
        }
+       if (dqopt->flags & DQUOT_QUOTA_SYS_FILE)
+               dqopt->info[type].dqi_flags |= DQF_SYS_FILE;
        mutex_unlock(&dqopt->dqio_mutex);
        spin_lock(&dq_state_lock);
        dqopt->flags |= dquot_state_flag(flags, type);
@@ -2464,7 +2466,7 @@ int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
        spin_lock(&dq_data_lock);
        ii->dqi_bgrace = mi->dqi_bgrace;
        ii->dqi_igrace = mi->dqi_igrace;
-       ii->dqi_flags = mi->dqi_flags & DQF_MASK;
+       ii->dqi_flags = mi->dqi_flags & DQF_GETINFO_MASK;
        ii->dqi_valid = IIF_ALL;
        spin_unlock(&dq_data_lock);
        mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
@@ -2490,8 +2492,8 @@ int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
        if (ii->dqi_valid & IIF_IGRACE)
                mi->dqi_igrace = ii->dqi_igrace;
        if (ii->dqi_valid & IIF_FLAGS)
-               mi->dqi_flags = (mi->dqi_flags & ~DQF_MASK) |
-                               (ii->dqi_flags & DQF_MASK);
+               mi->dqi_flags = (mi->dqi_flags & ~DQF_SETINFO_MASK) |
+                               (ii->dqi_flags & DQF_SETINFO_MASK);
        spin_unlock(&dq_data_lock);
        mark_info_dirty(sb, type);
        /* Force write to disk */
index de41e1e46f0970b3c262fb577e0b6f332c6a59e5..6015c02296b7ad2adba75c7946b8fb4762a62265 100644 (file)
@@ -1186,6 +1186,8 @@ int freeze_super(struct super_block *sb)
                        printk(KERN_ERR
                                "VFS:Filesystem freeze failed\n");
                        sb->s_frozen = SB_UNFROZEN;
+                       smp_wmb();
+                       wake_up(&sb->s_wait_unfrozen);
                        deactivate_locked_super(sb);
                        return ret;
                }
index 62f4fb37789e0f5b634c44b7adc782d1c04c67fd..00012e31829d111fbd002461b7086a70c85d29f6 100644 (file)
@@ -493,6 +493,12 @@ int sysfs_attr_ns(struct kobject *kobj, const struct attribute *attr,
        const void *ns = NULL;
        int err;
 
+       if (!dir_sd) {
+               WARN(1, KERN_ERR "sysfs: kobject %s without dirent\n",
+                       kobject_name(kobj));
+               return -ENOENT;
+       }
+
        err = 0;
        if (!sysfs_ns_type(dir_sd))
                goto out;
index 4a802b4a90566aaf45e4e0c710faf870085caf6a..85eb81683a29ec3e06f74c5fe71a3a254acdedd8 100644 (file)
@@ -318,8 +318,11 @@ int sysfs_hash_and_remove(struct sysfs_dirent *dir_sd, const void *ns, const cha
        struct sysfs_addrm_cxt acxt;
        struct sysfs_dirent *sd;
 
-       if (!dir_sd)
+       if (!dir_sd) {
+               WARN(1, KERN_WARNING "sysfs: can not remove '%s', no directory\n",
+                       name);
                return -ENOENT;
+       }
 
        sysfs_addrm_start(&acxt, dir_sd);
 
index 574d4ee9b6253ea3d589f23288aa7385907642bb..74b9baf36ac39038f827c8e262ea62aa33d81de8 100644 (file)
@@ -111,8 +111,7 @@ xfs_ioend_new_eof(
        xfs_fsize_t             bsize;
 
        bsize = ioend->io_offset + ioend->io_size;
-       isize = MAX(ip->i_size, ip->i_new_size);
-       isize = MIN(isize, bsize);
+       isize = MIN(i_size_read(VFS_I(ip)), bsize);
        return isize > ip->i_d.di_size ? isize : 0;
 }
 
@@ -126,11 +125,7 @@ static inline bool xfs_ioend_is_append(struct xfs_ioend *ioend)
 }
 
 /*
- * Update on-disk file size now that data has been written to disk.  The
- * current in-memory file size is i_size.  If a write is beyond eof i_new_size
- * will be the intended file size until i_size is updated.  If this write does
- * not extend all the way to the valid file size then restrict this update to
- * the end of the write.
+ * Update on-disk file size now that data has been written to disk.
  *
  * This function does not block as blocking on the inode lock in IO completion
  * can lead to IO completion order dependency deadlocks.. If it can't get the
@@ -1278,6 +1273,15 @@ xfs_end_io_direct_write(
 {
        struct xfs_ioend        *ioend = iocb->private;
 
+       /*
+        * While the generic direct I/O code updates the inode size, it does
+        * so only after the end_io handler is called, which means our
+        * end_io handler thinks the on-disk size is outside the in-core
+        * size.  To prevent this just update it a little bit earlier here.
+        */
+       if (offset + size > i_size_read(ioend->io_inode))
+               i_size_write(ioend->io_inode, offset + size);
+
        /*
         * blockdev_direct_IO can return an error even after the I/O
         * completion handler was called.  Thus we need to protect
@@ -1340,12 +1344,11 @@ xfs_vm_write_failed(
 
        if (to > inode->i_size) {
                /*
-                * punch out the delalloc blocks we have already allocated. We
-                * don't call xfs_setattr() to do this as we may be in the
-                * middle of a multi-iovec write and so the vfs inode->i_size
-                * will not match the xfs ip->i_size and so it will zero too
-                * much. Hence we jus truncate the page cache to zero what is
-                * necessary and punch the delalloc blocks directly.
+                * Punch out the delalloc blocks we have already allocated.
+                *
+                * Don't bother with xfs_setattr given that nothing can have
+                * made it to disk yet as the page is still locked at this
+                * point.
                 */
                struct xfs_inode        *ip = XFS_I(inode);
                xfs_fileoff_t           start_fsb;
index 1e5d97f86ea819493b9acfd1584696fc7ce5d733..08b9ac644c3140f6f6cc007f6f3eab05331ff35b 100644 (file)
@@ -827,10 +827,6 @@ xfs_attr_inactive(xfs_inode_t *dp)
        if (error)
                goto out;
 
-       /*
-        * Commit the last in the sequence of transactions.
-        */
-       xfs_trans_log_inode(trans, dp, XFS_ILOG_CORE);
        error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
        xfs_iunlock(dp, XFS_ILOCK_EXCL);
 
index c1b55e5965517a9407f678610b62f29fdabf33b3..d25eafd4d28de31d326078dbfdc21140c69b5acb 100644 (file)
@@ -271,10 +271,6 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
        dp = args->dp;
        mp = dp->i_mount;
        dp->i_d.di_forkoff = forkoff;
-       dp->i_df.if_ext_max =
-               XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
-       dp->i_afp->if_ext_max =
-               XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
 
        ifp = dp->i_afp;
        ASSERT(ifp->if_flags & XFS_IFINLINE);
@@ -326,7 +322,6 @@ xfs_attr_fork_reset(
        ASSERT(ip->i_d.di_anextents == 0);
        ASSERT(ip->i_afp == NULL);
 
-       ip->i_df.if_ext_max = XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
        xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 }
 
@@ -389,10 +384,6 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
                                (args->op_flags & XFS_DA_OP_ADDNAME) ||
                                !(mp->m_flags & XFS_MOUNT_ATTR2) ||
                                dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
-               dp->i_afp->if_ext_max =
-                       XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
-               dp->i_df.if_ext_max =
-                       XFS_IFORK_DSIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
                xfs_trans_log_inode(args->trans, dp,
                                        XFS_ILOG_CORE | XFS_ILOG_ADATA);
        }
index d0ab78837057815f17605150d31a633c2eeb2739..188ef2fbd62880614a29ea0432e20707d5cf45a2 100644 (file)
@@ -249,7 +249,27 @@ xfs_bmbt_lookup_ge(
 }
 
 /*
-* Update the record referred to by cur to the value given
+ * Check if the inode needs to be converted to btree format.
+ */
+static inline bool xfs_bmap_needs_btree(struct xfs_inode *ip, int whichfork)
+{
+       return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
+               XFS_IFORK_NEXTENTS(ip, whichfork) >
+                       XFS_IFORK_MAXEXT(ip, whichfork);
+}
+
+/*
+ * Check if the inode should be converted to extent format.
+ */
+static inline bool xfs_bmap_wants_extents(struct xfs_inode *ip, int whichfork)
+{
+       return XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
+               XFS_IFORK_NEXTENTS(ip, whichfork) <=
+                       XFS_IFORK_MAXEXT(ip, whichfork);
+}
+
+/*
+ * Update the record referred to by cur to the value given
  * by [off, bno, len, state].
  * This either works (return 0) or gets an EFSCORRUPTED error.
  */
@@ -683,8 +703,8 @@ xfs_bmap_add_extent_delay_real(
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(i == 1, done);
                }
-               if (bma->ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-                   bma->ip->i_d.di_nextents > bma->ip->i_df.if_ext_max) {
+
+               if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
                        error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
                                        bma->firstblock, bma->flist,
                                        &bma->cur, 1, &tmp_rval, XFS_DATA_FORK);
@@ -767,8 +787,8 @@ xfs_bmap_add_extent_delay_real(
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(i == 1, done);
                }
-               if (bma->ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-                   bma->ip->i_d.di_nextents > bma->ip->i_df.if_ext_max) {
+
+               if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
                        error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
                                bma->firstblock, bma->flist, &bma->cur, 1,
                                &tmp_rval, XFS_DATA_FORK);
@@ -836,8 +856,8 @@ xfs_bmap_add_extent_delay_real(
                                goto done;
                        XFS_WANT_CORRUPTED_GOTO(i == 1, done);
                }
-               if (bma->ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-                   bma->ip->i_d.di_nextents > bma->ip->i_df.if_ext_max) {
+
+               if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
                        error = xfs_bmap_extents_to_btree(bma->tp, bma->ip,
                                        bma->firstblock, bma->flist, &bma->cur,
                                        1, &tmp_rval, XFS_DATA_FORK);
@@ -884,8 +904,7 @@ xfs_bmap_add_extent_delay_real(
        }
 
        /* convert to a btree if necessary */
-       if (XFS_IFORK_FORMAT(bma->ip, XFS_DATA_FORK) == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(bma->ip, XFS_DATA_FORK) > ifp->if_ext_max) {
+       if (xfs_bmap_needs_btree(bma->ip, XFS_DATA_FORK)) {
                int     tmp_logflags;   /* partial log flag return val */
 
                ASSERT(bma->cur == NULL);
@@ -1421,8 +1440,7 @@ xfs_bmap_add_extent_unwritten_real(
        }
 
        /* convert to a btree if necessary */
-       if (XFS_IFORK_FORMAT(ip, XFS_DATA_FORK) == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > ifp->if_ext_max) {
+       if (xfs_bmap_needs_btree(ip, XFS_DATA_FORK)) {
                int     tmp_logflags;   /* partial log flag return val */
 
                ASSERT(cur == NULL);
@@ -1812,8 +1830,7 @@ xfs_bmap_add_extent_hole_real(
        }
 
        /* convert to a btree if necessary */
-       if (XFS_IFORK_FORMAT(bma->ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(bma->ip, whichfork) > ifp->if_ext_max) {
+       if (xfs_bmap_needs_btree(bma->ip, whichfork)) {
                int     tmp_logflags;   /* partial log flag return val */
 
                ASSERT(bma->cur == NULL);
@@ -3037,8 +3054,7 @@ xfs_bmap_extents_to_btree(
 
        ifp = XFS_IFORK_PTR(ip, whichfork);
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS);
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+
        /*
         * Make space in the inode incore.
         */
@@ -3184,13 +3200,8 @@ xfs_bmap_forkoff_reset(
            ip->i_d.di_format != XFS_DINODE_FMT_BTREE) {
                uint    dfl_forkoff = xfs_default_attroffset(ip) >> 3;
 
-               if (dfl_forkoff > ip->i_d.di_forkoff) {
+               if (dfl_forkoff > ip->i_d.di_forkoff)
                        ip->i_d.di_forkoff = dfl_forkoff;
-                       ip->i_df.if_ext_max =
-                               XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t);
-                       ip->i_afp->if_ext_max =
-                               XFS_IFORK_ASIZE(ip) / sizeof(xfs_bmbt_rec_t);
-               }
        }
 }
 
@@ -3430,8 +3441,6 @@ xfs_bmap_add_attrfork(
        int                     error;          /* error return value */
 
        ASSERT(XFS_IFORK_Q(ip) == 0);
-       ASSERT(ip->i_df.if_ext_max ==
-              XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
 
        mp = ip->i_mount;
        ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
@@ -3486,12 +3495,9 @@ xfs_bmap_add_attrfork(
                error = XFS_ERROR(EINVAL);
                goto error1;
        }
-       ip->i_df.if_ext_max =
-               XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
+
        ASSERT(ip->i_afp == NULL);
        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
-       ip->i_afp->if_ext_max =
-               XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
        ip->i_afp->if_flags = XFS_IFEXTENTS;
        logflags = 0;
        xfs_bmap_init(&flist, &firstblock);
@@ -3535,20 +3541,17 @@ xfs_bmap_add_attrfork(
                } else
                        spin_unlock(&mp->m_sb_lock);
        }
-       if ((error = xfs_bmap_finish(&tp, &flist, &committed)))
+
+       error = xfs_bmap_finish(&tp, &flist, &committed);
+       if (error)
                goto error2;
-       error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
-       ASSERT(ip->i_df.if_ext_max ==
-              XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
-       return error;
+       return xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 error2:
        xfs_bmap_cancel(&flist);
 error1:
        xfs_iunlock(ip, XFS_ILOCK_EXCL);
 error0:
        xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
-       ASSERT(ip->i_df.if_ext_max ==
-              XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
        return error;
 }
 
@@ -3994,11 +3997,8 @@ xfs_bmap_one_block(
        xfs_bmbt_irec_t s;              /* internal version of extent */
 
 #ifndef DEBUG
-       if (whichfork == XFS_DATA_FORK) {
-               return S_ISREG(ip->i_d.di_mode) ?
-                       (ip->i_size == ip->i_mount->m_sb.sb_blocksize) :
-                       (ip->i_d.di_size == ip->i_mount->m_sb.sb_blocksize);
-       }
+       if (whichfork == XFS_DATA_FORK)
+               return XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize;
 #endif /* !DEBUG */
        if (XFS_IFORK_NEXTENTS(ip, whichfork) != 1)
                return 0;
@@ -4010,7 +4010,7 @@ xfs_bmap_one_block(
        xfs_bmbt_get_all(ep, &s);
        rval = s.br_startoff == 0 && s.br_blockcount == 1;
        if (rval && whichfork == XFS_DATA_FORK)
-               ASSERT(ip->i_size == ip->i_mount->m_sb.sb_blocksize);
+               ASSERT(XFS_ISIZE(ip) == ip->i_mount->m_sb.sb_blocksize);
        return rval;
 }
 
@@ -4379,8 +4379,6 @@ xfs_bmapi_read(
        XFS_STATS_INC(xs_blk_mapr);
 
        ifp = XFS_IFORK_PTR(ip, whichfork);
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
 
        if (!(ifp->if_flags & XFS_IFEXTENTS)) {
                error = xfs_iread_extents(NULL, ip, whichfork);
@@ -4871,8 +4869,6 @@ xfs_bmapi_write(
                return XFS_ERROR(EIO);
 
        ifp = XFS_IFORK_PTR(ip, whichfork);
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
 
        XFS_STATS_INC(xs_blk_mapw);
 
@@ -4981,8 +4977,7 @@ xfs_bmapi_write(
        /*
         * Transform from btree to extents, give it cur.
         */
-       if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
-           XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
+       if (xfs_bmap_wants_extents(ip, whichfork)) {
                int             tmp_logflags = 0;
 
                ASSERT(bma.cur);
@@ -4992,10 +4987,10 @@ xfs_bmapi_write(
                if (error)
                        goto error0;
        }
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+
        ASSERT(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE ||
-              XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max);
+              XFS_IFORK_NEXTENTS(ip, whichfork) >
+               XFS_IFORK_MAXEXT(ip, whichfork));
        error = 0;
 error0:
        /*
@@ -5095,8 +5090,7 @@ xfs_bunmapi(
 
        ASSERT(len > 0);
        ASSERT(nexts >= 0);
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+
        if (!(ifp->if_flags & XFS_IFEXTENTS) &&
            (error = xfs_iread_extents(tp, ip, whichfork)))
                return error;
@@ -5322,7 +5316,8 @@ xfs_bunmapi(
                 */
                if (!wasdel && xfs_trans_get_block_res(tp) == 0 &&
                    XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
-                   XFS_IFORK_NEXTENTS(ip, whichfork) >= ifp->if_ext_max &&
+                   XFS_IFORK_NEXTENTS(ip, whichfork) >= /* Note the >= */
+                       XFS_IFORK_MAXEXT(ip, whichfork) &&
                    del.br_startoff > got.br_startoff &&
                    del.br_startoff + del.br_blockcount <
                    got.br_startoff + got.br_blockcount) {
@@ -5353,13 +5348,11 @@ nodelete:
                }
        }
        *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0;
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
+
        /*
         * Convert to a btree if necessary.
         */
-       if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(ip, whichfork) > ifp->if_ext_max) {
+       if (xfs_bmap_needs_btree(ip, whichfork)) {
                ASSERT(cur == NULL);
                error = xfs_bmap_extents_to_btree(tp, ip, firstblock, flist,
                        &cur, 0, &tmp_logflags, whichfork);
@@ -5370,8 +5363,7 @@ nodelete:
        /*
         * transform from btree to extents, give it cur
         */
-       else if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_BTREE &&
-                XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max) {
+       else if (xfs_bmap_wants_extents(ip, whichfork)) {
                ASSERT(cur != NULL);
                error = xfs_bmap_btree_to_extents(tp, ip, cur, &tmp_logflags,
                        whichfork);
@@ -5382,8 +5374,6 @@ nodelete:
        /*
         * transform from extents to local?
         */
-       ASSERT(ifp->if_ext_max ==
-              XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t));
        error = 0;
 error0:
        /*
@@ -5434,7 +5424,7 @@ xfs_getbmapx_fix_eof_hole(
        if (startblock == HOLESTARTBLOCK) {
                mp = ip->i_mount;
                out->bmv_block = -1;
-               fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, ip->i_size));
+               fixlen = XFS_FSB_TO_BB(mp, XFS_B_TO_FSB(mp, XFS_ISIZE(ip)));
                fixlen -= out->bmv_offset;
                if (prealloced && out->bmv_offset + out->bmv_length == end) {
                        /* Came to hole at EOF. Trim it. */
@@ -5522,7 +5512,7 @@ xfs_getbmap(
                        fixlen = XFS_MAXIOFFSET(mp);
                } else {
                        prealloced = 0;
-                       fixlen = ip->i_size;
+                       fixlen = XFS_ISIZE(ip);
                }
        }
 
@@ -5551,7 +5541,7 @@ xfs_getbmap(
 
        xfs_ilock(ip, XFS_IOLOCK_SHARED);
        if (whichfork == XFS_DATA_FORK && !(iflags & BMV_IF_DELALLOC)) {
-               if (ip->i_delayed_blks || ip->i_size > ip->i_d.di_size) {
+               if (ip->i_delayed_blks || XFS_ISIZE(ip) > ip->i_d.di_size) {
                        error = xfs_flush_pages(ip, 0, -1, 0, FI_REMAPF);
                        if (error)
                                goto out_unlock_iolock;
index 654dc6f05bac7781f288f63fbcb1a5a970b06faa..dd974a55c77daee6de56a44c527e871d7cfe7fca 100644 (file)
@@ -163,12 +163,14 @@ xfs_swap_extents_check_format(
 
        /* Check temp in extent form to max in target */
        if (tip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) > ip->i_df.if_ext_max)
+           XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) >
+                       XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
                return EINVAL;
 
        /* Check target in extent form to max in temp */
        if (ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS &&
-           XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) > tip->i_df.if_ext_max)
+           XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) >
+                       XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
                return EINVAL;
 
        /*
@@ -180,18 +182,25 @@ xfs_swap_extents_check_format(
         * (a common defrag case) which will occur when the temp inode is in
         * extent format...
         */
-       if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
-           ((XFS_IFORK_BOFF(ip) &&
-             tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip)) ||
-            XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <= ip->i_df.if_ext_max))
-               return EINVAL;
+       if (tip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+               if (XFS_IFORK_BOFF(ip) &&
+                   tip->i_df.if_broot_bytes > XFS_IFORK_BOFF(ip))
+                       return EINVAL;
+               if (XFS_IFORK_NEXTENTS(tip, XFS_DATA_FORK) <=
+                   XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK))
+                       return EINVAL;
+       }
 
        /* Reciprocal target->temp btree format checks */
-       if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE &&
-           ((XFS_IFORK_BOFF(tip) &&
-             ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip)) ||
-            XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <= tip->i_df.if_ext_max))
-               return EINVAL;
+       if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
+               if (XFS_IFORK_BOFF(tip) &&
+                   ip->i_df.if_broot_bytes > XFS_IFORK_BOFF(tip))
+                       return EINVAL;
+
+               if (XFS_IFORK_NEXTENTS(ip, XFS_DATA_FORK) <=
+                   XFS_IFORK_MAXEXT(tip, XFS_DATA_FORK))
+                       return EINVAL;
+       }
 
        return 0;
 }
@@ -348,16 +357,6 @@ xfs_swap_extents(
        *ifp = *tifp;           /* struct copy */
        *tifp = *tempifp;       /* struct copy */
 
-       /*
-        * Fix the in-memory data fork values that are dependent on the fork
-        * offset in the inode. We can't assume they remain the same as attr2
-        * has dynamic fork offsets.
-        */
-       ifp->if_ext_max = XFS_IFORK_SIZE(ip, XFS_DATA_FORK) /
-                                       (uint)sizeof(xfs_bmbt_rec_t);
-       tifp->if_ext_max = XFS_IFORK_SIZE(tip, XFS_DATA_FORK) /
-                                       (uint)sizeof(xfs_bmbt_rec_t);
-
        /*
         * Fix the on-disk inode values
         */
index f675f3d9d7b3b3693cfabc0437d52ca31a4f598a..7e5bc872f2b4fb12d67f3da3796f3c5b86ac162c 100644 (file)
@@ -327,7 +327,7 @@ xfs_file_aio_read(
                                mp->m_rtdev_targp : mp->m_ddev_targp;
                if ((iocb->ki_pos & target->bt_smask) ||
                    (size & target->bt_smask)) {
-                       if (iocb->ki_pos == ip->i_size)
+                       if (iocb->ki_pos == i_size_read(inode))
                                return 0;
                        return -XFS_ERROR(EINVAL);
                }
@@ -412,51 +412,6 @@ xfs_file_splice_read(
        return ret;
 }
 
-STATIC void
-xfs_aio_write_isize_update(
-       struct inode    *inode,
-       loff_t          *ppos,
-       ssize_t         bytes_written)
-{
-       struct xfs_inode        *ip = XFS_I(inode);
-       xfs_fsize_t             isize = i_size_read(inode);
-
-       if (bytes_written > 0)
-               XFS_STATS_ADD(xs_write_bytes, bytes_written);
-
-       if (unlikely(bytes_written < 0 && bytes_written != -EFAULT &&
-                                       *ppos > isize))
-               *ppos = isize;
-
-       if (*ppos > ip->i_size) {
-               xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
-               if (*ppos > ip->i_size)
-                       ip->i_size = *ppos;
-               xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
-       }
-}
-
-/*
- * If this was a direct or synchronous I/O that failed (such as ENOSPC) then
- * part of the I/O may have been written to disk before the error occurred.  In
- * this case the on-disk file size may have been adjusted beyond the in-memory
- * file size and now needs to be truncated back.
- */
-STATIC void
-xfs_aio_write_newsize_update(
-       struct xfs_inode        *ip,
-       xfs_fsize_t             new_size)
-{
-       if (new_size == ip->i_new_size) {
-               xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
-               if (new_size == ip->i_new_size)
-                       ip->i_new_size = 0;
-               if (ip->i_d.di_size > ip->i_size)
-                       ip->i_d.di_size = ip->i_size;
-               xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
-       }
-}
-
 /*
  * xfs_file_splice_write() does not use xfs_rw_ilock() because
  * generic_file_splice_write() takes the i_mutex itself. This, in theory,
@@ -475,7 +430,6 @@ xfs_file_splice_write(
 {
        struct inode            *inode = outfilp->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
-       xfs_fsize_t             new_size;
        int                     ioflags = 0;
        ssize_t                 ret;
 
@@ -489,19 +443,12 @@ xfs_file_splice_write(
 
        xfs_ilock(ip, XFS_IOLOCK_EXCL);
 
-       new_size = *ppos + count;
-
-       xfs_ilock(ip, XFS_ILOCK_EXCL);
-       if (new_size > ip->i_size)
-               ip->i_new_size = new_size;
-       xfs_iunlock(ip, XFS_ILOCK_EXCL);
-
        trace_xfs_file_splice_write(ip, count, *ppos, ioflags);
 
        ret = generic_file_splice_write(pipe, outfilp, ppos, count, flags);
+       if (ret > 0)
+               XFS_STATS_ADD(xs_write_bytes, ret);
 
-       xfs_aio_write_isize_update(inode, ppos, ret);
-       xfs_aio_write_newsize_update(ip, new_size);
        xfs_iunlock(ip, XFS_IOLOCK_EXCL);
        return ret;
 }
@@ -689,28 +636,26 @@ out_lock:
 /*
  * Common pre-write limit and setup checks.
  *
- * Returns with iolock held according to @iolock.
+ * Called with the iolock held either shared or exclusive according to
+ * @iolock, and returns with it held.  Might upgrade the iolock to exclusive
+ * if called for a direct write beyond i_size.
  */
 STATIC ssize_t
 xfs_file_aio_write_checks(
        struct file             *file,
        loff_t                  *pos,
        size_t                  *count,
-       xfs_fsize_t             *new_sizep,
        int                     *iolock)
 {
        struct inode            *inode = file->f_mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
-       xfs_fsize_t             new_size;
        int                     error = 0;
 
        xfs_rw_ilock(ip, XFS_ILOCK_EXCL);
-       *new_sizep = 0;
 restart:
        error = generic_write_checks(file, pos, count, S_ISBLK(inode->i_mode));
        if (error) {
-               xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
-               *iolock = 0;
+               xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
                return error;
        }
 
@@ -720,36 +665,21 @@ restart:
        /*
         * If the offset is beyond the size of the file, we need to zero any
         * blocks that fall between the existing EOF and the start of this
-        * write. There is no need to issue zeroing if another in-flght IO ends
-        * at or before this one If zeronig is needed and we are currently
-        * holding the iolock shared, we need to update it to exclusive which
-        * involves dropping all locks and relocking to maintain correct locking
-        * order. If we do this, restart the function to ensure all checks and
-        * values are still valid.
+        * write.  If zeroing is needed and we are currently holding the
+        * iolock shared, we need to update it to exclusive which involves
+        * dropping all locks and relocking to maintain correct locking order.
+        * If we do this, restart the function to ensure all checks and values
+        * are still valid.
         */
-       if ((ip->i_new_size && *pos > ip->i_new_size) ||
-           (!ip->i_new_size && *pos > ip->i_size)) {
+       if (*pos > i_size_read(inode)) {
                if (*iolock == XFS_IOLOCK_SHARED) {
                        xfs_rw_iunlock(ip, XFS_ILOCK_EXCL | *iolock);
                        *iolock = XFS_IOLOCK_EXCL;
                        xfs_rw_ilock(ip, XFS_ILOCK_EXCL | *iolock);
                        goto restart;
                }
-               error = -xfs_zero_eof(ip, *pos, ip->i_size);
+               error = -xfs_zero_eof(ip, *pos, i_size_read(inode));
        }
-
-       /*
-        * If this IO extends beyond EOF, we may need to update ip->i_new_size.
-        * We have already zeroed space beyond EOF (if necessary).  Only update
-        * ip->i_new_size if this IO ends beyond any other in-flight writes.
-        */
-       new_size = *pos + *count;
-       if (new_size > ip->i_size) {
-               if (new_size > ip->i_new_size)
-                       ip->i_new_size = new_size;
-               *new_sizep = new_size;
-       }
-
        xfs_rw_iunlock(ip, XFS_ILOCK_EXCL);
        if (error)
                return error;
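
The comment in the hunk above describes a classic reader-writer upgrade problem: a shared iolock cannot be promoted in place, so the code drops everything, re-acquires exclusively and jumps back to restart so every check is redone under the new lock. Below is a minimal, hedged user-space sketch of the same drop-relock-revalidate idiom using POSIX rwlocks purely for illustration; xfs_rw_ilock()/xfs_rw_iunlock() and the i_size comparison are the kernel's own, everything in the sketch is hypothetical.

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t iolock = PTHREAD_RWLOCK_INITIALIZER;
static long long file_size = 4096;        /* stand-in for i_size_read(inode) */

/* Pre-write check: a shared rwlock cannot be upgraded in place, so drop it,
 * re-take it exclusively and redo every check from the top. */
static void write_checks(long long pos, int exclusive)
{
restart:
	if (exclusive)
		pthread_rwlock_wrlock(&iolock);
	else
		pthread_rwlock_rdlock(&iolock);

	if (pos > file_size && !exclusive) {
		pthread_rwlock_unlock(&iolock);   /* drop everything ... */
		exclusive = 1;                    /* ... switch to exclusive ... */
		goto restart;                     /* ... and revalidate from scratch */
	}

	if (pos > file_size)
		file_size = pos;                  /* "zero between EOF and pos" stand-in */

	pthread_rwlock_unlock(&iolock);
}

int main(void)
{
	write_checks(8192, 0);
	printf("size is now %lld\n", file_size);
	return 0;
}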
@@ -794,9 +724,7 @@ xfs_file_dio_aio_write(
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos,
-       size_t                  ocount,
-       xfs_fsize_t             *new_size,
-       int                     *iolock)
+       size_t                  ocount)
 {
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
@@ -806,10 +734,10 @@ xfs_file_dio_aio_write(
        ssize_t                 ret = 0;
        size_t                  count = ocount;
        int                     unaligned_io = 0;
+       int                     iolock;
        struct xfs_buftarg      *target = XFS_IS_REALTIME_INODE(ip) ?
                                        mp->m_rtdev_targp : mp->m_ddev_targp;
 
-       *iolock = 0;
        if ((pos & target->bt_smask) || (count & target->bt_smask))
                return -XFS_ERROR(EINVAL);
 
@@ -824,31 +752,31 @@ xfs_file_dio_aio_write(
         * EOF zeroing cases and fill out the new inode size as appropriate.
         */
        if (unaligned_io || mapping->nrpages)
-               *iolock = XFS_IOLOCK_EXCL;
+               iolock = XFS_IOLOCK_EXCL;
        else
-               *iolock = XFS_IOLOCK_SHARED;
-       xfs_rw_ilock(ip, *iolock);
+               iolock = XFS_IOLOCK_SHARED;
+       xfs_rw_ilock(ip, iolock);
 
        /*
         * Recheck if there are cached pages that need invalidate after we got
         * the iolock to protect against other threads adding new pages while
         * we were waiting for the iolock.
         */
-       if (mapping->nrpages && *iolock == XFS_IOLOCK_SHARED) {
-               xfs_rw_iunlock(ip, *iolock);
-               *iolock = XFS_IOLOCK_EXCL;
-               xfs_rw_ilock(ip, *iolock);
+       if (mapping->nrpages && iolock == XFS_IOLOCK_SHARED) {
+               xfs_rw_iunlock(ip, iolock);
+               iolock = XFS_IOLOCK_EXCL;
+               xfs_rw_ilock(ip, iolock);
        }
 
-       ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock);
+       ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
        if (ret)
-               return ret;
+               goto out;
 
        if (mapping->nrpages) {
                ret = -xfs_flushinval_pages(ip, (pos & PAGE_CACHE_MASK), -1,
                                                        FI_REMAPF_LOCKED);
                if (ret)
-                       return ret;
+                       goto out;
        }
 
        /*
@@ -857,15 +785,18 @@ xfs_file_dio_aio_write(
         */
        if (unaligned_io)
                inode_dio_wait(inode);
-       else if (*iolock == XFS_IOLOCK_EXCL) {
+       else if (iolock == XFS_IOLOCK_EXCL) {
                xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
-               *iolock = XFS_IOLOCK_SHARED;
+               iolock = XFS_IOLOCK_SHARED;
        }
 
        trace_xfs_file_direct_write(ip, count, iocb->ki_pos, 0);
        ret = generic_file_direct_write(iocb, iovp,
                        &nr_segs, pos, &iocb->ki_pos, count, ocount);
 
+out:
+       xfs_rw_iunlock(ip, iolock);
+
        /* No fallback to buffered IO on errors for XFS. */
        ASSERT(ret < 0 || ret == count);
        return ret;
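
The direct-write path above rejects any I/O whose position or length is not aligned to the buffer target's sector mask (bt_smask). A small hedged sketch of that mask test follows; the 512-byte sector size is an assumption for illustration, not taken from a real buftarg.

#include <stdio.h>

/* smask = sector_size - 1, so (x & smask) != 0 means x is misaligned. */
#define SECTOR_SIZE 512u
#define SECTOR_MASK (SECTOR_SIZE - 1)

static int dio_aligned(unsigned long long pos, unsigned long long count)
{
	return !((pos & SECTOR_MASK) || (count & SECTOR_MASK));
}

int main(void)
{
	printf("%d\n", dio_aligned(4096, 8192));   /* 1: both multiples of 512 */
	printf("%d\n", dio_aligned(4100, 8192));   /* 0: pos is misaligned */
	return 0;
}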
@@ -877,9 +808,7 @@ xfs_file_buffered_aio_write(
        const struct iovec      *iovp,
        unsigned long           nr_segs,
        loff_t                  pos,
-       size_t                  ocount,
-       xfs_fsize_t             *new_size,
-       int                     *iolock)
+       size_t                  ocount)
 {
        struct file             *file = iocb->ki_filp;
        struct address_space    *mapping = file->f_mapping;
@@ -887,14 +816,14 @@ xfs_file_buffered_aio_write(
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
        int                     enospc = 0;
+       int                     iolock = XFS_IOLOCK_EXCL;
        size_t                  count = ocount;
 
-       *iolock = XFS_IOLOCK_EXCL;
-       xfs_rw_ilock(ip, *iolock);
+       xfs_rw_ilock(ip, iolock);
 
-       ret = xfs_file_aio_write_checks(file, &pos, &count, new_size, iolock);
+       ret = xfs_file_aio_write_checks(file, &pos, &count, &iolock);
        if (ret)
-               return ret;
+               goto out;
 
        /* We can write back this queue in page reclaim */
        current->backing_dev_info = mapping->backing_dev_info;
@@ -908,13 +837,15 @@ write_retry:
         * page locks and retry *once*
         */
        if (ret == -ENOSPC && !enospc) {
-               ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
-               if (ret)
-                       return ret;
                enospc = 1;
-               goto write_retry;
+               ret = -xfs_flush_pages(ip, 0, -1, 0, FI_NONE);
+               if (!ret)
+                       goto write_retry;
        }
+
        current->backing_dev_info = NULL;
+out:
+       xfs_rw_iunlock(ip, iolock);
        return ret;
 }
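
The reordered ENOSPC handling above retries the buffered write exactly once after flushing dirty pages, and only if the flush itself succeeded. A hedged user-space analog of that retry-once shape; do_write() and flush_pages() are hypothetical stand-ins for the generic write path and xfs_flush_pages().

#include <errno.h>
#include <stdio.h>

static int write_calls;

static long do_write(void)
{
	return ++write_calls == 1 ? -ENOSPC : 4096;  /* fail once, then succeed */
}

static int flush_pages(void)
{
	return 0;                                    /* pretend the flush freed space */
}

static long write_with_enospc_retry(void)
{
	int enospc = 0;
	long ret;

write_retry:
	ret = do_write();
	if (ret == -ENOSPC && !enospc) {
		enospc = 1;                 /* retry at most once */
		ret = flush_pages();
		if (!ret)
			goto write_retry;   /* flush succeeded: try the write again */
	}
	return ret;
}

int main(void)
{
	printf("ret = %ld after %d write attempt(s)\n",
	       write_with_enospc_retry(), write_calls);
	return 0;
}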
 
@@ -930,9 +861,7 @@ xfs_file_aio_write(
        struct inode            *inode = mapping->host;
        struct xfs_inode        *ip = XFS_I(inode);
        ssize_t                 ret;
-       int                     iolock;
        size_t                  ocount = 0;
-       xfs_fsize_t             new_size = 0;
 
        XFS_STATS_INC(xs_write_calls);
 
@@ -951,33 +880,22 @@ xfs_file_aio_write(
                return -EIO;
 
        if (unlikely(file->f_flags & O_DIRECT))
-               ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos,
-                                               ocount, &new_size, &iolock);
+               ret = xfs_file_dio_aio_write(iocb, iovp, nr_segs, pos, ocount);
        else
                ret = xfs_file_buffered_aio_write(iocb, iovp, nr_segs, pos,
-                                               ocount, &new_size, &iolock);
-
-       xfs_aio_write_isize_update(inode, &iocb->ki_pos, ret);
+                                                 ocount);
 
-       if (ret <= 0)
-               goto out_unlock;
+       if (ret > 0) {
+               ssize_t err;
 
-       /* Handle various SYNC-type writes */
-       if ((file->f_flags & O_DSYNC) || IS_SYNC(inode)) {
-               loff_t end = pos + ret - 1;
-               int error;
+               XFS_STATS_ADD(xs_write_bytes, ret);
 
-               xfs_rw_iunlock(ip, iolock);
-               error = xfs_file_fsync(file, pos, end,
-                                     (file->f_flags & __O_SYNC) ? 0 : 1);
-               xfs_rw_ilock(ip, iolock);
-               if (error)
-                       ret = error;
+               /* Handle various SYNC-type writes */
+               err = generic_write_sync(file, pos, ret);
+               if (err < 0)
+                       ret = err;
        }
 
-out_unlock:
-       xfs_aio_write_newsize_update(ip, new_size);
-       xfs_rw_iunlock(ip, iolock);
        return ret;
 }
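
The rewritten tail of xfs_file_aio_write() lets generic_write_sync() handle O_DSYNC/IS_SYNC semantics and, if the sync fails, overrides an otherwise successful byte count with the error. A hedged user-space analog of that pattern is sketched below; it syncs unconditionally for brevity, whereas generic_write_sync() only acts for sync-mandating files.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* Write, then prefer the sync error over the byte count, mirroring the
 * "if (err < 0) ret = err" logic in the hunk above. */
static ssize_t write_and_sync(int fd, const void *buf, size_t len)
{
	ssize_t ret = write(fd, buf, len);

	if (ret > 0) {
		if (fdatasync(fd) < 0)
			ret = -1;   /* sync failure overrides a "successful" write */
	}
	return ret;
}

int main(void)
{
	int fd = open("/tmp/xfs_sync_demo", O_CREAT | O_WRONLY | O_TRUNC, 0600);

	if (fd < 0)
		return 1;
	printf("wrote %zd bytes\n", write_and_sync(fd, "hello\n", 6));
	close(fd);
	return 0;
}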
 
index ed88ed16811c6ba33c1d211b3195bd1dcea18484..652b875a9d4c441bd3c759df185c153dfee0e8f5 100644 (file)
@@ -90,7 +90,7 @@ xfs_wait_on_pages(
 
        if (mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK)) {
                return -filemap_fdatawait_range(mapping, first,
-                                       last == -1 ? ip->i_size - 1 : last);
+                                       last == -1 ? XFS_ISIZE(ip) - 1 : last);
        }
        return 0;
 }
index 3960a066d7ffcb06a02aadaa34d874a92c0b643b..8c3e46394d484c3fbd798913c6862d90b3f820e7 100644 (file)
@@ -77,7 +77,7 @@ xfs_inode_alloc(
 
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!spin_is_locked(&ip->i_flags_lock));
-       ASSERT(completion_done(&ip->i_flush));
+       ASSERT(!xfs_isiflocked(ip));
        ASSERT(ip->i_ino == 0);
 
        mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
@@ -94,8 +94,6 @@ xfs_inode_alloc(
        ip->i_update_core = 0;
        ip->i_delayed_blks = 0;
        memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
-       ip->i_size = 0;
-       ip->i_new_size = 0;
 
        return ip;
 }
@@ -150,7 +148,7 @@ xfs_inode_free(
        /* asserts to verify all state is correct here */
        ASSERT(atomic_read(&ip->i_pincount) == 0);
        ASSERT(!spin_is_locked(&ip->i_flags_lock));
-       ASSERT(completion_done(&ip->i_flush));
+       ASSERT(!xfs_isiflocked(ip));
 
        /*
         * Because we use RCU freeing we need to ensure the inode always
@@ -450,8 +448,6 @@ again:
 
        *ipp = ip;
 
-       ASSERT(ip->i_df.if_ext_max ==
-              XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t));
        /*
         * If we have a real type for an on-disk inode, we can set ops(&unlock)
         * now.  If it's a new inode being created, xfs_ialloc will handle it.
@@ -715,3 +711,19 @@ xfs_isilocked(
        return 0;
 }
 #endif
+
+void
+__xfs_iflock(
+       struct xfs_inode        *ip)
+{
+       wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IFLOCK_BIT);
+       DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IFLOCK_BIT);
+
+       do {
+               prepare_to_wait_exclusive(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+               if (xfs_isiflocked(ip))
+                       io_schedule();
+       } while (!xfs_iflock_nowait(ip));
+
+       finish_wait(wq, &wait.wait);
+}
index 9dda7cc328485014eb86baf48a013f3833faf27f..b21022499c2e8f302699f80ca2af344301fee941 100644 (file)
@@ -299,11 +299,8 @@ xfs_iformat(
 {
        xfs_attr_shortform_t    *atp;
        int                     size;
-       int                     error;
+       int                     error = 0;
        xfs_fsize_t             di_size;
-       ip->i_df.if_ext_max =
-               XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
-       error = 0;
 
        if (unlikely(be32_to_cpu(dip->di_nextents) +
                     be16_to_cpu(dip->di_anextents) >
@@ -350,7 +347,6 @@ xfs_iformat(
                        return XFS_ERROR(EFSCORRUPTED);
                }
                ip->i_d.di_size = 0;
-               ip->i_size = 0;
                ip->i_df.if_u2.if_rdev = xfs_dinode_get_rdev(dip);
                break;
 
@@ -409,10 +405,10 @@ xfs_iformat(
        }
        if (!XFS_DFORK_Q(dip))
                return 0;
+
        ASSERT(ip->i_afp == NULL);
        ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP | KM_NOFS);
-       ip->i_afp->if_ext_max =
-               XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
+
        switch (dip->di_aformat) {
        case XFS_DINODE_FMT_LOCAL:
                atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
@@ -604,10 +600,11 @@ xfs_iformat_btree(
         * or the number of extents is greater than the number of
         * blocks.
         */
-       if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
-           || XFS_BMDR_SPACE_CALC(nrecs) >
-                       XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
-           || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
+       if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <=
+                       XFS_IFORK_MAXEXT(ip, whichfork) ||
+                    XFS_BMDR_SPACE_CALC(nrecs) >
+                       XFS_DFORK_SIZE(dip, ip->i_mount, whichfork) ||
+                    XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
                xfs_warn(ip->i_mount, "corrupt inode %Lu (btree).",
                        (unsigned long long) ip->i_ino);
                XFS_CORRUPTION_ERROR("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
@@ -835,12 +832,6 @@ xfs_iread(
                 * with the uninitialized part of it.
                 */
                ip->i_d.di_mode = 0;
-               /*
-                * Initialize the per-fork minima and maxima for a new
-                * inode here.  xfs_iformat will do it for old inodes.
-                */
-               ip->i_df.if_ext_max =
-                       XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
        }
 
        /*
@@ -861,7 +852,6 @@ xfs_iread(
        }
 
        ip->i_delayed_blks = 0;
-       ip->i_size = ip->i_d.di_size;
 
        /*
         * Mark the buffer containing the inode as something to keep
@@ -1051,7 +1041,6 @@ xfs_ialloc(
        }
 
        ip->i_d.di_size = 0;
-       ip->i_size = 0;
        ip->i_d.di_nextents = 0;
        ASSERT(ip->i_d.di_nblocks == 0);
 
@@ -1165,52 +1154,6 @@ xfs_ialloc(
        return 0;
 }
 
-/*
- * Check to make sure that there are no blocks allocated to the
- * file beyond the size of the file.  We don't check this for
- * files with fixed size extents or real time extents, but we
- * at least do it for regular files.
- */
-#ifdef DEBUG
-STATIC void
-xfs_isize_check(
-       struct xfs_inode        *ip,
-       xfs_fsize_t             isize)
-{
-       struct xfs_mount        *mp = ip->i_mount;
-       xfs_fileoff_t           map_first;
-       int                     nimaps;
-       xfs_bmbt_irec_t         imaps[2];
-       int                     error;
-
-       if (!S_ISREG(ip->i_d.di_mode))
-               return;
-
-       if (XFS_IS_REALTIME_INODE(ip))
-               return;
-
-       if (ip->i_d.di_flags & XFS_DIFLAG_EXTSIZE)
-               return;
-
-       nimaps = 2;
-       map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
-       /*
-        * The filesystem could be shutting down, so bmapi may return
-        * an error.
-        */
-       error = xfs_bmapi_read(ip, map_first,
-                        (XFS_B_TO_FSB(mp,
-                              (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) - map_first),
-                        imaps, &nimaps, XFS_BMAPI_ENTIRE);
-       if (error)
-               return;
-       ASSERT(nimaps == 1);
-       ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
-}
-#else  /* DEBUG */
-#define xfs_isize_check(ip, isize)
-#endif /* DEBUG */
-
 /*
  * Free up the underlying blocks past new_size.  The new size must be smaller
  * than the current size.  This routine can be used both for the attribute and
@@ -1252,12 +1195,14 @@ xfs_itruncate_extents(
        int                     done = 0;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
-       ASSERT(new_size <= ip->i_size);
+       ASSERT(new_size <= XFS_ISIZE(ip));
        ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
        ASSERT(ip->i_itemp != NULL);
        ASSERT(ip->i_itemp->ili_lock_flags == 0);
        ASSERT(!XFS_NOT_DQATTACHED(mp, ip));
 
+       trace_xfs_itruncate_extents_start(ip, new_size);
+
        /*
         * Since it is possible for space to become allocated beyond
         * the end of the file (in a crash where the space is allocated
@@ -1325,6 +1270,14 @@ xfs_itruncate_extents(
                        goto out;
        }
 
+       /*
+        * Always re-log the inode so that our permanent transaction can keep
+        * on rolling it forward in the log.
+        */
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       trace_xfs_itruncate_extents_end(ip, new_size);
+
 out:
        *tpp = tp;
        return error;
@@ -1338,74 +1291,6 @@ out_bmap_cancel:
        goto out;
 }
 
-int
-xfs_itruncate_data(
-       struct xfs_trans        **tpp,
-       struct xfs_inode        *ip,
-       xfs_fsize_t             new_size)
-{
-       int                     error;
-
-       trace_xfs_itruncate_data_start(ip, new_size);
-
-       /*
-        * The first thing we do is set the size to new_size permanently on
-        * disk.  This way we don't have to worry about anyone ever being able
-        * to look at the data being freed even in the face of a crash.
-        * What we're getting around here is the case where we free a block, it
-        * is allocated to another file, it is written to, and then we crash.
-        * If the new data gets written to the file but the log buffers
-        * containing the free and reallocation don't, then we'd end up with
-        * garbage in the blocks being freed.  As long as we make the new_size
-        * permanent before actually freeing any blocks it doesn't matter if
-        * they get written to.
-        */
-       if (ip->i_d.di_nextents > 0) {
-               /*
-                * If we are not changing the file size then do not update
-                * the on-disk file size - we may be called from
-                * xfs_inactive_free_eofblocks().  If we update the on-disk
-                * file size and then the system crashes before the contents
-                * of the file are flushed to disk then the files may be
-                * full of holes (ie NULL files bug).
-                */
-               if (ip->i_size != new_size) {
-                       ip->i_d.di_size = new_size;
-                       ip->i_size = new_size;
-                       xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
-               }
-       }
-
-       error = xfs_itruncate_extents(tpp, ip, XFS_DATA_FORK, new_size);
-       if (error)
-               return error;
-
-       /*
-        * If we are not changing the file size then do not update the on-disk
-        * file size - we may be called from xfs_inactive_free_eofblocks().
-        * If we update the on-disk file size and then the system crashes
-        * before the contents of the file are flushed to disk then the files
-        * may be full of holes (ie NULL files bug).
-        */
-       xfs_isize_check(ip, new_size);
-       if (ip->i_size != new_size) {
-               ip->i_d.di_size = new_size;
-               ip->i_size = new_size;
-       }
-
-       ASSERT(new_size != 0 || ip->i_delayed_blks == 0);
-       ASSERT(new_size != 0 || ip->i_d.di_nextents == 0);
-
-       /*
-        * Always re-log the inode so that our permanent transaction can keep
-        * on rolling it forward in the log.
-        */
-       xfs_trans_log_inode(*tpp, ip, XFS_ILOG_CORE);
-
-       trace_xfs_itruncate_data_end(ip, new_size);
-       return 0;
-}
-
 /*
  * This is called when the inode's link count goes to 0.
  * We place the on-disk inode on a list in the AGI.  It
@@ -1824,8 +1709,7 @@ xfs_ifree(
        ASSERT(ip->i_d.di_nlink == 0);
        ASSERT(ip->i_d.di_nextents == 0);
        ASSERT(ip->i_d.di_anextents == 0);
-       ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
-              (!S_ISREG(ip->i_d.di_mode)));
+       ASSERT(ip->i_d.di_size == 0 || !S_ISREG(ip->i_d.di_mode));
        ASSERT(ip->i_d.di_nblocks == 0);
 
        /*
@@ -1844,8 +1728,6 @@ xfs_ifree(
        ip->i_d.di_flags = 0;
        ip->i_d.di_dmevmask = 0;
        ip->i_d.di_forkoff = 0;         /* mark the attr fork not in use */
-       ip->i_df.if_ext_max =
-               XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
        ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
        ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
        /*
@@ -2151,7 +2033,7 @@ xfs_idestroy_fork(
  * once someone is waiting for it to be unpinned.
  */
 static void
-xfs_iunpin_nowait(
+xfs_iunpin(
        struct xfs_inode        *ip)
 {
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
@@ -2163,14 +2045,29 @@ xfs_iunpin_nowait(
 
 }
 
+static void
+__xfs_iunpin_wait(
+       struct xfs_inode        *ip)
+{
+       wait_queue_head_t *wq = bit_waitqueue(&ip->i_flags, __XFS_IPINNED_BIT);
+       DEFINE_WAIT_BIT(wait, &ip->i_flags, __XFS_IPINNED_BIT);
+
+       xfs_iunpin(ip);
+
+       do {
+               prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
+               if (xfs_ipincount(ip))
+                       io_schedule();
+       } while (xfs_ipincount(ip));
+       finish_wait(wq, &wait.wait);
+}
+
 void
 xfs_iunpin_wait(
        struct xfs_inode        *ip)
 {
-       if (xfs_ipincount(ip)) {
-               xfs_iunpin_nowait(ip);
-               wait_event(ip->i_ipin_wait, (xfs_ipincount(ip) == 0));
-       }
+       if (xfs_ipincount(ip))
+               __xfs_iunpin_wait(ip);
 }
 
 /*
@@ -2510,9 +2407,9 @@ xfs_iflush(
        XFS_STATS_INC(xs_iflush_count);
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
-       ASSERT(!completion_done(&ip->i_flush));
+       ASSERT(xfs_isiflocked(ip));
        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
-              ip->i_d.di_nextents > ip->i_df.if_ext_max);
+              ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
 
        iip = ip->i_itemp;
        mp = ip->i_mount;
@@ -2529,7 +2426,7 @@ xfs_iflush(
         * out for us if they occur after the log force completes.
         */
        if (!(flags & SYNC_WAIT) && xfs_ipincount(ip)) {
-               xfs_iunpin_nowait(ip);
+               xfs_iunpin(ip);
                xfs_ifunlock(ip);
                return EAGAIN;
        }
@@ -2626,9 +2523,9 @@ xfs_iflush_int(
 #endif
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
-       ASSERT(!completion_done(&ip->i_flush));
+       ASSERT(xfs_isiflocked(ip));
        ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
-              ip->i_d.di_nextents > ip->i_df.if_ext_max);
+              ip->i_d.di_nextents > XFS_IFORK_MAXEXT(ip, XFS_DATA_FORK));
 
        iip = ip->i_itemp;
        mp = ip->i_mount;
index f0e6b151ba37e4d0c8cc92f12a84ac03848dd0a1..2f27b745408520b73bab9bd8a1a2ca4ed1f96ea0 100644 (file)
@@ -66,7 +66,6 @@ typedef struct xfs_ifork {
        struct xfs_btree_block  *if_broot;      /* file's incore btree root */
        short                   if_broot_bytes; /* bytes allocated for root */
        unsigned char           if_flags;       /* per-fork flags */
-       unsigned char           if_ext_max;     /* max # of extent records */
        union {
                xfs_bmbt_rec_host_t *if_extents;/* linear map file exts */
                xfs_ext_irec_t  *if_ext_irec;   /* irec map file exts */
@@ -206,12 +205,12 @@ typedef struct xfs_icdinode {
        ((w) == XFS_DATA_FORK ? \
                ((ip)->i_d.di_nextents = (n)) : \
                ((ip)->i_d.di_anextents = (n)))
-
+#define XFS_IFORK_MAXEXT(ip, w) \
+       (XFS_IFORK_SIZE(ip, w) / sizeof(xfs_bmbt_rec_t))
 
 
 #ifdef __KERNEL__
 
-struct bhv_desc;
 struct xfs_buf;
 struct xfs_bmap_free;
 struct xfs_bmbt_irec;
@@ -220,12 +219,6 @@ struct xfs_mount;
 struct xfs_trans;
 struct xfs_dquot;
 
-typedef struct dm_attrs_s {
-       __uint32_t      da_dmevmask;    /* DMIG event mask */
-       __uint16_t      da_dmstate;     /* DMIG state info */
-       __uint16_t      da_pad;         /* DMIG extra padding */
-} dm_attrs_t;
-
 typedef struct xfs_inode {
        /* Inode linking and identification information. */
        struct xfs_mount        *i_mount;       /* fs mount struct ptr */
@@ -244,27 +237,19 @@ typedef struct xfs_inode {
        struct xfs_inode_log_item *i_itemp;     /* logging information */
        mrlock_t                i_lock;         /* inode lock */
        mrlock_t                i_iolock;       /* inode IO lock */
-       struct completion       i_flush;        /* inode flush completion q */
        atomic_t                i_pincount;     /* inode pin count */
-       wait_queue_head_t       i_ipin_wait;    /* inode pinning wait queue */
        spinlock_t              i_flags_lock;   /* inode i_flags lock */
        /* Miscellaneous state. */
-       unsigned short          i_flags;        /* see defined flags below */
+       unsigned long           i_flags;        /* see defined flags below */
        unsigned char           i_update_core;  /* timestamps/size is dirty */
        unsigned int            i_delayed_blks; /* count of delay alloc blks */
 
        xfs_icdinode_t          i_d;            /* most of ondisk inode */
 
-       xfs_fsize_t             i_size;         /* in-memory size */
-       xfs_fsize_t             i_new_size;     /* size when write completes */
-
        /* VFS inode */
        struct inode            i_vnode;        /* embedded VFS inode */
 } xfs_inode_t;
 
-#define XFS_ISIZE(ip)  S_ISREG((ip)->i_d.di_mode) ? \
-                               (ip)->i_size : (ip)->i_d.di_size;
-
 /* Convert from vfs inode to xfs inode */
 static inline struct xfs_inode *XFS_I(struct inode *inode)
 {
@@ -277,6 +262,18 @@ static inline struct inode *VFS_I(struct xfs_inode *ip)
        return &ip->i_vnode;
 }
 
+/*
+ * For regular files we only update the on-disk filesize when actually
+ * writing data back to disk.  Until then only the copy in the VFS inode
+ * is uptodate.
+ */
+static inline xfs_fsize_t XFS_ISIZE(struct xfs_inode *ip)
+{
+       if (S_ISREG(ip->i_d.di_mode))
+               return i_size_read(VFS_I(ip));
+       return ip->i_d.di_size;
+}
+
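
The new XFS_ISIZE() helper above returns the in-core VFS size (i_size_read) for regular files, where writes advance it before di_size is logged, and falls back to the on-disk di_size for everything else. A stripped-down toy model of that choice, with all names and values invented for illustration:

#include <stdio.h>

/* Toy model: an inode carries both an in-core (VFS) size and an on-disk size. */
struct toy_inode {
	int       is_regular;   /* stand-in for S_ISREG(di_mode) */
	long long vfs_size;     /* stand-in for i_size_read(VFS_I(ip)) */
	long long disk_size;    /* stand-in for ip->i_d.di_size */
};

static long long toy_isize(const struct toy_inode *ip)
{
	/* Regular files: trust the in-core size; others: the on-disk size. */
	return ip->is_regular ? ip->vfs_size : ip->disk_size;
}

int main(void)
{
	struct toy_inode reg = { 1, 8192, 4096 };   /* dirty data not yet written back */
	struct toy_inode dir = { 0, 0, 6 };

	printf("regular file size: %lld\n", toy_isize(&reg));   /* 8192 */
	printf("directory size:    %lld\n", toy_isize(&dir));   /* 6 */
	return 0;
}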
 /*
  * i_flags helper functions
  */
@@ -331,6 +328,19 @@ xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
        return ret;
 }
 
+static inline int
+xfs_iflags_test_and_set(xfs_inode_t *ip, unsigned short flags)
+{
+       int ret;
+
+       spin_lock(&ip->i_flags_lock);
+       ret = ip->i_flags & flags;
+       if (!ret)
+               ip->i_flags |= flags;
+       spin_unlock(&ip->i_flags_lock);
+       return ret;
+}
+
 /*
  * Project quota id helpers (previously projid was 16bit only
  * and using two 16bit values to hold new 32bit projid was chosen
@@ -350,36 +360,20 @@ xfs_set_projid(struct xfs_inode *ip,
        ip->i_d.di_projid_lo = (__uint16_t) (projid & 0xffff);
 }
 
-/*
- * Manage the i_flush queue embedded in the inode.  This completion
- * queue synchronizes processes attempting to flush the in-core
- * inode back to disk.
- */
-static inline void xfs_iflock(xfs_inode_t *ip)
-{
-       wait_for_completion(&ip->i_flush);
-}
-
-static inline int xfs_iflock_nowait(xfs_inode_t *ip)
-{
-       return try_wait_for_completion(&ip->i_flush);
-}
-
-static inline void xfs_ifunlock(xfs_inode_t *ip)
-{
-       complete(&ip->i_flush);
-}
-
 /*
  * In-core inode flags.
  */
-#define XFS_IRECLAIM           0x0001  /* started reclaiming this inode */
-#define XFS_ISTALE             0x0002  /* inode has been staled */
-#define XFS_IRECLAIMABLE       0x0004  /* inode can be reclaimed */
-#define XFS_INEW               0x0008  /* inode has just been allocated */
-#define XFS_IFILESTREAM                0x0010  /* inode is in a filestream directory */
-#define XFS_ITRUNCATED         0x0020  /* truncated down so flush-on-close */
-#define XFS_IDIRTY_RELEASE     0x0040  /* dirty release already seen */
+#define XFS_IRECLAIM           (1 << 0) /* started reclaiming this inode */
+#define XFS_ISTALE             (1 << 1) /* inode has been staled */
+#define XFS_IRECLAIMABLE       (1 << 2) /* inode can be reclaimed */
+#define XFS_INEW               (1 << 3) /* inode has just been allocated */
+#define XFS_IFILESTREAM                (1 << 4) /* inode is in a filestream dir. */
+#define XFS_ITRUNCATED         (1 << 5) /* truncated down so flush-on-close */
+#define XFS_IDIRTY_RELEASE     (1 << 6) /* dirty release already seen */
+#define __XFS_IFLOCK_BIT       7        /* inode is being flushed right now */
+#define XFS_IFLOCK             (1 << __XFS_IFLOCK_BIT)
+#define __XFS_IPINNED_BIT      8        /* wakeup key for zero pin count */
+#define XFS_IPINNED            (1 << __XFS_IPINNED_BIT)
 
 /*
  * Per-lifetime flags need to be reset when re-using a reclaimable inode during
@@ -391,6 +385,34 @@ static inline void xfs_ifunlock(xfs_inode_t *ip)
         XFS_IDIRTY_RELEASE | XFS_ITRUNCATED | \
         XFS_IFILESTREAM);
 
+/*
+ * Synchronize processes attempting to flush the in-core inode back to disk.
+ */
+
+extern void __xfs_iflock(struct xfs_inode *ip);
+
+static inline int xfs_iflock_nowait(struct xfs_inode *ip)
+{
+       return !xfs_iflags_test_and_set(ip, XFS_IFLOCK);
+}
+
+static inline void xfs_iflock(struct xfs_inode *ip)
+{
+       if (!xfs_iflock_nowait(ip))
+               __xfs_iflock(ip);
+}
+
+static inline void xfs_ifunlock(struct xfs_inode *ip)
+{
+       xfs_iflags_clear(ip, XFS_IFLOCK);
+       wake_up_bit(&ip->i_flags, __XFS_IFLOCK_BIT);
+}
+
+static inline int xfs_isiflocked(struct xfs_inode *ip)
+{
+       return xfs_iflags_test(ip, XFS_IFLOCK);
+}
+
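
Together with __xfs_iflock() earlier in this diff, the inlines above build a lock out of a single flag bit: xfs_iflock_nowait() atomically test-and-sets XFS_IFLOCK, xfs_iflock() falls back to sleeping on the bit waitqueue, and xfs_ifunlock() clears the bit and wakes waiters. A hedged user-space analog using C11 atomics, with a simple yield loop standing in for the kernel's wait queue; everything here is illustrative only.

#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define TOY_IFLOCK (1u << 7)                 /* mirrors __XFS_IFLOCK_BIT */

static _Atomic unsigned int i_flags;

static int toy_iflock_nowait(void)
{
	/* test-and-set: succeed only if the bit was previously clear */
	return !(atomic_fetch_or(&i_flags, TOY_IFLOCK) & TOY_IFLOCK);
}

static void toy_iflock(void)
{
	while (!toy_iflock_nowait())
		sched_yield();               /* the kernel sleeps on a bit waitqueue instead */
}

static void toy_ifunlock(void)
{
	atomic_fetch_and(&i_flags, ~TOY_IFLOCK);   /* kernel also calls wake_up_bit() */
}

int main(void)
{
	toy_iflock();
	printf("trylock while held: %d\n", toy_iflock_nowait());   /* 0 */
	toy_ifunlock();
	printf("trylock after unlock: %d\n", toy_iflock_nowait()); /* 1 */
	return 0;
}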
 /*
  * Flags for inode locking.
  * Bit ranges: 1<<1  - 1<<16-1 -- iolock/ilock modes (bitfield)
@@ -491,8 +513,6 @@ int         xfs_ifree(struct xfs_trans *, xfs_inode_t *,
                           struct xfs_bmap_free *);
 int            xfs_itruncate_extents(struct xfs_trans **, struct xfs_inode *,
                                      int, xfs_fsize_t);
-int            xfs_itruncate_data(struct xfs_trans **, struct xfs_inode *,
-                                  xfs_fsize_t);
 int            xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
 
 void           xfs_iext_realloc(xfs_inode_t *, int, int);
index cfd6c7f8cc3c09450e1ad6dfff372f12938d1d42..91d71dcd4852eed6339bd1ceb54a8dbdf04cd27a 100644 (file)
@@ -79,8 +79,6 @@ xfs_inode_item_size(
                break;
 
        case XFS_DINODE_FMT_BTREE:
-               ASSERT(ip->i_df.if_ext_max ==
-                      XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t));
                iip->ili_format.ilf_fields &=
                        ~(XFS_ILOG_DDATA | XFS_ILOG_DEXT |
                          XFS_ILOG_DEV | XFS_ILOG_UUID);
@@ -557,7 +555,7 @@ xfs_inode_item_unpin(
        trace_xfs_inode_unpin(ip, _RET_IP_);
        ASSERT(atomic_read(&ip->i_pincount) > 0);
        if (atomic_dec_and_test(&ip->i_pincount))
-               wake_up(&ip->i_ipin_wait);
+               wake_up_bit(&ip->i_flags, __XFS_IPINNED_BIT);
 }
 
 /*
@@ -719,7 +717,7 @@ xfs_inode_item_pushbuf(
         * If a flush is not in progress anymore, chances are that the
         * inode was taken off the AIL. So, just get out.
         */
-       if (completion_done(&ip->i_flush) ||
+       if (!xfs_isiflocked(ip) ||
            !(lip->li_flags & XFS_LI_IN_AIL)) {
                xfs_iunlock(ip, XFS_ILOCK_SHARED);
                return true;
@@ -752,7 +750,7 @@ xfs_inode_item_push(
        struct xfs_inode        *ip = iip->ili_inode;
 
        ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
-       ASSERT(!completion_done(&ip->i_flush));
+       ASSERT(xfs_isiflocked(ip));
 
        /*
         * Since we were able to lock the inode's flush lock and
index 9afa282aa937b473aad0d90fc6f2a0bfb659dd29..246c7d57c6f96c876778128e8d21c90fca692ce9 100644 (file)
@@ -57,26 +57,26 @@ xfs_iomap_eof_align_last_fsb(
        xfs_fileoff_t   *last_fsb)
 {
        xfs_fileoff_t   new_last_fsb = 0;
-       xfs_extlen_t    align;
+       xfs_extlen_t    align = 0;
        int             eof, error;
 
-       if (XFS_IS_REALTIME_INODE(ip))
-               ;
-       /*
-        * If mounted with the "-o swalloc" option, roundup the allocation
-        * request to a stripe width boundary if the file size is >=
-        * stripe width and we are allocating past the allocation eof.
-        */
-       else if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC) &&
-               (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_swidth)))
-               new_last_fsb = roundup_64(*last_fsb, mp->m_swidth);
-       /*
-        * Roundup the allocation request to a stripe unit (m_dalign) boundary
-        * if the file size is >= stripe unit size, and we are allocating past
-        * the allocation eof.
-        */
-       else if (mp->m_dalign && (ip->i_size >= XFS_FSB_TO_B(mp, mp->m_dalign)))
-               new_last_fsb = roundup_64(*last_fsb, mp->m_dalign);
+       if (!XFS_IS_REALTIME_INODE(ip)) {
+               /*
+                * Round up the allocation request to a stripe unit
+                * (m_dalign) boundary if the file size is >= stripe unit
+                * size, and we are allocating past the allocation eof.
+                *
+                * If mounted with the "-o swalloc" option the alignment is
+                * increased from the stripe unit size to the stripe width.
+                */
+               if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
+                       align = mp->m_swidth;
+               else if (mp->m_dalign)
+                       align = mp->m_dalign;
+
+               if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align))
+                       new_last_fsb = roundup_64(*last_fsb, align);
+       }
 
        /*
         * Always round up the allocation request to an extent boundary
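
The rewritten alignment logic above picks a single align value (the stripe width with "-o swalloc", otherwise the stripe unit) and rounds the last file-system block up to a multiple of it with roundup_64(). A hedged worked example of that rounding, with made-up geometry:

#include <stdio.h>

/* Round x up to the next multiple of align (align > 0), as roundup_64() does. */
static unsigned long long roundup64(unsigned long long x, unsigned long long align)
{
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	/* Hypothetical geometry: stripe unit 16 blocks, stripe width 64 blocks. */
	unsigned long long last_fsb = 1000;

	printf("dalign=16: %llu\n", roundup64(last_fsb, 16));   /* 1008 */
	printf("swidth=64: %llu\n", roundup64(last_fsb, 64));   /* 1024 */
	return 0;
}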
@@ -154,7 +154,7 @@ xfs_iomap_write_direct(
 
        offset_fsb = XFS_B_TO_FSBT(mp, offset);
        last_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)(offset + count)));
-       if ((offset + count) > ip->i_size) {
+       if ((offset + count) > XFS_ISIZE(ip)) {
                error = xfs_iomap_eof_align_last_fsb(mp, ip, extsz, &last_fsb);
                if (error)
                        goto error_out;
@@ -211,7 +211,7 @@ xfs_iomap_write_direct(
        xfs_trans_ijoin(tp, ip, 0);
 
        bmapi_flag = 0;
-       if (offset < ip->i_size || extsz)
+       if (offset < XFS_ISIZE(ip) || extsz)
                bmapi_flag |= XFS_BMAPI_PREALLOC;
 
        /*
@@ -286,7 +286,7 @@ xfs_iomap_eof_want_preallocate(
        int             found_delalloc = 0;
 
        *prealloc = 0;
-       if ((offset + count) <= ip->i_size)
+       if (offset + count <= XFS_ISIZE(ip))
                return 0;
 
        /*
@@ -340,7 +340,7 @@ xfs_iomap_prealloc_size(
                 * if we pass in alloc_blocks = 0. Hence the "+ 1" to
                 * ensure we always pass in a non-zero value.
                 */
-               alloc_blocks = XFS_B_TO_FSB(mp, ip->i_size) + 1;
+               alloc_blocks = XFS_B_TO_FSB(mp, XFS_ISIZE(ip)) + 1;
                alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
                                        rounddown_pow_of_two(alloc_blocks));
 
@@ -564,7 +564,7 @@ xfs_iomap_write_allocate(
                         * back....
                         */
                        nimaps = 1;
-                       end_fsb = XFS_B_TO_FSB(mp, ip->i_size);
+                       end_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));
                        error = xfs_bmap_last_offset(NULL, ip, &last_block,
                                                        XFS_DATA_FORK);
                        if (error)
index f9babd17922377e422024ac07d9910cb78bc1708..ab302539e5b9603b8a67bb9f4399c03625fe1fd7 100644 (file)
@@ -750,6 +750,7 @@ xfs_setattr_size(
        struct xfs_mount        *mp = ip->i_mount;
        struct inode            *inode = VFS_I(ip);
        int                     mask = iattr->ia_valid;
+       xfs_off_t               oldsize, newsize;
        struct xfs_trans        *tp;
        int                     error;
        uint                    lock_flags;
@@ -777,11 +778,13 @@ xfs_setattr_size(
                lock_flags |= XFS_IOLOCK_EXCL;
        xfs_ilock(ip, lock_flags);
 
+       oldsize = inode->i_size;
+       newsize = iattr->ia_size;
+
        /*
         * Short circuit the truncate case for zero length files.
         */
-       if (iattr->ia_size == 0 &&
-           ip->i_size == 0 && ip->i_d.di_nextents == 0) {
+       if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
                if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
                        goto out_unlock;
 
@@ -807,14 +810,14 @@ xfs_setattr_size(
         * the inode to the transaction, because the inode cannot be unlocked
         * once it is a part of the transaction.
         */
-       if (iattr->ia_size > ip->i_size) {
+       if (newsize > oldsize) {
                /*
                 * Do the first part of growing a file: zero any data in the
                 * last block that is beyond the old EOF.  We need to do this
                 * before the inode is joined to the transaction to modify
                 * i_size.
                 */
-               error = xfs_zero_eof(ip, iattr->ia_size, ip->i_size);
+               error = xfs_zero_eof(ip, newsize, oldsize);
                if (error)
                        goto out_unlock;
        }
@@ -833,8 +836,8 @@ xfs_setattr_size(
         * here and prevents waiting for other data not within the range we
         * care about here.
         */
-       if (ip->i_size != ip->i_d.di_size && iattr->ia_size > ip->i_d.di_size) {
-               error = xfs_flush_pages(ip, ip->i_d.di_size, iattr->ia_size, 0,
+       if (oldsize != ip->i_d.di_size && newsize > ip->i_d.di_size) {
+               error = xfs_flush_pages(ip, ip->i_d.di_size, newsize, 0,
                                        FI_NONE);
                if (error)
                        goto out_unlock;
@@ -845,8 +848,7 @@ xfs_setattr_size(
         */
        inode_dio_wait(inode);
 
-       error = -block_truncate_page(inode->i_mapping, iattr->ia_size,
-                                    xfs_get_blocks);
+       error = -block_truncate_page(inode->i_mapping, newsize, xfs_get_blocks);
        if (error)
                goto out_unlock;
 
@@ -857,7 +859,7 @@ xfs_setattr_size(
        if (error)
                goto out_trans_cancel;
 
-       truncate_setsize(inode, iattr->ia_size);
+       truncate_setsize(inode, newsize);
 
        commit_flags = XFS_TRANS_RELEASE_LOG_RES;
        lock_flags |= XFS_ILOCK_EXCL;
@@ -876,19 +878,29 @@ xfs_setattr_size(
         * these flags set.  For all other operations the VFS set these flags
         * explicitly if it wants a timestamp update.
         */
-       if (iattr->ia_size != ip->i_size &&
-           (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
+       if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
                iattr->ia_ctime = iattr->ia_mtime =
                        current_fs_time(inode->i_sb);
                mask |= ATTR_CTIME | ATTR_MTIME;
        }
 
-       if (iattr->ia_size > ip->i_size) {
-               ip->i_d.di_size = iattr->ia_size;
-               ip->i_size = iattr->ia_size;
-       } else if (iattr->ia_size <= ip->i_size ||
-                  (iattr->ia_size == 0 && ip->i_d.di_nextents)) {
-               error = xfs_itruncate_data(&tp, ip, iattr->ia_size);
+       /*
+        * The first thing we do is set the size to new_size permanently on
+        * disk.  This way we don't have to worry about anyone ever being able
+        * to look at the data being freed even in the face of a crash.
+        * What we're getting around here is the case where we free a block, it
+        * is allocated to another file, it is written to, and then we crash.
+        * If the new data gets written to the file but the log buffers
+        * containing the free and reallocation don't, then we'd end up with
+        * garbage in the blocks being freed.  As long as we make the new size
+        * permanent before actually freeing any blocks it doesn't matter if
+        * they get written to.
+        */
+       ip->i_d.di_size = newsize;
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       if (newsize <= oldsize) {
+               error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, newsize);
                if (error)
                        goto out_trans_abort;
 
index 5cc3dde1bc9039de237a102c466b1fb4aa249cf8..eafbcff81f3af43c9dae0a73175bc71abba4a7c5 100644 (file)
@@ -31,6 +31,7 @@
 #include "xfs_mount.h"
 #include "xfs_bmap_btree.h"
 #include "xfs_inode.h"
+#include "xfs_inode_item.h"
 #include "xfs_itable.h"
 #include "xfs_bmap.h"
 #include "xfs_rtalloc.h"
@@ -263,13 +264,18 @@ xfs_qm_scall_trunc_qfile(
        xfs_ilock(ip, XFS_ILOCK_EXCL);
        xfs_trans_ijoin(tp, ip, 0);
 
-       error = xfs_itruncate_data(&tp, ip, 0);
+       ip->i_d.di_size = 0;
+       xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+       error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
        if (error) {
                xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
                                     XFS_TRANS_ABORT);
                goto out_unlock;
        }
 
+       ASSERT(ip->i_d.di_nextents == 0);
+
        xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
        error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
 
index 281961c1d81a73df18ab8dcf31c39e6be3d94588..ee5b695c99a700275683d26ac78acd6351c9cc1f 100644 (file)
@@ -828,14 +828,6 @@ xfs_fs_inode_init_once(
        /* xfs inode */
        atomic_set(&ip->i_pincount, 0);
        spin_lock_init(&ip->i_flags_lock);
-       init_waitqueue_head(&ip->i_ipin_wait);
-       /*
-        * Because we want to use a counting completion, complete
-        * the flush completion once to allow a single access to
-        * the flush completion without blocking.
-        */
-       init_completion(&ip->i_flush);
-       complete(&ip->i_flush);
 
        mrlock_init(&ip->i_lock, MRLOCK_ALLOW_EQUAL_PRI|MRLOCK_BARRIER,
                     "xfsino", ip->i_ino);
index 72c01a1c16e7d16ca0a49e284addf7d884dbfea6..40b75eecd2b4b376253e0e9408e42bc475e63f9b 100644 (file)
@@ -707,14 +707,13 @@ xfs_reclaim_inode_grab(
                return 1;
 
        /*
-        * do some unlocked checks first to avoid unnecessary lock traffic.
-        * The first is a flush lock check, the second is a already in reclaim
-        * check. Only do these checks if we are not going to block on locks.
+        * If we are asked for non-blocking operation, do unlocked checks to
+        * see if the inode is already being flushed or in reclaim to avoid
+        * lock traffic.
         */
        if ((flags & SYNC_TRYLOCK) &&
-           (!ip->i_flush.done || __xfs_iflags_test(ip, XFS_IRECLAIM))) {
+           __xfs_iflags_test(ip, XFS_IFLOCK | XFS_IRECLAIM))
                return 1;
-       }
 
        /*
         * The radix tree lock here protects a thread in xfs_iget from racing
index a9d5b1e06efee95920e0bc0dd7b75c6f867b537b..6b6df5802e957009f8c3f657411bf6e6e89cfad6 100644 (file)
@@ -891,7 +891,6 @@ DECLARE_EVENT_CLASS(xfs_file_class,
                __field(dev_t, dev)
                __field(xfs_ino_t, ino)
                __field(xfs_fsize_t, size)
-               __field(xfs_fsize_t, new_size)
                __field(loff_t, offset)
                __field(size_t, count)
                __field(int, flags)
@@ -900,17 +899,15 @@ DECLARE_EVENT_CLASS(xfs_file_class,
                __entry->dev = VFS_I(ip)->i_sb->s_dev;
                __entry->ino = ip->i_ino;
                __entry->size = ip->i_d.di_size;
-               __entry->new_size = ip->i_new_size;
                __entry->offset = offset;
                __entry->count = count;
                __entry->flags = flags;
        ),
-       TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
+       TP_printk("dev %d:%d ino 0x%llx size 0x%llx "
                  "offset 0x%llx count 0x%zx ioflags %s",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  __entry->size,
-                 __entry->new_size,
                  __entry->offset,
                  __entry->count,
                  __print_flags(__entry->flags, "|", XFS_IO_FLAGS))
@@ -978,7 +975,6 @@ DECLARE_EVENT_CLASS(xfs_imap_class,
                __field(dev_t, dev)
                __field(xfs_ino_t, ino)
                __field(loff_t, size)
-               __field(loff_t, new_size)
                __field(loff_t, offset)
                __field(size_t, count)
                __field(int, type)
@@ -990,7 +986,6 @@ DECLARE_EVENT_CLASS(xfs_imap_class,
                __entry->dev = VFS_I(ip)->i_sb->s_dev;
                __entry->ino = ip->i_ino;
                __entry->size = ip->i_d.di_size;
-               __entry->new_size = ip->i_new_size;
                __entry->offset = offset;
                __entry->count = count;
                __entry->type = type;
@@ -998,13 +993,11 @@ DECLARE_EVENT_CLASS(xfs_imap_class,
                __entry->startblock = irec ? irec->br_startblock : 0;
                __entry->blockcount = irec ? irec->br_blockcount : 0;
        ),
-       TP_printk("dev %d:%d ino 0x%llx size 0x%llx new_size 0x%llx "
-                 "offset 0x%llx count %zd type %s "
-                 "startoff 0x%llx startblock %lld blockcount 0x%llx",
+       TP_printk("dev %d:%d ino 0x%llx size 0x%llx offset 0x%llx count %zd "
+                 "type %s startoff 0x%llx startblock %lld blockcount 0x%llx",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  __entry->size,
-                 __entry->new_size,
                  __entry->offset,
                  __entry->count,
                  __print_symbolic(__entry->type, XFS_IO_TYPES),
@@ -1031,26 +1024,23 @@ DECLARE_EVENT_CLASS(xfs_simple_io_class,
                __field(xfs_ino_t, ino)
                __field(loff_t, isize)
                __field(loff_t, disize)
-               __field(loff_t, new_size)
                __field(loff_t, offset)
                __field(size_t, count)
        ),
        TP_fast_assign(
                __entry->dev = VFS_I(ip)->i_sb->s_dev;
                __entry->ino = ip->i_ino;
-               __entry->isize = ip->i_size;
+               __entry->isize = VFS_I(ip)->i_size;
                __entry->disize = ip->i_d.di_size;
-               __entry->new_size = ip->i_new_size;
                __entry->offset = offset;
                __entry->count = count;
        ),
-       TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx new_size 0x%llx "
+       TP_printk("dev %d:%d ino 0x%llx isize 0x%llx disize 0x%llx "
                  "offset 0x%llx count %zd",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  __entry->isize,
                  __entry->disize,
-                 __entry->new_size,
                  __entry->offset,
                  __entry->count)
 );
@@ -1090,8 +1080,8 @@ DECLARE_EVENT_CLASS(xfs_itrunc_class,
 DEFINE_EVENT(xfs_itrunc_class, name, \
        TP_PROTO(struct xfs_inode *ip, xfs_fsize_t new_size), \
        TP_ARGS(ip, new_size))
-DEFINE_ITRUNC_EVENT(xfs_itruncate_data_start);
-DEFINE_ITRUNC_EVENT(xfs_itruncate_data_end);
+DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_start);
+DEFINE_ITRUNC_EVENT(xfs_itruncate_extents_end);
 
 TRACE_EVENT(xfs_pagecache_inval,
        TP_PROTO(struct xfs_inode *ip, xfs_off_t start, xfs_off_t finish),
@@ -1568,7 +1558,6 @@ DECLARE_EVENT_CLASS(xfs_swap_extent_class,
                __field(xfs_ino_t, ino)
                __field(int, format)
                __field(int, nex)
-               __field(int, max_nex)
                __field(int, broot_size)
                __field(int, fork_off)
        ),
@@ -1578,18 +1567,16 @@ DECLARE_EVENT_CLASS(xfs_swap_extent_class,
                __entry->ino = ip->i_ino;
                __entry->format = ip->i_d.di_format;
                __entry->nex = ip->i_d.di_nextents;
-               __entry->max_nex = ip->i_df.if_ext_max;
                __entry->broot_size = ip->i_df.if_broot_bytes;
                __entry->fork_off = XFS_IFORK_BOFF(ip);
        ),
        TP_printk("dev %d:%d ino 0x%llx (%s), %s format, num_extents %d, "
-                 "Max in-fork extents %d, broot size %d, fork offset %d",
+                 "broot size %d, fork offset %d",
                  MAJOR(__entry->dev), MINOR(__entry->dev),
                  __entry->ino,
                  __print_symbolic(__entry->which, XFS_SWAPEXT_INODES),
                  __print_symbolic(__entry->format, XFS_INODE_FORMAT_STR),
                  __entry->nex,
-                 __entry->max_nex,
                  __entry->broot_size,
                  __entry->fork_off)
 )
index f2fea868d4db5da562804b2a0a9bd212608af058..ebdb88840a47817b5704df2aaa0fefb8b11fd96e 100644 (file)
@@ -131,7 +131,8 @@ xfs_readlink(
                         __func__, (unsigned long long) ip->i_ino,
                         (long long) pathlen);
                ASSERT(0);
-               return XFS_ERROR(EFSCORRUPTED);
+               error = XFS_ERROR(EFSCORRUPTED);
+               goto out;
        }
 
 
@@ -175,7 +176,7 @@ xfs_free_eofblocks(
         * Figure out if there are any blocks beyond the end
         * of the file.  If not, then there is nothing to do.
         */
-       end_fsb = XFS_B_TO_FSB(mp, ((xfs_ufsize_t)ip->i_size));
+       end_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_ISIZE(ip));
        last_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
        if (last_fsb <= end_fsb)
                return 0;
@@ -226,7 +227,14 @@ xfs_free_eofblocks(
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);
 
-               error = xfs_itruncate_data(&tp, ip, ip->i_size);
+               /*
+                * Do not update the on-disk file size.  If we update the
+                * on-disk file size and then the system crashes before the
+                * contents of the file are flushed to disk then the files
+                * may be full of holes (ie NULL files bug).
+                */
+               error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK,
+                                             XFS_ISIZE(ip));
                if (error) {
                        /*
                         * If we get an error at this point we simply don't
@@ -540,8 +548,8 @@ xfs_release(
                return 0;
 
        if ((S_ISREG(ip->i_d.di_mode) &&
-            ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
-              ip->i_delayed_blks > 0)) &&
+            (VFS_I(ip)->i_size > 0 ||
+             (VN_CACHED(VFS_I(ip)) > 0 || ip->i_delayed_blks > 0)) &&
             (ip->i_df.if_flags & XFS_IFEXTENTS))  &&
            (!(ip->i_d.di_flags & (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)))) {
 
@@ -618,7 +626,7 @@ xfs_inactive(
         * only one with a reference to the inode.
         */
        truncate = ((ip->i_d.di_nlink == 0) &&
-           ((ip->i_d.di_size != 0) || (ip->i_size != 0) ||
+           ((ip->i_d.di_size != 0) || XFS_ISIZE(ip) != 0 ||
             (ip->i_d.di_nextents > 0) || (ip->i_delayed_blks > 0)) &&
            S_ISREG(ip->i_d.di_mode));
 
@@ -632,12 +640,12 @@ xfs_inactive(
 
        if (ip->i_d.di_nlink != 0) {
                if ((S_ISREG(ip->i_d.di_mode) &&
-                     ((ip->i_size > 0) || (VN_CACHED(VFS_I(ip)) > 0 ||
-                       ip->i_delayed_blks > 0)) &&
-                     (ip->i_df.if_flags & XFS_IFEXTENTS) &&
-                    (!(ip->i_d.di_flags &
+                   (VFS_I(ip)->i_size > 0 ||
+                    (VN_CACHED(VFS_I(ip)) > 0 || ip->i_delayed_blks > 0)) &&
+                   (ip->i_df.if_flags & XFS_IFEXTENTS) &&
+                   (!(ip->i_d.di_flags &
                                (XFS_DIFLAG_PREALLOC | XFS_DIFLAG_APPEND)) ||
-                     (ip->i_delayed_blks != 0)))) {
+                    ip->i_delayed_blks != 0))) {
                        error = xfs_free_eofblocks(mp, ip, 0);
                        if (error)
                                return VN_INACTIVE_CACHE;
@@ -670,13 +678,18 @@ xfs_inactive(
                xfs_ilock(ip, XFS_ILOCK_EXCL);
                xfs_trans_ijoin(tp, ip, 0);
 
-               error = xfs_itruncate_data(&tp, ip, 0);
+               ip->i_d.di_size = 0;
+               xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
+
+               error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
                if (error) {
                        xfs_trans_cancel(tp,
                                XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT);
                        xfs_iunlock(ip, XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL);
                        return VN_INACTIVE_CACHE;
                }
+
+               ASSERT(ip->i_d.di_nextents == 0);
        } else if (S_ISLNK(ip->i_d.di_mode)) {
 
                /*
@@ -1961,11 +1974,11 @@ xfs_zero_remaining_bytes(
         * since nothing can read beyond eof.  The space will
         * be zeroed when the file is extended anyway.
         */
-       if (startoff >= ip->i_size)
+       if (startoff >= XFS_ISIZE(ip))
                return 0;
 
-       if (endoff > ip->i_size)
-               endoff = ip->i_size;
+       if (endoff > XFS_ISIZE(ip))
+               endoff = XFS_ISIZE(ip);
 
        bp = xfs_buf_get_uncached(XFS_IS_REALTIME_INODE(ip) ?
                                        mp->m_rtdev_targp : mp->m_ddev_targp,
@@ -2260,7 +2273,7 @@ xfs_change_file_space(
                bf->l_start += offset;
                break;
        case 2: /*SEEK_END*/
-               bf->l_start += ip->i_size;
+               bf->l_start += XFS_ISIZE(ip);
                break;
        default:
                return XFS_ERROR(EINVAL);
@@ -2277,7 +2290,7 @@ xfs_change_file_space(
        bf->l_whence = 0;
 
        startoffset = bf->l_start;
-       fsize = ip->i_size;
+       fsize = XFS_ISIZE(ip);
 
        /*
         * XFS_IOC_RESVSP and XFS_IOC_UNRESVSP will reserve or unreserve
index fc1575fd4596e484c0fc44c7614e6decb5e8c6b6..5b5af0d30a9738a7cb938a04b839c3d6a5f5727e 100644 (file)
@@ -58,6 +58,7 @@
 #define METHOD_NAME__PRT        "_PRT"
 #define METHOD_NAME__CRS        "_CRS"
 #define METHOD_NAME__PRS        "_PRS"
+#define METHOD_NAME__AEI        "_AEI"
 #define METHOD_NAME__PRW        "_PRW"
 #define METHOD_NAME__SRS        "_SRS"
 
index 173972672175588c181718aef6cf7f1537c37758..451823cb88372b2130793abbbb4e17869af34faf 100644 (file)
@@ -15,6 +15,7 @@ extern int pxm_to_node(int);
 extern int node_to_pxm(int);
 extern void __acpi_map_pxm_to_node(int, int);
 extern int acpi_map_pxm_to_node(int);
+extern unsigned char acpi_srat_revision;
 
 #endif                         /* CONFIG_ACPI_NUMA */
 #endif                         /* __ACP_NUMA_H */
index 83062ed0ef2f7177285e6688a07e81db8c1a90e4..7c9aebe8a7aa27ebf091c1a0ec2470e1e23d7f0a 100644 (file)
@@ -218,9 +218,13 @@ acpi_status acpi_os_write_port(acpi_io_address address, u32 value, u32 width);
  */
 acpi_status
 acpi_os_read_memory(acpi_physical_address address, u32 * value, u32 width);
+acpi_status
+acpi_os_read_memory64(acpi_physical_address address, u64 *value, u32 width);
 
 acpi_status
 acpi_os_write_memory(acpi_physical_address address, u32 value, u32 width);
+acpi_status
+acpi_os_write_memory64(acpi_physical_address address, u64 value, u32 width);
 
 /*
  * Platform and hardware-independent PCI configuration space access
@@ -238,13 +242,6 @@ acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id,
 /*
  * Miscellaneous
  */
-acpi_status
-acpi_os_validate_address(u8 space_id, acpi_physical_address address,
-                        acpi_size length, char *name);
-acpi_status
-acpi_os_invalidate_address(u8 space_id, acpi_physical_address address,
-                        acpi_size length);
-
 u64 acpi_os_get_timer(void);
 
 acpi_status acpi_os_signal(u32 function, void *info);
index 7762bc2d8404370d27be30e362b031e4da26af4c..a28da35ba45ee3f1fd3bcdf2b8a7f64bbe5d2391 100644 (file)
@@ -47,7 +47,7 @@
 
 /* Current ACPICA subsystem version in YYYYMMDD format */
 
-#define ACPI_CA_VERSION                 0x20110623
+#define ACPI_CA_VERSION                 0x20120111
 
 #include "actypes.h"
 #include "actbl.h"
@@ -74,6 +74,7 @@ extern u8 acpi_gbl_disable_auto_repair;
 extern u32 acpi_current_gpe_count;
 extern struct acpi_table_fadt acpi_gbl_FADT;
 extern u8 acpi_gbl_system_awake_and_running;
+extern u8 acpi_gbl_reduced_hardware;   /* ACPI 5.0 */
 
 extern u32 acpi_rsdt_forced;
 /*
@@ -111,6 +112,11 @@ acpi_status acpi_install_interface(acpi_string interface_name);
 
 acpi_status acpi_remove_interface(acpi_string interface_name);
 
+u32
+acpi_check_address_range(acpi_adr_space_type space_id,
+                        acpi_physical_address address,
+                        acpi_size length, u8 warn);
+
 /*
  * ACPI Memory management
  */
@@ -276,12 +282,23 @@ acpi_status acpi_install_exception_handler(acpi_exception_handler handler);
 acpi_status acpi_install_interface_handler(acpi_interface_handler handler);
 
 /*
- * Event interfaces
+ * Global Lock interfaces
  */
 acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle);
 
 acpi_status acpi_release_global_lock(u32 handle);
 
+/*
+ * Interfaces to AML mutex objects
+ */
+acpi_status
+acpi_acquire_mutex(acpi_handle handle, acpi_string pathname, u16 timeout);
+
+acpi_status acpi_release_mutex(acpi_handle handle, acpi_string pathname);
+
+/*
+ * Fixed Event interfaces
+ */
 acpi_status acpi_enable_event(u32 event, u32 flags);
 
 acpi_status acpi_disable_event(u32 event, u32 flags);
@@ -291,7 +308,7 @@ acpi_status acpi_clear_event(u32 event);
 acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status);
 
 /*
- * GPE Interfaces
+ * General Purpose Event (GPE) Interfaces
  */
 acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number);
 
@@ -345,6 +362,10 @@ acpi_status
 acpi_get_possible_resources(acpi_handle device, struct acpi_buffer *ret_buffer);
 #endif
 
+acpi_status
+acpi_get_event_resources(acpi_handle device_handle,
+                        struct acpi_buffer *ret_buffer);
+
 acpi_status
 acpi_walk_resources(acpi_handle device,
                    char *name,
@@ -360,6 +381,11 @@ acpi_status
 acpi_resource_to_address64(struct acpi_resource *resource,
                           struct acpi_resource_address64 *out);
 
+acpi_status
+acpi_buffer_to_resource(u8 *aml_buffer,
+                       u16 aml_buffer_length,
+                       struct acpi_resource **resource_ptr);
+
 /*
  * Hardware (ACPI device) interfaces
  */
index 0a66cc45dd6b79a0fdb17be3368b87cdd1c646f5..3506e39a66b15962a5d6de4ab54a8117ff0267bb 100644 (file)
@@ -61,11 +61,14 @@ typedef u32 acpi_rsdesc_size;       /* Max Resource Descriptor size is (Length+3) = (6
 #define ACPI_WRITE_COMBINING_MEMORY     (u8) 0x02
 #define ACPI_PREFETCHABLE_MEMORY        (u8) 0x03
 
+/*! [Begin] no source code translation */
 /*
  * IO Attributes
- * The ISA IO ranges are:     n000-n0_fFh, n400-n4_fFh, n800-n8_fFh, n_c00-n_cFFh.
- * The non-ISA IO ranges are: n100-n3_fFh, n500-n7_fFh, n900-n_bFFh, n_cd0-n_fFFh.
+ * The ISA IO ranges are:     n000-n0FFh,  n400-n4FFh, n800-n8FFh, nC00-nCFFh.
+ * The non-ISA IO ranges are: n100-n3FFh,  n500-n7FFh, n900-nBFFh, nCD0-nFFFh.
  */
+/*! [End] no source code translation !*/
+
 #define ACPI_NON_ISA_ONLY_RANGES        (u8) 0x01
 #define ACPI_ISA_ONLY_RANGES            (u8) 0x02
 #define ACPI_ENTIRE_RANGE               (ACPI_NON_ISA_ONLY_RANGES | ACPI_ISA_ONLY_RANGES)
@@ -81,16 +84,26 @@ typedef u32 acpi_rsdesc_size;       /* Max Resource Descriptor size is (Length+3) = (6
 #define ACPI_DECODE_16                  (u8) 0x01      /* 16-bit IO address decode */
 
 /*
- * IRQ Attributes
+ * Interrupt attributes - used in multiple descriptors
  */
+
+/* Triggering */
+
 #define ACPI_LEVEL_SENSITIVE            (u8) 0x00
 #define ACPI_EDGE_SENSITIVE             (u8) 0x01
 
+/* Polarity */
+
 #define ACPI_ACTIVE_HIGH                (u8) 0x00
 #define ACPI_ACTIVE_LOW                 (u8) 0x01
+#define ACPI_ACTIVE_BOTH                (u8) 0x02
+
+/* Sharing */
 
 #define ACPI_EXCLUSIVE                  (u8) 0x00
 #define ACPI_SHARED                     (u8) 0x01
+#define ACPI_EXCLUSIVE_AND_WAKE         (u8) 0x02
+#define ACPI_SHARED_AND_WAKE            (u8) 0x03
 
 /*
  * DMA Attributes
@@ -127,6 +140,8 @@ typedef u32 acpi_rsdesc_size;       /* Max Resource Descriptor size is (Length+3) = (6
 #define ACPI_POS_DECODE                 (u8) 0x00
 #define ACPI_SUB_DECODE                 (u8) 0x01
 
+/* Producer/Consumer */
+
 #define ACPI_PRODUCER                   (u8) 0x00
 #define ACPI_CONSUMER                   (u8) 0x01
 
@@ -192,6 +207,21 @@ struct acpi_resource_fixed_io {
        u8 address_length;
 };
 
+struct acpi_resource_fixed_dma {
+       u16 request_lines;
+       u16 channels;
+       u8 width;
+};
+
+/* Values for Width field above */
+
+#define ACPI_DMA_WIDTH8                         0
+#define ACPI_DMA_WIDTH16                        1
+#define ACPI_DMA_WIDTH32                        2
+#define ACPI_DMA_WIDTH64                        3
+#define ACPI_DMA_WIDTH128                       4
+#define ACPI_DMA_WIDTH256                       5
+
 struct acpi_resource_vendor {
        u16 byte_length;
        u8 byte_data[1];
@@ -329,6 +359,166 @@ struct acpi_resource_generic_register {
        u64 address;
 };
 
+struct acpi_resource_gpio {
+       u8 revision_id;
+       u8 connection_type;
+       u8 producer_consumer;   /* For values, see Producer/Consumer above */
+       u8 pin_config;
+       u8 sharable;            /* For values, see Interrupt Attributes above */
+       u8 io_restriction;
+       u8 triggering;          /* For values, see Interrupt Attributes above */
+       u8 polarity;            /* For values, see Interrupt Attributes above */
+       u16 drive_strength;
+       u16 debounce_timeout;
+       u16 pin_table_length;
+       u16 vendor_length;
+       struct acpi_resource_source resource_source;
+       u16 *pin_table;
+       u8 *vendor_data;
+};
+
+/* Values for GPIO connection_type field above */
+
+#define ACPI_RESOURCE_GPIO_TYPE_INT             0
+#define ACPI_RESOURCE_GPIO_TYPE_IO              1
+
+/* Values for pin_config field above */
+
+#define ACPI_PIN_CONFIG_DEFAULT                 0
+#define ACPI_PIN_CONFIG_PULLUP                  1
+#define ACPI_PIN_CONFIG_PULLDOWN                2
+#define ACPI_PIN_CONFIG_NOPULL                  3
+
+/* Values for io_restriction field above */
+
+#define ACPI_IO_RESTRICT_NONE                   0
+#define ACPI_IO_RESTRICT_INPUT                  1
+#define ACPI_IO_RESTRICT_OUTPUT                 2
+#define ACPI_IO_RESTRICT_NONE_PRESERVE          3
+
+/* Common structure for I2C, SPI, and UART serial descriptors */
+
+#define ACPI_RESOURCE_SERIAL_COMMON \
+       u8                                      revision_id; \
+       u8                                      type; \
+       u8                                      producer_consumer;   /* For values, see Producer/Consumer above */\
+       u8                                      slave_mode; \
+       u8                                      type_revision_id; \
+       u16                                     type_data_length; \
+       u16                                     vendor_length; \
+       struct acpi_resource_source             resource_source; \
+       u8                                      *vendor_data;
+
+struct acpi_resource_common_serialbus {
+ACPI_RESOURCE_SERIAL_COMMON};
+
+/* Values for the Type field above */
+
+#define ACPI_RESOURCE_SERIAL_TYPE_I2C           1
+#define ACPI_RESOURCE_SERIAL_TYPE_SPI           2
+#define ACPI_RESOURCE_SERIAL_TYPE_UART          3
+
+/* Values for slave_mode field above */
+
+#define ACPI_CONTROLLER_INITIATED               0
+#define ACPI_DEVICE_INITIATED                   1
+
+struct acpi_resource_i2c_serialbus {
+       ACPI_RESOURCE_SERIAL_COMMON u8 access_mode;
+       u16 slave_address;
+       u32 connection_speed;
+};
+
+/* Values for access_mode field above */
+
+#define ACPI_I2C_7BIT_MODE                      0
+#define ACPI_I2C_10BIT_MODE                     1
+
+struct acpi_resource_spi_serialbus {
+       ACPI_RESOURCE_SERIAL_COMMON u8 wire_mode;
+       u8 device_polarity;
+       u8 data_bit_length;
+       u8 clock_phase;
+       u8 clock_polarity;
+       u16 device_selection;
+       u32 connection_speed;
+};
+
+/* Values for wire_mode field above */
+
+#define ACPI_SPI_4WIRE_MODE                     0
+#define ACPI_SPI_3WIRE_MODE                     1
+
+/* Values for device_polarity field above */
+
+#define ACPI_SPI_ACTIVE_LOW                     0
+#define ACPI_SPI_ACTIVE_HIGH                    1
+
+/* Values for clock_phase field above */
+
+#define ACPI_SPI_FIRST_PHASE                    0
+#define ACPI_SPI_SECOND_PHASE                   1
+
+/* Values for clock_polarity field above */
+
+#define ACPI_SPI_START_LOW                      0
+#define ACPI_SPI_START_HIGH                     1
+
+struct acpi_resource_uart_serialbus {
+       ACPI_RESOURCE_SERIAL_COMMON u8 endian;
+       u8 data_bits;
+       u8 stop_bits;
+       u8 flow_control;
+       u8 parity;
+       u8 lines_enabled;
+       u16 rx_fifo_size;
+       u16 tx_fifo_size;
+       u32 default_baud_rate;
+};
+
+/* Values for Endian field above */
+
+#define ACPI_UART_LITTLE_ENDIAN                 0
+#define ACPI_UART_BIG_ENDIAN                    1
+
+/* Values for data_bits field above */
+
+#define ACPI_UART_5_DATA_BITS                   0
+#define ACPI_UART_6_DATA_BITS                   1
+#define ACPI_UART_7_DATA_BITS                   2
+#define ACPI_UART_8_DATA_BITS                   3
+#define ACPI_UART_9_DATA_BITS                   4
+
+/* Values for stop_bits field above */
+
+#define ACPI_UART_NO_STOP_BITS                  0
+#define ACPI_UART_1_STOP_BIT                    1
+#define ACPI_UART_1P5_STOP_BITS                 2
+#define ACPI_UART_2_STOP_BITS                   3
+
+/* Values for flow_control field above */
+
+#define ACPI_UART_FLOW_CONTROL_NONE             0
+#define ACPI_UART_FLOW_CONTROL_HW               1
+#define ACPI_UART_FLOW_CONTROL_XON_XOFF         2
+
+/* Values for Parity field above */
+
+#define ACPI_UART_PARITY_NONE                   0
+#define ACPI_UART_PARITY_EVEN                   1
+#define ACPI_UART_PARITY_ODD                    2
+#define ACPI_UART_PARITY_MARK                   3
+#define ACPI_UART_PARITY_SPACE                  4
+
+/* Values for lines_enabled bitfield above */
+
+#define ACPI_UART_CARRIER_DETECT                (1<<2)
+#define ACPI_UART_RING_INDICATOR                (1<<3)
+#define ACPI_UART_DATA_SET_READY                (1<<4)
+#define ACPI_UART_DATA_TERMINAL_READY           (1<<5)
+#define ACPI_UART_CLEAR_TO_SEND                 (1<<6)
+#define ACPI_UART_REQUEST_TO_SEND               (1<<7)
+
 /* ACPI_RESOURCE_TYPEs */
 
 #define ACPI_RESOURCE_TYPE_IRQ                  0
@@ -348,7 +538,10 @@ struct acpi_resource_generic_register {
 #define ACPI_RESOURCE_TYPE_EXTENDED_ADDRESS64   14     /* ACPI 3.0 */
 #define ACPI_RESOURCE_TYPE_EXTENDED_IRQ         15
 #define ACPI_RESOURCE_TYPE_GENERIC_REGISTER     16
-#define ACPI_RESOURCE_TYPE_MAX                  16
+#define ACPI_RESOURCE_TYPE_GPIO                 17     /* ACPI 5.0 */
+#define ACPI_RESOURCE_TYPE_FIXED_DMA            18     /* ACPI 5.0 */
+#define ACPI_RESOURCE_TYPE_SERIAL_BUS           19     /* ACPI 5.0 */
+#define ACPI_RESOURCE_TYPE_MAX                  19
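
[Editor's note] With the three ACPI 5.0 descriptor types wired into union acpi_resource_data below, a resource walk can dispatch on them by type. A hedged sketch of an acpi_walk_resources() callback; the callback and walker names are made up, and what is done with each descriptor is illustrative:

static acpi_status my_crs_callback(struct acpi_resource *res, void *context)
{
	switch (res->type) {
	case ACPI_RESOURCE_TYPE_GPIO:
		/* pins are in res->data.gpio.pin_table[0..pin_table_length-1] */
		break;
	case ACPI_RESOURCE_TYPE_FIXED_DMA:
		/* see res->data.fixed_dma.request_lines, .channels, .width */
		break;
	case ACPI_RESOURCE_TYPE_SERIAL_BUS:
		if (res->data.common_serial_bus.type ==
		    ACPI_RESOURCE_SERIAL_TYPE_I2C) {
			/* res->data.i2c_serial_bus.slave_address, etc. */
		}
		break;
	default:
		break;
	}
	return AE_OK;
}

/* 'handle' would be the ACPI handle of the device being probed */
static acpi_status my_parse_crs(acpi_handle handle)
{
	return acpi_walk_resources(handle, "_CRS", my_crs_callback, NULL);
}
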
 
 /* Master union for resource descriptors */
 
@@ -358,6 +551,7 @@ union acpi_resource_data {
        struct acpi_resource_start_dependent start_dpf;
        struct acpi_resource_io io;
        struct acpi_resource_fixed_io fixed_io;
+       struct acpi_resource_fixed_dma fixed_dma;
        struct acpi_resource_vendor vendor;
        struct acpi_resource_vendor_typed vendor_typed;
        struct acpi_resource_end_tag end_tag;
@@ -370,6 +564,11 @@ union acpi_resource_data {
        struct acpi_resource_extended_address64 ext_address64;
        struct acpi_resource_extended_irq extended_irq;
        struct acpi_resource_generic_register generic_reg;
+       struct acpi_resource_gpio gpio;
+       struct acpi_resource_i2c_serialbus i2c_serial_bus;
+       struct acpi_resource_spi_serialbus spi_serial_bus;
+       struct acpi_resource_uart_serialbus uart_serial_bus;
+       struct acpi_resource_common_serialbus common_serial_bus;
 
        /* Common fields */
 
index f1380287ed4d821474f58a39c372fca6cb5909a1..8e1b92f6f650332e6130486d6b7e7a5bd3f1b3f8 100644 (file)
@@ -255,6 +255,8 @@ struct acpi_table_fadt {
        struct acpi_generic_address xpm_timer_block;    /* 64-bit Extended Power Mgt Timer Ctrl Reg Blk address */
        struct acpi_generic_address xgpe0_block;        /* 64-bit Extended General Purpose Event 0 Reg Blk address */
        struct acpi_generic_address xgpe1_block;        /* 64-bit Extended General Purpose Event 1 Reg Blk address */
+       struct acpi_generic_address sleep_control;      /* 64-bit Sleep Control register */
+       struct acpi_generic_address sleep_status;       /* 64-bit Sleep Status register */
 };
 
 /* Masks for FADT Boot Architecture Flags (boot_flags) */
@@ -264,6 +266,7 @@ struct acpi_table_fadt {
 #define ACPI_FADT_NO_VGA            (1<<2)     /* 02: [V4] It is not safe to probe for VGA hardware */
 #define ACPI_FADT_NO_MSI            (1<<3)     /* 03: [V4] Message Signaled Interrupts (MSI) must not be enabled */
 #define ACPI_FADT_NO_ASPM           (1<<4)     /* 04: [V4] PCIe ASPM control must not be enabled */
+#define ACPI_FADT_NO_CMOS_RTC       (1<<5)     /* 05: [V5] No CMOS real-time clock present */
 
 #define FADT2_REVISION_ID               3
 
@@ -289,6 +292,8 @@ struct acpi_table_fadt {
 #define ACPI_FADT_REMOTE_POWER_ON   (1<<17)    /* 17: [V4] System is compatible with remote power on (ACPI 3.0) */
 #define ACPI_FADT_APIC_CLUSTER      (1<<18)    /* 18: [V4] All local APICs must use cluster model (ACPI 3.0) */
 #define ACPI_FADT_APIC_PHYSICAL     (1<<19)    /* 19: [V4] All local x_aPICs must use physical dest mode (ACPI 3.0) */
+#define ACPI_FADT_HW_REDUCED        (1<<20)    /* 20: [V5] ACPI hardware is not implemented (ACPI 5.0) */
+#define ACPI_FADT_LOW_POWER_S0      (1<<21)    /* 21: [V5] S0 power savings are equal or better than S3 (ACPI 5.0) */
 
 /* Values for preferred_profile (Preferred Power Management Profiles) */
 
@@ -299,14 +304,16 @@ enum acpi_prefered_pm_profiles {
        PM_WORKSTATION = 3,
        PM_ENTERPRISE_SERVER = 4,
        PM_SOHO_SERVER = 5,
-       PM_APPLIANCE_PC = 6
+       PM_APPLIANCE_PC = 6,
+       PM_PERFORMANCE_SERVER = 7,
+       PM_TABLET = 8
 };
 
 /* Reset to default packing */
 
 #pragma pack()
 
-#define ACPI_FADT_OFFSET(f)             (u8) ACPI_OFFSET (struct acpi_table_fadt, f)
+#define ACPI_FADT_OFFSET(f)             (u16) ACPI_OFFSET (struct acpi_table_fadt, f)
 
 /*
  * Internal table-related structures
@@ -342,6 +349,7 @@ struct acpi_table_desc {
 
 #include <acpi/actbl1.h>
 #include <acpi/actbl2.h>
+#include <acpi/actbl3.h>
 
 /*
  * Sizes of the various flavors of FADT. We need to look closely
@@ -351,12 +359,15 @@ struct acpi_table_desc {
  * FADT is the bottom line as to what the version really is.
  *
  * For reference, the values below are as follows:
- *     FADT V1  size: 0x74
- *     FADT V2  size: 0x84
- *     FADT V3+ size: 0xF4
+ *     FADT V1  size: 0x074
+ *     FADT V2  size: 0x084
+ *     FADT V3  size: 0x0F4
+ *     FADT V4  size: 0x0F4
+ *     FADT V5  size: 0x10C
  */
 #define ACPI_FADT_V1_SIZE       (u32) (ACPI_FADT_OFFSET (flags) + 4)
 #define ACPI_FADT_V2_SIZE       (u32) (ACPI_FADT_OFFSET (reserved4[0]) + 3)
-#define ACPI_FADT_V3_SIZE       (u32) (sizeof (struct acpi_table_fadt))
+#define ACPI_FADT_V3_SIZE       (u32) (ACPI_FADT_OFFSET (sleep_control))
+#define ACPI_FADT_V5_SIZE       (u32) (sizeof (struct acpi_table_fadt))
 
 #endif                         /* __ACTBL_H__ */
index 7504bc99b29b7359f9ec503a6c0def183c98c2c9..71e747beac8f3e016fa3719f4b75e7706a3b64fb 100644 (file)
@@ -228,7 +228,8 @@ enum acpi_einj_actions {
        ACPI_EINJ_EXECUTE_OPERATION = 5,
        ACPI_EINJ_CHECK_BUSY_STATUS = 6,
        ACPI_EINJ_GET_COMMAND_STATUS = 7,
-       ACPI_EINJ_ACTION_RESERVED = 8,  /* 8 and greater are reserved */
+       ACPI_EINJ_SET_ERROR_TYPE_WITH_ADDRESS = 8,
+       ACPI_EINJ_ACTION_RESERVED = 9,  /* 9 and greater are reserved */
        ACPI_EINJ_TRIGGER_ERROR = 0xFF  /* Except for this value */
 };
 
@@ -240,7 +241,27 @@ enum acpi_einj_instructions {
        ACPI_EINJ_WRITE_REGISTER = 2,
        ACPI_EINJ_WRITE_REGISTER_VALUE = 3,
        ACPI_EINJ_NOOP = 4,
-       ACPI_EINJ_INSTRUCTION_RESERVED = 5      /* 5 and greater are reserved */
+       ACPI_EINJ_FLUSH_CACHELINE = 5,
+       ACPI_EINJ_INSTRUCTION_RESERVED = 6      /* 6 and greater are reserved */
+};
+
+struct acpi_einj_error_type_with_addr {
+       u32 error_type;
+       u32 vendor_struct_offset;
+       u32 flags;
+       u32 apic_id;
+       u64 address;
+       u64 range;
+       u32 pcie_id;
+};
+
+struct acpi_einj_vendor {
+       u32 length;
+       u32 pcie_id;
+       u16 vendor_id;
+       u16 device_id;
+       u8 revision_id;
+       u8 reserved[3];
 };
 
 /* EINJ Trigger Error Action Table */
@@ -275,6 +296,7 @@ enum acpi_einj_command_status {
 #define ACPI_EINJ_PLATFORM_CORRECTABLE      (1<<9)
 #define ACPI_EINJ_PLATFORM_UNCORRECTABLE    (1<<10)
 #define ACPI_EINJ_PLATFORM_FATAL            (1<<11)
+#define ACPI_EINJ_VENDOR_DEFINED            (1<<31)
 
 /*******************************************************************************
  *
@@ -631,7 +653,9 @@ enum acpi_madt_type {
        ACPI_MADT_TYPE_INTERRUPT_SOURCE = 8,
        ACPI_MADT_TYPE_LOCAL_X2APIC = 9,
        ACPI_MADT_TYPE_LOCAL_X2APIC_NMI = 10,
-       ACPI_MADT_TYPE_RESERVED = 11    /* 11 and greater are reserved */
+       ACPI_MADT_TYPE_GENERIC_INTERRUPT = 11,
+       ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR = 12,
+       ACPI_MADT_TYPE_RESERVED = 13    /* 13 and greater are reserved */
 };
 
 /*
@@ -752,11 +776,36 @@ struct acpi_madt_local_x2apic_nmi {
        u8 reserved[3];
 };
 
+/* 11: Generic Interrupt (ACPI 5.0) */
+
+struct acpi_madt_generic_interrupt {
+       struct acpi_subtable_header header;
+       u16 reserved;           /* Reserved - must be zero */
+       u32 gic_id;
+       u32 uid;
+       u32 flags;
+       u32 parking_version;
+       u32 performance_interrupt;
+       u64 parked_address;
+       u64 base_address;
+};
+
+/* 12: Generic Distributor (ACPI 5.0) */
+
+struct acpi_madt_generic_distributor {
+       struct acpi_subtable_header header;
+       u16 reserved;           /* Reserved - must be zero */
+       u32 gic_id;
+       u64 base_address;
+       u32 global_irq_base;
+       u32 reserved2;          /* Reserved - must be zero */
+};
+
 /*
  * Common flags fields for MADT subtables
  */
 
-/* MADT Local APIC flags (lapic_flags) */
+/* MADT Local APIC flags (lapic_flags) and GIC flags */
 
 #define ACPI_MADT_ENABLED           (1)        /* 00: Processor is usable if set */
 
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
new file mode 100644 (file)
index 0000000..c22ce80
--- /dev/null
@@ -0,0 +1,552 @@
+/******************************************************************************
+ *
+ * Name: actbl3.h - ACPI Table Definitions
+ *
+ *****************************************************************************/
+
+/*
+ * Copyright (C) 2000 - 2011, Intel Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions, and the following disclaimer,
+ *    without modification.
+ * 2. Redistributions in binary form must reproduce at minimum a disclaimer
+ *    substantially similar to the "NO WARRANTY" disclaimer below
+ *    ("Disclaimer") and any redistribution must be conditioned upon
+ *    including a substantially similar Disclaimer requirement for further
+ *    binary redistribution.
+ * 3. Neither the names of the above-listed copyright holders nor the names
+ *    of any contributors may be used to endorse or promote products derived
+ *    from this software without specific prior written permission.
+ *
+ * Alternatively, this software may be distributed under the terms of the
+ * GNU General Public License ("GPL") version 2 as published by the Free
+ * Software Foundation.
+ *
+ * NO WARRANTY
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGES.
+ */
+
+#ifndef __ACTBL3_H__
+#define __ACTBL3_H__
+
+/*******************************************************************************
+ *
+ * Additional ACPI Tables (3)
+ *
+ * These tables are not consumed directly by the ACPICA subsystem, but are
+ * included here to support device drivers and the AML disassembler.
+ *
+ * The tables in this file are fully defined within the ACPI specification.
+ *
+ ******************************************************************************/
+
+/*
+ * Values for description table header signatures for tables defined in this
+ * file. Useful because they make it more difficult to inadvertently type in
+ * the wrong signature.
+ */
+#define ACPI_SIG_BGRT           "BGRT" /* Boot Graphics Resource Table */
+#define ACPI_SIG_DRTM           "DRTM" /* Dynamic Root of Trust for Measurement table */
+#define ACPI_SIG_FPDT           "FPDT" /* Firmware Performance Data Table */
+#define ACPI_SIG_GTDT           "GTDT" /* Generic Timer Description Table */
+#define ACPI_SIG_MPST           "MPST" /* Memory Power State Table */
+#define ACPI_SIG_PCCT           "PCCT" /* Platform Communications Channel Table */
+#define ACPI_SIG_PMTT           "PMTT" /* Platform Memory Topology Table */
+#define ACPI_SIG_RASF           "RASF" /* RAS Feature table */
+
+#define ACPI_SIG_S3PT           "S3PT" /* S3 Performance (sub)Table */
+#define ACPI_SIG_PCCS           "PCC"  /* PCC Shared Memory Region */
+
+/* Reserved table signatures */
+
+#define ACPI_SIG_CSRT           "CSRT" /* Core System Resources Table */
+#define ACPI_SIG_DBG2           "DBG2" /* Debug Port table 2 */
+#define ACPI_SIG_MATR           "MATR" /* Memory Address Translation Table */
+#define ACPI_SIG_MSDM           "MSDM" /* Microsoft Data Management Table */
+#define ACPI_SIG_WPBT           "WPBT" /* Windows Platform Binary Table */
+
+/*
+ * All tables must be byte-packed to match the ACPI specification, since
+ * the tables are provided by the system BIOS.
+ */
+#pragma pack(1)
+
+/*
+ * Note about bitfields: The u8 type is used for bitfields in ACPI tables.
+ * This is the only type that is even remotely portable. Anything else is not
+ * portable, so do not use any other bitfield types.
+ */
+
+/*******************************************************************************
+ *
+ * BGRT - Boot Graphics Resource Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_bgrt {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u16 version;
+       u8 status;
+       u8 image_type;
+       u64 image_address;
+       u32 image_offset_x;
+       u32 image_offset_y;
+};
+
+/*******************************************************************************
+ *
+ * DRTM - Dynamic Root of Trust for Measurement table
+ *
+ ******************************************************************************/
+
+struct acpi_table_drtm {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u64 entry_base_address;
+       u64 entry_length;
+       u32 entry_address32;
+       u64 entry_address64;
+       u64 exit_address;
+       u64 log_area_address;
+       u32 log_area_length;
+       u64 arch_dependent_address;
+       u32 flags;
+};
+
+/* 1) Validated Tables List */
+
+struct acpi_drtm_vtl_list {
+       u32 validated_table_list_count;
+};
+
+/* 2) Resources List */
+
+struct acpi_drtm_resource_list {
+       u32 resource_list_count;
+};
+
+/* 3) Platform-specific Identifiers List */
+
+struct acpi_drtm_id_list {
+       u32 id_list_count;
+};
+
+/*******************************************************************************
+ *
+ * FPDT - Firmware Performance Data Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_fpdt {
+       struct acpi_table_header header;        /* Common ACPI table header */
+};
+
+/* FPDT subtable header */
+
+struct acpi_fpdt_header {
+       u16 type;
+       u8 length;
+       u8 revision;
+};
+
+/* Values for Type field above */
+
+enum acpi_fpdt_type {
+       ACPI_FPDT_TYPE_BOOT = 0,
+       ACPI_FPDT_TYPE_S3PERF = 1,
+};
+
+/*
+ * FPDT subtables
+ */
+
+/* 0: Firmware Basic Boot Performance Record */
+
+struct acpi_fpdt_boot {
+       struct acpi_fpdt_header header;
+       u8 reserved[4];
+       u64 reset_end;
+       u64 load_start;
+       u64 startup_start;
+       u64 exit_services_entry;
+       u64 exit_services_exit;
+};
+
+/* 1: S3 Performance Table Pointer Record */
+
+struct acpi_fpdt_s3pt_ptr {
+       struct acpi_fpdt_header header;
+       u8 reserved[4];
+       u64 address;
+};
+
+/*
+ * S3PT - S3 Performance Table. This table is pointed to by the
+ * FPDT S3 Pointer Record above.
+ */
+struct acpi_table_s3pt {
+       u8 signature[4];        /* "S3PT" */
+       u32 length;
+};
+
+/*
+ * S3PT Subtables
+ */
+struct acpi_s3pt_header {
+       u16 type;
+       u8 length;
+       u8 revision;
+};
+
+/* Values for Type field above */
+
+enum acpi_s3pt_type {
+       ACPI_S3PT_TYPE_RESUME = 0,
+       ACPI_S3PT_TYPE_SUSPEND = 1,
+};
+
+struct acpi_s3pt_resume {
+       struct acpi_s3pt_header header;
+       u32 resume_count;
+       u64 full_resume;
+       u64 average_resume;
+};
+
+struct acpi_s3pt_suspend {
+       struct acpi_s3pt_header header;
+       u64 suspend_start;
+       u64 suspend_end;
+};
+
+/*******************************************************************************
+ *
+ * GTDT - Generic Timer Description Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_gtdt {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u64 address;
+       u32 flags;
+       u32 secure_pl1_interrupt;
+       u32 secure_pl1_flags;
+       u32 non_secure_pl1_interrupt;
+       u32 non_secure_pl1_flags;
+       u32 virtual_timer_interrupt;
+       u32 virtual_timer_flags;
+       u32 non_secure_pl2_interrupt;
+       u32 non_secure_pl2_flags;
+};
+
+/* Values for Flags field above */
+
+#define ACPI_GTDT_MAPPED_BLOCK_PRESENT      1
+
+/* Values for all "TimerFlags" fields above */
+
+#define ACPI_GTDT_INTERRUPT_MODE            1
+#define ACPI_GTDT_INTERRUPT_POLARITY        2
+
+/*******************************************************************************
+ *
+ * MPST - Memory Power State Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+#define ACPI_MPST_CHANNEL_INFO \
+       u16                             reserved1; \
+       u8                              channel_id; \
+       u8                              reserved2; \
+       u16                             power_node_count;
+
+/* Main table */
+
+struct acpi_table_mpst {
+       struct acpi_table_header header;        /* Common ACPI table header */
+        ACPI_MPST_CHANNEL_INFO /* Platform Communication Channel */
+};
+
+/* Memory Platform Communication Channel Info */
+
+struct acpi_mpst_channel {
+       ACPI_MPST_CHANNEL_INFO  /* Platform Communication Channel */
+};
+
+/* Memory Power Node Structure */
+
+struct acpi_mpst_power_node {
+       u8 flags;
+       u8 reserved1;
+       u16 node_id;
+       u32 length;
+       u64 range_address;
+       u64 range_length;
+       u8 num_power_states;
+       u8 num_physical_components;
+       u16 reserved2;
+};
+
+/* Values for Flags field above */
+
+#define ACPI_MPST_ENABLED               1
+#define ACPI_MPST_POWER_MANAGED         2
+#define ACPI_MPST_HOT_PLUG_CAPABLE      4
+
+/* Memory Power State Structure (follows POWER_NODE above) */
+
+struct acpi_mpst_power_state {
+       u8 power_state;
+       u8 info_index;
+};
+
+/* Physical Component ID Structure (follows POWER_STATE above) */
+
+struct acpi_mpst_component {
+       u16 component_id;
+};
+
+/* Memory Power State Characteristics Structure (follows all POWER_NODEs) */
+
+struct acpi_mpst_data_hdr {
+       u16 characteristics_count;
+};
+
+struct acpi_mpst_power_data {
+       u8 revision;
+       u8 flags;
+       u16 reserved1;
+       u32 average_power;
+       u32 power_saving;
+       u64 exit_latency;
+       u64 reserved2;
+};
+
+/* Values for Flags field above */
+
+#define ACPI_MPST_PRESERVE              1
+#define ACPI_MPST_AUTOENTRY             2
+#define ACPI_MPST_AUTOEXIT              4
+
+/* Shared Memory Region (not part of an ACPI table) */
+
+struct acpi_mpst_shared {
+       u32 signature;
+       u16 pcc_command;
+       u16 pcc_status;
+       u16 command_register;
+       u16 status_register;
+       u16 power_state_id;
+       u16 power_node_id;
+       u64 energy_consumed;
+       u64 average_power;
+};
+
+/*******************************************************************************
+ *
+ * PCCT - Platform Communications Channel Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_pcct {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u32 flags;
+       u32 latency;
+       u32 reserved;
+};
+
+/* Values for Flags field above */
+
+#define ACPI_PCCT_DOORBELL              1
+
+/*
+ * PCCT subtables
+ */
+
+/* 0: Generic Communications Subspace */
+
+struct acpi_pcct_subspace {
+       struct acpi_subtable_header header;
+       u8 reserved[6];
+       u64 base_address;
+       u64 length;
+       struct acpi_generic_address doorbell_register;
+       u64 preserve_mask;
+       u64 write_mask;
+};
+
+/*
+ * PCC memory structures (not part of the ACPI table)
+ */
+
+/* Shared Memory Region */
+
+struct acpi_pcct_shared_memory {
+       u32 signature;
+       u16 command;
+       u16 status;
+};
+
+/*******************************************************************************
+ *
+ * PMTT - Platform Memory Topology Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_pmtt {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u32 reserved;
+};
+
+/* Common header for PMTT subtables that follow main table */
+
+struct acpi_pmtt_header {
+       u8 type;
+       u8 reserved1;
+       u16 length;
+       u16 flags;
+       u16 reserved2;
+};
+
+/* Values for Type field above */
+
+#define ACPI_PMTT_TYPE_SOCKET           0
+#define ACPI_PMTT_TYPE_CONTROLLER       1
+#define ACPI_PMTT_TYPE_DIMM             2
+#define ACPI_PMTT_TYPE_RESERVED         3      /* 0x03-0xFF are reserved */
+
+/* Values for Flags field above */
+
+#define ACPI_PMTT_TOP_LEVEL             0x0001
+#define ACPI_PMTT_PHYSICAL              0x0002
+#define ACPI_PMTT_MEMORY_TYPE           0x000C
+
+/*
+ * PMTT subtables, correspond to Type in struct acpi_pmtt_header
+ */
+
+/* 0: Socket Structure */
+
+struct acpi_pmtt_socket {
+       struct acpi_pmtt_header header;
+       u16 socket_id;
+       u16 reserved;
+};
+
+/* 1: Memory Controller subtable */
+
+struct acpi_pmtt_controller {
+       struct acpi_pmtt_header header;
+       u32 read_latency;
+       u32 write_latency;
+       u32 read_bandwidth;
+       u32 write_bandwidth;
+       u16 access_width;
+       u16 alignment;
+       u16 reserved;
+       u16 domain_count;
+};
+
+/* 1a: Proximity Domain substructure */
+
+struct acpi_pmtt_domain {
+       u32 proximity_domain;
+};
+
+/* 2: Physical Component Identifier (DIMM) */
+
+struct acpi_pmtt_physical_component {
+       struct acpi_pmtt_header header;
+       u16 component_id;
+       u16 reserved;
+       u32 memory_size;
+       u32 bios_handle;
+};
+
+/*******************************************************************************
+ *
+ * RASF - RAS Feature Table (ACPI 5.0)
+ *        Version 1
+ *
+ ******************************************************************************/
+
+struct acpi_table_rasf {
+       struct acpi_table_header header;        /* Common ACPI table header */
+       u8 channel_id[12];
+};
+
+/* RASF Platform Communication Channel Shared Memory Region */
+
+struct acpi_rasf_shared_memory {
+       u32 signature;
+       u16 command;
+       u16 status;
+       u64 requested_address;
+       u64 requested_length;
+       u64 actual_address;
+       u64 actual_length;
+       u16 flags;
+       u8 speed;
+};
+
+/* Masks for Flags and Speed fields above */
+
+#define ACPI_RASF_SCRUBBER_RUNNING      1
+#define ACPI_RASF_SPEED                 (7<<1)
+
+/* Channel Commands */
+
+enum acpi_rasf_commands {
+       ACPI_RASF_GET_RAS_CAPABILITIES = 1,
+       ACPI_RASF_GET_PATROL_PARAMETERS = 2,
+       ACPI_RASF_START_PATROL_SCRUBBER = 3,
+       ACPI_RASF_STOP_PATROL_SCRUBBER = 4
+};
+
+/* Channel Command flags */
+
+#define ACPI_RASF_GENERATE_SCI          (1<<15)
+
+/* Status values */
+
+enum acpi_rasf_status {
+       ACPI_RASF_SUCCESS = 0,
+       ACPI_RASF_NOT_VALID = 1,
+       ACPI_RASF_NOT_SUPPORTED = 2,
+       ACPI_RASF_BUSY = 3,
+       ACPI_RASF_FAILED = 4,
+       ACPI_RASF_ABORTED = 5,
+       ACPI_RASF_INVALID_DATA = 6
+};
+
+/* Status flags */
+
+#define ACPI_RASF_COMMAND_COMPLETE      (1)
+#define ACPI_RASF_SCI_DOORBELL          (1<<1)
+#define ACPI_RASF_ERROR                 (1<<2)
+#define ACPI_RASF_STATUS                (0x1F<<3)
+
+/* Reset to default packing */
+
+#pragma pack()
+
+#endif                         /* __ACTBL3_H__ */
index ed73f6705c860d2bc8c28db91b7191c13c144b31..d5dee7ce9474ee88c4bf3cda01339e74067311ad 100644 (file)
@@ -712,8 +712,10 @@ typedef u8 acpi_adr_space_type;
 #define ACPI_ADR_SPACE_CMOS             (acpi_adr_space_type) 5
 #define ACPI_ADR_SPACE_PCI_BAR_TARGET   (acpi_adr_space_type) 6
 #define ACPI_ADR_SPACE_IPMI             (acpi_adr_space_type) 7
+#define ACPI_ADR_SPACE_GPIO             (acpi_adr_space_type) 8
+#define ACPI_ADR_SPACE_GSBUS            (acpi_adr_space_type) 9
 
-#define ACPI_NUM_PREDEFINED_REGIONS     8
+#define ACPI_NUM_PREDEFINED_REGIONS     10
 
 /*
  * Special Address Spaces
@@ -957,6 +959,14 @@ acpi_status(*acpi_adr_space_handler) (u32 function,
 
 #define ACPI_DEFAULT_HANDLER            NULL
 
+/* Special Context data for generic_serial_bus/general_purpose_io (ACPI 5.0) */
+
+struct acpi_connection_info {
+       u8 *connection;
+       u16 length;
+       u8 access_length;
+};
+
 typedef
 acpi_status(*acpi_adr_space_setup) (acpi_handle region_handle,
                                    u32 function,
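
[Editor's note] struct acpi_connection_info is how a GenericSerialBus or GeneralPurposeIo operation-region handler learns which Connection() resource is currently in effect; together with acpi_buffer_to_resource() (added earlier in this series) the raw AML buffer can be decoded back into a struct acpi_resource. A hedged sketch of such a handler — it assumes the handler was registered with an acpi_connection_info (or a structure starting with one) as its handler context, and how that registration is done is not shown:

static acpi_status
my_gsbus_handler(u32 function, acpi_physical_address address, u32 bit_width,
		 u64 *value, void *handler_context, void *region_context)
{
	struct acpi_connection_info *info = handler_context;
	struct acpi_resource *ares;
	acpi_status status;

	/* Decode the Connection() buffer for this field access */
	status = acpi_buffer_to_resource(info->connection, info->length, &ares);
	if (ACPI_FAILURE(status))
		return status;

	if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS &&
	    ares->data.common_serial_bus.type == ACPI_RESOURCE_SERIAL_TYPE_I2C) {
		/* ares->data.i2c_serial_bus.slave_address identifies the target */
	}

	ACPI_FREE(ares);
	return AE_OK;
}
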
diff --git a/include/acpi/atomicio.h b/include/acpi/atomicio.h
deleted file mode 100644 (file)
index 8b9fb4b..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef ACPI_ATOMIC_IO_H
-#define ACPI_ATOMIC_IO_H
-
-int acpi_pre_map_gar(struct acpi_generic_address *reg);
-int acpi_post_unmap_gar(struct acpi_generic_address *reg);
-
-int acpi_atomic_read(u64 *val, struct acpi_generic_address *reg);
-int acpi_atomic_write(u64 val, struct acpi_generic_address *reg);
-
-#endif
index 610f6fb1bbc2ed71109c3ad4e290d51febbb4376..8cf7e98a2c7bc21dc050b85dc778fa6329c4d076 100644 (file)
@@ -195,6 +195,7 @@ struct acpi_processor_flags {
        u8 has_cst:1;
        u8 power_setup_done:1;
        u8 bm_rld_set:1;
+       u8 need_hotplug_init:1;
 };
 
 struct acpi_processor {
index 8de4b73e19e25b8c72099bfdd90f55e5821a2e61..e58fcf891370e73996a7fe09b510a7b61d7a9fb8 100644 (file)
@@ -15,6 +15,16 @@ struct pci_dev;
 #ifdef CONFIG_PCI
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+/* Create a virtual mapping cookie for a port on a given PCI device.
+ * Do not call this directly, it exists to make it easier for architectures
+ * to override */
+#ifdef CONFIG_NO_GENERIC_PCI_IOPORT_MAP
+extern void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
+                                     unsigned int nr);
+#else
+#define __pci_ioport_map(dev, port, nr) ioport_map((port), (nr))
+#endif
+
 #else
 static inline void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max)
 {
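
[Editor's note] The new __pci_ioport_map() hook lets an architecture that selects CONFIG_NO_GENERIC_PCI_IOPORT_MAP substitute its own port mapping while still using the generic pci_iomap(). A hedged sketch of what the arch-side definition might look like; the body is purely illustrative:

/* In the architecture's PCI code, built only when
 * CONFIG_NO_GENERIC_PCI_IOPORT_MAP is selected. */
void __iomem *__pci_ioport_map(struct pci_dev *dev, unsigned long port,
			       unsigned int nr)
{
	/* e.g. translate 'port' through a host-bridge-specific IO window
	 * before handing it to the normal ioport mapping */
	return ioport_map(port, nr);
}
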
index 76caa67c22e2629391a4124396723a94a5736e56..92f0981b5fb862e214ecf8b5c906ad98ef065a92 100644 (file)
@@ -1328,6 +1328,7 @@ extern int drm_getmagic(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
 extern int drm_authmagic(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
+extern int drm_remove_magic(struct drm_master *master, drm_magic_t magic);
 
 /* Cache management (drm_cache.c) */
 void drm_clflush_pages(struct page *pages[], unsigned long num_pages);
index c37c34275a449626b31bd02731fa3fa649bfef70..bc9ec1d7698cd730df2ef58c4a3ee58f1c067568 100644 (file)
@@ -17,7 +17,7 @@
 
 /*****************************************************************************/
 /*
- * the payload for a key of type "user"
+ * the payload for a key of type "user" or "logon"
  * - once filled in and attached to a key:
  *   - the payload struct is invariant may not be changed, only replaced
  *   - the payload must be read with RCU procedures or with the key semaphore
@@ -33,6 +33,7 @@ struct user_key_payload {
 };
 
 extern struct key_type key_type_user;
+extern struct key_type key_type_logon;
 
 extern int user_instantiate(struct key *key, const void *data, size_t datalen);
 extern int user_update(struct key *key, const void *data, size_t datalen);
index 627a3a42e4d8e3ba015ad32a6bf01f779c356943..3f968665899b9cadeef825dab03666d04eac3c64 100644 (file)
@@ -310,6 +310,11 @@ extern acpi_status acpi_pci_osc_control_set(acpi_handle handle,
                                             u32 *mask, u32 req);
 extern void acpi_early_init(void);
 
+extern int acpi_nvs_register(__u64 start, __u64 size);
+
+extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
+                                   void *data);
+
 #else  /* !CONFIG_ACPI */
 
 #define acpi_disabled 1
@@ -352,15 +357,18 @@ static inline int acpi_table_parse(char *id,
 {
        return -1;
 }
-#endif /* !CONFIG_ACPI */
 
-#ifdef CONFIG_ACPI_SLEEP
-int suspend_nvs_register(unsigned long start, unsigned long size);
-#else
-static inline int suspend_nvs_register(unsigned long a, unsigned long b)
+static inline int acpi_nvs_register(__u64 start, __u64 size)
 {
        return 0;
 }
-#endif
+
+static inline int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *),
+                                          void *data)
+{
+       return 0;
+}
+
+#endif /* !CONFIG_ACPI */
 
 #endif /*_LINUX_ACPI_H*/
index 4afd7102459d7fbdef49be451141e16b0591f5e7..b0ffa219993ec2be82a01854718d5e1269ef05bb 100644 (file)
@@ -12,4 +12,7 @@ static inline void __iomem *acpi_os_ioremap(acpi_physical_address phys,
 
 void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size);
 
+int acpi_os_map_generic_address(struct acpi_generic_address *addr);
+void acpi_os_unmap_generic_address(struct acpi_generic_address *addr);
+
 #endif
index 9eabffbc4e50c4b2ae7a1049970d7f7bc39f12ed..033f6aa670de5086ba64dcde7ee6ab7c80e269a7 100644 (file)
@@ -134,7 +134,7 @@ struct pl08x_txd {
        struct dma_async_tx_descriptor tx;
        struct list_head node;
        struct list_head dsg_list;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        dma_addr_t llis_bus;
        struct pl08x_lli *llis_va;
        /* Default cctl value for LLIs */
@@ -197,7 +197,7 @@ struct pl08x_dma_chan {
        dma_addr_t dst_addr;
        u32 src_cctl;
        u32 dst_cctl;
-       enum dma_data_direction runtime_direction;
+       enum dma_transfer_direction runtime_direction;
        dma_cookie_t lc;
        struct list_head pend_list;
        struct pl08x_txd *at;
index 426ab9f4dd853b18d3e8df1b6a3fd2831fec0a57..9ff7a2c48b508103576d175d88fed1c5b3593592 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <linux/types.h>
 #include <linux/elf-em.h>
+#include <linux/ptrace.h>
 
 /* The netlink messages for the audit system is divided into blocks:
  * 1000 - 1099 are for commanding the audit system
  * AUDIT_UNUSED_BITS is updated if need be. */
 #define AUDIT_UNUSED_BITS      0x07FFFC00
 
+/* AUDIT_FIELD_COMPARE rule list */
+#define AUDIT_COMPARE_UID_TO_OBJ_UID   1
+#define AUDIT_COMPARE_GID_TO_OBJ_GID   2
+#define AUDIT_COMPARE_EUID_TO_OBJ_UID  3
+#define AUDIT_COMPARE_EGID_TO_OBJ_GID  4
+#define AUDIT_COMPARE_AUID_TO_OBJ_UID  5
+#define AUDIT_COMPARE_SUID_TO_OBJ_UID  6
+#define AUDIT_COMPARE_SGID_TO_OBJ_GID  7
+#define AUDIT_COMPARE_FSUID_TO_OBJ_UID 8
+#define AUDIT_COMPARE_FSGID_TO_OBJ_GID 9
+
+#define AUDIT_COMPARE_UID_TO_AUID      10
+#define AUDIT_COMPARE_UID_TO_EUID      11
+#define AUDIT_COMPARE_UID_TO_FSUID     12
+#define AUDIT_COMPARE_UID_TO_SUID      13
+
+#define AUDIT_COMPARE_AUID_TO_FSUID    14
+#define AUDIT_COMPARE_AUID_TO_SUID     15
+#define AUDIT_COMPARE_AUID_TO_EUID     16
+
+#define AUDIT_COMPARE_EUID_TO_SUID     17
+#define AUDIT_COMPARE_EUID_TO_FSUID    18
+
+#define AUDIT_COMPARE_SUID_TO_FSUID    19
+
+#define AUDIT_COMPARE_GID_TO_EGID      20
+#define AUDIT_COMPARE_GID_TO_FSGID     21
+#define AUDIT_COMPARE_GID_TO_SGID      22
+
+#define AUDIT_COMPARE_EGID_TO_FSGID    23
+#define AUDIT_COMPARE_EGID_TO_SGID     24
+#define AUDIT_COMPARE_SGID_TO_FSGID    25
+
+#define AUDIT_MAX_FIELD_COMPARE                AUDIT_COMPARE_SGID_TO_FSGID
 
 /* Rule fields */
                                /* These are useful when checking the
 #define AUDIT_PERM     106
 #define AUDIT_DIR      107
 #define AUDIT_FILETYPE 108
+#define AUDIT_OBJ_UID  109
+#define AUDIT_OBJ_GID  110
+#define AUDIT_FIELD_COMPARE    111
 
 #define AUDIT_ARG0      200
 #define AUDIT_ARG1      (AUDIT_ARG0+1)
@@ -408,28 +446,24 @@ struct audit_field {
        void                            *lsm_rule;
 };
 
-#define AUDITSC_INVALID 0
-#define AUDITSC_SUCCESS 1
-#define AUDITSC_FAILURE 2
-#define AUDITSC_RESULT(x) ( ((long)(x))<0?AUDITSC_FAILURE:AUDITSC_SUCCESS )
 extern int __init audit_register_class(int class, unsigned *list);
 extern int audit_classify_syscall(int abi, unsigned syscall);
 extern int audit_classify_arch(int arch);
 #ifdef CONFIG_AUDITSYSCALL
 /* These are defined in auditsc.c */
                                /* Public API */
-extern void audit_finish_fork(struct task_struct *child);
 extern int  audit_alloc(struct task_struct *task);
-extern void audit_free(struct task_struct *task);
-extern void audit_syscall_entry(int arch,
-                               int major, unsigned long a0, unsigned long a1,
-                               unsigned long a2, unsigned long a3);
-extern void audit_syscall_exit(int failed, long return_code);
+extern void __audit_free(struct task_struct *task);
+extern void __audit_syscall_entry(int arch,
+                                 int major, unsigned long a0, unsigned long a1,
+                                 unsigned long a2, unsigned long a3);
+extern void __audit_syscall_exit(int ret_success, long ret_value);
 extern void __audit_getname(const char *name);
 extern void audit_putname(const char *name);
 extern void __audit_inode(const char *name, const struct dentry *dentry);
 extern void __audit_inode_child(const struct dentry *dentry,
                                const struct inode *parent);
+extern void __audit_seccomp(unsigned long syscall);
 extern void __audit_ptrace(struct task_struct *t);
 
 static inline int audit_dummy_context(void)
@@ -437,6 +471,27 @@ static inline int audit_dummy_context(void)
        void *p = current->audit_context;
        return !p || *(int *)p;
 }
+static inline void audit_free(struct task_struct *task)
+{
+       if (unlikely(task->audit_context))
+               __audit_free(task);
+}
+static inline void audit_syscall_entry(int arch, int major, unsigned long a0,
+                                      unsigned long a1, unsigned long a2,
+                                      unsigned long a3)
+{
+       if (unlikely(!audit_dummy_context()))
+               __audit_syscall_entry(arch, major, a0, a1, a2, a3);
+}
+static inline void audit_syscall_exit(void *pt_regs)
+{
+       if (unlikely(current->audit_context)) {
+               int success = is_syscall_success(pt_regs);
+               int return_code = regs_return_value(pt_regs);
+
+               __audit_syscall_exit(success, return_code);
+       }
+}
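
[Editor's note] The reworked wrappers above take the raw register set on syscall exit and derive success and return value themselves via is_syscall_success()/regs_return_value(). A hedged sketch of the resulting call pattern from an architecture's syscall tracing path, loosely modeled on x86-64 — the register names and AUDIT_ARCH value are arch-specific and the function names are made up:

static void my_syscall_trace_enter(struct pt_regs *regs)
{
	audit_syscall_entry(AUDIT_ARCH_X86_64, regs->orig_ax,
			    regs->di, regs->si, regs->dx, regs->r10);
}

static void my_syscall_trace_leave(struct pt_regs *regs)
{
	/* success and return code are extracted from pt_regs by the wrapper */
	audit_syscall_exit(regs);
}
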
 static inline void audit_getname(const char *name)
 {
        if (unlikely(!audit_dummy_context()))
@@ -453,6 +508,12 @@ static inline void audit_inode_child(const struct dentry *dentry,
 }
 void audit_core_dumps(long signr);
 
+static inline void audit_seccomp(unsigned long syscall)
+{
+       if (unlikely(!audit_dummy_context()))
+               __audit_seccomp(syscall);
+}
+
 static inline void audit_ptrace(struct task_struct *t)
 {
        if (unlikely(!audit_dummy_context()))
@@ -463,17 +524,16 @@ static inline void audit_ptrace(struct task_struct *t)
 extern unsigned int audit_serial(void);
 extern int auditsc_get_stamp(struct audit_context *ctx,
                              struct timespec *t, unsigned int *serial);
-extern int  audit_set_loginuid(struct task_struct *task, uid_t loginuid);
+extern int  audit_set_loginuid(uid_t loginuid);
 #define audit_get_loginuid(t) ((t)->loginuid)
 #define audit_get_sessionid(t) ((t)->sessionid)
 extern void audit_log_task_context(struct audit_buffer *ab);
 extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp);
 extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode);
-extern int audit_bprm(struct linux_binprm *bprm);
-extern void audit_socketcall(int nargs, unsigned long *args);
-extern int audit_sockaddr(int len, void *addr);
+extern int __audit_bprm(struct linux_binprm *bprm);
+extern void __audit_socketcall(int nargs, unsigned long *args);
+extern int __audit_sockaddr(int len, void *addr);
 extern void __audit_fd_pair(int fd1, int fd2);
-extern int audit_set_macxattr(const char *name);
 extern void __audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr);
 extern void __audit_mq_sendrecv(mqd_t mqdes, size_t msg_len, unsigned int msg_prio, const struct timespec *abs_timeout);
 extern void __audit_mq_notify(mqd_t mqdes, const struct sigevent *notification);
@@ -499,6 +559,23 @@ static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid
        if (unlikely(!audit_dummy_context()))
                __audit_ipc_set_perm(qbytes, uid, gid, mode);
 }
+static inline int audit_bprm(struct linux_binprm *bprm)
+{
+       if (unlikely(!audit_dummy_context()))
+               return __audit_bprm(bprm);
+       return 0;
+}
+static inline void audit_socketcall(int nargs, unsigned long *args)
+{
+       if (unlikely(!audit_dummy_context()))
+               __audit_socketcall(nargs, args);
+}
+static inline int audit_sockaddr(int len, void *addr)
+{
+       if (unlikely(!audit_dummy_context()))
+               return __audit_sockaddr(len, addr);
+       return 0;
+}
 static inline void audit_mq_open(int oflag, umode_t mode, struct mq_attr *attr)
 {
        if (unlikely(!audit_dummy_context()))
@@ -544,12 +621,11 @@ static inline void audit_mmap_fd(int fd, int flags)
 
 extern int audit_n_rules;
 extern int audit_signals;
-#else
-#define audit_finish_fork(t)
+#else /* CONFIG_AUDITSYSCALL */
 #define audit_alloc(t) ({ 0; })
 #define audit_free(t) do { ; } while (0)
 #define audit_syscall_entry(ta,a,b,c,d,e) do { ; } while (0)
-#define audit_syscall_exit(f,r) do { ; } while (0)
+#define audit_syscall_exit(r) do { ; } while (0)
 #define audit_dummy_context() 1
 #define audit_getname(n) do { ; } while (0)
 #define audit_putname(n) do { ; } while (0)
@@ -558,6 +634,7 @@ extern int audit_signals;
 #define audit_inode(n,d) do { (void)(d); } while (0)
 #define audit_inode_child(i,p) do { ; } while (0)
 #define audit_core_dumps(i) do { ; } while (0)
+#define audit_seccomp(i) do { ; } while (0)
 #define auditsc_get_stamp(c,t,s) (0)
 #define audit_get_loginuid(t) (-1)
 #define audit_get_sessionid(t) (-1)
@@ -568,7 +645,6 @@ extern int audit_signals;
 #define audit_socketcall(n,a) ((void)0)
 #define audit_fd_pair(n,a) ((void)0)
 #define audit_sockaddr(len, addr) ({ 0; })
-#define audit_set_macxattr(n) do { ; } while (0)
 #define audit_mq_open(o,m,a) ((void)0)
 #define audit_mq_sendrecv(d,l,p,t) ((void)0)
 #define audit_mq_notify(d,n) ((void)0)
@@ -579,7 +655,7 @@ extern int audit_signals;
 #define audit_ptrace(t) ((void)0)
 #define audit_n_rules 0
 #define audit_signals 0
-#endif
+#endif /* CONFIG_AUDITSYSCALL */
 
 #ifdef CONFIG_AUDIT
 /* These are defined in audit.c */
index f4b8346b1a331f90fc9f29431b84b9176e4e6acb..83c209f39493adf3ef4ae659c2cc46b3770c2948 100644 (file)
@@ -162,7 +162,7 @@ struct bcma_driver {
 
        int (*probe)(struct bcma_device *dev);
        void (*remove)(struct bcma_device *dev);
-       int (*suspend)(struct bcma_device *dev, pm_message_t state);
+       int (*suspend)(struct bcma_device *dev);
        int (*resume)(struct bcma_device *dev);
        void (*shutdown)(struct bcma_device *dev);
 
index fd88a3945aa149af16b2671d656fede2e46fafe2..0092102db2de7be71f648d0e1c761530cb8f2b14 100644 (file)
@@ -18,7 +18,7 @@ struct pt_regs;
 #define BINPRM_BUF_SIZE 128
 
 #ifdef __KERNEL__
-#include <linux/list.h>
+#include <linux/sched.h>
 
 #define CORENAME_MAX_SIZE 128
 
@@ -58,6 +58,7 @@ struct linux_binprm {
        unsigned interp_flags;
        unsigned interp_data;
        unsigned long loader, exec;
+       char tcomm[TASK_COMM_LEN];
 };
 
 #define BINPRM_FLAGS_ENFORCE_NONDUMP_BIT 0
index 23f81de518298dda6349ee3f44d425045cf9f62b..712abcc205ae0f4a68bda6c4da51e22a7b4000d3 100644 (file)
@@ -186,7 +186,14 @@ struct cpuidle_governor {
 extern int cpuidle_register_governor(struct cpuidle_governor *gov);
 extern void cpuidle_unregister_governor(struct cpuidle_governor *gov);
 
+#ifdef CONFIG_INTEL_IDLE
+extern int intel_idle_cpu_init(int cpu);
 #else
+static inline int intel_idle_cpu_init(int cpu) { return -1; }
+#endif
+
+#else
+static inline int intel_idle_cpu_init(int cpu) { return -1; }
 
 static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
 {return 0;}
index 5b3adb8f9588226657fedf612948e59952e09aea..b63fb393aa58a2c6ab73bab121960be022269d65 100644 (file)
@@ -279,11 +279,11 @@ struct device *driver_find_device(struct device_driver *drv,
 
 /**
  * struct subsys_interface - interfaces to device functions
- * @name        name of the device function
- * @subsystem   subsytem of the devices to attach to
- * @node        the list of functions registered at the subsystem
- * @add         device hookup to device function handler
- * @remove      device hookup to device function handler
+ * @name:       name of the device function
+ * @subsys:     subsytem of the devices to attach to
+ * @node:       the list of functions registered at the subsystem
+ * @add_dev:    device hookup to device function handler
+ * @remove_dev: device hookup to device function handler
  *
  * Simple interfaces attached to a subsystem. Multiple interfaces can
  * attach to a subsystem and its devices. Unlike drivers, they do not
@@ -612,6 +612,7 @@ struct device_dma_parameters {
  * @archdata:  For arch-specific additions.
  * @of_node:   Associated device tree node.
  * @devt:      For creating the sysfs "dev".
+ * @id:                device instance
  * @devres_lock: Spinlock to protect the resource of the device.
  * @devres_head: The resources list of the device.
  * @knode_class: The node used to add the device to the class list.
@@ -1003,6 +1004,10 @@ extern long sysfs_deprecated;
  * Each module may only use this macro once, and calling it replaces
  * module_init() and module_exit().
  *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @__unregister: unregister function for this driver type
+ *
  * Use this macro to construct bus specific macros for registering
  * drivers, and do not use it on its own.
  */
index efae755017d7de93d4bc40b28c29488aa959bf46..b01558b15814f6d94be21272d385df3a1c4d23b5 100644 (file)
@@ -46,7 +46,7 @@ struct signature_hdr {
        char            mpi[0];
 } __packed;
 
-#if defined(CONFIG_DIGSIG) || defined(CONFIG_DIGSIG_MODULE)
+#if defined(CONFIG_SIGNATURE) || defined(CONFIG_SIGNATURE_MODULE)
 
 int digsig_verify(struct key *keyring, const char *sig, int siglen,
                                        const char *digest, int digestlen);
@@ -59,6 +59,6 @@ static inline int digsig_verify(struct key *keyring, const char *sig,
        return -EOPNOTSUPP;
 }
 
-#endif /* CONFIG_DIGSIG */
+#endif /* CONFIG_SIGNATURE */
 
 #endif /* _DIGSIG_H */
index 75f53f874b24a0c0abb790f501f2f60ace48e17e..679b349d9b66695f65bb3597b2a530e3e9709600 100644 (file)
@@ -23,7 +23,6 @@
 
 #include <linux/device.h>
 #include <linux/uio.h>
-#include <linux/dma-direction.h>
 #include <linux/scatterlist.h>
 #include <linux/bitmap.h>
 #include <asm/page.h>
@@ -72,11 +71,93 @@ enum dma_transaction_type {
        DMA_ASYNC_TX,
        DMA_SLAVE,
        DMA_CYCLIC,
+       DMA_INTERLEAVE,
+/* last transaction type for creation of the capabilities mask */
+       DMA_TX_TYPE_END,
 };
 
-/* last transaction type for creation of the capabilities mask */
-#define DMA_TX_TYPE_END (DMA_CYCLIC + 1)
+/**
+ * enum dma_transfer_direction - dma transfer mode and direction indicator
+ * @DMA_MEM_TO_MEM: Async/Memcpy mode
+ * @DMA_MEM_TO_DEV: Slave mode & From Memory to Device
+ * @DMA_DEV_TO_MEM: Slave mode & From Device to Memory
+ * @DMA_DEV_TO_DEV: Slave mode & From Device to Device
+ */
+enum dma_transfer_direction {
+       DMA_MEM_TO_MEM,
+       DMA_MEM_TO_DEV,
+       DMA_DEV_TO_MEM,
+       DMA_DEV_TO_DEV,
+       DMA_TRANS_NONE,
+};
+
+/**
+ * Interleaved Transfer Request
+ * ----------------------------
+ * A chunk is collection of contiguous bytes to be transfered.
+ * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
+ * ICGs may or maynot change between chunks.
+ * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
+ *  that when repeated an integral number of times, specifies the transfer.
+ * A transfer template is specification of a Frame, the number of times
+ *  it is to be repeated and other per-transfer attributes.
+ *
+ * Practically, a client driver would have ready a template for each
+ *  type of transfer it is going to need during its lifetime and
+ *  set only 'src_start' and 'dst_start' before submitting the requests.
+ *
+ *
+ *  |      Frame-1        |       Frame-2       | ~ |       Frame-'numf'  |
+ *  |====....==.===...=...|====....==.===...=...| ~ |====....==.===...=...|
+ *
+ *    ==  Chunk size
+ *    ... ICG
+ */
+
+/**
+ * struct data_chunk - Element of scatter-gather list that makes a frame.
+ * @size: Number of bytes to read from source.
+ *       size_dst := fn(op, size_src), so doesn't mean much for destination.
+ * @icg: Number of bytes to jump after last src/dst address of this
+ *      chunk and before first src/dst address for next chunk.
+ *      Ignored for dst(assumed 0), if dst_inc is true and dst_sgl is false.
+ *      Ignored for src(assumed 0), if src_inc is true and src_sgl is false.
+ */
+struct data_chunk {
+       size_t size;
+       size_t icg;
+};
 
+/**
+ * struct dma_interleaved_template - Template to convey DMAC the transfer pattern
+ *      and attributes.
+ * @src_start: Bus address of source for the first chunk.
+ * @dst_start: Bus address of destination for the first chunk.
+ * @dir: Specifies the type of Source and Destination.
+ * @src_inc: If the source address increments after reading from it.
+ * @dst_inc: If the destination address increments after writing to it.
+ * @src_sgl: If the 'icg' of sgl[] applies to Source (scattered read).
+ *             Otherwise, source is read contiguously (icg ignored).
+ *             Ignored if src_inc is false.
+ * @dst_sgl: If the 'icg' of sgl[] applies to Destination (scattered write).
+ *             Otherwise, destination is filled contiguously (icg ignored).
+ *             Ignored if dst_inc is false.
+ * @numf: Number of frames in this template.
+ * @frame_size: Number of chunks in a frame i.e, size of sgl[].
+ * @sgl: Array of {chunk,icg} pairs that make up a frame.
+ */
+struct dma_interleaved_template {
+       dma_addr_t src_start;
+       dma_addr_t dst_start;
+       enum dma_transfer_direction dir;
+       bool src_inc;
+       bool dst_inc;
+       bool src_sgl;
+       bool dst_sgl;
+       size_t numf;
+       size_t frame_size;
+       struct data_chunk sgl[0];
+};
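
[Editor's note] A hedged sketch of how a client might describe a strided (2D) memory-to-device transfer with the new template. No dmaengine_* convenience wrapper exists for this in the tree at this point, so the sketch calls the device op directly; the sizes are placeholders, and it assumes the DMA driver copies what it needs from the template during prep so the template can be freed immediately afterwards:

#include <linux/dmaengine.h>
#include <linux/slab.h>

static struct dma_async_tx_descriptor *
my_prep_2d_to_dev(struct dma_chan *chan, dma_addr_t src, dma_addr_t dst,
		  size_t line_bytes, size_t stride_bytes, size_t lines)
{
	struct dma_interleaved_template *xt;
	struct dma_async_tx_descriptor *tx;

	/* one chunk per frame; the frame repeats 'lines' times */
	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->dir = DMA_MEM_TO_DEV;
	xt->src_inc = true;	/* walk forward through memory ... */
	xt->src_sgl = true;	/* ... skipping 'icg' bytes after each line */
	xt->dst_inc = false;	/* device FIFO address stays fixed */
	xt->numf = lines;
	xt->frame_size = 1;
	xt->sgl[0].size = line_bytes;
	xt->sgl[0].icg = stride_bytes - line_bytes;

	tx = chan->device->device_prep_interleaved_dma(chan, xt,
						       DMA_PREP_INTERRUPT);
	kfree(xt);
	return tx;
}
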
 
 /**
  * enum dma_ctrl_flags - DMA flags to augment operation preparation,
@@ -269,7 +350,7 @@ enum dma_slave_buswidth {
  * struct, if applicable.
  */
 struct dma_slave_config {
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        dma_addr_t src_addr;
        dma_addr_t dst_addr;
        enum dma_slave_buswidth src_addr_width;
@@ -433,6 +514,7 @@ struct dma_tx_state {
  * @device_prep_dma_cyclic: prepare a cyclic dma operation suitable for audio.
  *     The function takes a buffer of size buf_len. The callback function will
  *     be called after period_len bytes have been transferred.
+ * @device_prep_interleaved_dma: Transfer expression in a generic way.
  * @device_control: manipulate all pending operations on a channel, returns
  *     zero or error code
  * @device_tx_status: poll for transaction completion, the optional
@@ -492,11 +574,14 @@ struct dma_device {
 
        struct dma_async_tx_descriptor *(*device_prep_slave_sg)(
                struct dma_chan *chan, struct scatterlist *sgl,
-               unsigned int sg_len, enum dma_data_direction direction,
+               unsigned int sg_len, enum dma_transfer_direction direction,
                unsigned long flags);
        struct dma_async_tx_descriptor *(*device_prep_dma_cyclic)(
                struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
-               size_t period_len, enum dma_data_direction direction);
+               size_t period_len, enum dma_transfer_direction direction);
+       struct dma_async_tx_descriptor *(*device_prep_interleaved_dma)(
+               struct dma_chan *chan, struct dma_interleaved_template *xt,
+               unsigned long flags);
        int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
                unsigned long arg);
 
@@ -522,7 +607,7 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
 
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
        struct dma_chan *chan, void *buf, size_t len,
-       enum dma_data_direction dir, unsigned long flags)
+       enum dma_transfer_direction dir, unsigned long flags)
 {
        struct scatterlist sg;
        sg_init_one(&sg, buf, len);
index 4bfe0a2f7d50cc218bce24040154286a582aaf94..f2c64f92c4a006394e21e022bcf6b5c03590c930 100644 (file)
@@ -127,7 +127,7 @@ struct dw_cyclic_desc {
 
 struct dw_cyclic_desc *dw_dma_cyclic_prep(struct dma_chan *chan,
                dma_addr_t buf_addr, size_t buf_len, size_t period_len,
-               enum dma_data_direction direction);
+               enum dma_transfer_direction direction);
 void dw_dma_cyclic_free(struct dma_chan *chan);
 int dw_dma_cyclic_start(struct dma_chan *chan);
 void dw_dma_cyclic_stop(struct dma_chan *chan);
index 0ab54e16a91f499b37db61fcd2be564232144a4a..d09af4b67cf121ad91fd4ca02bec01560b6110fd 100644 (file)
@@ -39,6 +39,7 @@ extern bool __refrigerator(bool check_kthr_stop);
 extern int freeze_processes(void);
 extern int freeze_kernel_threads(void);
 extern void thaw_processes(void);
+extern void thaw_kernel_threads(void);
 
 static inline bool try_to_freeze(void)
 {
@@ -174,6 +175,7 @@ static inline bool __refrigerator(bool check_kthr_stop) { return false; }
 static inline int freeze_processes(void) { return -ENOSYS; }
 static inline int freeze_kernel_threads(void) { return -ENOSYS; }
 static inline void thaw_processes(void) {}
+static inline void thaw_kernel_threads(void) {}
 
 static inline bool try_to_freeze(void) { return false; }
 
index 0244082d42c5794ba7c2b7b5a3d6a0181f73c6b9..386da09f229dfad8ff8ef5600c8045f221ae862f 100644 (file)
@@ -396,6 +396,7 @@ struct inodes_stat_t {
 #include <linux/rculist_bl.h>
 #include <linux/atomic.h>
 #include <linux/shrinker.h>
+#include <linux/migrate_mode.h>
 
 #include <asm/byteorder.h>
 
@@ -526,7 +527,6 @@ enum positive_aop_returns {
 struct page;
 struct address_space;
 struct writeback_control;
-enum migrate_mode;
 
 struct iov_iter {
        const struct iovec *iov;
index b5ca4b2c08ecad2fad00c33d48b62466737966b0..004ff33ab38e4dc3e2135f01ebe6b0d84fbb1529 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _GPIO_KEYS_H
 #define _GPIO_KEYS_H
 
+struct device;
+
 struct gpio_keys_button {
        /* Configuration parameters */
        unsigned int code;      /* input event code (KEY_*, SW_*) */
index 828181fbad5d709db91797e359b617efdd606a40..58404b0c50101e93b2978d6d0807447f08e4a049 100644 (file)
@@ -46,6 +46,10 @@ struct team_port {
        u32 speed;
        u8 duplex;
 
+       /* Custom gennetlink interface related flags */
+       bool changed;
+       bool removed;
+
        struct rcu_head rcu;
 };
 
@@ -72,6 +76,10 @@ struct team_option {
        enum team_option_type type;
        int (*getter)(struct team *team, void *arg);
        int (*setter)(struct team *team, void *arg);
+
+       /* Custom gennetlink interface related flags */
+       bool changed;
+       bool removed;
 };
 
 struct team_mode {
@@ -207,6 +215,7 @@ enum {
        TEAM_ATTR_OPTION_CHANGED,       /* flag */
        TEAM_ATTR_OPTION_TYPE,          /* u8 */
        TEAM_ATTR_OPTION_DATA,          /* dynamic */
+       TEAM_ATTR_OPTION_REMOVED,       /* flag */
 
        __TEAM_ATTR_OPTION_MAX,
        TEAM_ATTR_OPTION_MAX = __TEAM_ATTR_OPTION_MAX - 1,
@@ -227,6 +236,7 @@ enum {
        TEAM_ATTR_PORT_LINKUP,          /* flag */
        TEAM_ATTR_PORT_SPEED,           /* u32 */
        TEAM_ATTR_PORT_DUPLEX,          /* u8 */
+       TEAM_ATTR_PORT_REMOVED,         /* flag */
 
        __TEAM_ATTR_PORT_MAX,
        TEAM_ATTR_PORT_MAX = __TEAM_ATTR_PORT_MAX - 1,
index 2fa0901219d4b7fe69158689e08b33db5ac69db1..0d7d6a1b172f29fde03d030168b253197b84f479 100644 (file)
  * note header.  For kdump, the code in vmcore.c runs in the context
  * of the second kernel to combine them into one note.
  */
+#ifndef KEXEC_NOTE_BYTES
 #define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) +               \
                            KEXEC_CORE_NOTE_NAME_BYTES +                \
                            KEXEC_CORE_NOTE_DESC_BYTES )
+#endif
 
 /*
  * This structure is used to hold the arguments that are used when loading
index 3ac412855d835feb05552cd09ddf37be3c7837f5..1600ebf717a79b4259a721e21bd096ec6206b1ed 100644 (file)
@@ -272,7 +272,7 @@ extern int keyring_add_key(struct key *keyring,
 
 extern struct key *key_lookup(key_serial_t id);
 
-static inline key_serial_t key_serial(struct key *key)
+static inline key_serial_t key_serial(const struct key *key)
 {
        return key ? key->serial : 0;
 }
@@ -294,6 +294,9 @@ static inline bool key_is_instantiated(const struct key *key)
        (rcu_dereference_protected((KEY)->payload.rcudata,              \
                                   rwsem_is_locked(&((struct key *)(KEY))->sem)))
 
+#define rcu_assign_keypointer(KEY, PAYLOAD)                            \
+       (rcu_assign_pointer((KEY)->payload.rcudata, PAYLOAD))
+
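
As a hedged illustration (hypothetical key-type code, not part of this patch), an update path could publish a new payload with the helper instead of open-coding rcu_assign_pointer() on key->payload.rcudata; the kmemdup usage and the locking comment are assumptions.

	static int example_key_update(struct key *key, const void *data, size_t datalen)
	{
		void *new;

		/* the caller is assumed to hold key->sem for writing */
		new = kmemdup(data, datalen, GFP_KERNEL);
		if (!new)
			return -ENOMEM;

		/* publish the new payload to RCU readers */
		rcu_assign_keypointer(key, new);

		/* freeing the previous payload (after a grace period) is omitted */
		return 0;
	}
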
 #ifdef CONFIG_SYSCTL
 extern ctl_table key_sysctls[];
 #endif
index abc0120b09b772ff90fe539016295a00dfb08107..9c07dcebded747493041c8e6a5c18d68bc43fabf 100644 (file)
@@ -17,6 +17,7 @@
 
 #include <linux/bug.h>
 #include <linux/atomic.h>
+#include <linux/kernel.h>
 
 struct kref {
        atomic_t refcount;
old mode 100755 (executable)
new mode 100644 (file)
index 1515e64e3663abd1a557e339c0eb57d6f13d8117..f88c1cc0cb0f24bfda24e69f63cb728a923d5d45 100644 (file)
@@ -10,7 +10,6 @@
 #ifndef MCP_H
 #define MCP_H
 
-#include <linux/mod_devicetable.h>
 #include <mach/dma.h>
 
 struct mcp_ops;
@@ -27,7 +26,7 @@ struct mcp {
        dma_device_t    dma_telco_rd;
        dma_device_t    dma_telco_wr;
        struct device   attached_device;
-       const char      *codec;
+       int             gpio_base;
 };
 
 struct mcp_ops {
@@ -45,11 +44,10 @@ void mcp_reg_write(struct mcp *, unsigned int, unsigned int);
 unsigned int mcp_reg_read(struct mcp *, unsigned int);
 void mcp_enable(struct mcp *);
 void mcp_disable(struct mcp *);
-const struct mcp_device_id *mcp_get_device_id(const struct mcp *mcp);
 #define mcp_get_sclk_rate(mcp) ((mcp)->sclk_rate)
 
 struct mcp *mcp_host_alloc(struct device *, size_t);
-int mcp_host_register(struct mcp *, void *);
+int mcp_host_register(struct mcp *);
 void mcp_host_unregister(struct mcp *);
 
 struct mcp_driver {
@@ -58,7 +56,6 @@ struct mcp_driver {
        void (*remove)(struct mcp *);
        int (*suspend)(struct mcp *, pm_message_t);
        int (*resume)(struct mcp *);
-       const struct mcp_device_id *id_table;
 };
 
 int mcp_driver_register(struct mcp_driver *);
@@ -67,6 +64,9 @@ void mcp_driver_unregister(struct mcp_driver *);
 #define mcp_get_drvdata(mcp)   dev_get_drvdata(&(mcp)->attached_device)
 #define mcp_set_drvdata(mcp,d) dev_set_drvdata(&(mcp)->attached_device, d)
 
-#define mcp_priv(mcp)          ((void *)((mcp)+1))
+static inline void *mcp_priv(struct mcp *mcp)
+{
+       return mcp + 1;
+}
 
 #endif
index 2463c2619596fab1667b1ca1c5365c5963bf29ea..9bc9ac651dad9bf2be544961c18e06fd1bfbd119 100644 (file)
@@ -187,8 +187,10 @@ struct twl6040 {
        int rev;
        u8 vibra_ctrl_cache[2];
 
+       /* PLL configuration */
        int pll;
        unsigned int sysclk;
+       unsigned int mclk;
 
        unsigned int irq;
        unsigned int irq_base;
index bc19e5fb7ea8f1791960e961e7a9dab803f15a54..4321f044d1e45e1ad5cdaf20eba1440427b1fb5e 100644 (file)
 #define UCB_MODE_DYN_VFLAG_ENA (1 << 12)
 #define UCB_MODE_AUD_OFF_CAN   (1 << 13)
 
-struct ucb1x00_plat_data {
-       int             gpio_base;
-};
 
 struct ucb1x00_irq {
        void *devid;
@@ -119,7 +116,7 @@ struct ucb1x00 {
        unsigned int            irq;
        struct semaphore        adc_sem;
        spinlock_t              io_lock;
-       const struct mcp_device_id *id;
+       u16                     id;
        u16                     io_dir;
        u16                     io_out;
        u16                     adc_cr;
index eaf867412f7adf26e96a3ad4eb3f002a418434e4..05ed2828a5535bfe151c17ab2b5a656a5209ef4c 100644 (file)
@@ -3,22 +3,10 @@
 
 #include <linux/mm.h>
 #include <linux/mempolicy.h>
+#include <linux/migrate_mode.h>
 
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
-/*
- * MIGRATE_ASYNC means never block
- * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
- *     on most operations but not ->writepage as the potential stall time
- *     is too significant
- * MIGRATE_SYNC will block when migrating pages
- */
-enum migrate_mode {
-       MIGRATE_ASYNC,
-       MIGRATE_SYNC_LIGHT,
-       MIGRATE_SYNC,
-};
-
 #ifdef CONFIG_MIGRATION
 #define PAGE_MIGRATION 1
 
diff --git a/include/linux/migrate_mode.h b/include/linux/migrate_mode.h
new file mode 100644 (file)
index 0000000..ebf3d89
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef MIGRATE_MODE_H_INCLUDED
+#define MIGRATE_MODE_H_INCLUDED
+/*
+ * MIGRATE_ASYNC means never block
+ * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
+ *     on most operations but not ->writepage as the potential stall time
+ *     is too significant
+ * MIGRATE_SYNC will block when migrating pages
+ */
+enum migrate_mode {
+       MIGRATE_ASYNC,
+       MIGRATE_SYNC_LIGHT,
+       MIGRATE_SYNC,
+};
+
+#endif         /* MIGRATE_MODE_H_INCLUDED */
index 32085249e9cbf54d0d65ebc63e0e08cb8b6a132f..0549d2115507124405b06f5ea96f072880a3e475 100644 (file)
@@ -42,6 +42,7 @@
 #define AUTOFS_MINOR           235
 #define MAPPER_CTRL_MINOR      236
 #define LOOP_CTRL_MINOR                237
+#define VHOST_NET_MINOR                238
 #define MISC_DYNAMIC_MINOR     255
 
 struct device;
index 5c4fe8e5bfe563669d9f2ef3b23eaa9ad14b1b7c..aea61905499b0e20598969cb02e63f32a3010d2d 100644 (file)
@@ -621,6 +621,7 @@ void mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac);
 int mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
 int mlx4_get_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int *qpn);
 void mlx4_put_eth_qp(struct mlx4_dev *dev, u8 port, u64 mac, int qpn);
+void mlx4_set_stats_bitmap(struct mlx4_dev *dev, u64 *stats_bitmap);
 
 int mlx4_find_cached_vlan(struct mlx4_dev *dev, u8 port, u16 vid, int *idx);
 int mlx4_register_vlan(struct mlx4_dev *dev, u8 port, u16 vlan, int *index);
index b29e7f6f8fa580d9c4c39b4fca596a98e249b2d4..83ac0713ed0aa9a2a0c79b95013c5bfd42137d9c 100644 (file)
@@ -436,17 +436,6 @@ struct spi_device_id {
                        __attribute__((aligned(sizeof(kernel_ulong_t))));
 };
 
-/* mcp */
-
-#define MCP_NAME_SIZE  20
-#define MCP_MODULE_PREFIX "mcp:"
-
-struct mcp_device_id {
-       char name[MCP_NAME_SIZE];
-       kernel_ulong_t driver_data      /* Data private to the driver */
-                       __attribute__((aligned(sizeof(kernel_ulong_t))));
-};
-
 /* dmi */
 enum dmi_field {
        DMI_NONE,
index 06f88994ccaafa36f7c679b5364942a0ffba9cf8..d02cca6cc8ce0552bcf652a29b5562979f72f056 100644 (file)
@@ -57,8 +57,6 @@ struct gcry_mpi {
 
 typedef struct gcry_mpi *MPI;
 
-#define MPI_NULL NULL
-
 #define mpi_get_nlimbs(a)     ((a)->nlimbs)
 #define mpi_is_neg(a)        ((a)->sign)
 
diff --git a/include/linux/mtd/gpmi-nand.h b/include/linux/mtd/gpmi-nand.h
new file mode 100644 (file)
index 0000000..69b6dbf
--- /dev/null
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2011 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef __MACH_MXS_GPMI_NAND_H__
+#define __MACH_MXS_GPMI_NAND_H__
+
+/* The size of the resources is fixed. */
+#define GPMI_NAND_RES_SIZE     6
+
+/* Resource names for the GPMI NAND driver. */
+#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "GPMI NAND GPMI Registers"
+#define GPMI_NAND_GPMI_INTERRUPT_RES_NAME  "GPMI NAND GPMI Interrupt"
+#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "GPMI NAND BCH Registers"
+#define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "GPMI NAND BCH Interrupt"
+#define GPMI_NAND_DMA_CHANNELS_RES_NAME    "GPMI NAND DMA Channels"
+#define GPMI_NAND_DMA_INTERRUPT_RES_NAME   "GPMI NAND DMA Interrupt"
+
+/**
+ * struct gpmi_nand_platform_data - GPMI NAND driver platform data.
+ *
+ * This structure communicates platform-specific information to the GPMI NAND
+ * driver that can't be expressed as resources.
+ *
+ * @platform_init:           A pointer to a function the driver will call to
+ *                           initialize the platform (e.g., set up the pin mux).
+ * @min_prop_delay_in_ns:    Minimum propagation delay of GPMI signals to and
+ *                           from the NAND Flash device, in nanoseconds.
+ * @max_prop_delay_in_ns:    Maximum propagation delay of GPMI signals to and
+ *                           from the NAND Flash device, in nanoseconds.
+ * @max_chip_count:          The maximum number of chips for which the driver
+ *                           should configure the hardware. This value most
+ *                           likely reflects the number of pins that are
+ *                           connected to a NAND Flash device. If this is
+ *                           greater than the SoC hardware can support, the
+ *                           driver will print a message and fail to initialize.
+ * @partitions:              An optional pointer to an array of partition
+ *                           descriptions.
+ * @partition_count:         The number of elements in the partitions array.
+ */
+struct gpmi_nand_platform_data {
+       /* SoC hardware information. */
+       int             (*platform_init)(void);
+
+       /* NAND Flash information. */
+       unsigned int    min_prop_delay_in_ns;
+       unsigned int    max_prop_delay_in_ns;
+       unsigned int    max_chip_count;
+
+       /* Medium information. */
+       struct          mtd_partition *partitions;
+       unsigned        partition_count;
+};
+#endif
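
For orientation, a hedged sketch of how a board file might populate this platform data; the partition layout, timing values and function names are invented for illustration, and the pdata would then be attached to the GPMI platform device in the usual way.

	#include <linux/kernel.h>
	#include <linux/mtd/partitions.h>
	#include <linux/mtd/gpmi-nand.h>

	static struct mtd_partition example_nand_parts[] = {
		{
			.name   = "boot",
			.offset = 0,
			.size   = 4 * 1024 * 1024,	/* 4 MiB, illustrative */
		}, {
			.name   = "rootfs",
			.offset = MTDPART_OFS_APPEND,
			.size   = MTDPART_SIZ_FULL,
		},
	};

	static int example_gpmi_pinmux_init(void)
	{
		/* board-specific pin mux setup would go here */
		return 0;
	}

	static struct gpmi_nand_platform_data example_gpmi_pdata = {
		.platform_init        = example_gpmi_pinmux_init,
		.min_prop_delay_in_ns = 5,	/* illustrative timing */
		.max_prop_delay_in_ns = 9,
		.max_chip_count       = 1,
		.partitions           = example_nand_parts,
		.partition_count      = ARRAY_SIZE(example_nand_parts),
	};
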
index 1a81fde8f3331d9652054325aa36809cb40b45f1..d43dc25af82e23a8c6b18f5f3785b8ad4383c8c7 100644 (file)
@@ -427,9 +427,7 @@ static inline int mtd_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len)
 
 static inline int mtd_suspend(struct mtd_info *mtd)
 {
-       if (!mtd->suspend)
-               return -EOPNOTSUPP;
-       return mtd->suspend(mtd);
+       return mtd->suspend ? mtd->suspend(mtd) : 0;
 }
 
 static inline void mtd_resume(struct mtd_info *mtd)
@@ -441,7 +439,7 @@ static inline void mtd_resume(struct mtd_info *mtd)
 static inline int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
 {
        if (!mtd->block_isbad)
-               return -EOPNOTSUPP;
+               return 0;
        return mtd->block_isbad(mtd, ofs);
 }
 
index 9e3a2838291bfe6aee8f6b3e0d88b43ace455abf..0d3dd66322ecbb24529303f6634f36e5ce6f390d 100644 (file)
@@ -83,10 +83,6 @@ enum ip_conntrack_status {
        /* Conntrack is a fake untracked entry */
        IPS_UNTRACKED_BIT = 12,
        IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
-
-       /* Conntrack has a userspace helper. */
-       IPS_USERSPACE_HELPER_BIT = 13,
-       IPS_USERSPACE_HELPER = (1 << IPS_USERSPACE_HELPER_BIT),
 };
 
 /* Connection tracking event types */
index 6390f0992f36f0723393d282c6d39d3f68abb12e..b56e76811c04380e9779dbe82c2cfa4a5b0c6abd 100644 (file)
@@ -3,8 +3,7 @@
 
 #include <linux/types.h>
 
-#define XT_CT_NOTRACK          0x1
-#define XT_CT_USERSPACE_HELPER 0x2
+#define XT_CT_NOTRACK  0x1
 
 struct xt_ct_target_info {
        __u16 flags;
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
new file mode 100644 (file)
index 0000000..9490a00
--- /dev/null
@@ -0,0 +1,434 @@
+/*
+ * Definitions for the NVM Express interface
+ * Copyright (c) 2011, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 
+ * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ */
+
+#ifndef _LINUX_NVME_H
+#define _LINUX_NVME_H
+
+#include <linux/types.h>
+
+struct nvme_bar {
+       __u64                   cap;    /* Controller Capabilities */
+       __u32                   vs;     /* Version */
+       __u32                   intms;  /* Interrupt Mask Set */
+       __u32                   intmc;  /* Interrupt Mask Clear */
+       __u32                   cc;     /* Controller Configuration */
+       __u32                   rsvd1;  /* Reserved */
+       __u32                   csts;   /* Controller Status */
+       __u32                   rsvd2;  /* Reserved */
+       __u32                   aqa;    /* Admin Queue Attributes */
+       __u64                   asq;    /* Admin SQ Base Address */
+       __u64                   acq;    /* Admin CQ Base Address */
+};
+
+#define NVME_CAP_TIMEOUT(cap)  (((cap) >> 24) & 0xff)
+#define NVME_CAP_STRIDE(cap)   (((cap) >> 32) & 0xf)
+
+enum {
+       NVME_CC_ENABLE          = 1 << 0,
+       NVME_CC_CSS_NVM         = 0 << 4,
+       NVME_CC_MPS_SHIFT       = 7,
+       NVME_CC_ARB_RR          = 0 << 11,
+       NVME_CC_ARB_WRRU        = 1 << 11,
+       NVME_CC_ARB_VS          = 7 << 11,
+       NVME_CC_SHN_NONE        = 0 << 14,
+       NVME_CC_SHN_NORMAL      = 1 << 14,
+       NVME_CC_SHN_ABRUPT      = 2 << 14,
+       NVME_CC_IOSQES          = 6 << 16,
+       NVME_CC_IOCQES          = 4 << 20,
+       NVME_CSTS_RDY           = 1 << 0,
+       NVME_CSTS_CFS           = 1 << 1,
+       NVME_CSTS_SHST_NORMAL   = 0 << 2,
+       NVME_CSTS_SHST_OCCUR    = 1 << 2,
+       NVME_CSTS_SHST_CMPLT    = 2 << 2,
+};
+
+struct nvme_id_power_state {
+       __le16                  max_power;      /* centiwatts */
+       __u16                   rsvd2;
+       __le32                  entry_lat;      /* microseconds */
+       __le32                  exit_lat;       /* microseconds */
+       __u8                    read_tput;
+       __u8                    read_lat;
+       __u8                    write_tput;
+       __u8                    write_lat;
+       __u8                    rsvd16[16];
+};
+
+#define NVME_VS(major, minor)  (major << 16 | minor)
+
+struct nvme_id_ctrl {
+       __le16                  vid;
+       __le16                  ssvid;
+       char                    sn[20];
+       char                    mn[40];
+       char                    fr[8];
+       __u8                    rab;
+       __u8                    ieee[3];
+       __u8                    mic;
+       __u8                    mdts;
+       __u8                    rsvd78[178];
+       __le16                  oacs;
+       __u8                    acl;
+       __u8                    aerl;
+       __u8                    frmw;
+       __u8                    lpa;
+       __u8                    elpe;
+       __u8                    npss;
+       __u8                    rsvd264[248];
+       __u8                    sqes;
+       __u8                    cqes;
+       __u8                    rsvd514[2];
+       __le32                  nn;
+       __le16                  oncs;
+       __le16                  fuses;
+       __u8                    fna;
+       __u8                    vwc;
+       __le16                  awun;
+       __le16                  awupf;
+       __u8                    rsvd530[1518];
+       struct nvme_id_power_state      psd[32];
+       __u8                    vs[1024];
+};
+
+struct nvme_lbaf {
+       __le16                  ms;
+       __u8                    ds;
+       __u8                    rp;
+};
+
+struct nvme_id_ns {
+       __le64                  nsze;
+       __le64                  ncap;
+       __le64                  nuse;
+       __u8                    nsfeat;
+       __u8                    nlbaf;
+       __u8                    flbas;
+       __u8                    mc;
+       __u8                    dpc;
+       __u8                    dps;
+       __u8                    rsvd30[98];
+       struct nvme_lbaf        lbaf[16];
+       __u8                    rsvd192[192];
+       __u8                    vs[3712];
+};
+
+enum {
+       NVME_NS_FEAT_THIN       = 1 << 0,
+       NVME_LBAF_RP_BEST       = 0,
+       NVME_LBAF_RP_BETTER     = 1,
+       NVME_LBAF_RP_GOOD       = 2,
+       NVME_LBAF_RP_DEGRADED   = 3,
+};
+
+struct nvme_lba_range_type {
+       __u8                    type;
+       __u8                    attributes;
+       __u8                    rsvd2[14];
+       __u64                   slba;
+       __u64                   nlb;
+       __u8                    guid[16];
+       __u8                    rsvd48[16];
+};
+
+enum {
+       NVME_LBART_TYPE_FS      = 0x01,
+       NVME_LBART_TYPE_RAID    = 0x02,
+       NVME_LBART_TYPE_CACHE   = 0x03,
+       NVME_LBART_TYPE_SWAP    = 0x04,
+
+       NVME_LBART_ATTRIB_TEMP  = 1 << 0,
+       NVME_LBART_ATTRIB_HIDE  = 1 << 1,
+};
+
+/* I/O commands */
+
+enum nvme_opcode {
+       nvme_cmd_flush          = 0x00,
+       nvme_cmd_write          = 0x01,
+       nvme_cmd_read           = 0x02,
+       nvme_cmd_write_uncor    = 0x04,
+       nvme_cmd_compare        = 0x05,
+       nvme_cmd_dsm            = 0x09,
+};
+
+struct nvme_common_command {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u32                   cdw2[2];
+       __le64                  metadata;
+       __le64                  prp1;
+       __le64                  prp2;
+       __u32                   cdw10[6];
+};
+
+struct nvme_rw_command {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u64                   rsvd2;
+       __le64                  metadata;
+       __le64                  prp1;
+       __le64                  prp2;
+       __le64                  slba;
+       __le16                  length;
+       __le16                  control;
+       __le32                  dsmgmt;
+       __le32                  reftag;
+       __le16                  apptag;
+       __le16                  appmask;
+};
+
+enum {
+       NVME_RW_LR                      = 1 << 15,
+       NVME_RW_FUA                     = 1 << 14,
+       NVME_RW_DSM_FREQ_UNSPEC         = 0,
+       NVME_RW_DSM_FREQ_TYPICAL        = 1,
+       NVME_RW_DSM_FREQ_RARE           = 2,
+       NVME_RW_DSM_FREQ_READS          = 3,
+       NVME_RW_DSM_FREQ_WRITES         = 4,
+       NVME_RW_DSM_FREQ_RW             = 5,
+       NVME_RW_DSM_FREQ_ONCE           = 6,
+       NVME_RW_DSM_FREQ_PREFETCH       = 7,
+       NVME_RW_DSM_FREQ_TEMP           = 8,
+       NVME_RW_DSM_LATENCY_NONE        = 0 << 4,
+       NVME_RW_DSM_LATENCY_IDLE        = 1 << 4,
+       NVME_RW_DSM_LATENCY_NORM        = 2 << 4,
+       NVME_RW_DSM_LATENCY_LOW         = 3 << 4,
+       NVME_RW_DSM_SEQ_REQ             = 1 << 6,
+       NVME_RW_DSM_COMPRESSED          = 1 << 7,
+};
+
+/* Admin commands */
+
+enum nvme_admin_opcode {
+       nvme_admin_delete_sq            = 0x00,
+       nvme_admin_create_sq            = 0x01,
+       nvme_admin_get_log_page         = 0x02,
+       nvme_admin_delete_cq            = 0x04,
+       nvme_admin_create_cq            = 0x05,
+       nvme_admin_identify             = 0x06,
+       nvme_admin_abort_cmd            = 0x08,
+       nvme_admin_set_features         = 0x09,
+       nvme_admin_get_features         = 0x0a,
+       nvme_admin_async_event          = 0x0c,
+       nvme_admin_activate_fw          = 0x10,
+       nvme_admin_download_fw          = 0x11,
+       nvme_admin_format_nvm           = 0x80,
+       nvme_admin_security_send        = 0x81,
+       nvme_admin_security_recv        = 0x82,
+};
+
+enum {
+       NVME_QUEUE_PHYS_CONTIG  = (1 << 0),
+       NVME_CQ_IRQ_ENABLED     = (1 << 1),
+       NVME_SQ_PRIO_URGENT     = (0 << 1),
+       NVME_SQ_PRIO_HIGH       = (1 << 1),
+       NVME_SQ_PRIO_MEDIUM     = (2 << 1),
+       NVME_SQ_PRIO_LOW        = (3 << 1),
+       NVME_FEAT_ARBITRATION   = 0x01,
+       NVME_FEAT_POWER_MGMT    = 0x02,
+       NVME_FEAT_LBA_RANGE     = 0x03,
+       NVME_FEAT_TEMP_THRESH   = 0x04,
+       NVME_FEAT_ERR_RECOVERY  = 0x05,
+       NVME_FEAT_VOLATILE_WC   = 0x06,
+       NVME_FEAT_NUM_QUEUES    = 0x07,
+       NVME_FEAT_IRQ_COALESCE  = 0x08,
+       NVME_FEAT_IRQ_CONFIG    = 0x09,
+       NVME_FEAT_WRITE_ATOMIC  = 0x0a,
+       NVME_FEAT_ASYNC_EVENT   = 0x0b,
+       NVME_FEAT_SW_PROGRESS   = 0x0c,
+};
+
+struct nvme_identify {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u64                   rsvd2[2];
+       __le64                  prp1;
+       __le64                  prp2;
+       __le32                  cns;
+       __u32                   rsvd11[5];
+};
+
+struct nvme_features {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __le32                  nsid;
+       __u64                   rsvd2[2];
+       __le64                  prp1;
+       __le64                  prp2;
+       __le32                  fid;
+       __le32                  dword11;
+       __u32                   rsvd12[4];
+};
+
+struct nvme_create_cq {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __u32                   rsvd1[5];
+       __le64                  prp1;
+       __u64                   rsvd8;
+       __le16                  cqid;
+       __le16                  qsize;
+       __le16                  cq_flags;
+       __le16                  irq_vector;
+       __u32                   rsvd12[4];
+};
+
+struct nvme_create_sq {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __u32                   rsvd1[5];
+       __le64                  prp1;
+       __u64                   rsvd8;
+       __le16                  sqid;
+       __le16                  qsize;
+       __le16                  sq_flags;
+       __le16                  cqid;
+       __u32                   rsvd12[4];
+};
+
+struct nvme_delete_queue {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __u32                   rsvd1[9];
+       __le16                  qid;
+       __u16                   rsvd10;
+       __u32                   rsvd11[5];
+};
+
+struct nvme_download_firmware {
+       __u8                    opcode;
+       __u8                    flags;
+       __u16                   command_id;
+       __u32                   rsvd1[5];
+       __le64                  prp1;
+       __le64                  prp2;
+       __le32                  numd;
+       __le32                  offset;
+       __u32                   rsvd12[4];
+};
+
+struct nvme_command {
+       union {
+               struct nvme_common_command common;
+               struct nvme_rw_command rw;
+               struct nvme_identify identify;
+               struct nvme_features features;
+               struct nvme_create_cq create_cq;
+               struct nvme_create_sq create_sq;
+               struct nvme_delete_queue delete_queue;
+               struct nvme_download_firmware dlfw;
+       };
+};
+
+enum {
+       NVME_SC_SUCCESS                 = 0x0,
+       NVME_SC_INVALID_OPCODE          = 0x1,
+       NVME_SC_INVALID_FIELD           = 0x2,
+       NVME_SC_CMDID_CONFLICT          = 0x3,
+       NVME_SC_DATA_XFER_ERROR         = 0x4,
+       NVME_SC_POWER_LOSS              = 0x5,
+       NVME_SC_INTERNAL                = 0x6,
+       NVME_SC_ABORT_REQ               = 0x7,
+       NVME_SC_ABORT_QUEUE             = 0x8,
+       NVME_SC_FUSED_FAIL              = 0x9,
+       NVME_SC_FUSED_MISSING           = 0xa,
+       NVME_SC_INVALID_NS              = 0xb,
+       NVME_SC_LBA_RANGE               = 0x80,
+       NVME_SC_CAP_EXCEEDED            = 0x81,
+       NVME_SC_NS_NOT_READY            = 0x82,
+       NVME_SC_CQ_INVALID              = 0x100,
+       NVME_SC_QID_INVALID             = 0x101,
+       NVME_SC_QUEUE_SIZE              = 0x102,
+       NVME_SC_ABORT_LIMIT             = 0x103,
+       NVME_SC_ABORT_MISSING           = 0x104,
+       NVME_SC_ASYNC_LIMIT             = 0x105,
+       NVME_SC_FIRMWARE_SLOT           = 0x106,
+       NVME_SC_FIRMWARE_IMAGE          = 0x107,
+       NVME_SC_INVALID_VECTOR          = 0x108,
+       NVME_SC_INVALID_LOG_PAGE        = 0x109,
+       NVME_SC_INVALID_FORMAT          = 0x10a,
+       NVME_SC_BAD_ATTRIBUTES          = 0x180,
+       NVME_SC_WRITE_FAULT             = 0x280,
+       NVME_SC_READ_ERROR              = 0x281,
+       NVME_SC_GUARD_CHECK             = 0x282,
+       NVME_SC_APPTAG_CHECK            = 0x283,
+       NVME_SC_REFTAG_CHECK            = 0x284,
+       NVME_SC_COMPARE_FAILED          = 0x285,
+       NVME_SC_ACCESS_DENIED           = 0x286,
+};
+
+struct nvme_completion {
+       __le32  result;         /* Used by admin commands to return data */
+       __u32   rsvd;
+       __le16  sq_head;        /* how much of this queue may be reclaimed */
+       __le16  sq_id;          /* submission queue that generated this entry */
+       __u16   command_id;     /* of the command which completed */
+       __le16  status;         /* did the command fail, and if so, why? */
+};
+
+struct nvme_user_io {
+       __u8    opcode;
+       __u8    flags;
+       __u16   control;
+       __u16   nblocks;
+       __u16   rsvd;
+       __u64   metadata;
+       __u64   addr;
+       __u64   slba;
+       __u32   dsmgmt;
+       __u32   reftag;
+       __u16   apptag;
+       __u16   appmask;
+};
+
+struct nvme_admin_cmd {
+       __u8    opcode;
+       __u8    flags;
+       __u16   rsvd1;
+       __u32   nsid;
+       __u32   cdw2;
+       __u32   cdw3;
+       __u64   metadata;
+       __u64   addr;
+       __u32   metadata_len;
+       __u32   data_len;
+       __u32   cdw10;
+       __u32   cdw11;
+       __u32   cdw12;
+       __u32   cdw13;
+       __u32   cdw14;
+       __u32   cdw15;
+       __u32   timeout_ms;
+       __u32   result;
+};
+
+#define NVME_IOCTL_ID          _IO('N', 0x40)
+#define NVME_IOCTL_ADMIN_CMD   _IOWR('N', 0x41, struct nvme_admin_cmd)
+#define NVME_IOCTL_SUBMIT_IO   _IOW('N', 0x42, struct nvme_user_io)
+
+#endif /* _LINUX_NVME_H */
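
To give a sense of the user-space interface implied by the ioctls at the end of the file, here is a hedged sketch of issuing an Identify Controller command (CNS=1 per the NVMe specification); the character-device path passed in by the caller and the availability of this header to user space are assumptions, not something this hunk guarantees.

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/nvme.h>

	static int example_identify_ctrl(const char *dev, struct nvme_id_ctrl *id)
	{
		struct nvme_admin_cmd cmd;
		int fd, ret;

		fd = open(dev, O_RDWR);
		if (fd < 0)
			return -1;

		memset(&cmd, 0, sizeof(cmd));
		cmd.opcode   = nvme_admin_identify;		/* 0x06 */
		cmd.addr     = (uint64_t)(uintptr_t)id;		/* 4096-byte buffer */
		cmd.data_len = sizeof(*id);
		cmd.cdw10    = 1;				/* CNS=1: controller data */

		ret = ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
		close(fd);
		return ret;
	}
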
index 08855613ceb32ed66e425eb41f69206d6feb61da..abb2776be1ba1bdce8f04788f0b66af967d08050 100644 (file)
@@ -587,6 +587,7 @@ struct hw_perf_event {
        u64                             sample_period;
        u64                             last_period;
        local64_t                       period_left;
+       u64                             interrupts_seq;
        u64                             interrupts;
 
        u64                             freq_time_stamp;
index e5bbcbaa6f5700f3bfcb9d14550cce632fcbd53d..4d99e4e6ef83fa4910e22959bc9326117ce1288e 100644 (file)
@@ -110,7 +110,19 @@ static inline void pm_qos_remove_request(struct pm_qos_request *req)
                        { return; }
 
 static inline int pm_qos_request(int pm_qos_class)
-                       { return 0; }
+{
+       switch (pm_qos_class) {
+       case PM_QOS_CPU_DMA_LATENCY:
+               return PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE;
+       case PM_QOS_NETWORK_LATENCY:
+               return PM_QOS_NETWORK_LAT_DEFAULT_VALUE;
+       case PM_QOS_NETWORK_THROUGHPUT:
+               return PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE;
+       default:
+               return PM_QOS_DEFAULT_VALUE;
+       }
+}
+
 static inline int pm_qos_add_notifier(int pm_qos_class,
                                      struct notifier_block *notifier)
                        { return 0; }
index a27e56ca41a4cf2ae505e08710c88f745f476510..c2f1f6a5fcb8a67f52c90c8397c7384534d3010a 100644 (file)
 
 #include <linux/compiler.h>            /* For unlikely.  */
 #include <linux/sched.h>               /* For struct task_struct.  */
+#include <linux/err.h>                 /* for IS_ERR_VALUE */
 
 
 extern long arch_ptrace(struct task_struct *child, long request,
@@ -266,6 +267,15 @@ static inline void ptrace_release_task(struct task_struct *task)
 #define force_successful_syscall_return() do { } while (0)
 #endif
 
+#ifndef is_syscall_success
+/*
+ * On most systems we can tell whether a syscall succeeded based on whether the
+ * return value is an error value.  Some systems, such as ia64 and powerpc, have
+ * different indicators of success/failure and must define their own.
+ */
+#define is_syscall_success(regs) (!IS_ERR_VALUE((unsigned long)(regs_return_value(regs))))
+#endif
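
A small, hedged example of the intended use; the exit-hook function and the pr_debug reporting are illustrative only, not taken from this patch.

	static void example_syscall_exit(struct pt_regs *regs)
	{
		long retval = regs_return_value(regs);

		if (is_syscall_success(regs))
			pr_debug("syscall succeeded, return value %ld\n", retval);
		else
			pr_debug("syscall failed, error %ld\n", retval);
	}
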
+
 /*
  * <asm/ptrace.h> should define the following things inside #ifdef __KERNEL__.
  *
index cb785569903759e054141b09e62e4a2c0442425f..c09fa042b5ea077bb748507fd41e464891bbb7b3 100644 (file)
@@ -230,7 +230,11 @@ struct mem_dqinfo {
 struct super_block;
 
 #define DQF_MASK 0xffff                /* Mask for format specific flags */
-#define DQF_INFO_DIRTY_B 16
+#define DQF_GETINFO_MASK 0x1ffff       /* Mask for flags passed to userspace */
+#define DQF_SETINFO_MASK 0xffff                /* Mask for flags modifiable from userspace */
+#define DQF_SYS_FILE_B         16
+#define DQF_SYS_FILE (1 << DQF_SYS_FILE_B)     /* Quota file stored as system file */
+#define DQF_INFO_DIRTY_B       31
 #define DQF_INFO_DIRTY (1 << DQF_INFO_DIRTY_B) /* Is info dirty? */
 
 extern void mark_info_dirty(struct super_block *sb, int type);
index c9d625ca659ec387c6b9456c4d03d0bb4af80fb9..da81af086eaf765ea701247cd332944ba7b3c570 100644 (file)
@@ -109,12 +109,18 @@ void res_counter_init(struct res_counter *counter, struct res_counter *parent);
  *
  * returns 0 on success and <0 if the counter->usage will exceed the
  * counter->limit _locked call expects the counter->lock to be taken
+ *
+ * charge_nofail works the same, except that it charges the resource
+ * counter unconditionally, and returns < 0 if, after the current
+ * charge, usage is over the limit.
  */
 
 int __must_check res_counter_charge_locked(struct res_counter *counter,
                unsigned long val);
 int __must_check res_counter_charge(struct res_counter *counter,
                unsigned long val, struct res_counter **limit_fail_at);
+int __must_check res_counter_charge_nofail(struct res_counter *counter,
+               unsigned long val, struct res_counter **limit_fail_at);
 
 /*
  * uncharge - tell that some portion of the resource is released
@@ -142,7 +148,10 @@ static inline unsigned long long res_counter_margin(struct res_counter *cnt)
        unsigned long flags;
 
        spin_lock_irqsave(&cnt->lock, flags);
-       margin = cnt->limit - cnt->usage;
+       if (cnt->limit > cnt->usage)
+               margin = cnt->limit - cnt->usage;
+       else
+               margin = 0;
        spin_unlock_irqrestore(&cnt->lock, flags);
        return margin;
 }
index 4032ec1cf836fe2055a422c8d2356a4ea620117a..7d379a6bfd886679fcfefee2ed90fb636d6cd028 100644 (file)
@@ -2088,9 +2088,9 @@ extern int sched_setscheduler_nocheck(struct task_struct *, int,
 extern struct task_struct *idle_task(int cpu);
 /**
  * is_idle_task - is the specified task an idle task?
- * @tsk: the task in question.
+ * @p: the task in question.
  */
-static inline bool is_idle_task(struct task_struct *p)
+static inline bool is_idle_task(const struct task_struct *p)
 {
        return p->pid == 0;
 }
@@ -2259,6 +2259,12 @@ static inline void mmdrop(struct mm_struct * mm)
 extern void mmput(struct mm_struct *);
 /* Grab a reference to a task's mm, if it is not already going away */
 extern struct mm_struct *get_task_mm(struct task_struct *task);
+/*
+ * Grab a reference to a task's mm, if it is not already going away
+ * and if ptrace_may_access() with the given mode parameter succeeds.
+ */
+extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
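
A hedged sketch of a caller, assuming (as the comment above suggests) that mm_access() returns NULL when the task has no mm and an ERR_PTR() value when ptrace_may_access() refuses; the inspection function itself is hypothetical.

	static int example_inspect_task(struct task_struct *task)
	{
		struct mm_struct *mm;

		mm = mm_access(task, PTRACE_MODE_READ);
		if (!mm)
			return 0;		/* task has no address space */
		if (IS_ERR(mm))
			return PTR_ERR(mm);	/* e.g. -EACCES if access was denied */

		/* ... examine the mm here ... */

		mmput(mm);
		return 0;
	}
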
 /* Remove the current tasks stale references to the old mm_struct */
 extern void mm_release(struct task_struct *, struct mm_struct *);
 /* Allocate a new mm structure and copy contents from tsk->mm */
index cb2dd118cc0ffb91e62ad7746d230b0376f442dd..425450b980b8a8e60ea532b76702d552c3da6857 100644 (file)
@@ -30,7 +30,7 @@ struct sh_desc {
        struct sh_dmae_regs hw;
        struct list_head node;
        struct dma_async_tx_descriptor async_tx;
-       enum dma_data_direction direction;
+       enum dma_transfer_direction direction;
        dma_cookie_t cookie;
        size_t partial;
        int chunks;
@@ -48,6 +48,7 @@ struct sh_dmae_channel {
        unsigned int    offset;
        unsigned int    dmars;
        unsigned int    dmars_bit;
+       unsigned int    chclr_offset;
 };
 
 struct sh_dmae_pdata {
@@ -68,6 +69,8 @@ struct sh_dmae_pdata {
        unsigned int dmaor_is_32bit:1;
        unsigned int needs_tend_set:1;
        unsigned int no_dmars:1;
+       unsigned int chclr_present:1;
+       unsigned int slave_only:1;
 };
 
 /* DMA register */
index e4c711c6f3213c962e90cb25d528cd40861dbf4e..79ab2555b3b014ce75c780c7f149af2c5ead53fa 100644 (file)
@@ -48,6 +48,7 @@ extern struct file *shmem_file_setup(const char *name,
                                        loff_t size, unsigned long flags);
 extern int shmem_zero_setup(struct vm_area_struct *);
 extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
+extern void shmem_unlock_mapping(struct address_space *mapping);
 extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
                                        pgoff_t index, gfp_t gfp_mask);
 extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
diff --git a/include/linux/sirfsoc_dma.h b/include/linux/sirfsoc_dma.h
new file mode 100644 (file)
index 0000000..29d9593
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef _SIRFSOC_DMA_H_
+#define _SIRFSOC_DMA_H_
+
+bool sirfsoc_dma_filter_id(struct dma_chan *chan, void *chan_id);
+
+#endif
index e16557a357e5dc919036dbb0a4ac1473c153e9b4..c1241c428179c78bce7c8a774cbaeea9b38da35a 100644 (file)
@@ -192,7 +192,6 @@ enum
        LINUX_MIB_TCPPARTIALUNDO,               /* TCPPartialUndo */
        LINUX_MIB_TCPDSACKUNDO,                 /* TCPDSACKUndo */
        LINUX_MIB_TCPLOSSUNDO,                  /* TCPLossUndo */
-       LINUX_MIB_TCPLOSS,                      /* TCPLoss */
        LINUX_MIB_TCPLOSTRETRANSMIT,            /* TCPLostRetransmit */
        LINUX_MIB_TCPRENOFAILURES,              /* TCPRenoFailures */
        LINUX_MIB_TCPSACKFAILURES,              /* TCPSackFailures */
index 95040cc33107e1b59e8b1d7966d349aaaa845c68..91784a4f860852d01f958ecce70439e381dcaabf 100644 (file)
@@ -357,14 +357,29 @@ extern bool pm_save_wakeup_count(unsigned int count);
 
 static inline void lock_system_sleep(void)
 {
-       freezer_do_not_count();
+       current->flags |= PF_FREEZER_SKIP;
        mutex_lock(&pm_mutex);
 }
 
 static inline void unlock_system_sleep(void)
 {
+       /*
+        * Don't use freezer_count() because we don't want the call to
+        * try_to_freeze() here.
+        *
+        * Reason:
+        * Fundamentally, we just don't need it, because freezing condition
+        * doesn't come into effect until we release the pm_mutex lock,
+        * since the freezer always works with pm_mutex held.
+        *
+        * More importantly, in the case of hibernation,
+        * unlock_system_sleep() gets called in snapshot_read() and
+        * snapshot_write() when the freezing condition is still in effect.
+        * This means that if we used try_to_freeze() here, it would make them
+        * enter the refrigerator, thus causing hibernation to lock up.
+        */
+       current->flags &= ~PF_FREEZER_SKIP;
        mutex_unlock(&pm_mutex);
-       freezer_count();
 }
 
 #else /* !CONFIG_PM_SLEEP */
index 06061a7f8e69131e2401985e243298cbf9795a50..3e60228e7299bade2a864f21c9d4addc6c304cf7 100644 (file)
@@ -273,7 +273,7 @@ static inline int zone_reclaim(struct zone *z, gfp_t mask, unsigned int order)
 #endif
 
 extern int page_evictable(struct page *page, struct vm_area_struct *vma);
-extern void scan_mapping_unevictable_pages(struct address_space *);
+extern void check_move_unevictable_pages(struct page **, int nr_pages);
 
 extern unsigned long scan_unevictable_pages;
 extern int scan_unevictable_handler(struct ctl_table *, int,
diff --git a/include/linux/sysdev.h b/include/linux/sysdev.h
deleted file mode 100644 (file)
index 20f63d3..0000000
+++ /dev/null
@@ -1,164 +0,0 @@
-/**
- * System devices follow a slightly different driver model. 
- * They don't need to do dynammic driver binding, can't be probed, 
- * and don't reside on any type of peripheral bus. 
- * So, we represent and treat them a little differently.
- * 
- * We still have a notion of a driver for a system device, because we still
- * want to perform basic operations on these devices. 
- *
- * We also support auxiliary drivers binding to devices of a certain class.
- * 
- * This allows configurable drivers to register themselves for devices of
- * a certain type. And, it allows class definitions to reside in generic
- * code while arch-specific code can register specific drivers.
- *
- * Auxiliary drivers registered with a NULL cls are registered as drivers
- * for all system devices, and get notification calls for each device. 
- */
-
-
-#ifndef _SYSDEV_H_
-#define _SYSDEV_H_
-
-#include <linux/kobject.h>
-#include <linux/pm.h>
-
-
-struct sys_device;
-struct sysdev_class_attribute;
-
-struct sysdev_class {
-       const char *name;
-       struct list_head        drivers;
-       struct sysdev_class_attribute **attrs;
-       struct kset             kset;
-};
-
-struct sysdev_class_attribute {
-       struct attribute attr;
-       ssize_t (*show)(struct sysdev_class *, struct sysdev_class_attribute *,
-                       char *);
-       ssize_t (*store)(struct sysdev_class *, struct sysdev_class_attribute *,
-                        const char *, size_t);
-};
-
-#define _SYSDEV_CLASS_ATTR(_name,_mode,_show,_store)           \
-{                                                              \
-       .attr = {.name = __stringify(_name), .mode = _mode },   \
-       .show   = _show,                                        \
-       .store  = _store,                                       \
-}
-
-#define SYSDEV_CLASS_ATTR(_name,_mode,_show,_store)            \
-       struct sysdev_class_attribute attr_##_name =            \
-               _SYSDEV_CLASS_ATTR(_name,_mode,_show,_store)
-
-
-extern int sysdev_class_register(struct sysdev_class *);
-extern void sysdev_class_unregister(struct sysdev_class *);
-
-extern int sysdev_class_create_file(struct sysdev_class *,
-       struct sysdev_class_attribute *);
-extern void sysdev_class_remove_file(struct sysdev_class *,
-       struct sysdev_class_attribute *);
-/**
- * Auxiliary system device drivers.
- */
-
-struct sysdev_driver {
-       struct list_head        entry;
-       int     (*add)(struct sys_device *);
-       int     (*remove)(struct sys_device *);
-};
-
-
-extern int sysdev_driver_register(struct sysdev_class *, struct sysdev_driver *);
-extern void sysdev_driver_unregister(struct sysdev_class *, struct sysdev_driver *);
-
-
-/**
- * sys_devices can be simplified a lot from regular devices, because they're
- * simply not as versatile. 
- */
-
-struct sys_device {
-       u32             id;
-       struct sysdev_class     * cls;
-       struct kobject          kobj;
-};
-
-extern int sysdev_register(struct sys_device *);
-extern void sysdev_unregister(struct sys_device *);
-
-
-struct sysdev_attribute { 
-       struct attribute        attr;
-       ssize_t (*show)(struct sys_device *, struct sysdev_attribute *, char *);
-       ssize_t (*store)(struct sys_device *, struct sysdev_attribute *,
-                        const char *, size_t);
-};
-
-
-#define _SYSDEV_ATTR(_name, _mode, _show, _store)              \
-{                                                              \
-       .attr = { .name = __stringify(_name), .mode = _mode },  \
-       .show   = _show,                                        \
-       .store  = _store,                                       \
-}
-
-#define SYSDEV_ATTR(_name, _mode, _show, _store)               \
-       struct sysdev_attribute attr_##_name =                  \
-               _SYSDEV_ATTR(_name, _mode, _show, _store);
-
-extern int sysdev_create_file(struct sys_device *, struct sysdev_attribute *);
-extern void sysdev_remove_file(struct sys_device *, struct sysdev_attribute *);
-
-/* Create/remove NULL terminated attribute list */
-static inline int
-sysdev_create_files(struct sys_device *d, struct sysdev_attribute **a)
-{
-       return sysfs_create_files(&d->kobj, (const struct attribute **)a);
-}
-
-static inline void
-sysdev_remove_files(struct sys_device *d, struct sysdev_attribute **a)
-{
-       return sysfs_remove_files(&d->kobj, (const struct attribute **)a);
-}
-
-struct sysdev_ext_attribute {
-       struct sysdev_attribute attr;
-       void *var;
-};
-
-/*
- * Support for simple variable sysdev attributes.
- * The pointer to the variable is stored in a sysdev_ext_attribute
- */
-
-/* Add more types as needed */
-
-extern ssize_t sysdev_show_ulong(struct sys_device *, struct sysdev_attribute *,
-                               char *);
-extern ssize_t sysdev_store_ulong(struct sys_device *,
-                       struct sysdev_attribute *, const char *, size_t);
-extern ssize_t sysdev_show_int(struct sys_device *, struct sysdev_attribute *,
-                               char *);
-extern ssize_t sysdev_store_int(struct sys_device *,
-                       struct sysdev_attribute *, const char *, size_t);
-
-#define _SYSDEV_ULONG_ATTR(_name, _mode, _var)                         \
-       { _SYSDEV_ATTR(_name, _mode, sysdev_show_ulong, sysdev_store_ulong), \
-         &(_var) }
-#define SYSDEV_ULONG_ATTR(_name, _mode, _var)                  \
-       struct sysdev_ext_attribute attr_##_name =              \
-               _SYSDEV_ULONG_ATTR(_name, _mode, _var);
-#define _SYSDEV_INT_ATTR(_name, _mode, _var)                           \
-       { _SYSDEV_ATTR(_name, _mode, sysdev_show_int, sysdev_store_int), \
-         &(_var) }
-#define SYSDEV_INT_ATTR(_name, _mode, _var)                    \
-       struct sysdev_ext_attribute attr_##_name =              \
-               _SYSDEV_INT_ATTR(_name, _mode, _var);
-
-#endif /* _SYSDEV_H_ */
index 47b4a27e6e97c27f1d2bd6cfab895f4e36adb37c..796f1ff0388c979138b81b7094bb6feda5bfd212 100644 (file)
@@ -152,9 +152,9 @@ struct thermal_cooling_device *thermal_cooling_device_register(char *, void *,
 void thermal_cooling_device_unregister(struct thermal_cooling_device *);
 
 #ifdef CONFIG_NET
-extern int generate_netlink_event(u32 orig, enum events event);
+extern int thermal_generate_netlink_event(u32 orig, enum events event);
 #else
-static inline int generate_netlink_event(u32 orig, enum events event)
+static inline int thermal_generate_netlink_event(u32 orig, enum events event)
 {
        return 0;
 }
index ecdaeb98b293727274b6511ee7ef523c00324564..5cf685086dd3f6d728b6a0374f096a080b1bfc1d 100644 (file)
@@ -312,7 +312,6 @@ struct tty_driver {
         */
        struct tty_struct **ttys;
        struct ktermios **termios;
-       struct ktermios **termios_locked;
        void *driver_state;
 
        /*
index 27a4e16d2bf1c668ab08d721fa93981bd3ab7f9c..69d845739bc2304ff52bfac2f9e18f87b1002d8e 100644 (file)
@@ -1073,6 +1073,7 @@ typedef void (*usb_complete_t)(struct urb *);
  *     which the host controller driver should use in preference to the
  *     transfer_buffer.
  * @sg: scatter gather buffer list
+ * @num_mapped_sgs: (internal) number of mapped sg entries
  * @num_sgs: number of entries in the sg list
  * @transfer_buffer_length: How big is transfer_buffer.  The transfer may
  *     be broken up into chunks according to the current maximum packet
diff --git a/include/linux/usb/langwell_otg.h b/include/linux/usb/langwell_otg.h
deleted file mode 100644 (file)
index 51f17b1..0000000
+++ /dev/null
@@ -1,139 +0,0 @@
-/*
- * Intel Langwell USB OTG transceiver driver
- * Copyright (C) 2008 - 2010, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- *
- */
-
-#ifndef __LANGWELL_OTG_H
-#define __LANGWELL_OTG_H
-
-#include <linux/usb/intel_mid_otg.h>
-
-#define CI_USBCMD              0x30
-#      define USBCMD_RST               BIT(1)
-#      define USBCMD_RS                BIT(0)
-#define CI_USBSTS              0x34
-#      define USBSTS_SLI               BIT(8)
-#      define USBSTS_URI               BIT(6)
-#      define USBSTS_PCI               BIT(2)
-#define CI_PORTSC1             0x74
-#      define PORTSC_PP                BIT(12)
-#      define PORTSC_LS                (BIT(11) | BIT(10))
-#      define PORTSC_SUSP              BIT(7)
-#      define PORTSC_CCS               BIT(0)
-#define CI_HOSTPC1             0xb4
-#      define HOSTPC1_PHCD             BIT(22)
-#define CI_OTGSC               0xf4
-#      define OTGSC_DPIE               BIT(30)
-#      define OTGSC_1MSE               BIT(29)
-#      define OTGSC_BSEIE              BIT(28)
-#      define OTGSC_BSVIE              BIT(27)
-#      define OTGSC_ASVIE              BIT(26)
-#      define OTGSC_AVVIE              BIT(25)
-#      define OTGSC_IDIE               BIT(24)
-#      define OTGSC_DPIS               BIT(22)
-#      define OTGSC_1MSS               BIT(21)
-#      define OTGSC_BSEIS              BIT(20)
-#      define OTGSC_BSVIS              BIT(19)
-#      define OTGSC_ASVIS              BIT(18)
-#      define OTGSC_AVVIS              BIT(17)
-#      define OTGSC_IDIS               BIT(16)
-#      define OTGSC_DPS                BIT(14)
-#      define OTGSC_1MST               BIT(13)
-#      define OTGSC_BSE                BIT(12)
-#      define OTGSC_BSV                BIT(11)
-#      define OTGSC_ASV                BIT(10)
-#      define OTGSC_AVV                BIT(9)
-#      define OTGSC_ID                 BIT(8)
-#      define OTGSC_HABA               BIT(7)
-#      define OTGSC_HADP               BIT(6)
-#      define OTGSC_IDPU               BIT(5)
-#      define OTGSC_DP                 BIT(4)
-#      define OTGSC_OT                 BIT(3)
-#      define OTGSC_HAAR               BIT(2)
-#      define OTGSC_VC                 BIT(1)
-#      define OTGSC_VD                 BIT(0)
-#      define OTGSC_INTEN_MASK         (0x7f << 24)
-#      define OTGSC_INT_MASK           (0x5f << 24)
-#      define OTGSC_INTSTS_MASK        (0x7f << 16)
-#define CI_USBMODE             0xf8
-#      define USBMODE_CM               (BIT(1) | BIT(0))
-#      define USBMODE_IDLE             0
-#      define USBMODE_DEVICE           0x2
-#      define USBMODE_HOST             0x3
-#define USBCFG_ADDR                    0xff10801c
-#define USBCFG_LEN                     4
-#      define USBCFG_VBUSVAL           BIT(14)
-#      define USBCFG_AVALID            BIT(13)
-#      define USBCFG_BVALID            BIT(12)
-#      define USBCFG_SESEND            BIT(11)
-
-#define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI)
-
-enum langwell_otg_timer_type {
-       TA_WAIT_VRISE_TMR,
-       TA_WAIT_BCON_TMR,
-       TA_AIDL_BDIS_TMR,
-       TB_ASE0_BRST_TMR,
-       TB_SE0_SRP_TMR,
-       TB_SRP_INIT_TMR,
-       TB_SRP_FAIL_TMR,
-       TB_BUS_SUSPEND_TMR
-};
-
-#define TA_WAIT_VRISE  100
-#define TA_WAIT_BCON   30000
-#define TA_AIDL_BDIS   15000
-#define TB_ASE0_BRST   5000
-#define TB_SE0_SRP     2
-#define TB_SRP_INIT    100
-#define TB_SRP_FAIL    5500
-#define TB_BUS_SUSPEND 500
-
-struct langwell_otg_timer {
-       unsigned long expires;  /* Number of count increase to timeout */
-       unsigned long count;    /* Tick counter */
-       void (*function)(unsigned long);        /* Timeout function */
-       unsigned long data;     /* Data passed to function */
-       struct list_head list;
-};
-
-struct langwell_otg {
-       struct intel_mid_otg_xceiv      iotg;
-       struct device                   *dev;
-
-       void __iomem                    *usbcfg;        /* SCCBUSB config Reg */
-
-       unsigned                        region;
-       unsigned                        cfg_region;
-
-       struct work_struct              work;
-       struct workqueue_struct         *qwork;
-       struct timer_list               hsm_timer;
-
-       spinlock_t                      lock;
-       spinlock_t                      wq_lock;
-
-       struct notifier_block           iotg_notifier;
-};
-
-static inline
-struct langwell_otg *mid_xceiv_to_lnw(struct intel_mid_otg_xceiv *iotg)
-{
-       return container_of(iotg, struct langwell_otg, iotg);
-}
-
-#endif /* __LANGWELL_OTG_H__ */
index 89c290b69a5c6bf345f7d6e27d3b1921301b87fc..29e1920e7339867124e3605e1afd1a2777d66e8a 100644 (file)
 #define TUNER_PHILIPS_FMD1216MEX_MK3   78
 #define TUNER_PHILIPS_FM1216MK5                79
 #define TUNER_PHILIPS_FQ1216LME_MK3    80      /* Active loopthrough, no FM */
-#define TUNER_XC4000                   81      /* Xceive Silicon Tuner */
 
 #define TUNER_PARTSNIC_PTI_5NF05       81
 #define TUNER_PHILIPS_CU1216L           82
 #define TUNER_PHILIPS_FQ1236_MK5       85      /* NTSC, TDA9885, no FM radio */
 #define TUNER_TENA_TNF_5337            86
 
+#define TUNER_XC4000                   87      /* Xceive Silicon Tuner */
+
 /* tv card specific */
 #define TDA9887_PRESENT                (1<<0)
 #define TDA9887_PORT1_INACTIVE                 (1<<1)
index 5b2fed5eebf2e32063d59be921b2a830b9eaa8ea..00596e816b4d6d8dd79f6272d0e293f163d277d0 100644 (file)
@@ -1388,6 +1388,6 @@ struct hci_inquiry_req {
 };
 #define IREQ_CACHE_FLUSH 0x0001
 
-extern int enable_hs;
+extern bool enable_hs;
 
 #endif /* __HCI_H */
index 15f4be7d768e48e740b5b58f39ba76813e79e2f6..a067d30ce73e88fa86dce220e3f2002cac5be878 100644 (file)
@@ -1140,6 +1140,7 @@ struct cfg80211_disassoc_request {
  * @bssid: Fixed BSSID requested, may be %NULL, if set do not
  *     search for IBSSs with a different BSSID.
  * @channel: The channel to use if no IBSS can be found to join.
+ * @channel_type: channel type (HT mode)
  * @channel_fixed: The channel should be fixed -- do not search for
  *     IBSSs to join on other channels.
  * @ie: information element(s) to include in the beacon
@@ -1978,6 +1979,11 @@ struct wiphy_wowlan_support {
  *     configured as RX antennas. Antenna configuration commands will be
  *     rejected unless this or @available_antennas_tx is set.
  *
+ * @probe_resp_offload:
+ *      Bitmap of supported protocols for probe response offloading.
+ *      See &enum nl80211_probe_resp_offload_support_attr. Only valid
+ *      when the wiphy flag @WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD is set.
+ *
  * @max_remain_on_channel_duration: Maximum time a remain-on-channel operation
  *     may request, if implemented.
  *
index da1f064a81b3744688545acef85fcaf507fe6c82..9b582437fbeab11535ef3d024ba11c381ea777b1 100644 (file)
@@ -78,7 +78,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif,
                                      __u32 mark, __u8 tos, __u8 scope,
                                      __u8 proto, __u8 flags,
                                      __be32 daddr, __be32 saddr,
-                                     __be16 dport, __be32 sport)
+                                     __be16 dport, __be16 sport)
 {
        fl4->flowi4_oif = oif;
        fl4->flowi4_iif = 0;
index 3419bf5cd15401d611373f961edb7dcb344315d1..d55f4344333514f43f26d4da76d53574b6a9bd48 100644 (file)
@@ -41,6 +41,7 @@ static inline void *net_generic(const struct net *net, int id)
        ptr = ng->ptr[id - 1];
        rcu_read_unlock();
 
+       BUG_ON(!ptr);
        return ptr;
 }
 #endif
index e503b87c4c1b4d3310be150b07e4a94a36508133..7b2d43139c8e750e4b49ce6bf2244f12858a3211 100644 (file)
@@ -13,7 +13,6 @@
 
 #ifndef _NETPRIO_CGROUP_H
 #define _NETPRIO_CGROUP_H
-#include <linux/module.h>
 #include <linux/cgroup.h>
 #include <linux/hardirq.h>
 #include <linux/rcupdate.h>
index bb972d254dff4550301b189bb487e554da02ae58..91c1c8baf020d3c5e80df8c3f7eb88323003408f 100644 (file)
@@ -55,6 +55,7 @@
 #include <linux/uaccess.h>
 #include <linux/memcontrol.h>
 #include <linux/res_counter.h>
+#include <linux/jump_label.h>
 
 #include <linux/filter.h>
 #include <linux/rculist_nulls.h>
@@ -226,6 +227,7 @@ struct cg_proto;
   *    @sk_ack_backlog: current listen backlog
   *    @sk_max_ack_backlog: listen backlog set in listen()
   *    @sk_priority: %SO_PRIORITY setting
+  *    @sk_cgrp_prioidx: socket group's priority map index
   *    @sk_type: socket type (%SOCK_STREAM, etc)
  *    @sk_protocol: which protocol this socket belongs to in this network family
   *    @sk_peer_pid: &struct pid for this socket's peer
@@ -921,7 +923,7 @@ inline void sk_refcnt_debug_release(const struct sock *sk)
 #define sk_refcnt_debug_release(sk) do { } while (0)
 #endif /* SOCK_REFCNT_DEBUG */
 
-#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
+#if defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM) && defined(CONFIG_NET)
 extern struct jump_label_key memcg_socket_limit_enabled;
 static inline struct cg_proto *parent_cg_proto(struct proto *proto,
                                               struct cg_proto *cg_proto)
@@ -1007,9 +1009,8 @@ static inline void memcg_memory_allocated_add(struct cg_proto *prot,
        struct res_counter *fail;
        int ret;
 
-       ret = res_counter_charge(prot->memory_allocated,
-                                amt << PAGE_SHIFT, &fail);
-
+       ret = res_counter_charge_nofail(prot->memory_allocated,
+                                       amt << PAGE_SHIFT, &fail);
        if (ret < 0)
                *parent_status = OVER_LIMIT;
 }
@@ -1053,12 +1054,11 @@ sk_memory_allocated_add(struct sock *sk, int amt, int *parent_status)
 }
 
 static inline void
-sk_memory_allocated_sub(struct sock *sk, int amt, int parent_status)
+sk_memory_allocated_sub(struct sock *sk, int amt)
 {
        struct proto *prot = sk->sk_prot;
 
-       if (mem_cgroup_sockets_enabled && sk->sk_cgrp &&
-           parent_status != OVER_LIMIT) /* Otherwise was uncharged already */
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp)
                memcg_memory_allocated_sub(sk->sk_cgrp, amt);
 
        atomic_long_sub(amt, prot->memory_allocated);
index 0118ea999f67a882f6e4e389aa9f2b685e571955..d49db0113a069d4b085ffb62d413d5983cfb2445 100644 (file)
@@ -311,6 +311,8 @@ extern struct proto tcp_prot;
 #define TCP_ADD_STATS_USER(net, field, val) SNMP_ADD_STATS_USER((net)->mib.tcp_statistics, field, val)
 #define TCP_ADD_STATS(net, field, val) SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)
 
+extern void tcp_init_mem(struct net *net);
+
 extern void tcp_v4_err(struct sk_buff *skb, u32);
 
 extern void tcp_shutdown (struct sock *sk, int how);
index 5d1a758e05950a7669157914a15b6025da11a7cc..6a3922fe0be0b840c3d8a416cb6919aa28f78b7a 100644 (file)
@@ -857,7 +857,7 @@ struct fc_lport {
        enum fc_lport_state            state;
        unsigned long                  boot_time;
        struct fc_host_statistics      host_stats;
-       struct fcoe_dev_stats          *dev_stats;
+       struct fcoe_dev_stats __percpu *dev_stats;
        u8                             retry_count;
 
        /* Fabric information */
index 5ab255f196cc85c6b894adc2182f1271562a301e..cea1b5426dfa2f1c42c71f4450224722d1c2daf2 100644 (file)
@@ -417,6 +417,7 @@ static inline int __snd_bug_on(int cond)
 #define gameport_get_port_data(gp) (gp)->port_data
 #endif
 
+#ifdef CONFIG_PCI
 /* PCI quirk list helper */
 struct snd_pci_quirk {
        unsigned short subvendor;       /* PCI subvendor ID */
@@ -456,5 +457,6 @@ snd_pci_quirk_lookup(struct pci_dev *pci, const struct snd_pci_quirk *list);
 const struct snd_pci_quirk *
 snd_pci_quirk_lookup_id(u16 vendor, u16 device,
                        const struct snd_pci_quirk *list);
+#endif
 
 #endif /* __SOUND_CORE_H */
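The new CONFIG_PCI guard means struct snd_pci_quirk and its lookup helpers simply vanish on non-PCI builds instead of dragging PCI types into every user of sound/core.h. As a reminder of how the API is consumed, a minimal sketch of a caller, assuming a hypothetical driver with made-up IDs (only SND_PCI_QUIRK() and snd_pci_quirk_lookup() come from sound/core.h):

	#include <linux/pci.h>
	#include <sound/core.h>

	/* Hypothetical quirk table; the IDs, name and value are illustrative. */
	static const struct snd_pci_quirk demo_quirks[] = {
		SND_PCI_QUIRK(0x1043, 0x1234, "Example board", 1),
		{ }	/* terminator */
	};

	static int demo_lookup_quirk(struct pci_dev *pci)
	{
		const struct snd_pci_quirk *q = snd_pci_quirk_lookup(pci, demo_quirks);

		return q ? q->value : 0;	/* 0 = no quirk matched */
	}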
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
new file mode 100644 (file)
index 0000000..e5e6ff9
--- /dev/null
@@ -0,0 +1,65 @@
+#ifndef TARGET_CORE_BACKEND_H
+#define TARGET_CORE_BACKEND_H
+
+#define TRANSPORT_PLUGIN_PHBA_PDEV             1
+#define TRANSPORT_PLUGIN_VHBA_PDEV             2
+#define TRANSPORT_PLUGIN_VHBA_VDEV             3
+
+struct se_subsystem_api {
+       struct list_head sub_api_list;
+
+       char name[16];
+       struct module *owner;
+
+       u8 transport_type;
+
+       unsigned int fua_write_emulated : 1;
+       unsigned int write_cache_emulated : 1;
+
+       int (*attach_hba)(struct se_hba *, u32);
+       void (*detach_hba)(struct se_hba *);
+       int (*pmode_enable_hba)(struct se_hba *, unsigned long);
+       void *(*allocate_virtdevice)(struct se_hba *, const char *);
+       struct se_device *(*create_virtdevice)(struct se_hba *,
+                               struct se_subsystem_dev *, void *);
+       void (*free_device)(void *);
+       int (*transport_complete)(struct se_task *task);
+       struct se_task *(*alloc_task)(unsigned char *cdb);
+       int (*do_task)(struct se_task *);
+       int (*do_discard)(struct se_device *, sector_t, u32);
+       void (*do_sync_cache)(struct se_task *);
+       void (*free_task)(struct se_task *);
+       ssize_t (*check_configfs_dev_params)(struct se_hba *,
+                       struct se_subsystem_dev *);
+       ssize_t (*set_configfs_dev_params)(struct se_hba *,
+                       struct se_subsystem_dev *, const char *, ssize_t);
+       ssize_t (*show_configfs_dev_params)(struct se_hba *,
+                       struct se_subsystem_dev *, char *);
+       u32 (*get_device_rev)(struct se_device *);
+       u32 (*get_device_type)(struct se_device *);
+       sector_t (*get_blocks)(struct se_device *);
+       unsigned char *(*get_sense_buffer)(struct se_task *);
+};
+
+int    transport_subsystem_register(struct se_subsystem_api *);
+void   transport_subsystem_release(struct se_subsystem_api *);
+
+struct se_device *transport_add_device_to_core_hba(struct se_hba *,
+               struct se_subsystem_api *, struct se_subsystem_dev *, u32,
+               void *, struct se_dev_limits *, const char *, const char *);
+
+void   transport_complete_sync_cache(struct se_cmd *, int);
+void   transport_complete_task(struct se_task *, int);
+
+void   target_get_task_cdb(struct se_task *, unsigned char *);
+
+void   transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
+int    transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
+int    transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
+int    transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
+
+/* core helpers also used by command snooping in pscsi */
+void   *transport_kmap_data_sg(struct se_cmd *);
+void   transport_kunmap_data_sg(struct se_cmd *);
+
+#endif /* TARGET_CORE_BACKEND_H */
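The new backend header boils the old se_subsystem_api kitchen-sink (previously in target_core_transport.h, removed further below) down to the ops table plus the handful of prototypes a backend actually needs. A minimal sketch of how a backend module would consume it, assuming a hypothetical "demo" plugin; only the struct fields and the transport_subsystem_register()/transport_subsystem_release() prototypes come from the header above:

	#include <linux/module.h>
	#include <target/target_core_base.h>
	#include <target/target_core_backend.h>

	static int demo_attach_hba(struct se_hba *hba, u32 host_id)
	{
		return 0;	/* nothing to set up for this sketch */
	}

	static void demo_detach_hba(struct se_hba *hba)
	{
	}

	static struct se_subsystem_api demo_template = {
		.name		= "demo",
		.owner		= THIS_MODULE,
		.transport_type	= TRANSPORT_PLUGIN_VHBA_VDEV,
		.attach_hba	= demo_attach_hba,
		.detach_hba	= demo_detach_hba,
		/* allocate_virtdevice(), create_virtdevice(), do_task(), ... omitted */
	};

	static int __init demo_init(void)
	{
		return transport_subsystem_register(&demo_template);
	}

	static void __exit demo_exit(void)
	{
		transport_subsystem_release(&demo_template);
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");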
index 6873c7dd9145d2a23f682d9e8d2c695a7571d656..dc4e345a01637078d62b8a0075323b9b24ef6887 100644 (file)
@@ -10,6 +10,7 @@
 #include <net/tcp.h>
 
 #define TARGET_CORE_MOD_VERSION                "v4.1.0-rc1-ml"
+#define TARGET_CORE_VERSION            TARGET_CORE_MOD_VERSION
 
 /* Maximum Number of LUNs per Target Portal Group */
 /* Don't raise above 511 or REPORT_LUNS needs to handle >1 page */
@@ -34,6 +35,7 @@
 #define TRANSPORT_SENSE_BUFFER                 SCSI_SENSE_BUFFERSIZE
 /* Used by transport_send_check_condition_and_sense() */
 #define SPC_SENSE_KEY_OFFSET                   2
+#define SPC_ADD_SENSE_LEN_OFFSET               7
 #define SPC_ASC_KEY_OFFSET                     12
 #define SPC_ASCQ_KEY_OFFSET                    13
 #define TRANSPORT_IQN_LEN                      224
 /* Used by transport_get_inquiry_vpd_device_ident() */
 #define INQUIRY_VPD_DEVICE_IDENTIFIER_LEN      254
 
+/* Attempts before moving from SHORT to LONG */
+#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD  3
+#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3  /* In milliseconds */
+#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG  10 /* In milliseconds */
+
+#define PYX_TRANSPORT_STATUS_INTERVAL          5 /* In seconds */
+
+/*
+ * struct se_subsystem_dev->su_dev_flags
+*/
+#define SDF_FIRMWARE_VPD_UNIT_SERIAL           0x00000001
+#define SDF_EMULATED_VPD_UNIT_SERIAL           0x00000002
+#define SDF_USING_UDEV_PATH                    0x00000004
+#define SDF_USING_ALIAS                                0x00000008
+
+/*
+ * struct se_device->dev_flags
+ */
+#define DF_READ_ONLY                           0x00000001
+#define DF_SPC2_RESERVATIONS                   0x00000002
+#define DF_SPC2_RESERVATIONS_WITH_ISID         0x00000004
+
+/* struct se_dev_attrib sanity values */
+/* Default max_unmap_lba_count */
+#define DA_MAX_UNMAP_LBA_COUNT                 0
+/* Default max_unmap_block_desc_count */
+#define DA_MAX_UNMAP_BLOCK_DESC_COUNT          0
+/* Default unmap_granularity */
+#define DA_UNMAP_GRANULARITY_DEFAULT           0
+/* Default unmap_granularity_alignment */
+#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
+/* Emulation for Disable Page Out (DPO) */
+#define DA_EMULATE_DPO                         0
+/* Emulation for Forced Unit Access WRITEs */
+#define DA_EMULATE_FUA_WRITE                   1
+/* Emulation for Forced Unit Access READs */
+#define DA_EMULATE_FUA_READ                    0
+/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
+#define DA_EMULATE_WRITE_CACHE                 0
+/* Emulation for UNIT ATTENTION Interlock Control */
+#define DA_EMULATE_UA_INTLLCK_CTRL             0
+/* Emulation for TASK_ABORTED status (TAS) by default */
+#define DA_EMULATE_TAS                         1
+/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
+#define DA_EMULATE_TPU                         0
+/*
+ * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
+ * block/blk-lib.c:blkdev_issue_discard()
+ */
+#define DA_EMULATE_TPWS                                0
+/* No Emulation for PSCSI by default */
+#define DA_EMULATE_RESERVATIONS                        0
+/* No Emulation for PSCSI by default */
+#define DA_EMULATE_ALUA                                0
+/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
+#define DA_ENFORCE_PR_ISIDS                    1
+#define DA_STATUS_MAX_SECTORS_MIN              16
+#define DA_STATUS_MAX_SECTORS_MAX              8192
+/* By default don't report non-rotating (solid state) medium */
+#define DA_IS_NONROT                           0
+/* Queue Algorithm Modifier default for restricted reordering in control mode page */
+#define DA_EMULATE_REST_REORD                  0
+
+#define SE_MODE_PAGE_BUF                       512
+
+
 /* struct se_hba->hba_flags */
 enum hba_flags_table {
        HBA_FLAGS_INTERNAL_USE  = 0x01,
@@ -71,11 +139,12 @@ enum transport_tpg_type_table {
        TRANSPORT_TPG_TYPE_DISCOVERY = 1,
 };
 
-/* Used for generate timer flags */
+/* struct se_task->task_flags */
 enum se_task_flags {
        TF_ACTIVE               = (1 << 0),
        TF_SENT                 = (1 << 1),
        TF_REQUEST_STOP         = (1 << 2),
+       TF_HAS_SENSE            = (1 << 3),
 };
 
 /* Special transport agnostic struct se_cmd->t_states */
@@ -158,9 +227,38 @@ enum tcm_sense_reason_table {
        TCM_RESERVATION_CONFLICT                = 0x10,
 };
 
+enum target_sc_flags_table {
+       TARGET_SCF_BIDI_OP              = 0x01,
+       TARGET_SCF_ACK_KREF             = 0x02,
+};
+
+/* fabric independent task management function values */
+enum tcm_tmreq_table {
+       TMR_ABORT_TASK          = 1,
+       TMR_ABORT_TASK_SET      = 2,
+       TMR_CLEAR_ACA           = 3,
+       TMR_CLEAR_TASK_SET      = 4,
+       TMR_LUN_RESET           = 5,
+       TMR_TARGET_WARM_RESET   = 6,
+       TMR_TARGET_COLD_RESET   = 7,
+       TMR_FABRIC_TMR          = 255,
+};
+
+/* fabric independent task management response values */
+enum tcm_tmrsp_table {
+       TMR_FUNCTION_COMPLETE           = 0,
+       TMR_TASK_DOES_NOT_EXIST         = 1,
+       TMR_LUN_DOES_NOT_EXIST          = 2,
+       TMR_TASK_STILL_ALLEGIANT        = 3,
+       TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
+       TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED    = 5,
+       TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
+       TMR_FUNCTION_REJECTED           = 255,
+};
+
 struct se_obj {
        atomic_t obj_access_count;
-} ____cacheline_aligned;
+};
 
 /*
  * Used by TCM Core internally to signal if ALUA emulation is enabled or
@@ -207,7 +305,7 @@ struct t10_alua {
        struct config_group alua_tg_pt_gps_group;
        int (*alua_state_check)(struct se_cmd *, unsigned char *, u8 *);
        struct list_head tg_pt_gps_list;
-} ____cacheline_aligned;
+};
 
 struct t10_alua_lu_gp {
        u16     lu_gp_id;
@@ -218,7 +316,7 @@ struct t10_alua_lu_gp {
        struct config_group lu_gp_group;
        struct list_head lu_gp_node;
        struct list_head lu_gp_mem_list;
-} ____cacheline_aligned;
+};
 
 struct t10_alua_lu_gp_member {
        bool lu_gp_assoc;
@@ -227,7 +325,7 @@ struct t10_alua_lu_gp_member {
        struct t10_alua_lu_gp *lu_gp;
        struct se_device *lu_gp_mem_dev;
        struct list_head lu_gp_mem_list;
-} ____cacheline_aligned;
+};
 
 struct t10_alua_tg_pt_gp {
        u16     tg_pt_gp_id;
@@ -250,7 +348,7 @@ struct t10_alua_tg_pt_gp {
        struct config_group tg_pt_gp_group;
        struct list_head tg_pt_gp_list;
        struct list_head tg_pt_gp_mem_list;
-} ____cacheline_aligned;
+};
 
 struct t10_alua_tg_pt_gp_member {
        bool tg_pt_gp_assoc;
@@ -259,7 +357,7 @@ struct t10_alua_tg_pt_gp_member {
        struct t10_alua_tg_pt_gp *tg_pt_gp;
        struct se_port *tg_pt;
        struct list_head tg_pt_gp_mem_list;
-} ____cacheline_aligned;
+};
 
 struct t10_vpd {
        unsigned char device_identifier[INQUIRY_VPD_DEVICE_IDENTIFIER_LEN];
@@ -269,7 +367,7 @@ struct t10_vpd {
        u32 association;
        u32 device_identifier_type;
        struct list_head vpd_list;
-} ____cacheline_aligned;
+};
 
 struct t10_wwn {
        char vendor[8];
@@ -280,7 +378,7 @@ struct t10_wwn {
        struct se_subsystem_dev *t10_sub_dev;
        struct config_group t10_wwn_group;
        struct list_head t10_vpd_list;
-} ____cacheline_aligned;
+};
 
 
 /*
@@ -333,7 +431,7 @@ struct t10_pr_registration {
        struct list_head pr_reg_aptpl_list;
        struct list_head pr_reg_atp_list;
        struct list_head pr_reg_atp_mem_list;
-} ____cacheline_aligned;
+};
 
 /*
  * This set of function pointer ops is set based upon SPC3_PERSISTENT_RESERVATIONS,
@@ -374,20 +472,20 @@ struct t10_reservation {
        struct list_head registration_list;
        struct list_head aptpl_reg_list;
        struct t10_reservation_ops pr_ops;
-} ____cacheline_aligned;
+};
 
 struct se_queue_req {
        int                     state;
        struct se_cmd           *cmd;
        struct list_head        qr_list;
-} ____cacheline_aligned;
+};
 
 struct se_queue_obj {
        atomic_t                queue_cnt;
        spinlock_t              cmd_queue_lock;
        struct list_head        qobj_list;
        wait_queue_head_t       thread_wq;
-} ____cacheline_aligned;
+};
 
 struct se_task {
        unsigned long long      task_lba;
@@ -397,16 +495,14 @@ struct se_task {
        struct scatterlist      *task_sg;
        u32                     task_sg_nents;
        u16                     task_flags;
-       u8                      task_sense;
        u8                      task_scsi_status;
-       int                     task_error_status;
        enum dma_data_direction task_data_direction;
-       atomic_t                task_state_active;
        struct list_head        t_list;
        struct list_head        t_execute_list;
        struct list_head        t_state_list;
+       bool                    t_state_active;
        struct completion       task_stop_comp;
-} ____cacheline_aligned;
+};
 
 struct se_cmd {
        /* SAM response code being sent to initiator */
@@ -451,6 +547,7 @@ struct se_cmd {
        struct list_head        se_queue_node;
        struct list_head        se_cmd_list;
        struct completion       cmd_wait_comp;
+       struct kref             cmd_kref;
        struct target_core_fabric_ops *se_tfo;
        int (*execute_task)(struct se_task *);
        void (*transport_complete_callback)(struct se_cmd *);
@@ -485,6 +582,7 @@ struct se_cmd {
 
        struct scatterlist      *t_data_sg;
        unsigned int            t_data_nents;
+       void                    *t_data_vmap;
        struct scatterlist      *t_bidi_data_sg;
        unsigned int            t_bidi_data_nents;
 
@@ -492,7 +590,7 @@ struct se_cmd {
        struct list_head        t_task_list;
        u32                     t_task_list_num;
 
-} ____cacheline_aligned;
+};
 
 struct se_tmr_req {
        /* Task Management function to be performed */
@@ -510,7 +608,7 @@ struct se_tmr_req {
        struct se_device        *tmr_dev;
        struct se_lun           *tmr_lun;
        struct list_head        tmr_list;
-} ____cacheline_aligned;
+};
 
 struct se_ua {
        u8                      ua_asc;
@@ -518,7 +616,7 @@ struct se_ua {
        struct se_node_acl      *ua_nacl;
        struct list_head        ua_dev_list;
        struct list_head        ua_nacl_list;
-} ____cacheline_aligned;
+};
 
 struct se_node_acl {
        char                    initiatorname[TRANSPORT_IQN_LEN];
@@ -545,7 +643,7 @@ struct se_node_acl {
        struct config_group     *acl_default_groups[5];
        struct list_head        acl_list;
        struct list_head        acl_sess_list;
-} ____cacheline_aligned;
+};
 
 struct se_session {
        unsigned                sess_tearing_down:1;
@@ -558,7 +656,7 @@ struct se_session {
        struct list_head        sess_cmd_list;
        struct list_head        sess_wait_list;
        spinlock_t              sess_cmd_lock;
-} ____cacheline_aligned;
+};
 
 struct se_device;
 struct se_transform_info;
@@ -578,7 +676,7 @@ struct se_lun_acl {
        struct list_head        lacl_list;
        struct config_group     se_lun_group;
        struct se_ml_stat_grps  ml_stat_grps;
-}  ____cacheline_aligned;
+};
 
 struct se_dev_entry {
        bool                    def_pr_registered;
@@ -603,7 +701,7 @@ struct se_dev_entry {
        struct se_lun           *se_lun;
        struct list_head        alua_port_list;
        struct list_head        ua_list;
-}  ____cacheline_aligned;
+};
 
 struct se_dev_limits {
        /* Max supported HW queue depth */
@@ -612,7 +710,7 @@ struct se_dev_limits {
        u32             queue_depth;
        /* From include/linux/blkdev.h for the other HW/SW limits. */
        struct queue_limits limits;
-} ____cacheline_aligned;
+};
 
 struct se_dev_attrib {
        int             emulate_dpo;
@@ -641,7 +739,7 @@ struct se_dev_attrib {
        u32             unmap_granularity_alignment;
        struct se_subsystem_dev *da_sub_dev;
        struct config_group da_group;
-} ____cacheline_aligned;
+};
 
 struct se_dev_stat_grps {
        struct config_group stat_group;
@@ -674,7 +772,7 @@ struct se_subsystem_dev {
        struct config_group se_dev_pr_group;
        /* For target_core_stat.c groups */
        struct se_dev_stat_grps dev_stat_grps;
-} ____cacheline_aligned;
+};
 
 struct se_device {
        /* RELATIVE TARGET PORT IDENTIFIER Counter */
@@ -685,7 +783,6 @@ struct se_device {
        u32                     dev_port_count;
        /* See transport_device_status_table */
        u32                     dev_status;
-       u32                     dev_tcq_window_closed;
        /* Physical device queue depth */
        u32                     queue_depth;
        /* Used for SPC-2 reservations enforce of ISIDs */
@@ -702,7 +799,6 @@ struct se_device {
        spinlock_t              stats_lock;
        /* Active commands on this virtual SE device */
        atomic_t                simple_cmds;
-       atomic_t                depth_left;
        atomic_t                dev_ordered_id;
        atomic_t                execute_tasks;
        atomic_t                dev_ordered_sync;
@@ -740,7 +836,7 @@ struct se_device {
        struct se_subsystem_api *transport;
        /* Linked list for struct se_hba struct se_device list */
        struct list_head        dev_list;
-}  ____cacheline_aligned;
+};
 
 struct se_hba {
        u16                     hba_tpgt;
@@ -759,7 +855,7 @@ struct se_hba {
        struct config_group     hba_group;
        struct mutex            hba_access_mutex;
        struct se_subsystem_api *transport;
-}  ____cacheline_aligned;
+};
 
 struct se_port_stat_grps {
        struct config_group stat_group;
@@ -785,13 +881,13 @@ struct se_lun {
        struct se_port          *lun_sep;
        struct config_group     lun_group;
        struct se_port_stat_grps port_stat_grps;
-} ____cacheline_aligned;
+};
 
 struct scsi_port_stats {
        u64     cmd_pdus;
        u64     tx_data_octets;
        u64     rx_data_octets;
-} ____cacheline_aligned;
+};
 
 struct se_port {
        /* RELATIVE TARGET PORT IDENTIFIER */
@@ -811,12 +907,12 @@ struct se_port {
        struct se_portal_group *sep_tpg;
        struct list_head sep_alua_list;
        struct list_head sep_list;
-} ____cacheline_aligned;
+};
 
 struct se_tpg_np {
        struct se_portal_group *tpg_np_parent;
        struct config_group     tpg_np_group;
-} ____cacheline_aligned;
+};
 
 struct se_portal_group {
        /* Type of target portal group, see transport_tpg_type_table */
@@ -849,13 +945,13 @@ struct se_portal_group {
        struct config_group     tpg_acl_group;
        struct config_group     tpg_attrib_group;
        struct config_group     tpg_param_group;
-} ____cacheline_aligned;
+};
 
 struct se_wwn {
        struct target_fabric_configfs *wwn_tf;
        struct config_group     wwn_group;
        struct config_group     *wwn_default_groups[2];
        struct config_group     fabric_stat_group;
-} ____cacheline_aligned;
+};
 
 #endif /* TARGET_CORE_BASE_H */
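The long run of "-} ____cacheline_aligned;" → "+};" hunks above drops forced cache-line alignment from the TCM structures; the attribute rounds each object's alignment (and therefore its padded size) up to SMP_CACHE_BYTES, which buys nothing for structures of this size and count. For reference, a sketch of roughly what the dropped annotation means (paraphrasing the generic <linux/cache.h> definition, not anything changed by this patch):

	/* Roughly the generic definition: */
	#define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))

	struct padded_example {
		int x;
	} ____cacheline_aligned;	/* alignof (and padded sizeof) becomes a full cache line */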
diff --git a/include/target/target_core_device.h b/include/target/target_core_device.h
deleted file mode 100644 (file)
index 2be31ff..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-#ifndef TARGET_CORE_DEVICE_H
-#define TARGET_CORE_DEVICE_H
-
-extern int transport_lookup_cmd_lun(struct se_cmd *, u32);
-extern int transport_lookup_tmr_lun(struct se_cmd *, u32);
-extern struct se_dev_entry *core_get_se_deve_from_rtpi(
-                                       struct se_node_acl *, u16);
-extern int core_free_device_list_for_node(struct se_node_acl *,
-                                       struct se_portal_group *);
-extern void core_dec_lacl_count(struct se_node_acl *, struct se_cmd *);
-extern void core_update_device_list_access(u32, u32, struct se_node_acl *);
-extern int core_update_device_list_for_node(struct se_lun *, struct se_lun_acl *, u32,
-                                       u32, struct se_node_acl *,
-                                       struct se_portal_group *, int);
-extern void core_clear_lun_from_tpg(struct se_lun *, struct se_portal_group *);
-extern int core_dev_export(struct se_device *, struct se_portal_group *,
-                                       struct se_lun *);
-extern void core_dev_unexport(struct se_device *, struct se_portal_group *,
-                                       struct se_lun *);
-extern int target_report_luns(struct se_task *);
-extern void se_release_device_for_hba(struct se_device *);
-extern void se_release_vpd_for_dev(struct se_device *);
-extern void se_clear_dev_ports(struct se_device *);
-extern int se_free_virtual_device(struct se_device *, struct se_hba *);
-extern int se_dev_check_online(struct se_device *);
-extern int se_dev_check_shutdown(struct se_device *);
-extern void se_dev_set_default_attribs(struct se_device *, struct se_dev_limits *);
-extern int se_dev_set_task_timeout(struct se_device *, u32);
-extern int se_dev_set_max_unmap_lba_count(struct se_device *, u32);
-extern int se_dev_set_max_unmap_block_desc_count(struct se_device *, u32);
-extern int se_dev_set_unmap_granularity(struct se_device *, u32);
-extern int se_dev_set_unmap_granularity_alignment(struct se_device *, u32);
-extern int se_dev_set_emulate_dpo(struct se_device *, int);
-extern int se_dev_set_emulate_fua_write(struct se_device *, int);
-extern int se_dev_set_emulate_fua_read(struct se_device *, int);
-extern int se_dev_set_emulate_write_cache(struct se_device *, int);
-extern int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *, int);
-extern int se_dev_set_emulate_tas(struct se_device *, int);
-extern int se_dev_set_emulate_tpu(struct se_device *, int);
-extern int se_dev_set_emulate_tpws(struct se_device *, int);
-extern int se_dev_set_enforce_pr_isids(struct se_device *, int);
-extern int se_dev_set_is_nonrot(struct se_device *, int);
-extern int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
-extern int se_dev_set_queue_depth(struct se_device *, u32);
-extern int se_dev_set_max_sectors(struct se_device *, u32);
-extern int se_dev_set_optimal_sectors(struct se_device *, u32);
-extern int se_dev_set_block_size(struct se_device *, u32);
-extern struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_hba *,
-                                       struct se_device *, u32);
-extern int core_dev_del_lun(struct se_portal_group *, u32);
-extern struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
-extern struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
-                                                       u32, char *, int *);
-extern int core_dev_add_initiator_node_lun_acl(struct se_portal_group *,
-                                               struct se_lun_acl *, u32, u32);
-extern int core_dev_del_initiator_node_lun_acl(struct se_portal_group *,
-                                               struct se_lun *, struct se_lun_acl *);
-extern void core_dev_free_initiator_node_lun_acl(struct se_portal_group *,
-                                               struct se_lun_acl *lacl);
-extern int core_dev_setup_virtual_lun0(void);
-extern void core_dev_release_virtual_lun0(void);
-
-#endif /* TARGET_CORE_DEVICE_H */
similarity index 50%
rename from include/target/target_core_fabric_ops.h
rename to include/target/target_core_fabric.h
index 0256825f923dbd8d5e1824cb45c4af5d1ae3e197..d36fad317e78089ea95e13a9b414558379f6326d 100644 (file)
@@ -1,5 +1,5 @@
-/* Defined in target_core_configfs.h */
-struct target_fabric_configfs;
+#ifndef TARGET_CORE_FABRIC_H
+#define TARGET_CORE_FABRIC_H
 
 struct target_core_fabric_ops {
        struct configfs_subsystem *tf_subsys;
@@ -52,10 +52,6 @@ struct target_core_fabric_ops {
         * Returning 0 will signal a descriptor has not been released.
         */
        int (*check_stop_free)(struct se_cmd *);
-       /*
-        * Optional check for active I/O shutdown
-        */
-       int (*check_release_cmd)(struct se_cmd *);
        void (*release_cmd)(struct se_cmd *);
        /*
         * Called with spin_lock_bh(struct se_portal_group->session_lock) held.
@@ -103,3 +99,89 @@ struct target_core_fabric_ops {
                                struct config_group *, const char *);
        void (*fabric_drop_nodeacl)(struct se_node_acl *);
 };
+
+struct se_session *transport_init_session(void);
+void   __transport_register_session(struct se_portal_group *,
+               struct se_node_acl *, struct se_session *, void *);
+void   transport_register_session(struct se_portal_group *,
+               struct se_node_acl *, struct se_session *, void *);
+void   transport_free_session(struct se_session *);
+void   transport_deregister_session_configfs(struct se_session *);
+void   transport_deregister_session(struct se_session *);
+
+
+void   transport_init_se_cmd(struct se_cmd *, struct target_core_fabric_ops *,
+               struct se_session *, u32, int, int, unsigned char *);
+int    transport_lookup_cmd_lun(struct se_cmd *, u32);
+int    transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
+void   target_submit_cmd(struct se_cmd *, struct se_session *, unsigned char *,
+               unsigned char *, u32, u32, int, int, int);
+int    transport_handle_cdb_direct(struct se_cmd *);
+int    transport_generic_handle_cdb_map(struct se_cmd *);
+int    transport_generic_handle_data(struct se_cmd *);
+int    transport_generic_map_mem_to_cmd(struct se_cmd *cmd,
+               struct scatterlist *, u32, struct scatterlist *, u32);
+void   transport_do_task_sg_chain(struct se_cmd *);
+int    transport_generic_new_cmd(struct se_cmd *);
+
+void   transport_generic_process_write(struct se_cmd *);
+
+void   transport_generic_free_cmd(struct se_cmd *, int);
+
+bool   transport_wait_for_tasks(struct se_cmd *);
+int    transport_check_aborted_status(struct se_cmd *, int);
+int    transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
+
+void   target_get_sess_cmd(struct se_session *, struct se_cmd *, bool);
+int    target_put_sess_cmd(struct se_session *, struct se_cmd *);
+void   target_splice_sess_cmd_list(struct se_session *);
+void   target_wait_for_sess_cmds(struct se_session *, int);
+
+int    core_alua_check_nonop_delay(struct se_cmd *);
+
+struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
+void   core_tmr_release_req(struct se_tmr_req *);
+int    transport_generic_handle_tmr(struct se_cmd *);
+int    transport_lookup_tmr_lun(struct se_cmd *, u32);
+
+struct se_node_acl *core_tpg_check_initiator_node_acl(struct se_portal_group *,
+               unsigned char *);
+void   core_tpg_clear_object_luns(struct se_portal_group *);
+struct se_node_acl *core_tpg_add_initiator_node_acl(struct se_portal_group *,
+               struct se_node_acl *, const char *, u32);
+int    core_tpg_del_initiator_node_acl(struct se_portal_group *,
+               struct se_node_acl *, int);
+int    core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
+               unsigned char *, u32, int);
+int    core_tpg_register(struct target_core_fabric_ops *, struct se_wwn *,
+               struct se_portal_group *, void *, int);
+int    core_tpg_deregister(struct se_portal_group *);
+
+/* SAS helpers */
+u8     sas_get_fabric_proto_ident(struct se_portal_group *);
+u32    sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+               struct t10_pr_registration *, int *, unsigned char *);
+u32    sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+               struct t10_pr_registration *, int *);
+char   *sas_parse_pr_out_transport_id(struct se_portal_group *, const char *,
+               u32 *, char **);
+
+/* FC helpers */
+u8     fc_get_fabric_proto_ident(struct se_portal_group *);
+u32    fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+               struct t10_pr_registration *, int *, unsigned char *);
+u32    fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+               struct t10_pr_registration *, int *);
+char   *fc_parse_pr_out_transport_id(struct se_portal_group *, const char *,
+               u32 *, char **);
+
+/* iSCSI helpers */
+u8     iscsi_get_fabric_proto_ident(struct se_portal_group *);
+u32    iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
+               struct t10_pr_registration *, int *, unsigned char *);
+u32    iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
+               struct t10_pr_registration *, int *);
+char   *iscsi_parse_pr_out_transport_id(struct se_portal_group *, const char *,
+               u32 *, char **);
+
+#endif /* TARGET_CORE_FABRIC_H */
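With the fabric-facing prototypes now collected in a single target_core_fabric.h, a fabric driver's login path reads straight off the declarations above: allocate a session, resolve the initiator node ACL, then register the session against the TPG. A minimal sketch under those assumptions (the helper name, fabric-private pointer and error handling are illustrative):

	#include <linux/err.h>
	#include <target/target_core_base.h>
	#include <target/target_core_fabric.h>

	/* Hypothetical login helper for a fabric module. */
	static struct se_session *demo_make_session(struct se_portal_group *se_tpg,
						    unsigned char *initiator_name,
						    void *fabric_priv)
	{
		struct se_session *se_sess;
		struct se_node_acl *se_nacl;

		se_sess = transport_init_session();
		if (IS_ERR(se_sess))
			return se_sess;

		se_nacl = core_tpg_check_initiator_node_acl(se_tpg, initiator_name);
		if (!se_nacl) {
			transport_free_session(se_sess);
			return ERR_PTR(-EACCES);	/* no ACL for this initiator */
		}

		transport_register_session(se_tpg, se_nacl, se_sess, fabric_priv);
		return se_sess;
	}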
diff --git a/include/target/target_core_fabric_lib.h b/include/target/target_core_fabric_lib.h
deleted file mode 100644 (file)
index c2f8d0e..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-#ifndef TARGET_CORE_FABRIC_LIB_H
-#define TARGET_CORE_FABRIC_LIB_H
-
-extern u8 sas_get_fabric_proto_ident(struct se_portal_group *);
-extern u32 sas_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
-                       struct t10_pr_registration *, int *, unsigned char *);
-extern u32 sas_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
-                       struct t10_pr_registration *, int *);
-extern char *sas_parse_pr_out_transport_id(struct se_portal_group *,
-                       const char *, u32 *, char **);
-
-extern u8 fc_get_fabric_proto_ident(struct se_portal_group *);
-extern u32 fc_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
-                       struct t10_pr_registration *, int *, unsigned char *);
-extern u32 fc_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
-                       struct t10_pr_registration *, int *);
-extern char *fc_parse_pr_out_transport_id(struct se_portal_group *,
-                       const char *, u32 *, char **);
-
-extern u8 iscsi_get_fabric_proto_ident(struct se_portal_group *);
-extern u32 iscsi_get_pr_transport_id(struct se_portal_group *, struct se_node_acl *,
-                       struct t10_pr_registration *, int *, unsigned char *);
-extern u32 iscsi_get_pr_transport_id_len(struct se_portal_group *, struct se_node_acl *,
-                       struct t10_pr_registration *, int *);
-extern char *iscsi_parse_pr_out_transport_id(struct se_portal_group *,
-                       const char *, u32 *, char **);
-
-#endif /* TARGET_CORE_FABRIC_LIB_H */
diff --git a/include/target/target_core_tmr.h b/include/target/target_core_tmr.h
deleted file mode 100644 (file)
index d5876e1..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef TARGET_CORE_TMR_H
-#define TARGET_CORE_TMR_H
-
-/* fabric independent task management function values */
-enum tcm_tmreq_table {
-       TMR_ABORT_TASK          = 1,
-       TMR_ABORT_TASK_SET      = 2,
-       TMR_CLEAR_ACA           = 3,
-       TMR_CLEAR_TASK_SET      = 4,
-       TMR_LUN_RESET           = 5,
-       TMR_TARGET_WARM_RESET   = 6,
-       TMR_TARGET_COLD_RESET   = 7,
-       TMR_FABRIC_TMR          = 255,
-};
-
-/* fabric independent task management response values */
-enum tcm_tmrsp_table {
-       TMR_FUNCTION_COMPLETE           = 0,
-       TMR_TASK_DOES_NOT_EXIST         = 1,
-       TMR_LUN_DOES_NOT_EXIST          = 2,
-       TMR_TASK_STILL_ALLEGIANT        = 3,
-       TMR_TASK_FAILOVER_NOT_SUPPORTED = 4,
-       TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED    = 5,
-       TMR_FUNCTION_AUTHORIZATION_FAILED = 6,
-       TMR_FUNCTION_REJECTED           = 255,
-};
-
-extern struct kmem_cache *se_tmr_req_cache;
-
-extern struct se_tmr_req *core_tmr_alloc_req(struct se_cmd *, void *, u8, gfp_t);
-extern void core_tmr_release_req(struct se_tmr_req *);
-extern int core_tmr_lun_reset(struct se_device *, struct se_tmr_req *,
-                               struct list_head *, struct se_cmd *);
-
-#endif /* TARGET_CORE_TMR_H */
diff --git a/include/target/target_core_tpg.h b/include/target/target_core_tpg.h
deleted file mode 100644 (file)
index 77e1872..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-#ifndef TARGET_CORE_TPG_H
-#define TARGET_CORE_TPG_H
-
-extern struct se_node_acl *__core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
-                                               const char *);
-extern struct se_node_acl *core_tpg_get_initiator_node_acl(struct se_portal_group *tpg,
-                                               unsigned char *);
-extern void core_tpg_add_node_to_devs(struct se_node_acl *,
-                                               struct se_portal_group *);
-extern struct se_node_acl *core_tpg_check_initiator_node_acl(
-                                               struct se_portal_group *,
-                                               unsigned char *);
-extern void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
-extern void core_tpg_wait_for_mib_ref(struct se_node_acl *);
-extern void core_tpg_clear_object_luns(struct se_portal_group *);
-extern struct se_node_acl *core_tpg_add_initiator_node_acl(
-                                       struct se_portal_group *,
-                                       struct se_node_acl *,
-                                       const char *, u32);
-extern int core_tpg_del_initiator_node_acl(struct se_portal_group *,
-                                               struct se_node_acl *, int);
-extern int core_tpg_set_initiator_node_queue_depth(struct se_portal_group *,
-                                               unsigned char *, u32, int);
-extern int core_tpg_register(struct target_core_fabric_ops *,
-                                       struct se_wwn *,
-                                       struct se_portal_group *, void *,
-                                       int);
-extern int core_tpg_deregister(struct se_portal_group *);
-extern struct se_lun *core_tpg_pre_addlun(struct se_portal_group *, u32);
-extern int core_tpg_post_addlun(struct se_portal_group *, struct se_lun *, u32,
-                               void *);
-extern struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32, int *);
-extern int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
-
-#endif /* TARGET_CORE_TPG_H */
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
deleted file mode 100644 (file)
index dac4f2d..0000000
+++ /dev/null
@@ -1,287 +0,0 @@
-#ifndef TARGET_CORE_TRANSPORT_H
-#define TARGET_CORE_TRANSPORT_H
-
-#define TARGET_CORE_VERSION                    TARGET_CORE_MOD_VERSION
-
-/* Attempts before moving from SHORT to LONG */
-#define PYX_TRANSPORT_WINDOW_CLOSED_THRESHOLD  3
-#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_SHORT 3  /* In milliseconds */
-#define PYX_TRANSPORT_WINDOW_CLOSED_WAIT_LONG  10 /* In milliseconds */
-
-#define PYX_TRANSPORT_STATUS_INTERVAL          5 /* In seconds */
-
-#define TRANSPORT_PLUGIN_PHBA_PDEV             1
-#define TRANSPORT_PLUGIN_VHBA_PDEV             2
-#define TRANSPORT_PLUGIN_VHBA_VDEV             3
-
-/*
- * struct se_subsystem_dev->su_dev_flags
-*/
-#define SDF_FIRMWARE_VPD_UNIT_SERIAL           0x00000001
-#define SDF_EMULATED_VPD_UNIT_SERIAL           0x00000002
-#define SDF_USING_UDEV_PATH                    0x00000004
-#define SDF_USING_ALIAS                                0x00000008
-
-/*
- * struct se_device->dev_flags
- */
-#define DF_READ_ONLY                           0x00000001
-#define DF_SPC2_RESERVATIONS                   0x00000002
-#define DF_SPC2_RESERVATIONS_WITH_ISID         0x00000004
-
-/* struct se_dev_attrib sanity values */
-/* Default max_unmap_lba_count */
-#define DA_MAX_UNMAP_LBA_COUNT                 0
-/* Default max_unmap_block_desc_count */
-#define DA_MAX_UNMAP_BLOCK_DESC_COUNT          0
-/* Default unmap_granularity */
-#define DA_UNMAP_GRANULARITY_DEFAULT           0
-/* Default unmap_granularity_alignment */
-#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
-/* Emulation for Direct Page Out */
-#define DA_EMULATE_DPO                         0
-/* Emulation for Forced Unit Access WRITEs */
-#define DA_EMULATE_FUA_WRITE                   1
-/* Emulation for Forced Unit Access READs */
-#define DA_EMULATE_FUA_READ                    0
-/* Emulation for WriteCache and SYNCHRONIZE_CACHE */
-#define DA_EMULATE_WRITE_CACHE                 0
-/* Emulation for UNIT ATTENTION Interlock Control */
-#define DA_EMULATE_UA_INTLLCK_CTRL             0
-/* Emulation for TASK_ABORTED status (TAS) by default */
-#define DA_EMULATE_TAS                         1
-/* Emulation for Thin Provisioning UNMAP using block/blk-lib.c:blkdev_issue_discard() */
-#define DA_EMULATE_TPU                         0
-/*
- * Emulation for Thin Provisioning WRITE_SAME w/ UNMAP=1 bit using
- * block/blk-lib.c:blkdev_issue_discard()
- */
-#define DA_EMULATE_TPWS                                0
-/* No Emulation for PSCSI by default */
-#define DA_EMULATE_RESERVATIONS                        0
-/* No Emulation for PSCSI by default */
-#define DA_EMULATE_ALUA                                0
-/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
-#define DA_ENFORCE_PR_ISIDS                    1
-#define DA_STATUS_MAX_SECTORS_MIN              16
-#define DA_STATUS_MAX_SECTORS_MAX              8192
-/* By default don't report non-rotating (solid state) medium */
-#define DA_IS_NONROT                           0
-/* Queue Algorithm Modifier default for restricted reordering in control mode page */
-#define DA_EMULATE_REST_REORD                  0
-
-#define SE_MODE_PAGE_BUF                       512
-
-#define MOD_MAX_SECTORS(ms, bs)                        (ms % (PAGE_SIZE / bs))
-
-struct se_subsystem_api;
-
-extern int init_se_kmem_caches(void);
-extern void release_se_kmem_caches(void);
-extern u32 scsi_get_new_index(scsi_index_t);
-extern void transport_init_queue_obj(struct se_queue_obj *);
-extern void transport_subsystem_check_init(void);
-extern int transport_subsystem_register(struct se_subsystem_api *);
-extern void transport_subsystem_release(struct se_subsystem_api *);
-extern void transport_load_plugins(void);
-extern struct se_session *transport_init_session(void);
-extern void __transport_register_session(struct se_portal_group *,
-                                       struct se_node_acl *,
-                                       struct se_session *, void *);
-extern void transport_register_session(struct se_portal_group *,
-                                       struct se_node_acl *,
-                                       struct se_session *, void *);
-extern void transport_free_session(struct se_session *);
-extern void transport_deregister_session_configfs(struct se_session *);
-extern void transport_deregister_session(struct se_session *);
-extern void transport_cmd_finish_abort(struct se_cmd *, int);
-extern void transport_complete_sync_cache(struct se_cmd *, int);
-extern void transport_complete_task(struct se_task *, int);
-extern void transport_add_task_to_execute_queue(struct se_task *,
-                                               struct se_task *,
-                                               struct se_device *);
-extern void transport_remove_task_from_execute_queue(struct se_task *,
-                                               struct se_device *);
-extern void __transport_remove_task_from_execute_queue(struct se_task *,
-                                               struct se_device *);
-unsigned char *transport_dump_cmd_direction(struct se_cmd *);
-extern void transport_dump_dev_state(struct se_device *, char *, int *);
-extern void transport_dump_dev_info(struct se_device *, struct se_lun *,
-                                       unsigned long long, char *, int *);
-extern void transport_dump_vpd_proto_id(struct t10_vpd *,
-                                       unsigned char *, int);
-extern void transport_set_vpd_proto_id(struct t10_vpd *, unsigned char *);
-extern int transport_dump_vpd_assoc(struct t10_vpd *,
-                                       unsigned char *, int);
-extern int transport_set_vpd_assoc(struct t10_vpd *, unsigned char *);
-extern int transport_dump_vpd_ident_type(struct t10_vpd *,
-                                       unsigned char *, int);
-extern int transport_set_vpd_ident_type(struct t10_vpd *, unsigned char *);
-extern int transport_dump_vpd_ident(struct t10_vpd *,
-                                       unsigned char *, int);
-extern int transport_set_vpd_ident(struct t10_vpd *, unsigned char *);
-extern struct se_device *transport_add_device_to_core_hba(struct se_hba *,
-                                       struct se_subsystem_api *,
-                                       struct se_subsystem_dev *, u32,
-                                       void *, struct se_dev_limits *,
-                                       const char *, const char *);
-extern void transport_init_se_cmd(struct se_cmd *,
-                                       struct target_core_fabric_ops *,
-                                       struct se_session *, u32, int, int,
-                                       unsigned char *);
-void *transport_kmap_first_data_page(struct se_cmd *cmd);
-void transport_kunmap_first_data_page(struct se_cmd *cmd);
-extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *);
-extern int transport_handle_cdb_direct(struct se_cmd *);
-extern int transport_generic_handle_cdb_map(struct se_cmd *);
-extern int transport_generic_handle_data(struct se_cmd *);
-extern int transport_generic_handle_tmr(struct se_cmd *);
-extern bool target_stop_task(struct se_task *task, unsigned long *flags);
-extern int transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *, u32,
-                               struct scatterlist *, u32);
-extern int transport_clear_lun_from_sessions(struct se_lun *);
-extern bool transport_wait_for_tasks(struct se_cmd *);
-extern int transport_check_aborted_status(struct se_cmd *, int);
-extern int transport_send_check_condition_and_sense(struct se_cmd *, u8, int);
-extern void transport_send_task_abort(struct se_cmd *);
-extern void transport_release_cmd(struct se_cmd *);
-extern void transport_generic_free_cmd(struct se_cmd *, int);
-extern void target_get_sess_cmd(struct se_session *, struct se_cmd *);
-extern int target_put_sess_cmd(struct se_session *, struct se_cmd *);
-extern void target_splice_sess_cmd_list(struct se_session *);
-extern void target_wait_for_sess_cmds(struct se_session *, int);
-extern void transport_generic_wait_for_cmds(struct se_cmd *, int);
-extern void transport_do_task_sg_chain(struct se_cmd *);
-extern void transport_generic_process_write(struct se_cmd *);
-extern int transport_generic_new_cmd(struct se_cmd *);
-extern int transport_generic_do_tmr(struct se_cmd *);
-/* From target_core_alua.c */
-extern int core_alua_check_nonop_delay(struct se_cmd *);
-/* From target_core_cdb.c */
-extern int transport_emulate_control_cdb(struct se_task *);
-extern void target_get_task_cdb(struct se_task *task, unsigned char *cdb);
-
-/*
- * Each se_transport_task_t can have N number of possible struct se_task's
- * for the storage transport(s) to possibly execute.
- * Used primarily for splitting up CDBs that exceed the physical storage
- * HBA's maximum sector count per task.
- */
-struct se_mem {
-       struct page     *se_page;
-       u32             se_len;
-       u32             se_off;
-       struct list_head se_list;
-} ____cacheline_aligned;
-
-/*
- *     Each type of disk transport supported MUST have a template defined
- *     within its .h file.
- */
-struct se_subsystem_api {
-       /*
-        * The Name. :-)
-        */
-       char name[16];
-       /*
-        * Transport Type.
-        */
-       u8 transport_type;
-
-       unsigned int fua_write_emulated : 1;
-       unsigned int write_cache_emulated : 1;
-
-       /*
-        * struct module for struct se_hba references
-        */
-       struct module *owner;
-       /*
-        * Used for global se_subsystem_api list_head
-        */
-       struct list_head sub_api_list;
-       /*
-        * attach_hba():
-        */
-       int (*attach_hba)(struct se_hba *, u32);
-       /*
-        * detach_hba():
-        */
-       void (*detach_hba)(struct se_hba *);
-       /*
-        * pmode_hba(): Used for TCM/pSCSI subsystem plugin HBA ->
-        *              Linux/SCSI struct Scsi_Host passthrough
-       */
-       int (*pmode_enable_hba)(struct se_hba *, unsigned long);
-       /*
-        * allocate_virtdevice():
-        */
-       void *(*allocate_virtdevice)(struct se_hba *, const char *);
-       /*
-        * create_virtdevice(): Only for Virtual HBAs
-        */
-       struct se_device *(*create_virtdevice)(struct se_hba *,
-                               struct se_subsystem_dev *, void *);
-       /*
-        * free_device():
-        */
-       void (*free_device)(void *);
-
-       /*
-        * transport_complete():
-        *
-        * Use transport_generic_complete() for majority of DAS transport
-        * drivers.  Provided out of convenience.
-        */
-       int (*transport_complete)(struct se_task *task);
-       struct se_task *(*alloc_task)(unsigned char *cdb);
-       /*
-        * do_task():
-        */
-       int (*do_task)(struct se_task *);
-       /*
-        * Used by virtual subsystem plugins IBLOCK and FILEIO to emulate
-        * UNMAP and WRITE_SAME_* w/ UNMAP=1 <-> Linux/Block Discard
-        */
-       int (*do_discard)(struct se_device *, sector_t, u32);
-       /*
-        * Used  by virtual subsystem plugins IBLOCK and FILEIO to emulate
-        * SYNCHRONIZE_CACHE_* <-> Linux/Block blkdev_issue_flush()
-        */
-       void (*do_sync_cache)(struct se_task *);
-       /*
-        * free_task():
-        */
-       void (*free_task)(struct se_task *);
-       /*
-        * check_configfs_dev_params():
-        */
-       ssize_t (*check_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *);
-       /*
-        * set_configfs_dev_params():
-        */
-       ssize_t (*set_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
-                                               const char *, ssize_t);
-       /*
-        * show_configfs_dev_params():
-        */
-       ssize_t (*show_configfs_dev_params)(struct se_hba *, struct se_subsystem_dev *,
-                                               char *);
-       /*
-        * get_device_rev():
-        */
-       u32 (*get_device_rev)(struct se_device *);
-       /*
-        * get_device_type():
-        */
-       u32 (*get_device_type)(struct se_device *);
-       /*
-        * Get the sector_t from a subsystem backstore..
-        */
-       sector_t (*get_blocks)(struct se_device *);
-       /*
-        * get_sense_buffer():
-        */
-       unsigned char *(*get_sense_buffer)(struct se_task *);
-} ____cacheline_aligned;
-
-#endif /* TARGET_CORE_TRANSPORT_H */
index b31702ac15beb3f31ea7f2b5c1c36dd2b64b2f70..84f3001a568d9edf435c0465b7d421bbc74e858c 100644 (file)
@@ -16,6 +16,8 @@ struct btrfs_delayed_ref_node;
 struct btrfs_delayed_tree_ref;
 struct btrfs_delayed_data_ref;
 struct btrfs_delayed_ref_head;
+struct btrfs_block_group_cache;
+struct btrfs_free_cluster;
 struct map_lookup;
 struct extent_buffer;
 
@@ -44,6 +46,17 @@ struct extent_buffer;
        obj, ((obj >= BTRFS_DATA_RELOC_TREE_OBJECTID) ||                \
              (obj <= BTRFS_CSUM_TREE_OBJECTID )) ? __show_root_type(obj) : "-"
 
+#define BTRFS_GROUP_FLAGS      \
+       { BTRFS_BLOCK_GROUP_DATA,       "DATA"}, \
+       { BTRFS_BLOCK_GROUP_SYSTEM,     "SYSTEM"}, \
+       { BTRFS_BLOCK_GROUP_METADATA,   "METADATA"}, \
+       { BTRFS_BLOCK_GROUP_RAID0,      "RAID0"}, \
+       { BTRFS_BLOCK_GROUP_RAID1,      "RAID1"}, \
+       { BTRFS_BLOCK_GROUP_DUP,        "DUP"}, \
+       { BTRFS_BLOCK_GROUP_RAID10,     "RAID10"}
+
+#define BTRFS_UUID_SIZE 16
+
 TRACE_EVENT(btrfs_transaction_commit,
 
        TP_PROTO(struct btrfs_root *root),
@@ -621,6 +634,34 @@ TRACE_EVENT(btrfs_cow_block,
                  __entry->cow_level)
 );
 
+TRACE_EVENT(btrfs_space_reservation,
+
+       TP_PROTO(struct btrfs_fs_info *fs_info, char *type, u64 val,
+                u64 bytes, int reserve),
+
+       TP_ARGS(fs_info, type, val, bytes, reserve),
+
+       TP_STRUCT__entry(
+               __array(        u8,     fsid,   BTRFS_UUID_SIZE )
+               __string(       type,   type                    )
+               __field(        u64,    val                     )
+               __field(        u64,    bytes                   )
+               __field(        int,    reserve                 )
+       ),
+
+       TP_fast_assign(
+               memcpy(__entry->fsid, fs_info->fsid, BTRFS_UUID_SIZE);
+               __assign_str(type, type);
+               __entry->val            = val;
+               __entry->bytes          = bytes;
+               __entry->reserve        = reserve;
+       ),
+
+       TP_printk("%pU: %s: %Lu %s %Lu", __entry->fsid, __get_str(type),
+                 __entry->val, __entry->reserve ? "reserve" : "release",
+                 __entry->bytes)
+);
+
 DECLARE_EVENT_CLASS(btrfs__reserved_extent,
 
        TP_PROTO(struct btrfs_root *root, u64 start, u64 len),
@@ -659,6 +700,168 @@ DEFINE_EVENT(btrfs__reserved_extent,  btrfs_reserved_extent_free,
        TP_ARGS(root, start, len)
 );
 
+TRACE_EVENT(find_free_extent,
+
+       TP_PROTO(struct btrfs_root *root, u64 num_bytes, u64 empty_size,
+                u64 data),
+
+       TP_ARGS(root, num_bytes, empty_size, data),
+
+       TP_STRUCT__entry(
+               __field(        u64,    root_objectid           )
+               __field(        u64,    num_bytes               )
+               __field(        u64,    empty_size              )
+               __field(        u64,    data                    )
+       ),
+
+       TP_fast_assign(
+               __entry->root_objectid  = root->root_key.objectid;
+               __entry->num_bytes      = num_bytes;
+               __entry->empty_size     = empty_size;
+               __entry->data           = data;
+       ),
+
+       TP_printk("root = %Lu(%s), len = %Lu, empty_size = %Lu, "
+                 "flags = %Lu(%s)", show_root_type(__entry->root_objectid),
+                 __entry->num_bytes, __entry->empty_size, __entry->data,
+                 __print_flags((unsigned long)__entry->data, "|",
+                                BTRFS_GROUP_FLAGS))
+);
+
+DECLARE_EVENT_CLASS(btrfs__reserve_extent,
+
+       TP_PROTO(struct btrfs_root *root,
+                struct btrfs_block_group_cache *block_group, u64 start,
+                u64 len),
+
+       TP_ARGS(root, block_group, start, len),
+
+       TP_STRUCT__entry(
+               __field(        u64,    root_objectid           )
+               __field(        u64,    bg_objectid             )
+               __field(        u64,    flags                   )
+               __field(        u64,    start                   )
+               __field(        u64,    len                     )
+       ),
+
+       TP_fast_assign(
+               __entry->root_objectid  = root->root_key.objectid;
+               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->flags          = block_group->flags;
+               __entry->start          = start;
+               __entry->len            = len;
+       ),
+
+       TP_printk("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
+                 "start = %Lu, len = %Lu",
+                 show_root_type(__entry->root_objectid), __entry->bg_objectid,
+                 __entry->flags, __print_flags((unsigned long)__entry->flags,
+                                               "|", BTRFS_GROUP_FLAGS),
+                 __entry->start, __entry->len)
+);
+
+DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
+
+       TP_PROTO(struct btrfs_root *root,
+                struct btrfs_block_group_cache *block_group, u64 start,
+                u64 len),
+
+       TP_ARGS(root, block_group, start, len)
+);
+
+DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
+
+       TP_PROTO(struct btrfs_root *root,
+                struct btrfs_block_group_cache *block_group, u64 start,
+                u64 len),
+
+       TP_ARGS(root, block_group, start, len)
+);
+
+TRACE_EVENT(btrfs_find_cluster,
+
+       TP_PROTO(struct btrfs_block_group_cache *block_group, u64 start,
+                u64 bytes, u64 empty_size, u64 min_bytes),
+
+       TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
+
+       TP_STRUCT__entry(
+               __field(        u64,    bg_objectid             )
+               __field(        u64,    flags                   )
+               __field(        u64,    start                   )
+               __field(        u64,    bytes                   )
+               __field(        u64,    empty_size              )
+               __field(        u64,    min_bytes               )
+       ),
+
+       TP_fast_assign(
+               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->flags          = block_group->flags;
+               __entry->start          = start;
+               __entry->bytes          = bytes;
+               __entry->empty_size     = empty_size;
+               __entry->min_bytes      = min_bytes;
+       ),
+
+       TP_printk("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
+                 " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
+                 __entry->flags,
+                 __print_flags((unsigned long)__entry->flags, "|",
+                               BTRFS_GROUP_FLAGS), __entry->start,
+                 __entry->bytes, __entry->empty_size,  __entry->min_bytes)
+);
+
+TRACE_EVENT(btrfs_failed_cluster_setup,
+
+       TP_PROTO(struct btrfs_block_group_cache *block_group),
+
+       TP_ARGS(block_group),
+
+       TP_STRUCT__entry(
+               __field(        u64,    bg_objectid             )
+       ),
+
+       TP_fast_assign(
+               __entry->bg_objectid    = block_group->key.objectid;
+       ),
+
+       TP_printk("block_group = %Lu", __entry->bg_objectid)
+);
+
+TRACE_EVENT(btrfs_setup_cluster,
+
+       TP_PROTO(struct btrfs_block_group_cache *block_group,
+                struct btrfs_free_cluster *cluster, u64 size, int bitmap),
+
+       TP_ARGS(block_group, cluster, size, bitmap),
+
+       TP_STRUCT__entry(
+               __field(        u64,    bg_objectid             )
+               __field(        u64,    flags                   )
+               __field(        u64,    start                   )
+               __field(        u64,    max_size                )
+               __field(        u64,    size                    )
+               __field(        int,    bitmap                  )
+       ),
+
+       TP_fast_assign(
+               __entry->bg_objectid    = block_group->key.objectid;
+               __entry->flags          = block_group->flags;
+               __entry->start          = cluster->window_start;
+               __entry->max_size       = cluster->max_size;
+               __entry->size           = size;
+               __entry->bitmap         = bitmap;
+       ),
+
+       TP_printk("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
+                 "size = %Lu, max_size = %Lu, bitmap = %d",
+                 __entry->bg_objectid,
+                 __entry->flags,
+                 __print_flags((unsigned long)__entry->flags, "|",
+                               BTRFS_GROUP_FLAGS), __entry->start,
+                 __entry->size, __entry->max_size, __entry->bitmap)
+);
+
 #endif /* _TRACE_BTRFS_H */
 
 /* This part must be outside protection */
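The TRACE_EVENT()/DECLARE_EVENT_CLASS()/DEFINE_EVENT() blocks above only declare the new btrfs allocation tracepoints; on a kernel that contains them, each event appears under the btrfs group in tracefs and is rendered with its TP_printk() format. A minimal userspace sketch, assuming tracefs is mounted at /sys/kernel/debug/tracing (it may also be /sys/kernel/tracing) and using the find_free_extent event as the example:

/* Hedged sketch: enable the find_free_extent tracepoint and stream trace_pipe.
 * The tracefs mount point is an assumption and may differ on other systems. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

int main(void)
{
	const char *enable =
		"/sys/kernel/debug/tracing/events/btrfs/find_free_extent/enable";
	char buf[4096];
	ssize_t n;
	int fd;

	fd = open(enable, O_WRONLY);
	if (fd < 0) {
		perror("open enable");
		return 1;
	}
	if (write(fd, "1", 1) != 1)
		perror("write enable");
	close(fd);

	/* Each line is rendered with the TP_printk() format shown above. */
	fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);
	if (fd < 0) {
		perror("open trace_pipe");
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return 0;
}

Reading trace_pipe blocks until events arrive, so generate some btrfs allocation activity in another shell and interrupt the program to stop.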
index 062b3b24ff1071047f6c7fe26c80d19c112fe311..483f67caa7ad42b0b360e99a3df2c1502dc1dae1 100644 (file)
@@ -590,6 +590,11 @@ struct omap_dss_device {
        int (*get_backlight)(struct omap_dss_device *dssdev);
 };
 
+struct omap_dss_hdmi_data {
+       int hpd_gpio;
+};
+
 struct omap_dss_driver {
        struct device_driver driver;
 
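struct omap_dss_hdmi_data gives board code a place to hand the HDMI hot-plug-detect GPIO to the DSS HDMI driver. The fragment below is a hedged, non-compilable sketch of how a board file might fill it in; the GPIO number is made up, and the assumption that the omap_dss_device's data pointer is what carries the structure to the driver comes from contemporary OMAP4 board files, not from this hunk.

/* Illustrative board-data fragment; GPIO 63 is an arbitrary example value. */
static struct omap_dss_hdmi_data board_hdmi_data = {
	.hpd_gpio = 63,
};

static struct omap_dss_device board_hdmi_device = {
	.name	= "hdmi",
	.type	= OMAP_DISPLAY_TYPE_HDMI,
	.data	= &board_hdmi_data,	/* consumed by the HDMI panel driver */
};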
index 6ac2236244c381f3bef900c600d1f9c1ba41c7f5..3f42cd66f0f87a25510dd000aed37de5c0d5cc71 100644 (file)
@@ -355,7 +355,7 @@ config AUDIT
 
 config AUDITSYSCALL
        bool "Enable system-call auditing support"
-       depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH)
+       depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || ARM)
        default y if SECURITY_SELINUX
        help
          Enable low-overhead system-call auditing infrastructure that
@@ -372,6 +372,20 @@ config AUDIT_TREE
        depends on AUDITSYSCALL
        select FSNOTIFY
 
+config AUDIT_LOGINUID_IMMUTABLE
+       bool "Make audit loginuid immutable"
+       depends on AUDIT
+       help
+         This option controls whether a task setting its loginuid requires
+         CAP_AUDIT_CONTROL, or whether no special permission is needed but the
+         loginuid may only be set if it was never previously set.  On systems
+         which use systemd or a similar central process to restart login
+         services this should be set to true.  On older systems in which an
+         admin would typically have to directly stop and start processes this
+         should be set to false.  Setting this to true allows one to drop
+         potentially dangerous capabilities from the login tasks, but may not
+         be backwards compatible with older init systems.
+
 source "kernel/irq/Kconfig"
 
 menu "RCU Subsystem"
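CONFIG_AUDIT_LOGINUID_IMMUTABLE governs the loginuid that login services write through /proc/<pid>/loginuid (the matching audit_set_loginuid() change appears later in this diff). A small userspace sketch, assuming an audit-enabled kernel so that /proc/self/loginuid exists: with the option enabled the second write is expected to fail with EPERM, while with it disabled each write instead needs CAP_AUDIT_CONTROL.

/* Sketch: set the calling task's audit loginuid via procfs, twice, to observe
 * the immutability behaviour.  The UID values are arbitrary examples. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int set_loginuid(unsigned int uid)
{
	FILE *f = fopen("/proc/self/loginuid", "w");

	if (!f)
		return -errno;
	if (fprintf(f, "%u", uid) < 0) {
		fclose(f);
		return -EIO;
	}
	if (fclose(f) != 0)
		return -errno;
	return 0;
}

int main(void)
{
	printf("first  write: %s\n", strerror(-set_loginuid(1000)));
	printf("second write: %s\n", strerror(-set_loginuid(1001)));
	return 0;
}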
index 9b7c8ab7d75cad27e92272c0a0c81927c9fbaa03..86ee272de210bbae8236d45a4cebda3021afe2e9 100644 (file)
@@ -128,7 +128,6 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
 
        if (S_ISREG(mode)) {
                struct mqueue_inode_info *info;
-               struct task_struct *p = current;
                unsigned long mq_bytes, mq_msg_tblsz;
 
                inode->i_fop = &mqueue_file_operations;
@@ -159,7 +158,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
 
                spin_lock(&mq_lock);
                if (u->mq_bytes + mq_bytes < u->mq_bytes ||
-                   u->mq_bytes + mq_bytes > task_rlimit(p, RLIMIT_MSGQUEUE)) {
+                   u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
                        spin_unlock(&mq_lock);
                        /* mqueue_evict_inode() releases info->messages */
                        ret = -EMFILE;
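The ipc/mqueue.c hunk only removes a redundant local: the accounting check always applied to the calling task, so rlimit(RLIMIT_MSGQUEUE), which reads current's limit, is equivalent to task_rlimit(p, RLIMIT_MSGQUEUE) with p == current. The limit being enforced is the ordinary per-process resource limit; a quick sketch to inspect it from userspace:

/* Print the calling process's message-queue byte limit (RLIMIT_MSGQUEUE),
 * the value the mq_bytes accounting above is checked against. */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MSGQUEUE, &rl) != 0) {
		perror("getrlimit");
		return 1;
	}
	printf("RLIMIT_MSGQUEUE: soft=%llu hard=%llu\n",
	       (unsigned long long)rl.rlim_cur,
	       (unsigned long long)rl.rlim_max);
	return 0;
}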
index 02ecf2c078fce9b62ee2b1ec0ef252c0320acdf5..b76be5bda6c2f10fd98019bea5730c24b7614e98 100644 (file)
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -870,9 +870,7 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
        case SHM_LOCK:
        case SHM_UNLOCK:
        {
-               struct file *uninitialized_var(shm_file);
-
-               lru_add_drain_all();  /* drain pagevecs to lru lists */
+               struct file *shm_file;
 
                shp = shm_lock_check(ns, shmid);
                if (IS_ERR(shp)) {
@@ -895,22 +893,31 @@ SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
                err = security_shm_shmctl(shp, cmd);
                if (err)
                        goto out_unlock;
-               
-               if(cmd==SHM_LOCK) {
+
+               shm_file = shp->shm_file;
+               if (is_file_hugepages(shm_file))
+                       goto out_unlock;
+
+               if (cmd == SHM_LOCK) {
                        struct user_struct *user = current_user();
-                       if (!is_file_hugepages(shp->shm_file)) {
-                               err = shmem_lock(shp->shm_file, 1, user);
-                               if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){
-                                       shp->shm_perm.mode |= SHM_LOCKED;
-                                       shp->mlock_user = user;
-                               }
+                       err = shmem_lock(shm_file, 1, user);
+                       if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
+                               shp->shm_perm.mode |= SHM_LOCKED;
+                               shp->mlock_user = user;
                        }
-               } else if (!is_file_hugepages(shp->shm_file)) {
-                       shmem_lock(shp->shm_file, 0, shp->mlock_user);
-                       shp->shm_perm.mode &= ~SHM_LOCKED;
-                       shp->mlock_user = NULL;
+                       goto out_unlock;
                }
+
+               /* SHM_UNLOCK */
+               if (!(shp->shm_perm.mode & SHM_LOCKED))
+                       goto out_unlock;
+               shmem_lock(shm_file, 0, shp->mlock_user);
+               shp->shm_perm.mode &= ~SHM_LOCKED;
+               shp->mlock_user = NULL;
+               get_file(shm_file);
                shm_unlock(shp);
+               shmem_unlock_mapping(shm_file->f_mapping);
+               fput(shm_file);
                goto out;
        }
        case IPC_RMID:
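The rewritten SHM_LOCK/SHM_UNLOCK path pins shm_file with get_file(), drops the IPC lock, and only then calls shmem_unlock_mapping() on the file's mapping, instead of draining pagevecs globally before taking the lock. The user-visible interface is unchanged; a minimal sketch of the syscalls this path serves, using an arbitrary 1 MiB segment (SHM_LOCK normally needs CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK):

/* Sketch: create a SysV shm segment, lock it, unlock it, and remove it. */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);

	if (id < 0) {
		perror("shmget");
		return 1;
	}
	if (shmctl(id, SHM_LOCK, NULL) != 0)
		perror("shmctl(SHM_LOCK)");	/* exercises the rewritten path */
	if (shmctl(id, SHM_UNLOCK, NULL) != 0)
		perror("shmctl(SHM_UNLOCK)");
	if (shmctl(id, IPC_RMID, NULL) != 0)
		perror("shmctl(IPC_RMID)");
	return 0;
}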
index f70396e5a24b8cef95d994ed612aca85beaaac92..2d9de86b7e767f518e59183a98bf4c04e0038813 100644 (file)
@@ -23,6 +23,7 @@ CFLAGS_REMOVE_irq_work.o = -pg
 endif
 
 obj-y += sched/
+obj-y += power/
 
 obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
@@ -52,8 +53,6 @@ obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
 obj-$(CONFIG_UID16) += uid16.o
 obj-$(CONFIG_MODULES) += module.o
 obj-$(CONFIG_KALLSYMS) += kallsyms.o
-obj-$(CONFIG_PM) += power/
-obj-$(CONFIG_FREEZER) += power/
 obj-$(CONFIG_BSD_PROCESS_ACCT) += acct.o
 obj-$(CONFIG_KEXEC) += kexec.o
 obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
index 57e3f5107937f89951be2482040e294bee659bf5..bb0eb5bb9a0a8761286dfc29cd1aa5b8587e2801 100644 (file)
@@ -631,7 +631,7 @@ static int audit_log_common_recv_msg(struct audit_buffer **ab, u16 msg_type,
        }
 
        *ab = audit_log_start(NULL, GFP_KERNEL, msg_type);
-       audit_log_format(*ab, "user pid=%d uid=%u auid=%u ses=%u",
+       audit_log_format(*ab, "pid=%d uid=%u auid=%u ses=%u",
                         pid, uid, auid, ses);
        if (sid) {
                rc = security_secid_to_secctx(sid, &ctx, &len);
@@ -1423,7 +1423,7 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
        char *p, *pathname;
 
        if (prefix)
-               audit_log_format(ab, " %s", prefix);
+               audit_log_format(ab, "%s", prefix);
 
        /* We will allow 11 spaces for ' (deleted)' to be appended */
        pathname = kmalloc(PATH_MAX+11, ab->gfp_mask);
index 91e7071c4d2c4f06376e7ad5c485a2aa98294edd..81676680337158e20ce6077a522b64cdfa15f59f 100644 (file)
@@ -36,12 +36,8 @@ enum audit_state {
        AUDIT_DISABLED,         /* Do not create per-task audit_context.
                                 * No syscall-specific audit records can
                                 * be generated. */
-       AUDIT_SETUP_CONTEXT,    /* Create the per-task audit_context,
-                                * but don't necessarily fill it in at
-                                * syscall entry time (i.e., filter
-                                * instead). */
        AUDIT_BUILD_CONTEXT,    /* Create the per-task audit_context,
-                                * and always fill it in at syscall
+                                * and fill it in at syscall
                                 * entry time.  This makes a full
                                 * syscall record available if some
                                 * other part of the kernel decides it
index f8277c80d678bfeaefb74ad02805b0a4a2cebaa2..a6c3f1abd206c9d9736cbe5834483e36fd1d62ff 100644 (file)
@@ -235,13 +235,15 @@ static inline struct audit_entry *audit_to_entry_common(struct audit_rule *rule)
        switch(listnr) {
        default:
                goto exit_err;
-       case AUDIT_FILTER_USER:
-       case AUDIT_FILTER_TYPE:
 #ifdef CONFIG_AUDITSYSCALL
        case AUDIT_FILTER_ENTRY:
+               if (rule->action == AUDIT_ALWAYS)
+                       goto exit_err;
        case AUDIT_FILTER_EXIT:
        case AUDIT_FILTER_TASK:
 #endif
+       case AUDIT_FILTER_USER:
+       case AUDIT_FILTER_TYPE:
                ;
        }
        if (unlikely(rule->action == AUDIT_POSSIBLE)) {
@@ -385,7 +387,7 @@ static struct audit_entry *audit_rule_to_entry(struct audit_rule *rule)
                                goto exit_free;
                        break;
                case AUDIT_FILETYPE:
-                       if ((f->val & ~S_IFMT) > S_IFMT)
+                       if (f->val & ~S_IFMT)
                                goto exit_free;
                        break;
                case AUDIT_INODE:
@@ -459,6 +461,8 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                case AUDIT_ARG1:
                case AUDIT_ARG2:
                case AUDIT_ARG3:
+               case AUDIT_OBJ_UID:
+               case AUDIT_OBJ_GID:
                        break;
                case AUDIT_ARCH:
                        entry->rule.arch_f = f;
@@ -522,7 +526,6 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                                goto exit_free;
                        break;
                case AUDIT_FILTERKEY:
-                       err = -EINVAL;
                        if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
                                goto exit_free;
                        str = audit_unpack_string(&bufp, &remain, f->val);
@@ -536,7 +539,11 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
                                goto exit_free;
                        break;
                case AUDIT_FILETYPE:
-                       if ((f->val & ~S_IFMT) > S_IFMT)
+                       if (f->val & ~S_IFMT)
+                               goto exit_free;
+                       break;
+               case AUDIT_FIELD_COMPARE:
+                       if (f->val > AUDIT_MAX_FIELD_COMPARE)
                                goto exit_free;
                        break;
                default:
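The tightened AUDIT_FILETYPE check requires the rule value to be exactly one of the S_IFMT file-type codes with no stray bits, which matches the rewritten audit_match_filetype() in auditsc.c that compares the value against name->mode & S_IFMT. A standalone sketch contrasting the old and new validity tests on two example values:

/* Compare the old and new AUDIT_FILETYPE validity checks on sample values.
 * S_IFREG has only S_IFMT bits set; S_IFREG|0644 carries permission bits too. */
#include <stdio.h>
#include <sys/stat.h>

static void check(unsigned int val)
{
	int old_ok = ((val & ~S_IFMT) <= S_IFMT);	/* old test (too lax) */
	int new_ok = ((val & ~S_IFMT) == 0);		/* new test */

	printf("val=%#o  old=%s  new=%s\n", val,
	       old_ok ? "accepted" : "rejected",
	       new_ok ? "accepted" : "rejected");
}

int main(void)
{
	check(S_IFREG);		/* pure file-type code: accepted by both */
	check(S_IFREG | 0644);	/* stray mode bits: only the new test rejects */
	return 0;
}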
index e7fe2b0d29b3cd0676caccc6af116a76afee947e..af1de0f34eaed8dbf3dfb0057cd5c200da70b47a 100644 (file)
 
 #include "audit.h"
 
+/* flags stating the success for a syscall */
+#define AUDITSC_INVALID 0
+#define AUDITSC_SUCCESS 1
+#define AUDITSC_FAILURE 2
+
 /* AUDIT_NAMES is the number of slots we reserve in the audit_context
- * for saving names from getname(). */
-#define AUDIT_NAMES    20
+ * for saving names from getname().  If we get more names we allocate
+ * them dynamically and also add them to the list anchored by names_list. */
+#define AUDIT_NAMES    5
 
 /* Indicates that audit should log the full pathname. */
 #define AUDIT_NAME_FULL -1
@@ -101,9 +107,8 @@ struct audit_cap_data {
  *
  * Further, in fs/namei.c:path_lookup() we store the inode and device. */
 struct audit_names {
+       struct list_head list;          /* audit_context->names_list */
        const char      *name;
-       int             name_len;       /* number of name's characters to log */
-       unsigned        name_put;       /* call __putname() for this name */
        unsigned long   ino;
        dev_t           dev;
        umode_t         mode;
@@ -113,6 +118,14 @@ struct audit_names {
        u32             osid;
        struct audit_cap_data fcap;
        unsigned int    fcap_ver;
+       int             name_len;       /* number of name's characters to log */
+       bool            name_put;       /* call __putname() for this name */
+       /*
+        * This audit_names entry was dynamically allocated rather than taken
+        * from the array embedded in the task's audit context, so it should
+        * be freed on syscall exit.
+        */
+       bool            should_free;
 };
 
 struct audit_aux_data {
@@ -174,8 +187,17 @@ struct audit_context {
        long                return_code;/* syscall return code */
        u64                 prio;
        int                 return_valid; /* return code is valid */
-       int                 name_count;
-       struct audit_names  names[AUDIT_NAMES];
+       /*
+        * The names_list is the list of all audit_names collected during this
+        * syscall.  The first AUDIT_NAMES entries in the names_list will
+        * actually be from the preallocated_names array for performance
+        * reasons.  Except during allocation they should never be referenced
+        * through the preallocated_names array and should only be found/used
+        * by running the names_list.
+        * by walking the names_list.
+       struct audit_names  preallocated_names[AUDIT_NAMES];
+       int                 name_count; /* total records in names_list */
+       struct list_head    names_list; /* anchor for struct audit_names->list */
        char *              filterkey;  /* key for rule that triggered record */
        struct path         pwd;
        struct audit_context *previous; /* For nested syscalls */
@@ -305,21 +327,21 @@ static int audit_match_perm(struct audit_context *ctx, int mask)
        }
 }
 
-static int audit_match_filetype(struct audit_context *ctx, int which)
+static int audit_match_filetype(struct audit_context *ctx, int val)
 {
-       unsigned index = which & ~S_IFMT;
-       umode_t mode = which & S_IFMT;
+       struct audit_names *n;
+       umode_t mode = (umode_t)val;
 
        if (unlikely(!ctx))
                return 0;
 
-       if (index >= ctx->name_count)
-               return 0;
-       if (ctx->names[index].ino == -1)
-               return 0;
-       if ((ctx->names[index].mode ^ mode) & S_IFMT)
-               return 0;
-       return 1;
+       list_for_each_entry(n, &ctx->names_list, list) {
+               if ((n->ino != -1) &&
+                   ((n->mode & S_IFMT) == mode))
+                       return 1;
+       }
+
+       return 0;
 }
 
 /*
@@ -441,6 +463,134 @@ static int match_tree_refs(struct audit_context *ctx, struct audit_tree *tree)
        return 0;
 }
 
+static int audit_compare_id(uid_t uid1,
+                           struct audit_names *name,
+                           unsigned long name_offset,
+                           struct audit_field *f,
+                           struct audit_context *ctx)
+{
+       struct audit_names *n;
+       unsigned long addr;
+       uid_t uid2;
+       int rc;
+
+       BUILD_BUG_ON(sizeof(uid_t) != sizeof(gid_t));
+
+       if (name) {
+               addr = (unsigned long)name;
+               addr += name_offset;
+
+               uid2 = *(uid_t *)addr;
+               rc = audit_comparator(uid1, f->op, uid2);
+               if (rc)
+                       return rc;
+       }
+
+       if (ctx) {
+               list_for_each_entry(n, &ctx->names_list, list) {
+                       addr = (unsigned long)n;
+                       addr += name_offset;
+
+                       uid2 = *(uid_t *)addr;
+
+                       rc = audit_comparator(uid1, f->op, uid2);
+                       if (rc)
+                               return rc;
+               }
+       }
+       return 0;
+}
+
+static int audit_field_compare(struct task_struct *tsk,
+                              const struct cred *cred,
+                              struct audit_field *f,
+                              struct audit_context *ctx,
+                              struct audit_names *name)
+{
+       switch (f->val) {
+       /* process to file object comparisons */
+       case AUDIT_COMPARE_UID_TO_OBJ_UID:
+               return audit_compare_id(cred->uid,
+                                       name, offsetof(struct audit_names, uid),
+                                       f, ctx);
+       case AUDIT_COMPARE_GID_TO_OBJ_GID:
+               return audit_compare_id(cred->gid,
+                                       name, offsetof(struct audit_names, gid),
+                                       f, ctx);
+       case AUDIT_COMPARE_EUID_TO_OBJ_UID:
+               return audit_compare_id(cred->euid,
+                                       name, offsetof(struct audit_names, uid),
+                                       f, ctx);
+       case AUDIT_COMPARE_EGID_TO_OBJ_GID:
+               return audit_compare_id(cred->egid,
+                                       name, offsetof(struct audit_names, gid),
+                                       f, ctx);
+       case AUDIT_COMPARE_AUID_TO_OBJ_UID:
+               return audit_compare_id(tsk->loginuid,
+                                       name, offsetof(struct audit_names, uid),
+                                       f, ctx);
+       case AUDIT_COMPARE_SUID_TO_OBJ_UID:
+               return audit_compare_id(cred->suid,
+                                       name, offsetof(struct audit_names, uid),
+                                       f, ctx);
+       case AUDIT_COMPARE_SGID_TO_OBJ_GID:
+               return audit_compare_id(cred->sgid,
+                                       name, offsetof(struct audit_names, gid),
+                                       f, ctx);
+       case AUDIT_COMPARE_FSUID_TO_OBJ_UID:
+               return audit_compare_id(cred->fsuid,
+                                       name, offsetof(struct audit_names, uid),
+                                       f, ctx);
+       case AUDIT_COMPARE_FSGID_TO_OBJ_GID:
+               return audit_compare_id(cred->fsgid,
+                                       name, offsetof(struct audit_names, gid),
+                                       f, ctx);
+       /* uid comparisons */
+       case AUDIT_COMPARE_UID_TO_AUID:
+               return audit_comparator(cred->uid, f->op, tsk->loginuid);
+       case AUDIT_COMPARE_UID_TO_EUID:
+               return audit_comparator(cred->uid, f->op, cred->euid);
+       case AUDIT_COMPARE_UID_TO_SUID:
+               return audit_comparator(cred->uid, f->op, cred->suid);
+       case AUDIT_COMPARE_UID_TO_FSUID:
+               return audit_comparator(cred->uid, f->op, cred->fsuid);
+       /* auid comparisons */
+       case AUDIT_COMPARE_AUID_TO_EUID:
+               return audit_comparator(tsk->loginuid, f->op, cred->euid);
+       case AUDIT_COMPARE_AUID_TO_SUID:
+               return audit_comparator(tsk->loginuid, f->op, cred->suid);
+       case AUDIT_COMPARE_AUID_TO_FSUID:
+               return audit_comparator(tsk->loginuid, f->op, cred->fsuid);
+       /* euid comparisons */
+       case AUDIT_COMPARE_EUID_TO_SUID:
+               return audit_comparator(cred->euid, f->op, cred->suid);
+       case AUDIT_COMPARE_EUID_TO_FSUID:
+               return audit_comparator(cred->euid, f->op, cred->fsuid);
+       /* suid comparisons */
+       case AUDIT_COMPARE_SUID_TO_FSUID:
+               return audit_comparator(cred->suid, f->op, cred->fsuid);
+       /* gid comparisons */
+       case AUDIT_COMPARE_GID_TO_EGID:
+               return audit_comparator(cred->gid, f->op, cred->egid);
+       case AUDIT_COMPARE_GID_TO_SGID:
+               return audit_comparator(cred->gid, f->op, cred->sgid);
+       case AUDIT_COMPARE_GID_TO_FSGID:
+               return audit_comparator(cred->gid, f->op, cred->fsgid);
+       /* egid comparisons */
+       case AUDIT_COMPARE_EGID_TO_SGID:
+               return audit_comparator(cred->egid, f->op, cred->sgid);
+       case AUDIT_COMPARE_EGID_TO_FSGID:
+               return audit_comparator(cred->egid, f->op, cred->fsgid);
+       /* sgid comparison */
+       case AUDIT_COMPARE_SGID_TO_FSGID:
+               return audit_comparator(cred->sgid, f->op, cred->fsgid);
+       default:
+               WARN(1, "Missing AUDIT_COMPARE define.  Report as a bug\n");
+               return 0;
+       }
+       return 0;
+}
+
 /* Determine if any context name data matches a rule's watch data */
 /* Compare a task_struct with an audit_rule.  Return 1 on match, 0
  * otherwise.
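audit_compare_id() leans on uid_t and gid_t having the same size (the BUILD_BUG_ON above) so that one helper can compare a subject ID against either the uid or the gid field of the supplied name and of every entry on ctx->names_list, with the field chosen by an offsetof() value passed in from audit_field_compare(). A standalone sketch of the same offset-based field access, using a simplified struct and an equality-only stand-in for audit_comparator():

/* Demonstrates the offsetof-based field selection used by audit_compare_id().
 * The struct and comparator are simplified stand-ins, not the kernel types. */
#include <stddef.h>
#include <stdio.h>

typedef unsigned int id_demo_t;

struct name_demo {
	const char *name;
	id_demo_t uid;
	id_demo_t gid;
};

static int compare_id(id_demo_t subject, const struct name_demo *n,
		      size_t field_offset)
{
	id_demo_t object = *(const id_demo_t *)((const char *)n + field_offset);

	return subject == object;	/* audit_comparator() also handles <, >, & ... */
}

int main(void)
{
	struct name_demo n = { .name = "/etc/passwd", .uid = 0, .gid = 100 };

	printf("uid match: %d\n",
	       compare_id(0, &n, offsetof(struct name_demo, uid)));
	printf("gid match: %d\n",
	       compare_id(0, &n, offsetof(struct name_demo, gid)));
	return 0;
}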
@@ -457,13 +607,14 @@ static int audit_filter_rules(struct task_struct *tsk,
                              bool task_creation)
 {
        const struct cred *cred;
-       int i, j, need_sid = 1;
+       int i, need_sid = 1;
        u32 sid;
 
        cred = rcu_dereference_check(tsk->cred, tsk == current || task_creation);
 
        for (i = 0; i < rule->field_count; i++) {
                struct audit_field *f = &rule->fields[i];
+               struct audit_names *n;
                int result = 0;
 
                switch (f->type) {
@@ -522,12 +673,14 @@ static int audit_filter_rules(struct task_struct *tsk,
                        }
                        break;
                case AUDIT_DEVMAJOR:
-                       if (name)
-                               result = audit_comparator(MAJOR(name->dev),
-                                                         f->op, f->val);
-                       else if (ctx) {
-                               for (j = 0; j < ctx->name_count; j++) {
-                                       if (audit_comparator(MAJOR(ctx->names[j].dev),  f->op, f->val)) {
+                       if (name) {
+                               if (audit_comparator(MAJOR(name->dev), f->op, f->val) ||
+                                   audit_comparator(MAJOR(name->rdev), f->op, f->val))
+                                       ++result;
+                       } else if (ctx) {
+                               list_for_each_entry(n, &ctx->names_list, list) {
+                                       if (audit_comparator(MAJOR(n->dev), f->op, f->val) ||
+                                           audit_comparator(MAJOR(n->rdev), f->op, f->val)) {
                                                ++result;
                                                break;
                                        }
@@ -535,12 +688,14 @@ static int audit_filter_rules(struct task_struct *tsk,
                        }
                        break;
                case AUDIT_DEVMINOR:
-                       if (name)
-                               result = audit_comparator(MINOR(name->dev),
-                                                         f->op, f->val);
-                       else if (ctx) {
-                               for (j = 0; j < ctx->name_count; j++) {
-                                       if (audit_comparator(MINOR(ctx->names[j].dev), f->op, f->val)) {
+                       if (name) {
+                               if (audit_comparator(MINOR(name->dev), f->op, f->val) ||
+                                   audit_comparator(MINOR(name->rdev), f->op, f->val))
+                                       ++result;
+                       } else if (ctx) {
+                               list_for_each_entry(n, &ctx->names_list, list) {
+                                       if (audit_comparator(MINOR(n->dev), f->op, f->val) ||
+                                           audit_comparator(MINOR(n->rdev), f->op, f->val)) {
                                                ++result;
                                                break;
                                        }
@@ -551,8 +706,32 @@ static int audit_filter_rules(struct task_struct *tsk,
                        if (name)
                                result = (name->ino == f->val);
                        else if (ctx) {
-                               for (j = 0; j < ctx->name_count; j++) {
-                                       if (audit_comparator(ctx->names[j].ino, f->op, f->val)) {
+                               list_for_each_entry(n, &ctx->names_list, list) {
+                                       if (audit_comparator(n->ino, f->op, f->val)) {
+                                               ++result;
+                                               break;
+                                       }
+                               }
+                       }
+                       break;
+               case AUDIT_OBJ_UID:
+                       if (name) {
+                               result = audit_comparator(name->uid, f->op, f->val);
+                       } else if (ctx) {
+                               list_for_each_entry(n, &ctx->names_list, list) {
+                                       if (audit_comparator(n->uid, f->op, f->val)) {
+                                               ++result;
+                                               break;
+                                       }
+                               }
+                       }
+                       break;
+               case AUDIT_OBJ_GID:
+                       if (name) {
+                               result = audit_comparator(name->gid, f->op, f->val);
+                       } else if (ctx) {
+                               list_for_each_entry(n, &ctx->names_list, list) {
+                                       if (audit_comparator(n->gid, f->op, f->val)) {
                                                ++result;
                                                break;
                                        }
@@ -607,11 +786,10 @@ static int audit_filter_rules(struct task_struct *tsk,
                                                   name->osid, f->type, f->op,
                                                   f->lsm_rule, ctx);
                                } else if (ctx) {
-                                       for (j = 0; j < ctx->name_count; j++) {
-                                               if (security_audit_rule_match(
-                                                     ctx->names[j].osid,
-                                                     f->type, f->op,
-                                                     f->lsm_rule, ctx)) {
+                                       list_for_each_entry(n, &ctx->names_list, list) {
+                                               if (security_audit_rule_match(n->osid, f->type,
+                                                                             f->op, f->lsm_rule,
+                                                                             ctx)) {
                                                        ++result;
                                                        break;
                                                }
@@ -643,8 +821,10 @@ static int audit_filter_rules(struct task_struct *tsk,
                case AUDIT_FILETYPE:
                        result = audit_match_filetype(ctx, f->val);
                        break;
+               case AUDIT_FIELD_COMPARE:
+                       result = audit_field_compare(tsk, cred, f, ctx, name);
+                       break;
                }
-
                if (!result)
                        return 0;
        }
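The AUDIT_DEVMAJOR/AUDIT_DEVMINOR cases now match a rule against both the device holding the inode (dev) and, for device nodes, the node's own device number (rdev), walking names_list instead of the old fixed array. The distinction between the two numbers is the same one stat(2) exposes; a quick sketch printing both for a path such as /dev/null:

/* Print st_dev (filesystem device) and st_rdev (device-node number) for a
 * path, the two values the DEVMAJOR/DEVMINOR filters now both consider. */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/dev/null";
	struct stat st;

	if (stat(path, &st) != 0) {
		perror("stat");
		return 1;
	}
	printf("%s: dev=%u:%u rdev=%u:%u\n", path,
	       major(st.st_dev), minor(st.st_dev),
	       major(st.st_rdev), minor(st.st_rdev));
	return 0;
}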
@@ -722,40 +902,53 @@ static enum audit_state audit_filter_syscall(struct task_struct *tsk,
        return AUDIT_BUILD_CONTEXT;
 }
 
-/* At syscall exit time, this filter is called if any audit_names[] have been
+/*
+ * Given an audit_names entry, check the inode hash table to see if it matches.
+ * Called holding the RCU read lock to protect the use of audit_inode_hash.
+ */
+static int audit_filter_inode_name(struct task_struct *tsk,
+                                  struct audit_names *n,
+                                  struct audit_context *ctx)
+{
+       int word, bit;
+       int h = audit_hash_ino((u32)n->ino);
+       struct list_head *list = &audit_inode_hash[h];
+       struct audit_entry *e;
+       enum audit_state state;
+
+       word = AUDIT_WORD(ctx->major);
+       bit  = AUDIT_BIT(ctx->major);
+
+       if (list_empty(list))
+               return 0;
+
+       list_for_each_entry_rcu(e, list, list) {
+               if ((e->rule.mask[word] & bit) == bit &&
+                   audit_filter_rules(tsk, &e->rule, ctx, n, &state, false)) {
+                       ctx->current_state = state;
+                       return 1;
+               }
+       }
+
+       return 0;
+}
+
+/* At syscall exit time, this filter is called if any audit_names have been
  * collected during syscall processing.  We only check rules in sublists at hash
- * buckets applicable to the inode numbers in audit_names[].
+ * buckets applicable to the inode numbers in audit_names.
  * Regarding audit_state, same rules apply as for audit_filter_syscall().
  */
 void audit_filter_inodes(struct task_struct *tsk, struct audit_context *ctx)
 {
-       int i;
-       struct audit_entry *e;
-       enum audit_state state;
+       struct audit_names *n;
 
        if (audit_pid && tsk->tgid == audit_pid)
                return;
 
        rcu_read_lock();
-       for (i = 0; i < ctx->name_count; i++) {
-               int word = AUDIT_WORD(ctx->major);
-               int bit  = AUDIT_BIT(ctx->major);
-               struct audit_names *n = &ctx->names[i];
-               int h = audit_hash_ino((u32)n->ino);
-               struct list_head *list = &audit_inode_hash[h];
-
-               if (list_empty(list))
-                       continue;
 
-               list_for_each_entry_rcu(e, list, list) {
-                       if ((e->rule.mask[word] & bit) == bit &&
-                           audit_filter_rules(tsk, &e->rule, ctx, n,
-                                              &state, false)) {
-                               rcu_read_unlock();
-                               ctx->current_state = state;
-                               return;
-                       }
-               }
+       list_for_each_entry(n, &ctx->names_list, list) {
+               if (audit_filter_inode_name(tsk, n, ctx))
+                       break;
        }
        rcu_read_unlock();
 }
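audit_filter_inode_name() keeps the per-syscall gating from the loop it replaces: each rule carries a syscall bitmap, and the current syscall number (ctx->major) selects a 32-bit word and a bit within it. A standalone sketch of that bitmap test, with AUDIT_WORD()/AUDIT_BIT() reproduced under the assumption that they match the usual UAPI definitions (nr / 32 and 1 << (nr % 32)):

/* Bitmap test used to decide whether a rule applies to a given syscall. */
#include <stdio.h>

#define AUDIT_BITMASK_SIZE 64
#define AUDIT_WORD(nr)	((unsigned int)((nr) / 32))
#define AUDIT_BIT(nr)	(1U << ((nr) % 32))

struct rule_demo {
	unsigned int mask[AUDIT_BITMASK_SIZE];
};

static int rule_matches_syscall(const struct rule_demo *r, int major)
{
	unsigned int word = AUDIT_WORD(major);
	unsigned int bit = AUDIT_BIT(major);

	return (r->mask[word] & bit) == bit;
}

int main(void)
{
	struct rule_demo r = { { 0 } };
	int openat_nr = 257;	/* example: openat on x86_64 */

	r.mask[AUDIT_WORD(openat_nr)] |= AUDIT_BIT(openat_nr);

	printf("matches openat: %d\n", rule_matches_syscall(&r, openat_nr));
	printf("matches nr 0:   %d\n", rule_matches_syscall(&r, 0));
	return 0;
}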
@@ -766,7 +959,7 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk,
 {
        struct audit_context *context = tsk->audit_context;
 
-       if (likely(!context))
+       if (!context)
                return NULL;
        context->return_valid = return_valid;
 
@@ -799,7 +992,7 @@ static inline struct audit_context *audit_get_context(struct task_struct *tsk,
 
 static inline void audit_free_names(struct audit_context *context)
 {
-       int i;
+       struct audit_names *n, *next;
 
 #if AUDIT_DEBUG == 2
        if (context->put_count + context->ino_count != context->name_count) {
@@ -810,10 +1003,9 @@ static inline void audit_free_names(struct audit_context *context)
                       context->serial, context->major, context->in_syscall,
                       context->name_count, context->put_count,
                       context->ino_count);
-               for (i = 0; i < context->name_count; i++) {
+               list_for_each_entry(n, &context->names_list, list) {
                        printk(KERN_ERR "names[%d] = %p = %s\n", i,
-                              context->names[i].name,
-                              context->names[i].name ?: "(null)");
+                              n->name, n->name ?: "(null)");
                }
                dump_stack();
                return;
@@ -824,9 +1016,12 @@ static inline void audit_free_names(struct audit_context *context)
        context->ino_count  = 0;
 #endif
 
-       for (i = 0; i < context->name_count; i++) {
-               if (context->names[i].name && context->names[i].name_put)
-                       __putname(context->names[i].name);
+       list_for_each_entry_safe(n, next, &context->names_list, list) {
+               list_del(&n->list);
+               if (n->name && n->name_put)
+                       __putname(n->name);
+               if (n->should_free)
+                       kfree(n);
        }
        context->name_count = 0;
        path_put(&context->pwd);
@@ -864,6 +1059,7 @@ static inline struct audit_context *audit_alloc_context(enum audit_state state)
                return NULL;
        audit_zero_context(context, state);
        INIT_LIST_HEAD(&context->killed_trees);
+       INIT_LIST_HEAD(&context->names_list);
        return context;
 }
 
@@ -886,7 +1082,7 @@ int audit_alloc(struct task_struct *tsk)
                return 0; /* Return if not auditing. */
 
        state = audit_filter_task(tsk, &key);
-       if (likely(state == AUDIT_DISABLED))
+       if (state == AUDIT_DISABLED)
                return 0;
 
        if (!(context = audit_alloc_context(state))) {
@@ -975,7 +1171,7 @@ static void audit_log_task_info(struct audit_buffer *ab, struct task_struct *tsk
                while (vma) {
                        if ((vma->vm_flags & VM_EXECUTABLE) &&
                            vma->vm_file) {
-                               audit_log_d_path(ab, "exe=",
+                               audit_log_d_path(ab, " exe=",
                                                 &vma->vm_file->f_path);
                                break;
                        }
@@ -1166,8 +1362,8 @@ static void audit_log_execve_info(struct audit_context *context,
                                  struct audit_buffer **ab,
                                  struct audit_aux_data_execve *axi)
 {
-       int i;
-       size_t len, len_sent = 0;
+       int i, len;
+       size_t len_sent = 0;
        const char __user *p;
        char *buf;
 
@@ -1324,6 +1520,68 @@ static void show_special(struct audit_context *context, int *call_panic)
        audit_log_end(ab);
 }
 
+static void audit_log_name(struct audit_context *context, struct audit_names *n,
+                          int record_num, int *call_panic)
+{
+       struct audit_buffer *ab;
+       ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
+       if (!ab)
+               return; /* audit_panic has been called */
+
+       audit_log_format(ab, "item=%d", record_num);
+
+       if (n->name) {
+               switch (n->name_len) {
+               case AUDIT_NAME_FULL:
+                       /* log the full path */
+                       audit_log_format(ab, " name=");
+                       audit_log_untrustedstring(ab, n->name);
+                       break;
+               case 0:
+                       /* name was specified as a relative path and the
+                        * directory component is the cwd */
+                       audit_log_d_path(ab, " name=", &context->pwd);
+                       break;
+               default:
+                       /* log the name's directory component */
+                       audit_log_format(ab, " name=");
+                       audit_log_n_untrustedstring(ab, n->name,
+                                                   n->name_len);
+               }
+       } else
+               audit_log_format(ab, " name=(null)");
+
+       if (n->ino != (unsigned long)-1) {
+               audit_log_format(ab, " inode=%lu"
+                                " dev=%02x:%02x mode=%#ho"
+                                " ouid=%u ogid=%u rdev=%02x:%02x",
+                                n->ino,
+                                MAJOR(n->dev),
+                                MINOR(n->dev),
+                                n->mode,
+                                n->uid,
+                                n->gid,
+                                MAJOR(n->rdev),
+                                MINOR(n->rdev));
+       }
+       if (n->osid != 0) {
+               char *ctx = NULL;
+               u32 len;
+               if (security_secid_to_secctx(
+                       n->osid, &ctx, &len)) {
+                       audit_log_format(ab, " osid=%u", n->osid);
+                       *call_panic = 2;
+               } else {
+                       audit_log_format(ab, " obj=%s", ctx);
+                       security_release_secctx(ctx, len);
+               }
+       }
+
+       audit_log_fcaps(ab, n);
+
+       audit_log_end(ab);
+}
+
 static void audit_log_exit(struct audit_context *context, struct task_struct *tsk)
 {
        const struct cred *cred;
@@ -1331,6 +1589,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
        struct audit_buffer *ab;
        struct audit_aux_data *aux;
        const char *tty;
+       struct audit_names *n;
 
        /* tsk == current */
        context->pid = tsk->pid;
@@ -1466,70 +1725,14 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
        if (context->pwd.dentry && context->pwd.mnt) {
                ab = audit_log_start(context, GFP_KERNEL, AUDIT_CWD);
                if (ab) {
-                       audit_log_d_path(ab, "cwd=", &context->pwd);
+                       audit_log_d_path(ab, " cwd=", &context->pwd);
                        audit_log_end(ab);
                }
        }
-       for (i = 0; i < context->name_count; i++) {
-               struct audit_names *n = &context->names[i];
 
-               ab = audit_log_start(context, GFP_KERNEL, AUDIT_PATH);
-               if (!ab)
-                       continue; /* audit_panic has been called */
-
-               audit_log_format(ab, "item=%d", i);
-
-               if (n->name) {
-                       switch(n->name_len) {
-                       case AUDIT_NAME_FULL:
-                               /* log the full path */
-                               audit_log_format(ab, " name=");
-                               audit_log_untrustedstring(ab, n->name);
-                               break;
-                       case 0:
-                               /* name was specified as a relative path and the
-                                * directory component is the cwd */
-                               audit_log_d_path(ab, "name=", &context->pwd);
-                               break;
-                       default:
-                               /* log the name's directory component */
-                               audit_log_format(ab, " name=");
-                               audit_log_n_untrustedstring(ab, n->name,
-                                                           n->name_len);
-                       }
-               } else
-                       audit_log_format(ab, " name=(null)");
-
-               if (n->ino != (unsigned long)-1) {
-                       audit_log_format(ab, " inode=%lu"
-                                        " dev=%02x:%02x mode=%#ho"
-                                        " ouid=%u ogid=%u rdev=%02x:%02x",
-                                        n->ino,
-                                        MAJOR(n->dev),
-                                        MINOR(n->dev),
-                                        n->mode,
-                                        n->uid,
-                                        n->gid,
-                                        MAJOR(n->rdev),
-                                        MINOR(n->rdev));
-               }
-               if (n->osid != 0) {
-                       char *ctx = NULL;
-                       u32 len;
-                       if (security_secid_to_secctx(
-                               n->osid, &ctx, &len)) {
-                               audit_log_format(ab, " osid=%u", n->osid);
-                               call_panic = 2;
-                       } else {
-                               audit_log_format(ab, " obj=%s", ctx);
-                               security_release_secctx(ctx, len);
-                       }
-               }
-
-               audit_log_fcaps(ab, n);
-
-               audit_log_end(ab);
-       }
+       i = 0;
+       list_for_each_entry(n, &context->names_list, list)
+               audit_log_name(context, n, i++, &call_panic);
 
        /* Send end of event record to help user space know we are finished */
        ab = audit_log_start(context, GFP_KERNEL, AUDIT_EOE);
@@ -1545,12 +1748,12 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
  *
  * Called from copy_process and do_exit
  */
-void audit_free(struct task_struct *tsk)
+void __audit_free(struct task_struct *tsk)
 {
        struct audit_context *context;
 
        context = audit_get_context(tsk, 0, 0);
-       if (likely(!context))
+       if (!context)
                return;
 
        /* Check for system calls that do not go through the exit
@@ -1583,7 +1786,7 @@ void audit_free(struct task_struct *tsk)
  * will only be written if another part of the kernel requests that it
  * be written).
  */
-void audit_syscall_entry(int arch, int major,
+void __audit_syscall_entry(int arch, int major,
                         unsigned long a1, unsigned long a2,
                         unsigned long a3, unsigned long a4)
 {
@@ -1591,7 +1794,7 @@ void audit_syscall_entry(int arch, int major,
        struct audit_context *context = tsk->audit_context;
        enum audit_state     state;
 
-       if (unlikely(!context))
+       if (!context)
                return;
 
        /*
@@ -1648,7 +1851,7 @@ void audit_syscall_entry(int arch, int major,
                context->prio = 0;
                state = audit_filter_syscall(tsk, context, &audit_filter_list[AUDIT_FILTER_ENTRY]);
        }
-       if (likely(state == AUDIT_DISABLED))
+       if (state == AUDIT_DISABLED)
                return;
 
        context->serial     = 0;
@@ -1658,45 +1861,29 @@ void audit_syscall_entry(int arch, int major,
        context->ppid       = 0;
 }
 
-void audit_finish_fork(struct task_struct *child)
-{
-       struct audit_context *ctx = current->audit_context;
-       struct audit_context *p = child->audit_context;
-       if (!p || !ctx)
-               return;
-       if (!ctx->in_syscall || ctx->current_state != AUDIT_RECORD_CONTEXT)
-               return;
-       p->arch = ctx->arch;
-       p->major = ctx->major;
-       memcpy(p->argv, ctx->argv, sizeof(ctx->argv));
-       p->ctime = ctx->ctime;
-       p->dummy = ctx->dummy;
-       p->in_syscall = ctx->in_syscall;
-       p->filterkey = kstrdup(ctx->filterkey, GFP_KERNEL);
-       p->ppid = current->pid;
-       p->prio = ctx->prio;
-       p->current_state = ctx->current_state;
-}
-
 /**
  * audit_syscall_exit - deallocate audit context after a system call
- * @valid: success/failure flag
- * @return_code: syscall return value
+ * @success: success value of the syscall
+ * @return_code: return value of the syscall
  *
  * Tear down after system call.  If the audit context has been marked as
  * auditable (either because of the AUDIT_RECORD_CONTEXT state from
- * filtering, or because some other part of the kernel write an audit
+ * filtering, or because some other part of the kernel wrote an audit
  * message), then write out the syscall information.  In all cases,
  * free the names stored from getname().
  */
-void audit_syscall_exit(int valid, long return_code)
+void __audit_syscall_exit(int success, long return_code)
 {
        struct task_struct *tsk = current;
        struct audit_context *context;
 
-       context = audit_get_context(tsk, valid, return_code);
+       if (success)
+               success = AUDITSC_SUCCESS;
+       else
+               success = AUDITSC_FAILURE;
 
-       if (likely(!context))
+       context = audit_get_context(tsk, success, return_code);
+       if (!context)
                return;
 
        if (context->in_syscall && context->current_state == AUDIT_RECORD_CONTEXT)
@@ -1821,6 +2008,30 @@ retry:
 #endif
 }
 
+static struct audit_names *audit_alloc_name(struct audit_context *context)
+{
+       struct audit_names *aname;
+
+       if (context->name_count < AUDIT_NAMES) {
+               aname = &context->preallocated_names[context->name_count];
+               memset(aname, 0, sizeof(*aname));
+       } else {
+               aname = kzalloc(sizeof(*aname), GFP_NOFS);
+               if (!aname)
+                       return NULL;
+               aname->should_free = true;
+       }
+
+       aname->ino = (unsigned long)-1;
+       list_add_tail(&aname->list, &context->names_list);
+
+       context->name_count++;
+#if AUDIT_DEBUG
+       context->ino_count++;
+#endif
+       return aname;
+}
+
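audit_alloc_name() hands out entries from the per-context preallocated_names array until AUDIT_NAMES of them are in use, then falls back to kzalloc(), marking heap entries with should_free so that audit_free_names() knows which ones to kfree(). A standalone sketch of the same fixed-pool-then-heap pattern, with simplified types standing in for the kernel ones:

/* Fixed-pool-then-heap allocation, mirroring preallocated_names/should_free. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define POOL_SIZE 5	/* plays the role of AUDIT_NAMES */

struct entry {
	bool should_free;	/* true only for heap-allocated entries */
	int id;
};

struct context {
	struct entry pool[POOL_SIZE];
	int count;
};

static struct entry *alloc_entry(struct context *ctx)
{
	struct entry *e;

	if (ctx->count < POOL_SIZE) {
		e = &ctx->pool[ctx->count];
		memset(e, 0, sizeof(*e));
	} else {
		e = calloc(1, sizeof(*e));
		if (!e)
			return NULL;
		e->should_free = true;
	}
	e->id = ctx->count++;
	return e;
}

static void free_entry(struct entry *e)
{
	if (e && e->should_free)
		free(e);	/* pool entries are simply left in place */
}

int main(void)
{
	struct context ctx = { .count = 0 };
	int i;

	for (i = 0; i < POOL_SIZE + 2; i++) {
		struct entry *e = alloc_entry(&ctx);

		printf("entry %d: %s\n", e->id,
		       e->should_free ? "heap" : "pool");
		free_entry(e);
	}
	return 0;
}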
 /**
  * audit_getname - add a name to the list
  * @name: name to add
@@ -1831,9 +2042,7 @@ retry:
 void __audit_getname(const char *name)
 {
        struct audit_context *context = current->audit_context;
-
-       if (IS_ERR(name) || !name)
-               return;
+       struct audit_names *n;
 
        if (!context->in_syscall) {
 #if AUDIT_DEBUG == 2
@@ -1843,13 +2052,15 @@ void __audit_getname(const char *name)
 #endif
                return;
        }
-       BUG_ON(context->name_count >= AUDIT_NAMES);
-       context->names[context->name_count].name = name;
-       context->names[context->name_count].name_len = AUDIT_NAME_FULL;
-       context->names[context->name_count].name_put = 1;
-       context->names[context->name_count].ino  = (unsigned long)-1;
-       context->names[context->name_count].osid = 0;
-       ++context->name_count;
+
+       n = audit_alloc_name(context);
+       if (!n)
+               return;
+
+       n->name = name;
+       n->name_len = AUDIT_NAME_FULL;
+       n->name_put = true;
+
        if (!context->pwd.dentry)
                get_fs_pwd(current->fs, &context->pwd);
 }
@@ -1871,12 +2082,13 @@ void audit_putname(const char *name)
                printk(KERN_ERR "%s:%d(:%d): __putname(%p)\n",
                       __FILE__, __LINE__, context->serial, name);
                if (context->name_count) {
+                       struct audit_names *n;
                        int i;
-                       for (i = 0; i < context->name_count; i++)
+
+                       list_for_each_entry(n, &context->names_list, list)
                                printk(KERN_ERR "name[%d] = %p = %s\n", i,
-                                      context->names[i].name,
-                                      context->names[i].name ?: "(null)");
-               }
+                                      n->name, n->name ?: "(null)");
+                       }
 #endif
                __putname(name);
        }
@@ -1897,39 +2109,11 @@ void audit_putname(const char *name)
 #endif
 }
 
-static int audit_inc_name_count(struct audit_context *context,
-                               const struct inode *inode)
-{
-       if (context->name_count >= AUDIT_NAMES) {
-               if (inode)
-                       printk(KERN_DEBUG "audit: name_count maxed, losing inode data: "
-                              "dev=%02x:%02x, inode=%lu\n",
-                              MAJOR(inode->i_sb->s_dev),
-                              MINOR(inode->i_sb->s_dev),
-                              inode->i_ino);
-
-               else
-                       printk(KERN_DEBUG "name_count maxed, losing inode data\n");
-               return 1;
-       }
-       context->name_count++;
-#if AUDIT_DEBUG
-       context->ino_count++;
-#endif
-       return 0;
-}
-
-
 static inline int audit_copy_fcaps(struct audit_names *name, const struct dentry *dentry)
 {
        struct cpu_vfs_cap_data caps;
        int rc;
 
-       memset(&name->fcap.permitted, 0, sizeof(kernel_cap_t));
-       memset(&name->fcap.inheritable, 0, sizeof(kernel_cap_t));
-       name->fcap.fE = 0;
-       name->fcap_ver = 0;
-
        if (!dentry)
                return 0;
 
@@ -1969,30 +2153,25 @@ static void audit_copy_inode(struct audit_names *name, const struct dentry *dent
  */
 void __audit_inode(const char *name, const struct dentry *dentry)
 {
-       int idx;
        struct audit_context *context = current->audit_context;
        const struct inode *inode = dentry->d_inode;
+       struct audit_names *n;
 
        if (!context->in_syscall)
                return;
-       if (context->name_count
-           && context->names[context->name_count-1].name
-           && context->names[context->name_count-1].name == name)
-               idx = context->name_count - 1;
-       else if (context->name_count > 1
-                && context->names[context->name_count-2].name
-                && context->names[context->name_count-2].name == name)
-               idx = context->name_count - 2;
-       else {
-               /* FIXME: how much do we care about inodes that have no
-                * associated name? */
-               if (audit_inc_name_count(context, inode))
-                       return;
-               idx = context->name_count - 1;
-               context->names[idx].name = NULL;
+
+       list_for_each_entry_reverse(n, &context->names_list, list) {
+               if (n->name && (n->name == name))
+                       goto out;
        }
+
+       /* unable to find the name from a previous getname() */
+       n = audit_alloc_name(context);
+       if (!n)
+               return;
+out:
        handle_path(dentry);
-       audit_copy_inode(&context->names[idx], dentry, inode);
+       audit_copy_inode(n, dentry, inode);
 }
 
 /**
@@ -2011,11 +2190,11 @@ void __audit_inode(const char *name, const struct dentry *dentry)
 void __audit_inode_child(const struct dentry *dentry,
                         const struct inode *parent)
 {
-       int idx;
        struct audit_context *context = current->audit_context;
        const char *found_parent = NULL, *found_child = NULL;
        const struct inode *inode = dentry->d_inode;
        const char *dname = dentry->d_name.name;
+       struct audit_names *n;
        int dirlen = 0;
 
        if (!context->in_syscall)
@@ -2025,9 +2204,7 @@ void __audit_inode_child(const struct dentry *dentry,
                handle_one(inode);
 
        /* parent is more likely, look for it first */
-       for (idx = 0; idx < context->name_count; idx++) {
-               struct audit_names *n = &context->names[idx];
-
+       list_for_each_entry(n, &context->names_list, list) {
                if (!n->name)
                        continue;
 
@@ -2040,9 +2217,7 @@ void __audit_inode_child(const struct dentry *dentry,
        }
 
        /* no matching parent, look for matching child */
-       for (idx = 0; idx < context->name_count; idx++) {
-               struct audit_names *n = &context->names[idx];
-
+       list_for_each_entry(n, &context->names_list, list) {
                if (!n->name)
                        continue;
 
@@ -2060,34 +2235,29 @@ void __audit_inode_child(const struct dentry *dentry,
 
 add_names:
        if (!found_parent) {
-               if (audit_inc_name_count(context, parent))
+               n = audit_alloc_name(context);
+               if (!n)
                        return;
-               idx = context->name_count - 1;
-               context->names[idx].name = NULL;
-               audit_copy_inode(&context->names[idx], NULL, parent);
+               audit_copy_inode(n, NULL, parent);
        }
 
        if (!found_child) {
-               if (audit_inc_name_count(context, inode))
+               n = audit_alloc_name(context);
+               if (!n)
                        return;
-               idx = context->name_count - 1;
 
                /* Re-use the name belonging to the slot for a matching parent
                 * directory. All names for this context are relinquished in
                 * audit_free_names() */
                if (found_parent) {
-                       context->names[idx].name = found_parent;
-                       context->names[idx].name_len = AUDIT_NAME_FULL;
+                       n->name = found_parent;
+                       n->name_len = AUDIT_NAME_FULL;
                        /* don't call __putname() */
-                       context->names[idx].name_put = 0;
-               } else {
-                       context->names[idx].name = NULL;
+                       n->name_put = false;
                }
 
                if (inode)
-                       audit_copy_inode(&context->names[idx], NULL, inode);
-               else
-                       context->names[idx].ino = (unsigned long)-1;
+                       audit_copy_inode(n, NULL, inode);
        }
 }
 EXPORT_SYMBOL_GPL(__audit_inode_child);
@@ -2121,19 +2291,28 @@ int auditsc_get_stamp(struct audit_context *ctx,
 static atomic_t session_id = ATOMIC_INIT(0);
 
 /**
- * audit_set_loginuid - set a task's audit_context loginuid
- * @task: task whose audit context is being modified
+ * audit_set_loginuid - set current task's audit_context loginuid
  * @loginuid: loginuid value
  *
  * Returns 0 on success, -EPERM if the loginuid may not be changed.
  *
  * Called (set) from fs/proc/base.c::proc_loginuid_write().
  */
-int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
+int audit_set_loginuid(uid_t loginuid)
 {
-       unsigned int sessionid = atomic_inc_return(&session_id);
+       struct task_struct *task = current;
        struct audit_context *context = task->audit_context;
+       unsigned int sessionid;
+
+#ifdef CONFIG_AUDIT_LOGINUID_IMMUTABLE
+       if (task->loginuid != -1)
+               return -EPERM;
+#else /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
+       if (!capable(CAP_AUDIT_CONTROL))
+               return -EPERM;
+#endif  /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
 
+       sessionid = atomic_inc_return(&session_id);
        if (context && context->in_syscall) {
                struct audit_buffer *ab;
 
@@ -2271,14 +2450,11 @@ void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mo
        context->ipc.has_perm = 1;
 }
 
-int audit_bprm(struct linux_binprm *bprm)
+int __audit_bprm(struct linux_binprm *bprm)
 {
        struct audit_aux_data_execve *ax;
        struct audit_context *context = current->audit_context;
 
-       if (likely(!audit_enabled || !context || context->dummy))
-               return 0;
-
        ax = kmalloc(sizeof(*ax), GFP_KERNEL);
        if (!ax)
                return -ENOMEM;
@@ -2299,13 +2475,10 @@ int audit_bprm(struct linux_binprm *bprm)
  * @args: args array
  *
  */
-void audit_socketcall(int nargs, unsigned long *args)
+void __audit_socketcall(int nargs, unsigned long *args)
 {
        struct audit_context *context = current->audit_context;
 
-       if (likely(!context || context->dummy))
-               return;
-
        context->type = AUDIT_SOCKETCALL;
        context->socketcall.nargs = nargs;
        memcpy(context->socketcall.args, args, nargs * sizeof(unsigned long));
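
In this and the neighbouring hunks, audit_bprm(), audit_socketcall() and audit_sockaddr() lose their dummy-context fast paths as they become double-underscore helpers; the check presumably moves into static inline wrappers in include/linux/audit.h, which this commitdiff does not show. A hedged sketch of one such wrapper, assuming audit_dummy_context() keeps its existing meaning (no context, or a dummy one):

static inline void audit_socketcall(int nargs, unsigned long *args)
{
        if (unlikely(!audit_dummy_context()))
                __audit_socketcall(nargs, args);
}
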
@@ -2331,13 +2504,10 @@ void __audit_fd_pair(int fd1, int fd2)
  *
  * Returns 0 for success or NULL context or < 0 on error.
  */
-int audit_sockaddr(int len, void *a)
+int __audit_sockaddr(int len, void *a)
 {
        struct audit_context *context = current->audit_context;
 
-       if (likely(!context || context->dummy))
-               return 0;
-
        if (!context->sockaddr) {
                void *p = kmalloc(sizeof(struct sockaddr_storage), GFP_KERNEL);
                if (!p)
@@ -2499,6 +2669,25 @@ void __audit_mmap_fd(int fd, int flags)
        context->type = AUDIT_MMAP;
 }
 
+static void audit_log_abend(struct audit_buffer *ab, char *reason, long signr)
+{
+       uid_t auid, uid;
+       gid_t gid;
+       unsigned int sessionid;
+
+       auid = audit_get_loginuid(current);
+       sessionid = audit_get_sessionid(current);
+       current_uid_gid(&uid, &gid);
+
+       audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
+                        auid, uid, gid, sessionid);
+       audit_log_task_context(ab);
+       audit_log_format(ab, " pid=%d comm=", current->pid);
+       audit_log_untrustedstring(ab, current->comm);
+       audit_log_format(ab, " reason=");
+       audit_log_string(ab, reason);
+       audit_log_format(ab, " sig=%ld", signr);
+}
 /**
  * audit_core_dumps - record information about processes that end abnormally
  * @signr: signal value
@@ -2509,10 +2698,6 @@ void __audit_mmap_fd(int fd, int flags)
 void audit_core_dumps(long signr)
 {
        struct audit_buffer *ab;
-       u32 sid;
-       uid_t auid = audit_get_loginuid(current), uid;
-       gid_t gid;
-       unsigned int sessionid = audit_get_sessionid(current);
 
        if (!audit_enabled)
                return;
@@ -2521,24 +2706,17 @@ void audit_core_dumps(long signr)
                return;
 
        ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
-       current_uid_gid(&uid, &gid);
-       audit_log_format(ab, "auid=%u uid=%u gid=%u ses=%u",
-                        auid, uid, gid, sessionid);
-       security_task_getsecid(current, &sid);
-       if (sid) {
-               char *ctx = NULL;
-               u32 len;
+       audit_log_abend(ab, "memory violation", signr);
+       audit_log_end(ab);
+}
 
-               if (security_secid_to_secctx(sid, &ctx, &len))
-                       audit_log_format(ab, " ssid=%u", sid);
-               else {
-                       audit_log_format(ab, " subj=%s", ctx);
-                       security_release_secctx(ctx, len);
-               }
-       }
-       audit_log_format(ab, " pid=%d comm=", current->pid);
-       audit_log_untrustedstring(ab, current->comm);
-       audit_log_format(ab, " sig=%ld", signr);
+void __audit_seccomp(unsigned long syscall)
+{
+       struct audit_buffer *ab;
+
+       ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_ANOM_ABEND);
+       audit_log_abend(ab, "seccomp", SIGKILL);
+       audit_log_format(ab, " syscall=%ld", syscall);
        audit_log_end(ab);
 }
 
index 0fcf1c14a297c57d7cda541b71438441dd1adbde..3f1adb6c647015d80aa6b5b138f118fde7484d11 100644 (file)
@@ -384,7 +384,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
                BUG();
        }
 
-       if (has_ns_capability(current, ns, cap)) {
+       if (security_capable(current_cred(), ns, cap) == 0) {
                current->flags |= PF_SUPERPRIV;
                return true;
        }
index 057e24b665cf7f5633bc43b752202af38c14d413..6581a040f39926dd46878640413bfd180922415a 100644 (file)
@@ -115,8 +115,6 @@ int get_callchain_buffers(void)
        }
 
        err = alloc_callchain_buffers();
-       if (err)
-               release_callchain_buffers();
 exit:
        mutex_unlock(&callchain_mutex);
 
index a8f4ac001a00796da1621b8363cc9bdc9bd2d474..ba36013cfb21db82a7be6587f32434c4d4e173aa 100644 (file)
@@ -815,7 +815,7 @@ static void update_event_times(struct perf_event *event)
         * here.
         */
        if (is_cgroup_event(event))
-               run_end = perf_event_time(event);
+               run_end = perf_cgroup_event_time(event);
        else if (ctx->is_active)
                run_end = ctx->time;
        else
@@ -2300,6 +2300,9 @@ do {                                      \
        return div64_u64(dividend, divisor);
 }
 
+static DEFINE_PER_CPU(int, perf_throttled_count);
+static DEFINE_PER_CPU(u64, perf_throttled_seq);
+
 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 {
        struct hw_perf_event *hwc = &event->hw;
@@ -2325,16 +2328,29 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
        }
 }
 
-static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
+/*
+ * combine freq adjustment with unthrottling to avoid two passes over the
+ * events. At the same time, make sure that having freq events does not change
+ * the rate of unthrottling as that would introduce bias.
+ */
+static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
+                                          int needs_unthr)
 {
        struct perf_event *event;
        struct hw_perf_event *hwc;
-       u64 interrupts, now;
+       u64 now, period = TICK_NSEC;
        s64 delta;
 
-       if (!ctx->nr_freq)
+       /*
+        * only need to iterate over all events iff:
+        * - the context has events in frequency mode (needs freq adjust)
+        * - there are events to unthrottle on this cpu
+        */
+       if (!(ctx->nr_freq || needs_unthr))
                return;
 
+       raw_spin_lock(&ctx->lock);
+
        list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
                if (event->state != PERF_EVENT_STATE_ACTIVE)
                        continue;
@@ -2344,13 +2360,8 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
 
                hwc = &event->hw;
 
-               interrupts = hwc->interrupts;
-               hwc->interrupts = 0;
-
-               /*
-                * unthrottle events on the tick
-                */
-               if (interrupts == MAX_INTERRUPTS) {
+               if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
+                       hwc->interrupts = 0;
                        perf_log_throttle(event, 1);
                        event->pmu->start(event, 0);
                }
@@ -2358,14 +2369,26 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx, u64 period)
                if (!event->attr.freq || !event->attr.sample_freq)
                        continue;
 
-               event->pmu->read(event);
+               /*
+                * stop the event and update event->count
+                */
+               event->pmu->stop(event, PERF_EF_UPDATE);
+
                now = local64_read(&event->count);
                delta = now - hwc->freq_count_stamp;
                hwc->freq_count_stamp = now;
 
+               /*
+                * restart the event
+                * reload only if value has changed
+                */
                if (delta > 0)
                        perf_adjust_period(event, period, delta);
+
+               event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
        }
+
+       raw_spin_unlock(&ctx->lock);
 }
 
 /*
@@ -2388,16 +2411,13 @@ static void rotate_ctx(struct perf_event_context *ctx)
  */
 static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
-       u64 interval = (u64)cpuctx->jiffies_interval * TICK_NSEC;
        struct perf_event_context *ctx = NULL;
-       int rotate = 0, remove = 1, freq = 0;
+       int rotate = 0, remove = 1;
 
        if (cpuctx->ctx.nr_events) {
                remove = 0;
                if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
                        rotate = 1;
-               if (cpuctx->ctx.nr_freq)
-                       freq = 1;
        }
 
        ctx = cpuctx->task_ctx;
@@ -2405,37 +2425,26 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
                remove = 0;
                if (ctx->nr_events != ctx->nr_active)
                        rotate = 1;
-               if (ctx->nr_freq)
-                       freq = 1;
        }
 
-       if (!rotate && !freq)
+       if (!rotate)
                goto done;
 
        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
        perf_pmu_disable(cpuctx->ctx.pmu);
 
-       if (freq) {
-               perf_ctx_adjust_freq(&cpuctx->ctx, interval);
-               if (ctx)
-                       perf_ctx_adjust_freq(ctx, interval);
-       }
-
-       if (rotate) {
-               cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
-               if (ctx)
-                       ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+       cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+       if (ctx)
+               ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
-               rotate_ctx(&cpuctx->ctx);
-               if (ctx)
-                       rotate_ctx(ctx);
+       rotate_ctx(&cpuctx->ctx);
+       if (ctx)
+               rotate_ctx(ctx);
 
-               perf_event_sched_in(cpuctx, ctx, current);
-       }
+       perf_event_sched_in(cpuctx, ctx, current);
 
        perf_pmu_enable(cpuctx->ctx.pmu);
        perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
-
 done:
        if (remove)
                list_del_init(&cpuctx->rotation_list);
@@ -2445,10 +2454,22 @@ void perf_event_task_tick(void)
 {
        struct list_head *head = &__get_cpu_var(rotation_list);
        struct perf_cpu_context *cpuctx, *tmp;
+       struct perf_event_context *ctx;
+       int throttled;
 
        WARN_ON(!irqs_disabled());
 
+       __this_cpu_inc(perf_throttled_seq);
+       throttled = __this_cpu_xchg(perf_throttled_count, 0);
+
        list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
+               ctx = &cpuctx->ctx;
+               perf_adjust_freq_unthr_context(ctx, throttled);
+
+               ctx = cpuctx->task_ctx;
+               if (ctx)
+                       perf_adjust_freq_unthr_context(ctx, throttled);
+
                if (cpuctx->jiffies_interval == 1 ||
                                !(jiffies % cpuctx->jiffies_interval))
                        perf_rotate_context(cpuctx);
@@ -4509,6 +4530,7 @@ static int __perf_event_overflow(struct perf_event *event,
 {
        int events = atomic_read(&event->event_limit);
        struct hw_perf_event *hwc = &event->hw;
+       u64 seq;
        int ret = 0;
 
        /*
@@ -4518,14 +4540,20 @@ static int __perf_event_overflow(struct perf_event *event,
        if (unlikely(!is_sampling_event(event)))
                return 0;
 
-       if (unlikely(hwc->interrupts >= max_samples_per_tick)) {
-               if (throttle) {
+       seq = __this_cpu_read(perf_throttled_seq);
+       if (seq != hwc->interrupts_seq) {
+               hwc->interrupts_seq = seq;
+               hwc->interrupts = 1;
+       } else {
+               hwc->interrupts++;
+               if (unlikely(throttle
+                            && hwc->interrupts >= max_samples_per_tick)) {
+                       __this_cpu_inc(perf_throttled_count);
                        hwc->interrupts = MAX_INTERRUPTS;
                        perf_log_throttle(event, 0);
                        ret = 1;
                }
-       } else
-               hwc->interrupts++;
+       }
 
        if (event->attr.freq) {
                u64 now = perf_clock();
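
The sequence-stamped counter above lets the tick skip per-event resets: an event's interrupt count is only trusted if it was taken during the current throttling period. A small self-contained C model of that idea (toy values; in the real code perf_throttled_seq and perf_throttled_count are per-CPU):

#include <stdio.h>

static unsigned long throttled_seq;     /* bumped once per simulated tick */
static unsigned int max_samples_per_tick = 3;

struct hw_counter {
        unsigned long seq;              /* tick the count below belongs to */
        unsigned int count;             /* overflow interrupts seen in that tick */
};

/* On a new tick the count restarts at 1; otherwise it accumulates and the
 * event is reported as throttled once the per-tick budget is used up. */
static int overflow(struct hw_counter *c)
{
        if (c->seq != throttled_seq) {
                c->seq = throttled_seq;
                c->count = 1;
                return 0;
        }
        return ++c->count >= max_samples_per_tick;
}

int main(void)
{
        struct hw_counter c = { 0 };

        for (int tick = 0; tick < 2; tick++) {
                throttled_seq++;        /* the tick handler's job */
                for (int i = 0; i < 5; i++)
                        printf("tick %d irq %d: %s\n", tick, i,
                               overflow(&c) ? "throttled" : "sampled");
        }
        return 0;
}
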
index c44738267be770118b64203eb3cbd411d9656686..4b4042f9bc6ade78a14199eb4ce96dc4ce6236b3 100644 (file)
@@ -964,8 +964,7 @@ void do_exit(long code)
        acct_collect(code, group_dead);
        if (group_dead)
                tty_audit_exit();
-       if (unlikely(tsk->audit_context))
-               audit_free(tsk);
+       audit_free(tsk);
 
        tsk->exit_code = code;
        taskstats_exit(tsk, group_dead);
@@ -1039,6 +1038,22 @@ void do_exit(long code)
        if (tsk->nr_dirtied)
                __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
        exit_rcu();
+
+       /*
+        * The setting of TASK_RUNNING by try_to_wake_up() may be delayed
+        * when the following two conditions become true:
+        *   - there is a race condition on mmap_sem (it is acquired by
+        *     exit_mm()), and
+        *   - an SMI occurs before setting TASK_RUNNING
+        *     (or the hypervisor of a virtual machine switches to another guest).
+        * As a result, we may become TASK_RUNNING after becoming TASK_DEAD.
+        *
+        * To avoid this, we have to wait for the release of tsk->pi_lock, which
+        * is held by try_to_wake_up().
+        */
+       smp_mb();
+       raw_spin_unlock_wait(&tsk->pi_lock);
+
        /* causes final put_task_struct in finish_task_switch(). */
        tsk->state = TASK_DEAD;
        tsk->flags |= PF_NOFREEZE;      /* tell freezer to ignore us */
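
A rough userspace analogue of the wait described in the comment above, using pthreads (illustration only; raw_spin_unlock_wait() spins until the lock is free rather than acquiring it, but the ordering it buys is the same):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t pi_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int waker_in_cs;
static int state;                       /* 0 = "running", 1 = "dead" */

static void *waker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&pi_lock);
        atomic_store(&waker_in_cs, 1);  /* tell the exiting side we are mid-wakeup */
        state = 0;                      /* late TASK_RUNNING-style store */
        pthread_mutex_unlock(&pi_lock);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, waker, NULL);
        while (!atomic_load(&waker_in_cs))
                ;                       /* wait until the waker holds the lock */

        /* take and drop the waker's lock so its pending store cannot land
         * after we declare ourselves dead */
        pthread_mutex_lock(&pi_lock);
        pthread_mutex_unlock(&pi_lock);

        state = 1;                      /* now it is safe to become "dead" */
        pthread_join(t, NULL);
        printf("final state: %d\n", state);     /* always 1 */
        return 0;
}
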
index f3fa18887cc9b8d7fbde14f0e6fe36f57f791b96..1b2ef3c23ae4a7999707f2f61a86f7a7080f457a 100644 (file)
@@ -647,6 +647,26 @@ struct mm_struct *get_task_mm(struct task_struct *task)
 }
 EXPORT_SYMBOL_GPL(get_task_mm);
 
+struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
+{
+       struct mm_struct *mm;
+       int err;
+
+       err =  mutex_lock_killable(&task->signal->cred_guard_mutex);
+       if (err)
+               return ERR_PTR(err);
+
+       mm = get_task_mm(task);
+       if (mm && mm != current->mm &&
+                       !ptrace_may_access(task, mode)) {
+               mmput(mm);
+               mm = ERR_PTR(-EACCES);
+       }
+       mutex_unlock(&task->signal->cred_guard_mutex);
+
+       return mm;
+}
+
 /* Please note the differences between mmput and mm_release.
  * mmput is called whenever we stop holding onto a mm_struct,
  * error success whatever.
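
mm_access() packages the cred_guard_mutex plus ptrace_may_access() dance that callers such as /proc previously open-coded. A hypothetical caller (not part of this hunk) would use it roughly as follows; PTRACE_MODE_READ is one of the existing ptrace access modes:

/* kernel-context sketch, not a standalone program */
static int inspect_task_mm(struct task_struct *task)
{
        struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);

        if (IS_ERR_OR_NULL(mm))
                return mm ? PTR_ERR(mm) : -ESRCH;
        /* ... look at the mm ... */
        mmput(mm);
        return 0;
}
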
@@ -1527,8 +1547,6 @@ long do_fork(unsigned long clone_flags,
                        init_completion(&vfork);
                }
 
-               audit_finish_fork(p);
-
                /*
                 * We set PF_STARTING at creation in case tracing wants to
                 * use this to distinguish a fully live task from one that
index 95dd7212e610b5a3da4ce97997a88f5c67d89b2d..9788c0ec6f4378f19cd80e873d42a0aec7171548 100644 (file)
@@ -1077,6 +1077,7 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
                /* Early boot.  kretprobe_table_locks not yet initialized. */
                return;
 
+       INIT_HLIST_HEAD(&empty_rp);
        hash = hash_ptr(tk, KPROBE_HASH_BITS);
        head = &kretprobe_inst_table[hash];
        kretprobe_table_lock(hash, &flags);
@@ -1085,7 +1086,6 @@ void __kprobes kprobe_flush_task(struct task_struct *tk)
                        recycle_rp_inst(ri, &empty_rp);
        }
        kretprobe_table_unlock(hash, &flags);
-       INIT_HLIST_HEAD(&empty_rp);
        hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
                hlist_del(&ri->hlist);
                kfree(ri);
@@ -1673,8 +1673,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
                ri->rp = rp;
                ri->task = current;
 
-               if (rp->entry_handler && rp->entry_handler(ri, regs))
+               if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+                       raw_spin_lock_irqsave(&rp->lock, flags);
+                       hlist_add_head(&ri->hlist, &rp->free_instances);
+                       raw_spin_unlock_irqrestore(&rp->lock, flags);
                        return 0;
+               }
 
                arch_prepare_kretprobe(ri, regs);
 
index 0c4defe6d3b873443e927c63755e27ffa9db051b..21724eee5206f7ffe6f780ba6c0c8acb8e7a0930 100644 (file)
@@ -231,8 +231,28 @@ extern int pm_test_level;
 #ifdef CONFIG_SUSPEND_FREEZER
 static inline int suspend_freeze_processes(void)
 {
-       int error = freeze_processes();
-       return error ? : freeze_kernel_threads();
+       int error;
+
+       error = freeze_processes();
+
+       /*
+        * freeze_processes() automatically thaws every task if freezing
+        * fails. So we need not do anything extra upon error.
+        */
+       if (error)
+               goto Finish;
+
+       error = freeze_kernel_threads();
+
+       /*
+        * freeze_kernel_threads() thaws only kernel threads upon freezing
+        * failure. So we have to thaw the userspace tasks ourselves.
+        */
+       if (error)
+               thaw_processes();
+
+ Finish:
+       return error;
 }
 
 static inline void suspend_thaw_processes(void)
index 77274c9ba2f1a95862787e041254f736539a1264..7e426459e60a21633d8eeff0d9751455303bdf97 100644 (file)
@@ -143,7 +143,10 @@ int freeze_processes(void)
 /**
  * freeze_kernel_threads - Make freezable kernel threads go to the refrigerator.
  *
- * On success, returns 0.  On failure, -errno and system is fully thawed.
+ * On success, returns 0.  On failure, -errno and only the kernel threads are
+ * thawed, so as to give the caller a chance to do additional cleanup
+ * (if any) before thawing the userspace tasks. It is therefore the caller's
+ * responsibility to thaw the userspace tasks when the time is right.
  */
 int freeze_kernel_threads(void)
 {
@@ -159,7 +162,7 @@ int freeze_kernel_threads(void)
        BUG_ON(in_atomic());
 
        if (error)
-               thaw_processes();
+               thaw_kernel_threads();
        return error;
 }
 
@@ -188,3 +191,22 @@ void thaw_processes(void)
        printk("done.\n");
 }
 
+void thaw_kernel_threads(void)
+{
+       struct task_struct *g, *p;
+
+       pm_nosig_freezing = false;
+       printk("Restarting kernel threads ... ");
+
+       thaw_workqueues();
+
+       read_lock(&tasklist_lock);
+       do_each_thread(g, p) {
+               if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
+                       __thaw_task(p);
+       } while_each_thread(g, p);
+       read_unlock(&tasklist_lock);
+
+       schedule();
+       printk("done.\n");
+}
index 1cf88900ec4fdc162b6ad1250d2c4d13d3895458..6a768e537001ce653e61c080baec76fbc5570da0 100644 (file)
@@ -812,7 +812,8 @@ unsigned int snapshot_additional_pages(struct zone *zone)
        unsigned int res;
 
        res = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
-       res += DIV_ROUND_UP(res * sizeof(struct bm_block), PAGE_SIZE);
+       res += DIV_ROUND_UP(res * sizeof(struct bm_block),
+                           LINKED_PAGE_DATA_SIZE);
        return 2 * res;
 }
 
index 3739ecced085db0141557feec608bb3eccbb0c07..8742fd013a94e3b56efdbf91cf27c709679615e4 100644 (file)
@@ -773,8 +773,7 @@ static int enough_swap(unsigned int nr_pages, unsigned int flags)
 
        pr_debug("PM: Free swap pages: %u\n", free_swap);
 
-       required = PAGES_FOR_IO + ((flags & SF_NOCOMPRESS_MODE) ?
-               nr_pages : (nr_pages * LZO_CMP_PAGES) / LZO_UNC_PAGES + 1);
+       required = PAGES_FOR_IO + nr_pages;
        return free_swap > required;
 }
 
@@ -802,10 +801,12 @@ int swsusp_write(unsigned int flags)
                printk(KERN_ERR "PM: Cannot get swap writer\n");
                return error;
        }
-       if (!enough_swap(pages, flags)) {
-               printk(KERN_ERR "PM: Not enough free swap\n");
-               error = -ENOSPC;
-               goto out_finish;
+       if (flags & SF_NOCOMPRESS_MODE) {
+               if (!enough_swap(pages, flags)) {
+                       printk(KERN_ERR "PM: Not enough free swap\n");
+                       error = -ENOSPC;
+                       goto out_finish;
+               }
        }
        memset(&snapshot, 0, sizeof(struct snapshot_handle));
        error = snapshot_read_next(&snapshot);
index 6b1ab7a88522827cc1ec5cdd3f0cdfa62d56accf..3e100075b13cb6ab449034af458266e20dcaa369 100644 (file)
@@ -249,13 +249,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                }
                pm_restore_gfp_mask();
                error = hibernation_snapshot(data->platform_support);
-               if (!error) {
+               if (error) {
+                       thaw_kernel_threads();
+               } else {
                        error = put_user(in_suspend, (int __user *)arg);
                        if (!error && !freezer_test_done)
                                data->ready = 1;
                        if (freezer_test_done) {
                                freezer_test_done = false;
-                               thaw_processes();
+                               thaw_kernel_threads();
                        }
                }
                break;
@@ -274,6 +276,15 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
                swsusp_free();
                memset(&data->handle, 0, sizeof(struct snapshot_handle));
                data->ready = 0;
+               /*
+                * It is necessary to thaw kernel threads here, because
+                * SNAPSHOT_CREATE_IMAGE may be invoked directly after
+                * SNAPSHOT_FREE.  In that case, if kernel threads were not
+                * thawed, the preallocation of memory carried out by
+                * hibernation_snapshot() might run into problems (i.e. it
+                * might fail or even deadlock).
+                */
+               thaw_kernel_threads();
                break;
 
        case SNAPSHOT_PREF_IMAGE_SIZE:
index 88f17b8a3b1dac38abbdb42c6916bd52cea5f2b1..a58ac285fc69bb48d085708caf786f6f39413474 100644 (file)
@@ -56,8 +56,8 @@ static int nreaders = -1;     /* # reader threads, defaults to 2*ncpus */
 static int nfakewriters = 4;   /* # fake writer threads */
 static int stat_interval;      /* Interval between stats, in seconds. */
                                /*  Defaults to "only at end of test". */
-static int verbose;            /* Print more debug info. */
-static int test_no_idle_hz;    /* Test RCU's support for tickless idle CPUs. */
+static bool verbose;           /* Print more debug info. */
+static bool test_no_idle_hz;   /* Test RCU's support for tickless idle CPUs. */
 static int shuffle_interval = 3; /* Interval between shuffles (in sec)*/
 static int stutter = 5;                /* Start/stop testing interval (in sec) */
 static int irqreader = 1;      /* RCU readers from irq (timers). */
@@ -1399,7 +1399,7 @@ rcu_torture_shutdown(void *arg)
  * Execute random CPU-hotplug operations at the interval specified
  * by the onoff_interval.
  */
-static int
+static int __cpuinit
 rcu_torture_onoff(void *arg)
 {
        int cpu;
@@ -1447,7 +1447,7 @@ rcu_torture_onoff(void *arg)
        return 0;
 }
 
-static int
+static int __cpuinit
 rcu_torture_onoff_init(void)
 {
        if (onoff_interval <= 0)
index 6d269cce7aa13c4593540f6aa80be389e3ffc720..d508363858b3a147cf757ae06080dde035842091 100644 (file)
@@ -66,6 +66,31 @@ done:
        return ret;
 }
 
+int res_counter_charge_nofail(struct res_counter *counter, unsigned long val,
+                             struct res_counter **limit_fail_at)
+{
+       int ret, r;
+       unsigned long flags;
+       struct res_counter *c;
+
+       r = ret = 0;
+       *limit_fail_at = NULL;
+       local_irq_save(flags);
+       for (c = counter; c != NULL; c = c->parent) {
+               spin_lock(&c->lock);
+               r = res_counter_charge_locked(c, val);
+               if (r)
+                       c->usage += val;
+               spin_unlock(&c->lock);
+               if (r < 0 && ret == 0) {
+                       *limit_fail_at = c;
+                       ret = r;
+               }
+       }
+       local_irq_restore(flags);
+
+       return ret;
+}
 void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
 {
        if (WARN_ON(counter->usage < val))
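
res_counter_charge_nofail() charges every level of the hierarchy even when a limit is exceeded, and only records the first level that went over. A minimal userspace model of that behaviour (no locking, two levels, hypothetical types):

#include <stdio.h>

struct counter {
        struct counter *parent;
        unsigned long usage, limit;
};

/* Always apply the charge; remember the first counter whose limit broke. */
static int charge_nofail(struct counter *c, unsigned long val,
                         struct counter **fail_at)
{
        int ret = 0;

        *fail_at = NULL;
        for (; c; c = c->parent) {
                c->usage += val;
                if (c->usage > c->limit && !*fail_at) {
                        *fail_at = c;
                        ret = -1;
                }
        }
        return ret;
}

int main(void)
{
        struct counter root = { .limit = 100 };
        struct counter child = { .parent = &root, .limit = 10 };
        struct counter *fail;

        charge_nofail(&child, 8, &fail);
        printf("first charge fails at: %s\n", fail ? "somewhere" : "nowhere");
        charge_nofail(&child, 8, &fail);
        printf("second charge fails at: %s\n", fail == &child ? "child" : "nowhere");
        return 0;
}
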
index df00cb09263e093b2c0a9c73a20e16b6602a70ea..5255c9d2e053225173dfea134e7e243ad0e80891 100644 (file)
@@ -74,6 +74,7 @@
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
+#include <asm/mutex.h>
 #ifdef CONFIG_PARAVIRT
 #include <asm/paravirt.h>
 #endif
@@ -723,9 +724,6 @@ static void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
        p->sched_class->dequeue_task(rq, p, flags);
 }
 
-/*
- * activate_task - move a task to the runqueue.
- */
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
        if (task_contributes_to_load(p))
@@ -734,9 +732,6 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
        enqueue_task(rq, p, flags);
 }
 
-/*
- * deactivate_task - remove a task from the runqueue.
- */
 void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
 {
        if (task_contributes_to_load(p))
@@ -4134,7 +4129,7 @@ recheck:
        on_rq = p->on_rq;
        running = task_current(rq, p);
        if (on_rq)
-               deactivate_task(rq, p, 0);
+               dequeue_task(rq, p, 0);
        if (running)
                p->sched_class->put_prev_task(rq, p);
 
@@ -4147,7 +4142,7 @@ recheck:
        if (running)
                p->sched_class->set_curr_task(rq);
        if (on_rq)
-               activate_task(rq, p, 0);
+               enqueue_task(rq, p, 0);
 
        check_class_changed(rq, p, prev_class, oldprio);
        task_rq_unlock(rq, p, &flags);
@@ -4998,9 +4993,9 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
         * placed properly.
         */
        if (p->on_rq) {
-               deactivate_task(rq_src, p, 0);
+               dequeue_task(rq_src, p, 0);
                set_task_cpu(p, dest_cpu);
-               activate_task(rq_dest, p, 0);
+               enqueue_task(rq_dest, p, 0);
                check_preempt_curr(rq_dest, p, 0);
        }
 done:
@@ -7032,10 +7027,10 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
 
        on_rq = p->on_rq;
        if (on_rq)
-               deactivate_task(rq, p, 0);
+               dequeue_task(rq, p, 0);
        __setscheduler(rq, p, SCHED_NORMAL, 0);
        if (on_rq) {
-               activate_task(rq, p, 0);
+               enqueue_task(rq, p, 0);
                resched_task(rq->curr);
        }
 
index b0d798eaf1302b6b2f93836be0cc232b4614709f..d72586fdf6607db63c5f43a2e1fbbb32ff0ac2c7 100644 (file)
@@ -129,7 +129,7 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
  * cpupri_set - update the cpu priority setting
  * @cp: The cpupri context
  * @cpu: The target cpu
- * @pri: The priority (INVALID-RT99) to assign to this CPU
+ * @newpri: The priority (INVALID-RT99) to assign to this CPU
  *
  * Note: Assumes cpu_rq(cpu)->lock is locked
  *
@@ -200,7 +200,6 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri)
 /**
  * cpupri_init - initialize the cpupri structure
  * @cp: The cpupri context
- * @bootmem: true if allocations need to use bootmem
  *
  * Returns: -ENOMEM if memory fails.
  */
index 84adb2d66cbd3dc15e653532462203f8c73bba18..7c6414fc669de4f09dc03fc763aaddd8040f98bf 100644 (file)
@@ -4866,6 +4866,15 @@ static void nohz_balancer_kick(int cpu)
        return;
 }
 
+static inline void clear_nohz_tick_stopped(int cpu)
+{
+       if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+               cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+               atomic_dec(&nohz.nr_cpus);
+               clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+       }
+}
+
 static inline void set_cpu_sd_state_busy(void)
 {
        struct sched_domain *sd;
@@ -4904,6 +4913,12 @@ void select_nohz_load_balancer(int stop_tick)
 {
        int cpu = smp_processor_id();
 
+       /*
+        * If this cpu is going down, then nothing needs to be done.
+        */
+       if (!cpu_active(cpu))
+               return;
+
        if (stop_tick) {
                if (test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))
                        return;
@@ -4914,6 +4929,18 @@ void select_nohz_load_balancer(int stop_tick)
        }
        return;
 }
+
+static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
+                                       unsigned long action, void *hcpu)
+{
+       switch (action & ~CPU_TASKS_FROZEN) {
+       case CPU_DYING:
+               clear_nohz_tick_stopped(smp_processor_id());
+               return NOTIFY_OK;
+       default:
+               return NOTIFY_DONE;
+       }
+}
 #endif
 
 static DEFINE_SPINLOCK(balancing);
@@ -5070,11 +5097,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
        * busy tick after returning from idle, we will update the busy stats.
        */
        set_cpu_sd_state_busy();
-       if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
-               clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
-               cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
-               atomic_dec(&nohz.nr_cpus);
-       }
+       clear_nohz_tick_stopped(cpu);
 
        /*
         * None are in tickless mode and hence no need for NOHZ idle load
@@ -5590,6 +5613,7 @@ __init void init_sched_fair_class(void)
 
 #ifdef CONFIG_NO_HZ
        zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
+       cpu_notifier(sched_ilb_notifier, 0);
 #endif
 #endif /* SMP */
 
index 3640ebbb466b0bb82d2894ec8223c96bfe502b74..f42ae7fb5ec555a42fe0b63376891aaf8b756af5 100644 (file)
@@ -1587,6 +1587,11 @@ static int push_rt_task(struct rq *rq)
        if (!next_task)
                return 0;
 
+#ifdef __ARCH_WANT_INTERRUPTS_ON_CTXSW
+       if (unlikely(task_running(rq, next_task)))
+               return 0;
+#endif
+
 retry:
        if (unlikely(next_task == rq->curr)) {
                WARN_ON(1);
index 57d4b13b631de36161349feb99ae6f76a2984b06..e8d76c5895ea15f3bb4867a85f48851dedf90cd4 100644 (file)
@@ -6,6 +6,7 @@
  * This defines a simple but solid secure-computing mode.
  */
 
+#include <linux/audit.h>
 #include <linux/seccomp.h>
 #include <linux/sched.h>
 #include <linux/compat.h>
@@ -54,6 +55,7 @@ void __secure_computing(int this_syscall)
 #ifdef SECCOMP_DEBUG
        dump_stack();
 #endif
+       audit_seccomp(this_syscall);
        do_exit(SIGKILL);
 }
 
index db110b8ae0309a39cfdef20e75351b989431cf2a..f1539decd99d853d1a5c44fb3296072444aabf1a 100644 (file)
@@ -634,10 +634,11 @@ static int tracepoint_module_coming(struct module *mod)
        int ret = 0;
 
        /*
-        * We skip modules that tain the kernel, especially those with different
-        * module header (for forced load), to make sure we don't cause a crash.
+        * We skip modules that taint the kernel, especially those with different
+        * module headers (for forced load), to make sure we don't cause a crash.
+        * Staging and out-of-tree GPL modules are fine.
         */
-       if (mod->taints)
+       if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
                return 0;
        mutex_lock(&tracepoints_mutex);
        tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
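
The new mask lets out-of-tree (TAINT_OOT_MODULE) and staging (TAINT_CRAP) modules keep their tracepoints while still skipping force-loaded or otherwise tainted modules. The masking idiom in isolation (bit positions are illustrative; the real values live in linux/kernel.h):

#include <stdio.h>

#define TAINT_FORCED_MODULE     1
#define TAINT_CRAP              10
#define TAINT_OOT_MODULE        12

int main(void)
{
        unsigned long allowed = (1UL << TAINT_OOT_MODULE) | (1UL << TAINT_CRAP);
        unsigned long oot     = 1UL << TAINT_OOT_MODULE;
        unsigned long forced  = 1UL << TAINT_FORCED_MODULE;

        printf("oot module skipped?    %s\n", (oot & ~allowed) ? "yes" : "no");
        printf("forced module skipped? %s\n", (forced & ~allowed) ? "yes" : "no");
        return 0;
}
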
index 1d7bca7f4f527c2f25b741da9fc914980a0c43bb..d117262deba305400ba768b1f7cecc945f3faba8 100644 (file)
@@ -296,7 +296,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;
 
-               printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
+               printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
index 201e1b33d721bf2ff83e6cd5ca817ea243861c39..028aba9e72af98b402aeec059e0800681e1fad5a 100644 (file)
@@ -19,6 +19,9 @@ config RATIONAL
 config GENERIC_FIND_FIRST_BIT
        bool
 
+config NO_GENERIC_PCI_IOPORT_MAP
+       bool
+
 config GENERIC_PCI_IOMAP
        bool
 
@@ -279,6 +282,9 @@ config AVERAGE
 
          If unsure, say N.
 
+config CLZ_TAB
+       bool
+
 config CORDIC
        tristate "CORDIC algorithm"
        help
@@ -286,25 +292,25 @@ config CORDIC
          calculations are in fixed point. Module will be called cordic.
 
 config MPILIB
-       tristate "Multiprecision maths library"
+       tristate
+       select CLZ_TAB
        help
          Multiprecision maths library from GnuPG.
          It is used to implement RSA digital signature verification,
          which is used by IMA/EVM digital signature extension.
 
 config MPILIB_EXTRA
-       bool "Multiprecision maths library - additional sources"
+       bool
        depends on MPILIB
        help
-         Multiprecision maths library from GnuPG.
-         It is used to implement RSA digital signature verification,
-         which is used by IMA/EVM digital signature extension.
-         This code in unnecessary for RSA digital signature verification,
-         and can be compiled if needed.
+         Additional sources of the multiprecision maths library from GnuPG.
+         This code is unnecessary for RSA digital signature verification,
+         but can be compiled if needed.
 
-config DIGSIG
-       tristate "In-kernel signature checker"
-       depends on KEYS
+config SIGNATURE
+       tristate
+       depends on KEYS && CRYPTO
+       select CRYPTO_SHA1
        select MPILIB
        help
          Digital signature verification. Currently only RSA is supported.
index dace162c7e1c2f063498ded701e5576f3c16b75a..18515f0267c41591a198a1df9b3c7950a56a16cf 100644 (file)
@@ -119,7 +119,9 @@ obj-$(CONFIG_CORDIC) += cordic.o
 obj-$(CONFIG_DQL) += dynamic_queue_limits.o
 
 obj-$(CONFIG_MPILIB) += mpi/
-obj-$(CONFIG_DIGSIG) += digsig.o
+obj-$(CONFIG_SIGNATURE) += digsig.o
+
+obj-$(CONFIG_CLZ_TAB) += clz_tab.o
 
 hostprogs-y    := gen_crc32table
 clean-files    := crc32table.h
index 19552096d16b06bd2dac2a9b10212e482a4d0da3..a28c1415357cac9d30fc09195652fbfd099bc421 100644 (file)
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -169,7 +169,7 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
                return BUG_TRAP_TYPE_WARN;
        }
 
-       printk(KERN_EMERG "------------[ cut here ]------------\n");
+       printk(KERN_DEFAULT "------------[ cut here ]------------\n");
 
        if (file)
                printk(KERN_CRIT "kernel BUG at %s:%u!\n",
diff --git a/lib/clz_tab.c b/lib/clz_tab.c
new file mode 100644 (file)
index 0000000..7287b4a
--- /dev/null
@@ -0,0 +1,18 @@
+const unsigned char __clz_tab[] = {
+       0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
+           5, 5, 5, 5, 5, 5, 5, 5,
+       6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+           6, 6, 6, 6, 6, 6, 6, 6,
+       7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+           7, 7, 7, 7, 7, 7, 7, 7,
+       7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+           7, 7, 7, 7, 7, 7, 7, 7,
+       8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+           8, 8, 8, 8, 8, 8, 8, 8,
+       8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+           8, 8, 8, 8, 8, 8, 8, 8,
+       8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+           8, 8, 8, 8, 8, 8, 8, 8,
+       8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
+           8, 8, 8, 8, 8, 8, 8, 8,
+};
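
__clz_tab[v] is 1 plus the index of the highest set bit of the byte v (0 for v == 0); the generic C count_leading_zeros() fallback in lib/mpi/longlong.h indexes it with the top byte of the value. A standalone sketch in the same spirit, with the table generated instead of spelled out:

#include <stdio.h>

static unsigned char clz_tab[256];

static void init_tab(void)
{
        for (int v = 1; v < 256; v++)
                for (int b = 7; b >= 0; b--)
                        if (v & (1 << b)) {
                                clz_tab[v] = b + 1;
                                break;
                        }
}

/* leading zeros of a 32-bit value via the byte table */
static int clz32(unsigned int x)
{
        int shift;

        if (x == 0)
                return 32;
        if (x >= 1u << 24)
                shift = 24;
        else if (x >= 1u << 16)
                shift = 16;
        else if (x >= 1u << 8)
                shift = 8;
        else
                shift = 0;
        return 32 - (shift + clz_tab[x >> shift]);
}

int main(void)
{
        init_tab();
        printf("%d %d %d\n", clz32(1), clz32(0x80000000u), clz32(0x00012345u));
        /* prints: 31 0 15 */
        return 0;
}
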
index fd2402f67f89a0d740fc5cc1fbb4a27dbe420a39..286d558033e270524ff3fdeac9c96393b81170a6 100644 (file)
@@ -34,14 +34,9 @@ static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
                        unsigned long  msglen,
                        unsigned long  modulus_bitlen,
                        unsigned char *out,
-                       unsigned long *outlen,
-                       int *is_valid)
+                       unsigned long *outlen)
 {
        unsigned long modulus_len, ps_len, i;
-       int result;
-
-       /* default to invalid packet */
-       *is_valid = 0;
 
        modulus_len = (modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0);
 
@@ -50,39 +45,30 @@ static int pkcs_1_v1_5_decode_emsa(const unsigned char *msg,
                return -EINVAL;
 
        /* separate encoded message */
-       if ((msg[0] != 0x00) || (msg[1] != (unsigned char)1)) {
-               result = -EINVAL;
-               goto bail;
-       }
+       if ((msg[0] != 0x00) || (msg[1] != (unsigned char)1))
+               return -EINVAL;
 
        for (i = 2; i < modulus_len - 1; i++)
                if (msg[i] != 0xFF)
                        break;
 
        /* separator check */
-       if (msg[i] != 0) {
+       if (msg[i] != 0)
                /* There was no octet with hexadecimal value 0x00
                to separate ps from m. */
-               result = -EINVAL;
-               goto bail;
-       }
+               return -EINVAL;
 
        ps_len = i - 2;
 
        if (*outlen < (msglen - (2 + ps_len + 1))) {
                *outlen = msglen - (2 + ps_len + 1);
-               result = -EOVERFLOW;
-               goto bail;
+               return -EOVERFLOW;
        }
 
        *outlen = (msglen - (2 + ps_len + 1));
        memcpy(out, &msg[2 + ps_len + 1], *outlen);
 
-       /* valid packet */
-       *is_valid = 1;
-       result    = 0;
-bail:
-       return result;
+       return 0;
 }
 
 /*
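
For reference, the buffer parsed by pkcs_1_v1_5_decode_emsa() above is EMSA-PKCS1-v1_5 padding: 0x00 0x01, a run of 0xFF bytes, a 0x00 separator, then the encoded digest. A toy round-trip of that layout (sizes shrunk for illustration, not tied to this file):

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char msg[16] = { 0x00, 0x01 };
        const char *digest = "HASH";                    /* toy payload */
        size_t ps_len = sizeof(msg) - 3 - strlen(digest);
        size_t i;

        memset(msg + 2, 0xFF, ps_len);                  /* padding string */
        msg[2 + ps_len] = 0x00;                         /* separator */
        memcpy(msg + 3 + ps_len, digest, strlen(digest));

        /* decode: check 0x00 0x01, skip the 0xFF run, expect the separator */
        for (i = 2; i < sizeof(msg) && msg[i] == 0xFF; i++)
                ;
        if (msg[0] == 0x00 && msg[1] == 0x01 && i < sizeof(msg) && msg[i] == 0x00)
                printf("payload: %.*s\n", (int)(sizeof(msg) - i - 1),
                       (const char *)msg + i + 1);
        return 0;
}
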
@@ -96,7 +82,7 @@ static int digsig_verify_rsa(struct key *key,
        unsigned long len;
        unsigned long mlen, mblen;
        unsigned nret, l;
-       int valid, head, i;
+       int head, i;
        unsigned char *out1 = NULL, *out2 = NULL;
        MPI in = NULL, res = NULL, pkey[2];
        uint8_t *p, *datap, *endp;
@@ -105,6 +91,10 @@ static int digsig_verify_rsa(struct key *key,
 
        down_read(&key->sem);
        ukp = key->payload.data;
+
+       if (ukp->datalen < sizeof(*pkh))
+               goto err1;
+
        pkh = (struct pubkey_hdr *)ukp->data;
 
        if (pkh->version != 1)
@@ -117,18 +107,23 @@ static int digsig_verify_rsa(struct key *key,
                goto err1;
 
        datap = pkh->mpi;
-       endp = datap + ukp->datalen;
+       endp = ukp->data + ukp->datalen;
+
+       err = -ENOMEM;
 
        for (i = 0; i < pkh->nmpi; i++) {
                unsigned int remaining = endp - datap;
                pkey[i] = mpi_read_from_buffer(datap, &remaining);
+               if (!pkey[i])
+                       goto err;
                datap += remaining;
        }
 
        mblen = mpi_get_nbits(pkey[0]);
        mlen = (mblen + 7)/8;
 
-       err = -ENOMEM;
+       if (mlen == 0)
+               goto err;
 
        out1 = kzalloc(mlen, GFP_KERNEL);
        if (!out1)
@@ -167,10 +162,9 @@ static int digsig_verify_rsa(struct key *key,
        memset(out1, 0, head);
        memcpy(out1 + head, p, l);
 
-       err = -EINVAL;
-       pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len, &valid);
+       err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
 
-       if (valid && len == hlen)
+       if (!err && len == hlen)
                err = memcmp(out2, h, hlen);
 
 err:
@@ -178,8 +172,8 @@ err:
        mpi_free(res);
        kfree(out1);
        kfree(out2);
-       mpi_free(pkey[0]);
-       mpi_free(pkey[1]);
+       while (--i >= 0)
+               mpi_free(pkey[i]);
 err1:
        up_read(&key->sem);
 
index b87487b40a8b910d01c6c4f9395081e4d05f3d1a..29f98624ef93a706b4fa302972669c83f70bd49d 100644 (file)
@@ -1200,18 +1200,40 @@ do { \
        "r" ((USItype)(v)) \
        : "%g1", "%g2" __AND_CLOBBER_CC)
 #define UMUL_TIME 39           /* 39 instructions */
-#endif
-#ifndef udiv_qrnnd
-#ifndef LONGLONG_STANDALONE
+/* It's quite necessary to add this much assembler for the sparc.
+   The default udiv_qrnnd (in C) is more than 10 times slower!  */
 #define udiv_qrnnd(q, r, n1, n0, d) \
-do { USItype __r; \
-       (q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
-       (r) = __r; \
-} while (0)
-       extern USItype __udiv_qrnnd();
-#define UDIV_TIME 140
-#endif /* LONGLONG_STANDALONE */
-#endif /* udiv_qrnnd */
+  __asm__ ("! Inlined udiv_qrnnd\n\t"                                  \
+          "mov 32,%%g1\n\t"                                            \
+          "subcc       %1,%2,%%g0\n\t"                                 \
+          "1:  bcs     5f\n\t"                                         \
+          "addxcc %0,%0,%0     ! shift n1n0 and a q-bit in lsb\n\t"    \
+          "sub %1,%2,%1        ! this kills msb of n\n\t"              \
+          "addx        %1,%1,%1        ! so this can't give carry\n\t" \
+          "subcc       %%g1,1,%%g1\n\t"                                \
+          "2:  bne     1b\n\t"                                         \
+          "subcc       %1,%2,%%g0\n\t"                                 \
+          "bcs 3f\n\t"                                                 \
+          "addxcc %0,%0,%0     ! shift n1n0 and a q-bit in lsb\n\t"    \
+          "b           3f\n\t"                                         \
+          "sub %1,%2,%1        ! this kills msb of n\n\t"              \
+          "4:  sub     %1,%2,%1\n\t"                                   \
+          "5:  addxcc  %1,%1,%1\n\t"                                   \
+          "bcc 2b\n\t"                                                 \
+          "subcc       %%g1,1,%%g1\n\t"                                \
+          "! Got carry from n.  Subtract next step to cancel this carry.\n\t" \
+          "bne 4b\n\t"                                                 \
+          "addcc       %0,%0,%0        ! shift n1n0 and a 0-bit in lsb\n\t" \
+          "sub %1,%2,%1\n\t"                                           \
+          "3:  xnor    %0,0,%0\n\t"                                    \
+          "! End of inline udiv_qrnnd\n"                               \
+          : "=&r" ((USItype)(q)),                                      \
+            "=&r" ((USItype)(r))                                       \
+          : "r" ((USItype)(d)),                                        \
+            "1" ((USItype)(n1)),                                       \
+            "0" ((USItype)(n0)) : "%g1", "cc")
+#define UDIV_TIME (3+7*32)      /* 7 instructions/iteration. 32 iterations.  */
+#endif
 #endif /* __sparc__ */
 
 /***************************************
index 854c9c6da025ef905178cd1b568ff0cbeb1609f2..2f526627e4f575c50468b51015270c35d2179721 100644 (file)
 #include "mpi-internal.h"
 #include "longlong.h"
 
-const unsigned char __clz_tab[] = {
-       0, 1, 2, 2, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5,
-           5, 5, 5, 5, 5, 5, 5, 5,
-       6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
-           6, 6, 6, 6, 6, 6, 6, 6,
-       7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-           7, 7, 7, 7, 7, 7, 7, 7,
-       7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
-           7, 7, 7, 7, 7, 7, 7, 7,
-       8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-           8, 8, 8, 8, 8, 8, 8, 8,
-       8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-           8, 8, 8, 8, 8, 8, 8, 8,
-       8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-           8, 8, 8, 8, 8, 8, 8, 8,
-       8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
-           8, 8, 8, 8, 8, 8, 8, 8,
-};
-
 #define A_LIMB_1 ((mpi_limb_t) 1)
 
 /****************
index c3087d1390ce5d1b669ce7f1326b4eaf079d2caa..f68cbbb4d4a4ec74246be1559e2beaa0c1ce5284 100644 (file)
@@ -149,6 +149,9 @@ int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
        mpi_ptr_t marker[5];
        int markidx = 0;
 
+       if (!dsize)
+               return -EINVAL;
+
        memset(marker, 0, sizeof(marker));
 
        /* Ensure space is enough for quotient and remainder.
@@ -207,6 +210,8 @@ int mpi_tdiv_qr(MPI quot, MPI rem, MPI num, MPI den)
                 * numerator would be gradually overwritten by the quotient limbs.  */
                if (qp == np) { /* Copy NP object to temporary space.  */
                        np = marker[markidx++] = mpi_alloc_limb_space(nsize);
+                       if (!np)
+                               goto nomem;
                        MPN_COPY(np, qp, nsize);
                }
        } else                  /* Put quotient at top of remainder. */
index b04a3cf8008083d8e54f861df465b1f831b98df3..67f3e79af9140e0160a4264b506b41344304e916 100644 (file)
@@ -59,7 +59,7 @@ int mpi_powm(MPI res, MPI base, MPI exp, MPI mod)
        ep = exp->d;
 
        if (!msize)
-               msize = 1 / msize;      /* provoke a signal */
+               return -EINVAL;
 
        if (!esize) {
                /* Exponent is zero, result is 1 mod MOD, i.e., 1 or 0
index fe84bb978e3b5c42befda57c206edd0b68d61684..f26b41fcb48c694c06fdcea18b55bdfd3788fb6d 100644 (file)
 
 #include "mpi-internal.h"
 
-#define DIM(v) (sizeof(v)/sizeof((v)[0]))
 #define MAX_EXTERN_MPI_BITS 16384
 
-static uint8_t asn[15] =       /* Object ID is 1.3.14.3.2.26 */
-{ 0x30, 0x21, 0x30, 0x09, 0x06, 0x05, 0x2b, 0x0e, 0x03,
-       0x02, 0x1a, 0x05, 0x00, 0x04, 0x14
-};
-
-MPI do_encode_md(const void *sha_buffer, unsigned nbits)
-{
-       int nframe = (nbits + 7) / 8;
-       uint8_t *frame, *fr_pt;
-       int i = 0, n;
-       size_t asnlen = DIM(asn);
-       MPI a = MPI_NULL;
-
-       if (SHA1_DIGEST_LENGTH + asnlen + 4 > nframe)
-               pr_info("MPI: can't encode a %d bit MD into a %d bits frame\n",
-                      (int)(SHA1_DIGEST_LENGTH * 8), (int)nbits);
-
-       /* We encode the MD in this way:
-        *
-        *       0  A PAD(n bytes)   0  ASN(asnlen bytes)  MD(len bytes)
-        *
-        * PAD consists of FF bytes.
-        */
-       frame = kmalloc(nframe, GFP_KERNEL);
-       if (!frame)
-               return MPI_NULL;
-       n = 0;
-       frame[n++] = 0;
-       frame[n++] = 1;         /* block type */
-       i = nframe - SHA1_DIGEST_LENGTH - asnlen - 3;
-
-       if (i <= 1) {
-               pr_info("MPI: message digest encoding failed\n");
-               kfree(frame);
-               return a;
-       }
-
-       memset(frame + n, 0xff, i);
-       n += i;
-       frame[n++] = 0;
-       memcpy(frame + n, &asn, asnlen);
-       n += asnlen;
-       memcpy(frame + n, sha_buffer, SHA1_DIGEST_LENGTH);
-       n += SHA1_DIGEST_LENGTH;
-
-       i = nframe;
-       fr_pt = frame;
-
-       if (n != nframe) {
-               printk
-                   ("MPI: message digest encoding failed, frame length is wrong\n");
-               kfree(frame);
-               return a;
-       }
-
-       a = mpi_alloc((nframe + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB);
-       mpi_set_buffer(a, frame, nframe, 0);
-       kfree(frame);
-
-       return a;
-}
-
 MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
 {
        const uint8_t *buffer = xbuffer;
        int i, j;
        unsigned nbits, nbytes, nlimbs, nread = 0;
        mpi_limb_t a;
-       MPI val = MPI_NULL;
+       MPI val = NULL;
 
        if (*ret_nread < 2)
                goto leave;
@@ -108,7 +45,7 @@ MPI mpi_read_from_buffer(const void *xbuffer, unsigned *ret_nread)
        nlimbs = (nbytes + BYTES_PER_MPI_LIMB - 1) / BYTES_PER_MPI_LIMB;
        val = mpi_alloc(nlimbs);
        if (!val)
-               return MPI_NULL;
+               return NULL;
        i = BYTES_PER_MPI_LIMB - nbytes % BYTES_PER_MPI_LIMB;
        i %= BYTES_PER_MPI_LIMB;
        val->nbits = nbits;
@@ -211,30 +148,6 @@ int mpi_fromstr(MPI val, const char *str)
 }
 EXPORT_SYMBOL_GPL(mpi_fromstr);
 
-/****************
- * Special function to get the low 8 bytes from an mpi.
- * This can be used as a keyid; KEYID is an 2 element array.
- * Return the low 4 bytes.
- */
-u32 mpi_get_keyid(const MPI a, u32 *keyid)
-{
-#if BYTES_PER_MPI_LIMB == 4
-       if (keyid) {
-               keyid[0] = a->nlimbs >= 2 ? a->d[1] : 0;
-               keyid[1] = a->nlimbs >= 1 ? a->d[0] : 0;
-       }
-       return a->nlimbs >= 1 ? a->d[0] : 0;
-#elif BYTES_PER_MPI_LIMB == 8
-       if (keyid) {
-               keyid[0] = a->nlimbs ? (u32) (a->d[0] >> 32) : 0;
-               keyid[1] = a->nlimbs ? (u32) (a->d[0] & 0xffffffff) : 0;
-       }
-       return a->nlimbs ? (u32) (a->d[0] & 0xffffffff) : 0;
-#else
-#error Make this function work with other LIMB sizes
-#endif
-}
-
 /****************
  * Return an allocated buffer with the MPI (msb first).
  * NBYTES receives the length of this buffer. Caller must free the
@@ -255,6 +168,8 @@ void *mpi_get_buffer(MPI a, unsigned *nbytes, int *sign)
        if (!n)
                n++;            /* avoid zero length allocation */
        p = buffer = kmalloc(n, GFP_KERNEL);
+       if (!p)
+               return NULL;
 
        for (i = a->nlimbs - 1; i >= 0; i--) {
                alimb = a->d[i];
index 87ede162dfabee78a4cad86e850fdefa06f622f8..cde1aaec18da9d179853724b3928c5be8a4508d6 100644 (file)
@@ -217,6 +217,10 @@ mpihelp_divrem(mpi_ptr_t qp, mpi_size_t qextra_limbs,
        case 0:
                /* We are asked to divide by zero, so go ahead and do it!  (To make
                   the compiler not remove this statement, return the value.)  */
+               /*
+                * existing clients of this function have been modified
+                * not to call it with dsize == 0, so this should not happen
+                */
                return 1 / dsize;
 
        case 1:
index eefc55d6b7f5b8a732992c070d5bca4bac65c374..26e4ed31e256f793d4bdc21ea81f7f6a8ca4bf71 100644 (file)
@@ -58,6 +58,9 @@ mpi_ptr_t mpi_alloc_limb_space(unsigned nlimbs)
 {
        size_t len = nlimbs * sizeof(mpi_limb_t);
 
+       if (!len)
+               return NULL;
+
        return kmalloc(len, GFP_KERNEL);
 }
 
@@ -135,7 +138,7 @@ int mpi_copy(MPI *copied, const MPI a)
        size_t i;
        MPI b;
 
-       *copied = MPI_NULL;
+       *copied = NULL;
 
        if (a) {
                b = mpi_alloc(a->nlimbs);
index 4b0fdc22e688d3869c9e62005af933f8dcdd096a..0d83ea8a9605429aa5e79262bef539e51bfec456 100644 (file)
@@ -34,7 +34,7 @@ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
        if (maxlen && len > maxlen)
                len = maxlen;
        if (flags & IORESOURCE_IO)
-               return ioport_map(start, len);
+               return __pci_ioport_map(dev, start, len);
        if (flags & IORESOURCE_MEM) {
                if (flags & IORESOURCE_CACHEABLE)
                        return ioremap(start, len);
index 71a58f67f4817720a4eca34678b2353fa96863e6..d9ebebe1a2aaaea69074fe3615faeddd41a4feb7 100644 (file)
@@ -313,12 +313,34 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
                } else if (!locked)
                        spin_lock_irq(&zone->lru_lock);
 
+               /*
+                * migrate_pfn does not necessarily start aligned to a
+                * pageblock. Ensure that pfn_valid is called when moving
+                * into a new MAX_ORDER_NR_PAGES range in case of large
+                * memory holes within the zone
+                */
+               if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+                       if (!pfn_valid(low_pfn)) {
+                               low_pfn += MAX_ORDER_NR_PAGES - 1;
+                               continue;
+                       }
+               }
+
                if (!pfn_valid_within(low_pfn))
                        continue;
                nr_scanned++;
 
-               /* Get the page and skip if free */
+               /*
+                * Get the page and ensure the page is within the same zone.
+                * See the comment in isolate_freepages about overlapping
+                * nodes. It is deliberate that the new zone lock is not taken
+                * as memory compaction should not move pages between nodes.
+                */
                page = pfn_to_page(low_pfn);
+               if (page_zone(page) != zone)
+                       continue;
+
+               /* Skip if free */
                if (PageBuddy(page))
                        continue;
 
index 97f49ed35bd24fba29677857433814103949dd14..b66275757c281b39e1d7cc0fecf6465bea5b4257 100644 (file)
@@ -1400,15 +1400,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
        unsigned long seg = 0;
        size_t count;
        loff_t *ppos = &iocb->ki_pos;
-       struct blk_plug plug;
 
        count = 0;
        retval = generic_segment_checks(iov, &nr_segs, &count, VERIFY_WRITE);
        if (retval)
                return retval;
 
-       blk_start_plug(&plug);
-
        /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
        if (filp->f_flags & O_DIRECT) {
                loff_t size;
@@ -1424,8 +1421,12 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                        retval = filemap_write_and_wait_range(mapping, pos,
                                        pos + iov_length(iov, nr_segs) - 1);
                        if (!retval) {
+                               struct blk_plug plug;
+
+                               blk_start_plug(&plug);
                                retval = mapping->a_ops->direct_IO(READ, iocb,
                                                        iov, pos, nr_segs);
+                               blk_finish_plug(&plug);
                        }
                        if (retval > 0) {
                                *ppos = pos + retval;
@@ -1481,7 +1482,6 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
                        break;
        }
 out:
-       blk_finish_plug(&plug);
        return retval;
 }
 EXPORT_SYMBOL(generic_file_aio_read);
index f91b2f687343315741c9519670a96435329390f1..a4eb3113222912c9aada14bd92c6b68d01577b73 100644 (file)
@@ -263,7 +263,12 @@ found:
                                                        xip_pfn);
                if (err == -ENOMEM)
                        return VM_FAULT_OOM;
-               BUG_ON(err);
+               /*
+                * err == -EBUSY is fine, we've raced against another thread
+                * that faulted-in the same page
+                */
+               if (err != -EBUSY)
+                       BUG_ON(err);
                return VM_FAULT_NOPAGE;
        } else {
                int err, ret = VM_FAULT_OOM;
index b3ffc21ce8010b9de9c92004e2a364326af8dc1b..91d3efb25d15472332e1f2d9c382a5732629ce69 100644 (file)
@@ -2083,7 +2083,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 {
        struct mm_struct *mm = mm_slot->mm;
 
-       VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+       VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
 
        if (khugepaged_test_exit(mm)) {
                /* free mm_slot */
@@ -2113,7 +2113,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
        int progress = 0;
 
        VM_BUG_ON(!pages);
-       VM_BUG_ON(!spin_is_locked(&khugepaged_mm_lock));
+       VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
 
        if (khugepaged_scan.mm_slot)
                mm_slot = khugepaged_scan.mm_slot;
index ea8c3a4cd2ae8acdf52a7a4e862e277f2390c265..5f34bd8dda34bbc8224303ad080103f224fa5ad6 100644 (file)
@@ -2508,6 +2508,7 @@ static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
        struct hstate *h = hstate_vma(vma);
        int ret = VM_FAULT_SIGBUS;
+       int anon_rmap = 0;
        pgoff_t idx;
        unsigned long size;
        struct page *page;
@@ -2562,14 +2563,13 @@ retry:
                        spin_lock(&inode->i_lock);
                        inode->i_blocks += blocks_per_huge_page(h);
                        spin_unlock(&inode->i_lock);
-                       page_dup_rmap(page);
                } else {
                        lock_page(page);
                        if (unlikely(anon_vma_prepare(vma))) {
                                ret = VM_FAULT_OOM;
                                goto backout_unlocked;
                        }
-                       hugepage_add_new_anon_rmap(page, vma, address);
+                       anon_rmap = 1;
                }
        } else {
                /*
@@ -2582,7 +2582,6 @@ retry:
                              VM_FAULT_SET_HINDEX(h - hstates);
                        goto backout_unlocked;
                }
-               page_dup_rmap(page);
        }
 
        /*
@@ -2606,6 +2605,10 @@ retry:
        if (!huge_pte_none(huge_ptep_get(ptep)))
                goto backout;
 
+       if (anon_rmap)
+               hugepage_add_new_anon_rmap(page, vma, address);
+       else
+               page_dup_rmap(page);
        new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
                                && (vma->vm_flags & VM_SHARED)));
        set_huge_pte_at(mm, address, ptep, new_pte);
index c833addd94d74703a90e9bed51d759726e77f280..45eb6217bf38e764bfb65d90f29ae2bb46f77886 100644 (file)
@@ -1036,7 +1036,7 @@ void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 {
        pr_debug("%s(0x%p)\n", __func__, ptr);
 
-       if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
+       if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
                add_scan_area((unsigned long)ptr, size, gfp);
        else if (atomic_read(&kmemleak_early_log))
                log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
@@ -1757,6 +1757,7 @@ void __init kmemleak_init(void)
 
 #ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
        if (!kmemleak_skip_disable) {
+               atomic_set(&kmemleak_early_log, 0);
                kmemleak_disable();
                return;
        }
index 2f55f19b7c86517bb5b23680c1b062f7efe2795a..77b5f227e1d86d9228ae6a67f52f18418a2beb98 100644 (file)
@@ -106,14 +106,17 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t start,
        if (end == MEMBLOCK_ALLOC_ACCESSIBLE)
                end = memblock.current_limit;
 
-       /* adjust @start to avoid underflow and allocating the first page */
-       start = max3(start, size, (phys_addr_t)PAGE_SIZE);
+       /* avoid allocating the first page */
+       start = max_t(phys_addr_t, start, PAGE_SIZE);
        end = max(start, end);
 
        for_each_free_mem_range_reverse(i, nid, &this_start, &this_end, NULL) {
                this_start = clamp(this_start, start, end);
                this_end = clamp(this_end, start, end);
 
+               if (this_end < size)
+                       continue;
+
                cand = round_down(this_end - size, align);
                if (cand >= this_start)
                        return cand;
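
The new "this_end < size" check matters because phys_addr_t is unsigned: when a free range is smaller than the requested size, this_end - size wraps around and round_down() yields a bogus but seemingly valid candidate. A small standalone illustration with made-up addresses:

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t phys_addr_t;   /* stand-in for the kernel type */

    static phys_addr_t round_down_align(phys_addr_t x, phys_addr_t align)
    {
        return x & ~(align - 1);
    }

    int main(void)
    {
        phys_addr_t this_end = 0x3000;                 /* end of an 8 KiB free range */
        phys_addr_t size = 0x10000, align = 0x1000;    /* 64 KiB request             */

        /* Without the guard, unsigned wrap-around makes a huge bogus candidate. */
        printf("wrapped candidate: %#llx\n",
               (unsigned long long)round_down_align(this_end - size, align));

        /* With the guard from the hunk, the undersized range is simply skipped. */
        if (this_end < size)
            printf("range smaller than request, skipped\n");
        return 0;
    }
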
index 602207be985379f7e15e94ea546b8d70b0b2a7b9..6728a7ae6f2d1eb42c8ac183ec69098aa80d2d6c 100644 (file)
@@ -373,14 +373,13 @@ static void mem_cgroup_put(struct mem_cgroup *memcg);
 
 /* Writing them here to avoid exposing memcg's inner layout */
 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
-#ifdef CONFIG_INET
 #include <net/sock.h>
 #include <net/ip.h>
 
 static bool mem_cgroup_is_root(struct mem_cgroup *memcg);
 void sock_update_memcg(struct sock *sk)
 {
-       if (static_branch(&memcg_socket_limit_enabled)) {
+       if (mem_cgroup_sockets_enabled) {
                struct mem_cgroup *memcg;
 
                BUG_ON(!sk->sk_prot->proto_cgroup);
@@ -412,7 +411,7 @@ EXPORT_SYMBOL(sock_update_memcg);
 
 void sock_release_memcg(struct sock *sk)
 {
-       if (static_branch(&memcg_socket_limit_enabled) && sk->sk_cgrp) {
+       if (mem_cgroup_sockets_enabled && sk->sk_cgrp) {
                struct mem_cgroup *memcg;
                WARN_ON(!sk->sk_cgrp->memcg);
                memcg = sk->sk_cgrp->memcg;
@@ -420,6 +419,7 @@ void sock_release_memcg(struct sock *sk)
        }
 }
 
+#ifdef CONFIG_INET
 struct cg_proto *tcp_proto_cgroup(struct mem_cgroup *memcg)
 {
        if (!memcg || mem_cgroup_is_root(memcg))
@@ -776,7 +776,8 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
        /* threshold event is triggered in finer grain than soft limit */
        if (unlikely(mem_cgroup_event_ratelimit(memcg,
                                                MEM_CGROUP_TARGET_THRESH))) {
-               bool do_softlimit, do_numainfo;
+               bool do_softlimit;
+               bool do_numainfo __maybe_unused;
 
                do_softlimit = mem_cgroup_event_ratelimit(memcg,
                                                MEM_CGROUP_TARGET_SOFTLIMIT);
@@ -3247,7 +3248,7 @@ int mem_cgroup_prepare_migration(struct page *page,
                ctype = MEM_CGROUP_CHARGE_TYPE_CACHE;
        else
                ctype = MEM_CGROUP_CHARGE_TYPE_SHMEM;
-       __mem_cgroup_commit_charge(memcg, page, 1, pc, ctype);
+       __mem_cgroup_commit_charge(memcg, newpage, 1, pc, ctype);
        return ret;
 }
 
index 5e30583c2605d7b791d907ae8a5e07be2d9ad5b6..fa2f04e0337c437e739e75a683ff5b80a070a95a 100644 (file)
@@ -878,15 +878,24 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
                        }
                        if (likely(!non_swap_entry(entry)))
                                rss[MM_SWAPENTS]++;
-                       else if (is_write_migration_entry(entry) &&
-                                       is_cow_mapping(vm_flags)) {
-                               /*
-                                * COW mappings require pages in both parent
-                                * and child to be set to read.
-                                */
-                               make_migration_entry_read(&entry);
-                               pte = swp_entry_to_pte(entry);
-                               set_pte_at(src_mm, addr, src_pte, pte);
+                       else if (is_migration_entry(entry)) {
+                               page = migration_entry_to_page(entry);
+
+                               if (PageAnon(page))
+                                       rss[MM_ANONPAGES]++;
+                               else
+                                       rss[MM_FILEPAGES]++;
+
+                               if (is_write_migration_entry(entry) &&
+                                   is_cow_mapping(vm_flags)) {
+                                       /*
+                                        * COW mappings require pages in both
+                                        * parent and child to be set to read.
+                                        */
+                                       make_migration_entry_read(&entry);
+                                       pte = swp_entry_to_pte(entry);
+                                       set_pte_at(src_mm, addr, src_pte, pte);
+                               }
                        }
                }
                goto out_set_pte;
@@ -1191,6 +1200,16 @@ again:
 
                        if (!non_swap_entry(entry))
                                rss[MM_SWAPENTS]--;
+                       else if (is_migration_entry(entry)) {
+                               struct page *page;
+
+                               page = migration_entry_to_page(entry);
+
+                               if (PageAnon(page))
+                                       rss[MM_ANONPAGES]--;
+                               else
+                                       rss[MM_FILEPAGES]--;
+                       }
                        if (unlikely(!free_swap_and_cache(entry)))
                                print_bad_pte(vma, addr, ptent, NULL);
                }
index 9871a56d82c30b1390ea268fddd9b3bc4b83c8da..df141f60289eef61c7d278080e8290c3dfc717b9 100644 (file)
@@ -445,7 +445,6 @@ void migrate_page_copy(struct page *newpage, struct page *page)
        ClearPageSwapCache(page);
        ClearPagePrivate(page);
        set_page_private(page, 0);
-       page->mapping = NULL;
 
        /*
         * If any waiters have accumulated on the new page then
@@ -667,6 +666,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
        } else {
                if (remap_swapcache)
                        remove_migration_ptes(page, newpage);
+               page->mapping = NULL;
        }
 
        unlock_page(newpage);
index 0027d8f4a1bb8b1423f42e4d52cb3547debc315f..d2186ecb36f7cdfd5fdc75da4bbd10ef798bcbba 100644 (file)
@@ -5413,7 +5413,25 @@ __count_immobile_pages(struct zone *zone, struct page *page, int count)
 
 bool is_pageblock_removable_nolock(struct page *page)
 {
-       struct zone *zone = page_zone(page);
+       struct zone *zone;
+       unsigned long pfn;
+
+       /*
+        * We have to be careful here because we are iterating over memory
+        * sections which are not zone aware so we might end up outside of
+        * the zone but still within the section.
+        * We also have to take care of the node. If the node is offline

+        * its NODE_DATA will be NULL - see page_zone.
+        */
+       if (!node_online(page_to_nid(page)))
+               return false;
+
+       zone = page_zone(page);
+       pfn = page_to_pfn(page);
+       if (zone->zone_start_pfn > pfn ||
+                       zone->zone_start_pfn + zone->spanned_pages <= pfn)
+               return false;
+
        return __count_immobile_pages(zone, page, 0);
 }
 
index e920aa3ce104c35b902e8412864c78102fa3177e..c20ff48994c29050953c79fcdb0633e690bb653e 100644 (file)
@@ -298,23 +298,18 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
                goto free_proc_pages;
        }
 
-       task_lock(task);
-       if (__ptrace_may_access(task, PTRACE_MODE_ATTACH)) {
-               task_unlock(task);
-               rc = -EPERM;
-               goto put_task_struct;
-       }
-       mm = task->mm;
-
-       if (!mm || (task->flags & PF_KTHREAD)) {
-               task_unlock(task);
-               rc = -EINVAL;
+       mm = mm_access(task, PTRACE_MODE_ATTACH);
+       if (!mm || IS_ERR(mm)) {
+               rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
+               /*
+                                * Explicitly map EACCES to EPERM as EPERM is a more
+                                * appropriate error code for process_vm_readv/writev
+                */
+               if (rc == -EACCES)
+                       rc = -EPERM;
                goto put_task_struct;
        }
 
-       atomic_inc(&mm->mm_users);
-       task_unlock(task);
-
        for (i = 0; i < riovcnt && iov_l_curr_idx < liovcnt; i++) {
                rc = process_vm_rw_single_vec(
                        (unsigned long)rvec[i].iov_base, rvec[i].iov_len,
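
For context, the errno mapping above is what a process_vm_readv()/process_vm_writev() caller observes. A minimal userspace sketch (not part of the patch; buffer contents and sizes are illustrative):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/types.h>
    #include <sys/uio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        pid_t target = argc > 1 ? (pid_t)atoi(argv[1]) : getpid();
        char src[64] = "hello from the target address space";
        char dst[64] = { 0 };
        struct iovec local  = { .iov_base = dst, .iov_len = sizeof(dst) };
        struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

        /* With no argument this copies within our own address space.  With a
         * foreign pid the call needs ptrace-attach rights (EPERM otherwise,
         * per the mapping above), the target mm must still exist (ESRCH), and
         * the remote address must actually be mapped there (EFAULT). */
        ssize_t n = process_vm_readv(target, &local, 1, &remote, 1, 0);
        if (n < 0)
            perror("process_vm_readv");
        else
            printf("copied %zd bytes: %s\n", n, dst);
        return 0;
    }
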
index feead1943d927f6c2660791bab30a370c3f8027c..269d049294abca49b518f158ef7e65fd7cb2547e 100644 (file)
@@ -379,7 +379,7 @@ static int shmem_free_swap(struct address_space *mapping,
 /*
  * Pagevec may contain swap entries, so shuffle up pages before releasing.
  */
-static void shmem_pagevec_release(struct pagevec *pvec)
+static void shmem_deswap_pagevec(struct pagevec *pvec)
 {
        int i, j;
 
@@ -389,7 +389,36 @@ static void shmem_pagevec_release(struct pagevec *pvec)
                        pvec->pages[j++] = page;
        }
        pvec->nr = j;
-       pagevec_release(pvec);
+}
+
+/*
+ * SysV IPC SHM_UNLOCK restores unevictable pages to their evictable lists.
+ */
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+       struct pagevec pvec;
+       pgoff_t indices[PAGEVEC_SIZE];
+       pgoff_t index = 0;
+
+       pagevec_init(&pvec, 0);
+       /*
+        * Minor point, but we might as well stop if someone else SHM_LOCKs it.
+        */
+       while (!mapping_unevictable(mapping)) {
+               /*
+                * Avoid pagevec_lookup(): find_get_pages() returns 0 as if it
+                * has finished, if it hits a row of PAGEVEC_SIZE swap entries.
+                */
+               pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
+                                       PAGEVEC_SIZE, pvec.pages, indices);
+               if (!pvec.nr)
+                       break;
+               index = indices[pvec.nr - 1] + 1;
+               shmem_deswap_pagevec(&pvec);
+               check_move_unevictable_pages(pvec.pages, pvec.nr);
+               pagevec_release(&pvec);
+               cond_resched();
+       }
 }
 
 /*
@@ -440,7 +469,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        }
                        unlock_page(page);
                }
-               shmem_pagevec_release(&pvec);
+               shmem_deswap_pagevec(&pvec);
+               pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                cond_resched();
                index++;
@@ -470,7 +500,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        continue;
                }
                if (index == start && indices[0] > end) {
-                       shmem_pagevec_release(&pvec);
+                       shmem_deswap_pagevec(&pvec);
+                       pagevec_release(&pvec);
                        break;
                }
                mem_cgroup_uncharge_start();
@@ -494,7 +525,8 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
                        }
                        unlock_page(page);
                }
-               shmem_pagevec_release(&pvec);
+               shmem_deswap_pagevec(&pvec);
+               pagevec_release(&pvec);
                mem_cgroup_uncharge_end();
                index++;
        }
@@ -1068,13 +1100,6 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
                user_shm_unlock(inode->i_size, user);
                info->flags &= ~VM_LOCKED;
                mapping_clear_unevictable(file->f_mapping);
-               /*
-                * Ensure that a racing putback_lru_page() can see
-                * the pages of this mapping are evictable when we
-                * skip them due to !PageLRU during the scan.
-                */
-               smp_mb__after_clear_bit();
-               scan_mapping_unevictable_pages(file->f_mapping);
        }
        retval = 0;
 
@@ -2445,6 +2470,10 @@ int shmem_lock(struct file *file, int lock, struct user_struct *user)
        return 0;
 }
 
+void shmem_unlock_mapping(struct address_space *mapping)
+{
+}
+
 void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
 {
        truncate_inode_pages_range(inode->i_mapping, lstart, lend);
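
shmem_unlock_mapping() is driven by the SysV IPC SHM_UNLOCK path; the hunks above batch the move of unevictable pages back onto the normal LRU lists instead of scanning the whole mapping inside shmem_lock(). A small userspace sketch of the trigger (segment size and mode are arbitrary; SHM_LOCK may require CAP_IPC_LOCK or a sufficient RLIMIT_MEMLOCK):

    #include <stdio.h>
    #include <string.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    int main(void)
    {
        int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
        if (id < 0) { perror("shmget"); return 1; }

        char *p = shmat(id, NULL, 0);
        if (p == (void *)-1) { perror("shmat"); return 1; }
        memset(p, 0xaa, 1 << 20);                /* fault the pages in */

        if (shmctl(id, SHM_LOCK, NULL))          /* pages become unevictable */
            perror("shmctl(SHM_LOCK)");
        if (shmctl(id, SHM_UNLOCK, NULL))        /* exercises shmem_unlock_mapping() */
            perror("shmctl(SHM_UNLOCK)");

        shmdt(p);
        shmctl(id, IPC_RMID, NULL);
        return 0;
    }
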
index b0f529b38979447d0ec192f8b6434af5bab28e02..fff1ff7fb9ada36fca1be10d33dec593486f50a6 100644 (file)
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -659,7 +659,7 @@ void lru_add_page_tail(struct zone* zone,
        VM_BUG_ON(!PageHead(page));
        VM_BUG_ON(PageCompound(page_tail));
        VM_BUG_ON(PageLRU(page_tail));
-       VM_BUG_ON(!spin_is_locked(&zone->lru_lock));
+       VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock));
 
        SetPageLRU(page_tail);
 
index 2880396f7953b03476db86957c66a3787b40cb8c..c52b23552659af5bce0038dd6fa7ac10d044be40 100644 (file)
@@ -26,7 +26,6 @@
 #include <linux/buffer_head.h> /* for try_to_release_page(),
                                        buffer_heads_over_limit */
 #include <linux/mm_inline.h>
-#include <linux/pagevec.h>
 #include <linux/backing-dev.h>
 #include <linux/rmap.h>
 #include <linux/topology.h>
@@ -661,7 +660,7 @@ redo:
                 * When racing with an mlock or AS_UNEVICTABLE clearing
                 * (page is unlocked) make sure that if the other thread
                 * does not observe our setting of PG_lru and fails
-                * isolation/check_move_unevictable_page,
+                * isolation/check_move_unevictable_pages,
                 * we see PG_mlocked/AS_UNEVICTABLE cleared below and move
                 * the page back to the evictable list.
                 *
@@ -3499,100 +3498,61 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
        return 1;
 }
 
+#ifdef CONFIG_SHMEM
 /**
- * check_move_unevictable_page - check page for evictability and move to appropriate zone lru list
- * @page: page to check evictability and move to appropriate lru list
- * @zone: zone page is in
+ * check_move_unevictable_pages - check pages for evictability and move to appropriate zone lru list
+ * @pages:     array of pages to check
+ * @nr_pages:  number of pages to check
  *
- * Checks a page for evictability and moves the page to the appropriate
- * zone lru list.
+ * Checks pages for evictability and moves them to the appropriate lru list.
  *
- * Restrictions: zone->lru_lock must be held, page must be on LRU and must
- * have PageUnevictable set.
+ * This function is only used for SysV IPC SHM_UNLOCK.
  */
-static void check_move_unevictable_page(struct page *page, struct zone *zone)
+void check_move_unevictable_pages(struct page **pages, int nr_pages)
 {
        struct lruvec *lruvec;
+       struct zone *zone = NULL;
+       int pgscanned = 0;
+       int pgrescued = 0;
+       int i;
 
-       VM_BUG_ON(PageActive(page));
-retry:
-       ClearPageUnevictable(page);
-       if (page_evictable(page, NULL)) {
-               enum lru_list l = page_lru_base_type(page);
-
-               __dec_zone_state(zone, NR_UNEVICTABLE);
-               lruvec = mem_cgroup_lru_move_lists(zone, page,
-                                                  LRU_UNEVICTABLE, l);
-               list_move(&page->lru, &lruvec->lists[l]);
-               __inc_zone_state(zone, NR_INACTIVE_ANON + l);
-               __count_vm_event(UNEVICTABLE_PGRESCUED);
-       } else {
-               /*
-                * rotate unevictable list
-                */
-               SetPageUnevictable(page);
-               lruvec = mem_cgroup_lru_move_lists(zone, page, LRU_UNEVICTABLE,
-                                                  LRU_UNEVICTABLE);
-               list_move(&page->lru, &lruvec->lists[LRU_UNEVICTABLE]);
-               if (page_evictable(page, NULL))
-                       goto retry;
-       }
-}
-
-/**
- * scan_mapping_unevictable_pages - scan an address space for evictable pages
- * @mapping: struct address_space to scan for evictable pages
- *
- * Scan all pages in mapping.  Check unevictable pages for
- * evictability and move them to the appropriate zone lru list.
- */
-void scan_mapping_unevictable_pages(struct address_space *mapping)
-{
-       pgoff_t next = 0;
-       pgoff_t end   = (i_size_read(mapping->host) + PAGE_CACHE_SIZE - 1) >>
-                        PAGE_CACHE_SHIFT;
-       struct zone *zone;
-       struct pagevec pvec;
-
-       if (mapping->nrpages == 0)
-               return;
-
-       pagevec_init(&pvec, 0);
-       while (next < end &&
-               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
-               int i;
-               int pg_scanned = 0;
-
-               zone = NULL;
-
-               for (i = 0; i < pagevec_count(&pvec); i++) {
-                       struct page *page = pvec.pages[i];
-                       pgoff_t page_index = page->index;
-                       struct zone *pagezone = page_zone(page);
+       for (i = 0; i < nr_pages; i++) {
+               struct page *page = pages[i];
+               struct zone *pagezone;
 
-                       pg_scanned++;
-                       if (page_index > next)
-                               next = page_index;
-                       next++;
+               pgscanned++;
+               pagezone = page_zone(page);
+               if (pagezone != zone) {
+                       if (zone)
+                               spin_unlock_irq(&zone->lru_lock);
+                       zone = pagezone;
+                       spin_lock_irq(&zone->lru_lock);
+               }
 
-                       if (pagezone != zone) {
-                               if (zone)
-                                       spin_unlock_irq(&zone->lru_lock);
-                               zone = pagezone;
-                               spin_lock_irq(&zone->lru_lock);
-                       }
+               if (!PageLRU(page) || !PageUnevictable(page))
+                       continue;
 
-                       if (PageLRU(page) && PageUnevictable(page))
-                               check_move_unevictable_page(page, zone);
+               if (page_evictable(page, NULL)) {
+                       enum lru_list lru = page_lru_base_type(page);
+
+                       VM_BUG_ON(PageActive(page));
+                       ClearPageUnevictable(page);
+                       __dec_zone_state(zone, NR_UNEVICTABLE);
+                       lruvec = mem_cgroup_lru_move_lists(zone, page,
+                                               LRU_UNEVICTABLE, lru);
+                       list_move(&page->lru, &lruvec->lists[lru]);
+                       __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
+                       pgrescued++;
                }
-               if (zone)
-                       spin_unlock_irq(&zone->lru_lock);
-               pagevec_release(&pvec);
-
-               count_vm_events(UNEVICTABLE_PGSCANNED, pg_scanned);
        }
 
+       if (zone) {
+               __count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
+               __count_vm_events(UNEVICTABLE_PGSCANNED, pgscanned);
+               spin_unlock_irq(&zone->lru_lock);
+       }
 }
+#endif /* CONFIG_SHMEM */
 
 static void warn_scan_unevictable_pages(void)
 {
index 845da3ee56a0d3966bdeb5ab627ef3dc49c8dead..9de93714213a1be51542af9f8ff94cc6f3ac9fea 100644 (file)
@@ -55,7 +55,7 @@
 
 #define AUTO_OFF_TIMEOUT 2000
 
-int enable_hs;
+bool enable_hs;
 
 static void hci_rx_work(struct work_struct *work);
 static void hci_cmd_work(struct work_struct *work);
index f963f6b1884fd96dc12a96743a2f04669194cb3a..5ba0c844d508cbe549788e2219b4dd9ab1383149 100644 (file)
@@ -146,7 +146,7 @@ void br_fdb_cleanup(unsigned long _data)
        unsigned long next_timer = jiffies + br->ageing_time;
        int i;
 
-       spin_lock_bh(&br->hash_lock);
+       spin_lock(&br->hash_lock);
        for (i = 0; i < BR_HASH_SIZE; i++) {
                struct net_bridge_fdb_entry *f;
                struct hlist_node *h, *n;
@@ -162,7 +162,7 @@ void br_fdb_cleanup(unsigned long _data)
                                next_timer = this_timer;
                }
        }
-       spin_unlock_bh(&br->hash_lock);
+       spin_unlock(&br->hash_lock);
 
        mod_timer(&br->gc_timer, round_jiffies_up(next_timer));
 }
index 61570ee76fe6eca7824431663917a32827c90c9b..82c57069415fe6644b1c64eff665846b87c63f7a 100644 (file)
@@ -59,8 +59,6 @@ struct cfcnfg *get_cfcnfg(struct net *net)
 {
        struct caif_net *caifn;
        caifn = net_generic(net, caif_net_id);
-       if (!caifn)
-               return NULL;
        return caifn->cfg;
 }
 EXPORT_SYMBOL(get_cfcnfg);
@@ -69,8 +67,6 @@ static struct caif_device_entry_list *caif_device_list(struct net *net)
 {
        struct caif_net *caifn;
        caifn = net_generic(net, caif_net_id);
-       if (!caifn)
-               return NULL;
        return &caifn->caifdevs;
 }
 
@@ -99,8 +95,6 @@ static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
        struct caif_device_entry *caifd;
 
        caifdevs = caif_device_list(dev_net(dev));
-       if (!caifdevs)
-               return NULL;
 
        caifd = kzalloc(sizeof(*caifd), GFP_KERNEL);
        if (!caifd)
@@ -120,8 +114,6 @@ static struct caif_device_entry *caif_get(struct net_device *dev)
        struct caif_device_entry_list *caifdevs =
            caif_device_list(dev_net(dev));
        struct caif_device_entry *caifd;
-       if (!caifdevs)
-               return NULL;
 
        list_for_each_entry_rcu(caifd, &caifdevs->list, list) {
                if (caifd->netdev == dev)
@@ -146,15 +138,17 @@ void caif_flow_cb(struct sk_buff *skb)
        spin_lock_bh(&caifd->flow_lock);
        send_xoff = caifd->xoff;
        caifd->xoff = 0;
-       if (!WARN_ON(caifd->xoff_skb_dtor == NULL)) {
-               WARN_ON(caifd->xoff_skb != skb);
-               dtor = caifd->xoff_skb_dtor;
-               caifd->xoff_skb = NULL;
-               caifd->xoff_skb_dtor = NULL;
-       }
+       dtor = caifd->xoff_skb_dtor;
+
+       if (WARN_ON(caifd->xoff_skb != skb))
+               skb = NULL;
+
+       caifd->xoff_skb = NULL;
+       caifd->xoff_skb_dtor = NULL;
+
        spin_unlock_bh(&caifd->flow_lock);
 
-       if (dtor)
+       if (dtor && skb)
                dtor(skb);
 
        if (send_xoff)
@@ -319,8 +313,6 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
        struct caif_device_entry_list *caifdevs;
 
        caifdevs = caif_device_list(dev_net(dev));
-       if (!cfg || !caifdevs)
-               return;
        caifd = caif_device_alloc(dev);
        if (!caifd)
                return;
@@ -372,8 +364,6 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
 
        cfg = get_cfcnfg(dev_net(dev));
        caifdevs = caif_device_list(dev_net(dev));
-       if (!cfg || !caifdevs)
-               return 0;
 
        caifd = caif_get(dev);
        if (caifd == NULL && dev->type != ARPHRD_CAIF)
@@ -505,9 +495,6 @@ static struct notifier_block caif_device_notifier = {
 static int caif_init_net(struct net *net)
 {
        struct caif_net *caifn = net_generic(net, caif_net_id);
-       if (WARN_ON(!caifn))
-               return -EINVAL;
-
        INIT_LIST_HEAD(&caifn->caifdevs.list);
        mutex_init(&caifn->caifdevs.lock);
 
@@ -525,9 +512,6 @@ static void caif_exit_net(struct net *net)
            caif_device_list(net);
        struct cfcnfg *cfg =  get_cfcnfg(net);
 
-       if (!cfg || !caifdevs)
-               return;
-
        rtnl_lock();
        mutex_lock(&caifdevs->lock);
 
@@ -567,7 +551,7 @@ static int __init caif_device_init(void)
 {
        int result;
 
-       result = register_pernet_device(&caif_net_ops);
+       result = register_pernet_subsys(&caif_net_ops);
 
        if (result)
                return result;
@@ -580,7 +564,7 @@ static int __init caif_device_init(void)
 
 static void __exit caif_device_exit(void)
 {
-       unregister_pernet_device(&caif_net_ops);
+       unregister_pernet_subsys(&caif_net_ops);
        unregister_netdevice_notifier(&caif_device_notifier);
        dev_remove_pack(&caif_packet_type);
 }
index 5fc9eca8cd4149b2cc102ea32b0ae131db4e8452..fd7cbf5aa8956732f51637eeab3f512e00743fcb 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/slab.h>
-#include <linux/netdevice.h>
 #include <linux/mii.h>
 #include <linux/usb.h>
 #include <linux/usb/usbnet.h>
@@ -27,7 +26,7 @@ MODULE_LICENSE("GPL");
 #define CFUSB_ALIGNMENT 4      /* Number of bytes to align. */
 #define CFUSB_MAX_HEADLEN (CFUSB_PAD_DESCR_SZ + CFUSB_ALIGNMENT-1)
 #define STE_USB_VID 0x04cc     /* USB Product ID for ST-Ericsson */
-#define STE_USB_PID_CAIF 0x2306        /* Product id for CAIF Modems */
+#define STE_USB_PID_CAIF 0x230f        /* Product id for CAIF Modems */
 
 struct cfusbl {
        struct cflayer layer;
index 598aafb4cb5169e799148ffd6b33d351b702be7d..ba9cfd47778aa808ff35939458f8c59a52e71a56 100644 (file)
@@ -309,7 +309,6 @@ int caif_connect_client(struct net *net, struct caif_connect_request *conn_req,
        int err;
        struct cfctrl_link_param param;
        struct cfcnfg *cfg = get_cfcnfg(net);
-       caif_assert(cfg != NULL);
 
        rcu_read_lock();
        err = caif_connect_req_to_link_param(cfg, conn_req, &param);
index 97f70e50ad3bb4ed806534fbc74d8a5230000acf..761ad9d6cc3b12fc4d6d8c10022e4c877559f5c8 100644 (file)
@@ -85,8 +85,6 @@ int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid)
        } else {
                pr_info("client%lld fsid %pU\n", ceph_client_id(client), fsid);
                memcpy(&client->fsid, fsid, sizeof(*fsid));
-               ceph_debugfs_client_init(client);
-               client->have_fsid = true;
        }
        return 0;
 }
index 0b62deae42bd88930f408e0473f13c11b0c252bd..1845cde2622740162fdb5e42a7663072d072ebf8 100644 (file)
@@ -8,8 +8,8 @@
 
 #include <linux/ceph/mon_client.h>
 #include <linux/ceph/libceph.h>
+#include <linux/ceph/debugfs.h>
 #include <linux/ceph/decode.h>
-
 #include <linux/ceph/auth.h>
 
 /*
@@ -340,8 +340,19 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
        client->monc.monmap = monmap;
        kfree(old);
 
+       if (!client->have_fsid) {
+               client->have_fsid = true;
+               mutex_unlock(&monc->mutex);
+               /*
+                * do debugfs initialization without mutex to avoid
+                * creating a locking dependency
+                */
+               ceph_debugfs_client_init(client);
+               goto out_unlocked;
+       }
 out:
        mutex_unlock(&monc->mutex);
+out_unlocked:
        wake_up_all(&client->auth_wq);
 }
 
index f494675471a91b7f093665e6098ee7b3a91e6e4e..115dee1d985d40c5998abd4ddd57ae82123127c3 100644 (file)
@@ -1887,6 +1887,23 @@ void skb_set_dev(struct sk_buff *skb, struct net_device *dev)
 EXPORT_SYMBOL(skb_set_dev);
 #endif /* CONFIG_NET_NS */
 
+static void skb_warn_bad_offload(const struct sk_buff *skb)
+{
+       static const netdev_features_t null_features = 0;
+       struct net_device *dev = skb->dev;
+       const char *driver = "";
+
+       if (dev && dev->dev.parent)
+               driver = dev_driver_string(dev->dev.parent);
+
+       WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
+            "gso_type=%d ip_summed=%d\n",
+            driver, dev ? &dev->features : &null_features,
+            skb->sk ? &skb->sk->sk_route_caps : &null_features,
+            skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
+            skb_shinfo(skb)->gso_type, skb->ip_summed);
+}
+
 /*
  * Invalidate hardware checksum when packet is to be mangled, and
  * complete checksum manually on outgoing path.
@@ -1900,8 +1917,8 @@ int skb_checksum_help(struct sk_buff *skb)
                goto out_set_summed;
 
        if (unlikely(skb_shinfo(skb)->gso_size)) {
-               /* Let GSO fix up the checksum. */
-               goto out_set_summed;
+               skb_warn_bad_offload(skb);
+               return -EINVAL;
        }
 
        offset = skb_checksum_start_offset(skb);
@@ -1961,16 +1978,7 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb,
        __skb_pull(skb, skb->mac_len);
 
        if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL)) {
-               struct net_device *dev = skb->dev;
-               struct ethtool_drvinfo info = {};
-
-               if (dev && dev->ethtool_ops && dev->ethtool_ops->get_drvinfo)
-                       dev->ethtool_ops->get_drvinfo(dev, &info);
-
-               WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d ip_summed=%d\n",
-                    info.driver, dev ? &dev->features : NULL,
-                    skb->sk ? &skb->sk->sk_route_caps : NULL,
-                    skb->len, skb->data_len, skb->ip_summed);
+               skb_warn_bad_offload(skb);
 
                if (skb_header_cloned(skb) &&
                    (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
index 921aa2b4b4158ab1aefab67c474a58065d43c77f..369b418945276e58c04a6f90201b90f7398e5dd0 100644 (file)
@@ -1311,6 +1311,7 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
        case ETHTOOL_GRXCSUM:
        case ETHTOOL_GTXCSUM:
        case ETHTOOL_GSG:
+       case ETHTOOL_GSSET_INFO:
        case ETHTOOL_GSTRINGS:
        case ETHTOOL_GTSO:
        case ETHTOOL_GPERMADDR:
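
ETHTOOL_GSSET_INFO only reports how many strings a string set contains, which is why it joins the read-only commands permitted without CAP_NET_ADMIN. A sketch of an unprivileged query ("eth0" is an assumed interface name):

    #include <linux/ethtool.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>

    int main(void)
    {
        int fd = socket(AF_INET, SOCK_DGRAM, 0);
        if (fd < 0) { perror("socket"); return 1; }

        /* Header plus room for one returned count. */
        struct ethtool_sset_info *info = calloc(1, sizeof(*info) + sizeof(__u32));
        if (!info) return 1;
        info->cmd = ETHTOOL_GSSET_INFO;
        info->sset_mask = 1ULL << ETH_SS_STATS;

        struct ifreq ifr;
        memset(&ifr, 0, sizeof(ifr));
        strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);   /* assumed interface name */
        ifr.ifr_data = (void *)info;

        if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
            perror("SIOCETHTOOL");
        else if (info->sset_mask & (1ULL << ETH_SS_STATS))
            printf("eth0: %u statistics strings\n", info->data[0]);
        else
            printf("eth0: ETH_SS_STATS not supported\n");

        free(info);
        return 0;
    }
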
index 0985b9b14b804737888a59591d242a550ddc0556..a225089df5b6693a715f95ab89de71be4a400338 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/skbuff.h>
+#include <linux/export.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/if_vlan.h>
index f3dbd4f596a4904a99650d18ae73c9a963126aa8..a1727cda03d7bec9b565de4647dd3cff754fcbe8 100644 (file)
@@ -929,7 +929,7 @@ static ssize_t bql_show_inflight(struct netdev_queue *queue,
 }
 
 static struct netdev_queue_attribute bql_inflight_attribute =
-       __ATTR(inflight, S_IRUGO | S_IWUSR, bql_show_inflight, NULL);
+       __ATTR(inflight, S_IRUGO, bql_show_inflight, NULL);
 
 #define BQL_ATTR(NAME, FIELD)                                          \
 static ssize_t bql_show_ ## NAME(struct netdev_queue *queue,           \
index aefcd7acbffa9ff0ae028cc0d5cb2eb662ced477..0e950fda9a0abc88ffaa9e3e62b2a11c023a7b8b 100644 (file)
@@ -30,6 +30,20 @@ EXPORT_SYMBOL(init_net);
 
 #define INITIAL_NET_GEN_PTRS   13 /* +1 for len +2 for rcu_head */
 
+static unsigned int max_gen_ptrs = INITIAL_NET_GEN_PTRS;
+
+static struct net_generic *net_alloc_generic(void)
+{
+       struct net_generic *ng;
+       size_t generic_size = offsetof(struct net_generic, ptr[max_gen_ptrs]);
+
+       ng = kzalloc(generic_size, GFP_KERNEL);
+       if (ng)
+               ng->len = max_gen_ptrs;
+
+       return ng;
+}
+
 static int net_assign_generic(struct net *net, int id, void *data)
 {
        struct net_generic *ng, *old_ng;
@@ -43,8 +57,7 @@ static int net_assign_generic(struct net *net, int id, void *data)
        if (old_ng->len >= id)
                goto assign;
 
-       ng = kzalloc(sizeof(struct net_generic) +
-                       id * sizeof(void *), GFP_KERNEL);
+       ng = net_alloc_generic();
        if (ng == NULL)
                return -ENOMEM;
 
@@ -59,7 +72,6 @@ static int net_assign_generic(struct net *net, int id, void *data)
         * the old copy for kfree after a grace period.
         */
 
-       ng->len = id;
        memcpy(&ng->ptr, &old_ng->ptr, old_ng->len * sizeof(void*));
 
        rcu_assign_pointer(net->gen, ng);
@@ -161,18 +173,6 @@ out_undo:
        goto out;
 }
 
-static struct net_generic *net_alloc_generic(void)
-{
-       struct net_generic *ng;
-       size_t generic_size = sizeof(struct net_generic) +
-               INITIAL_NET_GEN_PTRS * sizeof(void *);
-
-       ng = kzalloc(generic_size, GFP_KERNEL);
-       if (ng)
-               ng->len = INITIAL_NET_GEN_PTRS;
-
-       return ng;
-}
 
 #ifdef CONFIG_NET_NS
 static struct kmem_cache *net_cachep;
@@ -483,6 +483,7 @@ again:
                        }
                        return error;
                }
+               max_gen_ptrs = max_t(unsigned int, max_gen_ptrs, *ops->id);
        }
        error = __register_pernet_operations(list, ops);
        if (error) {
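
The idea in this file is to remember the largest pernet id ever registered (max_gen_ptrs) so that net_alloc_generic() can size every new namespace's pointer array up front instead of growing it id by id. A stripped-down userspace sketch of that sizing pattern (names and the claimed id are illustrative):

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned int max_gen_ptrs = 13;        /* mirrors INITIAL_NET_GEN_PTRS */

    static void register_pernet_id(unsigned int id)
    {
        if (id > max_gen_ptrs)                    /* remember the largest id seen */
            max_gen_ptrs = id;
    }

    static void **alloc_generic(void)
    {
        /* every new "namespace" gets room for all ids registered so far */
        return calloc(max_gen_ptrs, sizeof(void *));
    }

    int main(void)
    {
        register_pernet_id(20);                   /* a late subsystem claims id 20 */
        void **ng = alloc_generic();
        if (!ng) return 1;
        printf("slots: %u, slot 20 starts out %p\n", max_gen_ptrs, ng[19]);
        free(ng);
        return 0;
    }
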
index 65f80c7b1656b81fd5646c8caf24016ce3782ffa..4d8ce93cd5039b191b468fcb87f23ee0457c5cc4 100644 (file)
@@ -767,8 +767,8 @@ done:
        return i;
 }
 
-static unsigned long num_arg(const char __user * user_buffer,
-                            unsigned long maxlen, unsigned long *num)
+static long num_arg(const char __user *user_buffer, unsigned long maxlen,
+                               unsigned long *num)
 {
        int i;
        *num = 0;
index f16444bc6cbb1ca25e57df0d18e7d267ceedf1f9..65aebd45002786f4c00f6a5b4bc6ac45026f7f86 100644 (file)
@@ -1509,6 +1509,9 @@ errout:
 
        if (send_addr_notify)
                call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+       min_ifinfo_dump_size = max_t(u16, if_nlmsg_size(dev),
+                                    min_ifinfo_dump_size);
+
        return err;
 }
 
index 6fd44606fdd130a12712ac3f67ea6f92befab574..99b2596531bbc2a9a714bf262a8c79c61f1726b5 100644 (file)
@@ -46,7 +46,7 @@ __u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
 
        memcpy(hash, saddr, 16);
        for (i = 0; i < 4; i++)
-               secret[i] = net_secret[i] + daddr[i];
+               secret[i] = net_secret[i] + (__force u32)daddr[i];
        secret[4] = net_secret[4] +
                (((__force u16)sport << 16) + (__force u16)dport);
        for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
index 5c5af9988f941c9ee4eef4769127d3ce21f2f8cd..3e81fd2e3c75ca01ed972e98f3bc3f344fe5bfe6 100644 (file)
@@ -1827,7 +1827,7 @@ suppress_allocation:
        /* Alas. Undo changes. */
        sk->sk_forward_alloc -= amt * SK_MEM_QUANTUM;
 
-       sk_memory_allocated_sub(sk, amt, parent_status);
+       sk_memory_allocated_sub(sk, amt);
 
        return 0;
 }
@@ -1840,7 +1840,7 @@ EXPORT_SYMBOL(__sk_mem_schedule);
 void __sk_mem_reclaim(struct sock *sk)
 {
        sk_memory_allocated_sub(sk,
-                               sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT, 0);
+                               sk->sk_forward_alloc >> SK_MEM_QUANTUM_SHIFT);
        sk->sk_forward_alloc &= SK_MEM_QUANTUM - 1;
 
        if (sk_under_memory_pressure(sk) &&
index 2e4e24476c4c3ba1b1abb1463ff2f4123180ccad..19d66cefd7d34beae89c617a10f195f2f097770f 100644 (file)
@@ -123,11 +123,14 @@ again:
                                                smallest_size = tb->num_owners;
                                                smallest_rover = rover;
                                                if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
-                                                       spin_unlock(&head->lock);
                                                        snum = smallest_rover;
-                                                       goto have_snum;
+                                                       goto tb_found;
                                                }
                                        }
+                                       if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
+                                               snum = rover;
+                                               goto tb_found;
+                                       }
                                        goto next;
                                }
                        break;
index 86f13c67ea8579d32a4f076063ea1777c75e166b..bf4a9c4808e1faaa7461a472f224a4d3c8edf379 100644 (file)
@@ -136,7 +136,7 @@ static int addr_compare(const struct inetpeer_addr *a,
        for (i = 0; i < n; i++) {
                if (a->addr.a6[i] == b->addr.a6[i])
                        continue;
-               if (a->addr.a6[i] < b->addr.a6[i])
+               if ((__force u32)a->addr.a6[i] < (__force u32)b->addr.a6[i])
                        return -1;
                return 1;
        }
@@ -447,6 +447,7 @@ relookup:
                p->rate_last = 0;
                p->pmtu_expires = 0;
                p->pmtu_orig = 0;
+               p->redirect_genid = 0;
                memset(&p->redirect_learned, 0, sizeof(p->redirect_learned));
 
 
index 2b53a1f7abf6bf57509279cff831b7082040e558..6b3ca5ba4450599e15ebe8b3ed5597a51051821f 100644 (file)
@@ -422,6 +422,10 @@ static struct ip_tunnel *ipgre_tunnel_locate(struct net *net,
        if (register_netdevice(dev) < 0)
                goto failed_free;
 
+       /* Can use a lockless transmit, unless we generate output sequences */
+       if (!(nt->parms.o_flags & GRE_SEQ))
+               dev->features |= NETIF_F_LLTX;
+
        dev_hold(dev);
        ipgre_tunnel_link(ign, nt);
        return nt;
index 7e4ec9fc2cef3c38bd0e659a8c36548ff25fe429..6e412a60a91f27acb3ea2241a860c6a18401fed8 100644 (file)
@@ -141,7 +141,7 @@ __be32 ic_servaddr = NONE;  /* Boot server IP address */
 __be32 root_server_addr = NONE;        /* Address of NFS server */
 u8 root_server_path[256] = { 0, };     /* Path to mount as root */
 
-u32 ic_dev_xid;                /* Device under configuration */
+__be32 ic_dev_xid;             /* Device under configuration */
 
 /* vendor class identifier */
 static char vendor_class_identifier[253] __initdata;
@@ -859,9 +859,9 @@ static int __init ic_bootp_string(char *dest, char *src, int len, int max)
  */
 static void __init ic_do_bootp_ext(u8 *ext)
 {
-       u8 servers;
-       int i;
-       u16 mtu;
+       u8 servers;
+       int i;
+       __be16 mtu;
 
 #ifdef IPCONFIG_DEBUG
        u8 *c;
index 43d4c3b223699aee36de3c3c79c6d82b5236a4c0..aea5a199c37a341be3fab08500f1c35a34cbf215 100644 (file)
@@ -140,13 +140,14 @@ static void ping_v4_unhash(struct sock *sk)
                write_lock_bh(&ping_table.lock);
                hlist_nulls_del(&sk->sk_nulls_node);
                sock_put(sk);
-               isk->inet_num = isk->inet_sport = 0;
+               isk->inet_num = 0;
+               isk->inet_sport = 0;
                sock_prot_inuse_add(sock_net(sk), sk->sk_prot, -1);
                write_unlock_bh(&ping_table.lock);
        }
 }
 
-static struct sock *ping_v4_lookup(struct net *net, u32 saddr, u32 daddr,
+static struct sock *ping_v4_lookup(struct net *net, __be32 saddr, __be32 daddr,
                                   u16 ident, int dif)
 {
        struct hlist_nulls_head *hslot = ping_hashslot(&ping_table, net, ident);
@@ -154,15 +155,15 @@ static struct sock *ping_v4_lookup(struct net *net, u32 saddr, u32 daddr,
        struct inet_sock *isk;
        struct hlist_nulls_node *hnode;
 
-       pr_debug("try to find: num = %d, daddr = %ld, dif = %d\n",
-                        (int)ident, (unsigned long)daddr, dif);
+       pr_debug("try to find: num = %d, daddr = %pI4, dif = %d\n",
+                        (int)ident, &daddr, dif);
        read_lock_bh(&ping_table.lock);
 
        ping_portaddr_for_each_entry(sk, hnode, hslot) {
                isk = inet_sk(sk);
 
-               pr_debug("found: %p: num = %d, daddr = %ld, dif = %d\n", sk,
-                        (int)isk->inet_num, (unsigned long)isk->inet_rcv_saddr,
+               pr_debug("found: %p: num = %d, daddr = %pI4, dif = %d\n", sk,
+                        (int)isk->inet_num, &isk->inet_rcv_saddr,
                         sk->sk_bound_dev_if);
 
                pr_debug("iterate\n");
@@ -254,7 +255,7 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                sk, addr->sin_addr.s_addr, ntohs(addr->sin_port));
 
        chk_addr_ret = inet_addr_type(sock_net(sk), addr->sin_addr.s_addr);
-       if (addr->sin_addr.s_addr == INADDR_ANY)
+       if (addr->sin_addr.s_addr == htonl(INADDR_ANY))
                chk_addr_ret = RTN_LOCAL;
 
        if ((sysctl_ip_nonlocal_bind == 0 &&
@@ -278,9 +279,9 @@ static int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
                goto out;
        }
 
-       pr_debug("after bind(): num = %d, daddr = %ld, dif = %d\n",
+       pr_debug("after bind(): num = %d, daddr = %pI4, dif = %d\n",
                (int)isk->inet_num,
-               (unsigned long) isk->inet_rcv_saddr,
+               &isk->inet_rcv_saddr,
                (int)sk->sk_bound_dev_if);
 
        err = 0;
@@ -407,7 +408,7 @@ out:
 struct pingfakehdr {
        struct icmphdr icmph;
        struct iovec *iov;
-       u32 wcheck;
+       __wsum wcheck;
 };
 
 static int ping_getfrag(void *from, char * to,
@@ -459,7 +460,7 @@ static int ping_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        struct rtable *rt = NULL;
        struct ip_options_data opt_copy;
        int free = 0;
-       u32 saddr, daddr, faddr;
+       __be32 saddr, daddr, faddr;
        u8  tos;
        int err;
 
@@ -696,8 +697,8 @@ void ping_rcv(struct sk_buff *skb)
        struct net *net = dev_net(skb->dev);
        struct iphdr *iph = ip_hdr(skb);
        struct icmphdr *icmph = icmp_hdr(skb);
-       u32 saddr = iph->saddr;
-       u32 daddr = iph->daddr;
+       __be32 saddr = iph->saddr;
+       __be32 daddr = iph->daddr;
 
        /* We assume the packet has already been checked by icmp_rcv */
 
index 3569d8ecaeac55e546912729322f1b15160d2c0c..6afc807ee2ad66991f0e41d5a46602c4f3729310 100644 (file)
@@ -216,7 +216,6 @@ static const struct snmp_mib snmp4_net_list[] = {
        SNMP_MIB_ITEM("TCPPartialUndo", LINUX_MIB_TCPPARTIALUNDO),
        SNMP_MIB_ITEM("TCPDSACKUndo", LINUX_MIB_TCPDSACKUNDO),
        SNMP_MIB_ITEM("TCPLossUndo", LINUX_MIB_TCPLOSSUNDO),
-       SNMP_MIB_ITEM("TCPLoss", LINUX_MIB_TCPLOSS),
        SNMP_MIB_ITEM("TCPLostRetransmit", LINUX_MIB_TCPLOSTRETRANSMIT),
        SNMP_MIB_ITEM("TCPRenoFailures", LINUX_MIB_TCPRENOFAILURES),
        SNMP_MIB_ITEM("TCPSackFailures", LINUX_MIB_TCPSACKFAILURES),
index 4aa7e9dc0cbb961ab75fedea9944304a609dd2fc..4cb9cd2f2c390fc14289f918da8098af0549f5f2 100644 (file)
@@ -814,6 +814,7 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
 
        net->ipv4.sysctl_rt_cache_rebuild_count = 4;
 
+       tcp_init_mem(net);
        limit = nr_free_buffer_pages() / 8;
        limit = max(limit, 128UL);
        net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
index 9bcdec3ad772171a6aa71584b86bc5c1c998db83..06373b4a449a158451d3c99ec482b80e65a0ab87 100644 (file)
@@ -3216,6 +3216,16 @@ static int __init set_thash_entries(char *str)
 }
 __setup("thash_entries=", set_thash_entries);
 
+void tcp_init_mem(struct net *net)
+{
+       /* Set per-socket limits to no more than 1/128 the pressure threshold */
+       unsigned long limit = nr_free_buffer_pages() / 8;
+       limit = max(limit, 128UL);
+       net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3;
+       net->ipv4.sysctl_tcp_mem[1] = limit;
+       net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2;
+}
+
 void __init tcp_init(void)
 {
        struct sk_buff *skb = NULL;
@@ -3276,9 +3286,9 @@ void __init tcp_init(void)
        sysctl_tcp_max_orphans = cnt / 2;
        sysctl_max_syn_backlog = max(128, cnt / 256);
 
-       /* Set per-socket limits to no more than 1/128 the pressure threshold */
-       limit = ((unsigned long)init_net.ipv4.sysctl_tcp_mem[1])
-               << (PAGE_SHIFT - 7);
+       tcp_init_mem(&init_net);
+       limit = nr_free_buffer_pages() / 8;
+       limit = max(limit, 128UL);
        max_share = min(4UL*1024*1024, limit);
 
        sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
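
tcp_init_mem() derives tcp_mem from the free page count at init time. As a worked example, with an assumed nr_free_buffer_pages() of 1,048,576 pages the limit is 131,072 pages and the three thresholds come out to 98,304 / 131,072 / 196,608 pages; the same arithmetic in plain C:

    #include <stdio.h>

    int main(void)
    {
        unsigned long free_pages = 1048576UL;   /* assumed nr_free_buffer_pages() */
        unsigned long limit = free_pages / 8;
        unsigned long tcp_mem[3];

        if (limit < 128UL)
            limit = 128UL;

        tcp_mem[0] = limit / 4 * 3;   /* below this: no pressure   ->  98304 */
        tcp_mem[1] = limit;           /* pressure threshold        -> 131072 */
        tcp_mem[2] = tcp_mem[0] * 2;  /* hard limit                -> 196608 */

        printf("tcp_mem = %lu %lu %lu pages\n", tcp_mem[0], tcp_mem[1], tcp_mem[2]);
        return 0;
    }
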
index 6187eb4d1dcfe1e66dc90a799c50df496c2d8974..f45e1c24244091a750f5f34ca0fe9e9038731ca5 100644 (file)
@@ -63,7 +63,6 @@ static inline void bictcp_reset(struct bictcp *ca)
 {
        ca->cnt = 0;
        ca->last_max_cwnd = 0;
-       ca->loss_cwnd = 0;
        ca->last_cwnd = 0;
        ca->last_time = 0;
        ca->epoch_start = 0;
@@ -72,7 +71,11 @@ static inline void bictcp_reset(struct bictcp *ca)
 
 static void bictcp_init(struct sock *sk)
 {
-       bictcp_reset(inet_csk_ca(sk));
+       struct bictcp *ca = inet_csk_ca(sk);
+
+       bictcp_reset(ca);
+       ca->loss_cwnd = 0;
+
        if (initial_ssthresh)
                tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
 }
@@ -127,7 +130,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
        }
 
        /* if in slow start or link utilization is very low */
-       if (ca->loss_cwnd == 0) {
+       if (ca->last_max_cwnd == 0) {
                if (ca->cnt > 20) /* increase cwnd 5% per RTT */
                        ca->cnt = 20;
        }
@@ -185,7 +188,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct bictcp *ca = inet_csk_ca(sk);
-       return max(tp->snd_cwnd, ca->last_max_cwnd);
+       return max(tp->snd_cwnd, ca->loss_cwnd);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)
index f376b05cca818fd9496fe0cb8540a051e3e7e50a..a9077f441cb27b693d8ad1d710e4dba67aa93ebb 100644 (file)
@@ -107,7 +107,6 @@ static inline void bictcp_reset(struct bictcp *ca)
 {
        ca->cnt = 0;
        ca->last_max_cwnd = 0;
-       ca->loss_cwnd = 0;
        ca->last_cwnd = 0;
        ca->last_time = 0;
        ca->bic_origin_point = 0;
@@ -142,7 +141,10 @@ static inline void bictcp_hystart_reset(struct sock *sk)
 
 static void bictcp_init(struct sock *sk)
 {
-       bictcp_reset(inet_csk_ca(sk));
+       struct bictcp *ca = inet_csk_ca(sk);
+
+       bictcp_reset(ca);
+       ca->loss_cwnd = 0;
 
        if (hystart)
                bictcp_hystart_reset(sk);
@@ -275,7 +277,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
         * The initial growth of cubic function may be too conservative
         * when the available bandwidth is still unknown.
         */
-       if (ca->loss_cwnd == 0 && ca->cnt > 20)
+       if (ca->last_max_cwnd == 0 && ca->cnt > 20)
                ca->cnt = 20;   /* increase cwnd 5% per RTT */
 
        /* TCP Friendly */
@@ -342,7 +344,7 @@ static u32 bictcp_undo_cwnd(struct sock *sk)
 {
        struct bictcp *ca = inet_csk_ca(sk);
 
-       return max(tcp_sk(sk)->snd_cwnd, ca->last_max_cwnd);
+       return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)
index 2877c3e0958777dff87612bb7c057df451f5b57a..976034f823206fcf92e574f082b55de97421ce81 100644 (file)
@@ -105,7 +105,6 @@ int sysctl_tcp_abc __read_mostly;
 #define FLAG_SYN_ACKED         0x10 /* This ACK acknowledged SYN.              */
 #define FLAG_DATA_SACKED       0x20 /* New SACK.                               */
 #define FLAG_ECE               0x40 /* ECE in this ACK                         */
-#define FLAG_DATA_LOST         0x80 /* SACK detected data lossage.             */
 #define FLAG_SLOWPATH          0x100 /* Do not skip RFC checks for window update.*/
 #define FLAG_ONLY_ORIG_SACKED  0x200 /* SACKs only non-rexmit sent before RTO */
 #define FLAG_SND_UNA_ADVANCED  0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
@@ -1040,13 +1039,11 @@ static void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp,
  * These 6 states form finite state machine, controlled by the following events:
  * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue())
  * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue())
- * 3. Loss detection event of one of three flavors:
+ * 3. Loss detection event of two flavors:
  *     A. Scoreboard estimator decided the packet is lost.
  *        A'. Reno "three dupacks" marks head of queue lost.
- *        A''. Its FACK modfication, head until snd.fack is lost.
- *     B. SACK arrives sacking data transmitted after never retransmitted
- *        hole was sent out.
- *     C. SACK arrives sacking SND.NXT at the moment, when the
+ *        A''. Its FACK modification, head until snd.fack is lost.
+ *     B. SACK arrives sacking SND.NXT at the moment, when the
  *        segment was retransmitted.
  * 4. D-SACK added new rule: D-SACK changes any tag to S.
  *
@@ -1153,7 +1150,7 @@ static int tcp_is_sackblock_valid(struct tcp_sock *tp, int is_dsack,
 }
 
 /* Check for lost retransmit. This superb idea is borrowed from "ratehalving".
- * Event "C". Later note: FACK people cheated me again 8), we have to account
+ * Event "B". Later note: FACK people cheated me again 8), we have to account
  * for reordering! Ugly, but should help.
  *
  * Search retransmitted skbs from write_queue that were sent when snd_nxt was
@@ -1844,10 +1841,6 @@ tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb,
                if (found_dup_sack && ((i + 1) == first_sack_index))
                        next_dup = &sp[i + 1];
 
-               /* Event "B" in the comment above. */
-               if (after(end_seq, tp->high_seq))
-                       state.flag |= FLAG_DATA_LOST;
-
                /* Skip too early cached blocks */
                while (tcp_sack_cache_ok(tp, cache) &&
                       !before(start_seq, cache->end_seq))
@@ -2515,8 +2508,11 @@ static void tcp_timeout_skbs(struct sock *sk)
        tcp_verify_left_out(tp);
 }
 
-/* Mark head of queue up as lost. With RFC3517 SACK, the packets is
- * is against sacked "cnt", otherwise it's against facked "cnt"
+/* Detect loss in event "A" above by marking head of queue up as lost.
+ * For FACK or non-SACK(Reno) senders, the first "packets" number of segments
+ * are considered lost. For RFC3517 SACK, a segment is considered lost if it
+ * has at least tp->reordering SACKed segments above it; "packets" refers to
+ * the maximum SACKed segments to pass before reaching this limit.
  */
 static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
 {
@@ -2525,6 +2521,8 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
        int cnt, oldcnt;
        int err;
        unsigned int mss;
+       /* Use SACK to deduce losses of new sequences sent during recovery */
+       const u32 loss_high = tcp_is_sack(tp) ?  tp->snd_nxt : tp->high_seq;
 
        WARN_ON(packets > tp->packets_out);
        if (tp->lost_skb_hint) {
@@ -2546,7 +2544,7 @@ static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head)
                tp->lost_skb_hint = skb;
                tp->lost_cnt_hint = cnt;
 
-               if (after(TCP_SKB_CB(skb)->end_seq, tp->high_seq))
+               if (after(TCP_SKB_CB(skb)->end_seq, loss_high))
                        break;
 
                oldcnt = cnt;
@@ -3033,19 +3031,10 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
        if (tcp_check_sack_reneging(sk, flag))
                return;
 
-       /* C. Process data loss notification, provided it is valid. */
-       if (tcp_is_fack(tp) && (flag & FLAG_DATA_LOST) &&
-           before(tp->snd_una, tp->high_seq) &&
-           icsk->icsk_ca_state != TCP_CA_Open &&
-           tp->fackets_out > tp->reordering) {
-               tcp_mark_head_lost(sk, tp->fackets_out - tp->reordering, 0);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPLOSS);
-       }
-
-       /* D. Check consistency of the current state. */
+       /* C. Check consistency of the current state. */
        tcp_verify_left_out(tp);
 
-       /* E. Check state exit conditions. State can be terminated
+       /* D. Check state exit conditions. State can be terminated
         *    when high_seq is ACKed. */
        if (icsk->icsk_ca_state == TCP_CA_Open) {
                WARN_ON(tp->retrans_out != 0);
@@ -3077,7 +3066,7 @@ static void tcp_fastretrans_alert(struct sock *sk, int pkts_acked,
                }
        }
 
-       /* F. Process state. */
+       /* E. Process state. */
        switch (icsk->icsk_ca_state) {
        case TCP_CA_Recovery:
                if (!(flag & FLAG_SND_UNA_ADVANCED)) {
index 1eb4ad57670eb0f47c4a3ecde927819cb83ef12f..337ba4cca05214637621988cd19b423e77b05fc8 100644 (file)
@@ -631,7 +631,7 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
        arg.iov[0].iov_len  = sizeof(rep.th);
 
 #ifdef CONFIG_TCP_MD5SIG
-       key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->daddr) : NULL;
+       key = sk ? tcp_v4_md5_do_lookup(sk, ip_hdr(skb)->saddr) : NULL;
        if (key) {
                rep.opt[0] = htonl((TCPOPT_NOP << 24) |
                                   (TCPOPT_NOP << 16) |
index 8c8de2780c7a7add9e91805300824e7182d40f28..4ff3b6dc74fc013b00720443587e2f40d2f1bbbf 100644 (file)
@@ -1141,11 +1141,9 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
        sk_mem_uncharge(sk, len);
        sock_set_flag(sk, SOCK_QUEUE_SHRUNK);
 
-       /* Any change of skb->len requires recalculation of tso
-        * factor and mss.
-        */
+       /* Any change of skb->len requires recalculation of tso factor. */
        if (tcp_skb_pcount(skb) > 1)
-               tcp_set_skb_tso_segs(sk, skb, tcp_current_mss(sk));
+               tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb));
 
        return 0;
 }
index e5e18cb8a58686a2331480c53edac2a77b901e8c..8a949f19deb6dc93542396138004a5a6b2bfeeab 100644 (file)
@@ -15,7 +15,6 @@
 #include <linux/udp.h>
 #include <net/udp.h>
 #include <net/udplite.h>
-#include <linux/inet_diag.h>
 #include <linux/sock_diag.h>
 
 static int sk_diag_dump(struct sock *sk, struct sk_buff *skb,
index a225d5ee3c2fc877f25e7af417c58de124fe8cb9..c02280a4d126980540daac44a8b541b92c0b16f0 100644 (file)
@@ -502,29 +502,31 @@ static void addrconf_forward_change(struct net *net, __s32 newf)
        rcu_read_unlock();
 }
 
-static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int old)
+static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
 {
        struct net *net;
+       int old;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
 
        net = (struct net *)table->extra2;
-       if (p == &net->ipv6.devconf_dflt->forwarding)
-               return 0;
+       old = *p;
+       *p = newf;
 
-       if (!rtnl_trylock()) {
-               /* Restore the original values before restarting */
-               *p = old;
-               return restart_syscall();
+       if (p == &net->ipv6.devconf_dflt->forwarding) {
+               rtnl_unlock();
+               return 0;
        }
 
        if (p == &net->ipv6.devconf_all->forwarding) {
-               __s32 newf = net->ipv6.devconf_all->forwarding;
                net->ipv6.devconf_dflt->forwarding = newf;
                addrconf_forward_change(net, newf);
-       } else if ((!*p) ^ (!old))
+       } else if ((!newf) ^ (!old))
                dev_forward_change((struct inet6_dev *)table->extra1);
        rtnl_unlock();
 
-       if (*p)
+       if (newf)
                rt6_purge_dflt_routers(net);
        return 1;
 }
@@ -4260,9 +4262,17 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
        int *valp = ctl->data;
        int val = *valp;
        loff_t pos = *ppos;
+       ctl_table lctl;
        int ret;
 
-       ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+       /*
+        * ctl->data points to idev->cnf.forwarding, we should
+        * not modify it until we get the rtnl lock.
+        */
+       lctl = *ctl;
+       lctl.data = &val;
+
+       ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
 
        if (write)
                ret = addrconf_fixup_forwarding(ctl, valp, val);
@@ -4300,26 +4310,27 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
        rcu_read_unlock();
 }
 
-static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int old)
+static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
 {
        struct net *net;
+       int old;
+
+       if (!rtnl_trylock())
+               return restart_syscall();
 
        net = (struct net *)table->extra2;
+       old = *p;
+       *p = newf;
 
-       if (p == &net->ipv6.devconf_dflt->disable_ipv6)
+       if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
+               rtnl_unlock();
                return 0;
-
-       if (!rtnl_trylock()) {
-               /* Restore the original values before restarting */
-               *p = old;
-               return restart_syscall();
        }
 
        if (p == &net->ipv6.devconf_all->disable_ipv6) {
-               __s32 newf = net->ipv6.devconf_all->disable_ipv6;
                net->ipv6.devconf_dflt->disable_ipv6 = newf;
                addrconf_disable_change(net, newf);
-       } else if ((!*p) ^ (!old))
+       } else if ((!newf) ^ (!old))
                dev_disable_change((struct inet6_dev *)table->extra1);
 
        rtnl_unlock();
@@ -4333,9 +4344,17 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
        int *valp = ctl->data;
        int val = *valp;
        loff_t pos = *ppos;
+       ctl_table lctl;
        int ret;
 
-       ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+       /*
+        * ctl->data points to idev->cnf.disable_ipv6, we should
+        * not modify it until we get the rtnl lock.
+        */
+       lctl = *ctl;
+       lctl.data = &val;
+
+       ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
 
        if (write)
                ret = addrconf_disable_ipv6(ctl, valp, val);
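Both sysctl handlers in the hunks above (addrconf_sysctl_forward and addrconf_sysctl_disable) use the same trick: proc_dointvec() is run on a stack copy of the ctl_table whose ->data points at a local variable, so the shared per-device field is never written before rtnl_lock() is taken; addrconf_fixup_forwarding()/addrconf_disable_ipv6() then commit and propagate the value under the lock. A minimal userspace sketch of that commit-under-lock shape, with hypothetical names and a pthread mutex standing in for the RTNL lock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t conf_lock = PTHREAD_MUTEX_INITIALIZER;
static int forwarding;                  /* shared knob, written only under conf_lock */

/* Parse into a stack variable first, then commit and propagate under the
 * lock -- mirroring the lctl/val copy in addrconf_sysctl_forward(). */
static int write_forwarding(const char *buf)
{
        int val = atoi(buf);            /* stand-in for proc_dointvec() on the local copy */
        int old;

        pthread_mutex_lock(&conf_lock); /* stand-in for rtnl_lock() */
        old = forwarding;
        forwarding = val;
        if (!!old != !!val)
                printf("propagate change to per-device state here\n");
        pthread_mutex_unlock(&conf_lock);
        return 0;
}

int main(void)
{
        write_forwarding("1");
        printf("forwarding=%d\n", forwarding);
        return 0;
}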
index ae08aee1773c678187f8be84414f95ac472aa426..251e7cd75e89787f5346037a278bfd7e5b8c8ced 100644 (file)
@@ -575,7 +575,7 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
        }
        if (np->rxopt.bits.rxorigdstaddr) {
                struct sockaddr_in6 sin6;
-               u16 *ports = (u16 *) skb_transport_header(skb);
+               __be16 *ports = (__be16 *) skb_transport_header(skb);
 
                if (skb_transport_offset(skb) + 4 <= skb->len) {
                        /* All current transport protocols have the port numbers in the
index fdeb6d03da812d136874fccc870b90c69efee7b1..da2e92d05c15a5052ea2cc19ba9bfc5751c23cf6 100644 (file)
@@ -237,8 +237,8 @@ static int snmp6_dev_seq_show(struct seq_file *seq, void *v)
        struct inet6_dev *idev = (struct inet6_dev *)seq->private;
 
        seq_printf(seq, "%-32s\t%u\n", "ifIndex", idev->dev->ifindex);
-       snmp6_seq_show_item(seq, (void __percpu **)idev->stats.ipv6, NULL,
-                           snmp6_ipstats_list);
+       snmp6_seq_show_item64(seq, (void __percpu **)idev->stats.ipv6,
+                           snmp6_ipstats_list, offsetof(struct ipstats_mib, syncp));
        snmp6_seq_show_item(seq, NULL, idev->stats.icmpv6dev->mibs,
                            snmp6_icmp6_list);
        snmp6_seq_show_icmpv6msg(seq, idev->stats.icmpv6msgdev->mibs);
index 07361dfa80852cbbe4db66027f8da5ef13ade4c1..8c2e3ab58f2af211c04f17c337929bd2802cbe27 100644 (file)
@@ -1091,6 +1091,7 @@ struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
        else {
                neigh = ip6_neigh_lookup(&rt->dst, &fl6->daddr);
                if (IS_ERR(neigh)) {
+                       in6_dev_put(idev);
                        dst_free(&rt->dst);
                        return ERR_CAST(neigh);
                }
index 906c7ca43542e020c53759ec554f952f20b9d1e4..3edd05ae4388741176949e9c72f3543fa8d52c87 100644 (file)
@@ -1083,7 +1083,7 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
 
 #ifdef CONFIG_TCP_MD5SIG
        if (sk)
-               key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr);
+               key = tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->saddr);
 #endif
 
        if (th->ack)
index d21e7ebd91ca5b846b076adb4aeec53a6768d3e5..55670ec3cd0f916143759cbc73320cf85b7ef196 100644 (file)
@@ -393,11 +393,6 @@ static int l2tp_ip_backlog_recv(struct sock *sk, struct sk_buff *skb)
 {
        int rc;
 
-       if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
-               goto drop;
-
-       nf_reset(skb);
-
        /* Charge it to the socket, dropping if the queue is full. */
        rc = sock_queue_rcv_skb(sk, skb);
        if (rc < 0)
index a18e6c3d36e37e699089ed5e0910c857da073d1c..b9bef2c750267cfd0adeaf918c1cc5503461d3db 100644 (file)
@@ -713,6 +713,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
        struct sk_buff *skb = NULL;
        struct sock *sk = sock->sk;
        struct llc_sock *llc = llc_sk(sk);
+       unsigned long cpu_flags;
        size_t copied = 0;
        u32 peek_seq = 0;
        u32 *seq;
@@ -838,7 +839,9 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
                        goto copy_uaddr;
 
                if (!(flags & MSG_PEEK)) {
+                       spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
                        sk_eat_skb(sk, skb, 0);
+                       spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
                        *seq = 0;
                }
 
@@ -859,7 +862,9 @@ copy_uaddr:
                llc_cmsg_rcv(msg, skb);
 
        if (!(flags & MSG_PEEK)) {
+                       spin_lock_irqsave(&sk->sk_receive_queue.lock, cpu_flags);
                        sk_eat_skb(sk, skb, 0);
+                       spin_unlock_irqrestore(&sk->sk_receive_queue.lock, cpu_flags);
                        *seq = 0;
        }
 
index e60df48fa4d4d235016398706bf3b32e5633c1e0..296620d6ca0c0c2388f05ffaffd9c5d832eb3cbd 100644 (file)
@@ -791,7 +791,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
                if (set & BIT(NL80211_STA_FLAG_AUTHORIZED))
                        ret = sta_info_move_state_checked(sta,
                                        IEEE80211_STA_AUTHORIZED);
-               else
+               else if (test_sta_flag(sta, WLAN_STA_AUTHORIZED))
                        ret = sta_info_move_state_checked(sta,
                                        IEEE80211_STA_ASSOC);
                if (ret)
index 38e6101190d9a51eded895cbf2b7db6d5b09a0b6..59edcd95a58dbcec31833568f9d5f8bd8893da09 100644 (file)
@@ -225,9 +225,9 @@ KEY_OPS(key);
                            key, &key_##name##_ops);
 
 void ieee80211_debugfs_key_add(struct ieee80211_key *key)
-  {
+{
        static int keycount;
-       char buf[50];
+       char buf[100];
        struct sta_info *sta;
 
        if (!key->local->debugfs.keys)
@@ -244,7 +244,8 @@ void ieee80211_debugfs_key_add(struct ieee80211_key *key)
 
        sta = key->sta;
        if (sta) {
-               sprintf(buf, "../../stations/%pM", sta->sta.addr);
+               sprintf(buf, "../../netdev:%s/stations/%pM",
+                       sta->sdata->name, sta->sta.addr);
                key->debugfs.stalink =
                        debugfs_create_symlink("station", key->debugfs.dir, buf);
        }
index b3d76b756cd55e4c8e513bfa506c336ab77e667b..a4643969a13b22524a7f90f0304c18d917f025be 100644 (file)
@@ -106,6 +106,7 @@ static void __ieee80211_sta_join_ibss(struct ieee80211_sub_if_data *sdata,
 
        sdata->drop_unencrypted = capability & WLAN_CAPABILITY_PRIVACY ? 1 : 0;
 
+       local->oper_channel = chan;
        channel_type = ifibss->channel_type;
        if (channel_type > NL80211_CHAN_HT20 &&
            !cfg80211_can_beacon_sec_chan(local->hw.wiphy, chan, channel_type))
index e47768cb8cb3b18eb98e3e16abc542a3735ad9e3..01a21c2f6ab37df336f5f69c55f33de4e0a698f3 100644 (file)
@@ -1314,6 +1314,7 @@ u32 __ieee80211_recalc_idle(struct ieee80211_local *local)
                        continue;
                }
                /* count everything else */
+               sdata->vif.bss_conf.idle = false;
                count++;
        }
 
index 73abb7524b2cee3ce83d122efe0dcbbda7257f85..54df1b2bafd2882454bc71b3a018161f0f83e5e3 100644 (file)
@@ -119,12 +119,12 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
        int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
                      sizeof(mgmt->u.action.u.mesh_action);
 
-       skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+       skb = dev_alloc_skb(local->tx_headroom +
                            hdr_len +
                            2 + 37); /* max HWMP IE */
        if (!skb)
                return -1;
-       skb_reserve(skb, local->hw.extra_tx_headroom);
+       skb_reserve(skb, local->tx_headroom);
        mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
        memset(mgmt, 0, hdr_len);
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
@@ -250,12 +250,12 @@ int mesh_path_error_tx(u8 ttl, u8 *target, __le32 target_sn,
        if (time_before(jiffies, ifmsh->next_perr))
                return -EAGAIN;
 
-       skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+       skb = dev_alloc_skb(local->tx_headroom +
                            hdr_len +
                            2 + 15 /* PERR IE */);
        if (!skb)
                return -1;
-       skb_reserve(skb, local->tx_headroom + local->hw.extra_tx_headroom);
+       skb_reserve(skb, local->tx_headroom);
        mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
        memset(mgmt, 0, hdr_len);
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
index 41ef1b4764422a85d56334f16c320b8d1d4db292..a17251730b9e603097ca8763f3301452168444a7 100644 (file)
@@ -172,7 +172,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
        int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.self_prot) +
                      sizeof(mgmt->u.action.u.self_prot);
 
-       skb = dev_alloc_skb(local->hw.extra_tx_headroom +
+       skb = dev_alloc_skb(local->tx_headroom +
                            hdr_len +
                            2 + /* capability info */
                            2 + /* AID */
@@ -186,7 +186,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
                            sdata->u.mesh.ie_len);
        if (!skb)
                return -1;
-       skb_reserve(skb, local->hw.extra_tx_headroom);
+       skb_reserve(skb, local->tx_headroom);
        mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
        memset(mgmt, 0, hdr_len);
        mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
index ecb4c84c1bb389decfedfb182bea2b534dcd258d..295be92f7c7747238ae1ae9204982db8fd264d77 100644 (file)
@@ -2750,7 +2750,6 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
-       struct ieee80211_work *wk;
        u8 bssid[ETH_ALEN];
        bool assoc_bss = false;
 
@@ -2763,30 +2762,47 @@ int ieee80211_mgd_deauth(struct ieee80211_sub_if_data *sdata,
                assoc_bss = true;
        } else {
                bool not_auth_yet = false;
+               struct ieee80211_work *tmp, *wk = NULL;
 
                mutex_unlock(&ifmgd->mtx);
 
                mutex_lock(&local->mtx);
-               list_for_each_entry(wk, &local->work_list, list) {
-                       if (wk->sdata != sdata)
+               list_for_each_entry(tmp, &local->work_list, list) {
+                       if (tmp->sdata != sdata)
                                continue;
 
-                       if (wk->type != IEEE80211_WORK_DIRECT_PROBE &&
-                           wk->type != IEEE80211_WORK_AUTH &&
-                           wk->type != IEEE80211_WORK_ASSOC &&
-                           wk->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
+                       if (tmp->type != IEEE80211_WORK_DIRECT_PROBE &&
+                           tmp->type != IEEE80211_WORK_AUTH &&
+                           tmp->type != IEEE80211_WORK_ASSOC &&
+                           tmp->type != IEEE80211_WORK_ASSOC_BEACON_WAIT)
                                continue;
 
-                       if (memcmp(req->bss->bssid, wk->filter_ta, ETH_ALEN))
+                       if (memcmp(req->bss->bssid, tmp->filter_ta, ETH_ALEN))
                                continue;
 
-                       not_auth_yet = wk->type == IEEE80211_WORK_DIRECT_PROBE;
-                       list_del_rcu(&wk->list);
-                       free_work(wk);
+                       not_auth_yet = tmp->type == IEEE80211_WORK_DIRECT_PROBE;
+                       list_del_rcu(&tmp->list);
+                       synchronize_rcu();
+                       wk = tmp;
                        break;
                }
                mutex_unlock(&local->mtx);
 
+               if (wk && wk->type == IEEE80211_WORK_ASSOC) {
+                       /* clean up dummy sta & TX sync */
+                       sta_info_destroy_addr(wk->sdata, wk->filter_ta);
+                       if (wk->assoc.synced)
+                               drv_finish_tx_sync(local, wk->sdata,
+                                                  wk->filter_ta,
+                                                  IEEE80211_TX_SYNC_ASSOC);
+               } else if (wk && wk->type == IEEE80211_WORK_AUTH) {
+                       if (wk->probe_auth.synced)
+                               drv_finish_tx_sync(local, wk->sdata,
+                                                  wk->filter_ta,
+                                                  IEEE80211_TX_SYNC_AUTH);
+               }
+               kfree(wk);
+
                /*
                 * If somebody requests authentication and we haven't
                 * sent out an auth frame yet there's no need to send
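The ieee80211_mgd_deauth() rework above keeps the removed work item alive across a synchronize_rcu() so the TX-sync cleanup can still dereference it, and only then kfree()s it. Reduced to a hypothetical RCU-protected list entry (struct work_entry, work_mtx and cleanup_entry() are illustrative, not from the tree), the underlying pattern is roughly:

static void remove_entry(struct work_entry *entry)
{
        mutex_lock(&work_mtx);          /* writers serialized, like local->mtx */
        list_del_rcu(&entry->list);     /* readers under rcu_read_lock() may still see it */
        mutex_unlock(&work_mtx);

        synchronize_rcu();              /* wait for those readers to drain */

        cleanup_entry(entry);           /* safe to tear the entry down outside RCU now */
        kfree(entry);
}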
index f407427c642f4a6fdd1d6b38fb2462b244f00da1..7514091207696b9066e416305da8fb8084f651b0 100644 (file)
@@ -1979,6 +1979,7 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
                mesh_path_error_tx(ifmsh->mshcfg.element_ttl, fwd_hdr->addr3,
                                    0, reason, fwd_hdr->addr2, sdata);
                IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
+               kfree_skb(fwd_skb);
                return RX_DROP_MONITOR;
        }
 
index 3c428d4839c7c5cca61ba535197068f3b5c10dbd..ff11f6bf8266dc1a01b1b16330b416a054d702de 100644 (file)
@@ -238,9 +238,11 @@ static void sta_unblock(struct work_struct *wk)
        if (sta->dead)
                return;
 
-       if (!test_sta_flag(sta, WLAN_STA_PS_STA))
+       if (!test_sta_flag(sta, WLAN_STA_PS_STA)) {
+               local_bh_disable();
                ieee80211_sta_ps_deliver_wakeup(sta);
-       else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) {
+               local_bh_enable();
+       } else if (test_and_clear_sta_flag(sta, WLAN_STA_PSPOLL)) {
                clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
 
                local_bh_disable();
index edcd1c7ab83f94fcaffdb049a93540bdf77c5baf..e05667cd5e766057c22770670834e6f2b19e3301 100644 (file)
@@ -1001,8 +1001,6 @@ ieee80211_tx_h_stats(struct ieee80211_tx_data *tx)
 static ieee80211_tx_result debug_noinline
 ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
 {
-       struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx->skb);
-
        if (!tx->key)
                return TX_CONTINUE;
 
@@ -1017,13 +1015,7 @@ ieee80211_tx_h_encrypt(struct ieee80211_tx_data *tx)
        case WLAN_CIPHER_SUITE_AES_CMAC:
                return ieee80211_crypto_aes_cmac_encrypt(tx);
        default:
-               /* handle hw-only algorithm */
-               if (info->control.hw_key) {
-                       ieee80211_tx_set_protected(tx);
-                       return TX_CONTINUE;
-               }
-               break;
-
+               return ieee80211_crypto_hw_encrypt(tx);
        }
 
        return TX_DROP;
index 422b79851ec510ef4272f9f2565bb56a0e002aef..b758350919ff4641e69127a220f3c450fb07fcc9 100644 (file)
@@ -643,3 +643,22 @@ ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx)
 
        return RX_CONTINUE;
 }
+
+ieee80211_tx_result
+ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx)
+{
+       struct sk_buff *skb;
+       struct ieee80211_tx_info *info = NULL;
+
+       skb_queue_walk(&tx->skbs, skb) {
+               info  = IEEE80211_SKB_CB(skb);
+
+               /* handle hw-only algorithm */
+               if (!info->control.hw_key)
+                       return TX_DROP;
+       }
+
+       ieee80211_tx_set_protected(tx);
+
+       return TX_CONTINUE;
+}
index baba0608313ef5419cccff809d28166b1178bf7b..07e33f899c71fc52f9ff1771cd283226a8a64084 100644 (file)
@@ -32,5 +32,7 @@ ieee80211_tx_result
 ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx);
 ieee80211_rx_result
 ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx);
+ieee80211_tx_result
+ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx);
 
 #endif /* WPA_H */
index 86137b558f455d9e1740a977aa09ea1e294bb241..32dbf0fa89db7661e1ee22cee519c273a229c9ca 100644 (file)
@@ -77,35 +77,42 @@ find_set_type(const char *name, u8 family, u8 revision)
 }
 
 /* Unlock, try to load a set type module and lock again */
-static int
-try_to_load_type(const char *name)
+static bool
+load_settype(const char *name)
 {
        nfnl_unlock();
        pr_debug("try to load ip_set_%s\n", name);
        if (request_module("ip_set_%s", name) < 0) {
                pr_warning("Can't find ip_set type %s\n", name);
                nfnl_lock();
-               return -IPSET_ERR_FIND_TYPE;
+               return false;
        }
        nfnl_lock();
-       return -EAGAIN;
+       return true;
 }
 
 /* Find a set type and reference it */
+#define find_set_type_get(name, family, revision, found)       \
+       __find_set_type_get(name, family, revision, found, false)
+
 static int
-find_set_type_get(const char *name, u8 family, u8 revision,
-                 struct ip_set_type **found)
+__find_set_type_get(const char *name, u8 family, u8 revision,
+                   struct ip_set_type **found, bool retry)
 {
        struct ip_set_type *type;
        int err;
 
+       if (retry && !load_settype(name))
+               return -IPSET_ERR_FIND_TYPE;
+
        rcu_read_lock();
        *found = find_set_type(name, family, revision);
        if (*found) {
                err = !try_module_get((*found)->me) ? -EFAULT : 0;
                goto unlock;
        }
-       /* Make sure the type is loaded but we don't support the revision */
+       /* Make sure the type is already loaded
+        * but we don't support the revision */
        list_for_each_entry_rcu(type, &ip_set_type_list, list)
                if (STREQ(type->name, name)) {
                        err = -IPSET_ERR_FIND_TYPE;
@@ -113,7 +120,8 @@ find_set_type_get(const char *name, u8 family, u8 revision,
                }
        rcu_read_unlock();
 
-       return try_to_load_type(name);
+       return retry ? -IPSET_ERR_FIND_TYPE :
+               __find_set_type_get(name, family, revision, found, true);
 
 unlock:
        rcu_read_unlock();
@@ -124,12 +132,19 @@ unlock:
  * If we succeeded, the supported minimal and maximum revisions are
  * filled out.
  */
+#define find_set_type_minmax(name, family, min, max) \
+       __find_set_type_minmax(name, family, min, max, false)
+
 static int
-find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
+__find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max,
+                      bool retry)
 {
        struct ip_set_type *type;
        bool found = false;
 
+       if (retry && !load_settype(name))
+               return -IPSET_ERR_FIND_TYPE;
+
        *min = 255; *max = 0;
        rcu_read_lock();
        list_for_each_entry_rcu(type, &ip_set_type_list, list)
@@ -145,7 +160,8 @@ find_set_type_minmax(const char *name, u8 family, u8 *min, u8 *max)
        if (found)
                return 0;
 
-       return try_to_load_type(name);
+       return retry ? -IPSET_ERR_FIND_TYPE :
+               __find_set_type_minmax(name, family, min, max, true);
 }
 
 #define family_name(f) ((f) == AF_INET ? "inet" : \
@@ -1126,6 +1142,7 @@ release_refcount:
        if (ret || !cb->args[2]) {
                pr_debug("release set %s\n", ip_set_list[index]->name);
                ip_set_put_byindex(index);
+               cb->args[2] = 0;
        }
 out:
        if (nlh) {
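The find_set_type_get()/find_set_type_minmax() refactor above replaces the old "return -EAGAIN and let the caller retry" dance with a single tail-recursive retry that is allowed exactly one load_settype() attempt. The shape of that pattern, reduced to a runnable userspace sketch with hypothetical lookup() and load_backend() stand-ins:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Hypothetical registry lookup; returns true when the name is known. */
static bool lookup(const char *name)
{
        return strcmp(name, "hash:ip") == 0;
}

/* Stand-in for request_module(): pretend loading always succeeds. */
static bool load_backend(const char *name)
{
        printf("loading backend %s\n", name);
        return true;
}

static int find_backend(const char *name, bool retry)
{
        if (retry && !load_backend(name))
                return -1;              /* could not load, give up */
        if (lookup(name))
                return 0;               /* found */
        /* Not found: try once more after loading, but never recurse twice. */
        return retry ? -1 : find_backend(name, true);
}

int main(void)
{
        return find_backend("hash:ip", false);
}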
index 299fec91f74189a562f7fcb0d47914d9b279c286..bbe23baa19b64f4df7b2532b1471614a5315cc26 100644 (file)
@@ -121,18 +121,6 @@ int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
        int ret = 0;
 
        if (tmpl != NULL) {
-               /* we've got a userspace helper. */
-               if (tmpl->status & IPS_USERSPACE_HELPER) {
-                       help = nf_ct_helper_ext_add(ct, flags);
-                       if (help == NULL) {
-                               ret = -ENOMEM;
-                               goto out;
-                       }
-                       rcu_assign_pointer(help->helper, NULL);
-                       __set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status);
-                       ret = 0;
-                       goto out;
-               }
                help = nfct_help(tmpl);
                if (help != NULL)
                        helper = help->helper;
index 2a4834b83332afa2ebf87b37102cd2a9d6dcd8d1..9307b033c0c9d9ff35c60b31755aaceef8d89087 100644 (file)
@@ -2042,10 +2042,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
        }
        help = nfct_help(ct);
        if (!help) {
-               err = -EOPNOTSUPP;
-               goto out;
-       }
-       if (test_bit(IPS_USERSPACE_HELPER_BIT, &ct->status)) {
                if (!cda[CTA_EXPECT_TIMEOUT]) {
                        err = -EINVAL;
                        goto out;
index 8e87123f1373a1a4a42ddb6211c1737d2346047d..0221d10de75a517dbc4c5e5c7d40b432abef15a3 100644 (file)
@@ -62,8 +62,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
        int ret = 0;
        u8 proto;
 
-       if (info->flags & ~(XT_CT_NOTRACK | XT_CT_USERSPACE_HELPER))
-               return -EOPNOTSUPP;
+       if (info->flags & ~XT_CT_NOTRACK)
+               return -EINVAL;
 
        if (info->flags & XT_CT_NOTRACK) {
                ct = nf_ct_untracked_get();
@@ -92,9 +92,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par)
                                  GFP_KERNEL))
                goto err3;
 
-       if (info->flags & XT_CT_USERSPACE_HELPER) {
-               __set_bit(IPS_USERSPACE_HELPER_BIT, &ct->status);
-       } else if (info->helper[0]) {
+       if (info->helper[0]) {
                ret = -ENOENT;
                proto = xt_ct_find_proto(par);
                if (!proto) {
index 8e4992101875086cd412154d102793c99ffa6bed..d95f9c963cde01cfcd4d6541de352c85f44fc6f8 100644 (file)
@@ -445,7 +445,6 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
 {
        __be16 _ports[2], *ports;
        u8 nexthdr;
-       __be16 frag_off;
        int poff;
 
        memset(dst, 0, sizeof(*dst));
@@ -466,6 +465,9 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
                break;
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
        case NFPROTO_IPV6:
+       {
+               __be16 frag_off;
+
                if (hinfo->cfg.mode & XT_HASHLIMIT_HASH_DIP) {
                        memcpy(&dst->ip6.dst, &ipv6_hdr(skb)->daddr,
                               sizeof(dst->ip6.dst));
@@ -485,6 +487,7 @@ hashlimit_init_dst(const struct xt_hashlimit_htable *hinfo,
                if ((int)protoff < 0)
                        return -1;
                break;
+       }
 #endif
        default:
                BUG();
index 9a2725114e99bad3a1e69351ac55ec764a295cc6..ce64c18b8c79a99ef4251e7c9d2a752105207725 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2011 Nicira Networks.
+ * Copyright (c) 2007-2012 Nicira Networks.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -36,7 +36,6 @@
 #include <linux/rcupdate.h>
 #include <linux/tcp.h>
 #include <linux/udp.h>
-#include <linux/version.h>
 #include <linux/ethtool.h>
 #include <linux/wait.h>
 #include <asm/system.h>
@@ -1397,9 +1396,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
        int i = 0;
 
        list_for_each_entry(dp, &dps, list_node) {
-               if (i < skip)
-                       continue;
-               if (ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
+               if (i >= skip &&
+                   ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).pid,
                                         cb->nlh->nlmsg_seq, NLM_F_MULTI,
                                         OVS_DP_CMD_NEW) < 0)
                        break;
index 5b9f884b7055a6c75205fc00082f2f541202df7f..c73370cc1f02df24d32026c0e775efef72451dfc 100644 (file)
@@ -25,7 +25,6 @@
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/u64_stats_sync.h>
-#include <linux/version.h>
 
 #include "flow.h"
 
index fe7f020a843efa2519f47c80ca4b340e14299917..1252c3081ef12740a0b818fbd58ae3ab6e9b870e 100644 (file)
@@ -32,7 +32,6 @@
 #include <linux/in.h>
 #include <linux/rcupdate.h>
 #include <linux/if_arp.h>
-#include <linux/if_ether.h>
 #include <linux/ip.h>
 #include <linux/ipv6.h>
 #include <linux/tcp.h>
index 8fc28b86f2b3e4666fd2ef18d63ad785aed2c25d..322b8d206693dde98a30f1d36a6db4d800e0f17a 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/etherdevice.h>
 #include <linux/ethtool.h>
 #include <linux/skbuff.h>
-#include <linux/version.h>
 
 #include "datapath.h"
 #include "vport-internal_dev.h"
index 7f0ef3794c515064c67d719b8f65c564aa82295d..6c066ba25dc71c24c8a94e4acc50014d610f77a6 100644 (file)
@@ -27,7 +27,6 @@
 #include <linux/rcupdate.h>
 #include <linux/rtnetlink.h>
 #include <linux/compat.h>
-#include <linux/version.h>
 
 #include "vport.h"
 #include "vport-internal_dev.h"
index bb6ad81b671d055b89fbe74287ed328a834aac03..424ff622ab5f8e77dc69c7b54b98bdaafa350329 100644 (file)
@@ -68,7 +68,6 @@ static int rds_release(struct socket *sock)
 {
        struct sock *sk = sock->sk;
        struct rds_sock *rs;
-       unsigned long flags;
 
        if (!sk)
                goto out;
@@ -94,10 +93,10 @@ static int rds_release(struct socket *sock)
        rds_rdma_drop_keys(rs);
        rds_notify_queue_get(rs, NULL);
 
-       spin_lock_irqsave(&rds_sock_lock, flags);
+       spin_lock_bh(&rds_sock_lock);
        list_del_init(&rs->rs_item);
        rds_sock_count--;
-       spin_unlock_irqrestore(&rds_sock_lock, flags);
+       spin_unlock_bh(&rds_sock_lock);
 
        rds_trans_put(rs->rs_transport);
 
@@ -409,7 +408,6 @@ static const struct proto_ops rds_proto_ops = {
 
 static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
 {
-       unsigned long flags;
        struct rds_sock *rs;
 
        sock_init_data(sock, sk);
@@ -426,10 +424,10 @@ static int __rds_create(struct socket *sock, struct sock *sk, int protocol)
        spin_lock_init(&rs->rs_rdma_lock);
        rs->rs_rdma_keys = RB_ROOT;
 
-       spin_lock_irqsave(&rds_sock_lock, flags);
+       spin_lock_bh(&rds_sock_lock);
        list_add_tail(&rs->rs_item, &rds_sock_list);
        rds_sock_count++;
-       spin_unlock_irqrestore(&rds_sock_lock, flags);
+       spin_unlock_bh(&rds_sock_lock);
 
        return 0;
 }
@@ -471,12 +469,11 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
 {
        struct rds_sock *rs;
        struct rds_incoming *inc;
-       unsigned long flags;
        unsigned int total = 0;
 
        len /= sizeof(struct rds_info_message);
 
-       spin_lock_irqsave(&rds_sock_lock, flags);
+       spin_lock_bh(&rds_sock_lock);
 
        list_for_each_entry(rs, &rds_sock_list, rs_item) {
                read_lock(&rs->rs_recv_lock);
@@ -492,7 +489,7 @@ static void rds_sock_inc_info(struct socket *sock, unsigned int len,
                read_unlock(&rs->rs_recv_lock);
        }
 
-       spin_unlock_irqrestore(&rds_sock_lock, flags);
+       spin_unlock_bh(&rds_sock_lock);
 
        lens->nr = total;
        lens->each = sizeof(struct rds_info_message);
@@ -504,11 +501,10 @@ static void rds_sock_info(struct socket *sock, unsigned int len,
 {
        struct rds_info_socket sinfo;
        struct rds_sock *rs;
-       unsigned long flags;
 
        len /= sizeof(struct rds_info_socket);
 
-       spin_lock_irqsave(&rds_sock_lock, flags);
+       spin_lock_bh(&rds_sock_lock);
 
        if (len < rds_sock_count)
                goto out;
@@ -529,7 +525,7 @@ out:
        lens->nr = rds_sock_count;
        lens->each = sizeof(struct rds_info_socket);
 
-       spin_unlock_irqrestore(&rds_sock_lock, flags);
+       spin_unlock_bh(&rds_sock_lock);
 }
 
 static void rds_exit(void)
index e7e1d0b57b3d2a28d5e99276cd195f7dfde999d0..2776012132ea2b6448e5b0dadf1d6dd729b13afa 100644 (file)
@@ -419,7 +419,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
        cb = netem_skb_cb(skb);
        if (q->gap == 0 ||              /* not doing reordering */
-           q->counter < q->gap ||      /* inside last reordering gap */
+           q->counter < q->gap - 1 ||  /* inside last reordering gap */
            q->reorder < get_crandom(&q->reorder_cor)) {
                psched_time_t now;
                psched_tdiff_t delay;
index 1426ec3d0a531ecd4ec0b227c5f7aa22843a1750..75762f346975ed6e2be6d639ed7a3e1d83ede077 100644 (file)
@@ -92,6 +92,7 @@ generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
        if (gcred->acred.group_info != NULL)
                get_group_info(gcred->acred.group_info);
        gcred->acred.machine_cred = acred->machine_cred;
+       gcred->acred.principal = acred->principal;
 
        dprintk("RPC:       allocated %s cred %p for uid %d gid %d\n",
                        gcred->acred.machine_cred ? "machine" : "generic",
@@ -123,6 +124,17 @@ generic_destroy_cred(struct rpc_cred *cred)
        call_rcu(&cred->cr_rcu, generic_free_cred_callback);
 }
 
+static int
+machine_cred_match(struct auth_cred *acred, struct generic_cred *gcred, int flags)
+{
+       if (!gcred->acred.machine_cred ||
+           gcred->acred.principal != acred->principal ||
+           gcred->acred.uid != acred->uid ||
+           gcred->acred.gid != acred->gid)
+               return 0;
+       return 1;
+}
+
 /*
  * Match credentials against current process creds.
  */
@@ -132,9 +144,12 @@ generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
        struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base);
        int i;
 
+       if (acred->machine_cred)
+               return machine_cred_match(acred, gcred, flags);
+
        if (gcred->acred.uid != acred->uid ||
            gcred->acred.gid != acred->gid ||
-           gcred->acred.machine_cred != acred->machine_cred)
+           gcred->acred.machine_cred != 0)
                goto out_nomatch;
 
        /* Optimisation in the case where pointers are identical... */
index aad8fb699989d26c0cfe727ea529b312e79c65ce..85d3bb7490aabcb26fd10b08adfcddebb5cbeb59 100644 (file)
@@ -1918,7 +1918,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                struct sk_buff *skb;
 
                unix_state_lock(sk);
-               skb = skb_dequeue(&sk->sk_receive_queue);
+               skb = skb_peek(&sk->sk_receive_queue);
                if (skb == NULL) {
                        unix_sk(sk)->recursion_level = 0;
                        if (copied >= target)
@@ -1958,11 +1958,8 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                if (check_creds) {
                        /* Never glue messages from different writers */
                        if ((UNIXCB(skb).pid  != siocb->scm->pid) ||
-                           (UNIXCB(skb).cred != siocb->scm->cred)) {
-                               skb_queue_head(&sk->sk_receive_queue, skb);
-                               sk->sk_data_ready(sk, skb->len);
+                           (UNIXCB(skb).cred != siocb->scm->cred))
                                break;
-                       }
                } else {
                        /* Copy credentials */
                        scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);
@@ -1977,8 +1974,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
 
                chunk = min_t(unsigned int, skb->len, size);
                if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
-                       skb_queue_head(&sk->sk_receive_queue, skb);
-                       sk->sk_data_ready(sk, skb->len);
                        if (copied == 0)
                                copied = -EFAULT;
                        break;
@@ -1993,13 +1988,10 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                        if (UNIXCB(skb).fp)
                                unix_detach_fds(siocb->scm, skb);
 
-                       /* put the skb back if we didn't use it up.. */
-                       if (skb->len) {
-                               skb_queue_head(&sk->sk_receive_queue, skb);
-                               sk->sk_data_ready(sk, skb->len);
+                       if (skb->len)
                                break;
-                       }
 
+                       skb_unlink(skb, &sk->sk_receive_queue);
                        consume_skb(skb);
 
                        if (siocb->scm->fp)
@@ -2010,9 +2002,6 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
                        if (UNIXCB(skb).fp)
                                siocb->scm->fp = scm_fp_dup(UNIXCB(skb).fp);
 
-                       /* put message back and return */
-                       skb_queue_head(&sk->sk_receive_queue, skb);
-                       sk->sk_data_ready(sk, skb->len);
                        break;
                }
        } while (size);
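The unix_stream_recvmsg() change above switches from dequeueing the head skb to peeking at it, so partial reads and error exits simply leave the skb where it is instead of re-queueing it with skb_queue_head() and re-raising sk_data_ready(); the skb is unlinked only once it is fully consumed. A small userspace sketch of that consume-from-the-head discipline (types and names are hypothetical):

#include <stdio.h>
#include <string.h>

struct chunk {
        const char *data;
        size_t len;             /* bytes not yet consumed */
        size_t off;
};

/* Copy up to size bytes out of a queue of chunks.  The head chunk is only
 * removed once it is fully consumed, so stopping mid-copy never needs a
 * "put it back" step -- the same idea as peek/unlink instead of dequeue. */
static size_t recv_from(struct chunk *q, size_t nchunks, char *out, size_t size)
{
        size_t copied = 0, head = 0;

        while (copied < size && head < nchunks) {
                struct chunk *c = &q[head];
                size_t n = c->len < size - copied ? c->len : size - copied;

                memcpy(out + copied, c->data + c->off, n);
                copied += n;
                c->off += n;
                c->len -= n;
                if (c->len == 0)
                        head++;         /* "unlink" only when fully consumed */
        }
        return copied;
}

int main(void)
{
        struct chunk q[] = { { "hello ", 6, 0 }, { "world", 5, 0 } };
        char buf[8];
        size_t n = recv_from(q, 2, buf, sizeof(buf) - 1);

        buf[n] = '\0';
        printf("%zu: %s\n", n, buf);
        return 0;
}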
index a57f5bd5a13d5b8f14a90b03bf23115e925b4a0a..d3bae5e7b6018d7429c518681a706179e00c6d81 100644 (file)
@@ -4,12 +4,16 @@
 # header-y  - list files to be installed. They are preprocessed
 #             to remove __KERNEL__ section of the file
 # objhdr-y  - Same as header-y but for generated files
+# genhdr-y  - Same as objhdr-y but in a generated/ directory
 #
 # ==========================================================================
 
 # called may set destination dir (when installing to asm/)
 _dst := $(if $(dst),$(dst),$(obj))
 
+# generated header directory
+gen := $(if $(gen),$(gen),$(subst include/,include/generated/,$(obj)))
+
 kbuild-file := $(srctree)/$(obj)/Kbuild
 include $(kbuild-file)
 
@@ -33,9 +37,10 @@ wrapper-files := $(filter $(header-y), $(generic-y))
 
 # all headers files for this dir
 header-y      := $(filter-out $(generic-y), $(header-y))
-all-files     := $(header-y) $(objhdr-y) $(wrapper-files)
+all-files     := $(header-y) $(objhdr-y) $(genhdr-y) $(wrapper-files)
 input-files   := $(addprefix $(srctree)/$(obj)/,$(header-y)) \
-                 $(addprefix $(objtree)/$(obj)/,$(objhdr-y))
+                 $(addprefix $(objtree)/$(obj)/,$(objhdr-y)) \
+                 $(addprefix $(objtree)/$(gen)/,$(genhdr-y))
 output-files  := $(addprefix $(install)/, $(all-files))
 
 # Work out what needs to be removed
@@ -52,6 +57,7 @@ quiet_cmd_install = INSTALL $(printdir) ($(words $(all-files))\
       cmd_install = \
         $(PERL) $< $(srctree)/$(obj) $(install) $(SRCARCH) $(header-y); \
         $(PERL) $< $(objtree)/$(obj) $(install) $(SRCARCH) $(objhdr-y); \
+        $(PERL) $< $(objtree)/$(gen) $(install) $(SRCARCH) $(genhdr-y); \
         for F in $(wrapper-files); do                                   \
                 echo "\#include <asm-generic/$$F>" > $(install)/$$F;    \
         done;                                                           \
index 5d986d9adf1b7977684a1fa1825f5ff25c2816bc..00c368c6e996fb0555f8161b6d3147c554d037e5 100644 (file)
@@ -93,9 +93,9 @@ obj-dirs      := $(addprefix $(obj)/,$(obj-dirs))
 # already
 # $(modname_flags) #defines KBUILD_MODNAME as the name of the module it will 
 # end up in (or would, if it gets compiled in)
-# Note: It's possible that one object gets potentially linked into more
-#       than one module. In that case KBUILD_MODNAME will be set to foo_bar,
-#       where foo and bar are the name of the modules.
+# Note: Files that end up in two or more modules are compiled without the
+#       KBUILD_MODNAME definition. The reason is that any made-up name would
+#       differ in different configs.
 name-fix = $(subst $(comma),_,$(subst -,_,$1))
 basename_flags = -D"KBUILD_BASENAME=KBUILD_STR($(call name-fix,$(basetarget)))"
 modname_flags  = $(if $(filter 1,$(words $(modname))),\
@@ -264,7 +264,7 @@ $(obj)/%.dtb.S: $(obj)/%.dtb
        $(call cmd,dt_S_dtb)
 
 quiet_cmd_dtc = DTC     $@
-cmd_dtc = $(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 $(DTC_FLAGS) $<
+cmd_dtc = $(objtree)/scripts/dtc/dtc -O dtb -o $@ -b 0 $(DTC_FLAGS) -d $(depfile) $<
 
 # Bzip2
 # ---------------------------------------------------------------------------
index e3bfcbe8a520b63639b04115bf148fb4ddd144eb..a3b9782441f9e49a3a3522107e40dc3ffee913b8 100755 (executable)
@@ -1924,6 +1924,12 @@ sub process {
                        my $pre_ctx = "$1$2";
 
                        my ($level, @ctx) = ctx_statement_level($linenr, $realcnt, 0);
+
+                       if ($line =~ /^\+\t{6,}/) {
+                               WARN("DEEP_INDENTATION",
+                                    "Too many leading tabs - consider code refactoring\n" . $herecurr);
+                       }
+
                        my $ctx_cnt = $realcnt - $#ctx - 1;
                        my $ctx = join("\n", @ctx);
 
index 3ab316e5231336cd528b1d591c517f42860ad721..d24810fc6af6caf3a15ea3f5a55b33f4f08a667b 100755 (executable)
@@ -198,11 +198,16 @@ EOF
 }
 
 syscall_list() {
-sed -n -e '/^\#define/ s/[^_]*__NR_\([^[:space:]]*\).*/\
-\#if !defined \(__NR_\1\) \&\& !defined \(__IGNORE_\1\)\
-\#warning syscall \1 not implemented\
-\#endif/p' $1
+    grep '^[0-9]' "$1" | sort -n | (
+       while read nr abi name entry ; do
+           echo <<EOF
+#if !defined(__NR_${name}) && !defined(__IGNORE_${name})
+#warning syscall ${name} not implemented
+#endif
+EOF
+       done
+    )
 }
 
-(ignore_list && syscall_list $(dirname $0)/../arch/x86/include/asm/unistd_32.h) | \
+(ignore_list && syscall_list $(dirname $0)/../arch/x86/syscalls/syscall_32.tbl) | \
 $* -E -x c - > /dev/null
index 1bb1a1bd2daa832b234edbec3765861d4f41010d..3c2776466d877325d63b1980b798b8b0f9d6f556 100755 (executable)
@@ -9,14 +9,23 @@ if [ "$C" = "1" -o "$C" = "2" ]; then
 #    FLAGS="-ignore_unknown_options -very_quiet"
 #    OPTIONS=$*
 
-# Workaround for Coccinelle < 0.2.3
-    FLAGS="-I $srctree/include -very_quiet"
-    shift $(( $# - 1 ))
-    OPTIONS=$1
+    if [ "$KBUILD_EXTMOD" = "" ] ; then
+        # Workaround for Coccinelle < 0.2.3
+        FLAGS="-I $srctree/include -very_quiet"
+        shift $(( $# - 1 ))
+        OPTIONS=$1
+    else
+       echo M= is not currently supported when C=1 or C=2
+       exit 1
+    fi
 else
     ONLINE=0
     FLAGS="-very_quiet"
-    OPTIONS="-dir $srctree"
+    if [ "$KBUILD_EXTMOD" = "" ] ; then
+        OPTIONS="-dir $srctree"
+    else
+        OPTIONS="-dir $KBUILD_EXTMOD -patch $srctree -I $srctree/include -I $KBUILD_EXTMOD/include"
+    fi
 fi
 
 if [ ! -x "$SPATCH" ]; then
diff --git a/scripts/coccinelle/api/devm_request_and_ioremap.cocci b/scripts/coccinelle/api/devm_request_and_ioremap.cocci
new file mode 100644 (file)
index 0000000..46beb81
--- /dev/null
@@ -0,0 +1,105 @@
+/// Reimplement a call to devm_request_mem_region followed by a call to ioremap
+/// or ioremap_nocache by a call to devm_request_and_ioremap.
+/// Devm_request_and_ioremap was introduced in
+/// 72f8c0bfa0de64c68ee59f40eb9b2683bffffbb0.  It makes the code much more
+/// concise.
+///
+///
+// Confidence: High
+// Copyright: (C) 2011 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2011 Gilles Muller, INRIA/LiP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual patch
+virtual org
+virtual report
+virtual context
+
+@nm@
+expression myname;
+identifier i;
+@@
+
+struct platform_driver i = { .driver = { .name = myname } };
+
+@depends on patch@
+expression dev,res,size;
+@@
+
+-if (!devm_request_mem_region(dev, res->start, size,
+-                              \(res->name\|dev_name(dev)\))) {
+-   ...
+-   return ...;
+-}
+... when != res->start
+(
+-devm_ioremap(dev,res->start,size)
++devm_request_and_ioremap(dev,res)
+|
+-devm_ioremap_nocache(dev,res->start,size)
++devm_request_and_ioremap(dev,res)
+)
+... when any
+    when != res->start
+
+// this rule is separate from the previous one, because a single file can
+// have multiple values of myname
+@depends on patch@
+expression dev,res,size;
+expression nm.myname;
+@@
+
+-if (!devm_request_mem_region(dev, res->start, size,myname)) {
+-   ...
+-   return ...;
+-}
+... when != res->start
+(
+-devm_ioremap(dev,res->start,size)
++devm_request_and_ioremap(dev,res)
+|
+-devm_ioremap_nocache(dev,res->start,size)
++devm_request_and_ioremap(dev,res)
+)
+... when any
+    when != res->start
+
+
+@pb depends on org || report || context@
+expression dev,res,size;
+expression nm.myname;
+position p1,p2;
+@@
+
+*if
+  (!devm_request_mem_region@p1(dev, res->start, size,
+                              \(res->name\|dev_name(dev)\|myname\))) {
+   ...
+   return ...;
+}
+... when != res->start
+(
+*devm_ioremap@p2(dev,res->start,size)
+|
+*devm_ioremap_nocache@p2(dev,res->start,size)
+)
+... when any
+    when != res->start
+
+@script:python depends on org@
+p1 << pb.p1;
+p2 << pb.p2;
+@@
+
+cocci.print_main("INFO: replace by devm_request_and_ioremap",p1)
+cocci.print_secs("",p2)
+
+@script:python depends on report@
+p1 << pb.p1;
+p2 << pb.p2;
+@@
+
+msg = "INFO: devm_request_mem_region followed by ioremap on line %s can be replaced by devm_request_and_ioremap" % (p2[0].line)
+coccilib.report.print_report(p1[0],msg)
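The new devm_request_and_ioremap.cocci rule collapses a devm_request_mem_region()/devm_ioremap() (or _nocache) pair into a single devm_request_and_ioremap() call. Applied in a driver probe path, the result looks roughly like the sketch below; foo_probe() and its error codes are illustrative, not taken from the tree:

#include <linux/platform_device.h>
#include <linux/io.h>

static int foo_probe(struct platform_device *pdev)
{
        struct resource *res;
        void __iomem *base;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -ENODEV;

        /* Replaces the devm_request_mem_region() + devm_ioremap() pair;
         * both the region and the mapping are released automatically when
         * the device is unbound. */
        base = devm_request_and_ioremap(&pdev->dev, res);
        if (!base)
                return -EADDRNOTAVAIL;

        /* ... use base ... */
        return 0;
}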
index e0805ad08d39260e03e94759fde73dd85a1121e1..07a74b2c6196c0dc649aaadeee908272ed665d15 100644 (file)
@@ -1,16 +1,19 @@
 /// Use kstrdup rather than duplicating its implementation
 ///
 // Confidence: High
-// Copyright: (C) 2010 Nicolas Palix, DIKU.  GPLv2.
-// Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
-// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
+// Copyright: (C) 2010-2012 Nicolas Palix.  GPLv2.
+// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
 // Options: -no_includes -include_headers
 
 virtual patch
+virtual context
+virtual org
+virtual report
 
-@@
+@depends on patch@
 expression from,to;
 expression flag,E1,E2;
 statement S;
@@ -23,7 +26,7 @@ statement S;
    ... when != \(from = E2 \| to = E2 \)
 -  strcpy(to, from);
 
-@@
+@depends on patch@
 expression x,from,to;
 expression flag,E1,E2,E3;
 statement S;
@@ -37,3 +40,65 @@ statement S;
     if (to==NULL || ...) S
     ... when != \(x = E3 \| from = E3 \| to = E3 \)
 -   memcpy(to, from, x);
+
+// ---------------------------------------------------------------------
+
+@r1 depends on !patch exists@
+expression from,to;
+expression flag,E1,E2;
+statement S;
+position p1,p2;
+@@
+
+*  to = kmalloc@p1(strlen(from) + 1,flag);
+   ... when != \(from = E1 \| to = E1 \)
+   if (to==NULL || ...) S
+   ... when != \(from = E2 \| to = E2 \)
+*  strcpy@p2(to, from);
+
+@r2 depends on !patch exists@
+expression x,from,to;
+expression flag,E1,E2,E3;
+statement S;
+position p1,p2;
+@@
+
+*   x = strlen(from) + 1;
+    ... when != \( x = E1 \| from = E1 \)
+*   to = \(kmalloc@p1\|kzalloc@p2\)(x,flag);
+    ... when != \(x = E2 \| from = E2 \| to = E2 \)
+    if (to==NULL || ...) S
+    ... when != \(x = E3 \| from = E3 \| to = E3 \)
+*   memcpy@p2(to, from, x);
+
+@script:python depends on org@
+p1 << r1.p1;
+p2 << r1.p2;
+@@
+
+cocci.print_main("WARNING opportunity for kstrdep",p1)
+cocci.print_secs("strcpy",p2)
+
+@script:python depends on org@
+p1 << r2.p1;
+p2 << r2.p2;
+@@
+
+cocci.print_main("WARNING opportunity for kstrdep",p1)
+cocci.print_secs("memcpy",p2)
+
+@script:python depends on report@
+p1 << r1.p1;
+p2 << r1.p2;
+@@
+
+msg = "WARNING opportunity for kstrdep (strcpy on line %s)" % (p2[0].line)
+coccilib.report.print_report(p1[0], msg)
+
+@script:python depends on report@
+p1 << r2.p1;
+p2 << r2.p2;
+@@
+
+msg = "WARNING opportunity for kstrdep (memcpy on line %s)" % (p2[0].line)
+coccilib.report.print_report(p1[0], msg)
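In patch mode the first rule of this kstrdup script performs the classic replacement below; the variables are hypothetical and only illustrate the transformation:

/* before: open-coded duplication */
to = kmalloc(strlen(from) + 1, GFP_KERNEL);
if (!to)
        return -ENOMEM;
strcpy(to, from);

/* after: let kstrdup() do the allocation and the copy */
to = kstrdup(from, GFP_KERNEL);
if (!to)
        return -ENOMEM;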
index b5d722077dc179fb7ba23c73215cbd701a005886..4dceab6d54de5deabae0ac3eea2aa6d00da275d8 100644 (file)
@@ -1,14 +1,17 @@
 /// Use kmemdup rather than duplicating its implementation
 ///
 // Confidence: High
-// Copyright: (C) 2010 Nicolas Palix, DIKU.  GPLv2.
-// Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
-// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
+// Copyright: (C) 2010-2012 Nicolas Palix.  GPLv2.
+// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
 // Options: -no_includes -include_headers
 
 virtual patch
+virtual context
+virtual org
+virtual report
 
 @r1@
 expression from,to;
@@ -28,7 +31,7 @@ position p;
     ... when != \( x = E1 \| from = E1 \)
     to = \(kmalloc@p\|kzalloc@p\)(x,flag);
 
-@@
+@depends on patch@
 expression from,to,size,flag;
 position p != {r1.p,r2.p};
 statement S;
@@ -38,3 +41,26 @@ statement S;
 +  to = kmemdup(from,size,flag);
    if (to==NULL || ...) S
 -  memcpy(to, from, size);
+
+@r depends on !patch@
+expression from,to,size,flag;
+position p != {r1.p,r2.p};
+statement S;
+@@
+
+*  to = \(kmalloc@p\|kzalloc@p\)(size,flag);
+   to = kmemdup(from,size,flag);
+   if (to==NULL || ...) S
+*  memcpy(to, from, size);
+
+@script:python depends on org@
+p << r.p;
+@@
+
+coccilib.org.print_todo(p[0], "WARNING opportunity for kmemdep")
+
+@script:python depends on report@
+p << r.p;
+@@
+
+coccilib.report.print_report(p[0], "WARNING opportunity for kmemdep")
index 72ce012e878a12bbcdcf83f421ac885751763ddf..2efac289fd59d2beb755593acdd0c6fc39f3ec61 100644 (file)
@@ -1,23 +1,25 @@
-/// Use kmemdup_user rather than duplicating its implementation
+/// Use memdup_user rather than duplicating its implementation
 /// This is a little bit restricted to reduce false positives
 ///
 // Confidence: High
-// Copyright: (C) 2010 Nicolas Palix, DIKU.  GPLv2.
-// Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
-// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
+// Copyright: (C) 2010-2012 Nicolas Palix.  GPLv2.
+// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
 // Options: -no_includes -include_headers
 
 virtual patch
+virtual context
+virtual org
+virtual report
 
-@@
+@depends on patch@
 expression from,to,size,flag;
-position p;
 identifier l1,l2;
 @@
 
--  to = \(kmalloc@p\|kzalloc@p\)(size,flag);
+-  to = \(kmalloc\|kzalloc\)(size,flag);
 +  to = memdup_user(from,size);
    if (
 -      to==NULL
@@ -33,3 +35,26 @@ identifier l1,l2;
 -    -EFAULT
 -    ...+>
 -  }
+
+@r depends on !patch@
+expression from,to,size,flag;
+position p;
+statement S1,S2;
+@@
+
+*  to = \(kmalloc@p\|kzalloc@p\)(size,flag);
+   if (to==NULL || ...) S1
+   if (copy_from_user(to, from, size) != 0)
+   S2
+
+@script:python depends on org@
+p << r.p;
+@@
+
+coccilib.org.print_todo(p[0], "WARNING opportunity for memdep_user")
+
+@script:python depends on report@
+p << r.p;
+@@
+
+coccilib.report.print_report(p[0], "WARNING opportunity for memdep_user")
diff --git a/scripts/coccinelle/free/devm_free.cocci b/scripts/coccinelle/free/devm_free.cocci
new file mode 100644 (file)
index 0000000..0a1e361
--- /dev/null
@@ -0,0 +1,71 @@
+/// Find uses of standard freeing functions on values allocated using devm_
+/// functions.  Values allocated using the devm_functions are freed when
+/// the device is detached, and thus the use of the standard freeing
+/// function would cause a double free.
+/// See Documentation/driver-model/devres.txt for more information.
+///
+/// A difficulty of detecting this problem is that the standard freeing
+/// function might be called from a different function than the one
+/// containing the allocation function.  It is thus necessary to make the
+/// connection between the allocation function and the freeing function.
+/// Here this is done using the specific argument text, which is prone to
+/// false positives.  There is no rule for the request_region and
+/// request_mem_region variants because this heuristic seems to be a bit
+/// less reliable in these cases.
+///
+// Confidence: Moderate
+// Copyright: (C) 2011 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2011 Gilles Muller, INRIA/LiP6.  GPLv2.
+// URL: http://coccinelle.lip6.fr/
+// Comments:
+// Options: -no_includes -include_headers
+
+virtual org
+virtual report
+virtual context
+
+@r depends on context || org || report@
+expression x;
+@@
+
+(
+ x = devm_kzalloc(...)
+|
+ x = devm_request_irq(...)
+|
+ x = devm_ioremap(...)
+|
+ x = devm_ioremap_nocache(...)
+|
+ x = devm_ioport_map(...)
+)
+
+@pb@
+expression r.x;
+position p;
+@@
+
+(
+* kfree@p(x)
+|
+* free_irq@p(x)
+|
+* iounmap@p(x)
+|
+* ioport_unmap@p(x)
+)
+
+@script:python depends on org@
+p << pb.p;
+@@
+
+msg="WARNING: invalid free of devm_ allocated data"
+coccilib.org.print_todo(p[0], msg)
+
+@script:python depends on report@
+p << pb.p;
+@@
+
+msg="WARNING: invalid free of devm_ allocated data"
+coccilib.report.print_report(p[0], msg)
+
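What the new devm_free.cocci rule flags, in its simplest form, is a manual free of device-managed memory (hypothetical snippet; the fix is simply to drop the kfree):

buf = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);    /* freed automatically at unbind */
if (!buf)
        return -ENOMEM;
/* ... */
kfree(buf);     /* flagged: causes a double free when the device is detached */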
index f9f79d9245ee9815ef3e244f791553501e1391b2..d9ae6d89c2f57e9db00a856f461b1f968cf07018 100644 (file)
@@ -5,9 +5,9 @@
 //# SCTP_DBG_OBJCNT_DEC that do not actually evaluate their argument
 ///
 // Confidence: Moderate
-// Copyright: (C) 2010 Nicolas Palix, DIKU.  GPLv2.
-// Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
-// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
+// Copyright: (C) 2010-2012 Nicolas Palix.  GPLv2.
+// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
 // Options: -no_includes -include_headers
@@ -23,7 +23,7 @@ position p1;
 kfree@p1(E)
 
 @print expression@
-constant char *c;
+constant char [] c;
 expression free.E,E2;
 type T;
 position p;
@@ -36,6 +36,10 @@ identifier f;
  E@p == E2
 |
  E@p != E2
+|
+ E2 == E@p
+|
+ E2 != E@p
 |
  !E@p
 |
@@ -113,5 +117,5 @@ p1 << free.p1;
 p2 << r.p2;
 @@
 
-msg = "reference preceded by free on line %s" % (p1[0].line)
+msg = "ERROR: reference preceded by free on line %s" % (p1[0].line)
 coccilib.report.print_report(p2[0],msg)
index 77bc108c3f597b715850465c0a5fa26333132bec..0a40af828c43a4161b94d2b9c85a66830ae60f1b 100644 (file)
@@ -2,16 +2,19 @@
 /// is no point to call of_node_put on the final value.
 ///
 // Confidence: High
-// Copyright: (C) 2010 Nicolas Palix, DIKU.  GPLv2.
-// Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
-// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
+// Copyright: (C) 2010-2012 Nicolas Palix.  GPLv2.
+// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
 // Options: -no_includes -include_headers
 
 virtual patch
+virtual context
+virtual org
+virtual report
 
-@@
+@depends on patch@
 iterator name for_each_node_by_name;
 expression np,E;
 identifier l;
@@ -24,7 +27,7 @@ for_each_node_by_name(np,...) {
 ... when != np = E
 - of_node_put(np);
 
-@@
+@depends on patch@
 iterator name for_each_node_by_type;
 expression np,E;
 identifier l;
@@ -37,7 +40,7 @@ for_each_node_by_type(np,...) {
 ... when != np = E
 - of_node_put(np);
 
-@@
+@depends on patch@
 iterator name for_each_compatible_node;
 expression np,E;
 identifier l;
@@ -50,7 +53,7 @@ for_each_compatible_node(np,...) {
 ... when != np = E
 - of_node_put(np);
 
-@@
+@depends on patch@
 iterator name for_each_matching_node;
 expression np,E;
 identifier l;
@@ -62,3 +65,59 @@ for_each_matching_node(np,...) {
 }
 ... when != np = E
 - of_node_put(np);
+
+// ----------------------------------------------------------------------
+
+@r depends on !patch forall@
+//iterator name for_each_node_by_name;
+//iterator name for_each_node_by_type;
+//iterator name for_each_compatible_node;
+//iterator name for_each_matching_node;
+expression np,E;
+identifier l;
+position p1,p2;
+@@
+
+(
+*for_each_node_by_name@p1(np,...)
+{
+  ... when != break;
+      when != goto l;
+}
+|
+*for_each_node_by_type@p1(np,...)
+{
+  ... when != break;
+      when != goto l;
+}
+|
+*for_each_compatible_node@p1(np,...)
+{
+  ... when != break;
+      when != goto l;
+}
+|
+*for_each_matching_node@p1(np,...)
+{
+  ... when != break;
+      when != goto l;
+}
+)
+... when != np = E
+* of_node_put@p2(np);
+
+@script:python depends on org@
+p1 << r.p1;
+p2 << r.p2;
+@@
+
+cocci.print_main("unneeded of_node_put",p2)
+cocci.print_secs("iterator",p1)
+
+@script:python depends on report@
+p1 << r.p1;
+p2 << r.p2;
+@@
+
+msg = "ERROR: of_node_put not needed after iterator on line %s" % (p1[0].line)
+coccilib.report.print_report(p2[0], msg)
index baa4297a4ed18d6ae757b10404dcdcefb88d7892..259899f6838edaa463d16d445e7f7b8f39d132f6 100644 (file)
@@ -1,20 +1,24 @@
 /// Many iterators have the property that the first argument is always bound
-/// to a real list element, never NULL.  False positives arise for some
-/// iterators that do not have this property, or in cases when the loop
-/// cursor is reassigned.  The latter should only happen when the matched
-/// code is on the way to a loop exit (break, goto, or return).
+/// to a real list element, never NULL.
+//# False positives arise for some iterators that do not have this property,
+//# or in cases when the loop cursor is reassigned.  The latter should only
+//# happen when the matched code is on the way to a loop exit (break, goto,
+//# or return).
 ///
 // Confidence: Moderate
-// Copyright: (C) 2010 Nicolas Palix, DIKU.  GPLv2.
-// Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
-// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
+// Copyright: (C) 2010-2012 Nicolas Palix.  GPLv2.
+// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
 // Options: -no_includes -include_headers
 
 virtual patch
+virtual context
+virtual org
+virtual report
 
-@@
+@depends on patch@
 iterator I;
 expression x,E,E1,E2;
 statement S,S1,S2;
@@ -55,4 +59,36 @@ I(x,...) { <...
   x != NULL
 + )
 )
-  ...> }
\ No newline at end of file
+  ...> }
+
+@r depends on !patch exists@
+iterator I;
+expression x,E;
+position p1,p2;
+@@
+
+*I@p1(x,...)
+{ ... when != x = E
+(
+*  x@p2 == NULL
+|
+*  x@p2 != NULL
+)
+  ... when any
+}
+
+@script:python depends on org@
+p1 << r.p1;
+p2 << r.p2;
+@@
+
+cocci.print_main("iterator-bound variable",p1)
+cocci.print_secs("useless NULL test",p2)
+
+@script:python depends on report@
+p1 << r.p1;
+p2 << r.p2;
+@@
+
+msg = "ERROR: iterator variable bound on line %s cannot be NULL" % (p1[0].line)
+coccilib.report.print_report(p2[0], msg)
index 00af5344a68f2a1269b85e093bc25ddc56e0f98e..8f10b49603c32e11b9c0aa2845da8f11d7901f5e 100644 (file)
@@ -1,17 +1,20 @@
 /// Find functions that refer to GFP_KERNEL but are called with locks held.
-/// The proposed change of converting the GFP_KERNEL is not necessarily the
-/// correct one.  It may be desired to unlock the lock, or to not call the
-/// function under the lock in the first place.
+//# The proposed change of converting the GFP_KERNEL is not necessarily the
+//# correct one.  It may be desired to unlock the lock, or to not call the
+//# function under the lock in the first place.
 ///
 // Confidence: Moderate
-// Copyright: (C) 2010 Nicolas Palix, DIKU.  GPLv2.
-// Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
-// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
+// Copyright: (C) 2012 Nicolas Palix.  GPLv2.
+// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
 // Options: -no_includes -include_headers
 
 virtual patch
+virtual context
+virtual org
+virtual report
 
 @gfp exists@
 identifier fn;
@@ -32,28 +35,29 @@ fn(...) {
  ... when any
 }
 
-@locked@
+@locked exists@
 identifier gfp.fn;
+position p1,p2;
 @@
 
 (
-read_lock_irq
+read_lock_irq@p1
 |
-write_lock_irq
+write_lock_irq@p1
 |
-read_lock_irqsave
+read_lock_irqsave@p1
 |
-write_lock_irqsave
+write_lock_irqsave@p1
 |
-spin_lock
+spin_lock@p1
 |
-spin_trylock
+spin_trylock@p1
 |
-spin_lock_irq
+spin_lock_irq@p1
 |
-spin_lock_irqsave
+spin_lock_irqsave@p1
 |
-local_irq_disable
+local_irq_disable@p1
 )
  (...)
 ...  when != read_unlock_irq(...)
@@ -64,11 +68,38 @@ local_irq_disable
      when != spin_unlock_irq(...)
      when != spin_unlock_irqrestore(...)
      when != local_irq_enable(...)
-fn(...)
+fn@p2(...)
 
-@depends on locked@
+@depends on locked && patch@
 position gfp.p;
 @@
 
 - GFP_KERNEL@p
 + GFP_ATOMIC
+
+@depends on locked && !patch@
+position gfp.p;
+@@
+
+* GFP_KERNEL@p
+
+@script:python depends on !patch && org@
+p << gfp.p;
+fn << gfp.fn;
+p1 << locked.p1;
+p2 << locked.p2;
+@@
+
+cocci.print_main("lock",p1)
+cocci.print_secs("call",p2)
+cocci.print_secs("GFP_KERNEL",p)
+
+@script:python depends on !patch && report@
+p << gfp.p;
+fn << gfp.fn;
+p1 << locked.p1;
+p2 << locked.p2;
+@@
+
+msg = "ERROR: function %s called on line %s inside lock on line %s but uses GFP_KERNEL" % (fn,p2[0].line,p1[0].line)
+coccilib.report.print_report(p[0], msg)
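[Editor's note] A minimal sketch of the reported situation, with all names hypothetical. The report points at the lock, the call site and the GFP_KERNEL use; as the header says, switching to GFP_ATOMIC is only one possible fix, and moving the allocation out from under the lock is often better.

	static void *make_entry(size_t len)
	{
		return kmalloc(len, GFP_KERNEL);	/* GFP_KERNEL allocation: may sleep */
	}

	static int add_entry(struct mydev *dev, size_t len)	/* struct mydev is made up */
	{
		void *e;

		spin_lock(&dev->lock);
		e = make_entry(len);	/* flagged: called with dev->lock held, callee uses GFP_KERNEL */
		if (e)
			dev->scratch = e;
		spin_unlock(&dev->lock);
		return e ? 0 : -ENOMEM;
	}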
index b4344d838097bb4a541fbd15097d7ef81367bc02..1c4ffe6fd846746d64a1a34168fb3864f18b9af8 100644 (file)
@@ -1,9 +1,9 @@
 /// Find nested lock+irqsave functions that use the same flags variables
 ///
 // Confidence: High
-// Copyright: (C) 2010 Nicolas Palix, DIKU.  GPLv2.
-// Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
-// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
+// Copyright: (C) 2010-2012 Nicolas Palix.  GPLv2.
+// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
 // Options: -no_includes -include_headers
@@ -12,7 +12,7 @@ virtual context
 virtual org
 virtual report
 
-@r@
+@r exists@
 expression lock1,lock2,flags;
 position p1,p2;
 @@
@@ -39,7 +39,7 @@ read_lock_irqsave@p2(lock2,flags)
 write_lock_irqsave@p2(lock2,flags)
 )
 
-@d@
+@d exists@
 expression f <= r.flags;
 expression lock1,lock2,flags;
 position r.p1, r.p2;
@@ -76,5 +76,5 @@ p1 << r.p1;
 p2 << r.p2;
 @@
 
-msg="ERROR: nested lock+irqsave that reuses flags from %s." % (p1[0].line)
+msg="ERROR: nested lock+irqsave that reuses flags from line %s." % (p1[0].line)
 coccilib.report.print_report(p2[0], msg)
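[Editor's note] A minimal sketch (hypothetical locks) of the bug this rule finds: the inner _irqsave overwrites the flags word saved by the outer one, so the final restore brings back the inner, not the original, IRQ state. The usual fix is a second flags variable.

	unsigned long flags;

	spin_lock_irqsave(&outer_lock, flags);
	spin_lock_irqsave(&inner_lock, flags);	/* flagged: clobbers the flags saved above */
	/* ... critical section ... */
	spin_unlock_irqrestore(&inner_lock, flags);
	spin_unlock_irqrestore(&outer_lock, flags);	/* restores the wrong IRQ state */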
index 7641a2925434219ec314d3c3718b80908b77ff5c..3267d7410bd5072aaab32efd8d5e4c4f06205618 100644 (file)
@@ -6,13 +6,14 @@
 /// function call that releases the lock.
 ///
 // Confidence: Moderate
-// Copyright: (C) 2010 Nicolas Palix, DIKU.  GPLv2.
-// Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
-// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
+// Copyright: (C) 2010-2012 Nicolas Palix.  GPLv2.
+// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
 // Options: -no_includes -include_headers
 
+virtual context
 virtual org
 virtual report
 
@@ -57,7 +58,7 @@ position r;
 
 for(...;...;...) { <+... return@r ...; ...+> }
 
-@err@
+@err exists@
 expression E1;
 position prelocked.p;
 position up != prelocked.p1;
@@ -65,14 +66,14 @@ position r!=looped.r;
 identifier lock,unlock;
 @@
 
-lock(E1@p,...);
+*lock(E1@p,...);
 <+... when != E1
 if (...) {
   ... when != E1
-  return@r ...;
+*  return@r ...;
 }
 ...+>
-unlock@up(E1,...);
+*unlock@up(E1,...);
 
 @script:python depends on org@
 p << prelocked.p1;
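[Editor's note] The pattern starred by the new context mode, as a hypothetical sketch: an early return between the lock and the unlock leaves the lock held.

	static int update_state(struct mydev *dev, int val)	/* struct mydev is made up */
	{
		mutex_lock(&dev->lock);
		if (val < 0)
			return -EINVAL;	/* flagged: returns with dev->lock still held */
		dev->state = val;
		mutex_unlock(&dev->lock);
		return 0;
	}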
index 156b20adb35163a603a533a90d73d7a7cc6c6ad0..cf74a00cf59740d51f828f3140460f77268c1d40 100644 (file)
@@ -3,9 +3,9 @@
 /// initialization.
 ///
 // Confidence: Low
-// Copyright: (C) 2010 Nicolas Palix, DIKU.  GPLv2.
-// Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
-// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
+// Copyright: (C) 2010-2012 Nicolas Palix.  GPLv2.
+// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments: requires at least Coccinelle 0.2.4, lex or parse error otherwise
 // Options: -no_includes -include_headers
@@ -49,5 +49,5 @@ pr << r.p;
 @@
 
 if int(ps[0].line) < int(pr[0].line) or (int(ps[0].line) == int(pr[0].line) and int(ps[0].column) < int(pr[0].column)):
-  msg = "%s: first occurrence %s, second occurrence %s" % (fld,ps[0].line,pr[0].line)
+  msg = "%s: first occurrence line %s, second occurrence line %s" % (fld,ps[0].line,pr[0].line)
   coccilib.report.print_report(p0[0],msg)
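[Editor's note] Judging from the reported message, this rule looks for the same field appearing twice in an initializer; a hypothetical example (struct and field names invented):

	static const struct widget_ops my_ops = {
		.open = widget_open,
		.read = widget_read,
		.open = widget_open_v2,	/* reported: first occurrence above, second here */
	};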
index 4c9c52b9c41331c87d2b532b0439ea316a3cba24..ed961a1f7d11b28c8e9581e4bffa43edaa495a5b 100644 (file)
@@ -1,16 +1,19 @@
 /// The various basic memory allocation functions don't return ERR_PTR
 ///
 // Confidence: High
-// Copyright: (C) 2010 Nicolas Palix, DIKU.  GPLv2.
-// Copyright: (C) 2010 Julia Lawall, DIKU.  GPLv2.
-// Copyright: (C) 2010 Gilles Muller, INRIA/LiP6.  GPLv2.
+// Copyright: (C) 2010-2012 Nicolas Palix.  GPLv2.
+// Copyright: (C) 2010-2012 Julia Lawall, INRIA/LIP6.  GPLv2.
+// Copyright: (C) 2010-2012 Gilles Muller, INRIA/LiP6.  GPLv2.
 // URL: http://coccinelle.lip6.fr/
 // Comments:
 // Options: -no_includes -include_headers
 
 virtual patch
+virtual context
+virtual org
+virtual report
 
-@@
+@depends on patch@
 expression x,E;
 @@
 
@@ -18,3 +21,28 @@ x = \(kmalloc\|kzalloc\|kcalloc\|kmem_cache_alloc\|kmem_cache_zalloc\|kmem_cache
 ... when != x = E
 - IS_ERR(x)
 + !x
+
+@r depends on !patch exists@
+expression x,E;
+position p1,p2;
+@@
+
+*x = \(kmalloc@p1\|kzalloc@p1\|kcalloc@p1\|kmem_cache_alloc@p1\|kmem_cache_zalloc@p1\|kmem_cache_alloc_node@p1\|kmalloc_node@p1\|kzalloc_node@p1\)(...)
+... when != x = E
+* IS_ERR@p2(x)
+
+@script:python depends on org@
+p1 << r.p1;
+p2 << r.p2;
+@@
+
+cocci.print_main("alloc call",p1)
+cocci.print_secs("IS_ERR that should be NULL tests",p2)
+
+@script:python depends on report@
+p1 << r.p1;
+p2 << r.p2;
+@@
+
+msg = "ERROR: allocation function on line %s returns NULL not ERR_PTR on failure" % (p1[0].line)
+coccilib.report.print_report(p2[0], msg)
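[Editor's note] A minimal sketch (hypothetical code) of the report: the basic allocators return NULL on failure, never an ERR_PTR, so the IS_ERR() test can never catch the error.

	void *buf;

	buf = kmalloc(len, GFP_KERNEL);
	if (IS_ERR(buf))	/* flagged: kmalloc() returns NULL on failure */
		return PTR_ERR(buf);
	/* the patch mode rewrites the test to: if (!buf) */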
index cbc0193098e4e83ea4628facb1d86dcbcc54be18..451c92d31b196634cfb9ed7b2b15c632efd89691 100644 (file)
@@ -71,6 +71,7 @@ static void  __attribute__ ((noreturn)) usage(void)
        fprintf(stderr, "\t\t\tasm - assembler source\n");
        fprintf(stderr, "\t-V <output version>\n");
        fprintf(stderr, "\t\tBlob version to produce, defaults to %d (relevant for dtb\n\t\tand asm output only)\n", DEFAULT_FDT_VERSION);
+       fprintf(stderr, "\t-d <output dependency file>\n");
        fprintf(stderr, "\t-R <number>\n");
        fprintf(stderr, "\t\tMake space for <number> reserve map entries (relevant for \n\t\tdtb and asm output only)\n");
        fprintf(stderr, "\t-S <bytes>\n");
@@ -99,6 +100,7 @@ int main(int argc, char *argv[])
        const char *inform = "dts";
        const char *outform = "dts";
        const char *outname = "-";
+       const char *depname = NULL;
        int force = 0, check = 0, sort = 0;
        const char *arg;
        int opt;
@@ -111,7 +113,8 @@ int main(int argc, char *argv[])
        minsize    = 0;
        padsize    = 0;
 
-       while ((opt = getopt(argc, argv, "hI:O:o:V:R:S:p:fcqb:vH:s")) != EOF) {
+       while ((opt = getopt(argc, argv, "hI:O:o:V:d:R:S:p:fcqb:vH:s"))
+                       != EOF) {
                switch (opt) {
                case 'I':
                        inform = optarg;
@@ -125,6 +128,9 @@ int main(int argc, char *argv[])
                case 'V':
                        outversion = strtol(optarg, NULL, 0);
                        break;
+               case 'd':
+                       depname = optarg;
+                       break;
                case 'R':
                        reservenum = strtol(optarg, NULL, 0);
                        break;
@@ -188,6 +194,14 @@ int main(int argc, char *argv[])
        fprintf(stderr, "DTC: %s->%s  on file \"%s\"\n",
                inform, outform, arg);
 
+       if (depname) {
+               depfile = fopen(depname, "w");
+               if (!depfile)
+                       die("Couldn't open dependency file %s: %s\n", depname,
+                           strerror(errno));
+               fprintf(depfile, "%s:", outname);
+       }
+
        if (streq(inform, "dts"))
                bi = dt_from_source(arg);
        else if (streq(inform, "fs"))
@@ -197,6 +211,11 @@ int main(int argc, char *argv[])
        else
                die("Unknown input format \"%s\"\n", inform);
 
+       if (depfile) {
+               fputc('\n', depfile);
+               fclose(depfile);
+       }
+
        if (cmdline_boot_cpuid != -1)
                bi->boot_cpuid_phys = cmdline_boot_cpuid;
 
index 2dbc874288ca1b7487dd423d630cb7ac3b63e50f..36a38e9f1a2cb9200ee07bfce22eab489f1f1127 100644 (file)
@@ -40,6 +40,7 @@ static char *dirname(const char *path)
        return NULL;
 }
 
+FILE *depfile; /* = NULL */
 struct srcfile_state *current_srcfile; /* = NULL */
 
 /* Detect infinite include recursion. */
@@ -67,6 +68,9 @@ FILE *srcfile_relative_open(const char *fname, char **fullnamep)
                            strerror(errno));
        }
 
+       if (depfile)
+               fprintf(depfile, " %s", fullname);
+
        if (fullnamep)
                *fullnamep = fullname;
        else
index bd7966e56a53672f8c5c34369067c38696e9e246..ce980cafe58867d64d7a142b00914034f24f0d50 100644 (file)
@@ -30,6 +30,7 @@ struct srcfile_state {
        struct srcfile_state *prev;
 };
 
+extern FILE *depfile; /* = NULL */
 extern struct srcfile_state *current_srcfile; /* = NULL */
 
 FILE *srcfile_relative_open(const char *fname, char **fullnamep);
index a5510903e874a35a5cbb238e1eb50a7b82746fd6..aca33b98bf634b269b9857f5bc08d2e315f5f8d3 100644 (file)
@@ -11,3 +11,4 @@ HOSTCFLAGS_lex.lex.o := -I$(src)
 # dependencies on generated files need to be listed explicitly
 $(obj)/lex.lex.o: $(obj)/keywords.hash.c $(obj)/parse.tab.h
 
+clean-files    := keywords.hash.c lex.lex.c parse.tab.c parse.tab.h
index 914833d99b06f78242fa12584c568ecef6a5e65a..79662658fb9158af1d7b3ee43dba0cfab0aa42cf 100644 (file)
@@ -50,9 +50,8 @@ localyesconfig localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
 
 # Create new linux.pot file
 # Adjust charset to UTF-8 in .po file to accept UTF-8 in Kconfig files
-# The symlink is used to repair a deficiency in arch/um
 update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
-       $(Q)echo "  GEN config"
+       $(Q)echo "  GEN     config.pot"
        $(Q)xgettext --default-domain=linux                         \
            --add-comments --keyword=_ --keyword=N_                 \
            --from-code=UTF-8                                       \
@@ -63,10 +62,11 @@ update-po-config: $(obj)/kxgettext $(obj)/gconf.glade.h
        $(Q)(for i in `ls $(srctree)/arch/*/Kconfig      \
            $(srctree)/arch/*/um/Kconfig`;               \
            do                                           \
-               echo "  GEN $$i";                        \
+               echo "  GEN     $$i";                    \
                $(obj)/kxgettext $$i                     \
                     >> $(obj)/config.pot;               \
            done )
+       $(Q)echo "  GEN     linux.pot"
        $(Q)msguniq --sort-by-file --to-code=UTF-8 $(obj)/config.pot \
            --output $(obj)/linux.pot
        $(Q)rm -f $(obj)/config.pot
index 5a58965d8800daab99a8bd096262c1f95465482d..7c7a5a6cc3f504bd759dc01019536b2bf804fbbd 100644 (file)
@@ -464,7 +464,7 @@ kconfig_print_comment(FILE *fp, const char *value, void *arg)
                fprintf(fp, "#");
                if (l) {
                        fprintf(fp, " ");
-                       fwrite(p, l, 1, fp);
+                       xfwrite(p, l, 1, fp);
                        p += l;
                }
                fprintf(fp, "\n");
@@ -537,7 +537,7 @@ header_print_comment(FILE *fp, const char *value, void *arg)
                fprintf(fp, " *");
                if (l) {
                        fprintf(fp, " ");
-                       fwrite(p, l, 1, fp);
+                       xfwrite(p, l, 1, fp);
                        p += l;
                }
                fprintf(fp, "\n");
index 80fce57080cc36787e01efc00fe8a111efa3d0ff..d4ecce8bc3a689daa2157e4f47e65e7c62192bd4 100644 (file)
@@ -10,6 +10,7 @@
 extern "C" {
 #endif
 
+#include <assert.h>
 #include <stdio.h>
 #ifndef __cplusplus
 #include <stdbool.h>
index 9f4438027df4201b9fb34d4191cdb3da9ac4994f..adc230638c5b141ceaa2cf9b25d35f99324c19b6 100644 (file)
@@ -683,7 +683,7 @@ void on_introduction1_activate(GtkMenuItem * menuitem, gpointer user_data)
        dialog = gtk_message_dialog_new(GTK_WINDOW(main_wnd),
                                        GTK_DIALOG_DESTROY_WITH_PARENT,
                                        GTK_MESSAGE_INFO,
-                                       GTK_BUTTONS_CLOSE, intro_text);
+                                       GTK_BUTTONS_CLOSE, "%s", intro_text);
        g_signal_connect_swapped(GTK_OBJECT(dialog), "response",
                                 G_CALLBACK(gtk_widget_destroy),
                                 GTK_OBJECT(dialog));
@@ -701,7 +701,7 @@ void on_about1_activate(GtkMenuItem * menuitem, gpointer user_data)
        dialog = gtk_message_dialog_new(GTK_WINDOW(main_wnd),
                                        GTK_DIALOG_DESTROY_WITH_PARENT,
                                        GTK_MESSAGE_INFO,
-                                       GTK_BUTTONS_CLOSE, about_text);
+                                       GTK_BUTTONS_CLOSE, "%s", about_text);
        g_signal_connect_swapped(GTK_OBJECT(dialog), "response",
                                 G_CALLBACK(gtk_widget_destroy),
                                 GTK_OBJECT(dialog));
@@ -720,7 +720,7 @@ void on_license1_activate(GtkMenuItem * menuitem, gpointer user_data)
        dialog = gtk_message_dialog_new(GTK_WINDOW(main_wnd),
                                        GTK_DIALOG_DESTROY_WITH_PARENT,
                                        GTK_MESSAGE_INFO,
-                                       GTK_BUTTONS_CLOSE, license_text);
+                                       GTK_BUTTONS_CLOSE, "%s", license_text);
        g_signal_connect_swapped(GTK_OBJECT(dialog), "response",
                                 G_CALLBACK(gtk_widget_destroy),
                                 GTK_OBJECT(dialog));
@@ -830,7 +830,7 @@ static void renderer_edited(GtkCellRendererText * cell,
 static void change_sym_value(struct menu *menu, gint col)
 {
        struct symbol *sym = menu->sym;
-       tristate oldval, newval;
+       tristate newval;
 
        if (!sym)
                return;
@@ -847,7 +847,6 @@ static void change_sym_value(struct menu *menu, gint col)
        switch (sym_get_type(sym)) {
        case S_BOOLEAN:
        case S_TRISTATE:
-               oldval = sym_get_tristate_value(sym);
                if (!sym_tristate_within_range(sym, newval))
                        newval = yes;
                sym_set_tristate_value(sym, newval);
@@ -1278,7 +1277,6 @@ static void update_tree(struct menu *src, GtkTreeIter * dst)
        gboolean valid;
        GtkTreeIter *sibling;
        struct symbol *sym;
-       struct property *prop;
        struct menu *menu1, *menu2;
 
        if (src == &rootmenu)
@@ -1287,7 +1285,6 @@ static void update_tree(struct menu *src, GtkTreeIter * dst)
        valid = gtk_tree_model_iter_children(model2, child2, dst);
        for (child1 = src->list; child1; child1 = child1->next) {
 
-               prop = child1->prompt;
                sym = child1->sym;
 
              reparse:
index b633bdb9f3d47c815be8727b155471167135626a..c18f2bd9c095510d1d5e58575b265bd67ae9e781 100644 (file)
@@ -90,8 +90,10 @@ struct conf_printer {
 /* confdata.c and expr.c */
 static inline void xfwrite(const void *str, size_t len, size_t count, FILE *out)
 {
-       if (fwrite(str, len, count, out) < count)
-               fprintf(stderr, "\nError in writing or end of file.\n");
+       assert(len != 0);
+
+       if (fwrite(str, len, count, out) != count)
+               fprintf(stderr, "Error in writing or end of file.\n");
 }
 
 /* menu.c */
index 19e200d9112091fc3ddfe5aac59f94b32b38c4dc..2c6286c0bc1ac70ef190bdbd031905593070c933 100644 (file)
@@ -830,6 +830,8 @@ static int handle_exit(void)
                fprintf(stderr, _("\n\n"
                                  "Your configuration changes were NOT saved."
                                  "\n\n"));
+               if (res != KEY_ESC)
+                       res = 0;
        }
 
        return res;
diff --git a/scripts/kconfig/merge_config.sh b/scripts/kconfig/merge_config.sh
new file mode 100644 (file)
index 0000000..ceadf0e
--- /dev/null
@@ -0,0 +1,117 @@
+#!/bin/sh
+#  merge_config.sh - Takes a list of config fragment values, and merges
+#  them one by one. Provides warnings on overridden values, and specified
+#  values that did not make it to the resulting .config file (due to missed
+#  dependencies or config symbol removal).
+#
+#  Portions reused from kconf_check and generate_cfg:
+#  http://git.yoctoproject.org/cgit/cgit.cgi/yocto-kernel-tools/tree/tools/kconf_check
+#  http://git.yoctoproject.org/cgit/cgit.cgi/yocto-kernel-tools/tree/tools/generate_cfg
+#
+#  Copyright (c) 2009-2010 Wind River Systems, Inc.
+#  Copyright 2011 Linaro
+#
+#  This program is free software; you can redistribute it and/or modify
+#  it under the terms of the GNU General Public License version 2 as
+#  published by the Free Software Foundation.
+#
+#  This program is distributed in the hope that it will be useful,
+#  but WITHOUT ANY WARRANTY; without even the implied warranty of
+#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+#  See the GNU General Public License for more details.
+
+clean_up() {
+       rm -f $TMP_FILE
+       exit
+}
+trap clean_up HUP INT TERM
+
+usage() {
+       echo "Usage: $0 [OPTIONS] [CONFIG [...]]"
+       echo "  -h    display this help text"
+       echo "  -m    only merge the fragments, do not execute the make command"
+       echo "  -n    use allnoconfig instead of alldefconfig"
+}
+
+MAKE=true
+ALLTARGET=alldefconfig
+
+while true; do
+       case $1 in
+       "-n")
+               ALLTARGET=allnoconfig
+               shift
+               continue
+               ;;
+       "-m")
+               MAKE=false
+               shift
+               continue
+               ;;
+       "-h")
+               usage
+               exit
+               ;;
+       *)
+               break
+               ;;
+       esac
+done
+
+
+
+MERGE_LIST=$*
+SED_CONFIG_EXP="s/^\(# \)\{0,1\}\(CONFIG_[a-zA-Z0-9_]*\)[= ].*/\2/p"
+TMP_FILE=$(mktemp ./.tmp.config.XXXXXXXXXX)
+
+# Merge files, printing warnings on overridden values
+for MERGE_FILE in $MERGE_LIST ; do
+       echo "Merging $MERGE_FILE"
+       CFG_LIST=$(sed -n "$SED_CONFIG_EXP" $MERGE_FILE)
+
+       for CFG in $CFG_LIST ; do
+               grep -q -w $CFG $TMP_FILE
+               if [ $? -eq 0 ] ; then
+                       PREV_VAL=$(grep -w $CFG $TMP_FILE)
+                       NEW_VAL=$(grep -w $CFG $MERGE_FILE)
+                       if [ "x$PREV_VAL" != "x$NEW_VAL" ] ; then
+                       echo Value of $CFG is redefined by fragment $MERGE_FILE:
+                       echo Previous  value: $PREV_VAL
+                       echo New value:       $NEW_VAL
+                       echo
+                       fi
+                       sed -i "/$CFG[ =]/d" $TMP_FILE
+               fi
+       done
+       cat $MERGE_FILE >> $TMP_FILE
+done
+
+if [ "$MAKE" = "false" ]; then
+       cp $TMP_FILE .config
+       echo "#"
+       echo "# merged configuration written to .config (needs make)"
+       echo "#"
+       clean_up
+       exit
+fi
+
+# Use the merged file as the starting point for:
+# alldefconfig: Fills in any missing symbols with Kconfig default
+# allnoconfig: Fills in any missing symbols with # CONFIG_* is not set
+make KCONFIG_ALLCONFIG=$TMP_FILE $ALLTARGET
+
+
+# Check all specified config values took (might have missed-dependency issues)
+for CFG in $(sed -n "$SED_CONFIG_EXP" $TMP_FILE); do
+
+       REQUESTED_VAL=$(grep -w -e "$CFG" $TMP_FILE)
+       ACTUAL_VAL=$(grep -w -e "$CFG" .config)
+       if [ "x$REQUESTED_VAL" != "x$ACTUAL_VAL" ] ; then
+               echo "Value requested for $CFG not in final .config"
+               echo "Requested value:  $REQUESTED_VAL"
+               echo "Actual value:     $ACTUAL_VAL"
+               echo ""
+       fi
+done
+
+clean_up
index d793001929cf40c0e3a773927ac047878a81df30..9b0c0b8b4ab4cd483158cfb606aaba038fbbabb5 100755 (executable)
@@ -5,7 +5,7 @@ use strict;
 ## Copyright (c) 1998 Michael Zucchi, All Rights Reserved        ##
 ## Copyright (C) 2000, 1  Tim Waugh <twaugh@redhat.com>          ##
 ## Copyright (C) 2001  Simon Huggins                             ##
-## Copyright (C) 2005-2010  Randy Dunlap                         ##
+## Copyright (C) 2005-2012  Randy Dunlap                         ##
 ##                                                              ##
 ## #define enhancements by Armin Kuster <akuster@mvista.com>    ##
 ## Copyright (c) 2000 MontaVista Software, Inc.                         ##
@@ -1785,6 +1785,7 @@ sub dump_function($$) {
     $prototype =~ s/__devinit +//;
     $prototype =~ s/__init +//;
     $prototype =~ s/__init_or_module +//;
+    $prototype =~ s/__must_check +//;
     $prototype =~ s/^#\s*define\s+//; #ak added
     $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//;
 
index c0e14b3f2306f77b6e85d2d6f7f25a70b6b41a22..e8c9695777689eece29e90e3db66b2cc9d59c80e 100644 (file)
@@ -823,16 +823,6 @@ static int do_spi_entry(const char *filename, struct spi_device_id *id,
 }
 ADD_TO_DEVTABLE("spi", struct spi_device_id, do_spi_entry);
 
-/* Looks like: mcp:S */
-static int do_mcp_entry(const char *filename, struct mcp_device_id *id,
-                       char *alias)
-{
-       sprintf(alias, MCP_MODULE_PREFIX "%s", id->name);
-
-       return 1;
-}
-ADD_TO_DEVTABLE("mcp", struct mcp_device_id, do_mcp_entry); 
-
 static const struct dmifield {
        const char *prefix;
        int field;
index 38f6617a2cb16c614a2ed0a854eb97b7aa12cdbb..833813a99e7c23a29c4e7147c58ac0ca2c696400 100755 (executable)
@@ -132,7 +132,28 @@ exuberant()
        --regex-asm='/^(ENTRY|_GLOBAL)\(([^)]*)\).*/\2/'        \
        --regex-c='/^SYSCALL_DEFINE[[:digit:]]?\(([^,)]*).*/sys_\1/' \
        --regex-c++='/^TRACE_EVENT\(([^,)]*).*/trace_\1/'               \
-       --regex-c++='/^DEFINE_EVENT\([^,)]*, *([^,)]*).*/trace_\1/'
+       --regex-c++='/^DEFINE_EVENT\([^,)]*, *([^,)]*).*/trace_\1/'     \
+       --regex-c++='/PAGEFLAG\(([^,)]*).*/Page\1/'                     \
+       --regex-c++='/PAGEFLAG\(([^,)]*).*/SetPage\1/'                  \
+       --regex-c++='/PAGEFLAG\(([^,)]*).*/ClearPage\1/'                \
+       --regex-c++='/TESTSETFLAG\(([^,)]*).*/TestSetPage\1/'           \
+       --regex-c++='/TESTPAGEFLAG\(([^,)]*).*/Page\1/'                 \
+       --regex-c++='/SETPAGEFLAG\(([^,)]*).*/SetPage\1/'               \
+       --regex-c++='/__SETPAGEFLAG\(([^,)]*).*/__SetPage\1/'           \
+       --regex-c++='/TESTCLEARFLAG\(([^,)]*).*/TestClearPage\1/'       \
+       --regex-c++='/__TESTCLEARFLAG\(([^,)]*).*/TestClearPage\1/'     \
+       --regex-c++='/CLEARPAGEFLAG\(([^,)]*).*/ClearPage\1/'           \
+       --regex-c++='/__CLEARPAGEFLAG\(([^,)]*).*/__ClearPage\1/'       \
+       --regex-c++='/__PAGEFLAG\(([^,)]*).*/__SetPage\1/'              \
+       --regex-c++='/__PAGEFLAG\(([^,)]*).*/__ClearPage\1/'            \
+       --regex-c++='/PAGEFLAG_FALSE\(([^,)]*).*/Page\1/'               \
+       --regex-c++='/TESTSCFLAG\(([^,)]*).*/TestSetPage\1/'            \
+       --regex-c++='/TESTSCFLAG\(([^,)]*).*/TestClearPage\1/'          \
+       --regex-c++='/SETPAGEFLAG_NOOP\(([^,)]*).*/SetPage\1/'          \
+       --regex-c++='/CLEARPAGEFLAG_NOOP\(([^,)]*).*/ClearPage\1/'      \
+       --regex-c++='/__CLEARPAGEFLAG_NOOP\(([^,)]*).*/__ClearPage\1/'  \
+       --regex-c++='/TESTCLEARFLAG_FALSE\(([^,)]*).*/TestClearPage\1/' \
+       --regex-c++='/__TESTCLEARFLAG_FALSE\(([^,)]*).*/__TestClearPage\1/'
 
        all_kconfigs | xargs $1 -a                              \
        --langdef=kconfig --language-force=kconfig              \
@@ -146,6 +167,8 @@ exuberant()
        --langdef=dotconfig --language-force=dotconfig          \
        --regex-dotconfig='/^#?[[:blank:]]*(CONFIG_[[:alnum:]_]+)/\1/'
 
+       # Remove structure forward declarations.
+       LANG=C sed -i -e '/^\([a-zA-Z_][a-zA-Z0-9_]*\)\t.*\t\/\^struct \1;.*\$\/;"\tx$/d' tags
 }
 
 emacs()
@@ -154,7 +177,28 @@ emacs()
        --regex='/^(ENTRY|_GLOBAL)(\([^)]*\)).*/\2/'            \
        --regex='/^SYSCALL_DEFINE[0-9]?(\([^,)]*\).*/sys_\1/'   \
        --regex='/^TRACE_EVENT(\([^,)]*\).*/trace_\1/'          \
-       --regex='/^DEFINE_EVENT([^,)]*, *\([^,)]*\).*/trace_\1/'
+       --regex='/^DEFINE_EVENT([^,)]*, *\([^,)]*\).*/trace_\1/' \
+       --regex='/PAGEFLAG\(([^,)]*).*/Page\1/'                 \
+       --regex='/PAGEFLAG\(([^,)]*).*/SetPage\1/'              \
+       --regex='/PAGEFLAG\(([^,)]*).*/ClearPage\1/'            \
+       --regex='/TESTSETFLAG\(([^,)]*).*/TestSetPage\1/'       \
+       --regex='/TESTPAGEFLAG\(([^,)]*).*/Page\1/'             \
+       --regex='/SETPAGEFLAG\(([^,)]*).*/SetPage\1/'           \
+       --regex='/__SETPAGEFLAG\(([^,)]*).*/__SetPage\1/'       \
+       --regex='/TESTCLEARFLAG\(([^,)]*).*/TestClearPage\1/'   \
+       --regex='/__TESTCLEARFLAG\(([^,)]*).*/TestClearPage\1/' \
+       --regex='/CLEARPAGEFLAG\(([^,)]*).*/ClearPage\1/'       \
+       --regex='/__CLEARPAGEFLAG\(([^,)]*).*/__ClearPage\1/'   \
+       --regex='/__PAGEFLAG\(([^,)]*).*/__SetPage\1/'          \
+       --regex='/__PAGEFLAG\(([^,)]*).*/__ClearPage\1/'        \
+       --regex='/PAGEFLAG_FALSE\(([^,)]*).*/Page\1/'           \
+       --regex='/TESTSCFLAG\(([^,)]*).*/TestSetPage\1/'        \
+       --regex='/TESTSCFLAG\(([^,)]*).*/TestClearPage\1/'      \
+       --regex='/SETPAGEFLAG_NOOP\(([^,)]*).*/SetPage\1/'      \
+       --regex='/CLEARPAGEFLAG_NOOP\(([^,)]*).*/ClearPage\1/'  \
+       --regex='/__CLEARPAGEFLAG_NOOP\(([^,)]*).*/__ClearPage\1/' \
+       --regex='/TESTCLEARFLAG_FALSE\(([^,)]*).*/TestClearPage\1/' \
+       --regex='/__TESTCLEARFLAG_FALSE\(([^,)]*).*/__TestClearPage\1/'
 
        all_kconfigs | xargs $1 -a                              \
        --regex='/^[ \t]*\(\(menu\)*config\)[ \t]+\([a-zA-Z0-9_]+\)/\3/'
index d384ea921482088dea0c7c13f54e2d3c57c18f26..5bd1cc1b4a54dcae8681bf7e0c092d374614ab6d 100644 (file)
@@ -3,11 +3,11 @@ config INTEGRITY
        def_bool y
        depends on IMA || EVM
 
-config INTEGRITY_DIGSIG
+config INTEGRITY_SIGNATURE
        boolean "Digital signature verification using multiple keyrings"
        depends on INTEGRITY && KEYS
        default n
-       select DIGSIG
+       select SIGNATURE
        help
          This option enables digital signature verification support
          using multiple keyrings. It defines separate keyrings for each
index bece0563ee5e019bc9fa56bd2dff4e426cf262b7..d43799cc14f69a67da22249d637b5f9d1334b357 100644 (file)
@@ -3,7 +3,7 @@
 #
 
 obj-$(CONFIG_INTEGRITY) += integrity.o
-obj-$(CONFIG_INTEGRITY_DIGSIG) += digsig.o
+obj-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
 
 integrity-y := iint.o
 
index c5c5a72c30be30b0477446cd311e8f329cb7dd30..2ad942fb1e236e694eb50538ac71a33a90e8d40a 100644 (file)
@@ -56,9 +56,11 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
                audit_log_format(ab, " name=");
                audit_log_untrustedstring(ab, fname);
        }
-       if (inode)
-               audit_log_format(ab, " dev=%s ino=%lu",
-                                inode->i_sb->s_id, inode->i_ino);
+       if (inode) {
+               audit_log_format(ab, " dev=");
+               audit_log_untrustedstring(ab, inode->i_sb->s_id);
+               audit_log_format(ab, " ino=%lu", inode->i_ino);
+       }
        audit_log_format(ab, " res=%d", !result ? 0 : 1);
        audit_log_end(ab);
 }
index 1b422bc56264b8ef1d37e66a4cbbf0afec84ddb6..0fb643a9c916228296462a65b80efe18d6a5af26 100644 (file)
@@ -100,6 +100,7 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule,
                            struct inode *inode, enum ima_hooks func, int mask)
 {
        struct task_struct *tsk = current;
+       const struct cred *cred = current_cred();
        int i;
 
        if ((rule->flags & IMA_FUNC) && rule->func != func)
@@ -109,7 +110,7 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule,
        if ((rule->flags & IMA_FSMAGIC)
            && rule->fsmagic != inode->i_sb->s_magic)
                return false;
-       if ((rule->flags & IMA_UID) && rule->uid != tsk->cred->uid)
+       if ((rule->flags & IMA_UID) && rule->uid != cred->uid)
                return false;
        for (i = 0; i < MAX_LSM_RULES; i++) {
                int rc = 0;
index 4da6ba81d1532aa4690cc95b99ba682ddb0254b1..7a25ecec5aaac6b8d00c2bf0f926deaeea561163 100644 (file)
@@ -51,7 +51,7 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
 #define INTEGRITY_KEYRING_IMA          2
 #define INTEGRITY_KEYRING_MAX          3
 
-#ifdef CONFIG_INTEGRITY_DIGSIG
+#ifdef CONFIG_INTEGRITY_SIGNATURE
 
 int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
                                        const char *digest, int digestlen);
@@ -65,7 +65,7 @@ static inline int integrity_digsig_verify(const unsigned int id,
        return -EOPNOTSUPP;
 }
 
-#endif /* CONFIG_INTEGRITY_DIGSIG */
+#endif /* CONFIG_INTEGRITY_SIGNATURE */
 
 /* set during initialization */
 extern int iint_initialized;
index 41144f71d6154f612570f8f2c5243269becf68a4..2d1bb8af7696d5d431394cfbe8ac6d5e40f24da2 100644 (file)
@@ -314,7 +314,7 @@ static struct key *request_user_key(const char *master_desc, u8 **master_key,
                goto error;
 
        down_read(&ukey->sem);
-       upayload = rcu_dereference(ukey->payload.data);
+       upayload = ukey->payload.data;
        *master_key = upayload->data;
        *master_keylen = upayload->datalen;
 error:
@@ -810,7 +810,7 @@ static int encrypted_instantiate(struct key *key, const void *data,
                goto out;
        }
 
-       rcu_assign_pointer(key->payload.data, epayload);
+       rcu_assign_keypointer(key, epayload);
 out:
        kfree(datablob);
        return ret;
@@ -874,7 +874,7 @@ static int encrypted_update(struct key *key, const void *data, size_t datalen)
        memcpy(new_epayload->payload_data, epayload->payload_data,
               epayload->payload_datalen);
 
-       rcu_assign_pointer(key->payload.data, new_epayload);
+       rcu_assign_keypointer(key, new_epayload);
        call_rcu(&epayload->rcu, encrypted_rcu_free);
 out:
        kfree(buf);
index df87272e3f519ca6909e9f9e4ab8132554ae9abb..013f7e5d3a2fce42d026fd87a4a0d6e94af1d53f 100644 (file)
@@ -18,6 +18,8 @@
 #include <linux/module.h>
 #include <linux/err.h>
 #include <keys/trusted-type.h>
+#include <keys/encrypted-type.h>
+#include "encrypted.h"
 
 /*
  * request_trusted_key - request the trusted key
@@ -37,7 +39,7 @@ struct key *request_trusted_key(const char *trusted_desc,
                goto error;
 
        down_read(&tkey->sem);
-       tpayload = rcu_dereference(tkey->payload.data);
+       tpayload = tkey->payload.data;
        *master_key = tpayload->key;
        *master_keylen = tpayload->key_len;
 error:
index bf4d8da5a79502f626a849473e4af18e229eb074..a42b45531aac173a337498cfc26fd09ded18baee 100644 (file)
@@ -145,7 +145,9 @@ static void key_gc_keyring(struct key *keyring, time_t limit)
        if (!klist)
                goto unlock_dont_gc;
 
-       for (loop = klist->nkeys - 1; loop >= 0; loop--) {
+       loop = klist->nkeys;
+       smp_rmb();
+       for (loop--; loop >= 0; loop--) {
                key = klist->keys[loop];
                if (test_bit(KEY_FLAG_DEAD, &key->flags) ||
                    (key->expiry > 0 && key->expiry <= limit))
index c7a7caec4830b33e414ac7fa3b665633b853a080..65647f825584becdfa1db48302be9ba5e33df88e 100644 (file)
@@ -33,6 +33,7 @@
 
 extern struct key_type key_type_dead;
 extern struct key_type key_type_user;
+extern struct key_type key_type_logon;
 
 /*****************************************************************************/
 /*
index 4f64c7267afb64332db0e6218754a8d17190c870..7ada8019be1f2c08314fe0c8c14d3058a1ee26ea 100644 (file)
@@ -999,6 +999,7 @@ void __init key_init(void)
        list_add_tail(&key_type_keyring.link, &key_types_list);
        list_add_tail(&key_type_dead.link, &key_types_list);
        list_add_tail(&key_type_user.link, &key_types_list);
+       list_add_tail(&key_type_logon.link, &key_types_list);
 
        /* record the root user tracking */
        rb_link_node(&root_key_user.node,
index 37a7f3b28852e098f57ab1cd98eb2eccdad5c7c5..d605f75292e4390da5d7f8cd763266004288ecf2 100644 (file)
@@ -319,7 +319,7 @@ key_ref_t keyring_search_aux(key_ref_t keyring_ref,
        struct key *keyring, *key;
        key_ref_t key_ref;
        long err;
-       int sp, kix;
+       int sp, nkeys, kix;
 
        keyring = key_ref_to_ptr(keyring_ref);
        possessed = is_key_possessed(keyring_ref);
@@ -380,7 +380,9 @@ descend:
                goto not_this_keyring;
 
        /* iterate through the keys in this keyring first */
-       for (kix = 0; kix < keylist->nkeys; kix++) {
+       nkeys = keylist->nkeys;
+       smp_rmb();
+       for (kix = 0; kix < nkeys; kix++) {
                key = keylist->keys[kix];
                kflags = key->flags;
 
@@ -421,7 +423,9 @@ descend:
        /* search through the keyrings nested in this one */
        kix = 0;
 ascend:
-       for (; kix < keylist->nkeys; kix++) {
+       nkeys = keylist->nkeys;
+       smp_rmb();
+       for (; kix < nkeys; kix++) {
                key = keylist->keys[kix];
                if (key->type != &key_type_keyring)
                        continue;
@@ -515,7 +519,7 @@ key_ref_t __keyring_search_one(key_ref_t keyring_ref,
        struct keyring_list *klist;
        unsigned long possessed;
        struct key *keyring, *key;
-       int loop;
+       int nkeys, loop;
 
        keyring = key_ref_to_ptr(keyring_ref);
        possessed = is_key_possessed(keyring_ref);
@@ -524,7 +528,9 @@ key_ref_t __keyring_search_one(key_ref_t keyring_ref,
 
        klist = rcu_dereference(keyring->payload.subscriptions);
        if (klist) {
-               for (loop = 0; loop < klist->nkeys; loop++) {
+               nkeys = klist->nkeys;
+               smp_rmb();
+               for (loop = 0; loop < nkeys ; loop++) {
                        key = klist->keys[loop];
 
                        if (key->type == ktype &&
@@ -622,7 +628,7 @@ static int keyring_detect_cycle(struct key *A, struct key *B)
 
        struct keyring_list *keylist;
        struct key *subtree, *key;
-       int sp, kix, ret;
+       int sp, nkeys, kix, ret;
 
        rcu_read_lock();
 
@@ -645,7 +651,9 @@ descend:
 
 ascend:
        /* iterate through the remaining keys in this keyring */
-       for (; kix < keylist->nkeys; kix++) {
+       nkeys = keylist->nkeys;
+       smp_rmb();
+       for (; kix < nkeys; kix++) {
                key = keylist->keys[kix];
 
                if (key == A)
index 0ed5fdf238a22c6712e5375207f5bd6d8a168ef6..2d5d041f2049f323e5072c701f068d695f3f2c6c 100644 (file)
@@ -993,7 +993,7 @@ out:
        kfree(datablob);
        kfree(options);
        if (!ret)
-               rcu_assign_pointer(key->payload.data, payload);
+               rcu_assign_keypointer(key, payload);
        else
                kfree(payload);
        return ret;
@@ -1067,7 +1067,7 @@ static int trusted_update(struct key *key, const void *data, size_t datalen)
                        goto out;
                }
        }
-       rcu_assign_pointer(key->payload.data, new_p);
+       rcu_assign_keypointer(key, new_p);
        call_rcu(&p->rcu, trusted_rcu_free);
 out:
        kfree(datablob);
index 69ff52c08e97bb0eab715b1ea61c709e4f80faad..c7660a25a3e4502673714b39553a0d9d00873672 100644 (file)
@@ -18,6 +18,8 @@
 #include <asm/uaccess.h>
 #include "internal.h"
 
+static int logon_vet_description(const char *desc);
+
 /*
  * user defined keys take an arbitrary string as the description and an
  * arbitrary blob of data as the payload
@@ -35,6 +37,24 @@ struct key_type key_type_user = {
 
 EXPORT_SYMBOL_GPL(key_type_user);
 
+/*
+ * This key type is essentially the same as key_type_user, but it does
+ * not define a .read op. This is suitable for storing username and
+ * password pairs in the keyring that you do not want to be readable
+ * from userspace.
+ */
+struct key_type key_type_logon = {
+       .name                   = "logon",
+       .instantiate            = user_instantiate,
+       .update                 = user_update,
+       .match                  = user_match,
+       .revoke                 = user_revoke,
+       .destroy                = user_destroy,
+       .describe               = user_describe,
+       .vet_description        = logon_vet_description,
+};
+EXPORT_SYMBOL_GPL(key_type_logon);
+
 /*
  * instantiate a user defined key
  */
@@ -59,7 +79,7 @@ int user_instantiate(struct key *key, const void *data, size_t datalen)
        /* attach the data */
        upayload->datalen = datalen;
        memcpy(upayload->data, data, datalen);
-       rcu_assign_pointer(key->payload.data, upayload);
+       rcu_assign_keypointer(key, upayload);
        ret = 0;
 
 error:
@@ -98,7 +118,7 @@ int user_update(struct key *key, const void *data, size_t datalen)
        if (ret == 0) {
                /* attach the new data, displacing the old */
                zap = key->payload.data;
-               rcu_assign_pointer(key->payload.data, upayload);
+               rcu_assign_keypointer(key, upayload);
                key->expiry = 0;
        }
 
@@ -133,7 +153,7 @@ void user_revoke(struct key *key)
        key_payload_reserve(key, 0);
 
        if (upayload) {
-               rcu_assign_pointer(key->payload.data, NULL);
+               rcu_assign_keypointer(key, NULL);
                kfree_rcu(upayload, rcu);
        }
 }
@@ -189,3 +209,20 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen)
 }
 
 EXPORT_SYMBOL_GPL(user_read);
+
+/* Vet the description for a "logon" key */
+static int logon_vet_description(const char *desc)
+{
+       char *p;
+
+       /* require a "qualified" description string */
+       p = strchr(desc, ':');
+       if (!p)
+               return -EINVAL;
+
+       /* also reject description with ':' as first char */
+       if (p == desc)
+               return -EINVAL;
+
+       return 0;
+}
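[Editor's note] For illustration only, a hedged userspace sketch of the new "logon" type, written against the keyutils library (add_key(), keyctl_read_alloc(), link with -lkeyutils); the service and user names are invented. The description must contain a ':' after a non-empty prefix, and because the type defines no .read op the payload cannot be read back from userspace.

	#include <keyutils.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		const char secret[] = "hunter2";	/* example payload */
		void *payload = NULL;
		key_serial_t key;

		/* description must look like "service:identifier", otherwise add_key() fails */
		key = add_key("logon", "myservice:alice", secret, strlen(secret),
			      KEY_SPEC_SESSION_KEYRING);
		if (key < 0) {
			perror("add_key");
			return 1;
		}

		/* no .read op: reading the payload back is expected to fail */
		if (keyctl_read_alloc(key, &payload) < 0)
			perror("keyctl_read_alloc");
		free(payload);

		return 0;
	}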
index 7bd6f138236b3a010d457ab473ea73fd02641c8c..293b8c45b1d1bbf9a28edff35001b8c02eb09191 100644 (file)
@@ -232,13 +232,14 @@ static void dump_common_audit_data(struct audit_buffer *ab,
        case LSM_AUDIT_DATA_PATH: {
                struct inode *inode;
 
-               audit_log_d_path(ab, "path=", &a->u.path);
+               audit_log_d_path(ab, " path=", &a->u.path);
 
                inode = a->u.path.dentry->d_inode;
-               if (inode)
-                       audit_log_format(ab, " dev=%s ino=%lu",
-                                       inode->i_sb->s_id,
-                                       inode->i_ino);
+               if (inode) {
+                       audit_log_format(ab, " dev=");
+                       audit_log_untrustedstring(ab, inode->i_sb->s_id);
+                       audit_log_format(ab, " ino=%lu", inode->i_ino);
+               }
                break;
        }
        case LSM_AUDIT_DATA_DENTRY: {
@@ -248,10 +249,11 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
 
                inode = a->u.dentry->d_inode;
-               if (inode)
-                       audit_log_format(ab, " dev=%s ino=%lu",
-                                       inode->i_sb->s_id,
-                                       inode->i_ino);
+               if (inode) {
+                       audit_log_format(ab, " dev=");
+                       audit_log_untrustedstring(ab, inode->i_sb->s_id);
+                       audit_log_format(ab, " ino=%lu", inode->i_ino);
+               }
                break;
        }
        case LSM_AUDIT_DATA_INODE: {
@@ -266,8 +268,9 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                                         dentry->d_name.name);
                        dput(dentry);
                }
-               audit_log_format(ab, " dev=%s ino=%lu", inode->i_sb->s_id,
-                                inode->i_ino);
+               audit_log_format(ab, " dev=");
+               audit_log_untrustedstring(ab, inode->i_sb->s_id);
+               audit_log_format(ab, " ino=%lu", inode->i_ino);
                break;
        }
        case LSM_AUDIT_DATA_TASK:
@@ -315,7 +318,7 @@ static void dump_common_audit_data(struct audit_buffer *ab,
                                                .dentry = u->dentry,
                                                .mnt = u->mnt
                                        };
-                                       audit_log_d_path(ab, "path=", &path);
+                                       audit_log_d_path(ab, " path=", &path);
                                        break;
                                }
                                if (!u->addr)
index 4a9b4b2eb755c0129d408b371ebb3f102b862662..867558c983349d144c5f46a729af55a2a72dbe0a 100644 (file)
@@ -492,13 +492,13 @@ static bool tomoyo_correct_word2(const char *string, size_t len)
                                if (d < '0' || d > '7' || e < '0' || e > '7')
                                        break;
                                c = tomoyo_make_byte(c, d, e);
-                               if (tomoyo_invalid(c))
-                                       continue; /* pattern is not \000 */
+                               if (c <= ' ' || c >= 127)
+                                       continue;
                        }
                        goto out;
                } else if (in_repetition && c == '/') {
                        goto out;
-               } else if (tomoyo_invalid(c)) {
+               } else if (c <= ' ' || c >= 127) {
                        goto out;
                }
        }
index 6fd9391b3a6cd1c4aeb60d791effd9edd985ac18..4fa1dbd8ee8381e1fbc4260c779cfecdea1d1923 100644 (file)
@@ -133,7 +133,7 @@ static int atmel_abdac_prepare_dma(struct atmel_abdac *dac,
        period_len = frames_to_bytes(runtime, runtime->period_size);
 
        cdesc = dw_dma_cyclic_prep(chan, runtime->dma_addr, buffer_len,
-                       period_len, DMA_TO_DEVICE);
+                       period_len, DMA_MEM_TO_DEV);
        if (IS_ERR(cdesc)) {
                dev_dbg(&dac->pdev->dev, "could not prepare cyclic DMA\n");
                return PTR_ERR(cdesc);
index 73516f69ac7ca8a33244cb300df8958ac2d77e20..61dade6983582ce415afdbe3cf88d208bb628c46 100644 (file)
@@ -102,7 +102,7 @@ static void atmel_ac97c_dma_capture_period_done(void *arg)
 
 static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip,
                struct snd_pcm_substream *substream,
-               enum dma_data_direction direction)
+               enum dma_transfer_direction direction)
 {
        struct dma_chan                 *chan;
        struct dw_cyclic_desc           *cdesc;
@@ -118,7 +118,7 @@ static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip,
                return -EINVAL;
        }
 
-       if (direction == DMA_TO_DEVICE)
+       if (direction == DMA_MEM_TO_DEV)
                chan = chip->dma.tx_chan;
        else
                chan = chip->dma.rx_chan;
@@ -133,7 +133,7 @@ static int atmel_ac97c_prepare_dma(struct atmel_ac97c *chip,
                return PTR_ERR(cdesc);
        }
 
-       if (direction == DMA_TO_DEVICE) {
+       if (direction == DMA_MEM_TO_DEV) {
                cdesc->period_callback = atmel_ac97c_dma_playback_period_done;
                set_bit(DMA_TX_READY, &chip->flags);
        } else {
@@ -393,7 +393,7 @@ static int atmel_ac97c_playback_prepare(struct snd_pcm_substream *substream)
        if (cpu_is_at32ap7000()) {
                if (!test_bit(DMA_TX_READY, &chip->flags))
                        retval = atmel_ac97c_prepare_dma(chip, substream,
-                                       DMA_TO_DEVICE);
+                                       DMA_MEM_TO_DEV);
        } else {
                /* Initialize and start the PDC */
                writel(runtime->dma_addr, chip->regs + ATMEL_PDC_TPR);
@@ -484,7 +484,7 @@ static int atmel_ac97c_capture_prepare(struct snd_pcm_substream *substream)
        if (cpu_is_at32ap7000()) {
                if (!test_bit(DMA_RX_READY, &chip->flags))
                        retval = atmel_ac97c_prepare_dma(chip, substream,
-                                       DMA_FROM_DEVICE);
+                                       DMA_DEV_TO_MEM);
        } else {
                /* Initialize and start the PDC */
                writel(runtime->dma_addr, chip->regs + ATMEL_PDC_RPR);
index ad409381f8cca21370f4802b1a5044af2f946b71..b413ed05e74deae78fbd97809c99f0fd68f1ee46 100644 (file)
@@ -12,6 +12,9 @@ config SND_HWDEP
 config SND_RAWMIDI
        tristate
 
+config SND_COMPRESS_OFFLOAD
+       tristate
+
 # To be effective this also requires INPUT - users should say:
 #    select SND_JACK if INPUT=y || INPUT=SND
 # to avoid having to force INPUT on.
@@ -154,16 +157,6 @@ config SND_DYNAMIC_MINORS
 
          If you are unsure about this, say N here.
 
-config SND_COMPRESS_OFFLOAD
-       tristate "ALSA Compressed audio offload support"
-       default n
-       help
-         If you want support for offloading compressed audio and have such
-         a hardware, then you should say Y here and also to the DSP driver
-         of your platform.
-
-         If you are unsure about this, say N here.
-
 config SND_SUPPORT_OLD_API
        bool "Support old ALSA API"
        default y
index dac3633507c9631a300abb4d7f3c35bbaa3ddb5a..a68aed7fce0205462ce08183f560015eeb0060cb 100644 (file)
@@ -441,19 +441,22 @@ snd_compr_set_params(struct snd_compr_stream *stream, unsigned long arg)
                params = kmalloc(sizeof(*params), GFP_KERNEL);
                if (!params)
                        return -ENOMEM;
-               if (copy_from_user(params, (void __user *)arg, sizeof(*params)))
-                       return -EFAULT;
+               if (copy_from_user(params, (void __user *)arg, sizeof(*params))) {
+                       retval = -EFAULT;
+                       goto out;
+               }
                retval = snd_compr_allocate_buffer(stream, params);
                if (retval) {
-                       kfree(params);
-                       return -ENOMEM;
+                       retval = -ENOMEM;
+                       goto out;
                }
                retval = stream->ops->set_params(stream, params);
                if (retval)
                        goto out;
                stream->runtime->state = SNDRV_PCM_STATE_SETUP;
-       } else
+       } else {
                return -EPERM;
+       }
 out:
        kfree(params);
        return retval;
index e09f144177f502f6176139e3e2258890ae88a560..c99c6078be3376fa792f8b335e31f57a80a7c221 100644 (file)
@@ -22,7 +22,6 @@
 #include "emu8000_local.h"
 #include <asm/uaccess.h>
 #include <linux/moduleparam.h>
-#include <linux/moduleparam.h>
 
 static int emu8000_reset_addr;
 module_param(emu8000_reset_addr, int, 0444);
index 762bb108c51c139b71e93cf8d627ebae858a25d2..f13ad536b2d59fe19ab9826b5abc0c301d5133cf 100644 (file)
@@ -268,8 +268,14 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
                card->shortname, chip->io, chip->irq);
 
        // (4) Alloc components.
+       err = snd_vortex_mixer(chip);
+       if (err < 0) {
+               snd_card_free(card);
+               return err;
+       }
        // ADB pcm.
-       if ((err = snd_vortex_new_pcm(chip, VORTEX_PCM_ADB, NR_ADB)) < 0) {
+       err = snd_vortex_new_pcm(chip, VORTEX_PCM_ADB, NR_PCM);
+       if (err < 0) {
                snd_card_free(card);
                return err;
        }
@@ -299,11 +305,6 @@ snd_vortex_probe(struct pci_dev *pci, const struct pci_device_id *pci_id)
                return err;
        }
 #endif
-       // snd_ac97_mixer and Vortex mixer.
-       if ((err = snd_vortex_mixer(chip)) < 0) {
-               snd_card_free(card);
-               return err;
-       }
        if ((err = snd_vortex_midi(chip)) < 0) {
                snd_card_free(card);
                return err;
index 02f6e08f7592e8bcd6c539598f397ebbf88856aa..bb938153a964811b41f471f064fbd293fb660e77 100644 (file)
 #define MIX_SPDIF(x) (vortex->mixspdif[x])
 
 #define NR_WTPB 0x20           /* WT channels per each bank. */
+#define NR_PCM 0x10
 
 /* Structs */
 typedef struct {
index 0488633ea87474c608591601201030a65a1c91f5..0ef2f97122080f206699a01e32b7f1f1ce7b6825 100644 (file)
@@ -168,6 +168,7 @@ static int snd_vortex_pcm_open(struct snd_pcm_substream *substream)
                        runtime->hw = snd_vortex_playback_hw_adb;
 #ifdef CHIP_AU8830
                if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK &&
+                       VORTEX_IS_QUAD(vortex) &&
                        VORTEX_PCM_TYPE(substream->pcm) == VORTEX_PCM_ADB) {
                        runtime->hw.channels_max = 4;
                        snd_pcm_hw_constraint_list(runtime, 0,
index 5b68435d195ba29d530113dd8471f06ea5d8e64e..501501ef36a9d0133d8635795e617d1694ea4496 100644 (file)
@@ -762,16 +762,22 @@ static void alc880_uniwill_unsol_event(struct hda_codec *codec,
        /* Looks like the unsol event is incompatible with the standard
         * definition.  4bit tag is placed at 28 bit!
         */
-       switch (res >> 28) {
+       res >>= 28;
+       switch (res) {
        case ALC_MIC_EVENT:
                alc88x_simple_mic_automute(codec);
                break;
        default:
-               alc_sku_unsol_event(codec, res);
+               alc_exec_unsol_event(codec, res);
                break;
        }
 }
 
+static void alc880_unsol_event(struct hda_codec *codec, unsigned int res)
+{
+       alc_exec_unsol_event(codec, res >> 28);
+}
+
 static void alc880_uniwill_p53_setup(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
@@ -800,10 +806,11 @@ static void alc880_uniwill_p53_unsol_event(struct hda_codec *codec,
        /* Looks like the unsol event is incompatible with the standard
         * definition.  4bit tag is placed at 28 bit!
         */
-       if ((res >> 28) == ALC_DCVOL_EVENT)
+       res >>= 28;
+       if (res == ALC_DCVOL_EVENT)
                alc880_uniwill_p53_dcvol_automute(codec);
        else
-               alc_sku_unsol_event(codec, res);
+               alc_exec_unsol_event(codec, res);
 }
 
 /*
@@ -1677,7 +1684,7 @@ static const struct alc_config_preset alc880_presets[] = {
                .channel_mode = alc880_lg_ch_modes,
                .need_dac_fix = 1,
                .input_mux = &alc880_lg_capture_source,
-               .unsol_event = alc_sku_unsol_event,
+               .unsol_event = alc880_unsol_event,
                .setup = alc880_lg_setup,
                .init_hook = alc_hp_automute,
 #ifdef CONFIG_SND_HDA_POWER_SAVE
index bdf0ed4ab3e24663284013856ed9f83a95c86acd..bb364a53f546bd65154115ce0cc81bc613f38f72 100644 (file)
@@ -730,6 +730,11 @@ static void alc889A_mb31_unsol_event(struct hda_codec *codec, unsigned int res)
                alc889A_mb31_automute(codec);
 }
 
+static void alc882_unsol_event(struct hda_codec *codec, unsigned int res)
+{
+       alc_exec_unsol_event(codec, res >> 26);
+}
+
 /*
  * configuration and preset
  */
@@ -775,7 +780,7 @@ static const struct alc_config_preset alc882_presets[] = {
                        .channel_mode = alc885_mba21_ch_modes,
                        .num_channel_mode = ARRAY_SIZE(alc885_mba21_ch_modes),
                        .input_mux = &alc882_capture_source,
-                       .unsol_event = alc_sku_unsol_event,
+                       .unsol_event = alc882_unsol_event,
                        .setup = alc885_mba21_setup,
                        .init_hook = alc_hp_automute,
        },
@@ -791,7 +796,7 @@ static const struct alc_config_preset alc882_presets[] = {
                .input_mux = &alc882_capture_source,
                .dig_out_nid = ALC882_DIGOUT_NID,
                .dig_in_nid = ALC882_DIGIN_NID,
-               .unsol_event = alc_sku_unsol_event,
+               .unsol_event = alc882_unsol_event,
                .setup = alc885_mbp3_setup,
                .init_hook = alc_hp_automute,
        },
@@ -806,7 +811,7 @@ static const struct alc_config_preset alc882_presets[] = {
                .input_mux = &mb5_capture_source,
                .dig_out_nid = ALC882_DIGOUT_NID,
                .dig_in_nid = ALC882_DIGIN_NID,
-               .unsol_event = alc_sku_unsol_event,
+               .unsol_event = alc882_unsol_event,
                .setup = alc885_mb5_setup,
                .init_hook = alc_hp_automute,
        },
@@ -821,7 +826,7 @@ static const struct alc_config_preset alc882_presets[] = {
                .input_mux = &macmini3_capture_source,
                .dig_out_nid = ALC882_DIGOUT_NID,
                .dig_in_nid = ALC882_DIGIN_NID,
-               .unsol_event = alc_sku_unsol_event,
+               .unsol_event = alc882_unsol_event,
                .setup = alc885_macmini3_setup,
                .init_hook = alc_hp_automute,
        },
@@ -836,7 +841,7 @@ static const struct alc_config_preset alc882_presets[] = {
                .input_mux = &alc889A_imac91_capture_source,
                .dig_out_nid = ALC882_DIGOUT_NID,
                .dig_in_nid = ALC882_DIGIN_NID,
-               .unsol_event = alc_sku_unsol_event,
+               .unsol_event = alc882_unsol_event,
                .setup = alc885_imac91_setup,
                .init_hook = alc_hp_automute,
        },
index 4df72c0e8c37c9011cecff85cca8e834d74260de..c2c65f63bf068a0d39fbfd271be81d74c22d0459 100644 (file)
@@ -1447,7 +1447,7 @@ void snd_hda_codec_setup_stream(struct hda_codec *codec, hda_nid_t nid,
                for (i = 0; i < c->cvt_setups.used; i++) {
                        p = snd_array_elem(&c->cvt_setups, i);
                        if (!p->active && p->stream_tag == stream_tag &&
-                           get_wcaps_type(get_wcaps(codec, p->nid)) == type)
+                           get_wcaps_type(get_wcaps(c, p->nid)) == type)
                                p->dirty = 1;
                }
        }
index 0852e204a4c8439e557bd553dcde3aae1f7c173a..95dfb687494144e54b191e3f7f4dbe7179bcf37d 100644 (file)
@@ -469,6 +469,7 @@ struct azx {
        unsigned int irq_pending_warned :1;
        unsigned int probing :1; /* codec probing phase */
        unsigned int snoop:1;
+       unsigned int align_buffer_size:1;
 
        /* for debugging */
        unsigned int last_cmd[AZX_MAX_CODECS];
@@ -1690,7 +1691,7 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
        runtime->hw.rates = hinfo->rates;
        snd_pcm_limit_hw_rates(runtime);
        snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
-       if (align_buffer_size)
+       if (chip->align_buffer_size)
                /* constrain buffer sizes to be multiple of 128
                   bytes. This is more efficient in terms of memory
                   access but isn't required by the HDA spec and
@@ -2498,6 +2499,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
        SND_PCI_QUIRK(0x1043, 0x81b3, "ASUS", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1043, 0x81e7, "ASUS M2V", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x104d, 0x9069, "Sony VPCS11V9E", POS_FIX_LPIB),
+       SND_PCI_QUIRK(0x10de, 0xcb89, "Macbook Pro 7,1", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1297, 0x3166, "Shuttle", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
        SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
@@ -2772,8 +2774,9 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
        }
 
        /* disable buffer size rounding to 128-byte multiples if supported */
+       chip->align_buffer_size = align_buffer_size;
        if (chip->driver_caps & AZX_DCAPS_BUFSIZE)
-               align_buffer_size = 0;
+               chip->align_buffer_size = 0;
 
        /* allow 64bit DMA address if supported by H/W */
        if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64)))
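
The hunk above turns the module-wide align_buffer_size option into a per-chip align_buffer_size:1 bitfield, presumably so that a controller with AZX_DCAPS_BUFSIZE can drop the constraint without affecting other cards in the same system. A minimal standalone sketch of that pattern, with hypothetical names (my_chip, opt_align, CAP_NO_ALIGN) that are not part of the driver:

    #include <stdbool.h>
    #include <stdio.h>

    static bool opt_align = true;             /* module-wide default, e.g. a module parameter */

    #define CAP_NO_ALIGN  0x1

    struct my_chip {
        unsigned int caps;
        unsigned int align_buffer_size:1;     /* per-instance copy of the option */
    };

    static void my_chip_create(struct my_chip *chip, unsigned int caps)
    {
        chip->caps = caps;
        chip->align_buffer_size = opt_align;  /* snapshot the global option ... */
        if (chip->caps & CAP_NO_ALIGN)
            chip->align_buffer_size = 0;      /* ... and override it per chip, not globally */
    }

    int main(void)
    {
        struct my_chip a, b;
        my_chip_create(&a, CAP_NO_ALIGN);
        my_chip_create(&b, 0);
        printf("chip a aligns: %d, chip b aligns: %d\n",
               a.align_buffer_size, b.align_buffer_size);   /* prints 0 and 1 */
        return 0;
    }
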
index d8a35da0803ff853becddd8c899e2cddba8b6293..9d819c4b4923d18c67447953b02395408e923afc 100644 (file)
@@ -282,7 +282,8 @@ int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid,
 EXPORT_SYMBOL_HDA(snd_hda_jack_add_kctl);
 
 static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
-                        const struct auto_pin_cfg *cfg)
+                        const struct auto_pin_cfg *cfg,
+                        char *lastname, int *lastidx)
 {
        unsigned int def_conf, conn;
        char name[44];
@@ -298,6 +299,10 @@ static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid,
                return 0;
 
        snd_hda_get_pin_label(codec, nid, cfg, name, sizeof(name), &idx);
+       if (!strcmp(name, lastname) && idx == *lastidx)
+               idx++;
+       strncpy(lastname, name, 44);
+       *lastidx = idx;
        err = snd_hda_jack_add_kctl(codec, nid, name, idx);
        if (err < 0)
                return err;
@@ -311,41 +316,42 @@ int snd_hda_jack_add_kctls(struct hda_codec *codec,
                           const struct auto_pin_cfg *cfg)
 {
        const hda_nid_t *p;
-       int i, err;
+       int i, err, lastidx = 0;
+       char lastname[44] = "";
 
        for (i = 0, p = cfg->line_out_pins; i < cfg->line_outs; i++, p++) {
-               err = add_jack_kctl(codec, *p, cfg);
+               err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx);
                if (err < 0)
                        return err;
        }
        for (i = 0, p = cfg->hp_pins; i < cfg->hp_outs; i++, p++) {
                if (*p == *cfg->line_out_pins) /* might be duplicated */
                        break;
-               err = add_jack_kctl(codec, *p, cfg);
+               err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx);
                if (err < 0)
                        return err;
        }
        for (i = 0, p = cfg->speaker_pins; i < cfg->speaker_outs; i++, p++) {
                if (*p == *cfg->line_out_pins) /* might be duplicated */
                        break;
-               err = add_jack_kctl(codec, *p, cfg);
+               err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx);
                if (err < 0)
                        return err;
        }
        for (i = 0; i < cfg->num_inputs; i++) {
-               err = add_jack_kctl(codec, cfg->inputs[i].pin, cfg);
+               err = add_jack_kctl(codec, cfg->inputs[i].pin, cfg, lastname, &lastidx);
                if (err < 0)
                        return err;
        }
        for (i = 0, p = cfg->dig_out_pins; i < cfg->dig_outs; i++, p++) {
-               err = add_jack_kctl(codec, *p, cfg);
+               err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx);
                if (err < 0)
                        return err;
        }
-       err = add_jack_kctl(codec, cfg->dig_in_pin, cfg);
+       err = add_jack_kctl(codec, cfg->dig_in_pin, cfg, lastname, &lastidx);
        if (err < 0)
                return err;
-       err = add_jack_kctl(codec, cfg->mono_out_pin, cfg);
+       err = add_jack_kctl(codec, cfg->mono_out_pin, cfg, lastname, &lastidx);
        if (err < 0)
                return err;
        return 0;
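
The hda_jack.c hunk above threads a lastname/lastidx pair through add_jack_kctl() so that two consecutive pins resolving to the same label get distinct control indices. A self-contained sketch of that de-duplication step, using made-up labels rather than real pin configurations:

    #include <stdio.h>
    #include <string.h>

    #define NAME_LEN 44

    /* bump idx when the generated (name, idx) pair repeats the previous one */
    static int dedup_index(const char *name, int idx, char *lastname, int *lastidx)
    {
        if (!strcmp(name, lastname) && idx == *lastidx)
            idx++;
        strncpy(lastname, name, NAME_LEN - 1);
        lastname[NAME_LEN - 1] = '\0';
        *lastidx = idx;
        return idx;
    }

    int main(void)
    {
        const char *labels[] = { "Headphone", "Headphone", "Mic", "Mic" };
        char lastname[NAME_LEN] = "";
        int lastidx = 0;

        for (int i = 0; i < 4; i++) {
            int idx = dedup_index(labels[i], 0, lastname, &lastidx);
            printf("%s, index %d\n", labels[i], idx);  /* Headphone 0, Headphone 1, Mic 0, Mic 1 */
        }
        return 0;
    }
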
index 35abe3c6290884f1e842d3bd1caa5ccef955de88..21d91d580da8b9e29091dca8ad5d91440b38797a 100644 (file)
@@ -728,18 +728,19 @@ static int ca0132_hp_switch_put(struct snd_kcontrol *kcontrol,
 
        err = chipio_read(codec, REG_CODEC_MUTE, &data);
        if (err < 0)
-               return err;
+               goto exit;
 
        /* *valp 0 is mute, 1 is unmute */
        data = (data & 0x7f) | (*valp ? 0 : 0x80);
-       chipio_write(codec, REG_CODEC_MUTE, data);
+       err = chipio_write(codec, REG_CODEC_MUTE, data);
        if (err < 0)
-               return err;
+               goto exit;
 
        spec->curr_hp_switch = *valp;
 
+ exit:
        snd_hda_power_down(codec);
-       return 1;
+       return err < 0 ? err : 1;
 }
 
 static int ca0132_speaker_switch_get(struct snd_kcontrol *kcontrol,
@@ -770,18 +771,19 @@ static int ca0132_speaker_switch_put(struct snd_kcontrol *kcontrol,
 
        err = chipio_read(codec, REG_CODEC_MUTE, &data);
        if (err < 0)
-               return err;
+               goto exit;
 
        /* *valp 0 is mute, 1 is unmute */
        data = (data & 0xef) | (*valp ? 0 : 0x10);
-       chipio_write(codec, REG_CODEC_MUTE, data);
+       err = chipio_write(codec, REG_CODEC_MUTE, data);
        if (err < 0)
-               return err;
+               goto exit;
 
        spec->curr_speaker_switch = *valp;
 
+ exit:
        snd_hda_power_down(codec);
-       return 1;
+       return err < 0 ? err : 1;
 }
 
 static int ca0132_hp_volume_get(struct snd_kcontrol *kcontrol,
@@ -819,25 +821,26 @@ static int ca0132_hp_volume_put(struct snd_kcontrol *kcontrol,
 
        err = chipio_read(codec, REG_CODEC_HP_VOL_L, &data);
        if (err < 0)
-               return err;
+               goto exit;
 
        val = 31 - left_vol;
        data = (data & 0xe0) | val;
-       chipio_write(codec, REG_CODEC_HP_VOL_L, data);
+       err = chipio_write(codec, REG_CODEC_HP_VOL_L, data);
        if (err < 0)
-               return err;
+               goto exit;
 
        val = 31 - right_vol;
        data = (data & 0xe0) | val;
-       chipio_write(codec, REG_CODEC_HP_VOL_R, data);
+       err = chipio_write(codec, REG_CODEC_HP_VOL_R, data);
        if (err < 0)
-               return err;
+               goto exit;
 
        spec->curr_hp_volume[0] = left_vol;
        spec->curr_hp_volume[1] = right_vol;
 
+ exit:
        snd_hda_power_down(codec);
-       return 1;
+       return err < 0 ? err : 1;
 }
 
 static int add_hp_switch(struct hda_codec *codec, hda_nid_t nid)
@@ -936,6 +939,8 @@ static int ca0132_build_controls(struct hda_codec *codec)
                if (err < 0)
                        return err;
                err = add_in_volume(codec, spec->dig_in, "IEC958");
+               if (err < 0)
+                       return err;
        }
        return 0;
 }
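
The ca0132 hunks above convert early "return err;" paths into "goto exit;" so that the trailing snd_hda_power_down() runs on error paths too, and the final "return err < 0 ? err : 1;" still reports a changed control on success. A compact sketch of that single-exit pattern, with placeholder helpers (power_up, power_down, do_io) that are not ALSA calls:

    #include <stdio.h>

    static void power_up(void)   { puts("power up"); }
    static void power_down(void) { puts("power down"); }
    static int  do_io(int fail)  { return fail ? -5 : 0; }   /* -EIO-style error */

    /* returns <0 on error, 1 when the control value changed */
    static int put_control(int fail_second_write)
    {
        int err;

        power_up();

        err = do_io(0);
        if (err < 0)
            goto exit;                /* no early return: the power-down below must run */

        err = do_io(fail_second_write);
        if (err < 0)
            goto exit;

     exit:
        power_down();                 /* executed on both the success and error paths */
        return err < 0 ? err : 1;
    }

    int main(void)
    {
        printf("ok path  -> %d\n", put_control(0));   /* 1 */
        printf("err path -> %d\n", put_control(1));   /* -5 */
        return 0;
    }
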
index 0e99357e822c201a03ffeadf451e9cc29e2001c0..bc5a993d11461868a2115d5b81efec25114a9996 100644 (file)
@@ -988,8 +988,10 @@ static void cs_automic(struct hda_codec *codec)
                        change_cur_input(codec, !spec->automic_idx, 0);
        } else {
                if (present) {
-                       spec->last_input = spec->cur_input;
-                       spec->cur_input = spec->automic_idx;
+                       if (spec->cur_input != spec->automic_idx) {
+                               spec->last_input = spec->cur_input;
+                               spec->cur_input = spec->automic_idx;
+                       }
                } else  {
                        spec->cur_input = spec->last_input;
                }
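
The patch_cirrus.c hunk above only records last_input when the automatic-mic selection actually changes the current input, so a repeated jack event cannot overwrite the value that unplugging is supposed to restore. A tiny sketch of that guard, with invented input indices:

    #include <stdio.h>

    static int cur_input, last_input, automic_idx = 1;

    static void automic(int present)
    {
        if (present) {
            if (cur_input != automic_idx) {  /* only remember a *different* previous input */
                last_input = cur_input;
                cur_input = automic_idx;
            }
        } else {
            cur_input = last_input;          /* unplug: restore what was selected before */
        }
    }

    int main(void)
    {
        cur_input = 0;
        automic(1); automic(1);              /* repeated "present" events */
        automic(0);
        printf("restored input: %d\n", cur_input);   /* 0, not 1 */
        return 0;
    }
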
index 8a32a69c83c330939b18f10f2cd3a6c9cfc10ebe..a7a5733aa4d20d2ea25edf104e4568b4e42cab3d 100644 (file)
@@ -3027,7 +3027,7 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400s", CXT5066_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x21c5, "Thinkpad Edge 13", CXT5066_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x21c6, "Thinkpad Edge 13", CXT5066_ASUS),
-       SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD),
+       SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo T510", CXT5066_AUTO),
        SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520 & W520", CXT5066_AUTO),
        SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD),
        SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD),
index 5e82acf77c5ae84cbd3f77b1d3b8165246a8df22..9350f3c3bdf89445ece8a6f88a439703aa5a48cf 100644 (file)
@@ -177,6 +177,7 @@ struct alc_spec {
        unsigned int detect_lo:1;       /* Line-out detection enabled */
        unsigned int automute_speaker_possible:1; /* there are speakers and either LO or HP */
        unsigned int automute_lo_possible:1;      /* there are line outs and HP */
+       unsigned int keep_vref_in_automute:1; /* Don't clear VREF in automute */
 
        /* other flags */
        unsigned int no_analog :1; /* digital I/O only */
@@ -185,7 +186,6 @@ struct alc_spec {
        unsigned int vol_in_capsrc:1; /* use capsrc volume (ADC has no vol) */
        unsigned int parse_flags; /* passed to snd_hda_parse_pin_defcfg() */
        unsigned int shared_mic_hp:1; /* HP/Mic-in sharing */
-       unsigned int use_jack_tbl:1; /* 1 for model=auto */
 
        /* auto-mute control */
        int automute_mode;
@@ -496,13 +496,24 @@ static void do_automute(struct hda_codec *codec, int num_pins, hda_nid_t *pins,
 
        for (i = 0; i < num_pins; i++) {
                hda_nid_t nid = pins[i];
+               unsigned int val;
                if (!nid)
                        break;
                switch (spec->automute_mode) {
                case ALC_AUTOMUTE_PIN:
+                       /* don't reset VREF value in case it's controlling
+                        * the amp (see alc861_fixup_asus_amp_vref_0f())
+                        */
+                       if (spec->keep_vref_in_automute) {
+                               val = snd_hda_codec_read(codec, nid, 0,
+                                       AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+                               val &= ~PIN_HP;
+                       } else
+                               val = 0;
+                       val |= pin_bits;
                        snd_hda_codec_write(codec, nid, 0,
                                            AC_VERB_SET_PIN_WIDGET_CONTROL,
-                                           pin_bits);
+                                           val);
                        break;
                case ALC_AUTOMUTE_AMP:
                        snd_hda_codec_amp_stereo(codec, nid, HDA_OUTPUT, 0,
@@ -621,17 +632,10 @@ static void alc_mic_automute(struct hda_codec *codec)
                alc_mux_select(codec, 0, spec->int_mic_idx, false);
 }
 
-/* unsolicited event for HP jack sensing */
-static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
+/* handle the specified unsol action (ALC_XXX_EVENT) */
+static void alc_exec_unsol_event(struct hda_codec *codec, int action)
 {
-       struct alc_spec *spec = codec->spec;
-       if (codec->vendor_id == 0x10ec0880)
-               res >>= 28;
-       else
-               res >>= 26;
-       if (spec->use_jack_tbl)
-               res = snd_hda_jack_get_action(codec, res);
-       switch (res) {
+       switch (action) {
        case ALC_HP_EVENT:
                alc_hp_automute(codec);
                break;
@@ -645,6 +649,17 @@ static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
        snd_hda_jack_report_sync(codec);
 }
 
+/* unsolicited event for HP jack sensing */
+static void alc_sku_unsol_event(struct hda_codec *codec, unsigned int res)
+{
+       if (codec->vendor_id == 0x10ec0880)
+               res >>= 28;
+       else
+               res >>= 26;
+       res = snd_hda_jack_get_action(codec, res);
+       alc_exec_unsol_event(codec, res);
+}
+
 /* call init functions of standard auto-mute helpers */
 static void alc_inithook(struct hda_codec *codec)
 {
@@ -1840,6 +1855,8 @@ static const char * const alc_slave_vols[] = {
        "Speaker Playback Volume",
        "Mono Playback Volume",
        "Line-Out Playback Volume",
+       "CLFE Playback Volume",
+       "Bass Speaker Playback Volume",
        "PCM Playback Volume",
        NULL,
 };
@@ -1855,6 +1872,8 @@ static const char * const alc_slave_sws[] = {
        "Mono Playback Switch",
        "IEC958 Playback Switch",
        "Line-Out Playback Switch",
+       "CLFE Playback Switch",
+       "Bass Speaker Playback Switch",
        "PCM Playback Switch",
        NULL,
 };
@@ -1883,7 +1902,7 @@ static const struct snd_kcontrol_new alc_beep_mixer[] = {
 };
 #endif
 
-static int alc_build_controls(struct hda_codec *codec)
+static int __alc_build_controls(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
        struct snd_kcontrol *kctl = NULL;
@@ -2029,11 +2048,16 @@ static int alc_build_controls(struct hda_codec *codec)
 
        alc_free_kctls(codec); /* no longer needed */
 
-       err = snd_hda_jack_add_kctls(codec, &spec->autocfg);
+       return 0;
+}
+
+static int alc_build_controls(struct hda_codec *codec)
+{
+       struct alc_spec *spec = codec->spec;
+       int err = __alc_build_controls(codec);
        if (err < 0)
                return err;
-
-       return 0;
+       return snd_hda_jack_add_kctls(codec, &spec->autocfg);
 }
 
 
@@ -2298,7 +2322,7 @@ static int alc_build_pcms(struct hda_codec *codec)
                 "%s Analog", codec->chip_name);
        info->name = spec->stream_name_analog;
 
-       if (spec->multiout.dac_nids > 0) {
+       if (spec->multiout.num_dacs > 0) {
                p = spec->stream_analog_playback;
                if (!p)
                        p = &alc_pcm_analog_playback;
@@ -3233,7 +3257,7 @@ static int alc_auto_create_multi_out_ctls(struct hda_codec *codec,
        int i, err, noutputs;
 
        noutputs = cfg->line_outs;
-       if (spec->multi_ios > 0)
+       if (spec->multi_ios > 0 && cfg->line_outs < 3)
                noutputs += spec->multi_ios;
 
        for (i = 0; i < noutputs; i++) {
@@ -3904,7 +3928,6 @@ static void set_capture_mixer(struct hda_codec *codec)
 static void alc_auto_init_std(struct hda_codec *codec)
 {
        struct alc_spec *spec = codec->spec;
-       spec->use_jack_tbl = 1;
        alc_auto_init_multi_out(codec);
        alc_auto_init_extra_out(codec);
        alc_auto_init_analog_input(codec);
@@ -4168,6 +4191,8 @@ static int patch_alc880(struct hda_codec *codec)
        codec->patch_ops = alc_patch_ops;
        if (board_config == ALC_MODEL_AUTO)
                spec->init_hook = alc_auto_init_std;
+       else
+               codec->patch_ops.build_controls = __alc_build_controls;
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (!spec->loopback.amplist)
                spec->loopback.amplist = alc880_loopbacks;
@@ -4297,6 +4322,8 @@ static int patch_alc260(struct hda_codec *codec)
        codec->patch_ops = alc_patch_ops;
        if (board_config == ALC_MODEL_AUTO)
                spec->init_hook = alc_auto_init_std;
+       else
+               codec->patch_ops.build_controls = __alc_build_controls;
        spec->shutup = alc_eapd_shutup;
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (!spec->loopback.amplist)
@@ -4691,6 +4718,8 @@ static int patch_alc882(struct hda_codec *codec)
        codec->patch_ops = alc_patch_ops;
        if (board_config == ALC_MODEL_AUTO)
                spec->init_hook = alc_auto_init_std;
+       else
+               codec->patch_ops.build_controls = __alc_build_controls;
 
 #ifdef CONFIG_SND_HDA_POWER_SAVE
        if (!spec->loopback.amplist)
@@ -4722,7 +4751,6 @@ enum {
        ALC262_FIXUP_FSC_H270,
        ALC262_FIXUP_HP_Z200,
        ALC262_FIXUP_TYAN,
-       ALC262_FIXUP_TOSHIBA_RX1,
        ALC262_FIXUP_LENOVO_3000,
        ALC262_FIXUP_BENQ,
        ALC262_FIXUP_BENQ_T31,
@@ -4752,16 +4780,6 @@ static const struct alc_fixup alc262_fixups[] = {
                        { }
                }
        },
-       [ALC262_FIXUP_TOSHIBA_RX1] = {
-               .type = ALC_FIXUP_PINS,
-               .v.pins = (const struct alc_pincfg[]) {
-                       { 0x14, 0x90170110 }, /* speaker */
-                       { 0x15, 0x0421101f }, /* HP */
-                       { 0x1a, 0x40f000f0 }, /* N/A */
-                       { 0x1b, 0x40f000f0 }, /* N/A */
-                       { 0x1e, 0x40f000f0 }, /* N/A */
-               }
-       },
        [ALC262_FIXUP_LENOVO_3000] = {
                .type = ALC_FIXUP_VERBS,
                .v.verbs = (const struct hda_verb[]) {
@@ -4794,8 +4812,6 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
        SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu", ALC262_FIXUP_BENQ),
        SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
        SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
-       SND_PCI_QUIRK(0x1179, 0x0001, "Toshiba dynabook SS RX1",
-                     ALC262_FIXUP_TOSHIBA_RX1),
        SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
        SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
        SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
@@ -5364,7 +5380,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1043, 0x8330, "ASUS Eeepc P703 P900A",
                      ALC269_FIXUP_AMIC),
        SND_PCI_QUIRK(0x1043, 0x1013, "ASUS N61Da", ALC269_FIXUP_AMIC),
-       SND_PCI_QUIRK(0x1043, 0x1113, "ASUS N63Jn", ALC269_FIXUP_AMIC),
        SND_PCI_QUIRK(0x1043, 0x1143, "ASUS B53f", ALC269_FIXUP_AMIC),
        SND_PCI_QUIRK(0x1043, 0x1133, "ASUS UJ20ft", ALC269_FIXUP_AMIC),
        SND_PCI_QUIRK(0x1043, 0x1183, "ASUS K72DR", ALC269_FIXUP_AMIC),
@@ -5573,8 +5588,28 @@ static const struct hda_amp_list alc861_loopbacks[] = {
 /* Pin config fixes */
 enum {
        PINFIX_FSC_AMILO_PI1505,
+       PINFIX_ASUS_A6RP,
 };
 
+/* On some laptops, VREF of pin 0x0f is abused for controlling the main amp */
+static void alc861_fixup_asus_amp_vref_0f(struct hda_codec *codec,
+                       const struct alc_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+       unsigned int val;
+
+       if (action != ALC_FIXUP_ACT_INIT)
+               return;
+       val = snd_hda_codec_read(codec, 0x0f, 0,
+                                AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+       if (!(val & (AC_PINCTL_IN_EN | AC_PINCTL_OUT_EN)))
+               val |= AC_PINCTL_IN_EN;
+       val |= AC_PINCTL_VREF_50;
+       snd_hda_codec_write(codec, 0x0f, 0,
+                           AC_VERB_SET_PIN_WIDGET_CONTROL, val);
+       spec->keep_vref_in_automute = 1;
+}
+
 static const struct alc_fixup alc861_fixups[] = {
        [PINFIX_FSC_AMILO_PI1505] = {
                .type = ALC_FIXUP_PINS,
@@ -5584,9 +5619,16 @@ static const struct alc_fixup alc861_fixups[] = {
                        { }
                }
        },
+       [PINFIX_ASUS_A6RP] = {
+               .type = ALC_FIXUP_FUNC,
+               .v.func = alc861_fixup_asus_amp_vref_0f,
+       },
 };
 
 static const struct snd_pci_quirk alc861_fixup_tbl[] = {
+       SND_PCI_QUIRK_VENDOR(0x1043, "ASUS laptop", PINFIX_ASUS_A6RP),
+       SND_PCI_QUIRK(0x1584, 0x0000, "Uniwill ECS M31EI", PINFIX_ASUS_A6RP),   
+       SND_PCI_QUIRK(0x1584, 0x2b01, "Haier W18", PINFIX_ASUS_A6RP),
        SND_PCI_QUIRK(0x1734, 0x10c7, "FSC Amilo Pi1505", PINFIX_FSC_AMILO_PI1505),
        {}
 };
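
Two of the patch_realtek.c hunks above cooperate: alc861_fixup_asus_amp_vref_0f() sets VREF on pin 0x0f to drive the external amp and flags keep_vref_in_automute, and do_automute() then performs a read-modify-write that clears only the HP bit instead of rewriting the whole pin control, so the VREF setting survives auto-mute. A standalone sketch of that read-modify-write, with simplified bit definitions standing in for the real AC_PINCTL_*/PIN_* values:

    #include <stdio.h>

    /* simplified stand-ins for the HDA pin-control bits */
    #define PIN_HP_EN    0x80
    #define PIN_OUT_EN   0x40
    #define PIN_VREF_50  0x01

    static unsigned int pinctl = PIN_OUT_EN | PIN_HP_EN | PIN_VREF_50;

    static void automute(int mute, int keep_vref)
    {
        unsigned int val;
        unsigned int pin_bits = mute ? 0 : (PIN_OUT_EN | PIN_HP_EN);

        if (keep_vref) {
            val = pinctl;          /* read back the current pin control ... */
            val &= ~PIN_HP_EN;     /* ... and clear only the bit automute toggles */
        } else {
            val = 0;               /* legacy behaviour: rewrite the whole register */
        }
        val |= pin_bits;
        pinctl = val;
    }

    int main(void)
    {
        automute(1, 1);
        printf("muted, VREF kept:    %#x\n", pinctl);   /* VREF bit still set */
        automute(1, 0);
        printf("muted, VREF dropped: %#x\n", pinctl);   /* everything cleared */
        return 0;
    }
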
index 87e684fa830f83df49bfec3aa5e894d8c97bfa07..948f0be2f4f3180261c25086126a8932fae87f62 100644 (file)
@@ -1596,7 +1596,7 @@ static const struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02bd,
                                "Dell Studio 1557", STAC_DELL_M6_DMIC),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x02fe,
-                               "Dell Studio XPS 1645", STAC_DELL_M6_BOTH),
+                               "Dell Studio XPS 1645", STAC_DELL_M6_DMIC),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0413,
                                "Dell Studio 1558", STAC_DELL_M6_DMIC),
        {} /* terminator */
@@ -1608,7 +1608,7 @@ static const struct snd_pci_quirk stac92hd73xx_codec_id_cfg_tbl[] = {
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x043a,
                      "Alienware M17x", STAC_ALIENWARE_M17X),
        SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
-                     "Alienware M17x", STAC_ALIENWARE_M17X),
+                     "Alienware M17x R3", STAC_DELL_EQ),
        {} /* terminator */
 };
 
@@ -4163,13 +4163,15 @@ static int enable_pin_detect(struct hda_codec *codec, hda_nid_t nid,
        return 1;
 }
 
-static int is_nid_hp_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
+static int is_nid_out_jack_pin(struct auto_pin_cfg *cfg, hda_nid_t nid)
 {
        int i;
        for (i = 0; i < cfg->hp_outs; i++)
                if (cfg->hp_pins[i] == nid)
                        return 1; /* nid is a HP-Out */
-
+       for (i = 0; i < cfg->line_outs; i++)
+               if (cfg->line_out_pins[i] == nid)
+                       return 1; /* nid is a line-Out */
        return 0; /* nid is not a HP-Out */
 };
 
@@ -4375,7 +4377,7 @@ static int stac92xx_init(struct hda_codec *codec)
                        continue;
                }
 
-               if (is_nid_hp_pin(cfg, nid))
+               if (is_nid_out_jack_pin(cfg, nid))
                        continue; /* already has an unsol event */
 
                pinctl = snd_hda_codec_read(codec, nid, 0,
@@ -4868,7 +4870,14 @@ static int find_mute_led_cfg(struct hda_codec *codec, int default_polarity)
                        /* BIOS bug: unfilled OEM string */
                        if (strstr(dev->name, "HP_Mute_LED_P_G")) {
                                set_hp_led_gpio(codec);
-                               spec->gpio_led_polarity = 1;
+                               switch (codec->subsystem_id) {
+                               case 0x103c148a:
+                                       spec->gpio_led_polarity = 0;
+                                       break;
+                               default:
+                                       spec->gpio_led_polarity = 1;
+                                       break;
+                               }
                                return 1;
                        }
                }
index 03e63fed9caf7d9e8342373901a5086f884122d9..284e311040fe39b58289bc61de556bc4815de76a 100644 (file)
@@ -199,6 +199,9 @@ struct via_spec {
        unsigned int no_pin_power_ctl;
        enum VIA_HDA_CODEC codec_type;
 
+       /* analog low-power control */
+       bool alc_mode;
+
        /* smart51 setup */
        unsigned int smart51_nums;
        hda_nid_t smart51_pins[2];
@@ -687,6 +690,15 @@ static void via_auto_init_analog_input(struct hda_codec *codec)
        }
 }
 
+static void update_power_state(struct hda_codec *codec, hda_nid_t nid,
+                              unsigned int parm)
+{
+       if (snd_hda_codec_read(codec, nid, 0,
+                              AC_VERB_GET_POWER_STATE, 0) == parm)
+               return;
+       snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, parm);
+}
+
 static void set_pin_power_state(struct hda_codec *codec, hda_nid_t nid,
                                unsigned int *affected_parm)
 {
@@ -709,7 +721,7 @@ static void set_pin_power_state(struct hda_codec *codec, hda_nid_t nid,
        } else
                parm = AC_PWRST_D3;
 
-       snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, nid, parm);
 }
 
 static int via_pin_power_ctl_info(struct snd_kcontrol *kcontrol,
@@ -749,6 +761,7 @@ static int via_pin_power_ctl_put(struct snd_kcontrol *kcontrol,
                return 0;
        spec->no_pin_power_ctl = val;
        set_widgets_power_state(codec);
+       analog_low_current_mode(codec);
        return 1;
 }
 
@@ -1036,13 +1049,19 @@ static bool is_aa_path_mute(struct hda_codec *codec)
 }
 
 /* enter/exit analog low-current mode */
-static void analog_low_current_mode(struct hda_codec *codec)
+static void __analog_low_current_mode(struct hda_codec *codec, bool force)
 {
        struct via_spec *spec = codec->spec;
        bool enable;
        unsigned int verb, parm;
 
-       enable = is_aa_path_mute(codec) && (spec->opened_streams != 0);
+       if (spec->no_pin_power_ctl)
+               enable = false;
+       else
+               enable = is_aa_path_mute(codec) && !spec->opened_streams;
+       if (enable == spec->alc_mode && !force)
+               return;
+       spec->alc_mode = enable;
 
        /* decide low current mode's verb & parameter */
        switch (spec->codec_type) {
@@ -1074,6 +1093,11 @@ static void analog_low_current_mode(struct hda_codec *codec)
        snd_hda_codec_write(codec, codec->afg, 0, verb, parm);
 }
 
+static void analog_low_current_mode(struct hda_codec *codec)
+{
+       return __analog_low_current_mode(codec, false);
+}
+
 /*
  * generic initialization of ADC, input mixers and output mixers
  */
@@ -1446,6 +1470,7 @@ static int via_build_controls(struct hda_codec *codec)
        struct snd_kcontrol *kctl;
        int err, i;
 
+       spec->no_pin_power_ctl = 1;
        if (spec->set_widgets_power_state)
                if (!via_clone_control(spec, &via_pin_power_ctl_enum))
                        return -ENOMEM;
@@ -1499,10 +1524,6 @@ static int via_build_controls(struct hda_codec *codec)
                        return err;
        }
 
-       /* init power states */
-       set_widgets_power_state(codec);
-       analog_low_current_mode(codec);
-
        via_free_kctls(codec); /* no longer needed */
 
        err = snd_hda_jack_add_kctls(codec, &spec->autocfg);
@@ -2295,10 +2316,7 @@ static int via_mux_enum_put(struct snd_kcontrol *kcontrol,
 
        if (mux) {
                /* switch to D0 before changing the index */
-               if (snd_hda_codec_read(codec, mux, 0,
-                              AC_VERB_GET_POWER_STATE, 0x00) != AC_PWRST_D0)
-                       snd_hda_codec_write(codec, mux, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+               update_power_state(codec, mux, AC_PWRST_D0);
                snd_hda_codec_write(codec, mux, 0,
                                    AC_VERB_SET_CONNECT_SEL,
                                    spec->inputs[cur].mux_idx);
@@ -2776,6 +2794,10 @@ static int via_init(struct hda_codec *codec)
        for (i = 0; i < spec->num_iverbs; i++)
                snd_hda_sequence_write(codec, spec->init_verbs[i]);
 
+       /* init power states */
+       set_widgets_power_state(codec);
+       __analog_low_current_mode(codec, true);
+
        via_auto_init_multi_out(codec);
        via_auto_init_hp_out(codec);
        via_auto_init_speaker_out(codec);
@@ -2922,9 +2944,9 @@ static void set_widgets_power_state_vt1708B(struct hda_codec *codec)
        if (imux_is_smixer)
                parm = AC_PWRST_D0;
        /* SW0 (17h), AIW 0/1 (13h/14h) */
-       snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x13, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x14, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x17, parm);
+       update_power_state(codec, 0x13, parm);
+       update_power_state(codec, 0x14, parm);
 
        /* outputs */
        /* PW0 (19h), SW1 (18h), AOW1 (11h) */
@@ -2932,8 +2954,8 @@ static void set_widgets_power_state_vt1708B(struct hda_codec *codec)
        set_pin_power_state(codec, 0x19, &parm);
        if (spec->smart51_enabled)
                set_pin_power_state(codec, 0x1b, &parm);
-       snd_hda_codec_write(codec, 0x18, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x18, parm);
+       update_power_state(codec, 0x11, parm);
 
        /* PW6 (22h), SW2 (26h), AOW2 (24h) */
        if (is_8ch) {
@@ -2941,20 +2963,16 @@ static void set_widgets_power_state_vt1708B(struct hda_codec *codec)
                set_pin_power_state(codec, 0x22, &parm);
                if (spec->smart51_enabled)
                        set_pin_power_state(codec, 0x1a, &parm);
-               snd_hda_codec_write(codec, 0x26, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0x24, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x26, parm);
+               update_power_state(codec, 0x24, parm);
        } else if (codec->vendor_id == 0x11064397) {
                /* PW7(23h), SW2(27h), AOW2(25h) */
                parm = AC_PWRST_D3;
                set_pin_power_state(codec, 0x23, &parm);
                if (spec->smart51_enabled)
                        set_pin_power_state(codec, 0x1a, &parm);
-               snd_hda_codec_write(codec, 0x27, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0x25, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x27, parm);
+               update_power_state(codec, 0x25, parm);
        }
 
        /* PW 3/4/7 (1ch/1dh/23h) */
@@ -2966,17 +2984,13 @@ static void set_widgets_power_state_vt1708B(struct hda_codec *codec)
                set_pin_power_state(codec, 0x23, &parm);
 
        /* MW0 (16h), Sw3 (27h), AOW 0/3 (10h/25h) */
-       snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_POWER_STATE,
-                           imux_is_smixer ? AC_PWRST_D0 : parm);
-       snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x16, imux_is_smixer ? AC_PWRST_D0 : parm);
+       update_power_state(codec, 0x10, parm);
        if (is_8ch) {
-               snd_hda_codec_write(codec, 0x25, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0x27, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x25, parm);
+               update_power_state(codec, 0x27, parm);
        } else if (codec->vendor_id == 0x11064397 && spec->hp_independent_mode)
-               snd_hda_codec_write(codec, 0x25, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x25, parm);
 }
 
 static int patch_vt1708S(struct hda_codec *codec);
@@ -3149,10 +3163,10 @@ static void set_widgets_power_state_vt1702(struct hda_codec *codec)
        if (imux_is_smixer)
                parm = AC_PWRST_D0; /* SW0 (13h) = stereo mixer (idx 3) */
        /* SW0 (13h), AIW 0/1/2 (12h/1fh/20h) */
-       snd_hda_codec_write(codec, 0x13, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x12, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x13, parm);
+       update_power_state(codec, 0x12, parm);
+       update_power_state(codec, 0x1f, parm);
+       update_power_state(codec, 0x20, parm);
 
        /* outputs */
        /* PW 3/4 (16h/17h) */
@@ -3160,10 +3174,9 @@ static void set_widgets_power_state_vt1702(struct hda_codec *codec)
        set_pin_power_state(codec, 0x17, &parm);
        set_pin_power_state(codec, 0x16, &parm);
        /* MW0 (1ah), AOW 0/1 (10h/1dh) */
-       snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_POWER_STATE,
-                           imux_is_smixer ? AC_PWRST_D0 : parm);
-       snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x1d, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x1a, imux_is_smixer ? AC_PWRST_D0 : parm);
+       update_power_state(codec, 0x10, parm);
+       update_power_state(codec, 0x1d, parm);
 }
 
 static int patch_vt1702(struct hda_codec *codec)
@@ -3228,52 +3241,48 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
        if (imux_is_smixer)
                parm = AC_PWRST_D0;
        /* MUX6/7 (1eh/1fh), AIW 0/1 (10h/11h) */
-       snd_hda_codec_write(codec, 0x1e, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x1e, parm);
+       update_power_state(codec, 0x1f, parm);
+       update_power_state(codec, 0x10, parm);
+       update_power_state(codec, 0x11, parm);
 
        /* outputs */
        /* PW3 (27h), MW2 (1ah), AOW3 (bh) */
        parm = AC_PWRST_D3;
        set_pin_power_state(codec, 0x27, &parm);
-       snd_hda_codec_write(codec, 0x1a, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0xb, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x1a, parm);
+       update_power_state(codec, 0xb, parm);
 
        /* PW2 (26h), AOW2 (ah) */
        parm = AC_PWRST_D3;
        set_pin_power_state(codec, 0x26, &parm);
        if (spec->smart51_enabled)
                set_pin_power_state(codec, 0x2b, &parm);
-       snd_hda_codec_write(codec, 0xa, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0xa, parm);
 
        /* PW0 (24h), AOW0 (8h) */
        parm = AC_PWRST_D3;
        set_pin_power_state(codec, 0x24, &parm);
        if (!spec->hp_independent_mode) /* check for redirected HP */
                set_pin_power_state(codec, 0x28, &parm);
-       snd_hda_codec_write(codec, 0x8, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x8, parm);
        /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */
-       snd_hda_codec_write(codec, 0x21, 0, AC_VERB_SET_POWER_STATE,
-                           imux_is_smixer ? AC_PWRST_D0 : parm);
+       update_power_state(codec, 0x21, imux_is_smixer ? AC_PWRST_D0 : parm);
 
        /* PW1 (25h), AOW1 (9h) */
        parm = AC_PWRST_D3;
        set_pin_power_state(codec, 0x25, &parm);
        if (spec->smart51_enabled)
                set_pin_power_state(codec, 0x2a, &parm);
-       snd_hda_codec_write(codec, 0x9, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x9, parm);
 
        if (spec->hp_independent_mode) {
                /* PW4 (28h), MW3 (1bh), MUX1(34h), AOW4 (ch) */
                parm = AC_PWRST_D3;
                set_pin_power_state(codec, 0x28, &parm);
-               snd_hda_codec_write(codec, 0x1b, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0x34, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0xc, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x1b, parm);
+               update_power_state(codec, 0x34, parm);
+               update_power_state(codec, 0xc, parm);
        }
 }
 
@@ -3433,8 +3442,8 @@ static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
        if (imux_is_smixer)
                parm = AC_PWRST_D0;
        /* SW0 (17h), AIW0(13h) */
-       snd_hda_codec_write(codec, 0x17, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x13, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x17, parm);
+       update_power_state(codec, 0x13, parm);
 
        parm = AC_PWRST_D3;
        set_pin_power_state(codec, 0x1e, &parm);
@@ -3442,12 +3451,11 @@ static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
        if (spec->dmic_enabled)
                set_pin_power_state(codec, 0x22, &parm);
        else
-               snd_hda_codec_write(codec, 0x22, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+               update_power_state(codec, 0x22, AC_PWRST_D3);
 
        /* SW2(26h), AIW1(14h) */
-       snd_hda_codec_write(codec, 0x26, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x14, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x26, parm);
+       update_power_state(codec, 0x14, parm);
 
        /* outputs */
        /* PW0 (19h), SW1 (18h), AOW1 (11h) */
@@ -3456,8 +3464,8 @@ static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
        /* Smart 5.1 PW2(1bh) */
        if (spec->smart51_enabled)
                set_pin_power_state(codec, 0x1b, &parm);
-       snd_hda_codec_write(codec, 0x18, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x18, parm);
+       update_power_state(codec, 0x11, parm);
 
        /* PW7 (23h), SW3 (27h), AOW3 (25h) */
        parm = AC_PWRST_D3;
@@ -3465,12 +3473,12 @@ static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
        /* Smart 5.1 PW1(1ah) */
        if (spec->smart51_enabled)
                set_pin_power_state(codec, 0x1a, &parm);
-       snd_hda_codec_write(codec, 0x27, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x27, parm);
 
        /* Smart 5.1 PW5(1eh) */
        if (spec->smart51_enabled)
                set_pin_power_state(codec, 0x1e, &parm);
-       snd_hda_codec_write(codec, 0x25, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x25, parm);
 
        /* Mono out */
        /* SW4(28h)->MW1(29h)-> PW12 (2ah)*/
@@ -3486,9 +3494,9 @@ static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
                        mono_out = 1;
        }
        parm = mono_out ? AC_PWRST_D0 : AC_PWRST_D3;
-       snd_hda_codec_write(codec, 0x28, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x29, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x2a, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x28, parm);
+       update_power_state(codec, 0x29, parm);
+       update_power_state(codec, 0x2a, parm);
 
        /* PW 3/4 (1ch/1dh) */
        parm = AC_PWRST_D3;
@@ -3496,15 +3504,12 @@ static void set_widgets_power_state_vt1716S(struct hda_codec *codec)
        set_pin_power_state(codec, 0x1d, &parm);
        /* HP Independent Mode, power on AOW3 */
        if (spec->hp_independent_mode)
-               snd_hda_codec_write(codec, 0x25, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x25, parm);
 
        /* force to D0 for internal Speaker */
        /* MW0 (16h), AOW0 (10h) */
-       snd_hda_codec_write(codec, 0x16, 0, AC_VERB_SET_POWER_STATE,
-                           imux_is_smixer ? AC_PWRST_D0 : parm);
-       snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE,
-                           mono_out ? AC_PWRST_D0 : parm);
+       update_power_state(codec, 0x16, imux_is_smixer ? AC_PWRST_D0 : parm);
+       update_power_state(codec, 0x10, mono_out ? AC_PWRST_D0 : parm);
 }
 
 static int patch_vt1716S(struct hda_codec *codec)
@@ -3580,54 +3585,45 @@ static void set_widgets_power_state_vt2002P(struct hda_codec *codec)
        set_pin_power_state(codec, 0x2b, &parm);
        parm = AC_PWRST_D0;
        /* MUX9/10 (1eh/1fh), AIW 0/1 (10h/11h) */
-       snd_hda_codec_write(codec, 0x1e, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x1e, parm);
+       update_power_state(codec, 0x1f, parm);
+       update_power_state(codec, 0x10, parm);
+       update_power_state(codec, 0x11, parm);
 
        /* outputs */
        /* AOW0 (8h)*/
-       snd_hda_codec_write(codec, 0x8, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x8, parm);
 
        if (spec->codec_type == VT1802) {
                /* PW4 (28h), MW4 (18h), MUX4(38h) */
                parm = AC_PWRST_D3;
                set_pin_power_state(codec, 0x28, &parm);
-               snd_hda_codec_write(codec, 0x18, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0x38, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x18, parm);
+               update_power_state(codec, 0x38, parm);
        } else {
                /* PW4 (26h), MW4 (1ch), MUX4(37h) */
                parm = AC_PWRST_D3;
                set_pin_power_state(codec, 0x26, &parm);
-               snd_hda_codec_write(codec, 0x1c, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0x37, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x1c, parm);
+               update_power_state(codec, 0x37, parm);
        }
 
        if (spec->codec_type == VT1802) {
                /* PW1 (25h), MW1 (15h), MUX1(35h), AOW1 (9h) */
                parm = AC_PWRST_D3;
                set_pin_power_state(codec, 0x25, &parm);
-               snd_hda_codec_write(codec, 0x15, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0x35, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x15, parm);
+               update_power_state(codec, 0x35, parm);
        } else {
                /* PW1 (25h), MW1 (19h), MUX1(35h), AOW1 (9h) */
                parm = AC_PWRST_D3;
                set_pin_power_state(codec, 0x25, &parm);
-               snd_hda_codec_write(codec, 0x19, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0x35, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x19, parm);
+               update_power_state(codec, 0x35, parm);
        }
 
        if (spec->hp_independent_mode)
-               snd_hda_codec_write(codec, 0x9, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+               update_power_state(codec, 0x9, AC_PWRST_D0);
 
        /* Class-D */
        /* PW0 (24h), MW0(18h/14h), MUX0(34h) */
@@ -3637,12 +3633,10 @@ static void set_widgets_power_state_vt2002P(struct hda_codec *codec)
        set_pin_power_state(codec, 0x24, &parm);
        parm = present ? AC_PWRST_D3 : AC_PWRST_D0;
        if (spec->codec_type == VT1802)
-               snd_hda_codec_write(codec, 0x14, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x14, parm);
        else
-               snd_hda_codec_write(codec, 0x18, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x34, 0, AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x18, parm);
+       update_power_state(codec, 0x34, parm);
 
        /* Mono Out */
        present = snd_hda_jack_detect(codec, 0x26);
@@ -3650,28 +3644,20 @@ static void set_widgets_power_state_vt2002P(struct hda_codec *codec)
        parm = present ? AC_PWRST_D3 : AC_PWRST_D0;
        if (spec->codec_type == VT1802) {
                /* PW15 (33h), MW8(1ch), MUX8(3ch) */
-               snd_hda_codec_write(codec, 0x33, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0x1c, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0x3c, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x33, parm);
+               update_power_state(codec, 0x1c, parm);
+               update_power_state(codec, 0x3c, parm);
        } else {
                /* PW15 (31h), MW8(17h), MUX8(3bh) */
-               snd_hda_codec_write(codec, 0x31, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0x17, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
-               snd_hda_codec_write(codec, 0x3b, 0,
-                                   AC_VERB_SET_POWER_STATE, parm);
+               update_power_state(codec, 0x31, parm);
+               update_power_state(codec, 0x17, parm);
+               update_power_state(codec, 0x3b, parm);
        }
        /* MW9 (21h) */
        if (imux_is_smixer || !is_aa_path_mute(codec))
-               snd_hda_codec_write(codec, 0x21, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+               update_power_state(codec, 0x21, AC_PWRST_D0);
        else
-               snd_hda_codec_write(codec, 0x21, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+               update_power_state(codec, 0x21, AC_PWRST_D3);
 }
 
 /* patch for vt2002P */
@@ -3731,30 +3717,28 @@ static void set_widgets_power_state_vt1812(struct hda_codec *codec)
        set_pin_power_state(codec, 0x2b, &parm);
        parm = AC_PWRST_D0;
        /* MUX10/11 (1eh/1fh), AIW 0/1 (10h/11h) */
-       snd_hda_codec_write(codec, 0x1e, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x1f, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x10, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x11, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x1e, parm);
+       update_power_state(codec, 0x1f, parm);
+       update_power_state(codec, 0x10, parm);
+       update_power_state(codec, 0x11, parm);
 
        /* outputs */
        /* AOW0 (8h)*/
-       snd_hda_codec_write(codec, 0x8, 0,
-                           AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+       update_power_state(codec, 0x8, AC_PWRST_D0);
 
        /* PW4 (28h), MW4 (18h), MUX4(38h) */
        parm = AC_PWRST_D3;
        set_pin_power_state(codec, 0x28, &parm);
-       snd_hda_codec_write(codec, 0x18, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x38, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x18, parm);
+       update_power_state(codec, 0x38, parm);
 
        /* PW1 (25h), MW1 (15h), MUX1(35h), AOW1 (9h) */
        parm = AC_PWRST_D3;
        set_pin_power_state(codec, 0x25, &parm);
-       snd_hda_codec_write(codec, 0x15, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x35, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x15, parm);
+       update_power_state(codec, 0x35, parm);
        if (spec->hp_independent_mode)
-               snd_hda_codec_write(codec, 0x9, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+               update_power_state(codec, 0x9, AC_PWRST_D0);
 
        /* Internal Speaker */
        /* PW0 (24h), MW0(14h), MUX0(34h) */
@@ -3763,15 +3747,11 @@ static void set_widgets_power_state_vt1812(struct hda_codec *codec)
        parm = AC_PWRST_D3;
        set_pin_power_state(codec, 0x24, &parm);
        if (present) {
-               snd_hda_codec_write(codec, 0x14, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-               snd_hda_codec_write(codec, 0x34, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+               update_power_state(codec, 0x14, AC_PWRST_D3);
+               update_power_state(codec, 0x34, AC_PWRST_D3);
        } else {
-               snd_hda_codec_write(codec, 0x14, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
-               snd_hda_codec_write(codec, 0x34, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+               update_power_state(codec, 0x14, AC_PWRST_D0);
+               update_power_state(codec, 0x34, AC_PWRST_D0);
        }
 
 
@@ -3782,26 +3762,20 @@ static void set_widgets_power_state_vt1812(struct hda_codec *codec)
        parm = AC_PWRST_D3;
        set_pin_power_state(codec, 0x31, &parm);
        if (present) {
-               snd_hda_codec_write(codec, 0x1c, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-               snd_hda_codec_write(codec, 0x3c, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-               snd_hda_codec_write(codec, 0x3e, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
+               update_power_state(codec, 0x1c, AC_PWRST_D3);
+               update_power_state(codec, 0x3c, AC_PWRST_D3);
+               update_power_state(codec, 0x3e, AC_PWRST_D3);
        } else {
-               snd_hda_codec_write(codec, 0x1c, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
-               snd_hda_codec_write(codec, 0x3c, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
-               snd_hda_codec_write(codec, 0x3e, 0,
-                                   AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+               update_power_state(codec, 0x1c, AC_PWRST_D0);
+               update_power_state(codec, 0x3c, AC_PWRST_D0);
+               update_power_state(codec, 0x3e, AC_PWRST_D0);
        }
 
        /* PW15 (33h), MW15 (1dh), MUX15(3dh) */
        parm = AC_PWRST_D3;
        set_pin_power_state(codec, 0x33, &parm);
-       snd_hda_codec_write(codec, 0x1d, 0, AC_VERB_SET_POWER_STATE, parm);
-       snd_hda_codec_write(codec, 0x3d, 0, AC_VERB_SET_POWER_STATE, parm);
+       update_power_state(codec, 0x1d, parm);
+       update_power_state(codec, 0x3d, parm);
 
 }
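
The patch_via.c hunks above funnel every AC_VERB_SET_POWER_STATE write through update_power_state(), which first reads the widget's current state and skips the verb when nothing would change; __analog_low_current_mode() gets similar treatment via the cached alc_mode flag, with a force parameter for the initial setup. A minimal sketch of that write-only-on-change idea, using a plain array in place of codec widgets:

    #include <stdio.h>

    #define D0 0
    #define D3 3

    static int widget_state[4];
    static int verbs_sent;

    static int  read_state(int nid)             { return widget_state[nid]; }
    static void write_state(int nid, int parm)  { widget_state[nid] = parm; verbs_sent++; }

    /* send the power-state verb only if the widget is not already in that state */
    static void update_power_state(int nid, int parm)
    {
        if (read_state(nid) == parm)
            return;
        write_state(nid, parm);
    }

    int main(void)
    {
        update_power_state(0, D3);
        update_power_state(0, D3);   /* redundant: skipped */
        update_power_state(0, D0);
        printf("verbs sent: %d\n", verbs_sent);   /* 2, not 3 */
        return 0;
    }
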
 
index 26c7e8bcb229361b03397e52a59c37eb183ed18d..c0dbb52d45be5306799fb144e63e60d81348045e 100644 (file)
@@ -618,9 +618,12 @@ static int ac97_volume_get(struct snd_kcontrol *ctl,
        mutex_lock(&chip->mutex);
        reg = oxygen_read_ac97(chip, codec, index);
        mutex_unlock(&chip->mutex);
-       value->value.integer.value[0] = 31 - (reg & 0x1f);
-       if (stereo)
-               value->value.integer.value[1] = 31 - ((reg >> 8) & 0x1f);
+       if (!stereo) {
+               value->value.integer.value[0] = 31 - (reg & 0x1f);
+       } else {
+               value->value.integer.value[0] = 31 - ((reg >> 8) & 0x1f);
+               value->value.integer.value[1] = 31 - (reg & 0x1f);
+       }
        return 0;
 }
 
@@ -636,14 +639,14 @@ static int ac97_volume_put(struct snd_kcontrol *ctl,
 
        mutex_lock(&chip->mutex);
        oldreg = oxygen_read_ac97(chip, codec, index);
-       newreg = oldreg;
-       newreg = (newreg & ~0x1f) |
-               (31 - (value->value.integer.value[0] & 0x1f));
-       if (stereo)
-               newreg = (newreg & ~0x1f00) |
-                       ((31 - (value->value.integer.value[1] & 0x1f)) << 8);
-       else
-               newreg = (newreg & ~0x1f00) | ((newreg & 0x1f) << 8);
+       if (!stereo) {
+               newreg = oldreg & ~0x1f;
+               newreg |= 31 - (value->value.integer.value[0] & 0x1f);
+       } else {
+               newreg = oldreg & ~0x1f1f;
+               newreg |= (31 - (value->value.integer.value[0] & 0x1f)) << 8;
+               newreg |= 31 - (value->value.integer.value[1] & 0x1f);
+       }
        change = newreg != oldreg;
        if (change)
                oxygen_write_ac97(chip, codec, index, newreg);
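
The oxygen_mixer.c hunk above straightens out the stereo case of the AC'97 volume controls: the register packs two 5-bit attenuation fields, and the new get/put code maps ALSA channel 0 to bits 8..12 and channel 1 to bits 0..4, converting between attenuation (0 = loudest) and the 0..31 volume scale with "31 - x". A standalone sketch of that pack/unpack, assuming exactly the bit layout implied by the diff:

    #include <stdio.h>

    /* volume (0..31, 31 = loudest) <-> 5-bit attenuation fields in one 16-bit register */
    static unsigned int pack_stereo(int vol0, int vol1)
    {
        unsigned int reg = 0;
        reg |= (31 - (vol0 & 0x1f)) << 8;   /* channel 0 -> bits 8..12 */
        reg |= 31 - (vol1 & 0x1f);          /* channel 1 -> bits 0..4  */
        return reg;
    }

    static void unpack_stereo(unsigned int reg, int *vol0, int *vol1)
    {
        *vol0 = 31 - ((reg >> 8) & 0x1f);
        *vol1 = 31 - (reg & 0x1f);
    }

    int main(void)
    {
        int v0, v1;
        unsigned int reg = pack_stereo(25, 10);
        unpack_stereo(reg, &v0, &v1);
        printf("reg=%#06x vol0=%d vol1=%d\n", reg, v0, v1);   /* round-trips to 25 and 10 */
        return 0;
    }
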
index 478303e6c2b0801c16c2ae52ee4eff0996fac472..63cff90706bf1749b9b7154f1c6ec81c903f68fd 100644 (file)
@@ -177,6 +177,7 @@ static void wm8776_registers_init(struct oxygen *chip)
        struct xonar_wm87x6 *data = chip->model_data;
 
        wm8776_write(chip, WM8776_RESET, 0);
+       wm8776_write(chip, WM8776_PHASESWAP, WM8776_PH_MASK);
        wm8776_write(chip, WM8776_DACCTRL1, WM8776_DZCEN |
                     WM8776_PL_LEFT_LEFT | WM8776_PL_RIGHT_RIGHT);
        wm8776_write(chip, WM8776_DACMUTE, chip->dac_mute ? WM8776_DMUTE : 0);
index e57b89e8aa8926604425b2283d0238327609c63c..94ab728f5ca8b5a28b5a11ef9f36a723e3ebb707 100644 (file)
@@ -286,17 +286,22 @@ static int __devinit snd_card_ymfpci_probe(struct pci_dev *pci,
                snd_card_free(card);
                return err;
        }
-       if ((err = snd_ymfpci_pcm_4ch(chip, 2, NULL)) < 0) {
+       err = snd_ymfpci_mixer(chip, rear_switch[dev]);
+       if (err < 0) {
                snd_card_free(card);
                return err;
        }
-       if ((err = snd_ymfpci_pcm2(chip, 3, NULL)) < 0) {
-               snd_card_free(card);
-               return err;
-       }
-       if ((err = snd_ymfpci_mixer(chip, rear_switch[dev])) < 0) {
-               snd_card_free(card);
-               return err;
+       if (chip->ac97->ext_id & AC97_EI_SDAC) {
+               err = snd_ymfpci_pcm_4ch(chip, 2, NULL);
+               if (err < 0) {
+                       snd_card_free(card);
+                       return err;
+               }
+               err = snd_ymfpci_pcm2(chip, 3, NULL);
+               if (err < 0) {
+                       snd_card_free(card);
+                       return err;
+               }
        }
        if ((err = snd_ymfpci_timer(chip, 0)) < 0) {
                snd_card_free(card);
index 03ee4e3653113820674ad6234fb0017c69dae91e..12a9a2b0338719c556446c2c716d5ff4a8742196 100644 (file)
@@ -1614,6 +1614,14 @@ static int snd_ymfpci_put_dup4ch(struct snd_kcontrol *kcontrol, struct snd_ctl_e
        return change;
 }
 
+static struct snd_kcontrol_new snd_ymfpci_dup4ch __devinitdata = {
+       .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+       .name = "4ch Duplication",
+       .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+       .info = snd_ymfpci_info_dup4ch,
+       .get = snd_ymfpci_get_dup4ch,
+       .put = snd_ymfpci_put_dup4ch,
+};
 
 static struct snd_kcontrol_new snd_ymfpci_controls[] __devinitdata = {
 {
@@ -1642,13 +1650,6 @@ YMFPCI_DOUBLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,VOLUME), 1, YDSXGR_SPDIFLOOPVOL),
 YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("",PLAYBACK,SWITCH), 0, YDSXGR_SPDIFOUTCTRL, 0),
 YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("",CAPTURE,SWITCH), 0, YDSXGR_SPDIFINCTRL, 0),
 YMFPCI_SINGLE(SNDRV_CTL_NAME_IEC958("Loop",NONE,NONE), 0, YDSXGR_SPDIFINCTRL, 4),
-{
-       .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
-       .name = "4ch Duplication",
-       .info = snd_ymfpci_info_dup4ch,
-       .get = snd_ymfpci_get_dup4ch,
-       .put = snd_ymfpci_put_dup4ch,
-},
 };
 
 
@@ -1838,6 +1839,12 @@ int __devinit snd_ymfpci_mixer(struct snd_ymfpci *chip, int rear_switch)
                if ((err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_ymfpci_controls[idx], chip))) < 0)
                        return err;
        }
+       if (chip->ac97->ext_id & AC97_EI_SDAC) {
+               kctl = snd_ctl_new1(&snd_ymfpci_dup4ch, chip);
+               err = snd_ctl_add(chip->card, kctl);
+               if (err < 0)
+                       return err;
+       }
 
        /* add S/PDIF control */
        if (snd_BUG_ON(!chip->pcm_spdif))
index 9d38db8f1919d3b81bf6d7f68b99232ad324331e..78979b3e0e95ad41c3af9836889e03a422da00ec 100644 (file)
@@ -1113,7 +1113,7 @@ static int cs42l73_pcm_hw_params(struct snd_pcm_substream *substream,
                priv->config[id].mmcc &= 0xC0;
                priv->config[id].mmcc |= cs42l73_mclk_coeffs[mclk_coeff].mmcc;
                priv->config[id].spc &= 0xFC;
-               priv->config[id].spc &= MCK_SCLK_64FS;
+               priv->config[id].spc |= MCK_SCLK_MCLK;
        } else {
                /* CS42L73 Slave */
                priv->config[id].spc &= 0xFC;
index d7bd91831611cb3a7a45395a76a82f34b10177df..7f4ba819a9f681dad3f962ae7c65c60d73b0f2a3 100644 (file)
@@ -987,12 +987,12 @@ static int sgtl5000_restore_regs(struct snd_soc_codec *codec)
        /* restore regular registers */
        for (reg = 0; reg <= SGTL5000_CHIP_SHORT_CTRL; reg += 2) {
 
-               /* this regs depends on the others */
+               /* These regs should be restored in a particular order */
                if (reg == SGTL5000_CHIP_ANA_POWER ||
                        reg == SGTL5000_CHIP_CLK_CTRL ||
                        reg == SGTL5000_CHIP_LINREG_CTRL ||
                        reg == SGTL5000_CHIP_LINE_OUT_CTRL ||
-                       reg == SGTL5000_CHIP_CLK_CTRL)
+                       reg == SGTL5000_CHIP_REF_CTRL)
                        continue;
 
                snd_soc_write(codec, reg, cache[reg]);
@@ -1003,8 +1003,17 @@ static int sgtl5000_restore_regs(struct snd_soc_codec *codec)
                snd_soc_write(codec, reg, cache[reg]);
 
        /*
-        * restore power and other regs according
-        * to set_power() and set_clock()
+        * restore these regs according to the power setting sequence in
+        * sgtl5000_set_power_regs() and the clock setting sequence in
+        * sgtl5000_set_clock().
+        *
+        * The restore order is:
+        * 1. SGTL5000_CHIP_CLK_CTRL MCLK_FREQ bits (1:0) should be restored
+        *    after the SGTL5000_CHIP_ANA_POWER PLL bits are set
+        * 2. SGTL5000_CHIP_LINREG_CTRL should be set before
+        *    SGTL5000_CHIP_ANA_POWER LINREG_D is restored
+        * 3. SGTL5000_CHIP_REF_CTRL controls the analog ground voltage; prefer
+        *    to restore it after SGTL5000_CHIP_ANA_POWER is restored
         */
        snd_soc_write(codec, SGTL5000_CHIP_LINREG_CTRL,
                        cache[SGTL5000_CHIP_LINREG_CTRL]);
@@ -1457,5 +1466,5 @@ static void __exit sgtl5000_exit(void)
 module_exit(sgtl5000_exit);
 
 MODULE_DESCRIPTION("Freescale SGTL5000 ALSA SoC Codec Driver");
-MODULE_AUTHOR("Zeng Zhaoming <zhaoming.zeng@freescale.com>");
+MODULE_AUTHOR("Zeng Zhaoming <zengzm.kernel@gmail.com>");
 MODULE_LICENSE("GPL");
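
Illustrative sketch, separate from the diff above: the corrected comment describes ordering constraints between the hand-restored registers. One ordering consistent with those constraints, written as a stand-alone helper; the helper name and the cache type are assumptions, and the SGTL5000_* register names come from the driver's local sgtl5000.h.

#include <sound/soc.h>

static void sgtl5000_restore_ordered(struct snd_soc_codec *codec,
				     const u16 *cache)
{
	/* LINREG_CTRL before the LINREG_D bit in ANA_POWER comes back up */
	snd_soc_write(codec, SGTL5000_CHIP_LINREG_CTRL,
		      cache[SGTL5000_CHIP_LINREG_CTRL]);

	/* ANA_POWER next, so its PLL bits are valid... */
	snd_soc_write(codec, SGTL5000_CHIP_ANA_POWER,
		      cache[SGTL5000_CHIP_ANA_POWER]);

	/* ...before CLK_CTRL (MCLK_FREQ bits 1:0) is restored */
	snd_soc_write(codec, SGTL5000_CHIP_CLK_CTRL,
		      cache[SGTL5000_CHIP_CLK_CTRL]);

	/* REF_CTRL (analog ground voltage) last, after ANA_POWER */
	snd_soc_write(codec, SGTL5000_CHIP_REF_CTRL,
		      cache[SGTL5000_CHIP_REF_CTRL]);
}
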
index eb401ef021fb7130055d8c73d9385d19daada0ff..372b0b83bd9f191c104dd18dcff3a7756021f1dc 100644 (file)
@@ -60,7 +60,6 @@ struct aic32x4_rate_divs {
 
 struct aic32x4_priv {
        u32 sysclk;
-       s32 master;
        u8 page_no;
        void *control_data;
        u32 power_cfg;
@@ -369,7 +368,6 @@ static int aic32x4_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
 {
        struct snd_soc_codec *codec = codec_dai->codec;
-       struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
        u8 iface_reg_1;
        u8 iface_reg_2;
        u8 iface_reg_3;
@@ -384,11 +382,9 @@ static int aic32x4_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
        /* set master/slave audio interface */
        switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
        case SND_SOC_DAIFMT_CBM_CFM:
-               aic32x4->master = 1;
                iface_reg_1 |= AIC32X4_BCLKMASTER | AIC32X4_WCLKMASTER;
                break;
        case SND_SOC_DAIFMT_CBS_CFS:
-               aic32x4->master = 0;
                break;
        default:
                printk(KERN_ERR "aic32x4: invalid DAI master/slave interface\n");
@@ -526,64 +522,58 @@ static int aic32x4_mute(struct snd_soc_dai *dai, int mute)
 static int aic32x4_set_bias_level(struct snd_soc_codec *codec,
                                  enum snd_soc_bias_level level)
 {
-       struct aic32x4_priv *aic32x4 = snd_soc_codec_get_drvdata(codec);
-
        switch (level) {
        case SND_SOC_BIAS_ON:
-               if (aic32x4->master) {
-                       /* Switch on PLL */
-                       snd_soc_update_bits(codec, AIC32X4_PLLPR,
-                                           AIC32X4_PLLEN, AIC32X4_PLLEN);
-
-                       /* Switch on NDAC Divider */
-                       snd_soc_update_bits(codec, AIC32X4_NDAC,
-                                           AIC32X4_NDACEN, AIC32X4_NDACEN);
-
-                       /* Switch on MDAC Divider */
-                       snd_soc_update_bits(codec, AIC32X4_MDAC,
-                                           AIC32X4_MDACEN, AIC32X4_MDACEN);
-
-                       /* Switch on NADC Divider */
-                       snd_soc_update_bits(codec, AIC32X4_NADC,
-                                           AIC32X4_NADCEN, AIC32X4_NADCEN);
-
-                       /* Switch on MADC Divider */
-                       snd_soc_update_bits(codec, AIC32X4_MADC,
-                                           AIC32X4_MADCEN, AIC32X4_MADCEN);
-
-                       /* Switch on BCLK_N Divider */
-                       snd_soc_update_bits(codec, AIC32X4_BCLKN,
-                                           AIC32X4_BCLKEN, AIC32X4_BCLKEN);
-               }
+               /* Switch on PLL */
+               snd_soc_update_bits(codec, AIC32X4_PLLPR,
+                                   AIC32X4_PLLEN, AIC32X4_PLLEN);
+
+               /* Switch on NDAC Divider */
+               snd_soc_update_bits(codec, AIC32X4_NDAC,
+                                   AIC32X4_NDACEN, AIC32X4_NDACEN);
+
+               /* Switch on MDAC Divider */
+               snd_soc_update_bits(codec, AIC32X4_MDAC,
+                                   AIC32X4_MDACEN, AIC32X4_MDACEN);
+
+               /* Switch on NADC Divider */
+               snd_soc_update_bits(codec, AIC32X4_NADC,
+                                   AIC32X4_NADCEN, AIC32X4_NADCEN);
+
+               /* Switch on MADC Divider */
+               snd_soc_update_bits(codec, AIC32X4_MADC,
+                                   AIC32X4_MADCEN, AIC32X4_MADCEN);
+
+               /* Switch on BCLK_N Divider */
+               snd_soc_update_bits(codec, AIC32X4_BCLKN,
+                                   AIC32X4_BCLKEN, AIC32X4_BCLKEN);
                break;
        case SND_SOC_BIAS_PREPARE:
                break;
        case SND_SOC_BIAS_STANDBY:
-               if (aic32x4->master) {
-                       /* Switch off PLL */
-                       snd_soc_update_bits(codec, AIC32X4_PLLPR,
-                                           AIC32X4_PLLEN, 0);
-
-                       /* Switch off NDAC Divider */
-                       snd_soc_update_bits(codec, AIC32X4_NDAC,
-                                           AIC32X4_NDACEN, 0);
-
-                       /* Switch off MDAC Divider */
-                       snd_soc_update_bits(codec, AIC32X4_MDAC,
-                                           AIC32X4_MDACEN, 0);
-
-                       /* Switch off NADC Divider */
-                       snd_soc_update_bits(codec, AIC32X4_NADC,
-                                           AIC32X4_NADCEN, 0);
-
-                       /* Switch off MADC Divider */
-                       snd_soc_update_bits(codec, AIC32X4_MADC,
-                                           AIC32X4_MADCEN, 0);
-
-                       /* Switch off BCLK_N Divider */
-                       snd_soc_update_bits(codec, AIC32X4_BCLKN,
-                                           AIC32X4_BCLKEN, 0);
-               }
+               /* Switch off PLL */
+               snd_soc_update_bits(codec, AIC32X4_PLLPR,
+                                   AIC32X4_PLLEN, 0);
+
+               /* Switch off NDAC Divider */
+               snd_soc_update_bits(codec, AIC32X4_NDAC,
+                                   AIC32X4_NDACEN, 0);
+
+               /* Switch off MDAC Divider */
+               snd_soc_update_bits(codec, AIC32X4_MDAC,
+                                   AIC32X4_MDACEN, 0);
+
+               /* Switch off NADC Divider */
+               snd_soc_update_bits(codec, AIC32X4_NADC,
+                                   AIC32X4_NADCEN, 0);
+
+               /* Switch off MADC Divider */
+               snd_soc_update_bits(codec, AIC32X4_MADC,
+                                   AIC32X4_MADCEN, 0);
+
+               /* Switch off BCLK_N Divider */
+               snd_soc_update_bits(codec, AIC32X4_BCLKN,
+                                   AIC32X4_BCLKEN, 0);
                break;
        case SND_SOC_BIAS_OFF:
                break;
@@ -651,9 +641,11 @@ static int aic32x4_probe(struct snd_soc_codec *codec)
        if (aic32x4->power_cfg & AIC32X4_PWR_AVDD_DVDD_WEAK_DISABLE) {
                snd_soc_write(codec, AIC32X4_PWRCFG, AIC32X4_AVDDWEAKDISABLE);
        }
-       if (aic32x4->power_cfg & AIC32X4_PWR_AIC32X4_LDO_ENABLE) {
-               snd_soc_write(codec, AIC32X4_LDOCTL, AIC32X4_LDOCTLEN);
-       }
+
+       tmp_reg = (aic32x4->power_cfg & AIC32X4_PWR_AIC32X4_LDO_ENABLE) ?
+                       AIC32X4_LDOCTLEN : 0;
+       snd_soc_write(codec, AIC32X4_LDOCTL, tmp_reg);
+
        tmp_reg = snd_soc_read(codec, AIC32X4_CMMODE);
        if (aic32x4->power_cfg & AIC32X4_PWR_CMMODE_LDOIN_RANGE_18_36) {
                tmp_reg |= AIC32X4_LDOIN_18_36;
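
Illustrative sketch, separate from the diff above: the probe change makes the LDO control write unconditional, deriving the value from the platform flag, so the LDO is explicitly disabled when the flag is clear instead of being left untouched. A sketch of that pattern as a stand-alone helper; the helper name is an assumption, the AIC32X4_* names come from the driver's aic32x4.h.

#include <sound/soc.h>

static void aic32x4_apply_ldo_cfg(struct snd_soc_codec *codec, u32 power_cfg)
{
	u8 val = (power_cfg & AIC32X4_PWR_AIC32X4_LDO_ENABLE) ?
			AIC32X4_LDOCTLEN : 0;

	/* Always write the register: enables or explicitly disables the LDO. */
	snd_soc_write(codec, AIC32X4_LDOCTL, val);
}
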
index c2880907fcedc0e74aed4b377e697d5223bee579..a75c3766aedeec9192c0d887ebe9b8ff366e1e5b 100644 (file)
@@ -733,8 +733,9 @@ static int __devinit wm2000_i2c_probe(struct i2c_client *i2c,
        struct wm2000_priv *wm2000;
        struct wm2000_platform_data *pdata;
        const char *filename;
-       const struct firmware *fw;
-       int reg, ret;
+       const struct firmware *fw = NULL;
+       int ret;
+       int reg;
        u16 id;
 
        wm2000 = devm_kzalloc(&i2c->dev, sizeof(struct wm2000_priv),
@@ -751,7 +752,7 @@ static int __devinit wm2000_i2c_probe(struct i2c_client *i2c,
                ret = PTR_ERR(wm2000->regmap);
                dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
                        ret);
-               goto err;
+               goto out;
        }
 
        /* Verify that this is a WM2000 */
@@ -763,7 +764,7 @@ static int __devinit wm2000_i2c_probe(struct i2c_client *i2c,
        if (id != 0x2000) {
                dev_err(&i2c->dev, "Device is not a WM2000 - ID %x\n", id);
                ret = -ENODEV;
-               goto err_regmap;
+               goto out_regmap_exit;
        }
 
        reg = wm2000_read(i2c, WM2000_REG_REVISON);
@@ -782,7 +783,7 @@ static int __devinit wm2000_i2c_probe(struct i2c_client *i2c,
        ret = request_firmware(&fw, filename, &i2c->dev);
        if (ret != 0) {
                dev_err(&i2c->dev, "Failed to acquire ANC data: %d\n", ret);
-               goto err_regmap;
+               goto out_regmap_exit;
        }
 
        /* Pre-cook the concatenation of the register address onto the image */
@@ -793,15 +794,13 @@ static int __devinit wm2000_i2c_probe(struct i2c_client *i2c,
        if (wm2000->anc_download == NULL) {
                dev_err(&i2c->dev, "Out of memory\n");
                ret = -ENOMEM;
-               goto err_fw;
+               goto out_regmap_exit;
        }
 
        wm2000->anc_download[0] = 0x80;
        wm2000->anc_download[1] = 0x00;
        memcpy(wm2000->anc_download + 2, fw->data, fw->size);
 
-       release_firmware(fw);
-
        wm2000->anc_eng_ena = 1;
        wm2000->anc_active = 1;
        wm2000->spk_ena = 1;
@@ -809,18 +808,14 @@ static int __devinit wm2000_i2c_probe(struct i2c_client *i2c,
 
        wm2000_reset(wm2000);
 
-       ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm2000,
-                                    NULL, 0);
-       if (ret != 0)
-               goto err_fw;
+       ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm2000, NULL, 0);
+       if (!ret)
+               goto out;
 
-       return 0;
-
-err_fw:
-       release_firmware(fw);
-err_regmap:
+out_regmap_exit:
        regmap_exit(wm2000->regmap);
-err:
+out:
+       release_firmware(fw);
        return ret;
 }
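
Illustrative sketch, separate from the diff above: the probe rework collapses the error labels into a single exit path by initialising fw to NULL and releasing it unconditionally; release_firmware() accepts a NULL pointer, so one label serves both success and failure. A minimal sketch of that shape; the function and variable names other than the firmware API are illustrative.

#include <linux/firmware.h>
#include <linux/device.h>

static int load_anc_data(struct device *dev, const char *filename)
{
	const struct firmware *fw = NULL;
	int ret;

	ret = request_firmware(&fw, filename, dev);
	if (ret != 0)
		goto out;		/* fw is still NULL here */

	/* ... copy fw->data / fw->size into a driver buffer ... */

out:
	release_firmware(fw);		/* safe for NULL and valid fw alike */
	return ret;
}
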
 
index 8b24323d6b2c89ecb5a751b17e1e747cb2d35807..89f2af77b1c3983afaa407e6ff2740ec95e0c068 100644 (file)
@@ -1377,6 +1377,7 @@ static int wm5100_set_bias_level(struct snd_soc_codec *codec,
 
                        switch (wm5100->rev) {
                        case 0:
+                               regcache_cache_bypass(wm5100->regmap, true);
                                snd_soc_write(codec, 0x11, 0x3);
                                snd_soc_write(codec, 0x203, 0xc);
                                snd_soc_write(codec, 0x206, 0);
@@ -1392,6 +1393,7 @@ static int wm5100_set_bias_level(struct snd_soc_codec *codec,
                                        snd_soc_write(codec,
                                                      wm5100_reva_patches[i].reg,
                                                      wm5100_reva_patches[i].val);
+                               regcache_cache_bypass(wm5100->regmap, false);
                                break;
                        default:
                                break;
@@ -1402,6 +1404,8 @@ static int wm5100_set_bias_level(struct snd_soc_codec *codec,
                break;
 
        case SND_SOC_BIAS_OFF:
+               regcache_cache_only(wm5100->regmap, true);
+               regcache_mark_dirty(wm5100->regmap);
                if (wm5100->pdata.ldo_ena)
                        gpio_set_value_cansleep(wm5100->pdata.ldo_ena, 0);
                regulator_bulk_disable(ARRAY_SIZE(wm5100->core_supplies),
@@ -2180,6 +2184,7 @@ static void wm5100_micd_irq(struct snd_soc_codec *codec)
                if (wm5100->jack_detecting) {
                        dev_dbg(codec->dev, "Microphone detected\n");
                        wm5100->jack_mic = true;
+                       wm5100->jack_detecting = false;
                        snd_soc_jack_report(wm5100->jack,
                                            SND_JACK_HEADSET,
                                            SND_JACK_HEADSET | SND_JACK_BTN_0);
@@ -2218,6 +2223,7 @@ static void wm5100_micd_irq(struct snd_soc_codec *codec)
                                            SND_JACK_BTN_0);
                } else if (wm5100->jack_detecting) {
                        dev_dbg(codec->dev, "Headphone detected\n");
+                       wm5100->jack_detecting = false;
                        snd_soc_jack_report(wm5100->jack, SND_JACK_HEADPHONE,
                                            SND_JACK_HEADPHONE);
 
@@ -2607,6 +2613,13 @@ static const struct regmap_config wm5100_regmap = {
        .cache_type = REGCACHE_RBTREE,
 };
 
+static const unsigned int wm5100_mic_ctrl_reg[] = {
+       WM5100_IN1L_CONTROL,
+       WM5100_IN2L_CONTROL,
+       WM5100_IN3L_CONTROL,
+       WM5100_IN4L_CONTROL,
+};
+
 static __devinit int wm5100_i2c_probe(struct i2c_client *i2c,
                                      const struct i2c_device_id *id)
 {
@@ -2739,7 +2752,7 @@ static __devinit int wm5100_i2c_probe(struct i2c_client *i2c,
        }
 
        for (i = 0; i < ARRAY_SIZE(wm5100->pdata.in_mode); i++) {
-               regmap_update_bits(wm5100->regmap, WM5100_IN1L_CONTROL,
+               regmap_update_bits(wm5100->regmap, wm5100_mic_ctrl_reg[i],
                                   WM5100_IN1_MODE_MASK |
                                   WM5100_IN1_DMIC_SUP_MASK,
                                   (wm5100->pdata.in_mode[i] <<
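
Illustrative sketch, separate from the diff above: the loop previously wrote WM5100_IN1L_CONTROL on every iteration, and the fix indexes a lookup table so each input gets its own control register. A sketch of the table-driven form; the helper and its mask/value parameters are assumptions, the WM5100_*_CONTROL names come from the driver's wm5100.h.

#include <linux/kernel.h>
#include <linux/regmap.h>

static const unsigned int mic_ctrl_reg[] = {
	WM5100_IN1L_CONTROL,
	WM5100_IN2L_CONTROL,
	WM5100_IN3L_CONTROL,
	WM5100_IN4L_CONTROL,
};

static void apply_in_modes(struct regmap *regmap, const unsigned int *modes,
			   unsigned int mask)
{
	int i;

	/* One register per input, instead of rewriting IN1L each time. */
	for (i = 0; i < ARRAY_SIZE(mic_ctrl_reg); i++)
		regmap_update_bits(regmap, mic_ctrl_reg[i], mask, modes[i]);
}
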
index 8d4ea43d40a383298023165e517ed1c9de510063..40ac888faf3d7633f433257ab96e2f190d6f4174 100644 (file)
@@ -55,7 +55,7 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
                return 0;
 
        if (fw->size < 32) {
-               dev_err(codec->dev, "%s: firmware too short (%d bytes)\n",
+               dev_err(codec->dev, "%s: firmware too short (%zd bytes)\n",
                        name, fw->size);
                goto err;
        }
index 296de4e30d26f49729cd25bc4c22134b01fb90aa..29c4b02c4790681743ba82e6e3c3d6d87c558b8e 100644 (file)
@@ -96,7 +96,7 @@ static int wm8962_regulator_event_##n(struct notifier_block *nb, \
        struct wm8962_priv *wm8962 = container_of(nb, struct wm8962_priv, \
                                                  disable_nb[n]); \
        if (event & REGULATOR_EVENT_DISABLE) { \
-               regcache_cache_only(wm8962->regmap, true);      \
+               regcache_mark_dirty(wm8962->regmap);    \
        } \
        return 0; \
 }
@@ -3159,13 +3159,13 @@ static int wm8962_hw_params(struct snd_pcm_substream *substream,
        case SNDRV_PCM_FORMAT_S16_LE:
                break;
        case SNDRV_PCM_FORMAT_S20_3LE:
-               aif0 |= 0x40;
+               aif0 |= 0x4;
                break;
        case SNDRV_PCM_FORMAT_S24_LE:
-               aif0 |= 0x80;
+               aif0 |= 0x8;
                break;
        case SNDRV_PCM_FORMAT_S32_LE:
-               aif0 |= 0xc0;
+               aif0 |= 0xc;
                break;
        default:
                return -EINVAL;
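
Illustrative sketch, separate from the diff above: two independent fixes here. The regulator notifier now marks the register cache dirty instead of forcing cache-only mode, so a later regcache_sync() rewrites everything once power returns, and the AIF0 word-length values are corrected from 0x40/0x80/0xc0 to 0x4/0x8/0xc. A sketch of the cache-dirty pattern; the notifier wiring and the global regmap pointer are assumptions for the example.

#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/notifier.h>

static struct regmap *codec_regmap;	/* assumed to be set up at probe time */

static int supply_disable_event(struct notifier_block *nb,
				unsigned long event, void *data)
{
	if (event & REGULATOR_EVENT_DISABLE)
		regcache_mark_dirty(codec_regmap);	/* full resync later */
	return 0;
}

/* On the power-up path a sync rewrites every register marked dirty. */
static int codec_resync(void)
{
	return regcache_sync(codec_regmap);
}
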
index 2b40c93601ed493a11bf0222dd5ba0c09754355b..7c7fd925db8da78c1ad6bb08bdde66eb60d7210b 100644 (file)
@@ -444,6 +444,12 @@ static int _wm8993_set_fll(struct snd_soc_codec *codec, int fll_id, int source,
        /* Enable the FLL */
        snd_soc_write(codec, WM8993_FLL_CONTROL_1, reg1 | WM8993_FLL_ENA);
 
+       /* Both overestimates */
+       if (Fref < 1000000)
+               msleep(3);
+       else
+               msleep(1);
+
        dev_dbg(codec->dev, "FLL enabled at %dHz->%dHz\n", Fref, Fout);
 
        wm8993->fll_fref = Fref;
index 93d27b6602571c3f600f9272defe0b05f12a8c3b..ec69a6c152fea9a8b55a7c16c1f3935fb3dc305d 100644 (file)
@@ -770,6 +770,8 @@ static void vmid_reference(struct snd_soc_codec *codec)
 {
        struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
 
+       pm_runtime_get_sync(codec->dev);
+
        wm8994->vmid_refcount++;
 
        dev_dbg(codec->dev, "Referencing VMID, refcount is now %d\n",
@@ -783,7 +785,12 @@ static void vmid_reference(struct snd_soc_codec *codec)
                                    WM8994_VMID_RAMP_MASK,
                                    WM8994_STARTUP_BIAS_ENA |
                                    WM8994_VMID_BUF_ENA |
-                                   (0x11 << WM8994_VMID_RAMP_SHIFT));
+                                   (0x3 << WM8994_VMID_RAMP_SHIFT));
+
+               /* Remove discharge for line out */
+               snd_soc_update_bits(codec, WM8994_ANTIPOP_1,
+                                   WM8994_LINEOUT1_DISCH |
+                                   WM8994_LINEOUT2_DISCH, 0);
 
                /* Main bias enable, VMID=2x40k */
                snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_1,
@@ -837,6 +844,8 @@ static void vmid_dereference(struct snd_soc_codec *codec)
                                    WM8994_VMID_BUF_ENA |
                                    WM8994_VMID_RAMP_MASK, 0);
        }
+
+       pm_runtime_put(codec->dev);
 }
 
 static int vmid_event(struct snd_soc_dapm_widget *w,
@@ -2753,11 +2762,6 @@ static int wm8994_resume(struct snd_soc_codec *codec)
                codec->cache_only = 0;
        }
 
-       /* Restore the registers */
-       ret = snd_soc_cache_sync(codec);
-       if (ret != 0)
-               dev_err(codec->dev, "Failed to sync cache: %d\n", ret);
-
        wm8994_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 
        for (i = 0; i < ARRAY_SIZE(wm8994->fll); i++) {
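
Illustrative sketch, separate from the diff above: vmid_reference()/vmid_dereference() now bracket the VMID refcount with runtime-PM get/put so the device cannot runtime-suspend while VMID is in use. A sketch of that pairing; the context struct is hypothetical, only the pm_runtime calls mirror the hunks.

#include <linux/pm_runtime.h>
#include <linux/device.h>

struct vmid_ctx {
	struct device *dev;
	int refcount;
};

static void vmid_get(struct vmid_ctx *ctx)
{
	pm_runtime_get_sync(ctx->dev);	/* keep the device awake */
	if (ctx->refcount++ == 0) {
		/* ... first reference ramps VMID up here ... */
	}
}

static void vmid_put(struct vmid_ctx *ctx)
{
	if (--ctx->refcount == 0) {
		/* ... last reference ramps VMID down here ... */
	}
	pm_runtime_put(ctx->dev);	/* allow runtime suspend again */
}
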
index d8da10fe5b522a0ab2ba584d9ac6afe8383a776f..61f7daa4d0e681571222fce0910ab148e8b0f18a 100644 (file)
@@ -108,7 +108,7 @@ static int wm8996_regulator_event_##n(struct notifier_block *nb, \
        struct wm8996_priv *wm8996 = container_of(nb, struct wm8996_priv, \
                                                  disable_nb[n]); \
        if (event & REGULATOR_EVENT_DISABLE) { \
-               regcache_cache_only(wm8996->regmap, true);      \
+               regcache_mark_dirty(wm8996->regmap);    \
        } \
        return 0; \
 }
@@ -1120,7 +1120,8 @@ SND_SOC_DAPM_SUPPLY_S("SYSCLK", 1, WM8996_AIF_CLOCKING_1, 0, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY_S("SYSDSPCLK", 2, WM8996_CLOCKING_1, 1, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY_S("AIFCLK", 2, WM8996_CLOCKING_1, 2, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY_S("Charge Pump", 2, WM8996_CHARGE_PUMP_1, 15, 0, cp_event,
-                     SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
+                     SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
+                     SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_SUPPLY("Bandgap", SND_SOC_NOPM, 0, 0, bg_event,
                    SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
 SND_SOC_DAPM_SUPPLY("LDO2", WM8996_POWER_MANAGEMENT_2, 1, 0, NULL, 0),
@@ -2007,6 +2008,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
        struct wm8996_priv *wm8996 = snd_soc_codec_get_drvdata(codec);
        int lfclk = 0;
        int ratediv = 0;
+       int sync = WM8996_REG_SYNC;
        int src;
        int old;
 
@@ -2051,6 +2053,7 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
        case 32000:
        case 32768:
                lfclk = WM8996_LFCLK_ENA;
+               sync = 0;
                break;
        default:
                dev_warn(codec->dev, "Unsupported clock rate %dHz\n",
@@ -2064,6 +2067,8 @@ static int wm8996_set_sysclk(struct snd_soc_dai *dai,
                            WM8996_SYSCLK_SRC_MASK | WM8996_SYSCLK_DIV_MASK,
                            src << WM8996_SYSCLK_SRC_SHIFT | ratediv);
        snd_soc_update_bits(codec, WM8996_CLOCKING_1, WM8996_LFCLK_ENA, lfclk);
+       snd_soc_update_bits(codec, WM8996_CONTROL_INTERFACE_1,
+                           WM8996_REG_SYNC, sync);
        snd_soc_update_bits(codec, WM8996_AIF_CLOCKING_1,
                            WM8996_SYSCLK_ENA, old);
 
index 0fde643194ceaccbc8c65e5cdd68d8d18010bbf0..de9ac3e44aec840fc718445333d334b2d4f96b63 100644 (file)
@@ -1567,6 +1567,10 @@ int wm8996_detect(struct snd_soc_codec *codec, struct snd_soc_jack *jack,
 /*
  * R257 (0x101) - Control Interface (1)
  */
+#define WM8996_REG_SYNC                         0x8000  /* REG_SYNC */
+#define WM8996_REG_SYNC_MASK                    0x8000  /* REG_SYNC */
+#define WM8996_REG_SYNC_SHIFT                       15  /* REG_SYNC */
+#define WM8996_REG_SYNC_WIDTH                        1  /* REG_SYNC */
 #define WM8996_AUTO_INC                         0x0004  /* AUTO_INC */
 #define WM8996_AUTO_INC_MASK                    0x0004  /* AUTO_INC */
 #define WM8996_AUTO_INC_SHIFT                        2  /* AUTO_INC */
index 2a61094075f86dfd40fff87787b2dba70804b36d..8a68cea4a3ee6af7860ae65f4ccab119b10cc425 100644 (file)
@@ -586,14 +586,14 @@ SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER1, 0, 1, 0),
 };
 
 static const struct snd_kcontrol_new line2_mix[] = {
-SOC_DAPM_SINGLE("IN2R Switch", WM8993_LINE_MIXER2, 2, 1, 0),
-SOC_DAPM_SINGLE("IN2L Switch", WM8993_LINE_MIXER2, 1, 1, 0),
+SOC_DAPM_SINGLE("IN1L Switch", WM8993_LINE_MIXER2, 2, 1, 0),
+SOC_DAPM_SINGLE("IN1R Switch", WM8993_LINE_MIXER2, 1, 1, 0),
 SOC_DAPM_SINGLE("Output Switch", WM8993_LINE_MIXER2, 0, 1, 0),
 };
 
 static const struct snd_kcontrol_new line2n_mix[] = {
-SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER2, 6, 1, 0),
-SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 5, 1, 0),
+SOC_DAPM_SINGLE("Left Output Switch", WM8993_LINE_MIXER2, 5, 1, 0),
+SOC_DAPM_SINGLE("Right Output Switch", WM8993_LINE_MIXER2, 6, 1, 0),
 };
 
 static const struct snd_kcontrol_new line2p_mix[] = {
@@ -613,6 +613,8 @@ SND_SOC_DAPM_INPUT("IN2RP:VXRP"),
 SND_SOC_DAPM_SUPPLY("MICBIAS2", WM8993_POWER_MANAGEMENT_1, 5, 0, NULL, 0),
 SND_SOC_DAPM_SUPPLY("MICBIAS1", WM8993_POWER_MANAGEMENT_1, 4, 0, NULL, 0),
 
+SND_SOC_DAPM_SUPPLY("LINEOUT_VMID_BUF", WM8993_ANTIPOP1, 7, 0, NULL, 0),
+
 SND_SOC_DAPM_MIXER("IN1L PGA", WM8993_POWER_MANAGEMENT_2, 6, 0,
                   in1l_pga, ARRAY_SIZE(in1l_pga)),
 SND_SOC_DAPM_MIXER("IN1R PGA", WM8993_POWER_MANAGEMENT_2, 4, 0,
@@ -834,9 +836,11 @@ static const struct snd_soc_dapm_route lineout1_diff_routes[] = {
 };
 
 static const struct snd_soc_dapm_route lineout1_se_routes[] = {
+       { "LINEOUT1N Mixer", NULL, "LINEOUT_VMID_BUF" },
        { "LINEOUT1N Mixer", "Left Output Switch", "Left Output PGA" },
        { "LINEOUT1N Mixer", "Right Output Switch", "Right Output PGA" },
 
+       { "LINEOUT1P Mixer", NULL, "LINEOUT_VMID_BUF" },
        { "LINEOUT1P Mixer", "Left Output Switch", "Left Output PGA" },
 
        { "LINEOUT1N Driver", NULL, "LINEOUT1N Mixer" },
@@ -844,8 +848,8 @@ static const struct snd_soc_dapm_route lineout1_se_routes[] = {
 };
 
 static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
-       { "LINEOUT2 Mixer", "IN2L Switch", "IN2L PGA" },
-       { "LINEOUT2 Mixer", "IN2R Switch", "IN2R PGA" },
+       { "LINEOUT2 Mixer", "IN1L Switch", "IN1L PGA" },
+       { "LINEOUT2 Mixer", "IN1R Switch", "IN1R PGA" },
        { "LINEOUT2 Mixer", "Output Switch", "Right Output PGA" },
 
        { "LINEOUT2N Driver", NULL, "LINEOUT2 Mixer" },
@@ -853,9 +857,11 @@ static const struct snd_soc_dapm_route lineout2_diff_routes[] = {
 };
 
 static const struct snd_soc_dapm_route lineout2_se_routes[] = {
+       { "LINEOUT2N Mixer", NULL, "LINEOUT_VMID_BUF" },
        { "LINEOUT2N Mixer", "Left Output Switch", "Left Output PGA" },
        { "LINEOUT2N Mixer", "Right Output Switch", "Right Output PGA" },
 
+       { "LINEOUT2P Mixer", NULL, "LINEOUT_VMID_BUF" },
        { "LINEOUT2P Mixer", "Right Output Switch", "Right Output PGA" },
 
        { "LINEOUT2N Driver", NULL, "LINEOUT2N Mixer" },
index 3fc96130d1a6b0ccb76470e979eb34896682e965..de83904498730dbf43447cdcc4ef42cd34a9d0d7 100644 (file)
@@ -113,9 +113,9 @@ static int ep93xx_pcm_open(struct snd_pcm_substream *substream)
        rtd->dma_data.name = dma_params->name;
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
-               rtd->dma_data.direction = DMA_TO_DEVICE;
+               rtd->dma_data.direction = DMA_MEM_TO_DEV;
        else
-               rtd->dma_data.direction = DMA_FROM_DEVICE;
+               rtd->dma_data.direction = DMA_DEV_TO_MEM;
 
        rtd->dma_chan = dma_request_channel(mask, ep93xx_pcm_dma_filter,
                                            &rtd->dma_data);
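
Illustrative sketch, separate from the diff above: this hunk, and several of the following ones, switch audio DMA code from the dma_data_direction values (DMA_TO_DEVICE / DMA_FROM_DEVICE) to the dmaengine slave-transfer enum (DMA_MEM_TO_DEV / DMA_DEV_TO_MEM). A small sketch of the mapping from PCM stream direction to the new enum; the helper name is illustrative.

#include <linux/dmaengine.h>
#include <sound/pcm.h>

/* Playback pushes memory to the device; capture pulls device data to memory. */
static enum dma_transfer_direction pcm_dma_direction(int stream)
{
	return stream == SNDRV_PCM_STREAM_PLAYBACK ?
			DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
}
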
index 1cf2fe889f6adaa885c77bf2c74da9dd9f56a3d8..5780c9b9d569cf78f82edb63104322c896b7c234 100644 (file)
@@ -88,11 +88,13 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream,
        iprtd->dma_data.dma_request = dma_params->dma;
 
        /* Try to grab a DMA channel */
-       dma_cap_zero(mask);
-       dma_cap_set(DMA_SLAVE, mask);
-       iprtd->dma_chan = dma_request_channel(mask, filter, iprtd);
-       if (!iprtd->dma_chan)
-               return -EINVAL;
+       if (!iprtd->dma_chan) {
+               dma_cap_zero(mask);
+               dma_cap_set(DMA_SLAVE, mask);
+               iprtd->dma_chan = dma_request_channel(mask, filter, iprtd);
+               if (!iprtd->dma_chan)
+                       return -EINVAL;
+       }
 
        switch (params_format(params)) {
        case SNDRV_PCM_FORMAT_S16_LE:
@@ -107,12 +109,12 @@ static int imx_ssi_dma_alloc(struct snd_pcm_substream *substream,
        }
 
        if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
-               slave_config.direction = DMA_TO_DEVICE;
+               slave_config.direction = DMA_MEM_TO_DEV;
                slave_config.dst_addr = dma_params->dma_addr;
                slave_config.dst_addr_width = buswidth;
                slave_config.dst_maxburst = dma_params->burstsize;
        } else {
-               slave_config.direction = DMA_FROM_DEVICE;
+               slave_config.direction = DMA_DEV_TO_MEM;
                slave_config.src_addr = dma_params->dma_addr;
                slave_config.src_addr_width = buswidth;
                slave_config.src_maxburst = dma_params->burstsize;
@@ -159,7 +161,7 @@ static int snd_imx_pcm_hw_params(struct snd_pcm_substream *substream,
                        iprtd->period_bytes * iprtd->periods,
                        iprtd->period_bytes,
                        substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-                       DMA_TO_DEVICE : DMA_FROM_DEVICE);
+                       DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
        if (!iprtd->desc) {
                dev_err(&chan->dev->device, "cannot prepare slave dma\n");
                return -EINVAL;
index 0e12f4e0a76d60ac10dab9b70ad4e12f80e0d0c7..105f42a394df6460e80899f22c567f3ac6192c7f 100644 (file)
@@ -136,7 +136,7 @@ static int snd_mxs_pcm_hw_params(struct snd_pcm_substream *substream,
                        iprtd->period_bytes * iprtd->periods,
                        iprtd->period_bytes,
                        substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-                       DMA_TO_DEVICE : DMA_FROM_DEVICE);
+                       DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
        if (!iprtd->desc) {
                dev_err(&chan->dev->device, "cannot prepare slave dma\n");
                return -EINVAL;
index dccfb37a96261dd1523ca5e3bf13f1145d0b68e0..f204dbac11d4e044edf12501b51238adcd069c28 100644 (file)
@@ -124,6 +124,8 @@ static int mxs_saif_set_clk(struct mxs_saif *saif,
         *
         * If MCLK is not used, we just set saif clk to 512*fs.
         */
+       clk_prepare_enable(master_saif->clk);
+
        if (master_saif->mclk_in_use) {
                if (mclk % 32 == 0) {
                        scr &= ~BM_SAIF_CTRL_BITCLK_BASE_RATE;
@@ -133,6 +135,7 @@ static int mxs_saif_set_clk(struct mxs_saif *saif,
                        ret = clk_set_rate(master_saif->clk, 384 * rate);
                } else {
                        /* SAIF MCLK should be either 32x or 48x */
+                       clk_disable_unprepare(master_saif->clk);
                        return -EINVAL;
                }
        } else {
@@ -140,6 +143,8 @@ static int mxs_saif_set_clk(struct mxs_saif *saif,
                scr &= ~BM_SAIF_CTRL_BITCLK_BASE_RATE;
        }
 
+       clk_disable_unprepare(master_saif->clk);
+
        if (ret)
                return ret;
 
index 45d11ddaeea9110a66182eaaa18047c8465ff90b..946020a647db2e1d5eec71f82fb9a3c936ced546 100644 (file)
@@ -366,7 +366,7 @@ static int __devinit nuc900_ac97_drvprobe(struct platform_device *pdev)
                goto out3;
 
        /* enable ac97 multifunction pin */
-       mfp_set_groupg(nuc900_audio->dev, "nuc900-audio");
+       mfp_set_groupg(nuc900_audio->dev, NULL);
 
        return 0;
 
index 427ae0d9817bb95cb09fbab000d6a2d0a35d32c8..e4ba17ce6b32cf3c8d3d3a0762d70c7b7c969b3c 100644 (file)
@@ -86,7 +86,7 @@ static void dma_enqueue(struct snd_pcm_substream *substream)
        dma_info.cap = (samsung_dma_has_circular() ? DMA_CYCLIC : DMA_SLAVE);
        dma_info.direction =
                (substream->stream == SNDRV_PCM_STREAM_PLAYBACK
-               ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+               ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
        dma_info.fp = audio_buffdone;
        dma_info.fp_param = substream;
        dma_info.period = prtd->dma_period;
@@ -171,7 +171,7 @@ static int dma_hw_params(struct snd_pcm_substream *substream,
                dma_info.client = prtd->params->client;
                dma_info.direction =
                        (substream->stream == SNDRV_PCM_STREAM_PLAYBACK
-                       ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+                       ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM);
                dma_info.width = prtd->params->dma_size;
                dma_info.fifo = prtd->params->dma_addr;
                prtd->params->ch = prtd->params->ops->request(
index 7ac0ba2025c337b5f313996f00a04c52975cb9a5..c6012ff5bd3ea977cd18f5bb98e4e7e6ded82d1e 100644 (file)
@@ -230,8 +230,6 @@ static const struct snd_kcontrol_new neo1973_wm8753_controls[] = {
 
 /* GTA02 specific routes and controls */
 
-#ifdef CONFIG_MACH_NEO1973_GTA02
-
 static int gta02_speaker_enabled;
 
 static int lm4853_set_spk(struct snd_kcontrol *kcontrol,
@@ -311,10 +309,6 @@ static int neo1973_gta02_wm8753_init(struct snd_soc_codec *codec)
        return 0;
 }
 
-#else
-static int neo1973_gta02_wm8753_init(struct snd_soc_code *codec) { return 0; }
-#endif
-
 static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd)
 {
        struct snd_soc_codec *codec = rtd->codec;
@@ -322,10 +316,6 @@ static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd)
        int ret;
 
        /* set up NC codec pins */
-       if (machine_is_neo1973_gta01()) {
-               snd_soc_dapm_nc_pin(dapm, "LOUT2");
-               snd_soc_dapm_nc_pin(dapm, "ROUT2");
-       }
        snd_soc_dapm_nc_pin(dapm, "OUT3");
        snd_soc_dapm_nc_pin(dapm, "OUT4");
        snd_soc_dapm_nc_pin(dapm, "LINE1");
@@ -370,50 +360,6 @@ static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd)
        return 0;
 }
 
-/* GTA01 specific controls */
-
-#ifdef CONFIG_MACH_NEO1973_GTA01
-
-static const struct snd_soc_dapm_route neo1973_lm4857_routes[] = {
-       {"Amp IN", NULL, "ROUT1"},
-       {"Amp IN", NULL, "LOUT1"},
-
-       {"Handset Spk", NULL, "Amp EP"},
-       {"Stereo Out", NULL, "Amp LS"},
-       {"Headphone", NULL, "Amp HP"},
-};
-
-static const struct snd_soc_dapm_widget neo1973_lm4857_dapm_widgets[] = {
-       SND_SOC_DAPM_SPK("Handset Spk", NULL),
-       SND_SOC_DAPM_SPK("Stereo Out", NULL),
-       SND_SOC_DAPM_HP("Headphone", NULL),
-};
-
-static int neo1973_lm4857_init(struct snd_soc_dapm_context *dapm)
-{
-       int ret;
-
-       ret = snd_soc_dapm_new_controls(dapm, neo1973_lm4857_dapm_widgets,
-                       ARRAY_SIZE(neo1973_lm4857_dapm_widgets));
-       if (ret)
-               return ret;
-
-       ret = snd_soc_dapm_add_routes(dapm, neo1973_lm4857_routes,
-                       ARRAY_SIZE(neo1973_lm4857_routes));
-       if (ret)
-               return ret;
-
-       snd_soc_dapm_ignore_suspend(dapm, "Stereo Out");
-       snd_soc_dapm_ignore_suspend(dapm, "Handset Spk");
-       snd_soc_dapm_ignore_suspend(dapm, "Headphone");
-
-       return 0;
-}
-
-#else
-static int neo1973_lm4857_init(struct snd_soc_dapm_context *dapm) { return 0; };
-#endif
-
 static struct snd_soc_dai_link neo1973_dai[] = {
 { /* Hifi Playback - for simultaneous use with voice below */
        .name = "WM8753",
@@ -440,11 +386,6 @@ static struct snd_soc_aux_dev neo1973_aux_devs[] = {
                .name = "dfbmcs320",
                .codec_name = "dfbmcs320.0",
        },
-       {
-               .name = "lm4857",
-               .codec_name = "lm4857.0-007c",
-               .init = neo1973_lm4857_init,
-       },
 };
 
 static struct snd_soc_codec_conf neo1973_codec_conf[] = {
@@ -454,14 +395,10 @@ static struct snd_soc_codec_conf neo1973_codec_conf[] = {
        },
 };
 
-#ifdef CONFIG_MACH_NEO1973_GTA02
 static const struct gpio neo1973_gta02_gpios[] = {
        { GTA02_GPIO_HP_IN, GPIOF_OUT_INIT_HIGH, "GTA02_HP_IN" },
        { GTA02_GPIO_AMP_SHUT, GPIOF_OUT_INIT_HIGH, "GTA02_AMP_SHUT" },
 };
-#else
-static const struct gpio neo1973_gta02_gpios[] = {};
-#endif
 
 static struct snd_soc_card neo1973 = {
        .name = "neo1973",
@@ -480,7 +417,7 @@ static int __init neo1973_init(void)
 {
        int ret;
 
-       if (!machine_is_neo1973_gta01() && !machine_is_neo1973_gta02())
+       if (!machine_is_neo1973_gta02())
                return -ENODEV;
 
        if (machine_is_neo1973_gta02()) {
index f8f681690a712d6c978ec607196fdd956c6fca2a..0193e595d415da8b82e7bf383b243dbdba25f88e 100644 (file)
@@ -131,7 +131,7 @@ static int siu_pcm_wr_set(struct siu_port *port_info,
        sg_dma_address(&sg) = buff;
 
        desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan,
-               &sg, 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               &sg, 1, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(dev, "Failed to allocate a dma descriptor\n");
                return -ENOMEM;
@@ -181,7 +181,7 @@ static int siu_pcm_rd_set(struct siu_port *port_info,
        sg_dma_address(&sg) = buff;
 
        desc = siu_stream->chan->device->device_prep_slave_sg(siu_stream->chan,
-               &sg, 1, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+               &sg, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(dev, "Failed to allocate dma descriptor\n");
                return -ENOMEM;
index 3986520b4677244b9bd592b85f2350889bda5cdf..92cee24ed2dcce7eb7b7e8de3a87f09d0ffe5138 100644 (file)
@@ -567,6 +567,17 @@ int snd_soc_suspend(struct device *dev)
                if (!codec->suspended && codec->driver->suspend) {
                        switch (codec->dapm.bias_level) {
                        case SND_SOC_BIAS_STANDBY:
+                               /*
+                                * If the CODEC is capable of idle
+                                * bias off, then being in STANDBY
+                                * means it's doing something;
+                                * otherwise fall through.
+                                */
+                               if (codec->dapm.idle_bias_off) {
+                                       dev_dbg(codec->dev,
+                                               "idle_bias_off CODEC on over suspend\n");
+                                       break;
+                               }
                        case SND_SOC_BIAS_OFF:
                                codec->driver->suspend(codec);
                                codec->suspended = 1;
@@ -907,6 +918,10 @@ static void soc_remove_dai_link(struct snd_soc_card *card, int num, int order)
                        if (err < 0)
                                printk(KERN_ERR "asoc: failed to remove %s\n", platform->name);
                }
+
+               /* Make sure all DAPM widgets are freed */
+               snd_soc_dapm_free(&platform->dapm);
+
                platform->probed = 0;
                list_del(&platform->card_list);
                module_put(platform->dev->driver->owner);
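
Illustrative sketch, separate from the diff above: the suspend hunk only calls the CODEC suspend hook from STANDBY when the CODEC is not idle_bias_off, since an idle_bias_off CODEC sitting in STANDBY is actually in use; the remove hunk additionally frees platform DAPM widgets. A simplified, self-contained sketch of the suspend decision, with the types reduced to plain C for the example.

#include <stdbool.h>

enum bias_level { BIAS_OFF, BIAS_STANDBY, BIAS_PREPARE, BIAS_ON };

static bool codec_should_suspend(enum bias_level level, bool idle_bias_off)
{
	switch (level) {
	case BIAS_STANDBY:
		if (idle_bias_off)
			return false;	/* STANDBY means it is doing work */
		/* fall through */
	case BIAS_OFF:
		return true;		/* safe to call the suspend hook  */
	default:
		return false;		/* PREPARE / ON: leave it running */
	}
}
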
index 3ad1f59b80281cfc7c9ace89ae8a30d34317fe7a..1f55ded4047f03b9a538af971c01018f0fb5df10 100644 (file)
@@ -1426,7 +1426,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
                        dapm->target_bias_level = SND_SOC_BIAS_ON;
                        break;
                case SND_SOC_DAPM_STREAM_STOP:
-                       if (dapm->codec->active)
+                       if (dapm->codec && dapm->codec->active)
                                dapm->target_bias_level = SND_SOC_BIAS_ON;
                        else
                                dapm->target_bias_level = SND_SOC_BIAS_STANDBY;
index 93931def0dce62b3a5e162d7c759ac82f33745f2..21554611557c380ca391a9c384407a9744a056a1 100644 (file)
@@ -134,7 +134,7 @@ txx9aclc_dma_submit(struct txx9aclc_dmadata *dmadata, dma_addr_t buf_dma_addr)
        sg_dma_address(&sg) = buf_dma_addr;
        desc = chan->device->device_prep_slave_sg(chan, &sg, 1,
                dmadata->substream->stream == SNDRV_PCM_STREAM_PLAYBACK ?
-               DMA_TO_DEVICE : DMA_FROM_DEVICE,
+               DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
                DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!desc) {
                dev_err(&chan->dev->device, "cannot prepare slave dma\n");
index 8edc5035fc8fc5929b948a765cc463498c0a8a9e..d89ab4c7d44b28bc0c52f9e4ec817030059b0692 100644 (file)
@@ -1617,6 +1617,14 @@ YAMAHA_DEVICE(0x7010, "UB99"),
                }
        }
 },
+{
+       /* Edirol UM-3G */
+       USB_DEVICE_VENDOR_SPEC(0x0582, 0x0108),
+       .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
+               .ifnum = 0,
+               .type = QUIRK_MIDI_STANDARD_INTERFACE
+       }
+},
 {
        /* Boss JS-8 Jam Station  */
        USB_DEVICE(0x0582, 0x0109),
index ac86d67b636e06b3bcf931c9d6f6a012d4f2801f..7c12650165aed5933bdcad60a8b298d2cd6f437a 100644 (file)
@@ -104,7 +104,7 @@ endif
 
 CFLAGS = -fno-omit-frame-pointer -ggdb3 -Wall -Wextra -std=gnu99 $(CFLAGS_WERROR) $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
 EXTLIBS = -lpthread -lrt -lelf -lm
-ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
+ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 ALL_LDFLAGS = $(LDFLAGS)
 STRIP ?= strip
 
@@ -168,10 +168,7 @@ endif
 
 ### --- END CONFIGURATION SECTION ---
 
-# Those must not be GNU-specific; they are shared with perl/ which may
-# be built by a different compiler. (Note that this is an artifact now
-# but it still might be nice to keep that distinction.)
-BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include
+BASIC_CFLAGS = -Iutil/include -Iarch/$(ARCH)/include -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -D_GNU_SOURCE
 BASIC_LDFLAGS =
 
 # Guard against environment variables
index 59d43abfbfec1ce22b44ac9c2db18cd29f0ce612..fb8566181f27f2ef4dace5ba93fd273c6cae4f51 100644 (file)
@@ -20,7 +20,6 @@
  * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  *
  */
-#define _GNU_SOURCE
 #include <sys/utsname.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -31,7 +30,6 @@
 #include <stdlib.h>
 #include <string.h>
 
-#undef _GNU_SOURCE
 #include "perf.h"
 #include "builtin.h"
 #include "util/util.h"
index 8f80df89603822e5431f45f1374cf926208f23ed..dd162aa24baad2f44c5cbb1b4176fb2b2cc7b26d 100644 (file)
@@ -89,8 +89,6 @@ void get_term_dimensions(struct winsize *ws)
 
 static void perf_top__update_print_entries(struct perf_top *top)
 {
-       top->print_entries = top->winsize.ws_row;
-
        if (top->print_entries > 9)
                top->print_entries -= 9;
 }
@@ -100,6 +98,13 @@ static void perf_top__sig_winch(int sig __used, siginfo_t *info __used, void *ar
        struct perf_top *top = arg;
 
        get_term_dimensions(&top->winsize);
+       if (!top->print_entries
+           || (top->print_entries+4) > top->winsize.ws_row) {
+               top->print_entries = top->winsize.ws_row;
+       } else {
+               top->print_entries += 4;
+               top->winsize.ws_row = top->print_entries;
+       }
        perf_top__update_print_entries(top);
 }
 
@@ -453,8 +458,10 @@ static void perf_top__handle_keypress(struct perf_top *top, int c)
                                };
                                perf_top__sig_winch(SIGWINCH, NULL, top);
                                sigaction(SIGWINCH, &act, NULL);
-                       } else
+                       } else {
+                               perf_top__sig_winch(SIGWINCH, NULL, top);
                                signal(SIGWINCH, SIG_DFL);
+                       }
                        break;
                case 'E':
                        if (top->evlist->nr_entries > 1) {
index 3e7e0b09c12c29339f3853c469dc0c3cc2d0d71c..ecd7f4dd7eea0156eb4e059eb335089d416d9c0b 100644 (file)
@@ -2105,7 +2105,7 @@ int perf_event__synthesize_event_type(struct perf_tool *tool,
        strncpy(ev.event_type.event_type.name, name, MAX_EVENT_NAME - 1);
 
        ev.event_type.header.type = PERF_RECORD_HEADER_EVENT_TYPE;
-       size = strlen(name);
+       size = strlen(ev.event_type.event_type.name);
        size = ALIGN(size, sizeof(u64));
        ev.event_type.header.size = sizeof(ev.event_type) -
                (sizeof(ev.event_type.event_type.name) - size);
index eb25900e2211f936ee4caea544fd7a70c6df43e8..29cb654598113a66729a17b9bd00ff21da8c52d8 100644 (file)
@@ -19,7 +19,6 @@
  *
  */
 
-#define _GNU_SOURCE
 #include <sys/utsname.h>
 #include <sys/types.h>
 #include <sys/stat.h>
@@ -33,7 +32,6 @@
 #include <limits.h>
 #include <elf.h>
 
-#undef _GNU_SOURCE
 #include "util.h"
 #include "event.h"
 #include "string.h"
index 215d50f2042e063ce53d9526a3aecbd0ec7ff461..0975438c3e7281f5d8fc70b28374e813e8715607 100644 (file)
@@ -1,4 +1,3 @@
-#define _GNU_SOURCE
 #include <ctype.h>
 #include <dirent.h>
 #include <errno.h>
index 6c164dc9ee957dbf3df642f712b2fdc1485d2dc6..1a8d4dc4f386b5bee9897686986cf4fa31548b9b 100644 (file)
  *  The parts for function graph printing was taken and modified from the
  *  Linux Kernel that were written by Frederic Weisbecker.
  */
-#define _GNU_SOURCE
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <ctype.h>
 #include <errno.h>
 
-#undef _GNU_SOURCE
 #include "../perf.h"
 #include "util.h"
 #include "trace-event.h"
index 1212a386a0336e8b0faf02f62513d9172d42b55f..e81aef1f25698d36968da0b03a28680008d0e7c1 100644 (file)
@@ -1,6 +1,4 @@
-#define _GNU_SOURCE
 #include <stdio.h>
-#undef _GNU_SOURCE
 #include "../libslang.h"
 #include <stdlib.h>
 #include <string.h>
index 6ef3c56917626a38d835491d82461219b7dddf88..4f48f5901b303e040a0f515d1311e5d50726c83e 100644 (file)
@@ -1,4 +1,3 @@
-#define _GNU_SOURCE
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
index b9c530cce79a793657778dcc5a1ce2c5dd2215da..ecf9898169c86975c50f484483122f5a3954e37b 100644 (file)
@@ -40,7 +40,6 @@
 #define decimal_length(x)      ((int)(sizeof(x) * 2.56 + 0.5) + 1)
 
 #define _ALL_SOURCE 1
-#define _GNU_SOURCE 1
 #define _BSD_SOURCE 1
 #define HAS_BOOL
 
index 3c6f7808efae53b84cab8926f87b3a5f37ea60ee..310d3dd5e547023ea375f933cb2ad7ca61aee081 100644 (file)
@@ -811,6 +811,8 @@ int has_nehalem_turbo_ratio_limit(unsigned int family, unsigned int model)
        case 0x2C:      /* Westmere EP - Gulftown */
        case 0x2A:      /* SNB */
        case 0x2D:      /* SNB Xeon */
+       case 0x3A:      /* IVB */
+       case 0x3D:      /* IVB Xeon */
                return 1;
        case 0x2E:      /* Nehalem-EX Xeon - Beckton */
        case 0x2F:      /* Westmere-EX Xeon - Eagleton */
index 9a571e71683c2dfea90ddcf03911d93abee75081..a373a5bfff683cc4b395588af358a2aa92b2171d 100755 (executable)
@@ -2,7 +2,9 @@
 
 open (IN,"ktest.pl");
 while (<IN>) {
+    # hashes are now used
     if (/\$opt\{"?([A-Z].*?)(\[.*\])?"?\}/ ||
+       /^\s*"?([A-Z].*?)"?\s*=>\s*/ ||
        /set_test_option\("(.*?)"/) {
        $opt{$1} = 1;
     }
@@ -11,7 +13,7 @@ close IN;
 
 open (IN, "sample.conf");
 while (<IN>) {
-    if (/^\s*#?\s*(\S+)\s*=/) {
+    if (/^\s*#?\s*([A-Z]\S*)\s*=/) {
        $samp{$1} = 1;
     }
 }
index 8b4c2535b266a2abe17d3ad495b3a0fdfa8855a9..62a134dc421ae37d14307a44f09abc9c0d0c197a 100755 (executable)
@@ -18,40 +18,50 @@ $| = 1;
 my %opt;
 my %repeat_tests;
 my %repeats;
-my %default;
 
 #default opts
-$default{"NUM_TESTS"}          = 1;
-$default{"REBOOT_TYPE"}                = "grub";
-$default{"TEST_TYPE"}          = "test";
-$default{"BUILD_TYPE"}         = "randconfig";
-$default{"MAKE_CMD"}           = "make";
-$default{"TIMEOUT"}            = 120;
-$default{"TMP_DIR"}            = "/tmp/ktest/\${MACHINE}";
-$default{"SLEEP_TIME"}         = 60;   # sleep time between tests
-$default{"BUILD_NOCLEAN"}      = 0;
-$default{"REBOOT_ON_ERROR"}    = 0;
-$default{"POWEROFF_ON_ERROR"}  = 0;
-$default{"REBOOT_ON_SUCCESS"}  = 1;
-$default{"POWEROFF_ON_SUCCESS"}        = 0;
-$default{"BUILD_OPTIONS"}      = "";
-$default{"BISECT_SLEEP_TIME"}  = 60;   # sleep time between bisects
-$default{"PATCHCHECK_SLEEP_TIME"} = 60; # sleep time between patch checks
-$default{"CLEAR_LOG"}          = 0;
-$default{"BISECT_MANUAL"}      = 0;
-$default{"BISECT_SKIP"}                = 1;
-$default{"SUCCESS_LINE"}       = "login:";
-$default{"DETECT_TRIPLE_FAULT"} = 1;
-$default{"NO_INSTALL"}         = 0;
-$default{"BOOTED_TIMEOUT"}     = 1;
-$default{"DIE_ON_FAILURE"}     = 1;
-$default{"SSH_EXEC"}           = "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND";
-$default{"SCP_TO_TARGET"}      = "scp \$SRC_FILE \$SSH_USER\@\$MACHINE:\$DST_FILE";
-$default{"REBOOT"}             = "ssh \$SSH_USER\@\$MACHINE reboot";
-$default{"STOP_AFTER_SUCCESS"} = 10;
-$default{"STOP_AFTER_FAILURE"} = 60;
-$default{"STOP_TEST_AFTER"}    = 600;
-$default{"LOCALVERSION"}       = "-test";
+my %default = (
+    "NUM_TESTS"                        => 1,
+    "TEST_TYPE"                        => "build",
+    "BUILD_TYPE"               => "randconfig",
+    "MAKE_CMD"                 => "make",
+    "TIMEOUT"                  => 120,
+    "TMP_DIR"                  => "/tmp/ktest/\${MACHINE}",
+    "SLEEP_TIME"               => 60,  # sleep time between tests
+    "BUILD_NOCLEAN"            => 0,
+    "REBOOT_ON_ERROR"          => 0,
+    "POWEROFF_ON_ERROR"                => 0,
+    "REBOOT_ON_SUCCESS"                => 1,
+    "POWEROFF_ON_SUCCESS"      => 0,
+    "BUILD_OPTIONS"            => "",
+    "BISECT_SLEEP_TIME"                => 60,   # sleep time between bisects
+    "PATCHCHECK_SLEEP_TIME"    => 60, # sleep time between patch checks
+    "CLEAR_LOG"                        => 0,
+    "BISECT_MANUAL"            => 0,
+    "BISECT_SKIP"              => 1,
+    "SUCCESS_LINE"             => "login:",
+    "DETECT_TRIPLE_FAULT"      => 1,
+    "NO_INSTALL"               => 0,
+    "BOOTED_TIMEOUT"           => 1,
+    "DIE_ON_FAILURE"           => 1,
+    "SSH_EXEC"                 => "ssh \$SSH_USER\@\$MACHINE \$SSH_COMMAND",
+    "SCP_TO_TARGET"            => "scp \$SRC_FILE \$SSH_USER\@\$MACHINE:\$DST_FILE",
+    "REBOOT"                   => "ssh \$SSH_USER\@\$MACHINE reboot",
+    "STOP_AFTER_SUCCESS"       => 10,
+    "STOP_AFTER_FAILURE"       => 60,
+    "STOP_TEST_AFTER"          => 600,
+
+# required; we will ask users for these if they are not set, but we keep the
+# default value as something common.
+    "REBOOT_TYPE"              => "grub",
+    "LOCALVERSION"             => "-test",
+    "SSH_USER"                 => "root",
+    "BUILD_TARGET"             => "arch/x86/boot/bzImage",
+    "TARGET_IMAGE"             => "/boot/vmlinuz-test",
+
+    "LOG_FILE"                 => undef,
+    "IGNORE_UNUSED"            => 0,
+);
 
 my $ktest_config;
 my $version;
@@ -73,6 +83,8 @@ my $reboot_script;
 my $power_cycle;
 my $reboot;
 my $reboot_on_error;
+my $switch_to_good;
+my $switch_to_test;
 my $poweroff_on_error;
 my $die_on_failure;
 my $powercycle_after_reboot;
@@ -92,17 +104,24 @@ my $start_minconfig;
 my $start_minconfig_defined;
 my $output_minconfig;
 my $ignore_config;
+my $ignore_errors;
 my $addconfig;
 my $in_bisect = 0;
-my $bisect_bad = "";
+my $bisect_bad_commit = "";
 my $reverse_bisect;
 my $bisect_manual;
 my $bisect_skip;
 my $config_bisect_good;
+my $bisect_ret_good;
+my $bisect_ret_bad;
+my $bisect_ret_skip;
+my $bisect_ret_abort;
+my $bisect_ret_default;
 my $in_patchcheck = 0;
 my $run_test;
 my $redirect;
 my $buildlog;
+my $testlog;
 my $dmesg;
 my $monitor_fp;
 my $monitor_pid;
@@ -112,6 +131,7 @@ my $bisect_sleep_time;
 my $patchcheck_sleep_time;
 my $ignore_warnings;
 my $store_failures;
+my $store_successes;
 my $test_name;
 my $timeout;
 my $booted_timeout;
@@ -124,10 +144,34 @@ my $stop_after_failure;
 my $stop_test_after;
 my $build_target;
 my $target_image;
+my $checkout;
 my $localversion;
 my $iteration = 0;
 my $successes = 0;
 
+my $bisect_good;
+my $bisect_bad;
+my $bisect_type;
+my $bisect_start;
+my $bisect_replay;
+my $bisect_files;
+my $bisect_reverse;
+my $bisect_check;
+
+my $config_bisect;
+my $config_bisect_type;
+
+my $patchcheck_type;
+my $patchcheck_start;
+my $patchcheck_end;
+
+# set when a test is something other than just building or installing,
+# which would require more options.
+my $buildonly = 1;
+
+# set when creating a new config
+my $newconfig = 0;
+
 my %entered_configs;
 my %config_help;
 my %variable;
@@ -136,11 +180,99 @@ my %force_config;
 # do not force reboots on config problems
 my $no_reboot = 1;
 
+my %option_map = (
+    "MACHINE"                  => \$machine,
+    "SSH_USER"                 => \$ssh_user,
+    "TMP_DIR"                  => \$tmpdir,
+    "OUTPUT_DIR"               => \$outputdir,
+    "BUILD_DIR"                        => \$builddir,
+    "TEST_TYPE"                        => \$test_type,
+    "BUILD_TYPE"               => \$build_type,
+    "BUILD_OPTIONS"            => \$build_options,
+    "PRE_BUILD"                        => \$pre_build,
+    "POST_BUILD"               => \$post_build,
+    "PRE_BUILD_DIE"            => \$pre_build_die,
+    "POST_BUILD_DIE"           => \$post_build_die,
+    "POWER_CYCLE"              => \$power_cycle,
+    "REBOOT"                   => \$reboot,
+    "BUILD_NOCLEAN"            => \$noclean,
+    "MIN_CONFIG"               => \$minconfig,
+    "OUTPUT_MIN_CONFIG"                => \$output_minconfig,
+    "START_MIN_CONFIG"         => \$start_minconfig,
+    "IGNORE_CONFIG"            => \$ignore_config,
+    "TEST"                     => \$run_test,
+    "ADD_CONFIG"               => \$addconfig,
+    "REBOOT_TYPE"              => \$reboot_type,
+    "GRUB_MENU"                        => \$grub_menu,
+    "POST_INSTALL"             => \$post_install,
+    "NO_INSTALL"               => \$no_install,
+    "REBOOT_SCRIPT"            => \$reboot_script,
+    "REBOOT_ON_ERROR"          => \$reboot_on_error,
+    "SWITCH_TO_GOOD"           => \$switch_to_good,
+    "SWITCH_TO_TEST"           => \$switch_to_test,
+    "POWEROFF_ON_ERROR"                => \$poweroff_on_error,
+    "DIE_ON_FAILURE"           => \$die_on_failure,
+    "POWER_OFF"                        => \$power_off,
+    "POWERCYCLE_AFTER_REBOOT"  => \$powercycle_after_reboot,
+    "POWEROFF_AFTER_HALT"      => \$poweroff_after_halt,
+    "SLEEP_TIME"               => \$sleep_time,
+    "BISECT_SLEEP_TIME"                => \$bisect_sleep_time,
+    "PATCHCHECK_SLEEP_TIME"    => \$patchcheck_sleep_time,
+    "IGNORE_WARNINGS"          => \$ignore_warnings,
+    "IGNORE_ERRORS"            => \$ignore_errors,
+    "BISECT_MANUAL"            => \$bisect_manual,
+    "BISECT_SKIP"              => \$bisect_skip,
+    "CONFIG_BISECT_GOOD"       => \$config_bisect_good,
+    "BISECT_RET_GOOD"          => \$bisect_ret_good,
+    "BISECT_RET_BAD"           => \$bisect_ret_bad,
+    "BISECT_RET_SKIP"          => \$bisect_ret_skip,
+    "BISECT_RET_ABORT"         => \$bisect_ret_abort,
+    "BISECT_RET_DEFAULT"       => \$bisect_ret_default,
+    "STORE_FAILURES"           => \$store_failures,
+    "STORE_SUCCESSES"          => \$store_successes,
+    "TEST_NAME"                        => \$test_name,
+    "TIMEOUT"                  => \$timeout,
+    "BOOTED_TIMEOUT"           => \$booted_timeout,
+    "CONSOLE"                  => \$console,
+    "DETECT_TRIPLE_FAULT"      => \$detect_triplefault,
+    "SUCCESS_LINE"             => \$success_line,
+    "REBOOT_SUCCESS_LINE"      => \$reboot_success_line,
+    "STOP_AFTER_SUCCESS"       => \$stop_after_success,
+    "STOP_AFTER_FAILURE"       => \$stop_after_failure,
+    "STOP_TEST_AFTER"          => \$stop_test_after,
+    "BUILD_TARGET"             => \$build_target,
+    "SSH_EXEC"                 => \$ssh_exec,
+    "SCP_TO_TARGET"            => \$scp_to_target,
+    "CHECKOUT"                 => \$checkout,
+    "TARGET_IMAGE"             => \$target_image,
+    "LOCALVERSION"             => \$localversion,
+
+    "BISECT_GOOD"              => \$bisect_good,
+    "BISECT_BAD"               => \$bisect_bad,
+    "BISECT_TYPE"              => \$bisect_type,
+    "BISECT_START"             => \$bisect_start,
+    "BISECT_REPLAY"            => \$bisect_replay,
+    "BISECT_FILES"             => \$bisect_files,
+    "BISECT_REVERSE"           => \$bisect_reverse,
+    "BISECT_CHECK"             => \$bisect_check,
+
+    "CONFIG_BISECT"            => \$config_bisect,
+    "CONFIG_BISECT_TYPE"       => \$config_bisect_type,
+
+    "PATCHCHECK_TYPE"          => \$patchcheck_type,
+    "PATCHCHECK_START"         => \$patchcheck_start,
+    "PATCHCHECK_END"           => \$patchcheck_end,
+);
+
+# Options may be used by other options; record them.
+my %used_options;
+
 # default variables that can be used
 chomp ($variable{"PWD"} = `pwd`);
 
 $config_help{"MACHINE"} = << "EOF"
  The machine hostname that you will test.
+ For build-only tests, it is still needed to differentiate log files.
 EOF
     ;
 $config_help{"SSH_USER"} = << "EOF"
@@ -150,11 +282,15 @@ EOF
     ;
 $config_help{"BUILD_DIR"} = << "EOF"
  The directory that contains the Linux source code (full path).
+ You can use \${PWD} that will be the path where ktest.pl is run, or use
+ \${THIS_DIR} which is assigned \${PWD} but may be changed later.
 EOF
     ;
 $config_help{"OUTPUT_DIR"} = << "EOF"
  The directory that the objects will be built (full path).
  (can not be same as BUILD_DIR)
+ You can use \${PWD} that will be the path where ktest.pl is run, or use
+ \${THIS_DIR} which is assigned \${PWD} but may be changed later.
 EOF
     ;
 $config_help{"BUILD_TARGET"} = << "EOF"
@@ -162,6 +298,11 @@ $config_help{"BUILD_TARGET"} = << "EOF"
  (relative to OUTPUT_DIR)
 EOF
     ;
+$config_help{"BUILD_OPTIONS"} = << "EOF"
+ Options to add to \"make\" when building.
+ i.e.  -j20
+EOF
+    ;
 $config_help{"TARGET_IMAGE"} = << "EOF"
  The place to put your image on the test machine.
 EOF
@@ -227,20 +368,36 @@ $config_help{"REBOOT_SCRIPT"} = << "EOF"
 EOF
     ;
 
-sub read_yn {
-    my ($prompt) = @_;
+sub read_prompt {
+    my ($cancel, $prompt) = @_;
 
     my $ans;
 
     for (;;) {
-       print "$prompt [Y/n] ";
+       if ($cancel) {
+           print "$prompt [y/n/C] ";
+       } else {
+           print "$prompt [Y/n] ";
+       }
        $ans = <STDIN>;
        chomp $ans;
        if ($ans =~ /^\s*$/) {
-           $ans = "y";
+           if ($cancel) {
+               $ans = "c";
+           } else {
+               $ans = "y";
+           }
        }
        last if ($ans =~ /^y$/i || $ans =~ /^n$/i);
-       print "Please answer either 'y' or 'n'.\n";
+       if ($cancel) {
+           last if ($ans =~ /^c$/i);
+           print "Please answer either 'y', 'n' or 'c'.\n";
+       } else {
+           print "Please answer either 'y' or 'n'.\n";
+       }
+    }
+    if ($ans =~ /^c/i) {
+       exit;
     }
     if ($ans !~ /^y$/i) {
        return 0;
@@ -248,6 +405,18 @@ sub read_yn {
     return 1;
 }
 
+sub read_yn {
+    my ($prompt) = @_;
+
+    return read_prompt 0, $prompt;
+}
+
+sub read_ync {
+    my ($prompt) = @_;
+
+    return read_prompt 1, $prompt;
+}
+
 sub get_ktest_config {
     my ($config) = @_;
     my $ans;
@@ -261,7 +430,7 @@ sub get_ktest_config {
 
     for (;;) {
        print "$config = ";
-       if (defined($default{$config})) {
+       if (defined($default{$config}) && length($default{$config})) {
            print "\[$default{$config}\] ";
        }
        $ans = <STDIN>;
@@ -274,22 +443,37 @@ sub get_ktest_config {
                next;
            }
        }
-       $entered_configs{$config} = process_variables($ans);
+       $entered_configs{$config} = ${ans};
        last;
     }
 }
 
 sub get_ktest_configs {
     get_ktest_config("MACHINE");
-    get_ktest_config("SSH_USER");
     get_ktest_config("BUILD_DIR");
     get_ktest_config("OUTPUT_DIR");
-    get_ktest_config("BUILD_TARGET");
-    get_ktest_config("TARGET_IMAGE");
-    get_ktest_config("POWER_CYCLE");
-    get_ktest_config("CONSOLE");
+
+    if ($newconfig) {
+       get_ktest_config("BUILD_OPTIONS");
+    }
+
+    # options required for other than just building a kernel
+    if (!$buildonly) {
+       get_ktest_config("POWER_CYCLE");
+       get_ktest_config("CONSOLE");
+    }
+
+    # options required for install and more
+    if ($buildonly != 1) {
+       get_ktest_config("SSH_USER");
+       get_ktest_config("BUILD_TARGET");
+       get_ktest_config("TARGET_IMAGE");
+    }
+
     get_ktest_config("LOCALVERSION");
 
+    return if ($buildonly);
+
     my $rtype = $opt{"REBOOT_TYPE"};
 
     if (!defined($rtype)) {
@@ -303,8 +487,6 @@ sub get_ktest_configs {
 
     if ($rtype eq "grub") {
        get_ktest_config("GRUB_MENU");
-    } else {
-       get_ktest_config("REBOOT_SCRIPT");
     }
 }
 
@@ -334,6 +516,10 @@ sub process_variables {
        } else {
            # put back the origin piece.
            $retval = "$retval\$\{$var\}";
+           # This could be an option that is used later; save
+           # it so we don't warn if this option is not one of
+           # ktest's options.
+           $used_options{$var} = 1;
        }
        $value = $end;
     }
@@ -348,6 +534,19 @@ sub process_variables {
 sub set_value {
     my ($lvalue, $rvalue, $override, $overrides, $name) = @_;
 
+    my $prvalue = process_variables($rvalue);
+
+    if ($buildonly && $lvalue =~ /^TEST_TYPE(\[.*\])?$/ && $prvalue ne "build") {
+       # Note if a test is something other than build, then we
+       # will need other mandatory options.
+       if ($prvalue ne "install") {
+           $buildonly = 0;
+       } else {
+           # install still limits some mandatory options.
+           $buildonly = 2;
+       }
+    }
+
     if (defined($opt{$lvalue})) {
        if (!$override || defined(${$overrides}{$lvalue})) {
            my $extra = "";
@@ -356,13 +555,12 @@ sub set_value {
            }
            die "$name: $.: Option $lvalue defined more than once!\n$extra";
        }
-       ${$overrides}{$lvalue} = $rvalue;
+       ${$overrides}{$lvalue} = $prvalue;
     }
     if ($rvalue =~ /^\s*$/) {
        delete $opt{$lvalue};
     } else {
-       $rvalue = process_variables($rvalue);
-       $opt{$lvalue} = $rvalue;
+       $opt{$lvalue} = $prvalue;
     }
 }
 
@@ -712,6 +910,15 @@ sub __read_config {
     return $test_case;
 }
 
+sub get_test_case {
+       print "What test case would you like to run?\n";
+       print " (build, install or boot)\n";
+       print " Other tests are available but require editing the config file\n";
+       my $ans = <STDIN>;
+       chomp $ans;
+       $default{"TEST_TYPE"} = $ans;
+}
+
 sub read_config {
     my ($config) = @_;
 
@@ -726,10 +933,7 @@ sub read_config {
     # was a test specified?
     if (!$test_case) {
        print "No test case specified.\n";
-       print "What test case would you like to run?\n";
-       my $ans = <STDIN>;
-       chomp $ans;
-       $default{"TEST_TYPE"} = $ans;
+       get_test_case;
     }
 
     # set any defaults
@@ -739,6 +943,37 @@ sub read_config {
            $opt{$default} = $default{$default};
        }
     }
+
+    if ($opt{"IGNORE_UNUSED"} == 1) {
+       return;
+    }
+
+    my %not_used;
+
+    # check if there are any stragglers (typos?)
+    foreach my $option (keys %opt) {
+       my $op = $option;
+       # remove per test labels.
+       $op =~ s/\[.*\]//;
+       if (!exists($option_map{$op}) &&
+           !exists($default{$op}) &&
+           !exists($used_options{$op})) {
+           $not_used{$op} = 1;
+       }
+    }
+
+    if (%not_used) {
+       my $s = "s are";
+       $s = " is" if (keys %not_used == 1);
+       print "The following option$s not used; could be a typo:\n";
+       foreach my $option (keys %not_used) {
+           print "$option\n";
+       }
+       print "Set IGNORE_UNUSED = 1 to have ktest ignore unused options\n";
+       if (!read_yn "Do you want to continue?") {
+           exit -1;
+       }
+    }
 }
 
 sub __eval_option {
@@ -873,6 +1108,17 @@ sub reboot {
     }
 }
 
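+# Reboot back to the known good kernel: run SWITCH_TO_GOOD if it is defined,
+# otherwise do a normal reboot (with an optional wait time).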
+sub reboot_to_good {
+    my ($time) = @_;
+
+    if (defined($switch_to_good)) {
+       run_command $switch_to_good;
+       return;
+    }
+
+    reboot $time;
+}
+
 sub do_not_reboot {
     my $i = $iteration;
 
@@ -889,7 +1135,7 @@ sub dodie {
     if ($reboot_on_error && !do_not_reboot) {
 
        doprint "REBOOTING\n";
-       reboot;
+       reboot_to_good;
 
     } elsif ($poweroff_on_error && defined($power_off)) {
        doprint "POWERING OFF\n";
@@ -975,6 +1221,43 @@ sub wait_for_monitor {
     print "** Monitor flushed **\n";
 }
 
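+# Copy the .config, build log, dmesg and test log (when present) into a
+# time-stamped "$machine-$test_type-$type-$result-$date" directory under $basedir.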
+sub save_logs {
+       my ($result, $basedir) = @_;
+       my @t = localtime;
+       my $date = sprintf "%04d%02d%02d%02d%02d%02d",
+               1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0];
+
+       my $type = $build_type;
+       if ($type =~ /useconfig/) {
+           $type = "useconfig";
+       }
+
+       my $dir = "$machine-$test_type-$type-$result-$date";
+
+       $dir = "$basedir/$dir";
+
+       if (!-d $dir) {
+           mkpath($dir) or
+               die "can't create $dir";
+       }
+
+       my %files = (
+               "config" => $output_config,
+               "buildlog" => $buildlog,
+               "dmesg" => $dmesg,
+               "testlog" => $testlog,
+       );
+
+       while (my ($name, $source) = each(%files)) {
+               if (-f "$source") {
+                       cp "$source", "$dir/$name" or
+                               die "failed to copy $source";
+               }
+       }
+
+       doprint "*** Saved info to $dir ***\n";
+}
+
 sub fail {
 
        if ($die_on_failure) {
@@ -988,7 +1271,7 @@ sub fail {
        # no need to reboot for just building.
        if (!do_not_reboot) {
            doprint "REBOOTING\n";
-           reboot $sleep_time;
+           reboot_to_good $sleep_time;
        }
 
        my $name = "";
@@ -1003,38 +1286,9 @@ sub fail {
        doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
        doprint "%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n";
 
-       return 1 if (!defined($store_failures));
-
-       my @t = localtime;
-       my $date = sprintf "%04d%02d%02d%02d%02d%02d",
-               1900+$t[5],$t[4],$t[3],$t[2],$t[1],$t[0];
-
-       my $type = $build_type;
-       if ($type =~ /useconfig/) {
-           $type = "useconfig";
-       }
-
-       my $dir = "$machine-$test_type-$type-fail-$date";
-       my $faildir = "$store_failures/$dir";
-
-       if (!-d $faildir) {
-           mkpath($faildir) or
-               die "can't create $faildir";
-       }
-       if (-f "$output_config") {
-           cp "$output_config", "$faildir/config" or
-               die "failed to copy .config";
-       }
-       if (-f $buildlog) {
-           cp $buildlog, "$faildir/buildlog" or
-               die "failed to move $buildlog";
-       }
-       if (-f $dmesg) {
-           cp $dmesg, "$faildir/dmesg" or
-               die "failed to move $dmesg";
-       }
-
-       doprint "*** Saved info to $faildir ***\n";
+       if (defined($store_failures)) {
+           save_logs "fail", $store_failures;
+       }
 
        return 1;
 }
@@ -1170,13 +1424,16 @@ sub wait_for_input
 }
 
 sub reboot_to {
+    if (defined($switch_to_test)) {
+       run_command $switch_to_test;
+    }
+
     if ($reboot_type eq "grub") {
        run_ssh "'(echo \"savedefault --default=$grub_number --once\" | grub --batch)'";
-       reboot;
-       return;
+    } elsif (defined $reboot_script) {
+       run_command "$reboot_script";
     }
-
-    run_command "$reboot_script";
+    reboot;
 }
 
 sub get_sha1 {
@@ -1274,7 +1531,7 @@ sub monitor {
        }
 
        if ($full_line =~ /call trace:/i) {
-           if (!$bug && !$skip_call_trace) {
+           if (!$ignore_errors && !$bug && !$skip_call_trace) {
                $bug = 1;
                $failure_start = time;
            }
@@ -1341,12 +1598,19 @@ sub monitor {
     return 1;
 }
 
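+# Substitute $KERNEL_VERSION in an option value with the current kernel version.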
+sub eval_kernel_version {
+    my ($option) = @_;
+
+    $option =~ s/\$KERNEL_VERSION/$version/g;
+
+    return $option;
+}
+
 sub do_post_install {
 
     return if (!defined($post_install));
 
-    my $cp_post_install = $post_install;
-    $cp_post_install =~ s/\$KERNEL_VERSION/$version/g;
+    my $cp_post_install = eval_kernel_version $post_install;
     run_command "$cp_post_install" or
        dodie "Failed to run post install";
 }
@@ -1355,7 +1619,9 @@ sub install {
 
     return if ($no_install);
 
-    run_scp "$outputdir/$build_target", "$target_image" or
+    my $cp_target = eval_kernel_version $target_image;
+
+    run_scp "$outputdir/$build_target", "$cp_target" or
        dodie "failed to copy image";
 
     my $install_mods = 0;
@@ -1640,9 +1906,13 @@ sub success {
     doprint     "*******************************************\n";
     doprint     "*******************************************\n";
 
+    if (defined($store_successes)) {
+        save_logs "success", $store_successes;
+    }
+
     if ($i != $opt{"NUM_TESTS"} && !do_not_reboot) {
        doprint "Reboot and wait $sleep_time seconds\n";
-       reboot $sleep_time;
+       reboot_to_good $sleep_time;
     }
 }
 
@@ -1669,7 +1939,10 @@ sub child_run_test {
     $poweroff_on_error = 0;
     $die_on_failure = 1;
 
+    $redirect = "$testlog";
     run_command $run_test or $failed = 1;
+    undef $redirect;
+
     exit $failed;
 }
 
@@ -1744,6 +2017,43 @@ sub do_run_test {
     waitpid $child_pid, 0;
     $child_exit = $?;
 
+    if (!$bug && $in_bisect) {
+       if (defined($bisect_ret_good)) {
+           if ($child_exit == $bisect_ret_good) {
+               return 1;
+           }
+       }
+       if (defined($bisect_ret_skip)) {
+           if ($child_exit == $bisect_ret_skip) {
+               return -1;
+           }
+       }
+       if (defined($bisect_ret_abort)) {
+           if ($child_exit == $bisect_ret_abort) {
+               fail "test abort" and return -2;
+           }
+       }
+       if (defined($bisect_ret_bad)) {
+           if ($child_exit == $bisect_ret_bad) {
+               return 0;
+           }
+       }
+       if (defined($bisect_ret_default)) {
+           if ($bisect_ret_default eq "good") {
+               return 1;
+           } elsif ($bisect_ret_default eq "bad") {
+               return 0;
+           } elsif ($bisect_ret_default eq "skip") {
+               return -1;
+           } elsif ($bisect_ret_default eq "abort") {
+               return -2;
+           } else {
+               fail "unknown default action: $bisect_ret_default"
+                   and return -2;
+           }
+       }
+    }
+
     if ($bug || $child_exit) {
        return 0 if $in_bisect;
        fail "test failed" and return 0;
@@ -1770,7 +2080,7 @@ sub run_git_bisect {
     if ($output =~ m/^(Bisecting: .*\(roughly \d+ steps?\))\s+\[([[:xdigit:]]+)\]/) {
        doprint "$1 [$2]\n";
     } elsif ($output =~ m/^([[:xdigit:]]+) is the first bad commit/) {
-       $bisect_bad = $1;
+       $bisect_bad_commit = $1;
        doprint "Found bad commit... $1\n";
        return 0;
     } else {
@@ -1783,7 +2093,7 @@ sub run_git_bisect {
 
 sub bisect_reboot {
     doprint "Reboot and sleep $bisect_sleep_time seconds\n";
-    reboot $bisect_sleep_time;
+    reboot_to_good $bisect_sleep_time;
 }
 
 # returns 1 on success, 0 on failure, -1 on skip
@@ -1868,21 +2178,28 @@ sub run_bisect {
     }
 }
 
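+# Save the output of "git bisect log" to a temporary file and return its path,
+# so that an in-progress bisect can be continued via a replay.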
+sub update_bisect_replay {
+    my $tmp_log = "$tmpdir/ktest_bisect_log";
+    run_command "git bisect log > $tmp_log" or
+       die "can't create bisect log";
+    return $tmp_log;
+}
+
 sub bisect {
     my ($i) = @_;
 
     my $result;
 
-    die "BISECT_GOOD[$i] not defined\n"        if (!defined($opt{"BISECT_GOOD[$i]"}));
-    die "BISECT_BAD[$i] not defined\n" if (!defined($opt{"BISECT_BAD[$i]"}));
-    die "BISECT_TYPE[$i] not defined\n"        if (!defined($opt{"BISECT_TYPE[$i]"}));
+    die "BISECT_GOOD[$i] not defined\n"        if (!defined($bisect_good));
+    die "BISECT_BAD[$i] not defined\n" if (!defined($bisect_bad));
+    die "BISECT_TYPE[$i] not defined\n"        if (!defined($bisect_type));
 
-    my $good = $opt{"BISECT_GOOD[$i]"};
-    my $bad = $opt{"BISECT_BAD[$i]"};
-    my $type = $opt{"BISECT_TYPE[$i]"};
-    my $start = $opt{"BISECT_START[$i]"};
-    my $replay = $opt{"BISECT_REPLAY[$i]"};
-    my $start_files = $opt{"BISECT_FILES[$i]"};
+    my $good = $bisect_good;
+    my $bad = $bisect_bad;
+    my $type = $bisect_type;
+    my $start = $bisect_start;
+    my $replay = $bisect_replay;
+    my $start_files = $bisect_files;
 
     if (defined($start_files)) {
        $start_files = " -- " . $start_files;
@@ -1894,8 +2211,7 @@ sub bisect {
     $good = get_sha1($good);
     $bad = get_sha1($bad);
 
-    if (defined($opt{"BISECT_REVERSE[$i]"}) &&
-       $opt{"BISECT_REVERSE[$i]"} == 1) {
+    if (defined($bisect_reverse) && $bisect_reverse == 1) {
        doprint "Performing a reverse bisect (bad is good, good is bad!)\n";
        $reverse_bisect = 1;
     } else {
@@ -1907,8 +2223,31 @@ sub bisect {
        $type = "boot";
     }
 
-    my $check = $opt{"BISECT_CHECK[$i]"};
-    if (defined($check) && $check ne "0") {
+    # Check if a bisect was running
+    my $bisect_start_file = "$builddir/.git/BISECT_START";
+
+    my $check = $bisect_check;
+    my $do_check = defined($check) && $check ne "0";
+
+    if ( -f $bisect_start_file ) {
+       print "Bisect in progress found\n";
+       if ($do_check) {
+           print " If you say yes, then no checks of good or bad will be done\n";
+       }
+       if (defined($replay)) {
+           print "** BISECT_REPLAY is defined in config file **\n";
+           print " Ignore the config option and use the current git bisect log instead?\n";
+           if (read_ync " (yes, no, or cancel) ") {
+               $replay = update_bisect_replay;
+               $do_check = 0;
+           }
+       } elsif (read_yn "read git log and continue?") {
+           $replay = update_bisect_replay;
+           $do_check = 0;
+       }
+    }
+
+    if ($do_check) {
 
        # get current HEAD
        my $head = get_sha1("HEAD");
@@ -1973,7 +2312,7 @@ sub bisect {
     run_command "git bisect reset" or
        dodie "could not reset git bisect";
 
-    doprint "Bad commit was [$bisect_bad]\n";
+    doprint "Bad commit was [$bisect_bad_commit]\n";
 
     success $i;
 }
@@ -2129,7 +2468,7 @@ sub run_config_bisect {
     }
 
     doprint "***** RUN TEST ***\n";
-    my $type = $opt{"CONFIG_BISECT_TYPE[$iteration]"};
+    my $type = $config_bisect_type;
     my $ret;
     my %current_config;
 
@@ -2233,7 +2572,7 @@ sub run_config_bisect {
 sub config_bisect {
     my ($i) = @_;
 
-    my $start_config = $opt{"CONFIG_BISECT[$i]"};
+    my $start_config = $config_bisect;
 
     my $tmpconfig = "$tmpdir/use_config";
 
@@ -2346,29 +2685,29 @@ sub config_bisect {
 
 sub patchcheck_reboot {
     doprint "Reboot and sleep $patchcheck_sleep_time seconds\n";
-    reboot $patchcheck_sleep_time;
+    reboot_to_good $patchcheck_sleep_time;
 }
 
 sub patchcheck {
     my ($i) = @_;
 
     die "PATCHCHECK_START[$i] not defined\n"
-       if (!defined($opt{"PATCHCHECK_START[$i]"}));
+       if (!defined($patchcheck_start));
     die "PATCHCHECK_TYPE[$i] not defined\n"
-       if (!defined($opt{"PATCHCHECK_TYPE[$i]"}));
+       if (!defined($patchcheck_type));
 
-    my $start = $opt{"PATCHCHECK_START[$i]"};
+    my $start = $patchcheck_start;
 
     my $end = "HEAD";
-    if (defined($opt{"PATCHCHECK_END[$i]"})) {
-       $end = $opt{"PATCHCHECK_END[$i]"};
+    if (defined($patchcheck_end)) {
+       $end = $patchcheck_end;
     }
 
     # Get the true sha1's since we can use things like HEAD~3
     $start = get_sha1($start);
     $end = get_sha1($end);
 
-    my $type = $opt{"PATCHCHECK_TYPE[$i]"};
+    my $type = $patchcheck_type;
 
     # Can't have a test without having a test to run
     if ($type eq "test" && !defined($run_test)) {
@@ -2963,7 +3302,7 @@ sub make_min_config {
        }
 
        doprint "Reboot and wait $sleep_time seconds\n";
-       reboot $sleep_time;
+       reboot_to_good $sleep_time;
     }
 
     success $i;
@@ -2985,13 +3324,27 @@ if ($#ARGV == 0) {
 }
 
 if (! -f $ktest_config) {
+    $newconfig = 1;
+    get_test_case;
     open(OUT, ">$ktest_config") or die "Can not create $ktest_config";
     print OUT << "EOF"
 # Generated by ktest.pl
 #
+
+# PWD is a ktest.pl variable that evaluates to the working directory
+# that ktest.pl is executed in.
+
+# THIS_DIR is automatically assigned the PWD of the path that generated
+# the config file. It is best to use this variable when assigning other
+# directory paths within this directory. This allows you to easily
+# move the test cases to other locations or to other machines.
+#
+THIS_DIR := $variable{"PWD"}
+
 # Define each test with TEST_START
 # The config options below it will override the defaults
 TEST_START
+TEST_TYPE = $default{"TEST_TYPE"}
 
 DEFAULTS
 EOF
@@ -3011,7 +3364,7 @@ if ($#new_configs >= 0) {
     open(OUT, ">>$ktest_config") or die "Can not append to $ktest_config";
     foreach my $config (@new_configs) {
        print OUT "$config = $entered_configs{$config}\n";
-       $opt{$config} = $entered_configs{$config};
+       $opt{$config} = process_variables($entered_configs{$config});
     }
 }
 
@@ -3091,61 +3444,10 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
 
     my $makecmd = set_test_option("MAKE_CMD", $i);
 
-    $machine = set_test_option("MACHINE", $i);
-    $ssh_user = set_test_option("SSH_USER", $i);
-    $tmpdir = set_test_option("TMP_DIR", $i);
-    $outputdir = set_test_option("OUTPUT_DIR", $i);
-    $builddir = set_test_option("BUILD_DIR", $i);
-    $test_type = set_test_option("TEST_TYPE", $i);
-    $build_type = set_test_option("BUILD_TYPE", $i);
-    $build_options = set_test_option("BUILD_OPTIONS", $i);
-    $pre_build = set_test_option("PRE_BUILD", $i);
-    $post_build = set_test_option("POST_BUILD", $i);
-    $pre_build_die = set_test_option("PRE_BUILD_DIE", $i);
-    $post_build_die = set_test_option("POST_BUILD_DIE", $i);
-    $power_cycle = set_test_option("POWER_CYCLE", $i);
-    $reboot = set_test_option("REBOOT", $i);
-    $noclean = set_test_option("BUILD_NOCLEAN", $i);
-    $minconfig = set_test_option("MIN_CONFIG", $i);
-    $output_minconfig = set_test_option("OUTPUT_MIN_CONFIG", $i);
-    $start_minconfig = set_test_option("START_MIN_CONFIG", $i);
-    $ignore_config = set_test_option("IGNORE_CONFIG", $i);
-    $run_test = set_test_option("TEST", $i);
-    $addconfig = set_test_option("ADD_CONFIG", $i);
-    $reboot_type = set_test_option("REBOOT_TYPE", $i);
-    $grub_menu = set_test_option("GRUB_MENU", $i);
-    $post_install = set_test_option("POST_INSTALL", $i);
-    $no_install = set_test_option("NO_INSTALL", $i);
-    $reboot_script = set_test_option("REBOOT_SCRIPT", $i);
-    $reboot_on_error = set_test_option("REBOOT_ON_ERROR", $i);
-    $poweroff_on_error = set_test_option("POWEROFF_ON_ERROR", $i);
-    $die_on_failure = set_test_option("DIE_ON_FAILURE", $i);
-    $power_off = set_test_option("POWER_OFF", $i);
-    $powercycle_after_reboot = set_test_option("POWERCYCLE_AFTER_REBOOT", $i);
-    $poweroff_after_halt = set_test_option("POWEROFF_AFTER_HALT", $i);
-    $sleep_time = set_test_option("SLEEP_TIME", $i);
-    $bisect_sleep_time = set_test_option("BISECT_SLEEP_TIME", $i);
-    $patchcheck_sleep_time = set_test_option("PATCHCHECK_SLEEP_TIME", $i);
-    $ignore_warnings = set_test_option("IGNORE_WARNINGS", $i);
-    $bisect_manual = set_test_option("BISECT_MANUAL", $i);
-    $bisect_skip = set_test_option("BISECT_SKIP", $i);
-    $config_bisect_good = set_test_option("CONFIG_BISECT_GOOD", $i);
-    $store_failures = set_test_option("STORE_FAILURES", $i);
-    $test_name = set_test_option("TEST_NAME", $i);
-    $timeout = set_test_option("TIMEOUT", $i);
-    $booted_timeout = set_test_option("BOOTED_TIMEOUT", $i);
-    $console = set_test_option("CONSOLE", $i);
-    $detect_triplefault = set_test_option("DETECT_TRIPLE_FAULT", $i);
-    $success_line = set_test_option("SUCCESS_LINE", $i);
-    $reboot_success_line = set_test_option("REBOOT_SUCCESS_LINE", $i);
-    $stop_after_success = set_test_option("STOP_AFTER_SUCCESS", $i);
-    $stop_after_failure = set_test_option("STOP_AFTER_FAILURE", $i);
-    $stop_test_after = set_test_option("STOP_TEST_AFTER", $i);
-    $build_target = set_test_option("BUILD_TARGET", $i);
-    $ssh_exec = set_test_option("SSH_EXEC", $i);
-    $scp_to_target = set_test_option("SCP_TO_TARGET", $i);
-    $target_image = set_test_option("TARGET_IMAGE", $i);
-    $localversion = set_test_option("LOCALVERSION", $i);
+    # Load all the options into their mapped variable names
+    foreach my $opt (keys %option_map) {
+       ${$option_map{$opt}} = set_test_option($opt, $i);
+    }
 
     $start_minconfig_defined = 1;
 
@@ -3166,26 +3468,26 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
     $ENV{"SSH_USER"} = $ssh_user;
     $ENV{"MACHINE"} = $machine;
 
-    $target = "$ssh_user\@$machine";
-
     $buildlog = "$tmpdir/buildlog-$machine";
+    $testlog = "$tmpdir/testlog-$machine";
     $dmesg = "$tmpdir/dmesg-$machine";
     $make = "$makecmd O=$outputdir";
     $output_config = "$outputdir/.config";
 
-    if ($reboot_type eq "grub") {
-       dodie "GRUB_MENU not defined" if (!defined($grub_menu));
-    } elsif (!defined($reboot_script)) {
-       dodie "REBOOT_SCRIPT not defined"
+    if (!$buildonly) {
+       $target = "$ssh_user\@$machine";
+       if ($reboot_type eq "grub") {
+           dodie "GRUB_MENU not defined" if (!defined($grub_menu));
+       }
     }
 
     my $run_type = $build_type;
     if ($test_type eq "patchcheck") {
-       $run_type = $opt{"PATCHCHECK_TYPE[$i]"};
+       $run_type = $patchcheck_type;
     } elsif ($test_type eq "bisect") {
-       $run_type = $opt{"BISECT_TYPE[$i]"};
+       $run_type = $bisect_type;
     } elsif ($test_type eq "config_bisect") {
-       $run_type = $opt{"CONFIG_BISECT_TYPE[$i]"};
+       $run_type = $config_bisect_type;
     }
 
     if ($test_type eq "make_min_config") {
@@ -3205,6 +3507,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
 
     unlink $dmesg;
     unlink $buildlog;
+    unlink $testlog;
 
     if (defined($addconfig)) {
        my $min = $minconfig;
@@ -3216,7 +3519,6 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
        $minconfig = "$tmpdir/add_config";
     }
 
-    my $checkout = $opt{"CHECKOUT[$i]"};
     if (defined($checkout)) {
        run_command "git checkout $checkout" or
            die "failed to checkout $checkout";
@@ -3267,7 +3569,7 @@ for (my $i = 1; $i <= $opt{"NUM_TESTS"}; $i++) {
 if ($opt{"POWEROFF_ON_SUCCESS"}) {
     halt;
 } elsif ($opt{"REBOOT_ON_SUCCESS"} && !do_not_reboot) {
-    reboot;
+    reboot_to_good;
 }
 
 doprint "\n    $successes of $opt{NUM_TESTS} tests were successful\n\n";
index 553c06b7d6f27ad8e72f8e8e943321271099a237..5ea04c6a71bfc830feca1c10c144264ba97a5db3 100644 (file)
 #GRUB_MENU = Test Kernel
 
 # A script to reboot the target into the test kernel
-# (Only mandatory if REBOOT_TYPE = script)
+# This and SWITCH_TO_TEST do roughly the same thing, except that
+# SWITCH_TO_TEST is run even when REBOOT_TYPE = grub.
+# This may be left undefined.
+# (default undefined)
 #REBOOT_SCRIPT =
 
 #### Optional Config Options (all have defaults) ####
 # The test will not modify that file.
 #REBOOT_TYPE = grub
 
+# If you are using a machine that doesn't boot with grub, and
+# perhaps gets its kernel from a remote server (tftp), then
+# you can use this option to update the target image with the
+# test image.
+#
+# You could also do the same with POST_INSTALL, but the difference
+# between that option and this option is that POST_INSTALL runs
+# after the install, whereas this one runs just before a reboot.
+# (default undefined)
+#SWITCH_TO_TEST = cp ${OUTPUT_DIR}/${BUILD_TARGET} ${TARGET_IMAGE}
+
+# If you are using a machine that doesn't boot with grub, and
+# perhaps gets its kernel from a remote server (tftp), then
+# you can use this option to update the target image with the
+# known good image to reboot safely back into.
+#
+# This option holds a command that will execute before needing
+# to reboot to a good known image.
+# (default undefined)
+#SWITCH_TO_GOOD = ssh ${SSH_USER}@${MACHINE} cp good_image ${TARGET_IMAGE}
+
 # The min config that is needed to build for the machine
 # A nice way to create this is with the following:
 #
 # (default undefined)
 #STORE_FAILURES = /home/test/failures
 
+# Directory to store per-test success directories in. If this is not
+# set, the .config, dmesg, buildlog and testlog will not be saved when a
+# test succeeds.
+# (default undefined)
+#STORE_SUCCESSES = /home/test/successes
+
 # Build without doing a make mrproper, or removing .config
 # (default 0)
 #BUILD_NOCLEAN = 0
 # (default 1)
 #DETECT_TRIPLE_FAULT = 0
 
+# All options in the config file should either be used by ktest
+# or be referenced within the value of another option. If an option
+# in the config file is not used, ktest will warn about it and ask
+# if you want to continue.
+#
+# If you don't care whether there are unused options, enable this
+# option. Be careful though: an unused option is usually a sign
+# of a mistyped option name.
+# (default 0)
+#IGNORE_UNUSED = 1
+
+# When testing a kernel that happens to produce WARNINGs and call
+# traces, ktest.pl will detect these and fail a boot or test run.
+# By setting this option, ktest will ignore call traces and will
+# not fail a test if the kernel produces an oops. Use this option
+# with care.
+# (default 0)
+#IGNORE_ERRORS = 1
+
 #### Per test run options ####
 # The following options are only allowed in TEST_START sections.
 # They are ignored in the DEFAULTS sections.
 #   BISECT_BAD with BISECT_CHECK = good or
 #   BISECT_CHECK = bad, respectively.
 #
+# BISECT_RET_GOOD = 0 (optional, default undefined)
+#
+#   In case the specified test returns something other than just
+#   0 for good, and non-zero for bad, you can override 0 being
+#   good by defining BISECT_RET_GOOD.
+#
+# BISECT_RET_BAD = 1 (optional, default undefined)
+#
+#   In case the specified test returns something other than just
+#   0 for good, and non-zero for bad, you can override non-zero being
+#   bad by defining BISECT_RET_BAD.
+#
+# BISECT_RET_ABORT = 255 (optional, default undefined)
+#
+#   If the test discovers something wrong and the bisect should be
+#   aborted, define BISECT_RET_ABORT to the error code the test
+#   returns in that case.
+#
+# BISECT_RET_SKIP = 2 (optional, default undefined)
+#
+#   If the test detects that the current commit is neither good
+#   nor bad, but something else happened (another bug was detected),
+#   set BISECT_RET_SKIP to the error code the test returns when
+#   the current commit should be skipped.
+#
+# BISECT_RET_DEFAULT = good (optional, default undefined)
+#
+#   You can override the default action taken when none of the
+#   above options match. This may be one of "good", "bad",
+#   "abort" or "skip" (without the quotes).
+#
+#   Note: if you define BISECT_RET_DEFAULT without defining any of
+#   the previous BISECT_RET_* options, every bisect result will do
+#   what BISECT_RET_DEFAULT specifies.
+#
+#
 # Example:
 #   TEST_START
 #   TEST_TYPE = bisect
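#
# A hypothetical example combining the BISECT_RET_* options above with a
# bisect driven by a test script (the script name and return codes are
# made up for illustration):
#
#   TEST_START
#   TEST_TYPE = bisect
#   BISECT_TYPE = test
#   TEST = ${THIS_DIR}/check_for_bug.sh
#   BISECT_GOOD = v3.2
#   BISECT_BAD = HEAD
#   BISECT_RET_GOOD = 0
#   BISECT_RET_BAD = 1
#   BISECT_RET_SKIP = 2
#   BISECT_RET_ABORT = 255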
index 7287bf5d1c9edc1fa84681aea4f989c9c750fa9c..a91f980077d843ca319dfe5d7b3b95e252e02bc3 100644 (file)
@@ -1543,7 +1543,7 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
        if (memslot && memslot->dirty_bitmap) {
                unsigned long rel_gfn = gfn - memslot->base_gfn;
 
-               if (!__test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
+               if (!test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap))
                        memslot->nr_dirty_pages++;
        }
 }