Merge branches 'dma-debug', 'iommu/fixes', 'arm/tegra', 'arm/exynos', 'x86/amd',...
author    Joerg Roedel <joerg.roedel@amd.com>
          Tue, 2 Oct 2012 12:40:03 +0000 (14:40 +0200)
committer Joerg Roedel <joerg.roedel@amd.com>
          Tue, 2 Oct 2012 12:40:03 +0000 (14:40 +0200)
Conflicts:
drivers/iommu/amd_iommu_init.c

97 files changed:
Documentation/vfio.txt
MAINTAINERS
Makefile
arch/arm/mach-mxs/mach-mxs.c
arch/arm/mach-orion5x/common.c
arch/arm/mach-tegra/include/mach/smmu.h [deleted file]
arch/arm/mm/dma-mapping.c
arch/c6x/include/asm/Kbuild
arch/c6x/include/asm/barrier.h [deleted file]
arch/tile/include/gxio/iorpc_trio.h
arch/um/include/asm/processor-generic.h
arch/um/include/shared/common-offsets.h
arch/um/include/shared/user.h
arch/um/kernel/exec.c
arch/um/kernel/process.c
arch/um/kernel/signal.c
arch/um/kernel/syscall.c
arch/um/scripts/Makefile.rules
arch/x86/um/Kconfig
arch/x86/um/shared/sysdep/kernel-offsets.h
arch/x86/um/shared/sysdep/syscalls.h
arch/x86/um/signal.c
arch/x86/um/sys_call_table_32.c
arch/x86/um/syscalls_32.c
arch/x86/um/syscalls_64.c
arch/x86/xen/setup.c
drivers/block/nvme.c
drivers/block/rbd.c
drivers/edac/i3200_edac.c
drivers/edac/i5000_edac.c
drivers/edac/sb_edac.c
drivers/gpio/gpio-lpc32xx.c
drivers/gpu/drm/nouveau/nouveau_abi16.c
drivers/gpu/drm/nouveau/nvc0_fb.c
drivers/gpu/drm/nouveau/nvc0_fifo.c
drivers/gpu/drm/nouveau/nve0_fifo.c
drivers/gpu/drm/udl/udl_connector.c
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_types.h
drivers/iommu/exynos-iommu.c
drivers/iommu/intel-iommu.c
drivers/iommu/tegra-smmu.c
drivers/md/dm-mpath.c
drivers/md/dm-table.c
drivers/md/dm-thin.c
drivers/md/dm-verity.c
drivers/md/dm.c
drivers/md/dm.h
drivers/md/raid10.c
drivers/md/raid5.c
drivers/mtd/mtdchar.c
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/octeon/octeon_mgmt.c
drivers/net/ethernet/pasemi/pasemi_mac.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
drivers/net/phy/bcm87xx.c
drivers/net/phy/micrel.c
drivers/net/phy/smsc.c
drivers/net/ppp/pppoe.c
drivers/net/team/team.c
drivers/net/usb/smsc75xx.c
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/sh/pfc/pinctrl.c
drivers/usb/core/devices.c
drivers/usb/core/hcd.c
drivers/usb/host/ohci-at91.c
drivers/vfio/pci/vfio_pci_intrs.c
fs/dcache.c
fs/lockd/svclock.c
fs/namespace.c
include/asm-generic/unistd.h
include/linux/iommu.h
include/linux/micrel_phy.h
include/linux/nvme.h
include/linux/security.h
lib/dma-debug.c
lib/flex_proportions.c
mm/huge_memory.c
net/batman-adv/bat_iv_ogm.c
net/batman-adv/soft-interface.c
net/bluetooth/hci_core.c
net/bluetooth/l2cap_core.c
net/bluetooth/mgmt.c
net/ceph/messenger.c
net/core/sock.c
net/ipv4/inetpeer.c
net/ipv4/raw.c
net/ipv6/mip6.c
net/ipv6/raw.c
net/l2tp/l2tp_netlink.c
net/netfilter/xt_limit.c
net/wireless/reg.c
scripts/checksyscalls.sh
sound/soc/codecs/wm2000.c
sound/usb/endpoint.c

diff --git a/Documentation/vfio.txt b/Documentation/vfio.txt
index 0cb6685c802904c0d7becccb00e3a00321bfb928..8eda3635a17da0e8d610a01e88d928a68f833f93 100644
@@ -133,7 +133,7 @@ character devices for this group:
 $ lspci -n -s 0000:06:0d.0
 06:0d.0 0401: 1102:0002 (rev 08)
 # echo 0000:06:0d.0 > /sys/bus/pci/devices/0000:06:0d.0/driver/unbind
-# echo 1102 0002 > /sys/bus/pci/drivers/vfio/new_id
+# echo 1102 0002 > /sys/bus/pci/drivers/vfio-pci/new_id
 
 Now we need to look at what other devices are in the group to free
 it for use by VFIO:
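
Note: new_id must be written to the driver that is meant to claim the device,
hence vfio-pci rather than vfio. A minimal user-space sketch of the same two
steps in C (device address and vendor/device IDs copied from the example
above; this program is illustrative, not part of the patch):

#include <stdio.h>

static int write_str(const char *path, const char *val)
{
        FILE *f = fopen(path, "w");

        if (!f)
                return -1;
        fprintf(f, "%s", val);
        return fclose(f);
}

int main(void)
{
        /* Detach the device from its host driver... */
        write_str("/sys/bus/pci/devices/0000:06:0d.0/driver/unbind",
                  "0000:06:0d.0");
        /* ...and tell vfio-pci to claim this vendor:device ID. */
        return write_str("/sys/bus/pci/drivers/vfio-pci/new_id", "1102 0002");
}
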
diff --git a/MAINTAINERS b/MAINTAINERS
index b17587d9412f04318dab5e7368f6e5306ca57b91..9a6c4da3b2ff2cd77e4ee01a631a9cf9433b4017 100644
@@ -3552,11 +3552,12 @@ K:      \b(ABS|SYN)_MT_
 
 INTEL C600 SERIES SAS CONTROLLER DRIVER
 M:     Intel SCU Linux support <intel-linux-scu@intel.com>
+M:     Lukasz Dorau <lukasz.dorau@intel.com>
+M:     Maciej Patelczyk <maciej.patelczyk@intel.com>
 M:     Dave Jiang <dave.jiang@intel.com>
-M:     Ed Nadolski <edmund.nadolski@intel.com>
 L:     linux-scsi@vger.kernel.org
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/djbw/isci.git
-S:     Maintained
+T:     git git://git.code.sf.net/p/intel-sas/isci
+S:     Supported
 F:     drivers/scsi/isci/
 F:     firmware/isci/
 
@@ -5544,6 +5545,8 @@ F:        Documentation/devicetree/bindings/pwm/
 F:     include/linux/pwm.h
 F:     include/linux/of_pwm.h
 F:     drivers/pwm/
+F:     drivers/video/backlight/pwm_bl.c
+F:     include/linux/pwm_backlight.h
 
 PXA2xx/PXA3xx SUPPORT
 M:     Eric Miao <eric.y.miao@gmail.com>
diff --git a/Makefile b/Makefile
index a3c11d589681d9a53db5cf31ca8a4e1c1b9246a5..bb9fff26f078063c395fe8f86488349614c84261 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 6
 SUBLEVEL = 0
-EXTRAVERSION = -rc7
+EXTRAVERSION =
 NAME = Terrified Chipmunk
 
 # *DOCUMENTATION*
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
index 8dabfe81d07c5f143626f10c57cfb993e87c6a1a..ff886e01a0b0371367ae49f3d0d323e547845478 100644
@@ -261,7 +261,7 @@ static void __init apx4devkit_init(void)
        enable_clk_enet_out();
 
        if (IS_BUILTIN(CONFIG_PHYLIB))
-               phy_register_fixup_for_uid(PHY_ID_KS8051, MICREL_PHY_ID_MASK,
+               phy_register_fixup_for_uid(PHY_ID_KSZ8051, MICREL_PHY_ID_MASK,
                                           apx4devkit_phy_fixup);
 
        mxsfb_pdata.mode_list = apx4devkit_video_modes;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index 410291c676668befcfe5b43723af071ba153e5db..a6cd14ab1e4e6f4c27103baf780df091de0cfb93 100644
@@ -204,6 +204,13 @@ void __init orion5x_wdt_init(void)
 void __init orion5x_init_early(void)
 {
        orion_time_set_base(TIMER_VIRT_BASE);
+
+       /*
+        * Some Orion5x devices allocate their coherent buffers from atomic
+        * context. Increase size of atomic coherent pool to make sure such
+        * the allocations won't fail.
+        */
+       init_dma_coherent_pool_size(SZ_1M);
 }
 
 int orion5x_tclk;
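
Note: the comment in the hunk states the motivation; concretely, an allocation
like the following (a hypothetical driver fragment, not from this patch) must
be served from the atomic pool, because GFP_ATOMIC forbids sleeping or
remapping, which is why the pool is enlarged to SZ_1M above:

#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical fragment: a coherent allocation made from atomic (e.g.
 * interrupt) context draws on the pool sized by
 * init_dma_coherent_pool_size(). */
static void *alloc_ring_atomic(struct device *dev, dma_addr_t *phys)
{
        return dma_alloc_coherent(dev, SZ_4K, phys, GFP_ATOMIC);
}
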
diff --git a/arch/arm/mach-tegra/include/mach/smmu.h b/arch/arm/mach-tegra/include/mach/smmu.h
deleted file mode 100644
index dad403a..0000000
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * IOMMU API for SMMU in Tegra30
- *
- * Copyright (c) 2012, NVIDIA CORPORATION.  All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef        MACH_SMMU_H
-#define        MACH_SMMU_H
-
-enum smmu_hwgrp {
-       HWGRP_AFI,
-       HWGRP_AVPC,
-       HWGRP_DC,
-       HWGRP_DCB,
-       HWGRP_EPP,
-       HWGRP_G2,
-       HWGRP_HC,
-       HWGRP_HDA,
-       HWGRP_ISP,
-       HWGRP_MPE,
-       HWGRP_NV,
-       HWGRP_NV2,
-       HWGRP_PPCS,
-       HWGRP_SATA,
-       HWGRP_VDE,
-       HWGRP_VI,
-
-       HWGRP_COUNT,
-
-       HWGRP_END = ~0,
-};
-
-#define HWG_AFI                (1 << HWGRP_AFI)
-#define HWG_AVPC       (1 << HWGRP_AVPC)
-#define HWG_DC         (1 << HWGRP_DC)
-#define HWG_DCB                (1 << HWGRP_DCB)
-#define HWG_EPP                (1 << HWGRP_EPP)
-#define HWG_G2         (1 << HWGRP_G2)
-#define HWG_HC         (1 << HWGRP_HC)
-#define HWG_HDA                (1 << HWGRP_HDA)
-#define HWG_ISP                (1 << HWGRP_ISP)
-#define HWG_MPE                (1 << HWGRP_MPE)
-#define HWG_NV         (1 << HWGRP_NV)
-#define HWG_NV2                (1 << HWGRP_NV2)
-#define HWG_PPCS       (1 << HWGRP_PPCS)
-#define HWG_SATA       (1 << HWGRP_SATA)
-#define HWG_VDE                (1 << HWGRP_VDE)
-#define HWG_VI         (1 << HWGRP_VI)
-
-#endif /* MACH_SMMU_H */
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index e59c4ab71bcb78282f968cebbda09c43b49809ff..13f555d62491e59fbae0127f89e3f2b2395d3abf 100644
@@ -346,6 +346,8 @@ static int __init atomic_pool_init(void)
                       (unsigned)pool->size / 1024);
                return 0;
        }
+
+       kfree(pages);
 no_pages:
        kfree(bitmap);
 no_bitmap:
diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild
index 3af601e31e66ea5180ee498b04009c431f33e989..f08e89183cda452844fda74bc27a4db8823647d1 100644
@@ -2,6 +2,7 @@ include include/asm-generic/Kbuild.asm
 
 generic-y += atomic.h
 generic-y += auxvec.h
+generic-y += barrier.h
 generic-y += bitsperlong.h
 generic-y += bugs.h
 generic-y += cputime.h
diff --git a/arch/c6x/include/asm/barrier.h b/arch/c6x/include/asm/barrier.h
deleted file mode 100644
index 538240e..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- *  Port on Texas Instruments TMS320C6x architecture
- *
- *  Copyright (C) 2004, 2009, 2010, 2011 Texas Instruments Incorporated
- *  Author: Aurelien Jacquiot (aurelien.jacquiot@jaluna.com)
- *
- *  This program is free software; you can redistribute it and/or modify
- *  it under the terms of the GNU General Public License version 2 as
- *  published by the Free Software Foundation.
- */
-#ifndef _ASM_C6X_BARRIER_H
-#define _ASM_C6X_BARRIER_H
-
-#define nop()                    asm("NOP\n");
-
-#define mb()                     barrier()
-#define rmb()                    barrier()
-#define wmb()                    barrier()
-#define set_mb(var, value)       do { var = value;  mb(); } while (0)
-#define set_wmb(var, value)      do { var = value; wmb(); } while (0)
-
-#define smp_mb()                barrier()
-#define smp_rmb()               barrier()
-#define smp_wmb()               barrier()
-#define smp_read_barrier_depends()     do { } while (0)
-
-#endif /* _ASM_C6X_BARRIER_H */
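
Note: the header can be deleted outright because the generic-y entry added to
arch/c6x/include/asm/Kbuild above tells Kbuild to generate a trivial wrapper
in its place; the generated file is essentially (a sketch, not verbatim
output):

/* arch/c6x/include/generated/asm/barrier.h - emitted by Kbuild */
#include <asm-generic/barrier.h>
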
diff --git a/arch/tile/include/gxio/iorpc_trio.h b/arch/tile/include/gxio/iorpc_trio.h
index 15fb779920831eb38b110ec5530b1562d647171b..58105c31228b3b6643783e926b2db7886739ba01 100644
 #include <linux/module.h>
 #include <asm/pgtable.h>
 
-#define GXIO_TRIO_OP_ALLOC_ASIDS       IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400)
+#define GXIO_TRIO_OP_DEALLOC_ASID      IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1400)
+#define GXIO_TRIO_OP_ALLOC_ASIDS       IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1401)
 
-#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1402)
+#define GXIO_TRIO_OP_ALLOC_MEMORY_MAPS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1404)
 
-#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140e)
-#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x140f)
+#define GXIO_TRIO_OP_ALLOC_PIO_REGIONS IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1412)
 
-#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1417)
-#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1418)
-#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1419)
-#define GXIO_TRIO_OP_CONFIG_MSI_INTR   IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x141a)
+#define GXIO_TRIO_OP_INIT_PIO_REGION_AUX IORPC_OPCODE(IORPC_FORMAT_NONE, 0x1414)
 
-#define GXIO_TRIO_OP_SET_MPS_MRS       IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141c)
-#define GXIO_TRIO_OP_FORCE_RC_LINK_UP  IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141d)
-#define GXIO_TRIO_OP_FORCE_EP_LINK_UP  IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e)
+#define GXIO_TRIO_OP_INIT_MEMORY_MAP_MMU_AUX IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141e)
+#define GXIO_TRIO_OP_GET_PORT_PROPERTY IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x141f)
+#define GXIO_TRIO_OP_CONFIG_LEGACY_INTR IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1420)
+#define GXIO_TRIO_OP_CONFIG_MSI_INTR   IORPC_OPCODE(IORPC_FORMAT_KERNEL_INTERRUPT, 0x1421)
+
+#define GXIO_TRIO_OP_SET_MPS_MRS       IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1423)
+#define GXIO_TRIO_OP_FORCE_RC_LINK_UP  IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1424)
+#define GXIO_TRIO_OP_FORCE_EP_LINK_UP  IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x1425)
 #define GXIO_TRIO_OP_GET_MMIO_BASE     IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8000)
 #define GXIO_TRIO_OP_CHECK_MMIO_OFFSET IORPC_OPCODE(IORPC_FORMAT_NONE_NOUSER, 0x8001)
 
diff --git a/arch/um/include/asm/processor-generic.h b/arch/um/include/asm/processor-generic.h
index 69f1c57a8d0dc895c727b74e1a53f8acd6c94a4c..33a6a2423bd25638a2b93f20fdc77bcf6e0f1737 100644
@@ -20,14 +20,6 @@ struct mm_struct;
 
 struct thread_struct {
        struct task_struct *saved_task;
-       /*
-        * This flag is set to 1 before calling do_fork (and analyzed in
-        * copy_thread) to mark that we are begin called from userspace (fork /
-        * vfork / clone), and reset to 0 after. It is left to 0 when called
-        * from kernelspace (i.e. kernel_thread() or fork_idle(),
-        * as of 2.6.11).
-        */
-       int forking;
        struct pt_regs regs;
        int singlestep_syscall;
        void *fault_addr;
@@ -58,7 +50,6 @@ struct thread_struct {
 
 #define INIT_THREAD \
 { \
-       .forking                = 0, \
        .regs                   = EMPTY_REGS,   \
        .fault_addr             = NULL, \
        .prev_sched             = NULL, \
diff --git a/arch/um/include/shared/common-offsets.h b/arch/um/include/shared/common-offsets.h
index 40db8f71deaef4eb19c10a09e4f01d51078d3c8d..2df313b6a586c82b6e1dec743d174b0ddd190a9e 100644
@@ -7,16 +7,6 @@ DEFINE(UM_KERN_PAGE_MASK, PAGE_MASK);
 DEFINE(UM_KERN_PAGE_SHIFT, PAGE_SHIFT);
 DEFINE(UM_NSEC_PER_SEC, NSEC_PER_SEC);
 
-DEFINE_STR(UM_KERN_EMERG, KERN_EMERG);
-DEFINE_STR(UM_KERN_ALERT, KERN_ALERT);
-DEFINE_STR(UM_KERN_CRIT, KERN_CRIT);
-DEFINE_STR(UM_KERN_ERR, KERN_ERR);
-DEFINE_STR(UM_KERN_WARNING, KERN_WARNING);
-DEFINE_STR(UM_KERN_NOTICE, KERN_NOTICE);
-DEFINE_STR(UM_KERN_INFO, KERN_INFO);
-DEFINE_STR(UM_KERN_DEBUG, KERN_DEBUG);
-DEFINE_STR(UM_KERN_CONT, KERN_CONT);
-
 DEFINE(UM_ELF_CLASS, ELF_CLASS);
 DEFINE(UM_ELFCLASS32, ELFCLASS32);
 DEFINE(UM_ELFCLASS64, ELFCLASS64);
diff --git a/arch/um/include/shared/user.h b/arch/um/include/shared/user.h
index 4fa82c055aab8d53576b06787aa1033644a16697..cef0685633369c05b6020f6071ae1a2ff094173a 100644
 extern void panic(const char *fmt, ...)
        __attribute__ ((format (printf, 1, 2)));
 
+/* Requires preincluding include/linux/kern_levels.h */
+#define UM_KERN_EMERG  KERN_EMERG
+#define UM_KERN_ALERT  KERN_ALERT
+#define UM_KERN_CRIT   KERN_CRIT
+#define UM_KERN_ERR    KERN_ERR
+#define UM_KERN_WARNING        KERN_WARNING
+#define UM_KERN_NOTICE KERN_NOTICE
+#define UM_KERN_INFO   KERN_INFO
+#define UM_KERN_DEBUG  KERN_DEBUG
+#define UM_KERN_CONT   KERN_CONT
+
 #ifdef UML_CONFIG_PRINTK
 extern int printk(const char *fmt, ...)
        __attribute__ ((format (printf, 1, 2)));
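
Note: call sites do not change. Each UM_KERN_* name now aliases the
corresponding KERN_* string (made visible by the -include of kern_levels.h
added to arch/um/scripts/Makefile.rules further down), so the usual literal
concatenation still works. An illustrative use, not taken from this patch:

/* Fragment from a hypothetical USER_OBJS file: */
printk(UM_KERN_WARNING "unexpected fd state: %d\n", fd);
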
diff --git a/arch/um/kernel/exec.c b/arch/um/kernel/exec.c
index 6cade9366364d03f2fd58e2cee85336ffdea56e5..8c82786da823df2cdf53ad9361b9b950d85018c7 100644
@@ -39,34 +39,21 @@ void flush_thread(void)
 
 void start_thread(struct pt_regs *regs, unsigned long eip, unsigned long esp)
 {
+       get_safe_registers(regs->regs.gp, regs->regs.fp);
        PT_REGS_IP(regs) = eip;
        PT_REGS_SP(regs) = esp;
-}
-EXPORT_SYMBOL(start_thread);
-
-static long execve1(const char *file,
-                   const char __user *const __user *argv,
-                   const char __user *const __user *env)
-{
-       long error;
-
-       error = do_execve(file, argv, env, &current->thread.regs);
-       if (error == 0) {
-               task_lock(current);
-               current->ptrace &= ~PT_DTRACE;
+       current->ptrace &= ~PT_DTRACE;
 #ifdef SUBARCH_EXECVE1
-               SUBARCH_EXECVE1(&current->thread.regs.regs);
+       SUBARCH_EXECVE1(regs->regs);
 #endif
-               task_unlock(current);
-       }
-       return error;
 }
+EXPORT_SYMBOL(start_thread);
 
 long um_execve(const char *file, const char __user *const __user *argv, const char __user *const __user *env)
 {
        long err;
 
-       err = execve1(file, argv, env);
+       err = do_execve(file, argv, env, &current->thread.regs);
        if (!err)
                UML_LONGJMP(current->thread.exec_buf, 1);
        return err;
@@ -81,7 +68,7 @@ long sys_execve(const char __user *file, const char __user *const __user *argv,
        filename = getname(file);
        error = PTR_ERR(filename);
        if (IS_ERR(filename)) goto out;
-       error = execve1(filename, argv, env);
+       error = do_execve(filename, argv, env, &current->thread.regs);
        putname(filename);
  out:
        return error;
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 57fc7028714a51af6fc19952d0947a913523ccec..c5f5afa5074571c2b5872bc157c9f199a80c9f5b 100644
@@ -181,11 +181,12 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                struct pt_regs *regs)
 {
        void (*handler)(void);
+       int kthread = current->flags & PF_KTHREAD;
        int ret = 0;
 
        p->thread = (struct thread_struct) INIT_THREAD;
 
-       if (current->thread.forking) {
+       if (!kthread) {
                memcpy(&p->thread.regs.regs, &regs->regs,
                       sizeof(p->thread.regs.regs));
                PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
@@ -195,8 +196,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
                handler = fork_handler;
 
                arch_copy_thread(&current->thread.arch, &p->thread.arch);
-       }
-       else {
+       } else {
                get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
                p->thread.request.u.thread = current->thread.request.u.thread;
                handler = new_thread_handler;
@@ -204,7 +204,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
 
        new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
 
-       if (current->thread.forking) {
+       if (!kthread) {
                clear_flushed_tls(p);
 
                /*
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 7362d58efc29612c1ffaad64558b3a986b820515..cc9c2350e41741647f074cc4f43d6c4e3599123f 100644
@@ -22,9 +22,13 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr,
                         struct k_sigaction *ka, siginfo_t *info)
 {
        sigset_t *oldset = sigmask_to_save();
+       int singlestep = 0;
        unsigned long sp;
        int err;
 
+       if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
+               singlestep = 1;
+
        /* Did we come from a system call? */
        if (PT_REGS_SYSCALL_NR(regs) >= 0) {
                /* If so, check system call restarting.. */
@@ -61,7 +65,7 @@ static void handle_signal(struct pt_regs *regs, unsigned long signr,
        if (err)
                force_sigsegv(signr, current);
        else
-               signal_delivered(signr, info, ka, regs, 0);
+               signal_delivered(signr, info, ka, regs, singlestep);
 }
 
 static int kern_do_signal(struct pt_regs *regs)
diff --git a/arch/um/kernel/syscall.c b/arch/um/kernel/syscall.c
index f958cb876ee3d3e47ddff71094e8026c0a110f5d..a4c6d8eee74c702999cc26955e250f40db30f553 100644
 
 long sys_fork(void)
 {
-       long ret;
-
-       current->thread.forking = 1;
-       ret = do_fork(SIGCHLD, UPT_SP(&current->thread.regs.regs),
+       return do_fork(SIGCHLD, UPT_SP(&current->thread.regs.regs),
                      &current->thread.regs, 0, NULL, NULL);
-       current->thread.forking = 0;
-       return ret;
 }
 
 long sys_vfork(void)
 {
-       long ret;
-
-       current->thread.forking = 1;
-       ret = do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
+       return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD,
                      UPT_SP(&current->thread.regs.regs),
                      &current->thread.regs, 0, NULL, NULL);
-       current->thread.forking = 0;
-       return ret;
+}
+
+long sys_clone(unsigned long clone_flags, unsigned long newsp,
+              void __user *parent_tid, void __user *child_tid)
+{
+       if (!newsp)
+               newsp = UPT_SP(&current->thread.regs.regs);
+
+       return do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
+                     child_tid);
 }
 
 long old_mmap(unsigned long addr, unsigned long len,
diff --git a/arch/um/scripts/Makefile.rules b/arch/um/scripts/Makefile.rules
index d50270d26b427ace5d5f7c8777094298859a54c3..15889df9b4663771ccddc00f7e0275645ceceb0f 100644
@@ -8,7 +8,7 @@ USER_OBJS += $(filter %_user.o,$(obj-y) $(obj-m)  $(USER_SINGLE_OBJS))
 USER_OBJS := $(foreach file,$(USER_OBJS),$(obj)/$(file))
 
 $(USER_OBJS:.o=.%): \
-       c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) -include user.h $(CFLAGS_$(basetarget).o)
+       c_flags = -Wp,-MD,$(depfile) $(USER_CFLAGS) -include $(srctree)/include/linux/kern_levels.h -include user.h $(CFLAGS_$(basetarget).o)
 
 # These are like USER_OBJS but filter USER_CFLAGS through unprofile instead of
 # using it directly.
diff --git a/arch/x86/um/Kconfig b/arch/x86/um/Kconfig
index 9926e11a772dbe9b5417fa204407c2ec9ee58878..aeaff8bef2f162642ef4ad31603a301384f4b1fe 100644
@@ -21,6 +21,7 @@ config 64BIT
 config X86_32
        def_bool !64BIT
        select HAVE_AOUT
+       select ARCH_WANT_IPC_PARSE_VERSION
 
 config X86_64
        def_bool 64BIT
diff --git a/arch/x86/um/shared/sysdep/kernel-offsets.h b/arch/x86/um/shared/sysdep/kernel-offsets.h
index 5868526b5eefa3017b784ad1a48c45b6be7ceacf..46a9df99f3c5c952601d7f153adf6bf8c1e3e937 100644
@@ -7,9 +7,6 @@
 #define DEFINE(sym, val) \
        asm volatile("\n->" #sym " %0 " #val : : "i" (val))
 
-#define STR(x) #x
-#define DEFINE_STR(sym, val) asm volatile("\n->" #sym " " STR(val) " " #val: : )
-
 #define BLANK() asm volatile("\n->" : : )
 
 #define OFFSET(sym, str, mem) \
diff --git a/arch/x86/um/shared/sysdep/syscalls.h b/arch/x86/um/shared/sysdep/syscalls.h
index bd9a89b67e41f20bdd17b2f38f5c9543ef28c7b1..ca255a805ed936ea059312b0990e884bb2b07e1b 100644
@@ -1,3 +1,5 @@
+extern long sys_clone(unsigned long clone_flags, unsigned long newsp,
+              void __user *parent_tid, void __user *child_tid);
 #ifdef __i386__
 #include "syscalls_32.h"
 #else
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index a508cea135033eba3c1c7facba8aebd04a1a0983..ba7363ecf896598c21534af5d09ef1c32e275668 100644
@@ -416,9 +416,6 @@ int setup_signal_stack_sc(unsigned long stack_top, int sig,
        PT_REGS_AX(regs) = (unsigned long) sig;
        PT_REGS_DX(regs) = (unsigned long) 0;
        PT_REGS_CX(regs) = (unsigned long) 0;
-
-       if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
-               ptrace_notify(SIGTRAP);
        return 0;
 }
 
@@ -466,9 +463,6 @@ int setup_signal_stack_si(unsigned long stack_top, int sig,
        PT_REGS_AX(regs) = (unsigned long) sig;
        PT_REGS_DX(regs) = (unsigned long) &frame->info;
        PT_REGS_CX(regs) = (unsigned long) &frame->uc;
-
-       if ((current->ptrace & PT_DTRACE) && (current->ptrace & PT_PTRACED))
-               ptrace_notify(SIGTRAP);
        return 0;
 }
 
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 68d1dc91b37badeaeff9fe517a06df2be6484f8a..b5408cecac6cd68f5f649b0a45749c1fdab40d77 100644
@@ -28,7 +28,7 @@
 #define ptregs_execve sys_execve
 #define ptregs_iopl sys_iopl
 #define ptregs_vm86old sys_vm86old
-#define ptregs_clone sys_clone
+#define ptregs_clone i386_clone
 #define ptregs_vm86 sys_vm86
 #define ptregs_sigaltstack sys_sigaltstack
 #define ptregs_vfork sys_vfork
diff --git a/arch/x86/um/syscalls_32.c b/arch/x86/um/syscalls_32.c
index b853e8600b9dc1d00e74151a7e16b8af61a8bfda..db444c7218fe53bcabf105b5e2303b7d21a5fd15 100644
@@ -3,37 +3,24 @@
  * Licensed under the GPL
  */
 
-#include "linux/sched.h"
-#include "linux/shm.h"
-#include "linux/ipc.h"
-#include "linux/syscalls.h"
-#include "asm/mman.h"
-#include "asm/uaccess.h"
-#include "asm/unistd.h"
+#include <linux/syscalls.h>
+#include <sysdep/syscalls.h>
 
 /*
  * The prototype on i386 is:
  *
- *     int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls, int * child_tidptr)
+ *     int clone(int flags, void * child_stack, int * parent_tidptr, struct user_desc * newtls
  *
  * and the "newtls" arg. on i386 is read by copy_thread directly from the
  * register saved on the stack.
  */
-long sys_clone(unsigned long clone_flags, unsigned long newsp,
-              int __user *parent_tid, void *newtls, int __user *child_tid)
+long i386_clone(unsigned long clone_flags, unsigned long newsp,
+               int __user *parent_tid, void *newtls, int __user *child_tid)
 {
-       long ret;
-
-       if (!newsp)
-               newsp = UPT_SP(&current->thread.regs.regs);
-
-       current->thread.forking = 1;
-       ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
-                     child_tid);
-       current->thread.forking = 0;
-       return ret;
+       return sys_clone(clone_flags, newsp, parent_tid, child_tid);
 }
 
+
 long sys_sigaction(int sig, const struct old_sigaction __user *act,
                         struct old_sigaction __user *oact)
 {
diff --git a/arch/x86/um/syscalls_64.c b/arch/x86/um/syscalls_64.c
index f3d82bb6e15a410b5e706617526efe9d56a42243..adb08eb5c22a38710299d3e1a05b3b55e2ee7e63 100644
@@ -5,12 +5,9 @@
  * Licensed under the GPL
  */
 
-#include "linux/linkage.h"
-#include "linux/personality.h"
-#include "linux/utsname.h"
-#include "asm/prctl.h" /* XXX This should get the constants from libc */
-#include "asm/uaccess.h"
-#include "os.h"
+#include <linux/sched.h>
+#include <asm/prctl.h> /* XXX This should get the constants from libc */
+#include <os.h>
 
 long arch_prctl(struct task_struct *task, int code, unsigned long __user *addr)
 {
@@ -79,20 +76,6 @@ long sys_arch_prctl(int code, unsigned long addr)
        return arch_prctl(current, code, (unsigned long __user *) addr);
 }
 
-long sys_clone(unsigned long clone_flags, unsigned long newsp,
-              void __user *parent_tid, void __user *child_tid)
-{
-       long ret;
-
-       if (!newsp)
-               newsp = UPT_SP(&current->thread.regs.regs);
-       current->thread.forking = 1;
-       ret = do_fork(clone_flags, newsp, &current->thread.regs, 0, parent_tid,
-                     child_tid);
-       current->thread.forking = 0;
-       return ret;
-}
-
 void arch_switch_to(struct task_struct *to)
 {
        if ((to->thread.arch.fs == 0) || (to->mm == NULL))
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index d11ca11d14fc094379e989a6b06fe2e5b2bc72e6..e2d62d697b5dffc60ad6ea53e2ad3f1ce33282a3 100644
@@ -17,6 +17,7 @@
 #include <asm/e820.h>
 #include <asm/setup.h>
 #include <asm/acpi.h>
+#include <asm/numa.h>
 #include <asm/xen/hypervisor.h>
 #include <asm/xen/hypercall.h>
 
@@ -544,4 +545,7 @@ void __init xen_arch_setup(void)
        disable_cpufreq();
        WARN_ON(set_pm_idle_to_default());
        fiddle_vdso();
+#ifdef CONFIG_NUMA
+       numa_off = 1;
+#endif
 }
diff --git a/drivers/block/nvme.c b/drivers/block/nvme.c
index 38a2d0631882ccd192ee475fc3b0f7a2e3b9d0cb..ad16c68c86451bd80dc18bbb7673e14d4611d947 100644
@@ -79,6 +79,7 @@ struct nvme_dev {
        char serial[20];
        char model[40];
        char firmware_rev[8];
+       u32 max_hw_sectors;
 };
 
 /*
@@ -835,15 +836,15 @@ static int nvme_identify(struct nvme_dev *dev, unsigned nsid, unsigned cns,
 }
 
 static int nvme_get_features(struct nvme_dev *dev, unsigned fid,
-                               unsigned dword11, dma_addr_t dma_addr)
+                               unsigned nsid, dma_addr_t dma_addr)
 {
        struct nvme_command c;
 
        memset(&c, 0, sizeof(c));
        c.features.opcode = nvme_admin_get_features;
+       c.features.nsid = cpu_to_le32(nsid);
        c.features.prp1 = cpu_to_le64(dma_addr);
        c.features.fid = cpu_to_le32(fid);
-       c.features.dword11 = cpu_to_le32(dword11);
 
        return nvme_submit_admin_cmd(dev, &c, NULL);
 }
@@ -862,11 +863,51 @@ static int nvme_set_features(struct nvme_dev *dev, unsigned fid,
        return nvme_submit_admin_cmd(dev, &c, result);
 }
 
+/**
+ * nvme_cancel_ios - Cancel outstanding I/Os
+ * @queue: The queue to cancel I/Os on
+ * @timeout: True to only cancel I/Os which have timed out
+ */
+static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
+{
+       int depth = nvmeq->q_depth - 1;
+       struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+       unsigned long now = jiffies;
+       int cmdid;
+
+       for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
+               void *ctx;
+               nvme_completion_fn fn;
+               static struct nvme_completion cqe = {
+                       .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1,
+               };
+
+               if (timeout && !time_after(now, info[cmdid].timeout))
+                       continue;
+               dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+               ctx = cancel_cmdid(nvmeq, cmdid, &fn);
+               fn(nvmeq->dev, ctx, &cqe);
+       }
+}
+
+static void nvme_free_queue_mem(struct nvme_queue *nvmeq)
+{
+       dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
+                               (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
+       dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
+                                       nvmeq->sq_cmds, nvmeq->sq_dma_addr);
+       kfree(nvmeq);
+}
+
 static void nvme_free_queue(struct nvme_dev *dev, int qid)
 {
        struct nvme_queue *nvmeq = dev->queues[qid];
        int vector = dev->entry[nvmeq->cq_vector].vector;
 
+       spin_lock_irq(&nvmeq->q_lock);
+       nvme_cancel_ios(nvmeq, false);
+       spin_unlock_irq(&nvmeq->q_lock);
+
        irq_set_affinity_hint(vector, NULL);
        free_irq(vector, nvmeq);
 
@@ -876,18 +917,15 @@ static void nvme_free_queue(struct nvme_dev *dev, int qid)
                adapter_delete_cq(dev, qid);
        }
 
-       dma_free_coherent(nvmeq->q_dmadev, CQ_SIZE(nvmeq->q_depth),
-                               (void *)nvmeq->cqes, nvmeq->cq_dma_addr);
-       dma_free_coherent(nvmeq->q_dmadev, SQ_SIZE(nvmeq->q_depth),
-                                       nvmeq->sq_cmds, nvmeq->sq_dma_addr);
-       kfree(nvmeq);
+       nvme_free_queue_mem(nvmeq);
 }
 
 static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
                                                        int depth, int vector)
 {
        struct device *dmadev = &dev->pci_dev->dev;
-       unsigned extra = (depth / 8) + (depth * sizeof(struct nvme_cmd_info));
+       unsigned extra = DIV_ROUND_UP(depth, 8) + (depth *
+                                               sizeof(struct nvme_cmd_info));
        struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq) + extra, GFP_KERNEL);
        if (!nvmeq)
                return NULL;
@@ -975,7 +1013,7 @@ static __devinit struct nvme_queue *nvme_create_queue(struct nvme_dev *dev,
 
 static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
 {
-       int result;
+       int result = 0;
        u32 aqa;
        u64 cap;
        unsigned long timeout;
@@ -1005,17 +1043,22 @@ static int __devinit nvme_configure_admin_queue(struct nvme_dev *dev)
        timeout = ((NVME_CAP_TIMEOUT(cap) + 1) * HZ / 2) + jiffies;
        dev->db_stride = NVME_CAP_STRIDE(cap);
 
-       while (!(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
+       while (!result && !(readl(&dev->bar->csts) & NVME_CSTS_RDY)) {
                msleep(100);
                if (fatal_signal_pending(current))
-                       return -EINTR;
+                       result = -EINTR;
                if (time_after(jiffies, timeout)) {
                        dev_err(&dev->pci_dev->dev,
                                "Device not ready; aborting initialisation\n");
-                       return -ENODEV;
+                       result = -ENODEV;
                }
        }
 
+       if (result) {
+               nvme_free_queue_mem(nvmeq);
+               return result;
+       }
+
        result = queue_request_irq(dev, nvmeq, "nvme admin");
        dev->queues[0] = nvmeq;
        return result;
@@ -1037,6 +1080,8 @@ static struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
        offset = offset_in_page(addr);
        count = DIV_ROUND_UP(offset + length, PAGE_SIZE);
        pages = kcalloc(count, sizeof(*pages), GFP_KERNEL);
+       if (!pages)
+               return ERR_PTR(-ENOMEM);
 
        err = get_user_pages_fast(addr, count, 1, pages);
        if (err < count) {
@@ -1146,14 +1191,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        return status;
 }
 
-static int nvme_user_admin_cmd(struct nvme_ns *ns,
+static int nvme_user_admin_cmd(struct nvme_dev *dev,
                                        struct nvme_admin_cmd __user *ucmd)
 {
-       struct nvme_dev *dev = ns->dev;
        struct nvme_admin_cmd cmd;
        struct nvme_command c;
        int status, length;
-       struct nvme_iod *iod;
+       struct nvme_iod *uninitialized_var(iod);
 
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
@@ -1204,7 +1248,7 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
        case NVME_IOCTL_ID:
                return ns->ns_id;
        case NVME_IOCTL_ADMIN_CMD:
-               return nvme_user_admin_cmd(ns, (void __user *)arg);
+               return nvme_user_admin_cmd(ns->dev, (void __user *)arg);
        case NVME_IOCTL_SUBMIT_IO:
                return nvme_submit_io(ns, (void __user *)arg);
        default:
@@ -1218,26 +1262,6 @@ static const struct block_device_operations nvme_fops = {
        .compat_ioctl   = nvme_ioctl,
 };
 
-static void nvme_timeout_ios(struct nvme_queue *nvmeq)
-{
-       int depth = nvmeq->q_depth - 1;
-       struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
-       unsigned long now = jiffies;
-       int cmdid;
-
-       for_each_set_bit(cmdid, nvmeq->cmdid_data, depth) {
-               void *ctx;
-               nvme_completion_fn fn;
-               static struct nvme_completion cqe = { .status = cpu_to_le16(NVME_SC_ABORT_REQ) << 1, };
-
-               if (!time_after(now, info[cmdid].timeout))
-                       continue;
-               dev_warn(nvmeq->q_dmadev, "Timing out I/O %d\n", cmdid);
-               ctx = cancel_cmdid(nvmeq, cmdid, &fn);
-               fn(nvmeq->dev, ctx, &cqe);
-       }
-}
-
 static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
 {
        while (bio_list_peek(&nvmeq->sq_cong)) {
@@ -1269,7 +1293,7 @@ static int nvme_kthread(void *data)
                                spin_lock_irq(&nvmeq->q_lock);
                                if (nvme_process_cq(nvmeq))
                                        printk("process_cq did something\n");
-                               nvme_timeout_ios(nvmeq);
+                               nvme_cancel_ios(nvmeq, true);
                                nvme_resubmit_bios(nvmeq);
                                spin_unlock_irq(&nvmeq->q_lock);
                        }
@@ -1339,6 +1363,9 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, int nsid,
        ns->disk = disk;
        lbaf = id->flbas & 0xf;
        ns->lba_shift = id->lbaf[lbaf].ds;
+       blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
+       if (dev->max_hw_sectors)
+               blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
 
        disk->major = nvme_major;
        disk->minors = NVME_MINORS;
@@ -1383,7 +1410,7 @@ static int set_queue_count(struct nvme_dev *dev, int count)
 
 static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
 {
-       int result, cpu, i, nr_io_queues, db_bar_size;
+       int result, cpu, i, nr_io_queues, db_bar_size, q_depth;
 
        nr_io_queues = num_online_cpus();
        result = set_queue_count(dev, nr_io_queues);
@@ -1429,9 +1456,10 @@ static int __devinit nvme_setup_io_queues(struct nvme_dev *dev)
                cpu = cpumask_next(cpu, cpu_online_mask);
        }
 
+       q_depth = min_t(int, NVME_CAP_MQES(readq(&dev->bar->cap)) + 1,
+                                                               NVME_Q_DEPTH);
        for (i = 0; i < nr_io_queues; i++) {
-               dev->queues[i + 1] = nvme_create_queue(dev, i + 1,
-                                                       NVME_Q_DEPTH, i);
+               dev->queues[i + 1] = nvme_create_queue(dev, i + 1, q_depth, i);
                if (IS_ERR(dev->queues[i + 1]))
                        return PTR_ERR(dev->queues[i + 1]);
                dev->queue_count++;
@@ -1480,6 +1508,10 @@ static int __devinit nvme_dev_add(struct nvme_dev *dev)
        memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
        memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
        memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
+       if (ctrl->mdts) {
+               int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
+               dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
+       }
 
        id_ns = mem;
        for (i = 1; i <= nn; i++) {
@@ -1523,8 +1555,6 @@ static int nvme_dev_remove(struct nvme_dev *dev)
        list_del(&dev->node);
        spin_unlock(&dev_list_lock);
 
-       /* TODO: wait all I/O finished or cancel them */
-
        list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
                list_del(&ns->list);
                del_gendisk(ns->disk);
@@ -1560,15 +1590,33 @@ static void nvme_release_prp_pools(struct nvme_dev *dev)
        dma_pool_destroy(dev->prp_small_pool);
 }
 
-/* XXX: Use an ida or something to let remove / add work correctly */
-static void nvme_set_instance(struct nvme_dev *dev)
+static DEFINE_IDA(nvme_instance_ida);
+
+static int nvme_set_instance(struct nvme_dev *dev)
 {
-       static int instance;
-       dev->instance = instance++;
+       int instance, error;
+
+       do {
+               if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
+                       return -ENODEV;
+
+               spin_lock(&dev_list_lock);
+               error = ida_get_new(&nvme_instance_ida, &instance);
+               spin_unlock(&dev_list_lock);
+       } while (error == -EAGAIN);
+
+       if (error)
+               return -ENODEV;
+
+       dev->instance = instance;
+       return 0;
 }
 
 static void nvme_release_instance(struct nvme_dev *dev)
 {
+       spin_lock(&dev_list_lock);
+       ida_remove(&nvme_instance_ida, dev->instance);
+       spin_unlock(&dev_list_lock);
 }
 
 static int __devinit nvme_probe(struct pci_dev *pdev,
@@ -1601,7 +1649,10 @@ static int __devinit nvme_probe(struct pci_dev *pdev,
        pci_set_drvdata(pdev, dev);
        dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
-       nvme_set_instance(dev);
+       result = nvme_set_instance(dev);
+       if (result)
+               goto disable;
+
        dev->entry[0].vector = pdev->irq;
 
        result = nvme_setup_prp_pools(dev);
@@ -1704,15 +1755,17 @@ static struct pci_driver nvme_driver = {
 
 static int __init nvme_init(void)
 {
-       int result = -EBUSY;
+       int result;
 
        nvme_thread = kthread_run(nvme_kthread, NULL, "nvme");
        if (IS_ERR(nvme_thread))
                return PTR_ERR(nvme_thread);
 
-       nvme_major = register_blkdev(nvme_major, "nvme");
-       if (nvme_major <= 0)
+       result = register_blkdev(nvme_major, "nvme");
+       if (result < 0)
                goto kill_kthread;
+       else if (result > 0)
+               nvme_major = result;
 
        result = pci_register_driver(&nvme_driver);
        if (result)
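
Note: the mdts-based transfer cap introduced above is easiest to see with
worked numbers (hypothetical controller values, not from the patch). With
NVME_CAP_MPSMIN(cap) = 0 the minimum page size is 4 KiB (shift 12), so a
controller reporting mdts = 5 allows transfers of 2^5 pages:

/* Hypothetical controller: MPSMIN = 0, mdts = 5 */
int shift = 0 + 12;                        /* NVME_CAP_MPSMIN(cap) + 12 */
u32 max_hw_sectors = 1 << (5 + shift - 9); /* = 256 sectors = 128 KiB */
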
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 9917943a3572ef577ac7a511ef375d11dae08350..54a55f03115df1df8dddb45e354eb8246336e56b 100644
@@ -246,13 +246,12 @@ static int rbd_open(struct block_device *bdev, fmode_t mode)
 {
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
 
-       rbd_get_dev(rbd_dev);
-
-       set_device_ro(bdev, rbd_dev->read_only);
-
        if ((mode & FMODE_WRITE) && rbd_dev->read_only)
                return -EROFS;
 
+       rbd_get_dev(rbd_dev);
+       set_device_ro(bdev, rbd_dev->read_only);
+
        return 0;
 }
 
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 47180a08edad28c7c95fea0cd51e9b7b15172f27..b6653a6fc5d56af68e55b0f2ea10855a1927ee2f 100644
@@ -391,7 +391,7 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
                for (j = 0; j < nr_channels; j++) {
                        struct dimm_info *dimm = csrow->channels[j]->dimm;
 
-                       dimm->nr_pages = nr_pages / nr_channels;
+                       dimm->nr_pages = nr_pages;
                        dimm->grain = nr_pages << PAGE_SHIFT;
                        dimm->mtype = MEM_DDR2;
                        dimm->dtype = DEV_UNKNOWN;
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index 39c63757c2a14fd7bc988c23d7ee6be2e84d0a06..6a49dd00b81b8fce1181a0e0d436bb8e4ce211d5 100644
@@ -1012,6 +1012,10 @@ static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
                        /* add the number of COLUMN bits */
                        addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
 
+                       /* Dual-rank memories have twice the size */
+                       if (dinfo->dual_rank)
+                               addrBits++;
+
                        addrBits += 6;  /* add 64 bits per DIMM */
                        addrBits -= 20; /* divide by 2^^20 */
                        addrBits -= 3;  /* 8 bits per bytes */
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index f3b1f9fafa4b20b4f40917bcbab827ad760515df..5715b7c2c5177a6c76ed732fb6f20de9b7578220 100644
@@ -513,7 +513,8 @@ static int get_dimm_config(struct mem_ctl_info *mci)
 {
        struct sbridge_pvt *pvt = mci->pvt_info;
        struct dimm_info *dimm;
-       int i, j, banks, ranks, rows, cols, size, npages;
+       unsigned i, j, banks, ranks, rows, cols, npages;
+       u64 size;
        u32 reg;
        enum edac_type mode;
        enum mem_type mtype;
@@ -585,10 +586,10 @@ static int get_dimm_config(struct mem_ctl_info *mci)
                                cols = numcol(mtr);
 
                                /* DDR3 has 8 I/O banks */
-                               size = (rows * cols * banks * ranks) >> (20 - 3);
+                               size = ((u64)rows * cols * banks * ranks) >> (20 - 3);
                                npages = MiB_TO_PAGES(size);
 
-                               edac_dbg(0, "mc#%d: channel %d, dimm %d, %d Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
+                               edac_dbg(0, "mc#%d: channel %d, dimm %d, %Ld Mb (%d pages) bank: %d, rank: %d, row: %#x, col: %#x\n",
                                         pvt->sbridge_dev->mc, i, j,
                                         size, npages,
                                         banks, ranks, rows, cols);
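
Note: the u64 promotion matters because the intermediate product can exceed
32 bits. With a hypothetical geometry of rows = 65536, cols = 2048, banks = 8,
ranks = 4, the product is exactly 2^32, which a plain int multiply would wrap:

/* Hypothetical geometry whose product (2^32) overflows a 32-bit multiply: */
unsigned rows = 65536, cols = 2048, banks = 8, ranks = 4;
u64 size = ((u64)rows * cols * banks * ranks) >> (20 - 3); /* = 32768, not 0 */
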
diff --git a/drivers/gpio/gpio-lpc32xx.c b/drivers/gpio/gpio-lpc32xx.c
index 8a420f13905e814bc9e76672941d64fd31ab57f2..ed94b4ea72e9324cdc70a1ad4551f0e85fca89d0 100644
@@ -308,6 +308,7 @@ static int lpc32xx_gpio_dir_output_p012(struct gpio_chip *chip, unsigned pin,
 {
        struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
 
+       __set_gpio_level_p012(group, pin, value);
        __set_gpio_dir_p012(group, pin, 0);
 
        return 0;
@@ -318,6 +319,7 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
 {
        struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
 
+       __set_gpio_level_p3(group, pin, value);
        __set_gpio_dir_p3(group, pin, 0);
 
        return 0;
@@ -326,6 +328,9 @@ static int lpc32xx_gpio_dir_output_p3(struct gpio_chip *chip, unsigned pin,
 static int lpc32xx_gpio_dir_out_always(struct gpio_chip *chip, unsigned pin,
        int value)
 {
+       struct lpc32xx_gpio_chip *group = to_lpc32xx_gpio(chip);
+
+       __set_gpo_level_p3(group, pin, value);
        return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index ff23d88880e50ab727b6836c4fc7ad6d8860a222..3ca240b4413d48eb9ddf0bdf479db97345397965 100644
@@ -179,7 +179,7 @@ nouveau_abi16_ioctl_grobj_alloc(ABI16_IOCTL_ARGS)
                        return 0;
        } else
        if (init->class == 0x906e) {
-               NV_ERROR(dev, "906e not supported yet\n");
+               NV_DEBUG(dev, "906e not supported yet\n");
                return -EINVAL;
        }
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fb.c b/drivers/gpu/drm/nouveau/nvc0_fb.c
index f704e942372e75b291cc505536a197c4c464a537..f376c39310dfb11e00a944577aba9209e4481451 100644
@@ -124,6 +124,7 @@ nvc0_fb_init(struct drm_device *dev)
        priv = dev_priv->engine.fb.priv;
 
        nv_wr32(dev, 0x100c10, priv->r100c10 >> 8);
+       nv_mask(dev, 0x17e820, 0x00100000, 0x00000000); /* NV_PLTCG_INTR_EN */
        return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nvc0_fifo.c b/drivers/gpu/drm/nouveau/nvc0_fifo.c
index 7d85553d518c42ee5826c0cf516086cf01ce1534..cd39eb99f5b15b4aef3f9fa4d47967b7bf3f55f6 100644
@@ -373,7 +373,8 @@ nvc0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
 static void
 nvc0_fifo_isr(struct drm_device *dev)
 {
-       u32 stat = nv_rd32(dev, 0x002100);
+       u32 mask = nv_rd32(dev, 0x002140);
+       u32 stat = nv_rd32(dev, 0x002100) & mask;
 
        if (stat & 0x00000100) {
                NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
diff --git a/drivers/gpu/drm/nouveau/nve0_fifo.c b/drivers/gpu/drm/nouveau/nve0_fifo.c
index e98d144e6eb9df766fc458e03bdd35acef349ce4..281bece751b61eb091d2b6da73144dc9d2ddb5c5 100644
@@ -345,7 +345,8 @@ nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
 static void
 nve0_fifo_isr(struct drm_device *dev)
 {
-       u32 stat = nv_rd32(dev, 0x002100);
+       u32 mask = nv_rd32(dev, 0x002140);
+       u32 stat = nv_rd32(dev, 0x002100) & mask;
 
        if (stat & 0x00000100) {
                NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index ba055e9ca0077875a3a49d3533d729a95ccf98fd..8d9dc44f1f94f56c32234fdb527576f7817433ad 100644
@@ -69,6 +69,13 @@ static int udl_get_modes(struct drm_connector *connector)
 static int udl_mode_valid(struct drm_connector *connector,
                          struct drm_display_mode *mode)
 {
+       struct udl_device *udl = connector->dev->dev_private;
+       if (!udl->sku_pixel_limit)
+               return 0;
+
+       if (mode->vdisplay * mode->hdisplay > udl->sku_pixel_limit)
+               return MODE_VIRTUAL_Y;
+
        return 0;
 }
 
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index f2fb8f15e2f127b350e33c210d2b91d44ed09134..7e0743358dffdf5176d2aa48a393beb21803c6cd 100644
@@ -1018,7 +1018,7 @@ int vmw_event_fence_action_create(struct drm_file *file_priv,
        }
 
 
-       event = kzalloc(sizeof(event->event), GFP_KERNEL);
+       event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (unlikely(event == NULL)) {
                DRM_ERROR("Failed to allocate an event.\n");
                ret = -ENOMEM;
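
Note: the kzalloc() fix is the classic embedded-member sizeof pitfall:
sizeof(event->event) measures only the first member, not the enclosing object.
In miniature (struct and field names assumed for illustration):

struct fence_event {
        struct drm_event event;         /* embedded header, smaller */
        /* ... members the short sizeof() silently truncated ... */
};
struct fence_event *event;

event = kzalloc(sizeof(event->event), GFP_KERNEL);  /* wrong: member size */
event = kzalloc(sizeof(*event), GFP_KERNEL);        /* right: whole struct */
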
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b4e525908e6613485a680cfe5b43aa391df0ca46..55074cba20eba9012d6a310acef62b965728bcea 100644
@@ -278,7 +278,7 @@ static void swap_pci_ref(struct pci_dev **from, struct pci_dev *to)
 
 static int iommu_init_device(struct device *dev)
 {
-       struct pci_dev *dma_pdev, *pdev = to_pci_dev(dev);
+       struct pci_dev *dma_pdev = NULL, *pdev = to_pci_dev(dev);
        struct iommu_dev_data *dev_data;
        struct iommu_group *group;
        u16 alias;
@@ -305,7 +305,9 @@ static int iommu_init_device(struct device *dev)
                dev_data->alias_data = alias_data;
 
                dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
-       } else
+       }
+
+       if (dma_pdev == NULL)
                dma_pdev = pci_dev_get(pdev);
 
        /* Account for quirked devices */
@@ -696,7 +698,7 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
 
                /*
                 * Release iommu->lock because ppr-handling might need to
-                * re-aquire it
+                * re-acquire it
                 */
                spin_unlock_irqrestore(&iommu->lock, flags);
 
@@ -814,7 +816,7 @@ static void build_inv_iommu_pages(struct iommu_cmd *cmd, u64 address,
        CMD_SET_TYPE(cmd, CMD_INV_IOMMU_PAGES);
        if (s) /* size bit - we flush more than one 4kb page */
                cmd->data[2] |= CMD_INV_IOMMU_PAGES_SIZE_MASK;
-       if (pde) /* PDE bit - we wan't flush everything not only the PTEs */
+       if (pde) /* PDE bit - we want to flush everything, not only the PTEs */
                cmd->data[2] |= CMD_INV_IOMMU_PAGES_PDE_MASK;
 }
 
@@ -2192,7 +2194,7 @@ static bool pci_pri_tlp_required(struct pci_dev *pdev)
 }
 
 /*
- * If a device is not yet associated with a domain, this function does
+ * If a device is not yet associated with a domain, this function
  * assigns it visible for the hardware
  */
 static int attach_device(struct device *dev,
@@ -2442,7 +2444,7 @@ static struct protection_domain *get_domain(struct device *dev)
        if (domain != NULL)
                return domain;
 
-       /* Device not bount yet - bind it */
+       /* Device not bound yet - bind it */
        dma_dom = find_protection_domain(devid);
        if (!dma_dom)
                dma_dom = amd_iommu_rlookup_table[devid]->default_dom;
@@ -2981,7 +2983,7 @@ static void __init prealloc_protection_domains(void)
                        alloc_passthrough_domain();
                        dev_data->passthrough = true;
                        attach_device(&dev->dev, pt_domain);
-                       pr_info("AMD-Vi: Using passthough domain for device %s\n",
+                       pr_info("AMD-Vi: Using passthrough domain for device %s\n",
                                dev_name(&dev->dev));
                }
 
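
Note: the iommu_init_device() hunk above converts an if/else into an
initialize-then-fallback shape so that dma_pdev always ends up holding a
counted reference, even when the alias lookup is skipped or returns NULL.
Condensed sketch (conditions paraphrased):

struct pci_dev *dma_pdev = NULL;

if (has_alias)                          /* paraphrased condition */
        dma_pdev = pci_get_bus_and_slot(alias >> 8, alias & 0xff);
if (dma_pdev == NULL)                   /* no alias, or lookup failed */
        dma_pdev = pci_dev_get(pdev);   /* fallback keeps refcounting uniform */
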
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 3d6e7c5471f2a6517b71223bae70d3e20b2badf7..18b0d99bd4d6686b3727f83896769d0a116dcf1f 100644
@@ -26,7 +26,6 @@
 #include <linux/msi.h>
 #include <linux/amd-iommu.h>
 #include <linux/export.h>
-#include <linux/acpi.h>
 #include <acpi/acpi.h>
 #include <asm/pci-direct.h>
 #include <asm/iommu.h>
@@ -192,7 +191,7 @@ struct amd_iommu **amd_iommu_rlookup_table;
 struct irq_remap_table **irq_lookup_table;
 
 /*
- * AMD IOMMU allows up to 2^16 differend protection domains. This is a bitmap
+ * AMD IOMMU allows up to 2^16 different protection domains. This is a bitmap
  * to know which ones are already in use.
  */
 unsigned long *amd_iommu_pd_alloc_bitmap;
@@ -492,7 +491,7 @@ static int __init find_last_devid_acpi(struct acpi_table_header *table)
 
 /****************************************************************************
  *
- * The following functions belong the the code path which parses the ACPI table
+ * The following functions belong to the code path which parses the ACPI table
  * the second time. In this ACPI parsing iteration we allocate IOMMU specific
  * data structures, initialize the device/alias/rlookup table and also
  * basically initialize the hardware.
@@ -730,7 +729,7 @@ static int add_special_device(u8 type, u8 id, u16 devid)
 }
 
 /*
- * Reads the device exclusion range from ACPI and initialize IOMMU with
+ * Reads the device exclusion range from ACPI and initializes the IOMMU with
  * it
  */
 static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m)
@@ -1196,8 +1195,8 @@ static void print_iommu_info(void)
                                if (iommu_feature(iommu, (1ULL << i)))
                                        pr_cont(" %s", feat_str[i]);
                        }
-               }
                pr_cont("\n");
+               }
        }
        if (irq_remapping_enabled)
                pr_info("AMD-Vi: Interrupt remapping enabled\n");
@@ -1224,7 +1223,7 @@ static int __init amd_iommu_init_pci(void)
 /****************************************************************************
  *
  * The following functions initialize the MSI interrupts for all IOMMUs
- * in the system. Its a bit challenging because there could be multiple
+ * in the system. It's a bit challenging because there could be multiple
  * IOMMUs per PCI BDF but we can call pci_enable_msi(x) only once per
  * pci_dev.
  *
@@ -1282,7 +1281,7 @@ enable_faults:
  *
  * The next functions belong to the third pass of parsing the ACPI
  * table. In this last pass the memory mapping requirements are
- * gathered (like exclusion and unity mapping reanges).
+ * gathered (like exclusion and unity mapping ranges).
  *
  ****************************************************************************/
 
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 90afccb1237bea7ba750e8184ead66e73568a540..c9aa3d079ff0377c376012bb5130b62e6bcefb8e 100644
 #define PAGE_SIZE_ALIGN(address, pagesize) \
                ((address) & ~((pagesize) - 1))
 /*
- * Creates an IOMMU PTE for an address an a given pagesize
+ * Creates an IOMMU PTE for an address and a given pagesize
  * The PTE has no permission bits set
  * Pagesize is expected to be a power-of-two larger than 4096
  */
@@ -425,7 +425,7 @@ struct iommu_dev_data {
        struct list_head dev_data_list;   /* For global dev_data_list */
        struct iommu_dev_data *alias_data;/* The alias dev_data */
        struct protection_domain *domain; /* Domain the device is bound to */
-       atomic_t bind;                    /* Domain attach reverent count */
+       atomic_t bind;                    /* Domain attach reference count */
        u16 devid;                        /* PCI Device ID */
        bool iommu_v2;                    /* Device can make use of IOMMUv2 */
        bool passthrough;                 /* Default for device is pt_domain */
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
index 80bad32aa46394443ad5a0aaa4a241c4e0e4b2c0..7fe44f83cc371043788885d3fb3bfdc7f6b091ad 100644
@@ -840,8 +840,7 @@ static void exynos_iommu_detach_device(struct iommu_domain *domain,
        if (__exynos_sysmmu_disable(data)) {
                dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
                                        __func__, __pa(priv->pgtable));
-               list_del(&data->node);
-               INIT_LIST_HEAD(&data->node);
+               list_del_init(&data->node);
 
        } else {
                dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
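
Note: list_del_init() is the stock shorthand for exactly the two calls it
replaces here; its definition in include/linux/list.h is essentially:

static inline void list_del_init(struct list_head *entry)
{
        __list_del(entry->prev, entry->next);   /* unlink from the list */
        INIT_LIST_HEAD(entry);                  /* leave entry self-linked */
}
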
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 2297ec193eb4b645812c85519c335361e935f296..cb9e1146b02fe3f897c4ac93da5a084190c0f415 100644
@@ -589,7 +589,9 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 {
        int i;
 
-       domain->iommu_coherency = 1;
+       i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
+
+       domain->iommu_coherency = i < g_num_of_iommus ? 1 : 0;
 
        for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) {
                if (!ecap_coherent(g_iommus[i]->ecap)) {
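
Note: the new default leans on the find_first_bit() contract, restated here
with the two lines from the hunk:

/* find_first_bit() returns >= g_num_of_iommus when the bitmap is empty,
 * so a domain with no IOMMU attached now starts out non-coherent; the
 * subsequent loop can only clear the flag, never set it. */
i = find_first_bit(domain->iommu_bmp, g_num_of_iommus);
domain->iommu_coherency = (i < g_num_of_iommus) ? 1 : 0;
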
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index 2a4bb36bc6888a9c91aee304df7c19bad1e42f83..0b4d62e0c64573cea96dc38f237436a036aa74cf 100644
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_iommu.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 
 #include <asm/page.h>
 #include <asm/cacheflush.h>
 
 #include <mach/iomap.h>
-#include <mach/smmu.h>
 #include <mach/tegra-ahb.h>
 
+enum smmu_hwgrp {
+       HWGRP_AFI,
+       HWGRP_AVPC,
+       HWGRP_DC,
+       HWGRP_DCB,
+       HWGRP_EPP,
+       HWGRP_G2,
+       HWGRP_HC,
+       HWGRP_HDA,
+       HWGRP_ISP,
+       HWGRP_MPE,
+       HWGRP_NV,
+       HWGRP_NV2,
+       HWGRP_PPCS,
+       HWGRP_SATA,
+       HWGRP_VDE,
+       HWGRP_VI,
+
+       HWGRP_COUNT,
+
+       HWGRP_END = ~0,
+};
+
+#define HWG_AFI                (1 << HWGRP_AFI)
+#define HWG_AVPC       (1 << HWGRP_AVPC)
+#define HWG_DC         (1 << HWGRP_DC)
+#define HWG_DCB                (1 << HWGRP_DCB)
+#define HWG_EPP                (1 << HWGRP_EPP)
+#define HWG_G2         (1 << HWGRP_G2)
+#define HWG_HC         (1 << HWGRP_HC)
+#define HWG_HDA                (1 << HWGRP_HDA)
+#define HWG_ISP                (1 << HWGRP_ISP)
+#define HWG_MPE                (1 << HWGRP_MPE)
+#define HWG_NV         (1 << HWGRP_NV)
+#define HWG_NV2                (1 << HWGRP_NV2)
+#define HWG_PPCS       (1 << HWGRP_PPCS)
+#define HWG_SATA       (1 << HWGRP_SATA)
+#define HWG_VDE                (1 << HWGRP_VDE)
+#define HWG_VI         (1 << HWGRP_VI)
+
 /* bitmap of the page sizes currently supported */
 #define SMMU_IOMMU_PGSIZES     (SZ_4K)
 
 #define SMMU_CONFIG_DISABLE                    0
 #define SMMU_CONFIG_ENABLE                     1
 
-#define SMMU_TLB_CONFIG                                0x14
-#define SMMU_TLB_CONFIG_STATS__MASK            (1 << 31)
-#define SMMU_TLB_CONFIG_STATS__ENABLE          (1 << 31)
+/* REVISIT: To support multiple MCs */
+enum {
+       _MC = 0,
+};
+
+enum {
+       _TLB = 0,
+       _PTC,
+};
+
+#define SMMU_CACHE_CONFIG_BASE                 0x14
+#define __SMMU_CACHE_CONFIG(mc, cache)         (SMMU_CACHE_CONFIG_BASE + 4 * cache)
+#define SMMU_CACHE_CONFIG(cache)               __SMMU_CACHE_CONFIG(_MC, cache)
+
+#define SMMU_CACHE_CONFIG_STATS_SHIFT          31
+#define SMMU_CACHE_CONFIG_STATS_ENABLE         (1 << SMMU_CACHE_CONFIG_STATS_SHIFT)
+#define SMMU_CACHE_CONFIG_STATS_TEST_SHIFT     30
+#define SMMU_CACHE_CONFIG_STATS_TEST           (1 << SMMU_CACHE_CONFIG_STATS_TEST_SHIFT)
+
 #define SMMU_TLB_CONFIG_HIT_UNDER_MISS__ENABLE (1 << 29)
 #define SMMU_TLB_CONFIG_ACTIVE_LINES__VALUE    0x10
 #define SMMU_TLB_CONFIG_RESET_VAL              0x20000010
 
-#define SMMU_PTC_CONFIG                                0x18
-#define SMMU_PTC_CONFIG_STATS__MASK            (1 << 31)
-#define SMMU_PTC_CONFIG_STATS__ENABLE          (1 << 31)
 #define SMMU_PTC_CONFIG_CACHE__ENABLE          (1 << 29)
 #define SMMU_PTC_CONFIG_INDEX_MAP__PATTERN     0x3f
 #define SMMU_PTC_CONFIG_RESET_VAL              0x2000003f
 
 #define SMMU_ASID_SECURITY                     0x38
 
-#define SMMU_STATS_TLB_HIT_COUNT               0x1f0
-#define SMMU_STATS_TLB_MISS_COUNT              0x1f4
-#define SMMU_STATS_PTC_HIT_COUNT               0x1f8
-#define SMMU_STATS_PTC_MISS_COUNT              0x1fc
+#define SMMU_STATS_CACHE_COUNT_BASE            0x1f0
+
+#define SMMU_STATS_CACHE_COUNT(mc, cache, hitmiss)             \
+       (SMMU_STATS_CACHE_COUNT_BASE + 8 * cache + 4 * hitmiss)
 
 #define SMMU_TRANSLATION_ENABLE_0              0x228
 #define SMMU_TRANSLATION_ENABLE_1              0x22c
@@ -231,6 +285,12 @@ struct smmu_as {
        spinlock_t              client_lock; /* for client list */
 };
 
+struct smmu_debugfs_info {
+       struct smmu_device *smmu;
+       int mc;
+       int cache;
+};
+
 /*
  * Per SMMU device - IOMMU device
  */
@@ -251,6 +311,9 @@ struct smmu_device {
        unsigned long translation_enable_2;
        unsigned long asid_security;
 
+       struct dentry *debugfs_root;
+       struct smmu_debugfs_info *debugfs_info;
+
        struct device_node *ahb;
 
        int             num_as;
@@ -412,8 +475,8 @@ static int smmu_setup_regs(struct smmu_device *smmu)
        smmu_write(smmu, smmu->translation_enable_1, SMMU_TRANSLATION_ENABLE_1);
        smmu_write(smmu, smmu->translation_enable_2, SMMU_TRANSLATION_ENABLE_2);
        smmu_write(smmu, smmu->asid_security, SMMU_ASID_SECURITY);
-       smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_TLB_CONFIG);
-       smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_PTC_CONFIG);
+       smmu_write(smmu, SMMU_TLB_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_TLB));
+       smmu_write(smmu, SMMU_PTC_CONFIG_RESET_VAL, SMMU_CACHE_CONFIG(_PTC));
 
        smmu_flush_regs(smmu, 1);
 
@@ -895,6 +958,175 @@ static struct iommu_ops smmu_iommu_ops = {
        .pgsize_bitmap  = SMMU_IOMMU_PGSIZES,
 };
 
+/* Must be kept in the same order as the corresponding enums */
+static const char * const smmu_debugfs_mc[] = { "mc", };
+static const char * const smmu_debugfs_cache[] = {  "tlb", "ptc", };
+
+static ssize_t smmu_debugfs_stats_write(struct file *file,
+                                       const char __user *buffer,
+                                       size_t count, loff_t *pos)
+{
+       struct smmu_debugfs_info *info;
+       struct smmu_device *smmu;
+       struct dentry *dent;
+       int i;
+       enum {
+               _OFF = 0,
+               _ON,
+               _RESET,
+       };
+       const char * const command[] = {
+               [_OFF]          = "off",
+               [_ON]           = "on",
+               [_RESET]        = "reset",
+       };
+       char str[] = "reset";
+       u32 val;
+       size_t offs;
+
+       count = min_t(size_t, count, sizeof(str));
+       if (copy_from_user(str, buffer, count))
+               return -EINVAL;
+
+       for (i = 0; i < ARRAY_SIZE(command); i++)
+               if (strncmp(str, command[i],
+                           strlen(command[i])) == 0)
+                       break;
+
+       if (i == ARRAY_SIZE(command))
+               return -EINVAL;
+
+       dent = file->f_dentry;
+       info = dent->d_inode->i_private;
+       smmu = info->smmu;
+
+       offs = SMMU_CACHE_CONFIG(info->cache);
+       val = smmu_read(smmu, offs);
+       switch (i) {
+       case _OFF:
+               val &= ~SMMU_CACHE_CONFIG_STATS_ENABLE;
+               val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
+               smmu_write(smmu, val, offs);
+               break;
+       case _ON:
+               val |= SMMU_CACHE_CONFIG_STATS_ENABLE;
+               val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
+               smmu_write(smmu, val, offs);
+               break;
+       case _RESET:
+               val |= SMMU_CACHE_CONFIG_STATS_TEST;
+               smmu_write(smmu, val, offs);
+               val &= ~SMMU_CACHE_CONFIG_STATS_TEST;
+               smmu_write(smmu, val, offs);
+               break;
+       default:
+               BUG();
+               break;
+       }
+
+       dev_dbg(smmu->dev, "%s() %08x, %08x @%08x\n", __func__,
+               val, smmu_read(smmu, offs), offs);
+
+       return count;
+}
+
+static int smmu_debugfs_stats_show(struct seq_file *s, void *v)
+{
+       struct smmu_debugfs_info *info;
+       struct smmu_device *smmu;
+       struct dentry *dent;
+       int i;
+       const char * const stats[] = { "hit", "miss", };
+
+       dent = d_find_alias(s->private);
+       info = dent->d_inode->i_private;
+       smmu = info->smmu;
+
+       for (i = 0; i < ARRAY_SIZE(stats); i++) {
+               u32 val;
+               size_t offs;
+
+               offs = SMMU_STATS_CACHE_COUNT(info->mc, info->cache, i);
+               val = smmu_read(smmu, offs);
+               seq_printf(s, "%s:%08x ", stats[i], val);
+
+               dev_dbg(smmu->dev, "%s() %s %08x @%08x\n", __func__,
+                       stats[i], val, offs);
+       }
+       seq_printf(s, "\n");
+
+       return 0;
+}
+
+static int smmu_debugfs_stats_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, smmu_debugfs_stats_show, inode);
+}
+
+static const struct file_operations smmu_debugfs_stats_fops = {
+       .open           = smmu_debugfs_stats_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+       .write          = smmu_debugfs_stats_write,
+};
+
+static void smmu_debugfs_delete(struct smmu_device *smmu)
+{
+       debugfs_remove_recursive(smmu->debugfs_root);
+       kfree(smmu->debugfs_info);
+}
+
+static void smmu_debugfs_create(struct smmu_device *smmu)
+{
+       int i;
+       size_t bytes;
+       struct dentry *root;
+
+       bytes = ARRAY_SIZE(smmu_debugfs_mc) * ARRAY_SIZE(smmu_debugfs_cache) *
+               sizeof(*smmu->debugfs_info);
+       smmu->debugfs_info = kmalloc(bytes, GFP_KERNEL);
+       if (!smmu->debugfs_info)
+               return;
+
+       root = debugfs_create_dir(dev_name(smmu->dev), NULL);
+       if (!root)
+               goto err_out;
+       smmu->debugfs_root = root;
+
+       for (i = 0; i < ARRAY_SIZE(smmu_debugfs_mc); i++) {
+               int j;
+               struct dentry *mc;
+
+               mc = debugfs_create_dir(smmu_debugfs_mc[i], root);
+               if (!mc)
+                       goto err_out;
+
+               for (j = 0; j < ARRAY_SIZE(smmu_debugfs_cache); j++) {
+                       struct dentry *cache;
+                       struct smmu_debugfs_info *info;
+
+                       info = smmu->debugfs_info;
+                       info += i * ARRAY_SIZE(smmu_debugfs_cache) + j;
+                       info->smmu = smmu;
+                       info->mc = i;
+                       info->cache = j;
+
+                       cache = debugfs_create_file(smmu_debugfs_cache[j],
+                                                   S_IWUGO | S_IRUGO, mc,
+                                                   (void *)info,
+                                                   &smmu_debugfs_stats_fops);
+                       if (!cache)
+                               goto err_out;
+               }
+       }
+
+       return;
+
+err_out:
+       smmu_debugfs_delete(smmu);
+}
+
 static int tegra_smmu_suspend(struct device *dev)
 {
        struct smmu_device *smmu = dev_get_drvdata(dev);
@@ -999,6 +1231,7 @@ static int tegra_smmu_probe(struct platform_device *pdev)
        if (!smmu->avp_vector_page)
                return -ENOMEM;
 
+       smmu_debugfs_create(smmu);
        smmu_handle = smmu;
        return 0;
 }
@@ -1008,6 +1241,8 @@ static int tegra_smmu_remove(struct platform_device *pdev)
        struct smmu_device *smmu = platform_get_drvdata(pdev);
        int i;
 
+       smmu_debugfs_delete(smmu);
+
        smmu_write(smmu, SMMU_CONFIG_DISABLE, SMMU_CONFIG);
        for (i = 0; i < smmu->num_as; i++)
                free_pdir(&smmu->as[i]);
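
The parameterized macros reproduce exactly the offsets of the four removed per-register defines; expanding them with the values from the hunks above:

        /* SMMU_CACHE_CONFIG(_TLB) = 0x14 + 4*0 = 0x14      (was SMMU_TLB_CONFIG) */
        /* SMMU_CACHE_CONFIG(_PTC) = 0x14 + 4*1 = 0x18      (was SMMU_PTC_CONFIG) */
        /* SMMU_STATS_CACHE_COUNT(_MC, _TLB, 0) = 0x1f0     (was TLB_HIT_COUNT)   */
        /* SMMU_STATS_CACHE_COUNT(_MC, _TLB, 1) = 0x1f4     (was TLB_MISS_COUNT)  */
        /* SMMU_STATS_CACHE_COUNT(_MC, _PTC, 0) = 0x1f8     (was PTC_HIT_COUNT)   */
        /* SMMU_STATS_CACHE_COUNT(_MC, _PTC, 1) = 0x1fc     (was PTC_MISS_COUNT)  */

With the hierarchy built by smmu_debugfs_create() (a directory named after the device, one subdirectory per memory controller, one file per cache), the counters would be driven roughly as follows, assuming debugfs is mounted at /sys/kernel/debug and <smmu> stands for dev_name() of the SMMU device; the counter values shown are made up, the format is the seq_printf() in smmu_debugfs_stats_show():

        # echo on > /sys/kernel/debug/<smmu>/mc/tlb
        # cat /sys/kernel/debug/<smmu>/mc/tlb
        hit:00001234 miss:00000042
        # echo reset > /sys/kernel/debug/<smmu>/mc/tlb
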
index d8abb90a6c2fbecae99ae2de09ebc2f3d726731e..034233eefc8266eba122556fc6ffdb36e054eced 100644 (file)
@@ -1555,6 +1555,7 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
                           unsigned long arg)
 {
        struct multipath *m = ti->private;
+       struct pgpath *pgpath;
        struct block_device *bdev;
        fmode_t mode;
        unsigned long flags;
@@ -1570,12 +1571,14 @@ again:
        if (!m->current_pgpath)
                __choose_pgpath(m, 0);
 
-       if (m->current_pgpath) {
-               bdev = m->current_pgpath->path.dev->bdev;
-               mode = m->current_pgpath->path.dev->mode;
+       pgpath = m->current_pgpath;
+
+       if (pgpath) {
+               bdev = pgpath->path.dev->bdev;
+               mode = pgpath->path.dev->mode;
        }
 
-       if (m->queue_io)
+       if ((pgpath && m->queue_io) || (!pgpath && m->queue_if_no_path))
                r = -EAGAIN;
        else if (!bdev)
                r = -EIO;
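
The rewritten condition makes the ioctl path consistent with how I/O is handled: with a usable path it still returns -EAGAIN while queue_io is set, and with no path at all it now also returns -EAGAIN when queue_if_no_path is configured, so the caller retries just as queued I/O waits, instead of getting a hard error for a state that is expected to heal.
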
index f90069029aaeed02ab6f4f61814afc92d13db2f0..100368eb7991a50c57e473ea20bf4a8441e91ff0 100644 (file)
@@ -1212,6 +1212,41 @@ struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector)
        return &t->targets[(KEYS_PER_NODE * n) + k];
 }
 
+static int count_device(struct dm_target *ti, struct dm_dev *dev,
+                       sector_t start, sector_t len, void *data)
+{
+       unsigned *num_devices = data;
+
+       (*num_devices)++;
+
+       return 0;
+}
+
+/*
+ * Check whether a table has no data devices attached using each
+ * target's iterate_devices method.
+ * Returns false if the result is unknown because a target doesn't
+ * support iterate_devices.
+ */
+bool dm_table_has_no_data_devices(struct dm_table *table)
+{
+       struct dm_target *uninitialized_var(ti);
+       unsigned i = 0, num_devices = 0;
+
+       while (i < dm_table_get_num_targets(table)) {
+               ti = dm_table_get_target(table, i++);
+
+               if (!ti->type->iterate_devices)
+                       return false;
+
+               ti->type->iterate_devices(ti, count_device, &num_devices);
+               if (num_devices)
+                       return false;
+       }
+
+       return true;
+}
+
 /*
  * Establish the new table's queue_limits and validate them.
  */
@@ -1354,17 +1389,25 @@ static int device_is_nonrot(struct dm_target *ti, struct dm_dev *dev,
        return q && blk_queue_nonrot(q);
 }
 
-static bool dm_table_is_nonrot(struct dm_table *t)
+static int device_is_not_random(struct dm_target *ti, struct dm_dev *dev,
+                            sector_t start, sector_t len, void *data)
+{
+       struct request_queue *q = bdev_get_queue(dev->bdev);
+
+       return q && !blk_queue_add_random(q);
+}
+
+static bool dm_table_all_devices_attribute(struct dm_table *t,
+                                          iterate_devices_callout_fn func)
 {
        struct dm_target *ti;
        unsigned i = 0;
 
-       /* Ensure that all underlying device are non-rotational. */
        while (i < dm_table_get_num_targets(t)) {
                ti = dm_table_get_target(t, i++);
 
                if (!ti->type->iterate_devices ||
-                   !ti->type->iterate_devices(ti, device_is_nonrot, NULL))
+                   !ti->type->iterate_devices(ti, func, NULL))
                        return 0;
        }
 
@@ -1396,13 +1439,23 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
        if (!dm_table_discard_zeroes_data(t))
                q->limits.discard_zeroes_data = 0;
 
-       if (dm_table_is_nonrot(t))
+       /* Ensure that all underlying devices are non-rotational. */
+       if (dm_table_all_devices_attribute(t, device_is_nonrot))
                queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
        else
                queue_flag_clear_unlocked(QUEUE_FLAG_NONROT, q);
 
        dm_table_set_integrity(t);
 
+       /*
+        * Determine whether or not this queue's I/O timings contribute
+        * to the entropy pool.  Only request-based targets use this.
+        * Clear QUEUE_FLAG_ADD_RANDOM if any underlying device does not
+        * have it set.
+        */
+       if (blk_queue_add_random(q) && dm_table_all_devices_attribute(t, device_is_not_random))
+               queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
+
        /*
         * QUEUE_FLAG_STACKABLE must be set after all queue settings are
         * visible to other CPUs because, once the flag is set, incoming bios
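
dm_table_all_devices_attribute() generalizes the old nonrot-only loop: it reports true only when every target implements iterate_devices and every underlying device satisfies the supplied callout, so a new all-devices property check reduces to writing one predicate with the iterate_devices_callout_fn signature. A hypothetical sketch (the name device_supports_discard is illustrative; blk_queue_discard() is the same helper used in the dm-thin hunks below):

        static int device_supports_discard(struct dm_target *ti, struct dm_dev *dev,
                                           sector_t start, sector_t len, void *data)
        {
                struct request_queue *q = bdev_get_queue(dev->bdev);

                return q && blk_queue_discard(q);
        }

        ...
        if (dm_table_all_devices_attribute(t, device_supports_discard))
                /* every data device in this table supports discards */;
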
index af1fc3b2c2adbb365e247a2d45c1f7a94de9b6dd..c29410af1e2211cbba6029a16128e2fabbf97976 100644 (file)
@@ -509,9 +509,9 @@ enum pool_mode {
 struct pool_features {
        enum pool_mode mode;
 
-       unsigned zero_new_blocks:1;
-       unsigned discard_enabled:1;
-       unsigned discard_passdown:1;
+       bool zero_new_blocks:1;
+       bool discard_enabled:1;
+       bool discard_passdown:1;
 };
 
 struct thin_c;
@@ -580,7 +580,8 @@ struct pool_c {
        struct dm_target_callbacks callbacks;
 
        dm_block_t low_water_blocks;
-       struct pool_features pf;
+       struct pool_features requested_pf; /* Features requested during table load */
+       struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
 };
 
 /*
@@ -1839,6 +1840,47 @@ static void __requeue_bios(struct pool *pool)
 /*----------------------------------------------------------------
  * Binding of control targets to a pool object
  *--------------------------------------------------------------*/
+static bool data_dev_supports_discard(struct pool_c *pt)
+{
+       struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
+
+       return q && blk_queue_discard(q);
+}
+
+/*
+ * If discard_passdown was enabled, verify that the data device
+ * supports discards.  Disable discard_passdown if not.
+ */
+static void disable_passdown_if_not_supported(struct pool_c *pt)
+{
+       struct pool *pool = pt->pool;
+       struct block_device *data_bdev = pt->data_dev->bdev;
+       struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
+       sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
+       const char *reason = NULL;
+       char buf[BDEVNAME_SIZE];
+
+       if (!pt->adjusted_pf.discard_passdown)
+               return;
+
+       if (!data_dev_supports_discard(pt))
+               reason = "discard unsupported";
+
+       else if (data_limits->max_discard_sectors < pool->sectors_per_block)
+               reason = "max discard sectors smaller than a block";
+
+       else if (data_limits->discard_granularity > block_size)
+               reason = "discard granularity larger than a block";
+
+       else if (block_size & (data_limits->discard_granularity - 1))
+               reason = "discard granularity not a factor of block size";
+
+       if (reason) {
+               DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
+               pt->adjusted_pf.discard_passdown = false;
+       }
+}
+
 static int bind_control_target(struct pool *pool, struct dm_target *ti)
 {
        struct pool_c *pt = ti->private;
@@ -1847,31 +1889,16 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
         * We want to make sure that degraded pools are never upgraded.
         */
        enum pool_mode old_mode = pool->pf.mode;
-       enum pool_mode new_mode = pt->pf.mode;
+       enum pool_mode new_mode = pt->adjusted_pf.mode;
 
        if (old_mode > new_mode)
                new_mode = old_mode;
 
        pool->ti = ti;
        pool->low_water_blocks = pt->low_water_blocks;
-       pool->pf = pt->pf;
-       set_pool_mode(pool, new_mode);
+       pool->pf = pt->adjusted_pf;
 
-       /*
-        * If discard_passdown was enabled verify that the data device
-        * supports discards.  Disable discard_passdown if not; otherwise
-        * -EOPNOTSUPP will be returned.
-        */
-       /* FIXME: pull this out into a sep fn. */
-       if (pt->pf.discard_passdown) {
-               struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
-               if (!q || !blk_queue_discard(q)) {
-                       char buf[BDEVNAME_SIZE];
-                       DMWARN("Discard unsupported by data device (%s): Disabling discard passdown.",
-                              bdevname(pt->data_dev->bdev, buf));
-                       pool->pf.discard_passdown = 0;
-               }
-       }
+       set_pool_mode(pool, new_mode);
 
        return 0;
 }
@@ -1889,9 +1916,9 @@ static void unbind_control_target(struct pool *pool, struct dm_target *ti)
 static void pool_features_init(struct pool_features *pf)
 {
        pf->mode = PM_WRITE;
-       pf->zero_new_blocks = 1;
-       pf->discard_enabled = 1;
-       pf->discard_passdown = 1;
+       pf->zero_new_blocks = true;
+       pf->discard_enabled = true;
+       pf->discard_passdown = true;
 }
 
 static void __pool_destroy(struct pool *pool)
@@ -2119,13 +2146,13 @@ static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
                argc--;
 
                if (!strcasecmp(arg_name, "skip_block_zeroing"))
-                       pf->zero_new_blocks = 0;
+                       pf->zero_new_blocks = false;
 
                else if (!strcasecmp(arg_name, "ignore_discard"))
-                       pf->discard_enabled = 0;
+                       pf->discard_enabled = false;
 
                else if (!strcasecmp(arg_name, "no_discard_passdown"))
-                       pf->discard_passdown = 0;
+                       pf->discard_passdown = false;
 
                else if (!strcasecmp(arg_name, "read_only"))
                        pf->mode = PM_READ_ONLY;
@@ -2259,8 +2286,9 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
        pt->metadata_dev = metadata_dev;
        pt->data_dev = data_dev;
        pt->low_water_blocks = low_water_blocks;
-       pt->pf = pf;
+       pt->adjusted_pf = pt->requested_pf = pf;
        ti->num_flush_requests = 1;
+
        /*
         * Only need to enable discards if the pool should pass
         * them down to the data device.  The thin device's discard
@@ -2268,12 +2296,14 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
         */
        if (pf.discard_enabled && pf.discard_passdown) {
                ti->num_discard_requests = 1;
+
                /*
                 * Setting 'discards_supported' circumvents the normal
                 * stacking of discard limits (this keeps the pool and
                 * thin devices' discard limits consistent).
                 */
                ti->discards_supported = true;
+               ti->discard_zeroes_data_unsupported = true;
        }
        ti->private = pt;
 
@@ -2703,7 +2733,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
                       format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
                       (unsigned long)pool->sectors_per_block,
                       (unsigned long long)pt->low_water_blocks);
-               emit_flags(&pt->pf, result, sz, maxlen);
+               emit_flags(&pt->requested_pf, result, sz, maxlen);
                break;
        }
 
@@ -2732,20 +2762,21 @@ static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
        return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
 }
 
-static void set_discard_limits(struct pool *pool, struct queue_limits *limits)
+static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
 {
-       /*
-        * FIXME: these limits may be incompatible with the pool's data device
-        */
+       struct pool *pool = pt->pool;
+       struct queue_limits *data_limits;
+
        limits->max_discard_sectors = pool->sectors_per_block;
 
        /*
-        * This is just a hint, and not enforced.  We have to cope with
-        * bios that cover a block partially.  A discard that spans a block
-        * boundary is not sent to this target.
+        * discard_granularity is just a hint, and not enforced.
         */
-       limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
-       limits->discard_zeroes_data = pool->pf.zero_new_blocks;
+       if (pt->adjusted_pf.discard_passdown) {
+               data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
+               limits->discard_granularity = data_limits->discard_granularity;
+       } else
+               limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
 }
 
 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
@@ -2755,15 +2786,25 @@ static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
 
        blk_limits_io_min(limits, 0);
        blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
-       if (pool->pf.discard_enabled)
-               set_discard_limits(pool, limits);
+
+       /*
+        * pt->adjusted_pf is a staging area for the actual features to use.
+        * They get transferred to the live pool in bind_control_target()
+        * called from pool_preresume().
+        */
+       if (!pt->adjusted_pf.discard_enabled)
+               return;
+
+       disable_passdown_if_not_supported(pt);
+
+       set_discard_limits(pt, limits);
 }
 
 static struct target_type pool_target = {
        .name = "thin-pool",
        .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
                    DM_TARGET_IMMUTABLE,
-       .version = {1, 3, 0},
+       .version = {1, 4, 0},
        .module = THIS_MODULE,
        .ctr = pool_ctr,
        .dtr = pool_dtr,
@@ -3042,19 +3083,19 @@ static int thin_iterate_devices(struct dm_target *ti,
        return 0;
 }
 
+/*
+ * A thin device always inherits its queue limits from its pool.
+ */
 static void thin_io_hints(struct dm_target *ti, struct queue_limits *limits)
 {
        struct thin_c *tc = ti->private;
-       struct pool *pool = tc->pool;
 
-       blk_limits_io_min(limits, 0);
-       blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
-       set_discard_limits(pool, limits);
+       *limits = bdev_get_queue(tc->pool_dev->bdev)->limits;
 }
 
 static struct target_type thin_target = {
        .name = "thin",
-       .version = {1, 3, 0},
+       .version = {1, 4, 0},
        .module = THIS_MODULE,
        .ctr = thin_ctr,
        .dtr = thin_dtr,
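
disable_passdown_if_not_supported() checks the pool's block geometry against the data device's advertised discard limits before letting discards pass through, and each failed comparison maps to one of the DMWARN reasons. A worked example with made-up numbers, for a pool with sectors_per_block = 128 (so block_size = 128 << SECTOR_SHIFT = 65536 bytes):

        /* max_discard_sectors = 64 (< 128)       -> "max discard sectors smaller than a block" */
        /* discard_granularity = 131072 (> 65536) -> "discard granularity larger than a block"  */
        /* discard_granularity = 4096             -> passes: 65536 & (4096 - 1) == 0            */

Note that the factor test uses a power-of-two mask, so it is exact only while discard_granularity is itself a power of two, as advertised granularities generally are.
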
index 254d19268ad2fa0c7102014c997b75294450ba42..892ae2766aa6adad52b6d99030a1d8d3bcf2ac38 100644 (file)
@@ -718,8 +718,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
        v->hash_dev_block_bits = ffs(num) - 1;
 
        if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
-           num_ll << (v->data_dev_block_bits - SECTOR_SHIFT) !=
-           (sector_t)num_ll << (v->data_dev_block_bits - SECTOR_SHIFT)) {
+           (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
+           >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
                ti->error = "Invalid data blocks";
                r = -EINVAL;
                goto bad;
@@ -733,8 +733,8 @@ static int verity_ctr(struct dm_target *ti, unsigned argc, char **argv)
        }
 
        if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
-           num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT) !=
-           (sector_t)num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT)) {
+           (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
+           >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
                ti->error = "Invalid hash start";
                r = -EINVAL;
                goto bad;
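
The new form is a round-trip test: shift num_ll up, cast to sector_t, shift back down, and require the original value. It catches both truncation by a 32-bit sector_t and bits shifted off the top of the 64-bit value itself; the old formulation compared a value against itself whenever sector_t is 64 bits wide, so it could never fire there. A minimal illustration, assuming a 64-bit sector_t and 4096-byte blocks (shift = 12 - SECTOR_SHIFT = 3):

        unsigned long long num_ll = 1ULL << 62;
        /* num_ll << 3 wraps to 0 in 64 bits, and 0 >> 3 == 0 != num_ll,
         * so the argument is rejected; the old check compared 0 != 0
         * and accepted the bogus value. */
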
index 4e09b6ff5b493403d4e51be0b4fee2d99643e276..67ffa391edcf1e70a0cc94e7085a1e99abd7a151 100644 (file)
@@ -865,10 +865,14 @@ static void dm_done(struct request *clone, int error, bool mapped)
 {
        int r = error;
        struct dm_rq_target_io *tio = clone->end_io_data;
-       dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;
+       dm_request_endio_fn rq_end_io = NULL;
 
-       if (mapped && rq_end_io)
-               r = rq_end_io(tio->ti, clone, error, &tio->info);
+       if (tio->ti) {
+               rq_end_io = tio->ti->type->rq_end_io;
+
+               if (mapped && rq_end_io)
+                       r = rq_end_io(tio->ti, clone, error, &tio->info);
+       }
 
        if (r <= 0)
                /* The target wants to complete the I/O */
@@ -1588,15 +1592,6 @@ static int map_request(struct dm_target *ti, struct request *clone,
        int r, requeued = 0;
        struct dm_rq_target_io *tio = clone->end_io_data;
 
-       /*
-        * Hold the md reference here for the in-flight I/O.
-        * We can't rely on the reference count by device opener,
-        * because the device may be closed during the request completion
-        * when all bios are completed.
-        * See the comment in rq_completed() too.
-        */
-       dm_get(md);
-
        tio->ti = ti;
        r = ti->type->map_rq(ti, clone, &tio->info);
        switch (r) {
@@ -1628,6 +1623,26 @@ static int map_request(struct dm_target *ti, struct request *clone,
        return requeued;
 }
 
+static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
+{
+       struct request *clone;
+
+       blk_start_request(orig);
+       clone = orig->special;
+       atomic_inc(&md->pending[rq_data_dir(clone)]);
+
+       /*
+        * Hold the md reference here for the in-flight I/O.
+        * We can't rely on the reference count by device opener,
+        * because the device may be closed during the request completion
+        * when all bios are completed.
+        * See the comment in rq_completed() too.
+        */
+       dm_get(md);
+
+       return clone;
+}
+
 /*
  * q->request_fn for request-based dm.
  * Called with the queue lock held.
@@ -1657,14 +1672,21 @@ static void dm_request_fn(struct request_queue *q)
                        pos = blk_rq_pos(rq);
 
                ti = dm_table_find_target(map, pos);
-               BUG_ON(!dm_target_is_valid(ti));
+               if (!dm_target_is_valid(ti)) {
+                       /*
+                        * Must perform the setup that dm_done() requires
+                        * before calling dm_kill_unmapped_request().
+                        */
+                       DMERR_LIMIT("request attempted access beyond the end of device");
+                       clone = dm_start_request(md, rq);
+                       dm_kill_unmapped_request(clone, -EIO);
+                       continue;
+               }
 
                if (ti->type->busy && ti->type->busy(ti))
                        goto delay_and_out;
 
-               blk_start_request(rq);
-               clone = rq->special;
-               atomic_inc(&md->pending[rq_data_dir(clone)]);
+               clone = dm_start_request(md, rq);
 
                spin_unlock(q->queue_lock);
                if (map_request(ti, clone, md))
@@ -1684,8 +1706,6 @@ delay_and_out:
        blk_delay_queue(q, HZ / 10);
 out:
        dm_table_put(map);
-
-       return;
 }
 
 int dm_underlying_device_busy(struct request_queue *q)
@@ -2409,7 +2429,7 @@ static void dm_queue_flush(struct mapped_device *md)
  */
 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
 {
-       struct dm_table *map = ERR_PTR(-EINVAL);
+       struct dm_table *live_map, *map = ERR_PTR(-EINVAL);
        struct queue_limits limits;
        int r;
 
@@ -2419,6 +2439,19 @@ struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
        if (!dm_suspended_md(md))
                goto out;
 
+       /*
+        * If the new table has no data devices, retain the existing limits.
+        * This helps multipath with queue_if_no_path: if all paths
+        * disappear, new I/O is queued based on these limits, and it
+        * can be serviced once some paths reappear.
+        */
+       if (dm_table_has_no_data_devices(table)) {
+               live_map = dm_get_live_table(md);
+               if (live_map)
+                       limits = md->queue->limits;
+               dm_table_put(live_map);
+       }
+
        r = dm_calculate_queue_limits(table, &limits);
        if (r) {
                map = ERR_PTR(r);
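
dm_start_request() now owns the three steps that put a request in flight: blk_start_request(), the per-direction pending count, and the md reference that map_request() used to take. That is what lets the new out-of-range branch in dm_request_fn() fail cleanly, since a request must be started and counted before dm_kill_unmapped_request() can complete it. Both call sites reduce to the same shape:

        clone = dm_start_request(md, rq);       /* start rq, count it, hold md  */
        dm_kill_unmapped_request(clone, -EIO);  /* error path: bad target       */
        /* ...or, on the normal path... */
        map_request(ti, clone, md);             /* hand the clone to the target */
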
index 52eef493d2669290eaa3f2a22e46717dac9aa1eb..6a99fefaa74306aa6e586c47c3eacf33706556b1 100644 (file)
@@ -54,6 +54,7 @@ void dm_table_event_callback(struct dm_table *t,
                             void (*fn)(void *), void *context);
 struct dm_target *dm_table_get_target(struct dm_table *t, unsigned int index);
 struct dm_target *dm_table_find_target(struct dm_table *t, sector_t sector);
+bool dm_table_has_no_data_devices(struct dm_table *table);
 int dm_calculate_queue_limits(struct dm_table *table,
                              struct queue_limits *limits);
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
index 1c2eb38f3c51a6dcc93148ca34b1a6a1de7aa465..0138a727c1f3c220bc91a02c9bdf720599465eb9 100644 (file)
@@ -1512,14 +1512,16 @@ static int _enough(struct r10conf *conf, struct geom *geo, int ignore)
        do {
                int n = conf->copies;
                int cnt = 0;
+               int this = first;
                while (n--) {
-                       if (conf->mirrors[first].rdev &&
-                           first != ignore)
+                       if (conf->mirrors[this].rdev &&
+                           this != ignore)
                                cnt++;
-                       first = (first+1) % geo->raid_disks;
+                       this = (this+1) % geo->raid_disks;
                }
                if (cnt == 0)
                        return 0;
+               first = (first + geo->near_copies) % geo->raid_disks;
        } while (first != 0);
        return 1;
 }
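
_enough() has to examine every set of disks that can jointly hold all copies of some block, and those sets begin near_copies disks apart, not copies apart. The old loop advanced first inside the inner walk, so the outer loop effectively stepped by conf->copies; the scratch index this keeps first stable and the explicit step by geo->near_copies visits each set. A hypothetical case the old code missed: near_copies = 1, far_copies = 2, raid_disks = 4 (copies = 2), where only the sets starting at disks 0 and 2 were tested, so losing disks 1 and 2, which between them hold both copies of some blocks, was wrongly reported as survivable.
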
index 7031b865b3a030488614764528295d509c663ba5..0689173fd9f568583708c53396631ac8e1838c55 100644 (file)
@@ -1591,6 +1591,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
                #ifdef CONFIG_MULTICORE_RAID456
                init_waitqueue_head(&nsh->ops.wait_for_ops);
                #endif
+               spin_lock_init(&nsh->stripe_lock);
 
                list_add(&nsh->lru, &newstripes);
        }
index f2f482bec5736b21a562da5e4fda11375e8cf457..a6e74514e6624e95f8e6e56fb319d567b1011f78 100644 (file)
@@ -1123,6 +1123,33 @@ static unsigned long mtdchar_get_unmapped_area(struct file *file,
 }
 #endif
 
+static inline unsigned long get_vm_size(struct vm_area_struct *vma)
+{
+       return vma->vm_end - vma->vm_start;
+}
+
+static inline resource_size_t get_vm_offset(struct vm_area_struct *vma)
+{
+       return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT;
+}
+
+/*
+ * Set a new vm offset.
+ *
+ * Verify that the incoming offset really works as a page offset,
+ * and that the offset and size fit in a resource_size_t.
+ */
+static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off)
+{
+       pgoff_t pgoff = off >> PAGE_SHIFT;
+       if (off != (resource_size_t) pgoff << PAGE_SHIFT)
+               return -EINVAL;
+       if (off + get_vm_size(vma) - 1 < off)
+               return -EINVAL;
+       vma->vm_pgoff = pgoff;
+       return 0;
+}
+
 /*
  * set up a mapping for shared memory segments
  */
@@ -1132,20 +1159,29 @@ static int mtdchar_mmap(struct file *file, struct vm_area_struct *vma)
        struct mtd_file_info *mfi = file->private_data;
        struct mtd_info *mtd = mfi->mtd;
        struct map_info *map = mtd->priv;
-       unsigned long start;
-       unsigned long off;
-       u32 len;
+       resource_size_t start, off;
+       unsigned long len, vma_len;
 
        if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) {
-               off = vma->vm_pgoff << PAGE_SHIFT;
+               off = get_vm_offset(vma);
                start = map->phys;
                len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size);
                start &= PAGE_MASK;
-               if ((vma->vm_end - vma->vm_start + off) > len)
+               vma_len = get_vm_size(vma);
+
+               /* Overflow in off + vma_len? */
+               if (vma_len + off < off)
+                       return -EINVAL;
+               /* Does it fit in the mapping? */
+               if (vma_len + off > len)
                        return -EINVAL;
 
                off += start;
-               vma->vm_pgoff = off >> PAGE_SHIFT;
+               /* Did that overflow? */
+               if (off < start)
+                       return -EINVAL;
+               if (set_vm_offset(vma, off) < 0)
+                       return -EINVAL;
                vma->vm_flags |= VM_IO | VM_RESERVED;
 
 #ifdef pgprot_noncached
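
Every arithmetic step on the mapping offset is now checked for wrap-around before it is trusted. With hypothetical numbers, taking PAGE_SHIFT = 12, a 32-bit pgoff_t and a 64-bit resource_size_t:

        /* set_vm_offset() round trip: off = 1ULL << 44 gives off >> 12 = 1 << 32,
         * which truncates to 0 in a 32-bit pgoff_t, and 0 << 12 != off -> -EINVAL */
        /* end-of-range guard: off + get_vm_size(vma) - 1 < off only when the end
         * address wraps past the top of resource_size_t                -> -EINVAL */
        /* in mtdchar_mmap(): vma_len + off is first checked for wrap, then checked
         * against len, and off += start is re-checked for wrap         -> -EINVAL */
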
index 79cebd8525ce3d451e30b935484dac18535939f2..e48312f2305db18a08b5744a01013455a7b732ab 100644 (file)
@@ -8564,7 +8564,7 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        return 0;
 
 error:
-       iounmap(bp->regview);
+       pci_iounmap(pdev, bp->regview);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
index c42bbb16cdaebdeb7147dc887705abcfe91e86e0..a688a2ddcfd612866edbce5f8ee08b7f92fbd8e0 100644 (file)
@@ -722,10 +722,8 @@ static int octeon_mgmt_init_phy(struct net_device *netdev)
                                   octeon_mgmt_adjust_link, 0,
                                   PHY_INTERFACE_MODE_MII);
 
-       if (IS_ERR(p->phydev)) {
-               p->phydev = NULL;
+       if (!p->phydev)
                return -1;
-       }
 
        phy_start_aneg(p->phydev);
 
index e559dfa06d6ace1c349cf48efe33738ef642e778..6fa74d530e44d390ce87946b8327a476e5a728e5 100644 (file)
@@ -1101,9 +1101,9 @@ static int pasemi_mac_phy_init(struct net_device *dev)
        phydev = of_phy_connect(dev, phy_dn, &pasemi_adjust_link, 0,
                                PHY_INTERFACE_MODE_SGMII);
 
-       if (IS_ERR(phydev)) {
+       if (!phydev) {
                printk(KERN_ERR "%s: Could not attach to phy\n", dev->name);
-               return PTR_ERR(phydev);
+               return -ENODEV;
        }
 
        mac->phydev = phydev;
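
These two hunks fix the same API mismatch: of_phy_connect() returns NULL on failure rather than an ERR_PTR, so the IS_ERR() tests could never trigger and a failed connect left a NULL phydev in use. The corrected pattern (arguments illustrative):

        phydev = of_phy_connect(dev, phy_dn, &adjust_link_fn, 0,
                                PHY_INTERFACE_MODE_SGMII);
        if (!phydev)
                return -ENODEV;
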
index b8ead696141efbaf452f3a46c23dfba1f199f2fb..2a179d087207e4375afb0d66f712043a10417786 100644 (file)
@@ -15,7 +15,7 @@ qlcnic_poll_rsp(struct qlcnic_adapter *adapter)
 
        do {
                /* give at least 1ms for firmware to respond */
-               msleep(1);
+               mdelay(1);
 
                if (++timeout > QLCNIC_OS_CRB_RETRY_COUNT)
                        return QLCNIC_CDRP_RSP_TIMEOUT;
@@ -601,7 +601,7 @@ void qlcnic_fw_destroy_ctx(struct qlcnic_adapter *adapter)
                qlcnic_fw_cmd_destroy_tx_ctx(adapter);
 
                /* Allow dma queues to drain after context reset */
-               msleep(20);
+               mdelay(20);
        }
 }
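
msleep() sleeps and therefore must not be called in atomic context, while mdelay() busy-waits and is safe (if CPU-hungry) under a spinlock. The diff itself only shows the substitution; presumably these firmware-polling and drain paths can be entered with a lock held, which is the usual reason for this change.
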
 
index 2346b38b9837101deae4848443da3d5b6be7cdeb..799789518e873edf273c95be6ec1c3b59ff4aa80 100644 (file)
@@ -229,3 +229,5 @@ static void __exit bcm87xx_exit(void)
                ARRAY_SIZE(bcm87xx_driver));
 }
 module_exit(bcm87xx_exit);
+
+MODULE_LICENSE("GPL");
index cf287e0eb4088574530bdb65b86d0276b589b744..2165d5fdb8c0f08675772f8dded60f3a15ac34ba 100644 (file)
 #include <linux/phy.h>
 #include <linux/micrel_phy.h>
 
+/* Operation Mode Strap Override */
+#define MII_KSZPHY_OMSO                                0x16
+#define KSZPHY_OMSO_B_CAST_OFF                 (1 << 9)
+#define KSZPHY_OMSO_RMII_OVERRIDE              (1 << 1)
+#define KSZPHY_OMSO_MII_OVERRIDE               (1 << 0)
+
 /* general Interrupt control/status reg in vendor specific block. */
 #define MII_KSZPHY_INTCS                       0x1B
 #define        KSZPHY_INTCS_JABBER                     (1 << 15)
@@ -101,6 +107,13 @@ static int kszphy_config_init(struct phy_device *phydev)
        return 0;
 }
 
+static int ksz8021_config_init(struct phy_device *phydev)
+{
+       const u16 val = KSZPHY_OMSO_B_CAST_OFF | KSZPHY_OMSO_RMII_OVERRIDE;
+       phy_write(phydev, MII_KSZPHY_OMSO, val);
+       return 0;
+}
+
 static int ks8051_config_init(struct phy_device *phydev)
 {
        int regval;
@@ -128,9 +141,22 @@ static struct phy_driver ksphy_driver[] = {
        .config_intr    = ks8737_config_intr,
        .driver         = { .owner = THIS_MODULE,},
 }, {
-       .phy_id         = PHY_ID_KS8041,
+       .phy_id         = PHY_ID_KSZ8021,
+       .phy_id_mask    = 0x00ffffff,
+       .name           = "Micrel KSZ8021",
+       .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause |
+                          SUPPORTED_Asym_Pause),
+       .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+       .config_init    = ksz8021_config_init,
+       .config_aneg    = genphy_config_aneg,
+       .read_status    = genphy_read_status,
+       .ack_interrupt  = kszphy_ack_interrupt,
+       .config_intr    = kszphy_config_intr,
+       .driver         = { .owner = THIS_MODULE,},
+}, {
+       .phy_id         = PHY_ID_KSZ8041,
        .phy_id_mask    = 0x00fffff0,
-       .name           = "Micrel KS8041",
+       .name           = "Micrel KSZ8041",
        .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -141,9 +167,9 @@ static struct phy_driver ksphy_driver[] = {
        .config_intr    = kszphy_config_intr,
        .driver         = { .owner = THIS_MODULE,},
 }, {
-       .phy_id         = PHY_ID_KS8051,
+       .phy_id         = PHY_ID_KSZ8051,
        .phy_id_mask    = 0x00fffff0,
-       .name           = "Micrel KS8051",
+       .name           = "Micrel KSZ8051",
        .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause
                                | SUPPORTED_Asym_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -154,8 +180,8 @@ static struct phy_driver ksphy_driver[] = {
        .config_intr    = kszphy_config_intr,
        .driver         = { .owner = THIS_MODULE,},
 }, {
-       .phy_id         = PHY_ID_KS8001,
-       .name           = "Micrel KS8001 or KS8721",
+       .phy_id         = PHY_ID_KSZ8001,
+       .name           = "Micrel KSZ8001 or KS8721",
        .phy_id_mask    = 0x00ffffff,
        .features       = (PHY_BASIC_FEATURES | SUPPORTED_Pause),
        .flags          = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
@@ -201,10 +227,11 @@ MODULE_LICENSE("GPL");
 
 static struct mdio_device_id __maybe_unused micrel_tbl[] = {
        { PHY_ID_KSZ9021, 0x000ffffe },
-       { PHY_ID_KS8001, 0x00ffffff },
+       { PHY_ID_KSZ8001, 0x00ffffff },
        { PHY_ID_KS8737, 0x00fffff0 },
-       { PHY_ID_KS8041, 0x00fffff0 },
-       { PHY_ID_KS8051, 0x00fffff0 },
+       { PHY_ID_KSZ8021, 0x00ffffff },
+       { PHY_ID_KSZ8041, 0x00fffff0 },
+       { PHY_ID_KSZ8051, 0x00fffff0 },
        { }
 };
 
index 6d6192316b30988ed742cf7261de5c5daaa310f2..88e3991464e7e1531934bcb562718e97abcbfb9b 100644 (file)
@@ -56,6 +56,32 @@ static int smsc_phy_config_init(struct phy_device *phydev)
        return smsc_phy_ack_interrupt (phydev);
 }
 
+static int lan87xx_config_init(struct phy_device *phydev)
+{
+       /*
+        * Make sure the EDPWRDOWN bit is NOT set. Setting this bit on
+        * LAN8710/LAN8720 PHY causes the PHY to misbehave, likely due
+        * to a bug on the chip.
+        *
+        * When the system is powered on with the network cable
+        * disconnected, and it stays disconnected until after ifconfig
+        * ethX up is issued for the LAN port with this PHY, connecting
+        * the cable afterwards never triggers LINK change detection,
+        * while the expected behavior is for Link UP to be detected.
+        */
+       int rc = phy_read(phydev, MII_LAN83C185_CTRL_STATUS);
+       if (rc < 0)
+               return rc;
+
+       rc &= ~MII_LAN83C185_EDPWRDOWN;
+
+       rc = phy_write(phydev, MII_LAN83C185_CTRL_STATUS, rc);
+       if (rc < 0)
+               return rc;
+
+       return smsc_phy_ack_interrupt(phydev);
+}
+
 static int lan911x_config_init(struct phy_device *phydev)
 {
        return smsc_phy_ack_interrupt(phydev);
@@ -162,7 +188,7 @@ static struct phy_driver smsc_phy_driver[] = {
        /* basic functions */
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
-       .config_init    = smsc_phy_config_init,
+       .config_init    = lan87xx_config_init,
 
        /* IRQ related */
        .ack_interrupt  = smsc_phy_ack_interrupt,
index cbf7047decc04340d39be3daa95359469b9e73ec..20f31d0d1536c36658b481fa083eb6b445637ef7 100644 (file)
@@ -570,7 +570,7 @@ static int pppoe_release(struct socket *sock)
 
        po = pppox_sk(sk);
 
-       if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) {
+       if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND | PPPOX_ZOMBIE)) {
                dev_put(po->pppoe_dev);
                po->pppoe_dev = NULL;
        }
index 341b65dbbcd324d9512f957fc7b663481a073aea..f8cd61f449a4772da5a50a446cd07fc80d5928c8 100644 (file)
@@ -848,7 +848,7 @@ static struct netpoll_info *team_netpoll_info(struct team *team)
 }
 #endif
 
-static void __team_port_change_check(struct team_port *port, bool linkup);
+static void __team_port_change_port_added(struct team_port *port, bool linkup);
 
 static int team_port_add(struct team *team, struct net_device *port_dev)
 {
@@ -948,7 +948,7 @@ static int team_port_add(struct team *team, struct net_device *port_dev)
        team_port_enable(team, port);
        list_add_tail_rcu(&port->list, &team->port_list);
        __team_compute_features(team);
-       __team_port_change_check(port, !!netif_carrier_ok(port_dev));
+       __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
        __team_options_change_check(team);
 
        netdev_info(dev, "Port device %s added\n", portname);
@@ -983,6 +983,8 @@ err_set_mtu:
        return err;
 }
 
+static void __team_port_change_port_removed(struct team_port *port);
+
 static int team_port_del(struct team *team, struct net_device *port_dev)
 {
        struct net_device *dev = team->dev;
@@ -999,8 +1001,7 @@ static int team_port_del(struct team *team, struct net_device *port_dev)
        __team_option_inst_mark_removed_port(team, port);
        __team_options_change_check(team);
        __team_option_inst_del_port(team, port);
-       port->removed = true;
-       __team_port_change_check(port, false);
+       __team_port_change_port_removed(port);
        team_port_disable(team, port);
        list_del_rcu(&port->list);
        netdev_rx_handler_unregister(port_dev);
@@ -1652,8 +1653,8 @@ static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
 
        hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
                          &team_nl_family, 0, TEAM_CMD_NOOP);
-       if (IS_ERR(hdr)) {
-               err = PTR_ERR(hdr);
+       if (!hdr) {
+               err = -EMSGSIZE;
                goto err_msg_put;
        }
 
@@ -1847,8 +1848,8 @@ start_again:
 
        hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags | NLM_F_MULTI,
                          TEAM_CMD_OPTIONS_GET);
-       if (IS_ERR(hdr))
-               return PTR_ERR(hdr);
+       if (!hdr)
+               return -EMSGSIZE;
 
        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
                goto nla_put_failure;
@@ -2067,8 +2068,8 @@ static int team_nl_fill_port_list_get(struct sk_buff *skb,
 
        hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags,
                          TEAM_CMD_PORT_LIST_GET);
-       if (IS_ERR(hdr))
-               return PTR_ERR(hdr);
+       if (!hdr)
+               return -EMSGSIZE;
 
        if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
                goto nla_put_failure;
@@ -2251,13 +2252,11 @@ static void __team_options_change_check(struct team *team)
 }
 
 /* rtnl lock is held */
-static void __team_port_change_check(struct team_port *port, bool linkup)
+
+static void __team_port_change_send(struct team_port *port, bool linkup)
 {
        int err;
 
-       if (!port->removed && port->state.linkup == linkup)
-               return;
-
        port->changed = true;
        port->state.linkup = linkup;
        team_refresh_port_linkup(port);
@@ -2282,6 +2281,23 @@ send_event:
 
 }
 
+static void __team_port_change_check(struct team_port *port, bool linkup)
+{
+       if (port->state.linkup != linkup)
+               __team_port_change_send(port, linkup);
+}
+
+static void __team_port_change_port_added(struct team_port *port, bool linkup)
+{
+       __team_port_change_send(port, linkup);
+}
+
+static void __team_port_change_port_removed(struct team_port *port)
+{
+       port->removed = true;
+       __team_port_change_send(port, false);
+}
+
 static void team_port_change_check(struct team_port *port, bool linkup)
 {
        struct team *team = port->team;
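
Like the phy fixes earlier, these hunks correct a NULL-versus-ERR_PTR confusion: genlmsg_put() returns NULL when the message buffer has no room for the header, so the IS_ERR()/PTR_ERR() handling never reported the failure. The corrected pattern, returning the conventional -EMSGSIZE:

        hdr = genlmsg_put(skb, pid, seq, &team_nl_family, flags, cmd);
        if (!hdr)
                return -EMSGSIZE;
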
index f5ab6e613ec8dcddf6b36e25a1dd0f79c50849db..376143e8a1aaf6f78ee44fb76888922064c46dd1 100644 (file)
@@ -1253,6 +1253,7 @@ static struct usb_driver smsc75xx_driver = {
        .probe          = usbnet_probe,
        .suspend        = usbnet_suspend,
        .resume         = usbnet_resume,
+       .reset_resume   = usbnet_resume,
        .disconnect     = usbnet_disconnect,
        .disable_hub_initiated_lpm = 1,
 };
index 1e86ea2266d46971844ba5c66a2fb1142a47609d..dbeebef562d5ca0bca0c9b3ac391747ebfe02973 100644 (file)
@@ -1442,6 +1442,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
        return err;
 
 err_free_irq:
+       trans_pcie->irq_requested = false;
        free_irq(trans_pcie->irq, trans);
 error:
        iwl_free_isr_ict(trans);
index a3ac39b79192a4c8f2de0d063054f5477a1f257a..0646bf6e7889a10ed6e4773af378c264377d5f8b 100644 (file)
@@ -208,6 +208,8 @@ static int sh_pfc_gpio_request_enable(struct pinctrl_dev *pctldev,
 
                break;
        case PINMUX_TYPE_GPIO:
+       case PINMUX_TYPE_INPUT:
+       case PINMUX_TYPE_OUTPUT:
                break;
        default:
                pr_err("Unsupported mux type (%d), bailing...\n", pinmux_type);
index d9569658476274f34d8a51726da828d87bf44831..3440812b4a849c3d2f372e022f730ade5f5e3e6b 100644 (file)
@@ -624,7 +624,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf,
        /* print devices for all busses */
        list_for_each_entry(bus, &usb_bus_list, bus_list) {
                /* recurse through all children of the root hub */
-               if (!bus->root_hub)
+               if (!bus_to_hcd(bus)->rh_registered)
                        continue;
                usb_lock_device(bus->root_hub);
                ret = usb_device_dump(&buf, &nbytes, &skip_bytes, ppos,
index bc84106ac057d7affb0e4d7c5c6ce22dcc2b7943..75ba2091f9b4f253a699563e22a9c6fd80c12676 100644 (file)
@@ -1011,10 +1011,7 @@ static int register_root_hub(struct usb_hcd *hcd)
        if (retval) {
                dev_err (parent_dev, "can't register root hub for %s, %d\n",
                                dev_name(&usb_dev->dev), retval);
-       }
-       mutex_unlock(&usb_bus_list_lock);
-
-       if (retval == 0) {
+       } else {
                spin_lock_irq (&hcd_root_hub_lock);
                hcd->rh_registered = 1;
                spin_unlock_irq (&hcd_root_hub_lock);
@@ -1023,6 +1020,7 @@ static int register_root_hub(struct usb_hcd *hcd)
                if (HCD_DEAD(hcd))
                        usb_hc_died (hcd);      /* This time clean up */
        }
+       mutex_unlock(&usb_bus_list_lock);
 
        return retval;
 }
index aaa8d2bce21702aa8d7844bb343de68bd30f0a3d..0bf72f943b00d392654fdc9613e4f397bfac2d32 100644 (file)
@@ -467,7 +467,8 @@ static irqreturn_t ohci_hcd_at91_overcurrent_irq(int irq, void *data)
        /* Find the port that corresponds to the GPIO signalling
         * the over-current condition */
        at91_for_each_port(port) {
-               if (gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
+               if (gpio_is_valid(pdata->overcurrent_pin[port]) &&
+                               gpio_to_irq(pdata->overcurrent_pin[port]) == irq) {
                        gpio = pdata->overcurrent_pin[port];
                        break;
                }
index 211a4920b88a577216ed9905db510c691cdb4d84..d8dedc7d3910c7bdc362dac6e099326642f0f1fb 100644 (file)
@@ -76,9 +76,24 @@ static int virqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
                        schedule_work(&virqfd->inject);
        }
 
-       if (flags & POLLHUP)
-               /* The eventfd is closing, detach from VFIO */
-               virqfd_deactivate(virqfd);
+       if (flags & POLLHUP) {
+               unsigned long flags;
+               spin_lock_irqsave(&virqfd->vdev->irqlock, flags);
+
+               /*
+                * The eventfd is closing, if the virqfd has not yet been
+                * queued for release, as determined by testing whether the
+                * vdev pointer to it is still valid, queue it now.  As
+                * with kvm irqfds, we know we won't race against the virqfd
+                * going away because we hold wqh->lock to get here.
+                */
+               if (*(virqfd->pvirqfd) == virqfd) {
+                       *(virqfd->pvirqfd) = NULL;
+                       virqfd_deactivate(virqfd);
+               }
+
+               spin_unlock_irqrestore(&virqfd->vdev->irqlock, flags);
+       }
 
        return 0;
 }
@@ -93,7 +108,6 @@ static void virqfd_ptable_queue_proc(struct file *file,
 static void virqfd_shutdown(struct work_struct *work)
 {
        struct virqfd *virqfd = container_of(work, struct virqfd, shutdown);
-       struct virqfd **pvirqfd = virqfd->pvirqfd;
        u64 cnt;
 
        eventfd_ctx_remove_wait_queue(virqfd->eventfd, &virqfd->wait, &cnt);
@@ -101,7 +115,6 @@ static void virqfd_shutdown(struct work_struct *work)
        eventfd_ctx_put(virqfd->eventfd);
 
        kfree(virqfd);
-       *pvirqfd = NULL;
 }
 
 static void virqfd_inject(struct work_struct *work)
@@ -122,15 +135,11 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
        int ret = 0;
        unsigned int events;
 
-       if (*pvirqfd)
-               return -EBUSY;
-
        virqfd = kzalloc(sizeof(*virqfd), GFP_KERNEL);
        if (!virqfd)
                return -ENOMEM;
 
        virqfd->pvirqfd = pvirqfd;
-       *pvirqfd = virqfd;
        virqfd->vdev = vdev;
        virqfd->handler = handler;
        virqfd->thread = thread;
@@ -153,6 +162,23 @@ static int virqfd_enable(struct vfio_pci_device *vdev,
 
        virqfd->eventfd = ctx;
 
+       /*
+        * virqfds can be released by closing the eventfd or directly
+        * through ioctl.  These are both done through a workqueue, so
+        * we update the pointer to the virqfd under lock to avoid
+        * pushing multiple jobs to release the same virqfd.
+        */
+       spin_lock_irq(&vdev->irqlock);
+
+       if (*pvirqfd) {
+               spin_unlock_irq(&vdev->irqlock);
+               ret = -EBUSY;
+               goto fail;
+       }
+       *pvirqfd = virqfd;
+
+       spin_unlock_irq(&vdev->irqlock);
+
        /*
         * Install our own custom wake-up handling so we are notified via
         * a callback whenever someone signals the underlying eventfd.
@@ -187,19 +213,29 @@ fail:
                fput(file);
 
        kfree(virqfd);
-       *pvirqfd = NULL;
 
        return ret;
 }
 
-static void virqfd_disable(struct virqfd *virqfd)
+static void virqfd_disable(struct vfio_pci_device *vdev,
+                          struct virqfd **pvirqfd)
 {
-       if (!virqfd)
-               return;
+       unsigned long flags;
+
+       spin_lock_irqsave(&vdev->irqlock, flags);
+
+       if (*pvirqfd) {
+               virqfd_deactivate(*pvirqfd);
+               *pvirqfd = NULL;
+       }
 
-       virqfd_deactivate(virqfd);
+       spin_unlock_irqrestore(&vdev->irqlock, flags);
 
-       /* Block until we know all outstanding shutdown jobs have completed. */
+       /*
+        * Block until we know all outstanding shutdown jobs have completed.
+        * Even if we don't queue the job here, flush the wq so that a
+        * job queued from the POLLHUP path has finished releasing it.
+        */
        flush_workqueue(vfio_irqfd_cleanup_wq);
 }
 
@@ -392,8 +428,8 @@ static int vfio_intx_set_signal(struct vfio_pci_device *vdev, int fd)
 static void vfio_intx_disable(struct vfio_pci_device *vdev)
 {
        vfio_intx_set_signal(vdev, -1);
-       virqfd_disable(vdev->ctx[0].unmask);
-       virqfd_disable(vdev->ctx[0].mask);
+       virqfd_disable(vdev, &vdev->ctx[0].unmask);
+       virqfd_disable(vdev, &vdev->ctx[0].mask);
        vdev->irq_type = VFIO_PCI_NUM_IRQS;
        vdev->num_ctx = 0;
        kfree(vdev->ctx);
@@ -539,8 +575,8 @@ static void vfio_msi_disable(struct vfio_pci_device *vdev, bool msix)
        vfio_msi_set_block(vdev, 0, vdev->num_ctx, NULL, msix);
 
        for (i = 0; i < vdev->num_ctx; i++) {
-               virqfd_disable(vdev->ctx[i].unmask);
-               virqfd_disable(vdev->ctx[i].mask);
+               virqfd_disable(vdev, &vdev->ctx[i].unmask);
+               virqfd_disable(vdev, &vdev->ctx[i].mask);
        }
 
        if (msix) {
@@ -577,7 +613,7 @@ static int vfio_pci_set_intx_unmask(struct vfio_pci_device *vdev,
                                             vfio_send_intx_eventfd, NULL,
                                             &vdev->ctx[0].unmask, fd);
 
-               virqfd_disable(vdev->ctx[0].unmask);
+               virqfd_disable(vdev, &vdev->ctx[0].unmask);
        }
 
        return 0;
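
The reworked lifecycle makes vdev->irqlock the single arbiter of virqfd teardown: virqfd_enable() publishes the pointer under the lock (failing with -EBUSY if one is already published), and both release paths, the POLLHUP wakeup and virqfd_disable(), clear the pointer under the same lock before queuing the shutdown work. Whoever NULLs the pointer owns the release, so the work item is queued at most once per virqfd. The invariant, as it appears in the POLLHUP branch:

        spin_lock_irqsave(&vdev->irqlock, flags);
        if (*pvirqfd == virqfd) {            /* still published: we own teardown */
                *pvirqfd = NULL;
                virqfd_deactivate(virqfd);   /* queues the shutdown work once    */
        }
        spin_unlock_irqrestore(&vdev->irqlock, flags);
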
index 16521a9f203859a28a8804d3fb2f5f4cb894ed7f..693f95bf1caeb8769517aa7046702f86b6494fa7 100644 (file)
@@ -1134,6 +1134,8 @@ positive:
        return 1;
 
 rename_retry:
+       if (locked)
+               goto again;
        locked = 1;
        write_seqlock(&rename_lock);
        goto again;
@@ -1141,7 +1143,7 @@ rename_retry:
 EXPORT_SYMBOL(have_submounts);
 
 /*
- * Search the dentry child list for the specified parent,
+ * Search the dentry child list of the specified parent,
  * and move any unused dentries to the end of the unused
  * list for prune_dcache(). We descend to the next level
  * whenever the d_subdirs list is non-empty and continue
@@ -1236,6 +1238,8 @@ out:
 rename_retry:
        if (found)
                return found;
+       if (locked)
+               goto again;
        locked = 1;
        write_seqlock(&rename_lock);
        goto again;
@@ -3035,6 +3039,8 @@ resume:
        return;
 
 rename_retry:
+       if (locked)
+               goto again;
        locked = 1;
        write_seqlock(&rename_lock);
        goto again;
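
These traversals can be restarted for two reasons: a concurrent rename, detected through rename_lock's seqcount and only possible while running unlocked, or ascending to a parent that went stale, which can happen even after rename_lock has been taken for write. Previously the second case executed write_seqlock() on a lock the walker already held and deadlocked; the added "if (locked) goto again;" retries the walk without reacquiring.
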
index fb1a2bedbe9789a8fcca5618b4637d70b25fb8ab..8d80c990dffdfc34b9f0168c39d208b99018c01e 100644 (file)
@@ -289,7 +289,6 @@ static void nlmsvc_free_block(struct kref *kref)
        dprintk("lockd: freeing block %p...\n", block);
 
        /* Remove block from file's list of blocks */
-       mutex_lock(&file->f_mutex);
        list_del_init(&block->b_flist);
        mutex_unlock(&file->f_mutex);
 
@@ -303,7 +302,7 @@ static void nlmsvc_free_block(struct kref *kref)
 static void nlmsvc_release_block(struct nlm_block *block)
 {
        if (block != NULL)
-               kref_put(&block->b_count, nlmsvc_free_block);
+               kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex);
 }
 
 /*
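
kref_put_mutex() closes the window the old sequence left open: with plain kref_put(), the count could reach zero before nlmsvc_free_block() took f_mutex, so another thread already holding f_mutex could still find the block on b_flist and take a reference to an object being freed. kref_put_mutex() acquires f_mutex before the final decrement and calls the release function with the mutex held, which is why nlmsvc_free_block() now only unlocks f_mutex after unlinking the block.
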
index 4d31f73e2561d4d0e19d85becc0223dda27b4710..7bdf7907413f0ea5f3c9e9c2de6b4002381e2064 100644 (file)
@@ -1886,8 +1886,14 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)
                return err;
 
        err = -EINVAL;
-       if (!(mnt_flags & MNT_SHRINKABLE) && !check_mnt(real_mount(path->mnt)))
-               goto unlock;
+       if (unlikely(!check_mnt(real_mount(path->mnt)))) {
+               /* that's acceptable only for automounts done in private ns */
+               if (!(mnt_flags & MNT_SHRINKABLE))
+                       goto unlock;
+               /* ... and for those we'd better have mountpoint still alive */
+               if (!real_mount(path->mnt)->mnt_ns)
+                       goto unlock;
+       }
 
        /* Refuse the same filesystem on the same mount point */
        err = -EBUSY;
index 991ef01cd77eac4fa6b9469dc2ad4c53640f3950..3748ec92dcbcdb988bad96406e7d53b664118152 100644 (file)
@@ -691,9 +691,11 @@ __SC_COMP(__NR_process_vm_readv, sys_process_vm_readv, \
 #define __NR_process_vm_writev 271
 __SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
           compat_sys_process_vm_writev)
+#define __NR_kcmp 272
+__SYSCALL(__NR_kcmp, sys_kcmp)
 
 #undef __NR_syscalls
-#define __NR_syscalls 272
+#define __NR_syscalls 273
 
 /*
  * All syscalls below here should go away really,
index 7e83370e6fd2b134691e7fa348529e70d0a1ae56..f3b99e1c1042ac4073b681ee30ff7ea1f0f9a3a3 100644 (file)
@@ -256,72 +256,78 @@ static inline void iommu_set_fault_handler(struct iommu_domain *domain,
 {
 }
 
-int iommu_attach_group(struct iommu_domain *domain, struct iommu_group *group)
+static inline int iommu_attach_group(struct iommu_domain *domain,
+                                    struct iommu_group *group)
 {
        return -ENODEV;
 }
 
-void iommu_detach_group(struct iommu_domain *domain, struct iommu_group *group)
+static inline void iommu_detach_group(struct iommu_domain *domain,
+                                     struct iommu_group *group)
 {
 }
 
-struct iommu_group *iommu_group_alloc(void)
+static inline struct iommu_group *iommu_group_alloc(void)
 {
        return ERR_PTR(-ENODEV);
 }
 
-void *iommu_group_get_iommudata(struct iommu_group *group)
+static inline void *iommu_group_get_iommudata(struct iommu_group *group)
 {
        return NULL;
 }
 
-void iommu_group_set_iommudata(struct iommu_group *group, void *iommu_data,
-                              void (*release)(void *iommu_data))
+static inline void iommu_group_set_iommudata(struct iommu_group *group,
+                                            void *iommu_data,
+                                            void (*release)(void *iommu_data))
 {
 }
 
-int iommu_group_set_name(struct iommu_group *group, const char *name)
+static inline int iommu_group_set_name(struct iommu_group *group,
+                                      const char *name)
 {
        return -ENODEV;
 }
 
-int iommu_group_add_device(struct iommu_group *group, struct device *dev)
+static inline int iommu_group_add_device(struct iommu_group *group,
+                                        struct device *dev)
 {
        return -ENODEV;
 }
 
-void iommu_group_remove_device(struct device *dev)
+static inline void iommu_group_remove_device(struct device *dev)
 {
 }
 
-int iommu_group_for_each_dev(struct iommu_group *group, void *data,
-                            int (*fn)(struct device *, void *))
+static inline int iommu_group_for_each_dev(struct iommu_group *group,
+                                          void *data,
+                                          int (*fn)(struct device *, void *))
 {
        return -ENODEV;
 }
 
-struct iommu_group *iommu_group_get(struct device *dev)
+static inline struct iommu_group *iommu_group_get(struct device *dev)
 {
        return NULL;
 }
 
-void iommu_group_put(struct iommu_group *group)
+static inline void iommu_group_put(struct iommu_group *group)
 {
 }
 
-int iommu_group_register_notifier(struct iommu_group *group,
-                                 struct notifier_block *nb)
+static inline int iommu_group_register_notifier(struct iommu_group *group,
+                                               struct notifier_block *nb)
 {
        return -ENODEV;
 }
 
-int iommu_group_unregister_notifier(struct iommu_group *group,
-                                   struct notifier_block *nb)
+static inline int iommu_group_unregister_notifier(struct iommu_group *group,
+                                                 struct notifier_block *nb)
 {
        return 0;
 }
 
-int iommu_group_id(struct iommu_group *group)
+static inline int iommu_group_id(struct iommu_group *group)
 {
        return -ENODEV;
 }
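
The whole block above converts the !CONFIG_IOMMU_API fallbacks from plain definitions, which would be emitted by every translation unit including the header and collide at link time, into static inline stubs. The pattern in miniature, with illustrative foo_* names:

    #ifdef CONFIG_FOO_API
    int foo_attach(struct foo_domain *dom, struct device *dev);
    #else
    static inline int foo_attach(struct foo_domain *dom, struct device *dev)
    {
            return -ENODEV; /* subsystem compiled out; no symbol is emitted */
    }
    #endif
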
index 61f0905bdc480b9078dc31e5d434158f4301df50..de201203bc7c833a09651f75678b833cfac0b880 100644 (file)
@@ -1,3 +1,15 @@
+/*
+ * include/linux/micrel_phy.h
+ *
+ * Micrel PHY IDs
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+
 #ifndef _MICREL_PHY_H
 #define _MICREL_PHY_H
 
 
 #define PHY_ID_KSZ9021         0x00221610
 #define PHY_ID_KS8737          0x00221720
-#define PHY_ID_KS8041          0x00221510
-#define PHY_ID_KS8051          0x00221550
+#define PHY_ID_KSZ8021         0x00221555
+#define PHY_ID_KSZ8041         0x00221510
+#define PHY_ID_KSZ8051         0x00221550
 /* both for ks8001 Rev. A/B, and for ks8721 Rev 3. */
-#define PHY_ID_KS8001          0x0022161A
+#define PHY_ID_KSZ8001         0x0022161A
 
 /* struct phy_device dev_flags definitions */
 #define MICREL_PHY_50MHZ_CLK   0x00000001
index 9490a00529f49aa7f50a8e94e7fd7961b2002319..c25cccaa555a420b5cc6fd26d5e83a89937470a6 100644 (file)
@@ -35,8 +35,10 @@ struct nvme_bar {
        __u64                   acq;    /* Admin CQ Base Address */
 };
 
+#define NVME_CAP_MQES(cap)     ((cap) & 0xffff)
 #define NVME_CAP_TIMEOUT(cap)  (((cap) >> 24) & 0xff)
 #define NVME_CAP_STRIDE(cap)   (((cap) >> 32) & 0xf)
+#define NVME_CAP_MPSMIN(cap)   (((cap) >> 48) & 0xf)
 
 enum {
        NVME_CC_ENABLE          = 1 << 0,
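
A hedged sketch of how a driver might decode the 64-bit CAP register with the macros added above; the dev/bar naming is illustrative, and the unit conversions (MQES is zero-based, MPSMIN is a power-of-two exponent above 4 KiB, the timeout field counts 500 ms units) follow the NVMe spec:

    u64 cap = readq(&dev->bar->cap);
    u32 max_queue_entries = NVME_CAP_MQES(cap) + 1;         /* MQES is zero-based */
    unsigned int min_page_size = 1u << (12 + NVME_CAP_MPSMIN(cap));
    unsigned int ready_timeout_ms = NVME_CAP_TIMEOUT(cap) * 500;
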
index 3dea6a9d568f416ccd1b704eec1d4bff9dea6d90..d143b8e01954ab14ac224d9894bf65e111ee97fb 100644 (file)
@@ -118,6 +118,7 @@ void reset_security_ops(void);
 extern unsigned long mmap_min_addr;
 extern unsigned long dac_mmap_min_addr;
 #else
+#define mmap_min_addr          0UL
 #define dac_mmap_min_addr      0UL
 #endif
 
index 66ce414891330964aacb48dee8079138034cad3d..b9087bff008be9ef4b4eb2ebb7acea898b69be8d 100644 (file)
@@ -120,11 +120,6 @@ static const char *type2name[4] = { "single", "page",
 static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
                                   "DMA_FROM_DEVICE", "DMA_NONE" };
 
-/* little merge helper - remove it after the merge window */
-#ifndef BUS_NOTIFY_UNBOUND_DRIVER
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x0005
-#endif
-
 /*
  * The access to some variables in this macro is racy. We can't use atomic_t
  * here because all these variables are exported to debugfs. Some of them even
index c785554f95237897edc7e86f7c3ea6f1811b9bbf..ebf3bac460b01c3638e87d23975426f448d58064 100644 (file)
@@ -62,7 +62,7 @@ void fprop_global_destroy(struct fprop_global *p)
  */
 bool fprop_new_period(struct fprop_global *p, int periods)
 {
-       u64 events;
+       s64 events;
        unsigned long flags;
 
        local_irq_save(flags);
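
The u64-to-s64 switch matters because the value comes from percpu_counter_sum(), which returns s64 and can be transiently negative under concurrent updates; read as u64, a negative sum wraps to a huge positive number. A hedged reconstruction of the affected lines:

    s64 events = percpu_counter_sum(&p->events);

    if (events <= 1)        /* as u64, a negative sum would skip this early-out */
            return false;
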
index 57c4b93090151f2acbc1271b7b214fe5bc96478c..141dbb695097c1f0674b8978456eb1d0c98e3e67 100644 (file)
@@ -1811,7 +1811,6 @@ static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
                        src_page = pte_page(pteval);
                        copy_user_highpage(page, src_page, address, vma);
                        VM_BUG_ON(page_mapcount(src_page) != 1);
-                       VM_BUG_ON(page_count(src_page) != 2);
                        release_pte_page(src_page);
                        /*
                         * ptl mostly unnecessary, but preempt has to
index e877af8bdd1e8551335ec810ea1ad2c32bedc64e..469daabd90c7bf28572c3f9066ee2146ed590fd1 100644 (file)
@@ -642,7 +642,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
        struct batadv_neigh_node *router = NULL;
        struct batadv_orig_node *orig_node_tmp;
        struct hlist_node *node;
-       uint8_t bcast_own_sum_orig, bcast_own_sum_neigh;
+       int if_num;
+       uint8_t sum_orig, sum_neigh;
        uint8_t *neigh_addr;
 
        batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -727,17 +728,17 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
        if (router && (neigh_node->tq_avg == router->tq_avg)) {
                orig_node_tmp = router->orig_node;
                spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
-               bcast_own_sum_orig =
-                       orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+               if_num = router->if_incoming->if_num;
+               sum_orig = orig_node_tmp->bcast_own_sum[if_num];
                spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
 
                orig_node_tmp = neigh_node->orig_node;
                spin_lock_bh(&orig_node_tmp->ogm_cnt_lock);
-               bcast_own_sum_neigh =
-                       orig_node_tmp->bcast_own_sum[if_incoming->if_num];
+               if_num = neigh_node->if_incoming->if_num;
+               sum_neigh = orig_node_tmp->bcast_own_sum[if_num];
                spin_unlock_bh(&orig_node_tmp->ogm_cnt_lock);
 
-               if (bcast_own_sum_orig >= bcast_own_sum_neigh)
+               if (sum_orig >= sum_neigh)
                        goto update_tt;
        }
 
index 109ea2aae96cde266aef1123bb73a3fa7d219e79..21c53577c8d6a5e65599aa5c01d8d9d93c6fb838 100644 (file)
@@ -100,18 +100,21 @@ static int batadv_interface_set_mac_addr(struct net_device *dev, void *p)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
        struct sockaddr *addr = p;
+       uint8_t old_addr[ETH_ALEN];
 
        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;
 
+       memcpy(old_addr, dev->dev_addr, ETH_ALEN);
+       memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
+
        /* only modify transtable if it has been initialized before */
        if (atomic_read(&bat_priv->mesh_state) == BATADV_MESH_ACTIVE) {
-               batadv_tt_local_remove(bat_priv, dev->dev_addr,
+               batadv_tt_local_remove(bat_priv, old_addr,
                                       "mac address changed", false);
                batadv_tt_local_add(dev, addr->sa_data, BATADV_NULL_IFINDEX);
        }
 
-       memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
        dev->addr_assign_type &= ~NET_ADDR_RANDOM;
        return 0;
 }
index d4de5db18d5a8e48d950a368e7ef93840d269a07..0b997c8f965531d22da050339d17011ab9dd5b3c 100644 (file)
@@ -734,6 +734,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 
        cancel_work_sync(&hdev->le_scan);
 
+       cancel_delayed_work(&hdev->power_off);
+
        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);
 
index 4ea1710a478329a5d4219ce686a2ef4450ca30b8..38c00f142203505d3a3c809e8e6162f159ae0196 100644 (file)
@@ -1008,7 +1008,7 @@ static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *c
        if (!conn)
                return;
 
-       if (chan->mode == L2CAP_MODE_ERTM) {
+       if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
                __clear_retrans_timer(chan);
                __clear_monitor_timer(chan);
                __clear_ack_timer(chan);
index ad6613d17ca6de815be200b9576d6ecf510fef0d..eba022de3c205bb55f2ed9639e4faf21d78ae9a8 100644 (file)
@@ -2875,6 +2875,22 @@ int mgmt_powered(struct hci_dev *hdev, u8 powered)
                if (scan)
                        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 
+               if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
+                       u8 ssp = 1;
+
+                       hci_send_cmd(hdev, HCI_OP_WRITE_SSP_MODE, 1, &ssp);
+               }
+
+               if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
+                       struct hci_cp_write_le_host_supported cp;
+
+                       cp.le = 1;
+                       cp.simul = !!(hdev->features[6] & LMP_SIMUL_LE_BR);
+
+                       hci_send_cmd(hdev, HCI_OP_WRITE_LE_HOST_SUPPORTED,
+                                    sizeof(cp), &cp);
+               }
+
                update_class(hdev);
                update_name(hdev, hdev->dev_name);
                update_eir(hdev);
index 24c5eea8c45bb1233edfad141baf6a6391e023c6..159aa8bef9e7fe2f89f9b508c39a209aa92d3c0d 100644 (file)
@@ -1073,16 +1073,13 @@ static int write_partial_msg_pages(struct ceph_connection *con)
                        BUG_ON(kaddr == NULL);
                        base = kaddr + con->out_msg_pos.page_pos + bio_offset;
                        crc = crc32c(crc, base, len);
+                       kunmap(page);
                        msg->footer.data_crc = cpu_to_le32(crc);
                        con->out_msg_pos.did_page_crc = true;
                }
                ret = ceph_tcp_sendpage(con->sock, page,
                                      con->out_msg_pos.page_pos + bio_offset,
                                      len, 1);
-
-               if (do_datacrc)
-                       kunmap(page);
-
                if (ret <= 0)
                        goto out;
 
index 30579207612175f0a65b19310368079c54ce4bc9..a6000fbad2949f58a079322f4e328e0c051df896 100644 (file)
@@ -691,7 +691,8 @@ set_rcvbuf:
 
        case SO_KEEPALIVE:
 #ifdef CONFIG_INET
-               if (sk->sk_protocol == IPPROTO_TCP)
+               if (sk->sk_protocol == IPPROTO_TCP &&
+                   sk->sk_type == SOCK_STREAM)
                        tcp_set_keepalive(sk, valbool);
 #endif
                sock_valbool_flag(sk, SOCK_KEEPOPEN, valbool);
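
The added sk_type check matters because IPPROTO_TCP does not imply a TCP socket: a raw socket can carry that protocol number, and reaching tcp_set_keepalive() for it would treat a raw sock as a tcp_sock. A hedged userspace sketch of the triggering case (needs CAP_NET_RAW):

    #include <sys/socket.h>
    #include <netinet/in.h>

    int main(void)
    {
            int fd = socket(AF_INET, SOCK_RAW, IPPROTO_TCP);
            int on = 1;

            /* previously this could reach tcp_set_keepalive() on a non-TCP sock */
            setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
            return 0;
    }
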
index e1e0a4e8fd3469f7534cdc65b7ba3312ad66fa39..c7527f6b9ad9b54f36185975aed21171757c4600 100644 (file)
@@ -510,7 +510,10 @@ relookup:
                                        secure_ipv6_id(daddr->addr.a6));
                p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW;
                p->rate_tokens = 0;
-               p->rate_last = 0;
+               /* 60*HZ is arbitrary, but chosen high enough so that the first
+                * calculation of tokens is at its maximum.
+                */
+               p->rate_last = jiffies - 60*HZ;
                INIT_LIST_HEAD(&p->gc_list);
 
                /* Link the node. */
index ff0f071969ea77dbdb6797584765e32bc958203f..d23c6571ba1c34525114af16f0818cfe6bbf1f14 100644 (file)
@@ -131,18 +131,20 @@ found:
  *     0 - deliver
  *     1 - block
  */
-static __inline__ int icmp_filter(struct sock *sk, struct sk_buff *skb)
+static int icmp_filter(const struct sock *sk, const struct sk_buff *skb)
 {
-       int type;
+       struct icmphdr _hdr;
+       const struct icmphdr *hdr;
 
-       if (!pskb_may_pull(skb, sizeof(struct icmphdr)))
+       hdr = skb_header_pointer(skb, skb_transport_offset(skb),
+                                sizeof(_hdr), &_hdr);
+       if (!hdr)
                return 1;
 
-       type = icmp_hdr(skb)->type;
-       if (type < 32) {
+       if (hdr->type < 32) {
                __u32 data = raw_sk(sk)->filter.data;
 
-               return ((1 << type) & data) != 0;
+               return ((1U << hdr->type) & data) != 0;
        }
 
        /* Do not block unknown ICMP types */
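
The rewrite above is the standard skb_header_pointer() idiom: instead of pskb_may_pull(), copy the header into an on-stack buffer when it is not linear in the skb. Note that the bounce buffer must be the struct itself, not a pointer to one, since sizeof(_hdr) sets how many bytes are copied; the IPv6 counterpart of this hunk below is adjusted accordingly. In miniature, with an illustrative foohdr:

    struct foohdr _hdr;     /* on-stack bounce buffer */
    const struct foohdr *fh;

    fh = skb_header_pointer(skb, skb_transport_offset(skb), sizeof(_hdr), &_hdr);
    if (!fh)
            return 1;       /* header truncated: block the packet */
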
index 5b087c31d87b54f5a2574903775b81692bcd2616..0f9bdc5ee9f38c70f1c4c265e8ebe43467d6b59a 100644 (file)
@@ -86,28 +86,30 @@ static int mip6_mh_len(int type)
 
 static int mip6_mh_filter(struct sock *sk, struct sk_buff *skb)
 {
-       struct ip6_mh *mh;
+       struct ip6_mh _hdr;
+       const struct ip6_mh *mh;
 
-       if (!pskb_may_pull(skb, (skb_transport_offset(skb)) + 8) ||
-           !pskb_may_pull(skb, (skb_transport_offset(skb) +
-                                ((skb_transport_header(skb)[1] + 1) << 3))))
+       mh = skb_header_pointer(skb, skb_transport_offset(skb),
+                               sizeof(_hdr), &_hdr);
+       if (!mh)
                return -1;
 
-       mh = (struct ip6_mh *)skb_transport_header(skb);
+       if (((mh->ip6mh_hdrlen + 1) << 3) > skb->len)
+               return -1;
 
        if (mh->ip6mh_hdrlen < mip6_mh_len(mh->ip6mh_type)) {
                LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH message too short: %d vs >=%d\n",
                               mh->ip6mh_hdrlen, mip6_mh_len(mh->ip6mh_type));
-               mip6_param_prob(skb, 0, ((&mh->ip6mh_hdrlen) -
-                                        skb_network_header(skb)));
+               mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_hdrlen) +
+                               skb_network_header_len(skb));
                return -1;
        }
 
        if (mh->ip6mh_proto != IPPROTO_NONE) {
                LIMIT_NETDEBUG(KERN_DEBUG "mip6: MH invalid payload proto = %d\n",
                               mh->ip6mh_proto);
-               mip6_param_prob(skb, 0, ((&mh->ip6mh_proto) -
-                                        skb_network_header(skb)));
+               mip6_param_prob(skb, 0, offsetof(struct ip6_mh, ip6mh_proto) +
+                               skb_network_header_len(skb));
                return -1;
        }
 
index ef0579d5bca6b3794007145279f8ead1626aafe3..4a5f78b50495060470777aa41edd540bb699662f 100644 (file)
@@ -107,21 +107,20 @@ found:
  *     0 - deliver
  *     1 - block
  */
-static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
+static int icmpv6_filter(const struct sock *sk, const struct sk_buff *skb)
 {
-       struct icmp6hdr *icmph;
-       struct raw6_sock *rp = raw6_sk(sk);
-
-       if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
-               __u32 *data = &rp->filter.data[0];
-               int bit_nr;
+       struct icmp6hdr _hdr;
+       const struct icmp6hdr *hdr;
 
-               icmph = (struct icmp6hdr *) skb->data;
-               bit_nr = icmph->icmp6_type;
+       hdr = skb_header_pointer(skb, skb_transport_offset(skb),
+                                sizeof(_hdr), &_hdr);
+       if (hdr) {
+               const __u32 *data = &raw6_sk(sk)->filter.data[0];
+               unsigned int type = hdr->icmp6_type;
 
-               return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
+               return (data[type >> 5] & (1U << (type & 31))) != 0;
        }
-       return 0;
+       return 1;
 }
 
 #if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
index d71cd9229a47a8fd85efbf3f5430c66a3e3ad5cd..6f936358d664cd3a8946317ca879180a937c8b22 100644 (file)
@@ -80,8 +80,8 @@ static int l2tp_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
 
        hdr = genlmsg_put(msg, info->snd_pid, info->snd_seq,
                          &l2tp_nl_family, 0, L2TP_CMD_NOOP);
-       if (IS_ERR(hdr)) {
-               ret = PTR_ERR(hdr);
+       if (!hdr) {
+               ret = -EMSGSIZE;
                goto err_out;
        }
 
@@ -250,8 +250,8 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 pid, u32 seq, int flags,
 
        hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags,
                          L2TP_CMD_TUNNEL_GET);
-       if (IS_ERR(hdr))
-               return PTR_ERR(hdr);
+       if (!hdr)
+               return -EMSGSIZE;
 
        if (nla_put_u8(skb, L2TP_ATTR_PROTO_VERSION, tunnel->version) ||
            nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
@@ -617,8 +617,8 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 pid, u32 seq, int flags
        sk = tunnel->sock;
 
        hdr = genlmsg_put(skb, pid, seq, &l2tp_nl_family, flags, L2TP_CMD_SESSION_GET);
-       if (IS_ERR(hdr))
-               return PTR_ERR(hdr);
+       if (!hdr)
+               return -EMSGSIZE;
 
        if (nla_put_u32(skb, L2TP_ATTR_CONN_ID, tunnel->tunnel_id) ||
            nla_put_u32(skb, L2TP_ATTR_SESSION_ID, session->session_id) ||
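
All three hunks above converge on the same contract: genlmsg_put() reports failure by returning NULL, not an ERR_PTR, so the IS_ERR() tests never fired and allocation failures were silently missed. The corrected idiom in isolation (names illustrative):

    void *hdr = genlmsg_put(msg, pid, seq, &family, flags, cmd);

    if (!hdr)
            return -EMSGSIZE;       /* IS_ERR(NULL) is false, so this was never caught */
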
index 5c22ce8ab3090b103ac75c7ae54d0489c1bc84e2..a4c1e4528cac7e3e820f1cecfb8ec22e8dcf5966 100644 (file)
@@ -117,11 +117,11 @@ static int limit_mt_check(const struct xt_mtchk_param *par)
 
        /* For SMP, we only want to use one set of state. */
        r->master = priv;
+       /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
+          128. */
+       priv->prev = jiffies;
+       priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
        if (r->cost == 0) {
-               /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies *
-                  128. */
-               priv->prev = jiffies;
-               priv->credit = user2credits(r->avg * r->burst); /* Credits full. */
                r->credit_cap = priv->credit; /* Credits full. */
                r->cost = user2credits(r->avg);
        }
index 2ded3c7fad063a067151595c774b9bddd14bdc9a..72d170ca340665ea5c893cc1bec2316219abbf7c 100644 (file)
@@ -350,6 +350,9 @@ static void reg_regdb_search(struct work_struct *work)
        struct reg_regdb_search_request *request;
        const struct ieee80211_regdomain *curdom, *regdom;
        int i, r;
+       bool set_reg = false;
+
+       mutex_lock(&cfg80211_mutex);
 
        mutex_lock(&reg_regdb_search_mutex);
        while (!list_empty(&reg_regdb_search_list)) {
@@ -365,9 +368,7 @@ static void reg_regdb_search(struct work_struct *work)
                                r = reg_copy_regd(&regdom, curdom);
                                if (r)
                                        break;
-                               mutex_lock(&cfg80211_mutex);
-                               set_regdom(regdom);
-                               mutex_unlock(&cfg80211_mutex);
+                               set_reg = true;
                                break;
                        }
                }
@@ -375,6 +376,11 @@ static void reg_regdb_search(struct work_struct *work)
                kfree(request);
        }
        mutex_unlock(&reg_regdb_search_mutex);
+
+       if (set_reg)
+               set_regdom(regdom);
+
+       mutex_unlock(&cfg80211_mutex);
 }
 
 static DECLARE_WORK(reg_regdb_work, reg_regdb_search);
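
The rework pins down a single lock-acquisition order, taking cfg80211_mutex for the whole function before reg_regdb_search_mutex instead of acquiring it inside the inner critical section, and defers set_regdom() until the inner lock is dropped. The general rule it enforces, as a hedged sketch:

    mutex_lock(&outer_mutex);       /* cfg80211_mutex's role */
    mutex_lock(&inner_mutex);       /* reg_regdb_search_mutex's role */
    /* ... collect work while holding both ... */
    mutex_unlock(&inner_mutex);
    /* ... act on the result under the outer lock only ... */
    mutex_unlock(&outer_mutex);
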
index d24810fc6af6caf3a15ea3f5a55b33f4f08a667b..fd8fa9aa7c4edd698430a9cb0647a8d26095a9a2 100755 (executable)
@@ -200,7 +200,7 @@ EOF
 syscall_list() {
     grep '^[0-9]' "$1" | sort -n | (
        while read nr abi name entry ; do
-           echo <<EOF
+           cat <<EOF
 #if !defined(__NR_${name}) && !defined(__IGNORE_${name})
 #warning syscall ${name} not implemented
 #endif
index 3fd5b29dc9335b5bee03b0b28f6cbbdfee2ea297..a3acb7a85f6ab26f62e92489e75e0f6b4822f831 100644 (file)
@@ -702,7 +702,7 @@ static bool wm2000_readable_reg(struct device *dev, unsigned int reg)
 }
 
 static const struct regmap_config wm2000_regmap = {
-       .reg_bits = 8,
+       .reg_bits = 16,
        .val_bits = 8,
 
        .max_register = WM2000_REG_IF_CTL,
index d6e2bb49c59c52f01288925fa52461eea510e78a..060dccb9ec75513def50e6f8be32f60f4f3b05f1 100644 (file)
@@ -197,7 +197,13 @@ static void prepare_outbound_urb(struct snd_usb_endpoint *ep,
                        /* no data provider, so send silence */
                        unsigned int offs = 0;
                        for (i = 0; i < ctx->packets; ++i) {
-                               int counts = ctx->packet_size[i];
+                               int counts;
+
+                               if (ctx->packet_size[i])
+                                       counts = ctx->packet_size[i];
+                               else
+                                       counts = snd_usb_endpoint_next_packet_size(ep);
+
                                urb->iso_frame_desc[i].offset = offs * ep->stride;
                                urb->iso_frame_desc[i].length = counts * ep->stride;
                                offs += counts;