Merge branch 'fixes_for_linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping
author     Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 26 Oct 2012 17:01:43 +0000 (10:01 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Fri, 26 Oct 2012 17:01:43 +0000 (10:01 -0700)
Pull DMA-mapping revert from Marek Szyprowski:
 "Due to my mistake, my previous pull request (merged as commit
  cff7b8ba60e3: "Merge branch 'fixes_for_linus' ..") contained a patch
  which is aimed for v3.8 and lacks its dependences.  This pull request
  reverts it and fixes build break of ARM architecture."

* 'fixes_for_linus' of git://git.linaro.org/people/mszyprowski/linux-dma-mapping:
  Revert "ARM: dma-mapping: support debug_dma_mapping_error"

45 files changed:
MAINTAINERS
arch/x86/include/asm/efi.h
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/cpu/perf_event_knc.c
arch/x86/kernel/cpu/perf_event_p6.c
arch/x86/kernel/e820.c
arch/x86/kernel/setup.c
arch/x86/mm/init.c
arch/x86/mm/init_64.c
arch/x86/platform/efi/efi.c
arch/x86/platform/efi/efi_64.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/evergreen_cs.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/nid.h
drivers/gpu/drm/radeon/radeon_atpx_handler.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_gart.c
drivers/gpu/drm/radeon/radeon_gem.c
drivers/gpu/drm/radeon/radeon_legacy_encoders.c
drivers/gpu/drm/radeon/radeon_object.c
drivers/gpu/drm/radeon/si.c
fs/btrfs/backref.c
fs/btrfs/backref.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/extent_io.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/qgroup.c
fs/btrfs/send.c
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/lockd/mon.c
include/drm/drm_pciids.h
include/linux/memblock.h
include/linux/perf_event.h
mm/memblock.c
net/sunrpc/xprtsock.c
tools/perf/builtin-help.c
tools/perf/builtin-trace.c
tools/perf/util/parse-events-test.c
tools/perf/util/thread.c

diff --git a/MAINTAINERS b/MAINTAINERS
index 027ec2bfa135d4f5af20801b020c52d1f66a3f3f..f39a82dc02606cca9f309ca50feb4ef55c2345ae 100644
@@ -2802,6 +2802,7 @@ F:        sound/usb/misc/ua101.c
 EXTENSIBLE FIRMWARE INTERFACE (EFI)
 M:     Matt Fleming <matt.fleming@intel.com>
 L:     linux-efi@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mfleming/efi.git
 S:     Maintained
 F:     Documentation/x86/efi-stub.txt
 F:     arch/ia64/kernel/efi.c
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index c9dcc181d4d1a814c10d6822d0801c7a1b2d7196..6e8fdf5ad1135c0100c8b7a5220bb79db2359ddb 100644
@@ -35,7 +35,7 @@ extern unsigned long asmlinkage efi_call_phys(void *, ...);
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)      \
        efi_call_virt(f, a1, a2, a3, a4, a5, a6)
 
-#define efi_ioremap(addr, size, type)          ioremap_cache(addr, size)
+#define efi_ioremap(addr, size, type, attr)    ioremap_cache(addr, size)
 
 #else /* !CONFIG_X86_32 */
 
@@ -89,7 +89,7 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
                  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
 extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
-                                u32 type);
+                                u32 type, u64 attribute);
 
 #endif /* CONFIG_X86_32 */
 
@@ -98,6 +98,8 @@ extern void efi_set_executable(efi_memory_desc_t *md, bool executable);
 extern int efi_memblock_x86_reserve_range(void);
 extern void efi_call_phys_prelog(void);
 extern void efi_call_phys_epilog(void);
+extern void efi_unmap_memmap(void);
+extern void efi_memory_uc(u64 addr, unsigned long size);
 
 #ifndef CONFIG_EFI
 /*
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index c265593ec2cdc3df35fda1586aaf91514fab62fa..1817fa911024f07151d3edf91bd350722c9f79f8 100644
@@ -2257,6 +2257,9 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void)
                        continue;
 
                cfg = irq_cfg(irq);
+               if (!cfg)
+                       continue;
+
                raw_spin_lock(&desc->lock);
 
                /*
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3373f84d139750276cf33a3a6bf757c171975a40..4a3374e61a93a033f24f03941b99c03e3906a726 100644
@@ -208,12 +208,14 @@ static bool check_hw_exists(void)
        }
 
        /*
-        * Now write a value and read it back to see if it matches,
-        * this is needed to detect certain hardware emulators (qemu/kvm)
-        * that don't trap on the MSR access and always return 0s.
+        * Read the current value, change it and read it back to see if it
+        * matches, this is needed to detect certain hardware emulators
+        * (qemu/kvm) that don't trap on the MSR access and always return 0s.
         */
-       val = 0xabcdUL;
        reg = x86_pmu_event_addr(0);
+       if (rdmsrl_safe(reg, &val))
+               goto msr_fail;
+       val ^= 0xffffUL;
        ret = wrmsrl_safe(reg, val);
        ret |= rdmsrl_safe(reg, &val_new);
        if (ret || val != val_new)
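
The rewritten probe reads whatever value the counter currently holds, flips its low 16 bits, writes it back and re-reads it, so it works even when the register does not power up as zero. A minimal sketch of the idea; read_msr() and write_msr() are hypothetical stand-ins for the kernel's rdmsrl_safe() and wrmsrl_safe():

    /* Returns 1 if the counter MSR really stores what we write.
     * Emulators that swallow MSR writes and always return 0s fail
     * the final comparison. */
    static int pmu_counter_responds(unsigned int reg)
    {
            u64 val, val_new;

            if (read_msr(reg, &val))
                    return 0;               /* MSR access faulted */
            val ^= 0xffffULL;               /* perturb the current value */
            if (write_msr(reg, val) || read_msr(reg, &val_new))
                    return 0;
            return val == val_new;
    }
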
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 5df8d32ba91e044e065ebb5aed2942438fb8211e..3cf3d97cce3a7ab0b02c678f2535dc5ec03d09bb 100644
@@ -118,22 +118,24 @@ static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
 {
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
-       u32 config;
+       u32 config = 0;
 
-       pci_read_config_dword(pdev, box_ctl, &config);
-       config |= SNBEP_PMON_BOX_CTL_FRZ;
-       pci_write_config_dword(pdev, box_ctl, config);
+       if (!pci_read_config_dword(pdev, box_ctl, &config)) {
+               config |= SNBEP_PMON_BOX_CTL_FRZ;
+               pci_write_config_dword(pdev, box_ctl, config);
+       }
 }
 
 static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
 {
        struct pci_dev *pdev = box->pci_dev;
        int box_ctl = uncore_pci_box_ctl(box);
-       u32 config;
+       u32 config = 0;
 
-       pci_read_config_dword(pdev, box_ctl, &config);
-       config &= ~SNBEP_PMON_BOX_CTL_FRZ;
-       pci_write_config_dword(pdev, box_ctl, config);
+       if (!pci_read_config_dword(pdev, box_ctl, &config)) {
+               config &= ~SNBEP_PMON_BOX_CTL_FRZ;
+               pci_write_config_dword(pdev, box_ctl, config);
+       }
 }
 
 static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
@@ -156,7 +158,7 @@ static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct pe
 {
        struct pci_dev *pdev = box->pci_dev;
        struct hw_perf_event *hwc = &event->hw;
-       u64 count;
+       u64 count = 0;
 
        pci_read_config_dword(pdev, hwc->event_base, (u32 *)&count);
        pci_read_config_dword(pdev, hwc->event_base + 4, (u32 *)&count + 1);
@@ -603,11 +605,12 @@ static struct pci_driver snbep_uncore_pci_driver = {
 /*
  * build pci bus to socket mapping
  */
-static void snbep_pci2phy_map_init(void)
+static int snbep_pci2phy_map_init(void)
 {
        struct pci_dev *ubox_dev = NULL;
        int i, bus, nodeid;
-       u32 config;
+       int err = 0;
+       u32 config = 0;
 
        while (1) {
                /* find the UBOX device */
@@ -618,10 +621,14 @@ static void snbep_pci2phy_map_init(void)
                        break;
                bus = ubox_dev->bus->number;
                /* get the Node ID of the local register */
-               pci_read_config_dword(ubox_dev, 0x40, &config);
+               err = pci_read_config_dword(ubox_dev, 0x40, &config);
+               if (err)
+                       break;
                nodeid = config;
                /* get the Node ID mapping */
-               pci_read_config_dword(ubox_dev, 0x54, &config);
+               err = pci_read_config_dword(ubox_dev, 0x54, &config);
+               if (err)
+                       break;
                /*
                 * every three bits in the Node ID mapping register maps
                 * to a particular node.
@@ -633,7 +640,11 @@ static void snbep_pci2phy_map_init(void)
                        }
                }
        };
-       return;
+
+       if (ubox_dev)
+               pci_dev_put(ubox_dev);
+
+       return err ? pcibios_err_to_errno(err) : 0;
 }
 /* end of Sandy Bridge-EP uncore support */
 
@@ -1547,7 +1558,6 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
 {
        struct hw_perf_event *hwc = &event->hw;
        struct hw_perf_event_extra *reg1 = &hwc->extra_reg;
-       int port;
 
        /* adjust the main event selector and extra register index */
        if (reg1->idx % 2) {
@@ -1559,7 +1569,6 @@ void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
        }
 
        /* adjust extra register config */
-       port = reg1->idx / 6 + box->pmu->pmu_idx * 4;
        switch (reg1->idx % 6) {
        case 2:
                /* shift the 8~15 bits to the 0~7 bits */
@@ -2578,9 +2587,11 @@ static int __init uncore_pci_init(void)
 
        switch (boot_cpu_data.x86_model) {
        case 45: /* Sandy Bridge-EP */
+               ret = snbep_pci2phy_map_init();
+               if (ret)
+                       return ret;
                pci_uncores = snbep_pci_uncores;
                uncore_pci_driver = &snbep_uncore_pci_driver;
-               snbep_pci2phy_map_init();
                break;
        default:
                return 0;
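
The decode that consumes the 0x54 register falls between the two hunks above; the comment notes that each node occupies three bits of the mapping. A hedged sketch of such a decode, with an illustrative name rather than the kernel's:

    /* Field i (bits 3i..3i+2) of the mapping register holds the node ID
     * assigned to physical package i; scan the eight fields to find the
     * package for a given node ID. Illustrative only. */
    static int nodeid_to_pkg(u32 mapping, int nodeid)
    {
            int i;

            for (i = 0; i < 8; i++)
                    if (((mapping >> (3 * i)) & 0x7) == nodeid)
                            return i;
            return -1;
    }
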
diff --git a/arch/x86/kernel/cpu/perf_event_knc.c b/arch/x86/kernel/cpu/perf_event_knc.c
index 7c46bfdbc3732f87b005ecb7196a635eba5db546..4b7731bf23a812a4631aa7c53196068924b52ee7 100644
@@ -3,6 +3,8 @@
 #include <linux/perf_event.h>
 #include <linux/types.h>
 
+#include <asm/hardirq.h>
+
 #include "perf_event.h"
 
 static const u64 knc_perfmon_event_map[] =
@@ -173,30 +175,100 @@ static void knc_pmu_enable_all(int added)
 static inline void
 knc_pmu_disable_event(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        u64 val;
 
        val = hwc->config;
-       if (cpuc->enabled)
-               val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
+       val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
 
        (void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
 }
 
 static void knc_pmu_enable_event(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        u64 val;
 
        val = hwc->config;
-       if (cpuc->enabled)
-               val |= ARCH_PERFMON_EVENTSEL_ENABLE;
+       val |= ARCH_PERFMON_EVENTSEL_ENABLE;
 
        (void)wrmsrl_safe(hwc->config_base + hwc->idx, val);
 }
 
+static inline u64 knc_pmu_get_status(void)
+{
+       u64 status;
+
+       rdmsrl(MSR_KNC_IA32_PERF_GLOBAL_STATUS, status);
+
+       return status;
+}
+
+static inline void knc_pmu_ack_status(u64 ack)
+{
+       wrmsrl(MSR_KNC_IA32_PERF_GLOBAL_OVF_CONTROL, ack);
+}
+
+static int knc_pmu_handle_irq(struct pt_regs *regs)
+{
+       struct perf_sample_data data;
+       struct cpu_hw_events *cpuc;
+       int handled = 0;
+       int bit, loops;
+       u64 status;
+
+       cpuc = &__get_cpu_var(cpu_hw_events);
+
+       knc_pmu_disable_all();
+
+       status = knc_pmu_get_status();
+       if (!status) {
+               knc_pmu_enable_all(0);
+               return handled;
+       }
+
+       loops = 0;
+again:
+       knc_pmu_ack_status(status);
+       if (++loops > 100) {
+               WARN_ONCE(1, "perf: irq loop stuck!\n");
+               perf_event_print_debug();
+               goto done;
+       }
+
+       inc_irq_stat(apic_perf_irqs);
+
+       for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
+               struct perf_event *event = cpuc->events[bit];
+
+               handled++;
+
+               if (!test_bit(bit, cpuc->active_mask))
+                       continue;
+
+               if (!intel_pmu_save_and_restart(event))
+                       continue;
+
+               perf_sample_data_init(&data, 0, event->hw.last_period);
+
+               if (perf_event_overflow(event, &data, regs))
+                       x86_pmu_stop(event, 0);
+       }
+
+       /*
+        * Repeat if there is more work to be done:
+        */
+       status = knc_pmu_get_status();
+       if (status)
+               goto again;
+
+done:
+       knc_pmu_enable_all(0);
+
+       return handled;
+}
+
+
 PMU_FORMAT_ATTR(event, "config:0-7"    );
 PMU_FORMAT_ATTR(umask, "config:8-15"   );
 PMU_FORMAT_ATTR(edge,  "config:18"     );
@@ -214,7 +286,7 @@ static struct attribute *intel_knc_formats_attr[] = {
 
 static __initconst struct x86_pmu knc_pmu = {
        .name                   = "knc",
-       .handle_irq             = x86_pmu_handle_irq,
+       .handle_irq             = knc_pmu_handle_irq,
        .disable_all            = knc_pmu_disable_all,
        .enable_all             = knc_pmu_enable_all,
        .enable                 = knc_pmu_enable_event,
@@ -226,12 +298,11 @@ static __initconst struct x86_pmu knc_pmu = {
        .event_map              = knc_pmu_event_map,
        .max_events             = ARRAY_SIZE(knc_perfmon_event_map),
        .apic                   = 1,
-       .max_period             = (1ULL << 31) - 1,
+       .max_period             = (1ULL << 39) - 1,
        .version                = 0,
        .num_counters           = 2,
-       /* in theory 40 bits, early silicon is buggy though */
-       .cntval_bits            = 32,
-       .cntval_mask            = (1ULL << 32) - 1,
+       .cntval_bits            = 40,
+       .cntval_mask            = (1ULL << 40) - 1,
        .get_event_constraints  = x86_get_event_constraints,
        .event_constraints      = knc_event_constraints,
        .format_attrs           = intel_knc_formats_attr,
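
The new width and period are consistent with each other: perf preloads a counter with the negative of the sample period and lets it count up to overflow, so the period must fit in one bit less than the counter width. A worked check of the arithmetic for the 40-bit case:

    u64 bits = 40;
    u64 period = (1ULL << (bits - 1)) - 1;        /* max_period = 0x7fffffffff */
    u64 preload = (0 - period) & ((1ULL << bits) - 1);
    /* preload == 0x8000000001; counting up from there, the counter
     * wraps past 2^40 after exactly 'period' events. */
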
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index e4dd0f7a04535f09b83d47d95868bcc1c152dff3..7d0270bd793ebd8580d63e21a1be521b2aeea91f 100644
  */
 static const u64 p6_perfmon_event_map[] =
 {
-  [PERF_COUNT_HW_CPU_CYCLES]           = 0x0079,
-  [PERF_COUNT_HW_INSTRUCTIONS]         = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]     = 0x0f2e,
-  [PERF_COUNT_HW_CACHE_MISSES]         = 0x012e,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]  = 0x00c4,
-  [PERF_COUNT_HW_BRANCH_MISSES]                = 0x00c5,
-  [PERF_COUNT_HW_BUS_CYCLES]           = 0x0062,
+  [PERF_COUNT_HW_CPU_CYCLES]           = 0x0079,       /* CPU_CLK_UNHALTED */
+  [PERF_COUNT_HW_INSTRUCTIONS]         = 0x00c0,       /* INST_RETIRED     */
+  [PERF_COUNT_HW_CACHE_REFERENCES]     = 0x0f2e,       /* L2_RQSTS:M:E:S:I */
+  [PERF_COUNT_HW_CACHE_MISSES]         = 0x012e,       /* L2_RQSTS:I       */
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]  = 0x00c4,       /* BR_INST_RETIRED  */
+  [PERF_COUNT_HW_BRANCH_MISSES]                = 0x00c5,       /* BR_MISS_PRED_RETIRED */
+  [PERF_COUNT_HW_BUS_CYCLES]           = 0x0062,       /* BUS_DRDY_CLOCKS  */
+  [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x00a2,    /* RESOURCE_STALLS  */
+
+};
+
+static __initconst u64 p6_hw_cache_event_ids
+                               [PERF_COUNT_HW_CACHE_MAX]
+                               [PERF_COUNT_HW_CACHE_OP_MAX]
+                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
+{
+ [ C(L1D) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0043,  /* DATA_MEM_REFS       */
+                [ C(RESULT_MISS)   ] = 0x0045, /* DCU_LINES_IN        */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0x0f29,  /* L2_LD:M:E:S:I       */
+       },
+        [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+        },
+ },
+ [ C(L1I ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0080,  /* IFU_IFETCH         */
+               [ C(RESULT_MISS)   ] = 0x0f28,  /* L2_IFETCH:M:E:S:I  */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(LL  ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0x0025,  /* L2_M_LINES_INM     */
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(DTLB) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0043,  /* DATA_MEM_REFS      */
+               [ C(RESULT_MISS)   ] = 0,
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = 0,
+               [ C(RESULT_MISS)   ] = 0,
+       },
+ },
+ [ C(ITLB) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x0080,  /* IFU_IFETCH         */
+               [ C(RESULT_MISS)   ] = 0x0085,  /* ITLB_MISS          */
+       },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+ },
+ [ C(BPU ) ] = {
+       [ C(OP_READ) ] = {
+               [ C(RESULT_ACCESS) ] = 0x00c4,  /* BR_INST_RETIRED      */
+               [ C(RESULT_MISS)   ] = 0x00c5,  /* BR_MISS_PRED_RETIRED */
+        },
+       [ C(OP_WRITE) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+       [ C(OP_PREFETCH) ] = {
+               [ C(RESULT_ACCESS) ] = -1,
+               [ C(RESULT_MISS)   ] = -1,
+       },
+ },
 };
 
 static u64 p6_pmu_event_map(int hw_event)
@@ -34,7 +127,7 @@ static struct event_constraint p6_event_constraints[] =
 {
        INTEL_EVENT_CONSTRAINT(0xc1, 0x1),      /* FLOPS */
        INTEL_EVENT_CONSTRAINT(0x10, 0x1),      /* FP_COMP_OPS_EXE */
-       INTEL_EVENT_CONSTRAINT(0x11, 0x1),      /* FP_ASSIST */
+       INTEL_EVENT_CONSTRAINT(0x11, 0x2),      /* FP_ASSIST */
        INTEL_EVENT_CONSTRAINT(0x12, 0x2),      /* MUL */
        INTEL_EVENT_CONSTRAINT(0x13, 0x2),      /* DIV */
        INTEL_EVENT_CONSTRAINT(0x14, 0x1),      /* CYCLES_DIV_BUSY */
@@ -64,25 +157,25 @@ static void p6_pmu_enable_all(int added)
 static inline void
 p6_pmu_disable_event(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        u64 val = P6_NOP_EVENT;
 
-       if (cpuc->enabled)
-               val |= ARCH_PERFMON_EVENTSEL_ENABLE;
-
        (void)wrmsrl_safe(hwc->config_base, val);
 }
 
 static void p6_pmu_enable_event(struct perf_event *event)
 {
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        u64 val;
 
        val = hwc->config;
-       if (cpuc->enabled)
-               val |= ARCH_PERFMON_EVENTSEL_ENABLE;
+
+       /*
+        * p6 only has a global event enable, set on PerfEvtSel0
+        * We "disable" events by programming P6_NOP_EVENT
+        * and we rely on p6_pmu_enable_all() being called
+        * to actually enable the events.
+        */
 
        (void)wrmsrl_safe(hwc->config_base, val);
 }
@@ -158,5 +251,9 @@ __init int p6_pmu_init(void)
 
        x86_pmu = p6_pmu;
 
+       memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
+               sizeof(hw_cache_event_ids));
+
+
        return 0;
 }
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index ed858e9e9a7461aa9b4f8aa42a52c79d57d84507..df06ade26bef8485af1a66d797370385d0012e52 100644
@@ -1077,6 +1077,9 @@ void __init memblock_x86_fill(void)
                memblock_add(ei->addr, ei->size);
        }
 
+       /* throw away partial pages */
+       memblock_trim_memory(PAGE_SIZE);
+
        memblock_dump_all();
 }
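
memblock_trim_memory(PAGE_SIZE) implements the "throw away partial pages" comment by rounding each region's start up and its end down to the given alignment. A sketch of that clamp for one region, assuming a power-of-two alignment; keep() and drop() are hypothetical stand-ins for adjusting or removing the memblock entry:

    static void trim_region(u64 base, u64 size, u64 align)
    {
            u64 start = ALIGN(base, align);          /* round start up */
            u64 end = (base + size) & ~(align - 1);  /* round end down */

            if (start < end)
                    keep(start, end - start);  /* whole pages survive */
            else
                    drop();                    /* smaller than one page */
    }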
 
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 468e98dfd44e72fd7d0b29690b4bb777356a52a5..ca45696f30fb8a01a4d6852388d08ab19d4bb778 100644
@@ -921,18 +921,19 @@ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_X86_64
        if (max_pfn > max_low_pfn) {
                int i;
-               for (i = 0; i < e820.nr_map; i++) {
-                       struct e820entry *ei = &e820.map[i];
+               unsigned long start, end;
+               unsigned long start_pfn, end_pfn;
 
-                       if (ei->addr + ei->size <= 1UL << 32)
-                               continue;
+               for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn,
+                                                        NULL) {
 
-                       if (ei->type == E820_RESERVED)
+                       end = PFN_PHYS(end_pfn);
+                       if (end <= (1UL<<32))
                                continue;
 
+                       start = PFN_PHYS(start_pfn);
                        max_pfn_mapped = init_memory_mapping(
-                               ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr,
-                               ei->addr + ei->size);
+                                               max((1UL<<32), start), end);
                }
 
                /* can we preseve max_low_pfn ?*/
@@ -1048,6 +1049,18 @@ void __init setup_arch(char **cmdline_p)
        arch_init_ideal_nops();
 
        register_refined_jiffies(CLOCK_TICK_RATE);
+
+#ifdef CONFIG_EFI
+       /* Once setup is done above, disable efi_enabled on mismatched
+        * firmware/kernel archtectures since there is no support for
+        * runtime services.
+        */
+       if (efi_enabled && IS_ENABLED(CONFIG_X86_64) != efi_64bit) {
+               pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+               efi_unmap_memmap();
+               efi_enabled = 0;
+       }
+#endif
 }
 
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index ab1f6a93b527c9bd50acc12a52e9ce157f2cc48b..d7aea41563b372437eb227a499259be23d755564 100644
@@ -35,40 +35,44 @@ struct map_range {
        unsigned page_size_mask;
 };
 
-static void __init find_early_table_space(struct map_range *mr, unsigned long end,
-                                         int use_pse, int use_gbpages)
+/*
+ * First calculate space needed for kernel direct mapping page tables to cover
+ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB
+ * pages. Then find enough contiguous space for those page tables.
+ */
+static void __init find_early_table_space(struct map_range *mr, int nr_range)
 {
-       unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
+       int i;
+       unsigned long puds = 0, pmds = 0, ptes = 0, tables;
+       unsigned long start = 0, good_end;
        phys_addr_t base;
 
-       puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-       tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
-
-       if (use_gbpages) {
-               unsigned long extra;
-
-               extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT);
-               pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT;
-       } else
-               pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
+       for (i = 0; i < nr_range; i++) {
+               unsigned long range, extra;
 
-       tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
+               range = mr[i].end - mr[i].start;
+               puds += (range + PUD_SIZE - 1) >> PUD_SHIFT;
 
-       if (use_pse) {
-               unsigned long extra;
+               if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) {
+                       extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT);
+                       pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT;
+               } else {
+                       pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT;
+               }
 
-               extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+               if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) {
+                       extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT);
 #ifdef CONFIG_X86_32
-               extra += PMD_SIZE;
+                       extra += PMD_SIZE;
 #endif
-               /* The first 2/4M doesn't use large pages. */
-               if (mr->start < PMD_SIZE)
-                       extra += mr->end - mr->start;
-
-               ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
-       } else
-               ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
+                       ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
+               } else {
+                       ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT;
+               }
+       }
 
+       tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
+       tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
        tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
 #ifdef CONFIG_X86_32
@@ -86,7 +90,7 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
        pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
 
        printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
-               end - 1, pgt_buf_start << PAGE_SHIFT,
+               mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT,
                (pgt_buf_top << PAGE_SHIFT) - 1);
 }
 
@@ -267,7 +271,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
         * nodes are discovered.
         */
        if (!after_bootmem)
-               find_early_table_space(&mr[0], end, use_pse, use_gbpages);
+               find_early_table_space(mr, nr_range);
 
        for (i = 0; i < nr_range; i++)
                ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
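
With the loop above, the early page-table budget is just entry counts per level, each table rounded up to a page. A worked example for a single 1 GiB range on x86-64 (PAGE_SHIFT 12, PMD_SHIFT 21, PUD_SHIFT 30) with no large pages enabled:

    /*
     *   puds = (1G + 1G - 1) >> 30 =      1  ->  8 B, rounds to 1 page
     *   pmds = (1G + 2M - 1) >> 21 =    512  ->  4 KiB        = 1 page
     *   ptes = (1G + 4K - 1) >> 12 = 262144  ->  2 MiB        = 512 pages
     * tables = 1 + 1 + 512 = 514 pages of early mapping space.
     */
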
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 2b6b4a3c8beb8727d27b1cdba22411fe0d271efb..3baff255adac6472570298d7e9ee60df75e93dfc 100644
@@ -386,7 +386,8 @@ phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
                 * these mappings are more intelligent.
                 */
                if (pte_val(*pte)) {
-                       pages++;
+                       if (!after_bootmem)
+                               pages++;
                        continue;
                }
 
@@ -451,6 +452,8 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
+                               if (!after_bootmem)
+                                       pages++;
                                last_map_addr = next;
                                continue;
                        }
@@ -526,6 +529,8 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
+                               if (!after_bootmem)
+                                       pages++;
                                last_map_addr = next;
                                continue;
                        }
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index aded2a91162a8af12ad104d047544afce91ff4c4..ad4439145f858314dfe518cf7cd9336c5fe9c96d 100644
@@ -70,11 +70,15 @@ EXPORT_SYMBOL(efi);
 struct efi_memory_map memmap;
 
 bool efi_64bit;
-static bool efi_native;
 
 static struct efi efi_phys __initdata;
 static efi_system_table_t efi_systab __initdata;
 
+static inline bool efi_is_native(void)
+{
+       return IS_ENABLED(CONFIG_X86_64) == efi_64bit;
+}
+
 static int __init setup_noefi(char *arg)
 {
        efi_enabled = 0;
@@ -420,7 +424,7 @@ void __init efi_reserve_boot_services(void)
        }
 }
 
-static void __init efi_unmap_memmap(void)
+void __init efi_unmap_memmap(void)
 {
        if (memmap.map) {
                early_iounmap(memmap.map, memmap.nr_map * memmap.desc_size);
@@ -432,7 +436,7 @@ void __init efi_free_boot_services(void)
 {
        void *p;
 
-       if (!efi_native)
+       if (!efi_is_native())
                return;
 
        for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
@@ -684,12 +688,10 @@ void __init efi_init(void)
                return;
        }
        efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
-       efi_native = !efi_64bit;
 #else
        efi_phys.systab = (efi_system_table_t *)
                          (boot_params.efi_info.efi_systab |
                          ((__u64)boot_params.efi_info.efi_systab_hi<<32));
-       efi_native = efi_64bit;
 #endif
 
        if (efi_systab_init(efi_phys.systab)) {
@@ -723,7 +725,7 @@ void __init efi_init(void)
         * that doesn't match the kernel 32/64-bit mode.
         */
 
-       if (!efi_native)
+       if (!efi_is_native())
                pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
        else if (efi_runtime_init()) {
                efi_enabled = 0;
@@ -735,7 +737,7 @@ void __init efi_init(void)
                return;
        }
 #ifdef CONFIG_X86_32
-       if (efi_native) {
+       if (efi_is_native()) {
                x86_platform.get_wallclock = efi_get_time;
                x86_platform.set_wallclock = efi_set_rtc_mmss;
        }
@@ -810,6 +812,16 @@ void __iomem *efi_lookup_mapped_addr(u64 phys_addr)
        return NULL;
 }
 
+void efi_memory_uc(u64 addr, unsigned long size)
+{
+       unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
+       u64 npages;
+
+       npages = round_up(size, page_shift) / page_shift;
+       memrange_efi_to_native(&addr, &npages);
+       set_memory_uc(addr, npages);
+}
+
 /*
  * This function will switch the EFI runtime services to virtual mode.
  * Essentially, look through the EFI memmap and map every region that
@@ -823,7 +835,7 @@ void __init efi_enter_virtual_mode(void)
        efi_memory_desc_t *md, *prev_md = NULL;
        efi_status_t status;
        unsigned long size;
-       u64 end, systab, addr, npages, end_pfn;
+       u64 end, systab, end_pfn;
        void *p, *va, *new_memmap = NULL;
        int count = 0;
 
@@ -834,7 +846,7 @@ void __init efi_enter_virtual_mode(void)
         * non-native EFI
         */
 
-       if (!efi_native) {
+       if (!efi_is_native()) {
                efi_unmap_memmap();
                return;
        }
@@ -879,10 +891,14 @@ void __init efi_enter_virtual_mode(void)
                end_pfn = PFN_UP(end);
                if (end_pfn <= max_low_pfn_mapped
                    || (end_pfn > (1UL << (32 - PAGE_SHIFT))
-                       && end_pfn <= max_pfn_mapped))
+                       && end_pfn <= max_pfn_mapped)) {
                        va = __va(md->phys_addr);
-               else
-                       va = efi_ioremap(md->phys_addr, size, md->type);
+
+                       if (!(md->attribute & EFI_MEMORY_WB))
+                               efi_memory_uc((u64)(unsigned long)va, size);
+               } else
+                       va = efi_ioremap(md->phys_addr, size,
+                                        md->type, md->attribute);
 
                md->virt_addr = (u64) (unsigned long) va;
 
@@ -892,13 +908,6 @@ void __init efi_enter_virtual_mode(void)
                        continue;
                }
 
-               if (!(md->attribute & EFI_MEMORY_WB)) {
-                       addr = md->virt_addr;
-                       npages = md->num_pages;
-                       memrange_efi_to_native(&addr, &npages);
-                       set_memory_uc(addr, npages);
-               }
-
                systab = (u64) (unsigned long) efi_phys.systab;
                if (md->phys_addr <= systab && systab < end) {
                        systab += md->virt_addr - md->phys_addr;
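
The new efi_memory_uc() helper converts a byte size to a count of EFI pages (EFI_PAGE_SHIFT is 12, i.e. 4 KiB) before memrange_efi_to_native() rescales to native pages and set_memory_uc() marks the range uncached. A worked example of the rounding:

    /*
     * size = 5000 bytes, page_shift = 1UL << 12 = 4096:
     *   round_up(5000, 4096) = 8192
     *   8192 / 4096          = 2 EFI pages
     */
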
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index ac3aa54e26546ba5cb4121eba0c58ca00f06ea82..95fd505dfeb6e43dd37b0c41954f0b0db6535d1c 100644
@@ -82,7 +82,7 @@ void __init efi_call_phys_epilog(void)
 }
 
 void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
-                                u32 type)
+                                u32 type, u64 attribute)
 {
        unsigned long last_map_pfn;
 
@@ -92,8 +92,11 @@ void __iomem *__init efi_ioremap(unsigned long phys_addr, unsigned long size,
        last_map_pfn = init_memory_mapping(phys_addr, phys_addr + size);
        if ((last_map_pfn << PAGE_SHIFT) < phys_addr + size) {
                unsigned long top = last_map_pfn << PAGE_SHIFT;
-               efi_ioremap(top, size - (top - phys_addr), type);
+               efi_ioremap(top, size - (top - phys_addr), type, attribute);
        }
 
+       if (!(attribute & EFI_MEMORY_WB))
+               efi_memory_uc((u64)(unsigned long)__va(phys_addr), size);
+
        return (void __iomem *)__va(phys_addr);
 }
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 49cbb3795a102e755337a3c2105f03caedc0f2ee..ba498f8e47a211c7bc9a8e749624be6cbd8b395b 100644
@@ -184,6 +184,7 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
        struct radeon_backlight_privdata *pdata;
        struct radeon_encoder_atom_dig *dig;
        u8 backlight_level;
+       char bl_name[16];
 
        if (!radeon_encoder->enc_priv)
                return;
@@ -203,7 +204,9 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
        memset(&props, 0, sizeof(props));
        props.max_brightness = RADEON_MAX_BL_LEVEL;
        props.type = BACKLIGHT_RAW;
-       bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+       snprintf(bl_name, sizeof(bl_name),
+                "radeon_bl%d", dev->primary->index);
+       bd = backlight_device_register(bl_name, &drm_connector->kdev,
                                       pdata, &radeon_atom_backlight_ops, &props);
        if (IS_ERR(bd)) {
                DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 573ed1bc6cf7d36ffd1a3385cc05350fd7a3d3a0..30271b641913f3eeee72dcdc7db7146217c86109 100644
@@ -2829,6 +2829,7 @@ static bool evergreen_vm_reg_valid(u32 reg)
        case CAYMAN_SQ_EX_ALLOC_TABLE_SLOTS:
                return true;
        default:
+               DRM_ERROR("Invalid register 0x%x in CS\n", reg);
                return false;
        }
 }
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 8c74c729586db21185e54465d23289a15bc4d979..81e6a568c29debcf49bb915c5d3aa19bcc88324e 100644
@@ -1538,26 +1538,31 @@ void cayman_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
        struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
        uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-       int i;
 
-       radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, 1 + count * 2));
-       radeon_ring_write(ring, pe);
-       radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
-       for (i = 0; i < count; ++i) {
-               uint64_t value = 0;
-               if (flags & RADEON_VM_PAGE_SYSTEM) {
-                       value = radeon_vm_map_gart(rdev, addr);
-                       value &= 0xFFFFFFFFFFFFF000ULL;
-                       addr += incr;
-
-               } else if (flags & RADEON_VM_PAGE_VALID) {
-                       value = addr;
-                       addr += incr;
-               }
+       while (count) {
+               unsigned ndw = 1 + count * 2;
+               if (ndw > 0x3FFF)
+                       ndw = 0x3FFF;
+
+               radeon_ring_write(ring, PACKET3(PACKET3_ME_WRITE, ndw));
+               radeon_ring_write(ring, pe);
+               radeon_ring_write(ring, upper_32_bits(pe) & 0xff);
+               for (; ndw > 1; ndw -= 2, --count, pe += 8) {
+                       uint64_t value = 0;
+                       if (flags & RADEON_VM_PAGE_SYSTEM) {
+                               value = radeon_vm_map_gart(rdev, addr);
+                               value &= 0xFFFFFFFFFFFFF000ULL;
+                               addr += incr;
+
+                       } else if (flags & RADEON_VM_PAGE_VALID) {
+                               value = addr;
+                               addr += incr;
+                       }
 
-               value |= r600_flags;
-               radeon_ring_write(ring, value);
-               radeon_ring_write(ring, upper_32_bits(value));
+                       value |= r600_flags;
+                       radeon_ring_write(ring, value);
+                       radeon_ring_write(ring, upper_32_bits(value));
+               }
        }
 }
 
@@ -1586,4 +1591,8 @@ void cayman_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        /* bits 0-7 are the VM contexts0-7 */
        radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
        radeon_ring_write(ring, 1 << vm->id);
+
+       /* sync PFP to ME, otherwise we might get invalid PFP reads */
+       radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+       radeon_ring_write(ring, 0x0);
 }
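
Both this rewrite and the matching si.c change further down chunk the update because the count field of a PACKET3 header caps one write at a few thousand dwords (0x3FFF here). The pattern in isolation, with emit_header() and emit_entry() as hypothetical stand-ins for the radeon_ring_write() sequences:

    while (count) {
            unsigned ndw = 1 + count * 2;  /* header operand + 2 dwords/entry */

            if (ndw > 0x3FFF)              /* hardware limit per packet */
                    ndw = 0x3FFF;          /* odd cap keeps the parity */
            emit_header(ndw);
            for (; ndw > 1; ndw -= 2, --count)
                    emit_entry();          /* two dwords per page entry */
    }
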
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2423d1b5d385938f2445706abc97ab74198e1b0f..cbef6815907a13c83efff8b9e863568b1e625980 100644
 #define        PACKET3_MPEG_INDEX                              0x3A
 #define        PACKET3_WAIT_REG_MEM                            0x3C
 #define        PACKET3_MEM_WRITE                               0x3D
+#define        PACKET3_PFP_SYNC_ME                             0x42
 #define        PACKET3_SURFACE_SYNC                            0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_CB1_DEST_BASE_ENA    (1 << 7)
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 1aa3f910b99328e9f3de367859742bb57722732b..37f6a907aea49c7b1ff3c57628cb281d737d2e69 100644
@@ -87,7 +87,7 @@ static union acpi_object *radeon_atpx_call(acpi_handle handle, int function,
                atpx_arg_elements[1].integer.value = 0;
        }
 
-       status = acpi_evaluate_object(handle, "ATPX", &atpx_arg, &buffer);
+       status = acpi_evaluate_object(handle, NULL, &atpx_arg, &buffer);
 
        /* Fail only if calling the method fails and ATPX is supported */
        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -373,11 +373,11 @@ static int radeon_atpx_power_state(enum vga_switcheroo_client_id id,
 }
 
 /**
- * radeon_atpx_pci_probe_handle - look up the ATRM and ATPX handles
+ * radeon_atpx_pci_probe_handle - look up the ATPX handle
  *
  * @pdev: pci device
  *
- * Look up the ATPX and ATRM handles (all asics).
+ * Look up the ATPX handles (all asics).
  * Returns true if the handles are found, false if not.
  */
 static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index bd13ca09eb626efcc2c312ef68569ffcf7655db9..e2f5f888c374cc29b2f8658a5e0ba9d91325827d 100644
@@ -355,6 +355,8 @@ int radeon_wb_init(struct radeon_device *rdev)
  */
 void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64 base)
 {
+       uint64_t limit = (uint64_t)radeon_vram_limit << 20;
+
        mc->vram_start = base;
        if (mc->mc_vram_size > (0xFFFFFFFF - base + 1)) {
                dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
@@ -368,8 +370,8 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
                mc->mc_vram_size = mc->aper_size;
        }
        mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
-       if (radeon_vram_limit && radeon_vram_limit < mc->real_vram_size)
-               mc->real_vram_size = radeon_vram_limit;
+       if (limit && limit < mc->real_vram_size)
+               mc->real_vram_size = limit;
        dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
                        mc->mc_vram_size >> 20, mc->vram_start,
                        mc->vram_end, mc->real_vram_size >> 20);
@@ -834,6 +836,19 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
                return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 }
 
+/**
+ * radeon_check_pot_argument - check that argument is a power of two
+ *
+ * @arg: value to check
+ *
+ * Validates that a certain argument is a power of two (all asics).
+ * Returns true if argument is valid.
+ */
+static bool radeon_check_pot_argument(int arg)
+{
+       return (arg & (arg - 1)) == 0;
+}
+
 /**
  * radeon_check_arguments - validate module params
  *
@@ -845,52 +860,25 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
 static void radeon_check_arguments(struct radeon_device *rdev)
 {
        /* vramlimit must be a power of two */
-       switch (radeon_vram_limit) {
-       case 0:
-       case 4:
-       case 8:
-       case 16:
-       case 32:
-       case 64:
-       case 128:
-       case 256:
-       case 512:
-       case 1024:
-       case 2048:
-       case 4096:
-               break;
-       default:
+       if (!radeon_check_pot_argument(radeon_vram_limit)) {
                dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n",
                                radeon_vram_limit);
                radeon_vram_limit = 0;
-               break;
        }
-       radeon_vram_limit = radeon_vram_limit << 20;
+
        /* gtt size must be power of two and greater or equal to 32M */
-       switch (radeon_gart_size) {
-       case 4:
-       case 8:
-       case 16:
+       if (radeon_gart_size < 32) {
                dev_warn(rdev->dev, "gart size (%d) too small forcing to 512M\n",
                                radeon_gart_size);
                radeon_gart_size = 512;
-               break;
-       case 32:
-       case 64:
-       case 128:
-       case 256:
-       case 512:
-       case 1024:
-       case 2048:
-       case 4096:
-               break;
-       default:
+
+       } else if (!radeon_check_pot_argument(radeon_gart_size)) {
                dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n",
                                radeon_gart_size);
                radeon_gart_size = 512;
-               break;
        }
-       rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+       rdev->mc.gtt_size = (uint64_t)radeon_gart_size << 20;
+
        /* AGP mode can only be -1, 1, 2, 4, 8 */
        switch (radeon_agpmode) {
        case -1:
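
radeon_check_pot_argument() collapses both switch statements into the classic bit trick: a power of two has a single bit set, so clearing its lowest set bit leaves zero. Two quick cases:

    /*
     *   512 & 511 = 0b1000000000 & 0b0111111111 = 0   -> power of two
     *    48 &  47 = 0b0000110000 & 0b0000101111 = 32  -> not a power of two
     * Unlike is_power_of_2() from <linux/log2.h>, this variant also
     * accepts 0, which the vramlimit path treats as "no limit".
     */
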
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index a7677dd1ce98573121a395643b31a57480e60c76..4debd60e5aa63390f0f386ee236bfc42fe4f8cf3 100644
@@ -355,14 +355,13 @@ int radeon_gart_init(struct radeon_device *rdev)
        DRM_INFO("GART: num cpu pages %u, num gpu pages %u\n",
                 rdev->gart.num_cpu_pages, rdev->gart.num_gpu_pages);
        /* Allocate pages table */
-       rdev->gart.pages = kzalloc(sizeof(void *) * rdev->gart.num_cpu_pages,
-                                  GFP_KERNEL);
+       rdev->gart.pages = vzalloc(sizeof(void *) * rdev->gart.num_cpu_pages);
        if (rdev->gart.pages == NULL) {
                radeon_gart_fini(rdev);
                return -ENOMEM;
        }
-       rdev->gart.pages_addr = kzalloc(sizeof(dma_addr_t) *
-                                       rdev->gart.num_cpu_pages, GFP_KERNEL);
+       rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
+                                       rdev->gart.num_cpu_pages);
        if (rdev->gart.pages_addr == NULL) {
                radeon_gart_fini(rdev);
                return -ENOMEM;
@@ -388,8 +387,8 @@ void radeon_gart_fini(struct radeon_device *rdev)
                radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
        }
        rdev->gart.ready = false;
-       kfree(rdev->gart.pages);
-       kfree(rdev->gart.pages_addr);
+       vfree(rdev->gart.pages);
+       vfree(rdev->gart.pages_addr);
        rdev->gart.pages = NULL;
        rdev->gart.pages_addr = NULL;
 
@@ -577,7 +576,7 @@ void radeon_vm_manager_fini(struct radeon_device *rdev)
  *
  * Global and local mutex must be locked!
  */
-int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
+static int radeon_vm_evict(struct radeon_device *rdev, struct radeon_vm *vm)
 {
        struct radeon_vm *vm_evict;
 
@@ -1036,8 +1035,7 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
                pte = radeon_sa_bo_gpu_addr(vm->page_tables[pt_idx]);
                pte += (addr & mask) * 8;
 
-               if (((last_pte + 8 * count) != pte) ||
-                   ((count + nptes) > 1 << 11)) {
+               if ((last_pte + 8 * count) != pte) {
 
                        if (count) {
                                radeon_asic_vm_set_page(rdev, last_pte,
@@ -1148,17 +1146,17 @@ int radeon_vm_bo_update_pte(struct radeon_device *rdev,
 
        if (RADEON_VM_BLOCK_SIZE > 11)
                /* reserve space for one header for every 2k dwords */
-               ndw += (nptes >> 11) * 3;
+               ndw += (nptes >> 11) * 4;
        else
                /* reserve space for one header for
                    every (1 << BLOCK_SIZE) entries */
-               ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 3;
+               ndw += (nptes >> RADEON_VM_BLOCK_SIZE) * 4;
 
        /* reserve space for pte addresses */
        ndw += nptes * 2;
 
        /* reserve space for one header for every 2k dwords */
-       ndw += (npdes >> 11) * 3;
+       ndw += (npdes >> 11) * 4;
 
        /* reserve space for pde addresses */
        ndw += npdes * 2;
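
Earlier in this file, the kzalloc() to vzalloc() switch matters because the GART page arrays grow with GART size (one pointer per CPU page), and large allocations may not find physically contiguous memory. A minimal sketch of the pairing rule, with num_cpu_pages assumed from the surrounding code:

    #include <linux/vmalloc.h>

    /* Zeroed and virtually contiguous; no physical contiguity needed,
     * so big arrays succeed where kzalloc() would fail. Must be freed
     * with vfree(), never kfree(). */
    pages = vzalloc(num_cpu_pages * sizeof(void *));
    if (!pages)
            return -ENOMEM;
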
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index f38fbcc469358a2144791d40f910d4d900d99202..fe5c1f6b795795530075939d39c06486ce42d29d 100644
@@ -53,6 +53,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
                                struct drm_gem_object **obj)
 {
        struct radeon_bo *robj;
+       unsigned long max_size;
        int r;
 
        *obj = NULL;
@@ -60,11 +61,26 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
        if (alignment < PAGE_SIZE) {
                alignment = PAGE_SIZE;
        }
+
+       /* maximun bo size is the minimun btw visible vram and gtt size */
+       max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+       if (size > max_size) {
+               printk(KERN_WARNING "%s:%d alloc size %dMb bigger than %ldMb limit\n",
+                      __func__, __LINE__, size >> 20, max_size >> 20);
+               return -ENOMEM;
+       }
+
+retry:
        r = radeon_bo_create(rdev, size, alignment, kernel, initial_domain, NULL, &robj);
        if (r) {
-               if (r != -ERESTARTSYS)
+               if (r != -ERESTARTSYS) {
+                       if (initial_domain == RADEON_GEM_DOMAIN_VRAM) {
+                               initial_domain |= RADEON_GEM_DOMAIN_GTT;
+                               goto retry;
+                       }
                        DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
                                  size, initial_domain, alignment, r);
+               }
                return r;
        }
        *obj = &robj->gem_base;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c
index a13ad9d707cfcccdab04d085a62d4be0ab3690df..0063df9d166d70f5267003d24bc078093f2fdf9a 100644
@@ -370,6 +370,7 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
        struct backlight_properties props;
        struct radeon_backlight_privdata *pdata;
        uint8_t backlight_level;
+       char bl_name[16];
 
        if (!radeon_encoder->enc_priv)
                return;
@@ -389,7 +390,9 @@ void radeon_legacy_backlight_init(struct radeon_encoder *radeon_encoder,
        memset(&props, 0, sizeof(props));
        props.max_brightness = RADEON_MAX_BL_LEVEL;
        props.type = BACKLIGHT_RAW;
-       bd = backlight_device_register("radeon_bl", &drm_connector->kdev,
+       snprintf(bl_name, sizeof(bl_name),
+                "radeon_bl%d", dev->primary->index);
+       bd = backlight_device_register(bl_name, &drm_connector->kdev,
                                       pdata, &radeon_backlight_ops, &props);
        if (IS_ERR(bd)) {
                DRM_ERROR("Backlight registration failed\n");
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8b27dd6e3144566bf9cfcb218f4697cd92dbbb95..b91118ccef867f5b59effd78d0ee93d65a932f8b 100644
@@ -105,7 +105,6 @@ int radeon_bo_create(struct radeon_device *rdev,
        struct radeon_bo *bo;
        enum ttm_bo_type type;
        unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
-       unsigned long max_size = 0;
        size_t acc_size;
        int r;
 
@@ -121,18 +120,9 @@ int radeon_bo_create(struct radeon_device *rdev,
        }
        *bo_ptr = NULL;
 
-       /* maximun bo size is the minimun btw visible vram and gtt size */
-       max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
-       if ((page_align << PAGE_SHIFT) >= max_size) {
-               printk(KERN_WARNING "%s:%d alloc size %ldM bigger than %ldMb limit\n",
-                       __func__, __LINE__, page_align  >> (20 - PAGE_SHIFT), max_size >> 20);
-               return -ENOMEM;
-       }
-
        acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
                                       sizeof(struct radeon_bo));
 
-retry:
        bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
        if (bo == NULL)
                return -ENOMEM;
@@ -154,15 +144,6 @@ retry:
                        acc_size, sg, &radeon_ttm_bo_destroy);
        up_read(&rdev->pm.mclk_lock);
        if (unlikely(r != 0)) {
-               if (r != -ERESTARTSYS) {
-                       if (domain == RADEON_GEM_DOMAIN_VRAM) {
-                               domain |= RADEON_GEM_DOMAIN_GTT;
-                               goto retry;
-                       }
-                       dev_err(rdev->dev,
-                               "object_init failed for (%lu, 0x%08X)\n",
-                               size, domain);
-               }
                return r;
        }
        *bo_ptr = bo;
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index df8dd77016436373dcfcfce4e3fb3e8410a3d873..b0db712060fb3876dfc8f6b92c93acb04bee751a 100644
@@ -2808,26 +2808,31 @@ void si_vm_set_page(struct radeon_device *rdev, uint64_t pe,
 {
        struct radeon_ring *ring = &rdev->ring[rdev->asic->vm.pt_ring_index];
        uint32_t r600_flags = cayman_vm_page_flags(rdev, flags);
-       int i;
-       uint64_t value;
 
-       radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 2 + count * 2));
-       radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
-                                WRITE_DATA_DST_SEL(1)));
-       radeon_ring_write(ring, pe);
-       radeon_ring_write(ring, upper_32_bits(pe));
-       for (i = 0; i < count; ++i) {
-               if (flags & RADEON_VM_PAGE_SYSTEM) {
-                       value = radeon_vm_map_gart(rdev, addr);
-                       value &= 0xFFFFFFFFFFFFF000ULL;
-               } else if (flags & RADEON_VM_PAGE_VALID)
-                       value = addr;
-               else
-                       value = 0;
-               addr += incr;
-               value |= r600_flags;
-               radeon_ring_write(ring, value);
-               radeon_ring_write(ring, upper_32_bits(value));
+       while (count) {
+               unsigned ndw = 2 + count * 2;
+               if (ndw > 0x3FFE)
+                       ndw = 0x3FFE;
+
+               radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, ndw));
+               radeon_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
+                                        WRITE_DATA_DST_SEL(1)));
+               radeon_ring_write(ring, pe);
+               radeon_ring_write(ring, upper_32_bits(pe));
+               for (; ndw > 2; ndw -= 2, --count, pe += 8) {
+                       uint64_t value;
+                       if (flags & RADEON_VM_PAGE_SYSTEM) {
+                               value = radeon_vm_map_gart(rdev, addr);
+                               value &= 0xFFFFFFFFFFFFF000ULL;
+                       } else if (flags & RADEON_VM_PAGE_VALID)
+                               value = addr;
+                       else
+                               value = 0;
+                       addr += incr;
+                       value |= r600_flags;
+                       radeon_ring_write(ring, value);
+                       radeon_ring_write(ring, upper_32_bits(value));
+               }
        }
 }
 
@@ -2868,6 +2873,10 @@ void si_vm_flush(struct radeon_device *rdev, int ridx, struct radeon_vm *vm)
        radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 1 << vm->id);
+
+       /* sync PFP to ME, otherwise we might get invalid PFP reads */
+       radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
+       radeon_ring_write(ring, 0x0);
 }
 
 /*
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index f3187938e081c7dcbf842d424f5de6ed06d3f93f..208d8aa5b07e488f1f39cb0877ff46bb5d08d5a6 100644
@@ -283,9 +283,7 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
                goto out;
        }
 
-       rcu_read_lock();
-       root_level = btrfs_header_level(root->node);
-       rcu_read_unlock();
+       root_level = btrfs_old_root_level(root, time_seq);
 
        if (root_level + 1 == level)
                goto out;
@@ -1177,16 +1175,15 @@ int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
        return ret;
 }
 
-static char *ref_to_path(struct btrfs_root *fs_root,
-                        struct btrfs_path *path,
-                        u32 name_len, unsigned long name_off,
-                        struct extent_buffer *eb_in, u64 parent,
-                        char *dest, u32 size)
+char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+                       u32 name_len, unsigned long name_off,
+                       struct extent_buffer *eb_in, u64 parent,
+                       char *dest, u32 size)
 {
        int slot;
        u64 next_inum;
        int ret;
-       s64 bytes_left = size - 1;
+       s64 bytes_left = ((s64)size) - 1;
        struct extent_buffer *eb = eb_in;
        struct btrfs_key found_key;
        int leave_spinning = path->leave_spinning;
@@ -1266,10 +1263,10 @@ char *btrfs_iref_to_path(struct btrfs_root *fs_root,
                         struct extent_buffer *eb_in, u64 parent,
                         char *dest, u32 size)
 {
-       return ref_to_path(fs_root, path,
-                          btrfs_inode_ref_name_len(eb_in, iref),
-                          (unsigned long)(iref + 1),
-                          eb_in, parent, dest, size);
+       return btrfs_ref_to_path(fs_root, path,
+                                btrfs_inode_ref_name_len(eb_in, iref),
+                                (unsigned long)(iref + 1),
+                                eb_in, parent, dest, size);
 }
 
 /*
@@ -1715,9 +1712,8 @@ static int inode_to_path(u64 inum, u32 name_len, unsigned long name_off,
                                        ipath->fspath->bytes_left - s_ptr : 0;
 
        fspath_min = (char *)ipath->fspath->val + (i + 1) * s_ptr;
-       fspath = ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
-                            name_off, eb, inum, fspath_min,
-                            bytes_left);
+       fspath = btrfs_ref_to_path(ipath->fs_root, ipath->btrfs_path, name_len,
+                                  name_off, eb, inum, fspath_min, bytes_left);
        if (IS_ERR(fspath))
                return PTR_ERR(fspath);
 
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index e75533043a5ffbab21ff133877c352b743ef6592..d61feca79455bda94308c9ab3608b23409a84c73 100644
@@ -62,6 +62,10 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
 char *btrfs_iref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
                         struct btrfs_inode_ref *iref, struct extent_buffer *eb,
                         u64 parent, char *dest, u32 size);
+char *btrfs_ref_to_path(struct btrfs_root *fs_root, struct btrfs_path *path,
+                       u32 name_len, unsigned long name_off,
+                       struct extent_buffer *eb_in, u64 parent,
+                       char *dest, u32 size);
 
 struct btrfs_data_container *init_data_container(u32 total_bytes);
 struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index b334362110003165a72b63433b192f2b481c3b01..cdfb4c49a806ad4ba0ebe83b5569a4121a10a220 100644
@@ -596,6 +596,11 @@ tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
        if (tree_mod_dont_log(fs_info, eb))
                return 0;
 
+       /*
+        * When we override something during the move, we log these removals.
+        * This can only happen when we move towards the beginning of the
+        * buffer, i.e. dst_slot < src_slot.
+        */
        for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
                ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
                                              MOD_LOG_KEY_REMOVE_WHILE_MOVING);
@@ -647,8 +652,6 @@ tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
        if (tree_mod_dont_log(fs_info, NULL))
                return 0;
 
-       __tree_mod_log_free_eb(fs_info, old_root);
-
        ret = tree_mod_alloc(fs_info, flags, &tm);
        if (ret < 0)
                goto out;
@@ -926,12 +929,7 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
                        ret = btrfs_dec_ref(trans, root, buf, 1, 1);
                        BUG_ON(ret); /* -ENOMEM */
                }
-               /*
-                * don't log freeing in case we're freeing the root node, this
-                * is done by tree_mod_log_set_root_pointer later
-                */
-               if (buf != root->node && btrfs_header_level(buf) != 0)
-                       tree_mod_log_free_eb(root->fs_info, buf);
+               tree_mod_log_free_eb(root->fs_info, buf);
                clean_tree_block(trans, root, buf);
                *last_ref = 1;
        }
@@ -1225,6 +1223,8 @@ tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
        free_extent_buffer(eb);
 
        __tree_mod_log_rewind(eb_rewin, time_seq, tm);
+       WARN_ON(btrfs_header_nritems(eb_rewin) >
+               BTRFS_NODEPTRS_PER_BLOCK(fs_info->fs_root));
 
        return eb_rewin;
 }
@@ -1241,9 +1241,11 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
 {
        struct tree_mod_elem *tm;
        struct extent_buffer *eb;
+       struct extent_buffer *old;
        struct tree_mod_root *old_root = NULL;
        u64 old_generation = 0;
        u64 logical;
+       u32 blocksize;
 
        eb = btrfs_read_lock_root_node(root);
        tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
@@ -1259,14 +1261,32 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
        }
 
        tm = tree_mod_log_search(root->fs_info, logical, time_seq);
-       if (old_root)
+       if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
+               btrfs_tree_read_unlock(root->node);
+               free_extent_buffer(root->node);
+               blocksize = btrfs_level_size(root, old_root->level);
+               old = read_tree_block(root, logical, blocksize, 0);
+               if (!old) {
+                       pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
+                               logical);
+                       WARN_ON(1);
+               } else {
+                       eb = btrfs_clone_extent_buffer(old);
+                       free_extent_buffer(old);
+               }
+       } else if (old_root) {
+               btrfs_tree_read_unlock(root->node);
+               free_extent_buffer(root->node);
                eb = alloc_dummy_extent_buffer(logical, root->nodesize);
-       else
+       } else {
                eb = btrfs_clone_extent_buffer(root->node);
-       btrfs_tree_read_unlock(root->node);
-       free_extent_buffer(root->node);
+               btrfs_tree_read_unlock(root->node);
+               free_extent_buffer(root->node);
+       }
+
        if (!eb)
                return NULL;
+       extent_buffer_get(eb);
        btrfs_tree_read_lock(eb);
        if (old_root) {
                btrfs_set_header_bytenr(eb, eb->start);
@@ -1279,11 +1299,28 @@ get_old_root(struct btrfs_root *root, u64 time_seq)
                __tree_mod_log_rewind(eb, time_seq, tm);
        else
                WARN_ON(btrfs_header_level(eb) != 0);
-       extent_buffer_get(eb);
+       WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
 
        return eb;
 }
 
+int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
+{
+       struct tree_mod_elem *tm;
+       int level;
+
+       tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
+       if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
+               level = tm->old_root.level;
+       } else {
+               rcu_read_lock();
+               level = btrfs_header_level(root->node);
+               rcu_read_unlock();
+       }
+
+       return level;
+}
+
 static inline int should_cow_block(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct extent_buffer *buf)
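
The get_old_root() rework above combines two steps: clone the current root buffer, then have __tree_mod_log_rewind() replay logged inverse operations until the clone matches the requested time_seq. A deliberately tiny toy model of that replay idea, a plain array plus an operation log walked newest-first, follows; the names and structures are illustrative, not the real btrfs ones.

#include <stdio.h>
#include <string.h>

enum op { LOG_SET };                    /* one inverse op is enough for a toy */

struct mod {
        unsigned seq;                   /* when the change happened */
        enum op op;
        int slot;
        int old_val;                    /* value before the change */
};

/* Copy cur[] and undo every logged change newer than time_seq,
 * replaying the log newest-first, as a rewind would. */
static void rewind_state(const int *cur, int *out, int n,
                         const struct mod *log, int nlog, unsigned time_seq)
{
        memcpy(out, cur, n * sizeof(*out));
        for (int i = nlog - 1; i >= 0; i--)
                if (log[i].seq > time_seq && log[i].op == LOG_SET)
                        out[log[i].slot] = log[i].old_val;
}

int main(void)
{
        int cur[3] = { 10, 99, 30 };    /* slot 1 became 99 at seq 5 */
        struct mod log[] = { { 5, LOG_SET, 1, 20 } };
        int old[3];

        rewind_state(cur, old, 3, log, 1, 4);
        printf("%d %d %d\n", old[0], old[1], old[2]);   /* 10 20 30 */
        return 0;
}
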
@@ -1725,6 +1762,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
                        goto enospc;
                }
 
+               tree_mod_log_free_eb(root->fs_info, root->node);
                tree_mod_log_set_root_pointer(root, child);
                rcu_assign_pointer(root->node, child);
 
@@ -2970,8 +3008,10 @@ static int push_node_left(struct btrfs_trans_handle *trans,
                           push_items * sizeof(struct btrfs_key_ptr));
 
        if (push_items < src_nritems) {
-               tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
-                                    src_nritems - push_items);
+               /*
+                * don't call tree_mod_log_eb_move here; the key removal was
+                * already fully logged by tree_mod_log_eb_copy above.
+                */
                memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
                                      btrfs_node_key_ptr_offset(push_items),
                                      (src_nritems - push_items) *
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 926c9ffc66d93324d155481c4ecba13d27fa3fec..c72ead869507412ac9939c0748a482035baaf2aa 100644 (file)
@@ -3120,6 +3120,7 @@ static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
 {
        return atomic_inc_return(&fs_info->tree_mod_seq);
 }
+int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq);
 
 /* root-item.c */
 int btrfs_find_root_ref(struct btrfs_root *tree_root,
@@ -3338,6 +3339,8 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
 int btrfs_update_inode(struct btrfs_trans_handle *trans,
                              struct btrfs_root *root,
                              struct inode *inode);
+int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+                               struct btrfs_root *root, struct inode *inode);
 int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode);
 int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode);
 int btrfs_orphan_cleanup(struct btrfs_root *root);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8036d3a848530daed167453ebfb68eb8a83950f3..472873a94d969a86967e832eac2d452f274f11f9 100644 (file)
@@ -4110,8 +4110,8 @@ struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
 
        return eb;
 err:
-       for (i--; i >= 0; i--)
-               __free_page(eb->pages[i]);
+       for (; i > 0; i--)
+               __free_page(eb->pages[i - 1]);
        __free_extent_buffer(eb);
        return NULL;
 }
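
The rewritten error path above avoids a classic unwind pitfall: the index is unsigned here, so the old "for (i--; i >= 0; i--)" form underflows and never terminates, because i >= 0 always holds for an unsigned type. A minimal standalone sketch of the corrected idiom, freeing the first i elements in reverse, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

/* Allocate n buffers; on failure, free the first i in reverse order. */
static char **alloc_all(unsigned int n, size_t sz)
{
        char **bufs = calloc(n, sizeof(*bufs));
        unsigned int i;

        if (!bufs)
                return NULL;
        for (i = 0; i < n; i++) {
                bufs[i] = malloc(sz);
                if (!bufs[i])
                        goto err;
        }
        return bufs;
err:
        /* "for (i--; i >= 0; i--)" would loop forever here: i is unsigned */
        for (; i > 0; i--)
                free(bufs[i - 1]);
        free(bufs);
        return NULL;
}

int main(void)
{
        char **b = alloc_all(4, 16);

        if (b) {
                for (unsigned int i = 0; i < 4; i++)
                        free(b[i]);
                free(b);
        }
        return 0;
}
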
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 85a1e5053fe63a9d8df6682da38198883e00bb22..95542a1b3dfc99632219310f0108788789247fc9 100644 (file)
@@ -94,8 +94,6 @@ static noinline int cow_file_range(struct inode *inode,
                                   struct page *locked_page,
                                   u64 start, u64 end, int *page_started,
                                   unsigned long *nr_written, int unlock);
-static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
-                               struct btrfs_root *root, struct inode *inode);
 
 static int btrfs_init_inode_security(struct btrfs_trans_handle *trans,
                                     struct inode *inode,  struct inode *dir,
@@ -2746,8 +2744,9 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans,
        return btrfs_update_inode_item(trans, root, inode);
 }
 
-static noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
-                               struct btrfs_root *root, struct inode *inode)
+noinline int btrfs_update_inode_fallback(struct btrfs_trans_handle *trans,
+                                        struct btrfs_root *root,
+                                        struct inode *inode)
 {
        int ret;
 
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 61168805f175b3c50ffef99b87e97fdeb2643ebd..8fcf9a59c28d08c5c8a6c65e34844e321ca2b66c 100644 (file)
@@ -343,7 +343,8 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg)
                return -EOPNOTSUPP;
        if (copy_from_user(&range, arg, sizeof(range)))
                return -EFAULT;
-       if (range.start > total_bytes)
+       if (range.start > total_bytes ||
+           range.len < fs_info->sb->s_blocksize)
                return -EINVAL;
 
        range.len = min(range.len, total_bytes - range.start);
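
The added check above rejects a FITRIM request whose length is smaller than one filesystem block, alongside the existing start-beyond-device test, before the length is clamped with min(). A small userspace sketch of the same validate-then-clamp shape; the types and names are placeholders, not the btrfs API:

#include <stdint.h>
#include <errno.h>

/* Validate a user-supplied [start, start + len) against the device size,
 * requiring at least one block, then clamp len to the remaining space. */
static int check_trim_range(uint64_t *start, uint64_t *len,
                            uint64_t total_bytes, uint32_t blocksize)
{
        if (*start > total_bytes || *len < blocksize)
                return -EINVAL;
        if (*len > total_bytes - *start)
                *len = total_bytes - *start;
        return 0;
}

int main(void)
{
        uint64_t start = 0, len = 1 << 20;

        return check_trim_range(&start, &len, 512u << 20, 4096);
}
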
@@ -570,7 +571,8 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
                ret = btrfs_commit_transaction(trans,
                                               root->fs_info->extent_root);
        }
-       BUG_ON(ret);
+       if (ret)
+               goto fail;
 
        ret = pending_snapshot->error;
        if (ret)
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 5039686df6ae8e801ed8985eb5e821a226e18855..fe9d02c45f8e521f87b44d6deff3e5f8d99aa3a9 100644 (file)
@@ -790,8 +790,10 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
        }
 
        path = btrfs_alloc_path();
-       if (!path)
-               return -ENOMEM;
+       if (!path) {
+               ret = -ENOMEM;
+               goto out_free_root;
+       }
 
        key.objectid = 0;
        key.type = BTRFS_QGROUP_STATUS_KEY;
@@ -800,7 +802,7 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
        ret = btrfs_insert_empty_item(trans, quota_root, path, &key,
                                      sizeof(*ptr));
        if (ret)
-               goto out;
+               goto out_free_path;
 
        leaf = path->nodes[0];
        ptr = btrfs_item_ptr(leaf, path->slots[0],
@@ -818,8 +820,15 @@ int btrfs_quota_enable(struct btrfs_trans_handle *trans,
        fs_info->quota_root = quota_root;
        fs_info->pending_quota_state = 1;
        spin_unlock(&fs_info->qgroup_lock);
-out:
+out_free_path:
        btrfs_free_path(path);
+out_free_root:
+       if (ret) {
+               free_extent_buffer(quota_root->node);
+               free_extent_buffer(quota_root->commit_root);
+               kfree(quota_root);
+       }
+out:
        return ret;
 }
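
The reworked btrfs_quota_enable() error handling above is the standard goto ladder: each later failure jumps to a label that unwinds exactly what has been acquired so far, and success falls through only the unconditional part of the teardown. A generic standalone sketch of the same shape, with placeholder resources:

#include <stdlib.h>
#include <errno.h>

static int may_fail(void)
{
        return 0;                       /* stand-in for the insert that may fail */
}

/* Mirrors the shape above: the path is freed on every exit once it was
 * allocated; the root is torn down only when the call fails overall. */
static int enable_quota_like(void **rootp)
{
        void *root, *path;
        int ret;

        root = malloc(64);              /* stands in for the new quota root */
        if (!root)
                return -ENOMEM;

        path = malloc(64);              /* stands in for btrfs_alloc_path() */
        if (!path) {
                ret = -ENOMEM;
                goto out_free_root;
        }

        ret = may_fail();
        if (ret)
                goto out_free_path;

        *rootp = root;                  /* success: hand the root to the caller */

out_free_path:
        free(path);
out_free_root:
        if (ret)
                free(root);
        return ret;
}

int main(void)
{
        void *root = NULL;
        int ret = enable_quota_like(&root);

        free(root);                     /* NULL-safe on the failure path */
        return ret ? 1 : 0;
}
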
 
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index c7beb543a4a89300f1e586492b767ea8f9bef683..e78b297b0b00cc990e8eb7a3f1f34619638e5ef4 100644 (file)
@@ -745,31 +745,36 @@ typedef int (*iterate_inode_ref_t)(int num, u64 dir, int index,
                                   void *ctx);
 
 /*
- * Helper function to iterate the entries in ONE btrfs_inode_ref.
+ * Helper function to iterate the entries in ONE btrfs_inode_ref or
+ * btrfs_inode_extref.
  * The iterate callback may return a non-zero value to stop iteration. This can
  * be a negative value for error codes or 1 to simply stop it.
  *
- * path must point to the INODE_REF when called.
+ * path must point to the INODE_REF or INODE_EXTREF when called.
  */
 static int iterate_inode_ref(struct send_ctx *sctx,
                             struct btrfs_root *root, struct btrfs_path *path,
                             struct btrfs_key *found_key, int resolve,
                             iterate_inode_ref_t iterate, void *ctx)
 {
-       struct extent_buffer *eb;
+       struct extent_buffer *eb = path->nodes[0];
        struct btrfs_item *item;
        struct btrfs_inode_ref *iref;
+       struct btrfs_inode_extref *extref;
        struct btrfs_path *tmp_path;
        struct fs_path *p;
-       u32 cur;
-       u32 len;
+       u32 cur = 0;
        u32 total;
-       int slot;
+       int slot = path->slots[0];
        u32 name_len;
        char *start;
        int ret = 0;
-       int num;
+       int num = 0;
        int index;
+       u64 dir;
+       unsigned long name_off;
+       unsigned long elem_size;
+       unsigned long ptr;
 
        p = fs_path_alloc_reversed(sctx);
        if (!p)
@@ -781,24 +786,40 @@ static int iterate_inode_ref(struct send_ctx *sctx,
                return -ENOMEM;
        }
 
-       eb = path->nodes[0];
-       slot = path->slots[0];
-       item = btrfs_item_nr(eb, slot);
-       iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
-       cur = 0;
-       len = 0;
-       total = btrfs_item_size(eb, item);
 
-       num = 0;
+       if (found_key->type == BTRFS_INODE_REF_KEY) {
+               ptr = (unsigned long)btrfs_item_ptr(eb, slot,
+                                                   struct btrfs_inode_ref);
+               item = btrfs_item_nr(eb, slot);
+               total = btrfs_item_size(eb, item);
+               elem_size = sizeof(*iref);
+       } else {
+               ptr = btrfs_item_ptr_offset(eb, slot);
+               total = btrfs_item_size_nr(eb, slot);
+               elem_size = sizeof(*extref);
+       }
+
        while (cur < total) {
                fs_path_reset(p);
 
-               name_len = btrfs_inode_ref_name_len(eb, iref);
-               index = btrfs_inode_ref_index(eb, iref);
+               if (found_key->type == BTRFS_INODE_REF_KEY) {
+                       iref = (struct btrfs_inode_ref *)(ptr + cur);
+                       name_len = btrfs_inode_ref_name_len(eb, iref);
+                       name_off = (unsigned long)(iref + 1);
+                       index = btrfs_inode_ref_index(eb, iref);
+                       dir = found_key->offset;
+               } else {
+                       extref = (struct btrfs_inode_extref *)(ptr + cur);
+                       name_len = btrfs_inode_extref_name_len(eb, extref);
+                       name_off = (unsigned long)&extref->name;
+                       index = btrfs_inode_extref_index(eb, extref);
+                       dir = btrfs_inode_extref_parent(eb, extref);
+               }
+
                if (resolve) {
-                       start = btrfs_iref_to_path(root, tmp_path, iref, eb,
-                                               found_key->offset, p->buf,
-                                               p->buf_len);
+                       start = btrfs_ref_to_path(root, tmp_path, name_len,
+                                                 name_off, eb, dir,
+                                                 p->buf, p->buf_len);
                        if (IS_ERR(start)) {
                                ret = PTR_ERR(start);
                                goto out;
@@ -809,9 +830,10 @@ static int iterate_inode_ref(struct send_ctx *sctx,
                                                p->buf_len + p->buf - start);
                                if (ret < 0)
                                        goto out;
-                               start = btrfs_iref_to_path(root, tmp_path, iref,
-                                               eb, found_key->offset, p->buf,
-                                               p->buf_len);
+                               start = btrfs_ref_to_path(root, tmp_path,
+                                                         name_len, name_off,
+                                                         eb, dir,
+                                                         p->buf, p->buf_len);
                                if (IS_ERR(start)) {
                                        ret = PTR_ERR(start);
                                        goto out;
@@ -820,21 +842,16 @@ static int iterate_inode_ref(struct send_ctx *sctx,
                        }
                        p->start = start;
                } else {
-                       ret = fs_path_add_from_extent_buffer(p, eb,
-                                       (unsigned long)(iref + 1), name_len);
+                       ret = fs_path_add_from_extent_buffer(p, eb, name_off,
+                                                            name_len);
                        if (ret < 0)
                                goto out;
                }
 
-
-               len = sizeof(*iref) + name_len;
-               iref = (struct btrfs_inode_ref *)((char *)iref + len);
-               cur += len;
-
-               ret = iterate(num, found_key->offset, index, p, ctx);
+               cur += elem_size + name_len;
+               ret = iterate(num, dir, index, p, ctx);
                if (ret)
                        goto out;
-
                num++;
        }
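
After this change iterate_inode_ref() treats INODE_REF and INODE_EXTREF items uniformly: an item is a run of fixed-size headers, each followed inline by its name, and the cursor advances by elem_size + name_len per entry. A toy standalone model of that walk follows; the struct layout is illustrative, not the btrfs on-disk format.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct ref_hdr {                        /* fixed part; name bytes follow inline */
        uint16_t name_len;
} __attribute__((packed));

static void iterate_refs(const unsigned char *item, uint32_t total)
{
        uint32_t cur = 0;

        while (cur < total) {
                const struct ref_hdr *h = (const void *)(item + cur);
                const char *name = (const char *)(h + 1);

                printf("%.*s\n", h->name_len, name);
                cur += sizeof(*h) + h->name_len;        /* elem_size + name_len */
        }
}

int main(void)
{
        unsigned char item[32];
        uint32_t off = 0;
        const char *names[] = { "a", "bc" };

        for (int i = 0; i < 2; i++) {
                struct ref_hdr h = { .name_len = (uint16_t)strlen(names[i]) };

                memcpy(item + off, &h, sizeof(h));
                memcpy(item + off + sizeof(h), names[i], h.name_len);
                off += sizeof(h) + h.name_len;
        }
        iterate_refs(item, off);
        return 0;
}
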
 
@@ -998,7 +1015,8 @@ static int get_inode_path(struct send_ctx *sctx, struct btrfs_root *root,
        }
        btrfs_item_key_to_cpu(p->nodes[0], &found_key, p->slots[0]);
        if (found_key.objectid != ino ||
-               found_key.type != BTRFS_INODE_REF_KEY) {
+           (found_key.type != BTRFS_INODE_REF_KEY &&
+            found_key.type != BTRFS_INODE_EXTREF_KEY)) {
                ret = -ENOENT;
                goto out;
        }
@@ -1551,8 +1569,8 @@ static int get_first_ref(struct send_ctx *sctx,
        struct btrfs_key key;
        struct btrfs_key found_key;
        struct btrfs_path *path;
-       struct btrfs_inode_ref *iref;
        int len;
+       u64 parent_dir;
 
        path = alloc_path_for_send();
        if (!path)
@@ -1568,27 +1586,41 @@ static int get_first_ref(struct send_ctx *sctx,
        if (!ret)
                btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                path->slots[0]);
-       if (ret || found_key.objectid != key.objectid ||
-           found_key.type != key.type) {
+       if (ret || found_key.objectid != ino ||
+           (found_key.type != BTRFS_INODE_REF_KEY &&
+            found_key.type != BTRFS_INODE_EXTREF_KEY)) {
                ret = -ENOENT;
                goto out;
        }
 
-       iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
-                       struct btrfs_inode_ref);
-       len = btrfs_inode_ref_name_len(path->nodes[0], iref);
-       ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
-                       (unsigned long)(iref + 1), len);
+       if (key.type == BTRFS_INODE_REF_KEY) {
+               struct btrfs_inode_ref *iref;
+               iref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                     struct btrfs_inode_ref);
+               len = btrfs_inode_ref_name_len(path->nodes[0], iref);
+               ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
+                                                    (unsigned long)(iref + 1),
+                                                    len);
+               parent_dir = found_key.offset;
+       } else {
+               struct btrfs_inode_extref *extref;
+               extref = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                       struct btrfs_inode_extref);
+               len = btrfs_inode_extref_name_len(path->nodes[0], extref);
+               ret = fs_path_add_from_extent_buffer(name, path->nodes[0],
+                                       (unsigned long)&extref->name, len);
+               parent_dir = btrfs_inode_extref_parent(path->nodes[0], extref);
+       }
        if (ret < 0)
                goto out;
        btrfs_release_path(path);
 
-       ret = get_inode_info(root, found_key.offset, NULL, dir_gen, NULL, NULL,
+       ret = get_inode_info(root, parent_dir, NULL, dir_gen, NULL, NULL,
                        NULL, NULL);
        if (ret < 0)
                goto out;
 
-       *dir = found_key.offset;
+       *dir = parent_dir;
 
 out:
        btrfs_free_path(path);
@@ -2430,7 +2462,8 @@ verbose_printk("btrfs: send_create_inode %llu\n", ino);
                TLV_PUT_PATH(sctx, BTRFS_SEND_A_PATH_LINK, p);
        } else if (S_ISCHR(mode) || S_ISBLK(mode) ||
                   S_ISFIFO(mode) || S_ISSOCK(mode)) {
-               TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, rdev);
+               TLV_PUT_U64(sctx, BTRFS_SEND_A_RDEV, new_encode_dev(rdev));
+               TLV_PUT_U64(sctx, BTRFS_SEND_A_MODE, mode);
        }
 
        ret = send_cmd(sctx);
@@ -3226,7 +3259,8 @@ static int process_all_refs(struct send_ctx *sctx,
                btrfs_item_key_to_cpu(eb, &found_key, slot);
 
                if (found_key.objectid != key.objectid ||
-                   found_key.type != key.type)
+                   (found_key.type != BTRFS_INODE_REF_KEY &&
+                    found_key.type != BTRFS_INODE_EXTREF_KEY))
                        break;
 
                ret = iterate_inode_ref(sctx, root, path, &found_key, 0, cb,
@@ -3987,7 +4021,7 @@ static int process_recorded_refs_if_needed(struct send_ctx *sctx, int at_end)
        if (sctx->cur_ino == 0)
                goto out;
        if (!at_end && sctx->cur_ino == sctx->cmp_key->objectid &&
-           sctx->cmp_key->type <= BTRFS_INODE_REF_KEY)
+           sctx->cmp_key->type <= BTRFS_INODE_EXTREF_KEY)
                goto out;
        if (list_empty(&sctx->new_refs) && list_empty(&sctx->deleted_refs))
                goto out;
@@ -4033,22 +4067,21 @@ static int finish_inode_if_needed(struct send_ctx *sctx, int at_end)
        if (ret < 0)
                goto out;
 
-       if (!S_ISLNK(sctx->cur_inode_mode)) {
-               if (!sctx->parent_root || sctx->cur_inode_new) {
+       if (!sctx->parent_root || sctx->cur_inode_new) {
+               need_chown = 1;
+               if (!S_ISLNK(sctx->cur_inode_mode))
                        need_chmod = 1;
-                       need_chown = 1;
-               } else {
-                       ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
-                                       NULL, NULL, &right_mode, &right_uid,
-                                       &right_gid, NULL);
-                       if (ret < 0)
-                               goto out;
+       } else {
+               ret = get_inode_info(sctx->parent_root, sctx->cur_ino,
+                               NULL, NULL, &right_mode, &right_uid,
+                               &right_gid, NULL);
+               if (ret < 0)
+                       goto out;
 
-                       if (left_uid != right_uid || left_gid != right_gid)
-                               need_chown = 1;
-                       if (left_mode != right_mode)
-                               need_chmod = 1;
-               }
+               if (left_uid != right_uid || left_gid != right_gid)
+                       need_chown = 1;
+               if (!S_ISLNK(sctx->cur_inode_mode) && left_mode != right_mode)
+                       need_chmod = 1;
        }
 
        if (S_ISREG(sctx->cur_inode_mode)) {
@@ -4335,7 +4368,8 @@ static int changed_cb(struct btrfs_root *left_root,
 
        if (key->type == BTRFS_INODE_ITEM_KEY)
                ret = changed_inode(sctx, result);
-       else if (key->type == BTRFS_INODE_REF_KEY)
+       else if (key->type == BTRFS_INODE_REF_KEY ||
+                key->type == BTRFS_INODE_EXTREF_KEY)
                ret = changed_ref(sctx, result);
        else if (key->type == BTRFS_XATTR_ITEM_KEY)
                ret = changed_xattr(sctx, result);
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 77db875b511638b7ff94854c6b1482942da2b3fb..04bbfb1052ebfee9db25427d5542e795cac351cd 100644 (file)
@@ -1200,7 +1200,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                         dentry->d_name.len * 2);
        parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
-       ret = btrfs_update_inode(trans, parent_root, parent_inode);
+       ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
        if (ret)
                btrfs_abort_transaction(trans, root, ret);
 fail:
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 029b903a4ae3797322e05090790b86c9e8596c43..0f5ebb72a5ea01693b339d66e3f928de9b783f78 100644 (file)
@@ -1819,6 +1819,13 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                                    "Failed to relocate sys chunks after "
                                    "device initialization. This can be fixed "
                                    "using the \"btrfs balance\" command.");
+               trans = btrfs_attach_transaction(root);
+               if (IS_ERR(trans)) {
+                       if (PTR_ERR(trans) == -ENOENT)
+                               return 0;
+                       return PTR_ERR(trans);
+               }
+               ret = btrfs_commit_transaction(trans, root);
        }
 
        return ret;
diff --git a/fs/lockd/mon.c b/fs/lockd/mon.c
index e4fb3ba5a58a3d50eb11a0f9533ca58ae5150283..3d7e09bcc0e9efad7418803737e385c76d22a776 100644 (file)
@@ -85,29 +85,38 @@ static struct rpc_clnt *nsm_create(struct net *net)
        return rpc_create(&args);
 }
 
+static struct rpc_clnt *nsm_client_set(struct lockd_net *ln,
+               struct rpc_clnt *clnt)
+{
+       spin_lock(&ln->nsm_clnt_lock);
+       if (ln->nsm_users == 0) {
+               if (clnt == NULL)
+                       goto out;
+               ln->nsm_clnt = clnt;
+       }
+       clnt = ln->nsm_clnt;
+       ln->nsm_users++;
+out:
+       spin_unlock(&ln->nsm_clnt_lock);
+       return clnt;
+}
+
 static struct rpc_clnt *nsm_client_get(struct net *net)
 {
-       static DEFINE_MUTEX(nsm_create_mutex);
-       struct rpc_clnt *clnt;
+       struct rpc_clnt *clnt, *new;
        struct lockd_net *ln = net_generic(net, lockd_net_id);
 
-       spin_lock(&ln->nsm_clnt_lock);
-       if (ln->nsm_users) {
-               ln->nsm_users++;
-               clnt = ln->nsm_clnt;
-               spin_unlock(&ln->nsm_clnt_lock);
+       clnt = nsm_client_set(ln, NULL);
+       if (clnt != NULL)
                goto out;
-       }
-       spin_unlock(&ln->nsm_clnt_lock);
 
-       mutex_lock(&nsm_create_mutex);
-       clnt = nsm_create(net);
-       if (!IS_ERR(clnt)) {
-               ln->nsm_clnt = clnt;
-               smp_wmb();
-               ln->nsm_users = 1;
-       }
-       mutex_unlock(&nsm_create_mutex);
+       clnt = new = nsm_create(net);
+       if (IS_ERR(clnt))
+               goto out;
+
+       clnt = nsm_client_set(ln, new);
+       if (clnt != new)
+               rpc_shutdown_client(new);
 out:
        return clnt;
 }
@@ -115,18 +124,16 @@ out:
 static void nsm_client_put(struct net *net)
 {
        struct lockd_net *ln = net_generic(net, lockd_net_id);
-       struct rpc_clnt *clnt = ln->nsm_clnt;
-       int shutdown = 0;
+       struct rpc_clnt *clnt = NULL;
 
        spin_lock(&ln->nsm_clnt_lock);
-       if (ln->nsm_users) {
-               if (--ln->nsm_users)
-                       ln->nsm_clnt = NULL;
-               shutdown = !ln->nsm_users;
+       ln->nsm_users--;
+       if (ln->nsm_users == 0) {
+               clnt = ln->nsm_clnt;
+               ln->nsm_clnt = NULL;
        }
        spin_unlock(&ln->nsm_clnt_lock);
-
-       if (shutdown)
+       if (clnt != NULL)
                rpc_shutdown_client(clnt);
 }
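
The nsm_client_get() rework above replaces the mutex with an optimistic create-then-install scheme: nsm_client_set() either returns the client that is already installed (bumping the user count) or installs the caller's new one, and a caller that loses the install race shuts down its redundant client. A userspace sketch of the pattern, with a pthread mutex standing in for the spinlock and illustrative names:

#include <pthread.h>
#include <stdlib.h>

struct state {
        pthread_mutex_t lock;
        void *clnt;
        unsigned users;
};

/* Install clnt if there are no users yet; always return the installed
 * client (or NULL when nothing is installed and clnt is NULL). */
static void *client_set(struct state *s, void *clnt)
{
        pthread_mutex_lock(&s->lock);
        if (s->users == 0) {
                if (clnt == NULL)
                        goto out;
                s->clnt = clnt;
        }
        clnt = s->clnt;
        s->users++;
out:
        pthread_mutex_unlock(&s->lock);
        return clnt;
}

static void *client_get(struct state *s)
{
        void *clnt, *new;

        clnt = client_set(s, NULL);     /* fast path: already created */
        if (clnt)
                return clnt;

        new = malloc(1);                /* stand-in for expensive creation */
        if (!new)
                return NULL;

        clnt = client_set(s, new);      /* race: someone may have beaten us */
        if (clnt != new)
                free(new);              /* lost the race; drop our copy */
        return clnt;
}

int main(void)
{
        struct state s = { PTHREAD_MUTEX_INITIALIZER, NULL, 0 };
        void *c1 = client_get(&s);
        void *c2 = client_get(&s);      /* second user shares the client */
        int ok = c1 && c1 == c2;

        free(c1);
        return ok ? 0 : 1;
}
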
 
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index c78bb997e2c60846a1f5e261664d96ee1656fe6d..af1cbaf535edeb395954583d1d1c1cd0c255c64c 100644 (file)
        {0x1002, 0x6788, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x678A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6790, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6791, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6792, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6798, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6799, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x679A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TAHITI|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x6811, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6816, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6817, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6818, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PITCAIRN|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 569d67d4243ed99c4b896fac81442531b9d05cbc..d452ee191066456278080c92322e093986baa1fb 100644 (file)
@@ -57,6 +57,7 @@ int memblock_add(phys_addr_t base, phys_addr_t size);
 int memblock_remove(phys_addr_t base, phys_addr_t size);
 int memblock_free(phys_addr_t base, phys_addr_t size);
 int memblock_reserve(phys_addr_t base, phys_addr_t size);
+void memblock_trim_memory(phys_addr_t align);
 
 #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
 void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2e902359aee596f128dee568d06e0e0ab3149dfe..6bfb2faa0b1937555f5ca8fa937c7007bf5b3bc4 100644 (file)
@@ -803,12 +803,16 @@ static inline void perf_event_task_tick(void)                             { }
 do {                                                                   \
        static struct notifier_block fn##_nb __cpuinitdata =            \
                { .notifier_call = fn, .priority = CPU_PRI_PERF };      \
+       unsigned long cpu = smp_processor_id();                         \
+       unsigned long flags;                                            \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,                     \
-               (void *)(unsigned long)smp_processor_id());             \
+               (void *)(unsigned long)cpu);                            \
+       local_irq_save(flags);                                          \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,                       \
-               (void *)(unsigned long)smp_processor_id());             \
+               (void *)(unsigned long)cpu);                            \
+       local_irq_restore(flags);                                       \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                         \
-               (void *)(unsigned long)smp_processor_id());             \
+               (void *)(unsigned long)cpu);                            \
        register_cpu_notifier(&fn##_nb);                                \
 } while (0)
 
diff --git a/mm/memblock.c b/mm/memblock.c
index 931eef145af5eed370ed6f2b0c9dc577295be6ff..625905523c2a1592f539c46f6721723d62376648 100644 (file)
@@ -930,6 +930,30 @@ int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t si
        return memblock_overlaps_region(&memblock.reserved, base, size) >= 0;
 }
 
+void __init_memblock memblock_trim_memory(phys_addr_t align)
+{
+       int i;
+       phys_addr_t start, end, orig_start, orig_end;
+       struct memblock_type *mem = &memblock.memory;
+
+       for (i = 0; i < mem->cnt; i++) {
+               orig_start = mem->regions[i].base;
+               orig_end = mem->regions[i].base + mem->regions[i].size;
+               start = round_up(orig_start, align);
+               end = round_down(orig_end, align);
+
+               if (start == orig_start && end == orig_end)
+                       continue;
+
+               if (start < end) {
+                       mem->regions[i].base = start;
+                       mem->regions[i].size = end - start;
+               } else {
+                       memblock_remove_region(mem, i);
+                       i--;
+               }
+       }
+}
 
 void __init_memblock memblock_set_current_limit(phys_addr_t limit)
 {
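
memblock_trim_memory() above rounds each region inward to the requested alignment and removes regions that collapse to nothing. A standalone sketch of the same arithmetic over a plain array, assuming power-of-two alignments so round_up/round_down reduce to masking:

#include <stdio.h>
#include <stdint.h>

struct region { uint64_t base, size; };

#define ROUND_UP(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))
#define ROUND_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

/* Trim every region inward to 'align' (a power of two); regions that
 * become empty are removed by shifting the tail of the array down. */
static int trim_regions(struct region *r, int cnt, uint64_t align)
{
        for (int i = 0; i < cnt; i++) {
                uint64_t start = ROUND_UP(r[i].base, align);
                uint64_t end = ROUND_DOWN(r[i].base + r[i].size, align);

                if (start < end) {
                        r[i].base = start;
                        r[i].size = end - start;
                } else {
                        for (int j = i; j < cnt - 1; j++)
                                r[j] = r[j + 1];
                        cnt--;
                        i--;            /* re-examine the slot we shifted into */
                }
        }
        return cnt;
}

int main(void)
{
        struct region r[] = { { 0x1001, 0x3000 }, { 0x10, 0x20 } };
        int n = trim_regions(r, 2, 0x1000);

        for (int i = 0; i < n; i++)     /* prints 0x2000+0x2000 */
                printf("%#llx+%#llx\n",
                       (unsigned long long)r[i].base,
                       (unsigned long long)r[i].size);
        return 0;
}
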
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index aaaadfbe36e9525a42a86e1f649bf66e59eb7274..75853cabf4c97b153873eda4ce67cd228581fd15 100644 (file)
@@ -254,7 +254,6 @@ struct sock_xprt {
        void                    (*old_data_ready)(struct sock *, int);
        void                    (*old_state_change)(struct sock *);
        void                    (*old_write_space)(struct sock *);
-       void                    (*old_error_report)(struct sock *);
 };
 
 /*
@@ -737,10 +736,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
                dprintk("RPC:       sendmsg returned unrecognized error %d\n",
                        -status);
        case -ECONNRESET:
-       case -EPIPE:
                xs_tcp_shutdown(xprt);
        case -ECONNREFUSED:
        case -ENOTCONN:
+       case -EPIPE:
                clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
        }
 
@@ -781,7 +780,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
        transport->old_data_ready = sk->sk_data_ready;
        transport->old_state_change = sk->sk_state_change;
        transport->old_write_space = sk->sk_write_space;
-       transport->old_error_report = sk->sk_error_report;
 }
 
 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
@@ -789,7 +787,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s
        sk->sk_data_ready = transport->old_data_ready;
        sk->sk_state_change = transport->old_state_change;
        sk->sk_write_space = transport->old_write_space;
-       sk->sk_error_report = transport->old_error_report;
 }
 
 static void xs_reset_transport(struct sock_xprt *transport)
@@ -1453,7 +1450,7 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt)
        xprt_clear_connecting(xprt);
 }
 
-static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt)
 {
        smp_mb__before_clear_bit();
        clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
@@ -1461,6 +1458,11 @@ static void xs_sock_mark_closed(struct rpc_xprt *xprt)
        clear_bit(XPRT_CLOSE_WAIT, &xprt->state);
        clear_bit(XPRT_CLOSING, &xprt->state);
        smp_mb__after_clear_bit();
+}
+
+static void xs_sock_mark_closed(struct rpc_xprt *xprt)
+{
+       xs_sock_reset_connection_flags(xprt);
        /* Mark transport as closed and wake up all pending tasks */
        xprt_disconnect_done(xprt);
 }
@@ -1516,6 +1518,7 @@ static void xs_tcp_state_change(struct sock *sk)
        case TCP_CLOSE_WAIT:
                /* The server initiated a shutdown of the socket */
                xprt->connect_cookie++;
+               clear_bit(XPRT_CONNECTED, &xprt->state);
                xs_tcp_force_close(xprt);
        case TCP_CLOSING:
                /*
@@ -1540,25 +1543,6 @@ static void xs_tcp_state_change(struct sock *sk)
        read_unlock_bh(&sk->sk_callback_lock);
 }
 
-/**
- * xs_error_report - callback mainly for catching socket errors
- * @sk: socket
- */
-static void xs_error_report(struct sock *sk)
-{
-       struct rpc_xprt *xprt;
-
-       read_lock_bh(&sk->sk_callback_lock);
-       if (!(xprt = xprt_from_sock(sk)))
-               goto out;
-       dprintk("RPC:       %s client %p...\n"
-                       "RPC:       error %d\n",
-                       __func__, xprt, sk->sk_err);
-       xprt_wake_pending_tasks(xprt, -EAGAIN);
-out:
-       read_unlock_bh(&sk->sk_callback_lock);
-}
-
 static void xs_write_space(struct sock *sk)
 {
        struct socket *sock;
@@ -1858,7 +1842,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
                sk->sk_user_data = xprt;
                sk->sk_data_ready = xs_local_data_ready;
                sk->sk_write_space = xs_udp_write_space;
-               sk->sk_error_report = xs_error_report;
                sk->sk_allocation = GFP_ATOMIC;
 
                xprt_clear_connected(xprt);
@@ -1983,7 +1966,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                sk->sk_user_data = xprt;
                sk->sk_data_ready = xs_udp_data_ready;
                sk->sk_write_space = xs_udp_write_space;
-               sk->sk_error_report = xs_error_report;
                sk->sk_no_check = UDP_CSUM_NORCV;
                sk->sk_allocation = GFP_ATOMIC;
 
@@ -2050,10 +2032,8 @@ static void xs_abort_connection(struct sock_xprt *transport)
        any.sa_family = AF_UNSPEC;
        result = kernel_connect(transport->sock, &any, sizeof(any), 0);
        if (!result)
-               xs_sock_mark_closed(&transport->xprt);
-       else
-               dprintk("RPC:       AF_UNSPEC connect return code %d\n",
-                               result);
+               xs_sock_reset_connection_flags(&transport->xprt);
+       dprintk("RPC:       AF_UNSPEC connect return code %d\n", result);
 }
 
 static void xs_tcp_reuse_connection(struct sock_xprt *transport)
@@ -2098,7 +2078,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
                sk->sk_data_ready = xs_tcp_data_ready;
                sk->sk_state_change = xs_tcp_state_change;
                sk->sk_write_space = xs_tcp_write_space;
-               sk->sk_error_report = xs_error_report;
                sk->sk_allocation = GFP_ATOMIC;
 
                /* socket options */
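
The xprtsock changes above split xs_sock_mark_closed() so that xs_abort_connection() can clear the connection state bits without also waking every pending task; the old name keeps the reset-plus-notify behavior. A minimal sketch of that decoupling, with placeholder functions:

#include <stdio.h>

struct xprt { unsigned long state; };

static void reset_connection_flags(struct xprt *x)
{
        x->state = 0;                   /* clear CONNECTING/CLOSING style bits */
}

static void notify_disconnect(struct xprt *x)
{
        printf("waking pending tasks for %p\n", (void *)x);
}

/* Old entry point: full teardown keeps both steps. */
static void mark_closed(struct xprt *x)
{
        reset_connection_flags(x);
        notify_disconnect(x);
}

/* A caller that is about to reconnect wants only the first step. */
static void abort_connection(struct xprt *x)
{
        reset_connection_flags(x);      /* no spurious wakeups */
}

int main(void)
{
        struct xprt x = { .state = 3 };

        abort_connection(&x);
        mark_closed(&x);
        return 0;
}
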
diff --git a/tools/perf/builtin-help.c b/tools/perf/builtin-help.c
index 411ee5664e98e1e765bf992f379c16ec30569585..178b88ae3d2f174a5a085f233163d54cef152337 100644 (file)
@@ -414,7 +414,7 @@ static int show_html_page(const char *perf_cmd)
 int cmd_help(int argc, const char **argv, const char *prefix __maybe_unused)
 {
        bool show_all = false;
-       enum help_format help_format = HELP_FORMAT_NONE;
+       enum help_format help_format = HELP_FORMAT_MAN;
        struct option builtin_help_options[] = {
        OPT_BOOLEAN('a', "all", &show_all, "print all available commands"),
        OPT_SET_UINT('m', "man", &help_format, "show man page", HELP_FORMAT_MAN),
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index dec8ced61fb00c2bc7114346db3edaf0f7dd89b7..7aaee39f6774f1fc49131dd7a3d67f002bee823c 100644 (file)
@@ -56,6 +56,10 @@ static int trace__read_syscall_info(struct trace *trace, int id)
 {
        char tp_name[128];
        struct syscall *sc;
+       const char *name = audit_syscall_to_name(id, trace->audit_machine);
+
+       if (name == NULL)
+               return -1;
 
        if (id > trace->syscalls.max) {
                struct syscall *nsyscalls = realloc(trace->syscalls.table, (id + 1) * sizeof(*sc));
@@ -75,11 +79,8 @@ static int trace__read_syscall_info(struct trace *trace, int id)
        }
 
        sc = trace->syscalls.table + id;
-       sc->name = audit_syscall_to_name(id, trace->audit_machine);
-       if (sc->name == NULL)
-               return -1;
-
-       sc->fmt = syscall_fmt__find(sc->name);
+       sc->name = name;
+       sc->fmt  = syscall_fmt__find(sc->name);
 
        snprintf(tp_name, sizeof(tp_name), "sys_enter_%s", sc->name);
        sc->tp_format = event_format__new("syscalls", tp_name);
@@ -267,6 +268,13 @@ again:
                        if (evlist->threads->map[0] == -1 || evlist->threads->nr > 1)
                                printf("%d ", sample.tid);
 
+                       if (sample.raw_data == NULL) {
+                               printf("%s sample with no payload for tid: %d, cpu %d, raw_size=%d, skipping...\n",
+                                      perf_evsel__name(evsel), sample.tid,
+                                      sample.cpu, sample.raw_size);
+                               continue;
+                       }
+
                        handler = evsel->handler.func;
                        handler(trace, evsel, &sample);
                }
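
Both builtin-trace hunks above apply the same rule: fail before committing side effects. The syscall-name lookup now runs before the table is grown, and samples with no payload are skipped before the handler is invoked. A hedged sketch of the first pattern, with all names as placeholders:

#include <stdlib.h>
#include <string.h>

struct entry { const char *name; };

struct table {
        struct entry *slots;
        int max;                        /* highest valid index; -1 when empty */
};

static const char *lookup_name(int id)
{
        return id == 0 ? "read" : NULL; /* stand-in for audit_syscall_to_name() */
}

/* Validate first: only grow the table once the id is known to resolve. */
static int table_add(struct table *t, int id)
{
        const char *name = lookup_name(id);

        if (name == NULL)
                return -1;              /* no side effects on failure */

        if (id > t->max) {
                struct entry *n = realloc(t->slots, (id + 1) * sizeof(*n));

                if (n == NULL)
                        return -1;
                memset(n + t->max + 1, 0, (id - t->max) * sizeof(*n));
                t->slots = n;
                t->max = id;
        }
        t->slots[id].name = name;
        return 0;
}

int main(void)
{
        struct table t = { NULL, -1 };

        return table_add(&t, 0) == 0 && table_add(&t, 99) == -1 ? 0 : 1;
}
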
diff --git a/tools/perf/util/parse-events-test.c b/tools/perf/util/parse-events-test.c
index 28c18d1d52c3efceeb2847a1b68bd3dd000d45b6..516ecd9ddd6ee275c1e3144abc8a3d091b6114bb 100644 (file)
@@ -513,7 +513,8 @@ static int test__group1(struct perf_evlist *evlist)
        TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
        TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
-       TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+       /* use of precise requires exclude_guest */
+       TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
        TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
@@ -599,7 +600,8 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
        TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
-       TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+       /* use of precise requires exclude_guest */
+       TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 3);
        TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
@@ -662,7 +664,8 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude_user", !evsel->attr.exclude_user);
        TEST_ASSERT_VAL("wrong exclude_kernel", evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
-       TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+       /* use of precise requires exclude_guest */
+       TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 1);
        TEST_ASSERT_VAL("wrong group name", !evsel->group_name);
@@ -676,7 +679,8 @@ static int test__group4(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong exclude_user", evsel->attr.exclude_user);
        TEST_ASSERT_VAL("wrong exclude_kernel", !evsel->attr.exclude_kernel);
        TEST_ASSERT_VAL("wrong exclude_hv", evsel->attr.exclude_hv);
-       TEST_ASSERT_VAL("wrong exclude guest", !evsel->attr.exclude_guest);
+       /* use of precise requires exclude_guest */
+       TEST_ASSERT_VAL("wrong exclude guest", evsel->attr.exclude_guest);
        TEST_ASSERT_VAL("wrong exclude host", !evsel->attr.exclude_host);
        TEST_ASSERT_VAL("wrong precise_ip", evsel->attr.precise_ip == 2);
        TEST_ASSERT_VAL("wrong leader", evsel->leader == leader);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index fb4b7ea6752fd86121a968fa77212727f603d7e9..8b3e5939afb6ea00ef6cd74275eae99f35de0c25 100644 (file)
@@ -39,7 +39,6 @@ int thread__set_comm(struct thread *self, const char *comm)
        err = self->comm == NULL ? -ENOMEM : 0;
        if (!err) {
                self->comm_set = true;
-               map_groups__flush(&self->mg);
        }
        return err;
 }