mm: distinguish between mlocked and pinned pages
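For context, the mm side of this commit adds a second counter to struct
mm_struct so that mlock()ed pages and pinned pages are accounted
separately. A sketch of the relevant fields after the change (the field
comments are editorial, not from the patch):

struct mm_struct {
	/* ... */
	unsigned long locked_vm;	/* pages mlock()ed; charged against RLIMIT_MEMLOCK */
	unsigned long pinned_vm;	/* pages pinned, e.g. by perf's ring buffer */
	/* ... */
};

The perf hunks below move the ring-buffer accounting from locked_vm to
the new pinned_vm counter.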
diff --git a/kernel/events/core.c b/kernel/events/core.c
index fbe38f2e8edb938256a03571eeff34cfd9b33bad..12a0287e03582c6969e1268ac4e7456222b111be 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -29,6 +29,7 @@
 #include <linux/hardirq.h>
 #include <linux/rculist.h>
 #include <linux/uaccess.h>
+#include <linux/suspend.h>
 #include <linux/syscalls.h>
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
@@ -3543,7 +3544,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
                struct ring_buffer *rb = event->rb;
 
                atomic_long_sub((size >> PAGE_SHIFT) + 1, &user->locked_vm);
-               vma->vm_mm->locked_vm -= event->mmap_locked;
+               vma->vm_mm->pinned_vm -= event->mmap_locked;
                rcu_assign_pointer(event->rb, NULL);
                mutex_unlock(&event->mmap_mutex);
 
@@ -3624,7 +3625,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 
        lock_limit = rlimit(RLIMIT_MEMLOCK);
        lock_limit >>= PAGE_SHIFT;
-       locked = vma->vm_mm->locked_vm + extra;
+       locked = vma->vm_mm->pinned_vm + extra;
 
        if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
                !capable(CAP_IPC_LOCK)) {
@@ -3650,7 +3651,7 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
        atomic_long_add(user_extra, &user->locked_vm);
        event->mmap_locked = extra;
        event->mmap_user = get_current_user();
-       vma->vm_mm->locked_vm += event->mmap_locked;
+       vma->vm_mm->pinned_vm += event->mmap_locked;
 
 unlock:
        if (!ret)
@@ -6852,7 +6853,7 @@ static void __cpuinit perf_event_init_cpu(int cpu)
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
        mutex_lock(&swhash->hlist_mutex);
-       if (swhash->hlist_refcount > 0) {
+       if (swhash->hlist_refcount > 0 && !swhash->swevent_hlist) {
                struct swevent_hlist *hlist;
 
                hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -6941,7 +6942,14 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
        unsigned int cpu = (long)hcpu;
 
-       switch (action & ~CPU_TASKS_FROZEN) {
+       /*
+        * Ignore the suspend/resume (frozen) hotplug actions here;
+        * the perf_pm_notifier takes care of those transitions.
+        */
+       if (action & CPU_TASKS_FROZEN)
+               return NOTIFY_OK;
+
+       switch (action) {
 
        case CPU_UP_PREPARE:
        case CPU_DOWN_FAILED:
@@ -6960,6 +6968,98 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
        return NOTIFY_OK;
 }
 
+/*
+ * Restart all events on resume: the PMU hardware state was lost
+ * across suspend, so schedule everything back in.
+ */
+static void perf_pm_resume_cpu(void *unused)
+{
+       struct perf_cpu_context *cpuctx;
+       struct perf_event_context *ctx;
+       struct pmu *pmu;
+       int idx;
+
+       idx = srcu_read_lock(&pmus_srcu);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+               ctx = cpuctx->task_ctx;
+
+               perf_ctx_lock(cpuctx, ctx);
+               perf_pmu_disable(cpuctx->ctx.pmu);
+
+               perf_event_sched_in(cpuctx, ctx, current);
+
+               perf_pmu_enable(cpuctx->ctx.pmu);
+               perf_ctx_unlock(cpuctx, ctx);
+       }
+       srcu_read_unlock(&pmus_srcu, idx);
+}
+
+/*
+ * Quiesce all events before suspend so that nothing is still
+ * active while the nonboot CPUs are taken down.
+ */
+static void perf_pm_suspend_cpu(void *unused)
+{
+       struct perf_cpu_context *cpuctx;
+       struct perf_event_context *ctx;
+       struct pmu *pmu;
+       int idx;
+
+       idx = srcu_read_lock(&pmus_srcu);
+       list_for_each_entry_rcu(pmu, &pmus, entry) {
+               cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
+               ctx = cpuctx->task_ctx;
+
+               perf_ctx_lock(cpuctx, ctx);
+               perf_pmu_disable(cpuctx->ctx.pmu);
+
+               cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+               if (ctx)
+                       ctx_sched_out(ctx, cpuctx, EVENT_ALL);
+
+               perf_pmu_enable(cpuctx->ctx.pmu);
+               perf_ctx_unlock(cpuctx, ctx);
+       }
+       srcu_read_unlock(&pmus_srcu, idx);
+}
+
+static int perf_resume(void)
+{
+       get_online_cpus();
+       smp_call_function(perf_pm_resume_cpu, NULL, 1);
+       put_online_cpus();
+
+       return NOTIFY_OK;
+}
+
+static int perf_suspend(void)
+{
+       get_online_cpus();
+       smp_call_function(perf_pm_suspend_cpu, NULL, 1);
+       put_online_cpus();
+
+       return NOTIFY_OK;
+}
+
+static int perf_pm(struct notifier_block *self, unsigned long action, void *ptr)
+{
+       switch (action) {
+       case PM_POST_HIBERNATION:
+       case PM_POST_SUSPEND:
+               return perf_resume();
+       case PM_HIBERNATION_PREPARE:
+       case PM_SUSPEND_PREPARE:
+               return perf_suspend();
+       default:
+               return NOTIFY_DONE;
+       }
+}
+
+static struct notifier_block perf_pm_notifier = {
+       .notifier_call = perf_pm,
+};
+
 void __init perf_event_init(void)
 {
        int ret;
@@ -6974,6 +7074,7 @@ void __init perf_event_init(void)
        perf_tp_register();
        perf_cpu_notifier(perf_cpu_notify);
        register_reboot_notifier(&perf_reboot_notifier);
+       register_pm_notifier(&perf_pm_notifier);
 
        ret = init_hw_breakpoint();
        WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
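
As a usage note, perf_pm_notifier above follows the standard PM notifier
idiom from <linux/suspend.h>. A minimal, self-contained sketch of the same
registration pattern (hypothetical example module, not part of this patch):

#include <linux/module.h>
#include <linux/suspend.h>

static int example_pm(struct notifier_block *self, unsigned long action,
		      void *ptr)
{
	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* runs before tasks are frozen and nonboot CPUs go down */
		pr_info("example: preparing to suspend\n");
		return NOTIFY_OK;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		/* runs after the nonboot CPUs are back online */
		pr_info("example: resumed\n");
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block example_pm_notifier = {
	.notifier_call = example_pm,
};

static int __init example_init(void)
{
	return register_pm_notifier(&example_pm_notifier);
}

static void __exit example_exit(void)
{
	unregister_pm_notifier(&example_pm_notifier);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");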