Pileus Git - ~andy/linux/blobdiff - drivers/xen/balloon.c
xen/balloon: don't alloc page while non-preemptible
[~andy/linux] / drivers / xen / balloon.c
index 2a2ef97697b266af2b36428c35584b85955c04ef..b232908a61925724bb61bc0f8a09ecbca6b753e6 100644 (file)
@@ -38,6 +38,7 @@
 
 #define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
 
+#include <linux/cpu.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/errno.h>
@@ -52,6 +53,7 @@
 #include <linux/notifier.h>
 #include <linux/memory.h>
 #include <linux/memory_hotplug.h>
+#include <linux/percpu-defs.h>
 
 #include <asm/page.h>
 #include <asm/pgalloc.h>
@@ -90,6 +92,8 @@ EXPORT_SYMBOL_GPL(balloon_stats);
 
 /* We increase/decrease in batches which fit in a page */
 static xen_pfn_t frame_list[PAGE_SIZE / sizeof(unsigned long)];
+static DEFINE_PER_CPU(struct page *, balloon_scratch_page);
+
 
 /* List of ballooned pages, threaded through the mem_map array. */
 static LIST_HEAD(ballooned_pages);
@@ -345,8 +349,6 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
                BUG_ON(page == NULL);
 
                pfn = page_to_pfn(page);
-               BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
-                      phys_to_machine_mapping_valid(pfn));
 
                set_phys_to_machine(pfn, frame_list[i]);
 
@@ -376,6 +378,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
        enum bp_state state = BP_DONE;
        unsigned long  pfn, i;
        struct page   *page;
+       struct page   *scratch_page;
        int ret;
        struct xen_memory_reservation reservation = {
                .address_bits = 0,
@@ -408,27 +411,35 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
 
                scrub_page(page);
 
+               /*
+                * Ballooned out frames are effectively replaced with
+                * a scratch frame.  Ensure direct mappings and the
+                * p2m are consistent.
+                */
+               scratch_page = get_balloon_scratch_page();
 #ifdef CONFIG_XEN_HAVE_PVMMU
                if (xen_pv_domain() && !PageHighMem(page)) {
                        ret = HYPERVISOR_update_va_mapping(
                                (unsigned long)__va(pfn << PAGE_SHIFT),
-                               __pte_ma(0), 0);
+                               pfn_pte(page_to_pfn(scratch_page),
+                                       PAGE_KERNEL_RO), 0);
                        BUG_ON(ret);
                }
 #endif
+               if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+                       unsigned long p;
+                       p = page_to_pfn(scratch_page);
+                       __set_phys_to_machine(pfn, pfn_to_mfn(p));
+               }
+               put_balloon_scratch_page();
+
+               balloon_append(pfn_to_page(pfn));
        }
 
        /* Ensure that ballooned highmem pages don't have kmaps. */
        kmap_flush_unused();
        flush_tlb_all();
 
-       /* No more mappings: invalidate P2M and add to balloon. */
-       for (i = 0; i < nr_pages; i++) {
-               pfn = mfn_to_pfn(frame_list[i]);
-               __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-               balloon_append(pfn_to_page(pfn));
-       }
-
        set_xen_guest_handle(reservation.extent_start, frame_list);
        reservation.nr_extents   = nr_pages;
        ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
@@ -480,6 +491,18 @@ static void balloon_process(struct work_struct *work)
        mutex_unlock(&balloon_mutex);
 }
 
+/*
+ * Return this CPU's balloon scratch page.  get_cpu_var() disables
+ * preemption, so the caller runs non-preemptible until it calls
+ * put_balloon_scratch_page().  The page is allocated up front in
+ * balloon_init() / the CPU hotplug notifier precisely so that no
+ * allocation has to happen here; a NULL page means that setup was
+ * missed for this CPU, hence the BUG_ON.
+ */
+struct page *get_balloon_scratch_page(void)
+{
+       struct page *ret = get_cpu_var(balloon_scratch_page);
+       BUG_ON(ret == NULL);
+       return ret;
+}
+
+/*
+ * Release the per-CPU scratch page reference taken by
+ * get_balloon_scratch_page(); re-enables preemption via put_cpu_var().
+ */
+void put_balloon_scratch_page(void)
+{
+       put_cpu_var(balloon_scratch_page);
+}
+
 /* Resets the Xen limit, sets new target, and kicks off processing. */
 void balloon_set_new_target(unsigned long target)
 {
@@ -573,13 +596,47 @@ static void __init balloon_add_region(unsigned long start_pfn,
        }
 }
 
+/*
+ * CPU hotplug callback: ensure a scratch page exists for a CPU that is
+ * about to come online.  The allocation is done here, in the
+ * preemptible CPU_UP_PREPARE phase, because get_balloon_scratch_page()
+ * runs with preemption disabled and therefore must never allocate.
+ *
+ * There is deliberately no CPU_DEAD case: the page is kept across
+ * offlining and reused on the next onlining (see the NULL check below).
+ *
+ * NOTE(review): CPU_UP_PREPARE_FROZEN (suspend/resume path) is not
+ * matched here -- presumably the page allocated at first onlining is
+ * still present then, but confirm the resume case.
+ */
+static int __cpuinit balloon_cpu_notify(struct notifier_block *self,
+                                   unsigned long action, void *hcpu)
+{
+       int cpu = (long)hcpu;
+       switch (action) {
+       case CPU_UP_PREPARE:
+               /* Page survives from a previous onlining -- reuse it. */
+               if (per_cpu(balloon_scratch_page, cpu) != NULL)
+                       break;
+               per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+               if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+                       pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+                       /* Veto the CPU coming online rather than BUG later. */
+                       return NOTIFY_BAD;
+               }
+               break;
+       default:
+               break;
+       }
+       return NOTIFY_OK;
+}
+
+/* Registered from balloon_init() after the boot CPUs' pages are set up. */
+static struct notifier_block balloon_cpu_notifier __cpuinitdata = {
+       .notifier_call  = balloon_cpu_notify,
+};
+
 static int __init balloon_init(void)
 {
-       int i;
+       int i, cpu;
 
        if (!xen_domain())
                return -ENODEV;
 
+       for_each_online_cpu(cpu)
+       {
+               per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+               if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+                       pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+                       return -ENOMEM;
+               }
+       }
+       register_cpu_notifier(&balloon_cpu_notifier);
+
        pr_info("Initialising balloon driver\n");
 
        balloon_stats.current_pages = xen_pv_domain()
@@ -616,4 +673,15 @@ static int __init balloon_init(void)
 
 subsys_initcall(balloon_init);
 
+/*
+ * Reset every possible CPU's scratch-page pointer to NULL before
+ * balloon_init() runs (early_initcall precedes subsys_initcall), so the
+ * "already allocated" checks in balloon_init() and balloon_cpu_notify()
+ * start from a known state.
+ *
+ * NOTE(review): statically DEFINE_PER_CPU'd data is normally
+ * zero-initialized already, so this looks defensive; confirm whether
+ * any platform actually needs the explicit clear.
+ */
+static int __init balloon_clear(void)
+{
+       int cpu;
+
+       for_each_possible_cpu(cpu)
+               per_cpu(balloon_scratch_page, cpu) = NULL;
+
+       return 0;
+}
+early_initcall(balloon_clear);
+
 MODULE_LICENSE("GPL");