Pileus Git - ~andy/linux/commitdiff
Merge branch 'x86-asmlinkage-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Fri, 31 Jan 2014 02:15:32 +0000 (18:15 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Fri, 31 Jan 2014 02:15:32 +0000 (18:15 -0800)
Pull x86 asmlinkage (LTO) changes from Peter Anvin:
 "This patchset adds more infrastructure for link time optimization
  (LTO).

  This patchset was pulled into my tree late because of a
  miscommunication (part of the patchset was picked up by other
  maintainers).  However, the patchset is strictly build-related and
  seems to be okay in testing"

* 'x86-asmlinkage-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, asmlinkage, xen: Fix type of NMI
  x86, asmlinkage, xen, kvm: Make {xen,kvm}_lock_spinning global and visible
  x86: Use inline assembler instead of global register variable to get sp
  x86, asmlinkage, paravirt: Make paravirt thunks global
  x86, asmlinkage, paravirt: Don't rely on local assembler labels
  x86, asmlinkage, lguest: Fix C functions used by inline assembler

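A note on the theme of this series: under link-time optimization the compiler
optimizes across translation units, so a C function whose only reference is
hidden inside an asm() statement looks unused and may be dropped or have its
symbol localized. The kernel's __visible annotation expands to GCC's
externally_visible attribute. A minimal userspace sketch of the failure mode
(illustrative only, not code from this series):

/* Build with: gcc -O2 -flto demo.c */
#define __visible __attribute__((externally_visible))

__visible void from_asm(void)
{
	/* Reached only through the asm() reference below; without the
	 * attribute, LTO may decide this function is dead. */
}

int main(void)
{
	/* Illustration only - real code must declare clobbers. */
	asm volatile("call from_asm");
	return 0;
}
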
arch/x86/xen/irq.c
arch/x86/xen/mmu.c
arch/x86/xen/setup.c

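One shortlog entry, "x86: Use inline assembler instead of global register
variable to get sp", touches files that are not part of this combined diff.
Below is a sketch of that technique (userspace, x86-64 here; the kernel's
32-bit code reads %esp), since LTO cannot cope with file-scope register
variables:

#include <stdio.h>

/* Old style, rejected under LTO:
 *	register unsigned long current_stack_pointer asm("rsp");
 */

/* New style: read the register with a one-instruction asm. */
static inline unsigned long current_stack_pointer(void)
{
	unsigned long sp;

	asm("mov %%rsp, %0" : "=r" (sp));
	return sp;
}

int main(void)
{
	printf("sp is near %p\n", (void *)current_stack_pointer());
	return 0;
}
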
diff --combined arch/x86/xen/irq.c
index 76ca326105f71d9a53fd93e2b68e393dadba9bd8,f56c23b60a6ce525ec7bcc5be38ca3b89a5a7674..08f763de26fe4132d7e6dcf0a7b50a660af76319
@@@ -5,7 -5,6 +5,7 @@@
  #include <xen/interface/xen.h>
  #include <xen/interface/sched.h>
  #include <xen/interface/vcpu.h>
 +#include <xen/features.h>
  #include <xen/events.h>
  
  #include <asm/xen/hypercall.h>
@@@ -23,7 -22,7 +23,7 @@@ void xen_force_evtchn_callback(void
        (void)HYPERVISOR_xen_version(0, NULL);
  }
  
- static unsigned long xen_save_fl(void)
+ asmlinkage unsigned long xen_save_fl(void)
  {
        struct vcpu_info *vcpu;
        unsigned long flags;
@@@ -41,7 -40,7 +41,7 @@@
  }
  PV_CALLEE_SAVE_REGS_THUNK(xen_save_fl);
  
- static void xen_restore_fl(unsigned long flags)
+ __visible void xen_restore_fl(unsigned long flags)
  {
        struct vcpu_info *vcpu;
  
@@@ -63,7 -62,7 +63,7 @@@
  }
  PV_CALLEE_SAVE_REGS_THUNK(xen_restore_fl);
  
- static void xen_irq_disable(void)
+ asmlinkage void xen_irq_disable(void)
  {
        /* There's a one-instruction preempt window here.  We need to
           make sure we don't switch CPUs between getting the vcpu
@@@ -74,7 -73,7 +74,7 @@@
  }
  PV_CALLEE_SAVE_REGS_THUNK(xen_irq_disable);
  
- static void xen_irq_enable(void)
+ asmlinkage void xen_irq_enable(void)
  {
        struct vcpu_info *vcpu;
  
@@@ -129,8 -128,6 +129,8 @@@ static const struct pv_irq_ops xen_irq_
  
  void __init xen_init_irq_ops(void)
  {
 -      pv_irq_ops = xen_irq_ops;
 +      /* For PVH we use default pv_irq_ops settings. */
 +      if (!xen_feature(XENFEAT_hvm_callback_vector))
 +              pv_irq_ops = xen_irq_ops;
        x86_init.irqs.intr_init = xen_init_IRQ;
  }
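
The xen_save_fl/xen_restore_fl hunks above pair each function with
PV_CALLEE_SAVE_REGS_THUNK(), a macro that emits an assembly thunk calling the
C function by name from a top-level asm() string - a reference the compiler
cannot see, which is why these functions lose their static qualifier. A
simplified userspace model of that shape (assumed, not the kernel's actual
macro; the register saving is elided):

#include <stdio.h>

__attribute__((externally_visible))
unsigned long my_save_fl(void)
{
	return 0x202;	/* stand-in flags value */
}

/* Thunk defined entirely in top-level asm; the "call my_save_fl" is
 * invisible to the compiler, so my_save_fl must stay a global symbol. */
asm(".pushsection .text\n"
    ".globl save_fl_thunk\n"
    "save_fl_thunk:\n"
    "\tsub $8, %rsp\n"	/* keep the stack 16-byte aligned */
    "\tcall my_save_fl\n"
    "\tadd $8, %rsp\n"
    "\tret\n"
    ".popsection");

extern unsigned long save_fl_thunk(void);

int main(void)
{
	printf("flags: 0x%lx\n", save_fl_thunk());
	return 0;
}
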
diff --combined arch/x86/xen/mmu.c
index c1d406f35523143f7fc21f41a71dc0658c5e1823,648512c50cc76e48a2b9d070b710e2b3c27784be..2423ef04ffea596fd43eeb918f290003277fbb21
@@@ -431,7 -431,7 +431,7 @@@ static pteval_t iomap_pte(pteval_t val
        return val;
  }
  
- static pteval_t xen_pte_val(pte_t pte)
+ __visible pteval_t xen_pte_val(pte_t pte)
  {
        pteval_t pteval = pte.pte;
  #if 0
  }
  PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);
  
- static pgdval_t xen_pgd_val(pgd_t pgd)
+ __visible pgdval_t xen_pgd_val(pgd_t pgd)
  {
        return pte_mfn_to_pfn(pgd.pgd);
  }
@@@ -479,7 -479,7 +479,7 @@@ void xen_set_pat(u64 pat
        WARN_ON(pat != 0x0007010600070106ull);
  }
  
- static pte_t xen_make_pte(pteval_t pte)
+ __visible pte_t xen_make_pte(pteval_t pte)
  {
        phys_addr_t addr = (pte & PTE_PFN_MASK);
  #if 0
  }
  PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);
  
- static pgd_t xen_make_pgd(pgdval_t pgd)
+ __visible pgd_t xen_make_pgd(pgdval_t pgd)
  {
        pgd = pte_pfn_to_mfn(pgd);
        return native_make_pgd(pgd);
  }
  PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);
  
- static pmdval_t xen_pmd_val(pmd_t pmd)
+ __visible pmdval_t xen_pmd_val(pmd_t pmd)
  {
        return pte_mfn_to_pfn(pmd.pmd);
  }
@@@ -580,7 -580,7 +580,7 @@@ static void xen_pmd_clear(pmd_t *pmdp
  }
  #endif        /* CONFIG_X86_PAE */
  
- static pmd_t xen_make_pmd(pmdval_t pmd)
+ __visible pmd_t xen_make_pmd(pmdval_t pmd)
  {
        pmd = pte_pfn_to_mfn(pmd);
        return native_make_pmd(pmd);
  }
  PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);
  
  #if PAGETABLE_LEVELS == 4
- static pudval_t xen_pud_val(pud_t pud)
+ __visible pudval_t xen_pud_val(pud_t pud)
  {
        return pte_mfn_to_pfn(pud.pud);
  }
  PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);
  
- static pud_t xen_make_pud(pudval_t pud)
+ __visible pud_t xen_make_pud(pudval_t pud)
  {
        pud = pte_pfn_to_mfn(pud);
  
@@@ -1198,40 -1198,44 +1198,40 @@@ static void __init xen_cleanhighmap(uns
         * instead of somewhere later and be confusing. */
        xen_mc_flush();
  }
 -#endif
 -static void __init xen_pagetable_init(void)
 +static void __init xen_pagetable_p2m_copy(void)
  {
 -#ifdef CONFIG_X86_64
        unsigned long size;
        unsigned long addr;
 -#endif
 -      paging_init();
 -      xen_setup_shared_info();
 -#ifdef CONFIG_X86_64
 -      if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 -              unsigned long new_mfn_list;
 -
 -              size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
 -
 -              /* On 32-bit, we get zero so this never gets executed. */
 -              new_mfn_list = xen_revector_p2m_tree();
 -              if (new_mfn_list && new_mfn_list != xen_start_info->mfn_list) {
 -                      /* using __ka address and sticking INVALID_P2M_ENTRY! */
 -                      memset((void *)xen_start_info->mfn_list, 0xff, size);
 -
 -                      /* We should be in __ka space. */
 -                      BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
 -                      addr = xen_start_info->mfn_list;
 -                      /* We round up to the PMD, which means that if anybody at this stage is
 -                       * using the __ka address of xen_start_info or xen_start_info->shared_info
 -                       * they are going to crash. Fortunately we have already revectored
 -                       * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
 -                      size = roundup(size, PMD_SIZE);
 -                      xen_cleanhighmap(addr, addr + size);
 -
 -                      size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
 -                      memblock_free(__pa(xen_start_info->mfn_list), size);
 -                      /* And revector! Bye bye old array */
 -                      xen_start_info->mfn_list = new_mfn_list;
 -              } else
 -                      goto skip;
 -      }
 +      unsigned long new_mfn_list;
 +
 +      if (xen_feature(XENFEAT_auto_translated_physmap))
 +              return;
 +
 +      size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
 +
 +      new_mfn_list = xen_revector_p2m_tree();
 +      /* No memory or already called. */
 +      if (!new_mfn_list || new_mfn_list == xen_start_info->mfn_list)
 +              return;
 +
 +      /* using __ka address and sticking INVALID_P2M_ENTRY! */
 +      memset((void *)xen_start_info->mfn_list, 0xff, size);
 +
 +      /* We should be in __ka space. */
 +      BUG_ON(xen_start_info->mfn_list < __START_KERNEL_map);
 +      addr = xen_start_info->mfn_list;
 +      /* We round up to the PMD, which means that if anybody at this stage is
 +       * using the __ka address of xen_start_info or xen_start_info->shared_info
 +       * they are going to crash. Fortunately we have already revectored
 +       * in xen_setup_kernel_pagetable and in xen_setup_shared_info. */
 +      size = roundup(size, PMD_SIZE);
 +      xen_cleanhighmap(addr, addr + size);
 +
 +      size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));
 +      memblock_free(__pa(xen_start_info->mfn_list), size);
 +      /* And revector! Bye bye old array */
 +      xen_start_info->mfn_list = new_mfn_list;
 +
        /* At this stage, cleanup_highmap has already cleaned __ka space
         * from _brk_limit way up to the max_pfn_mapped (which is the end of
         * the ramdisk). We continue on, erasing PMD entries that point to page
         * anything at this stage. */
        xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
  #endif
 -skip:
 +}
 +#endif
 +
 +static void __init xen_pagetable_init(void)
 +{
 +      paging_init();
 +      xen_setup_shared_info();
 +#ifdef CONFIG_X86_64
 +      xen_pagetable_p2m_copy();
  #endif
        xen_post_allocator_init();
  }
@@@ -1757,10 -1753,6 +1757,10 @@@ static void set_page_prot_flags(void *a
        unsigned long pfn = __pa(addr) >> PAGE_SHIFT;
        pte_t pte = pfn_pte(pfn, prot);
  
 +      /* For PVH no need to set R/O or R/W to pin them or unpin them. */
 +      if (xen_feature(XENFEAT_auto_translated_physmap))
 +              return;
 +
        if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags))
                BUG();
  }
@@@ -1871,7 -1863,6 +1871,7 @@@ static void __init check_pt_base(unsign
   * but that's enough to get __va working.  We need to fill in the rest
   * of the physical mapping once some sort of allocator has been set
   * up.
 + * NOTE: for PVH, the page tables are native.
   */
  void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
  {
        /* Zap identity mapping */
        init_level4_pgt[0] = __pgd(0);
  
 -      /* Pre-constructed entries are in pfn, so convert to mfn */
 -      /* L4[272] -> level3_ident_pgt
 -       * L4[511] -> level3_kernel_pgt */
 -      convert_pfn_mfn(init_level4_pgt);
 -
 -      /* L3_i[0] -> level2_ident_pgt */
 -      convert_pfn_mfn(level3_ident_pgt);
 -      /* L3_k[510] -> level2_kernel_pgt
 -       * L3_i[511] -> level2_fixmap_pgt */
 -      convert_pfn_mfn(level3_kernel_pgt);
 -
 +      if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 +              /* Pre-constructed entries are in pfn, so convert to mfn */
 +              /* L4[272] -> level3_ident_pgt
 +               * L4[511] -> level3_kernel_pgt */
 +              convert_pfn_mfn(init_level4_pgt);
 +
 +              /* L3_i[0] -> level2_ident_pgt */
 +              convert_pfn_mfn(level3_ident_pgt);
 +              /* L3_k[510] -> level2_kernel_pgt
 +               * L3_i[511] -> level2_fixmap_pgt */
 +              convert_pfn_mfn(level3_kernel_pgt);
 +      }
        /* We get [511][511] and have Xen's version of level2_kernel_pgt */
        l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
        l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
        copy_page(level2_fixmap_pgt, l2);
        /* Note that we don't do anything with level1_fixmap_pgt which
         * we don't need. */
 +      if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 +              /* Make pagetable pieces RO */
 +              set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
 +              set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
 +              set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
 +              set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
 +              set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
 +              set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
 +              set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
 +
 +              /* Pin down new L4 */
 +              pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
 +                                PFN_DOWN(__pa_symbol(init_level4_pgt)));
 +
 +              /* Unpin Xen-provided one */
 +              pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
  
 -      /* Make pagetable pieces RO */
 -      set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
 -      set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
 -      set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
 -      set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
 -      set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
 -      set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
 -      set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
 -
 -      /* Pin down new L4 */
 -      pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
 -                        PFN_DOWN(__pa_symbol(init_level4_pgt)));
 -
 -      /* Unpin Xen-provided one */
 -      pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));
 -
 -      /*
 -       * At this stage there can be no user pgd, and no page
 -       * structure to attach it to, so make sure we just set kernel
 -       * pgd.
 -       */
 -      xen_mc_batch();
 -      __xen_write_cr3(true, __pa(init_level4_pgt));
 -      xen_mc_issue(PARAVIRT_LAZY_CPU);
 +              /*
 +               * At this stage there can be no user pgd, and no page
 +               * structure to attach it to, so make sure we just set kernel
 +               * pgd.
 +               */
 +              xen_mc_batch();
 +              __xen_write_cr3(true, __pa(init_level4_pgt));
 +              xen_mc_issue(PARAVIRT_LAZY_CPU);
 +      } else
 +              native_write_cr3(__pa(init_level4_pgt));
  
        /* We can't easily rip out L3 and L2, as the Xen pagetables are
         * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ...  for
@@@ -2115,9 -2103,6 +2115,9 @@@ static void xen_set_fixmap(unsigned idx
  
  static void __init xen_post_allocator_init(void)
  {
 +      if (xen_feature(XENFEAT_auto_translated_physmap))
 +              return;
 +
        pv_mmu_ops.set_pte = xen_set_pte;
        pv_mmu_ops.set_pmd = xen_set_pmd;
        pv_mmu_ops.set_pud = xen_set_pud;
@@@ -2222,15 -2207,6 +2222,15 @@@ static const struct pv_mmu_ops xen_mmu_
  void __init xen_init_mmu_ops(void)
  {
        x86_init.paging.pagetable_init = xen_pagetable_init;
 +
 +      /* Optimization - we can use the HVM one but it has no idea which
 +       * VCPUs are descheduled - which means that it will needlessly IPI
 +       * them. Xen knows, so let it do the job.
 +       */
 +      if (xen_feature(XENFEAT_auto_translated_physmap)) {
 +              pv_mmu_ops.flush_tlb_others = xen_flush_tlb_others;
 +              return;
 +      }
        pv_mmu_ops = xen_mmu_ops;
  
        memset(dummy_mapping, 0xff, PAGE_SIZE);
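
The setup.c diff below also carries the "Fix type of NMI" patch: the assembly
entry point nmi, previously declared as an opaque char array, gets a proper
asmlinkage function type, which is what forces the (char *) cast at the
register_callback() call site. A small userspace sketch of the same idea
(hypothetical names entry_point and register_cb):

#include <stdio.h>

/* Stand-in for an entry point written in assembly. */
asm(".pushsection .text\n"
    ".globl entry_point\n"
    "entry_point:\n"
    "\tret\n"
    ".popsection");

/* Was: extern const char entry_point[];  - opaque bytes.
 * Now: a real prototype, so uses of the symbol are type-checked. */
extern void entry_point(void);

/* Hypothetical registration API that takes a raw address. */
static int register_cb(const char *addr)
{
	printf("registered %p\n", (const void *)addr);
	return 0;
}

int main(void)
{
	/* The function type now needs an explicit cast, mirroring
	 * register_callback(CALLBACKTYPE_nmi, (char *)nmi) in the diff. */
	return register_cb((const char *)entry_point);
}
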
diff --combined arch/x86/xen/setup.c
index dd5f905e33d5e187d9713d65ddaf3b721f6ad0d6,518ab4a17b8a87d9c4caaa34c16c30ae7823ff49..0982233b9b8433a97d6905de4aad3369c6f4c0a5
@@@ -27,7 -27,6 +27,7 @@@
  #include <xen/interface/memory.h>
  #include <xen/interface/physdev.h>
  #include <xen/features.h>
 +#include "mmu.h"
  #include "xen-ops.h"
  #include "vdso.h"
  
@@@ -35,7 -34,7 +35,7 @@@
  extern const char xen_hypervisor_callback[];
  extern const char xen_failsafe_callback[];
  #ifdef CONFIG_X86_64
- extern const char nmi[];
+ extern asmlinkage void nmi(void);
  #endif
  extern void xen_sysenter_target(void);
  extern void xen_syscall_target(void);
@@@ -82,9 -81,6 +82,9 @@@ static void __init xen_add_extra_mem(u6
  
        memblock_reserve(start, size);
  
 +      if (xen_feature(XENFEAT_auto_translated_physmap))
 +              return;
 +
        xen_max_p2m_pfn = PFN_DOWN(start + size);
        for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);
@@@ -107,7 -103,6 +107,7 @@@ static unsigned long __init xen_do_chun
                .domid        = DOMID_SELF
        };
        unsigned long len = 0;
 +      int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);
        unsigned long pfn;
        int ret;
  
                                continue;
                        frame = mfn;
                } else {
 -                      if (mfn != INVALID_P2M_ENTRY)
 +                      if (!xlated_phys && mfn != INVALID_P2M_ENTRY)
                                continue;
                        frame = pfn;
                }
  static unsigned long __init xen_release_chunk(unsigned long start,
                                              unsigned long end)
  {
 +      /*
 +       * Xen already ballooned out the E820 non-RAM regions for us
 +       * and set them up properly in EPT.
 +       */
 +      if (xen_feature(XENFEAT_auto_translated_physmap))
 +              return end - start;
 +
        return xen_do_chunk(start, end, true);
  }
  
@@@ -234,13 -222,7 +234,13 @@@ static void __init xen_set_identity_and
         * (except for the ISA region which must be 1:1 mapped) to
         * release the refcounts (in Xen) on the original frames.
         */
 -      for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
 +
 +      /*
 +       * PVH E820 matches the hypervisor's P2M which means we need to
 +       * account for the proper values of *release and *identity.
 +       */
 +      for (pfn = start_pfn; !xen_feature(XENFEAT_auto_translated_physmap) &&
 +           pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
                pte_t pte = __pte_ma(0);
  
                if (pfn < PFN_UP(ISA_END_ADDRESS))
@@@ -577,17 -559,20 +577,17 @@@ void xen_enable_syscall(void
  void xen_enable_nmi(void)
  {
  #ifdef CONFIG_X86_64
-       if (register_callback(CALLBACKTYPE_nmi, nmi))
+       if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
                BUG();
  #endif
  }
 -void __init xen_arch_setup(void)
 +void __init xen_pvmmu_arch_setup(void)
  {
 -      xen_panic_handler_init();
 -
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables);
  
 -      if (!xen_feature(XENFEAT_auto_translated_physmap))
 -              HYPERVISOR_vm_assist(VMASST_CMD_enable,
 -                                   VMASST_TYPE_pae_extended_cr3);
 +      HYPERVISOR_vm_assist(VMASST_CMD_enable,
 +                           VMASST_TYPE_pae_extended_cr3);
  
        if (register_callback(CALLBACKTYPE_event, xen_hypervisor_callback) ||
            register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback))
        xen_enable_sysenter();
        xen_enable_syscall();
        xen_enable_nmi();
 +}
 +
 +/* This function is not called for HVM domains */
 +void __init xen_arch_setup(void)
 +{
 +      xen_panic_handler_init();
 +      if (!xen_feature(XENFEAT_auto_translated_physmap))
 +              xen_pvmmu_arch_setup();
 +
  #ifdef CONFIG_ACPI
        if (!(xen_start_info->flags & SIF_INITDOMAIN)) {
                printk(KERN_INFO "ACPI in unprivileged domain disabled\n");