Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux...
diff --git a/mm/highmem.c b/mm/highmem.c
index 7da4a7b6af11f1d359af60568b312838f8fa2802..910198037bf59baa6649a75d1d632203483aa22b 100644
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -40,6 +40,7 @@
 #ifdef CONFIG_HIGHMEM
 
 unsigned long totalhigh_pages __read_mostly;
+EXPORT_SYMBOL(totalhigh_pages);
 
 unsigned int nr_free_highpages (void)
 {
@@ -66,9 +67,29 @@ pte_t * pkmap_page_table;
 
 static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);
 
+/*
+ * Most architectures have no use for kmap_high_get(), so let's abstract
+ * the disabling of IRQs out of the locking in that case to save on
+ * potentially useless overhead.
+ */
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+#define lock_kmap()             spin_lock_irq(&kmap_lock)
+#define unlock_kmap()           spin_unlock_irq(&kmap_lock)
+#define lock_kmap_any(flags)    spin_lock_irqsave(&kmap_lock, flags)
+#define unlock_kmap_any(flags)  spin_unlock_irqrestore(&kmap_lock, flags)
+#else
+#define lock_kmap()             spin_lock(&kmap_lock)
+#define unlock_kmap()           spin_unlock(&kmap_lock)
+#define lock_kmap_any(flags)    \
+               do { spin_lock(&kmap_lock); (void)(flags); } while (0)
+#define unlock_kmap_any(flags)  \
+               do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
+#endif
+
 static void flush_all_zero_pkmaps(void)
 {
        int i;
+       int need_flush = 0;
 
        flush_cache_kmaps();
 
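[Not part of this diff] An architecture opts into the IRQ-safe lock_kmap_any() variant by defining ARCH_NEEDS_KMAP_HIGH_GET before mm/highmem.c is compiled, typically from its asm/highmem.h, and by declaring kmap_high_get() there. A minimal sketch for a hypothetical architecture follows; the header path and declaration are illustrative, not taken from this patch:

/* arch/<arch>/include/asm/highmem.h -- illustrative sketch only */
#define ARCH_NEEDS_KMAP_HIGH_GET

/* Pin an already-kmapped highmem page without sleeping; see mm/highmem.c. */
extern void *kmap_high_get(struct page *page);

When the macro is left undefined, lock_kmap_any() reduces to a plain spin_lock(), and the (void)(flags) cast only keeps the otherwise-unused flags variable from triggering compiler warnings.
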
@@ -100,8 +121,10 @@ static void flush_all_zero_pkmaps(void)
                          &pkmap_page_table[i]);
 
                set_page_address(page, NULL);
+               need_flush = 1;
        }
-       flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
+       if (need_flush)
+               flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
 }
 
 /**
@@ -109,9 +132,9 @@ static void flush_all_zero_pkmaps(void)
  */
 void kmap_flush_unused(void)
 {
-       spin_lock(&kmap_lock);
+       lock_kmap();
        flush_all_zero_pkmaps();
-       spin_unlock(&kmap_lock);
+       unlock_kmap();
 }
 
 static inline unsigned long map_new_virtual(struct page *page)
@@ -141,10 +164,10 @@ start:
 
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&pkmap_map_wait, &wait);
-                       spin_unlock(&kmap_lock);
+                       unlock_kmap();
                        schedule();
                        remove_wait_queue(&pkmap_map_wait, &wait);
-                       spin_lock(&kmap_lock);
+                       lock_kmap();
 
                        /* Somebody else might have mapped it while we slept */
                        if (page_address(page))
@@ -180,29 +203,59 @@ void *kmap_high(struct page *page)
         * For highmem pages, we can't trust "virtual" until
         * after we have the lock.
         */
-       spin_lock(&kmap_lock);
+       lock_kmap();
        vaddr = (unsigned long)page_address(page);
        if (!vaddr)
                vaddr = map_new_virtual(page);
        pkmap_count[PKMAP_NR(vaddr)]++;
        BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
-       spin_unlock(&kmap_lock);
+       unlock_kmap();
        return (void*) vaddr;
 }
 
 EXPORT_SYMBOL(kmap_high);
 
+#ifdef ARCH_NEEDS_KMAP_HIGH_GET
+/**
+ * kmap_high_get - pin a highmem page into memory
+ * @page: &struct page to pin
+ *
+ * Returns the page's current virtual memory address, or NULL if no mapping
+ * exists.  If and only if a non-NULL address is returned, a matching call
+ * to kunmap_high() is necessary.
+ *
+ * This can be called from any context.
+ */
+void *kmap_high_get(struct page *page)
+{
+       unsigned long vaddr, flags;
+
+       lock_kmap_any(flags);
+       vaddr = (unsigned long)page_address(page);
+       if (vaddr) {
+               BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
+               pkmap_count[PKMAP_NR(vaddr)]++;
+       }
+       unlock_kmap_any(flags);
+       return (void*) vaddr;
+}
+#endif
+
 /**
 * kunmap_high - unmap a highmem page
  * @page: &struct page to unmap
+ *
+ * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
+ * only from user context.
  */
 void kunmap_high(struct page *page)
 {
        unsigned long vaddr;
        unsigned long nr;
+       unsigned long flags;
        int need_wakeup;
 
-       spin_lock(&kmap_lock);
+       lock_kmap_any(flags);
        vaddr = (unsigned long)page_address(page);
        BUG_ON(!vaddr);
        nr = PKMAP_NR(vaddr);
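
[Not part of this diff] Illustrative usage of the new API: a caller that may run in atomic context can use kmap_high_get() to operate on a highmem page only if it is already kmapped, pairing it with kunmap_high() exactly when a non-NULL address was returned. The helper below is hypothetical, a sketch rather than code from this patch:

/* Hypothetical caller, safe from any context: kmap_high_get() never
 * sleeps and takes the kmap lock with IRQs disabled. */
static void touch_existing_mapping(struct page *page)
{
	void *vaddr = kmap_high_get(page);

	if (!vaddr)
		return;			/* page has no kernel mapping right now */

	/* ... use vaddr, e.g. for cache maintenance ... */

	kunmap_high(page);		/* drop the reference kmap_high_get() took */
}
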
@@ -228,7 +281,7 @@ void kunmap_high(struct page *page)
                 */
                need_wakeup = waitqueue_active(&pkmap_map_wait);
        }
-       spin_unlock(&kmap_lock);
+       unlock_kmap_any(flags);
 
        /* do wake-up, if needed, race-free outside of the spin lock */
        if (need_wakeup)