Merge master.kernel.org:/pub/scm/linux/kernel/git/davem/sparc-2.6

diff --git a/include/asm-sh/pgtable.h b/include/asm-sh/pgtable.h
index 22c3d0b3e11aad1b0e10eacf7924b0a291075bdb..184d7fcaaf107a068f3c5e8967f60a1fd5aaa934 100644
--- a/include/asm-sh/pgtable.h
+++ b/include/asm-sh/pgtable.h
@@ -47,17 +47,17 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define PGDIR_MASK     (~(PGDIR_SIZE-1))
 
 /* Entries per level */
-#define PTRS_PER_PTE   (1UL << PTE_BITS)
-#define PTRS_PER_PGD   (1UL << PGDIR_BITS)
+#define PTRS_PER_PTE   (PAGE_SIZE / (1 << PTE_MAGNITUDE))
+#define PTRS_PER_PGD   (PAGE_SIZE / 4)
 
 #define USER_PTRS_PER_PGD      (TASK_SIZE/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS     0
 
-#define PTE_PHYS_MASK  0x1ffff000
+#define PTE_PHYS_MASK          (0x20000000 - PAGE_SIZE)
 
 /*
  * The first 1MB of the map is used for fixed purposes.
- * Currently only 4-enty (16kB) is used (see arch/sh/mm/cache.c)
+ * Currently only 4 entries (16kB) are used (see arch/sh/mm/cache.c)
  */
 #define VMALLOC_START  (P3SEG+0x00100000)
 #define VMALLOC_END    (FIXADDR_START-2*PAGE_SIZE)
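To see what the new, PAGE_SIZE-relative definitions work out to, here is a worked example. Illustration only: it assumes a 4 KiB page, and takes PTE_MAGNITUDE to be the log2 of the software PTE size (2 for 32-bit PTEs, 3 for 64-bit PTEs), which is an inference from the surrounding context rather than something this hunk defines:

	/* Assuming PAGE_SIZE == 4096:
	 *
	 *   32-bit PTEs (PTE_MAGNITUDE == 2):
	 *     PTRS_PER_PTE = 4096 / (1 << 2) = 1024
	 *   64-bit PTEs, e.g. CONFIG_X2TLB (PTE_MAGNITUDE == 3):
	 *     PTRS_PER_PTE = 4096 / (1 << 3) = 512
	 *
	 *   pgd entries stay 4 bytes wide:
	 *     PTRS_PER_PGD = 4096 / 4 = 1024
	 *
	 *   PTE_PHYS_MASK drops the in-page offset below the 512 MB
	 *   physical window:
	 *     0x20000000 - 0x1000 = 0x1ffff000  (the old hard-coded value)
	 */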
@@ -197,32 +197,32 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 # endif
 #endif
 
+/*
+ * Stub out _PAGE_SZHUGE if we don't have a good definition for it,
+ * to make pte_mkhuge() happy.
+ */
+#ifndef _PAGE_SZHUGE
+# define _PAGE_SZHUGE  (_PAGE_FLAGS_HARD)
+#endif
+
 #define _PAGE_CHG_MASK \
        (PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | _PAGE_DIRTY)
 
 #ifndef __ASSEMBLY__
 
 #if defined(CONFIG_X2TLB) /* SH-X2 TLB */
-#define _PAGE_TABLE \
-       (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | \
-        _PAGE_EXT(_PAGE_EXT_USER_READ | _PAGE_EXT_USER_WRITE))
-
-#define _KERNPG_TABLE \
-       (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | \
-        _PAGE_EXT(_PAGE_EXT_KERN_READ | _PAGE_EXT_KERN_WRITE))
-
 #define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
                                 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
 
 #define PAGE_SHARED    __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_USER_READ | \
-                                          _PAGE_EXT_USER_WRITE))
+                                          _PAGE_EXT_USER_WRITE))
 
 #define PAGE_EXECREAD  __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_USER_EXEC | \
-                                          _PAGE_EXT_USER_READ))
+                                          _PAGE_EXT_USER_READ))
 
 #define PAGE_COPY      PAGE_EXECREAD
 
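The _PAGE_SZHUGE stub added at the top of this hunk keeps pte_mkhuge() compilable on configurations that define no real huge-page size bits. A minimal sketch of the consumer, assuming the usual PTE_BIT_FUNC-style helper found elsewhere in this header (illustrative, not the authoritative definition):

	/* Sketch: with the fallback, pte_mkhuge() just ORs in
	 * _PAGE_FLAGS_HARD, which is effectively a no-op. */
	static inline pte_t pte_mkhuge(pte_t pte)
	{
		pte_val(pte) |= _PAGE_SZHUGE;
		return pte;
	}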
@@ -237,14 +237,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
 #define PAGE_RWX       __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
                                 _PAGE_CACHABLE | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_USER_WRITE | \
-                                          _PAGE_EXT_USER_READ  | \
+                                          _PAGE_EXT_USER_READ  | \
                                           _PAGE_EXT_USER_EXEC))
 
 #define PAGE_KERNEL    __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
                                 _PAGE_DIRTY | _PAGE_ACCESSED | \
                                 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
-                                          _PAGE_EXT_KERN_WRITE | \
+                                          _PAGE_EXT_KERN_WRITE | \
                                           _PAGE_EXT_KERN_EXEC))
 
 #define PAGE_KERNEL_NOCACHE \
@@ -252,30 +252,25 @@ extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
                                 _PAGE_ACCESSED | _PAGE_HW_SHARED | \
                                 _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
-                                          _PAGE_EXT_KERN_WRITE | \
+                                          _PAGE_EXT_KERN_WRITE | \
                                           _PAGE_EXT_KERN_EXEC))
 
 #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_CACHABLE | \
                                 _PAGE_DIRTY | _PAGE_ACCESSED | \
                                 _PAGE_HW_SHARED | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
-                                          _PAGE_EXT_KERN_EXEC))
+                                          _PAGE_EXT_KERN_EXEC))
 
 #define PAGE_KERNEL_PCC(slot, type) \
                        __pgprot(_PAGE_PRESENT | _PAGE_DIRTY | \
                                 _PAGE_ACCESSED | _PAGE_FLAGS_HARD | \
                                 _PAGE_EXT(_PAGE_EXT_KERN_READ | \
-                                          _PAGE_EXT_KERN_WRITE | \
+                                          _PAGE_EXT_KERN_WRITE | \
                                           _PAGE_EXT_KERN_EXEC) | \
                                 (slot ? _PAGE_PCC_AREA5 : _PAGE_PCC_AREA6) | \
                                 (type))
 
 #elif defined(CONFIG_MMU) /* SH-X TLB */
-#define _PAGE_TABLE \
-       (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
-#define _KERNPG_TABLE \
-       (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
-
 #define PAGE_NONE      __pgprot(_PAGE_PROTNONE | _PAGE_CACHABLE | \
                                 _PAGE_ACCESSED | _PAGE_FLAGS_HARD)
 
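All of the SH-X2 variants above funnel their per-mode kernel/user read/write/execute bits through _PAGE_EXT(). As a hedged sketch of what that implies (the shift is an assumption; the real macro is defined earlier in this header, outside this diff), with 64-bit PTEs the extended protection bits would occupy the upper word:

	/* Sketch only: extended protection bits in the high 32 bits. */
	#define _PAGE_EXT(x)	((unsigned long long)(x) << 32)

So PAGE_SHARED, for example, ends up with the legacy PRESENT/ACCESSED/CACHABLE bits in the low word and USER_READ|USER_WRITE in the high word.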
@@ -390,9 +385,9 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
 #define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
 
 #define pmd_none(x)    (!pmd_val(x))
-#define pmd_present(x) (pmd_val(x) & _PAGE_PRESENT)
+#define pmd_present(x) (pmd_val(x))
 #define pmd_clear(xp)  do { set_pmd(xp, __pmd(0)); } while (0)
-#define        pmd_bad(x)      ((pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
+#define        pmd_bad(x)      (pmd_val(x) & ~PAGE_MASK)
 
 #define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
 #define pte_page(x)    phys_to_page(pte_val(x) & PTE_PHYS_MASK)
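pmd_present() and pmd_bad() can simplify like this because, with _PAGE_TABLE and _KERNPG_TABLE gone, a pmd entry now holds nothing but the kernel virtual address of its pte page: presence is a plain non-zero test, and any bits set below PAGE_MASK mark a corrupt entry. A sketch of the matching populate side, under that assumption (the real helper lives in the pgalloc header, not here):

	/* Sketch: store the pte page's kernel virtual address directly,
	 * with no permission bits, which is what makes the simplified
	 * pmd_present()/pmd_bad() tests above sufficient. */
	static inline void
	pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
	{
		set_pmd(pmd, __pmd((unsigned long)pte));
	}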
@@ -477,11 +472,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
        return pte;
 }
 
-#define pmd_page_vaddr(pmd) \
-((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
-
-#define pmd_page(pmd) \
-       (phys_to_page(pmd_val(pmd)))
+#define pmd_page_vaddr(pmd)    pmd_val(pmd)
+#define pmd_page(pmd)          (virt_to_page(pmd_val(pmd)))
 
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
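With a virtual address in the pmd, pmd_page_vaddr() reduces to pmd_val() itself and pmd_page() goes through virt_to_page() rather than phys_to_page(). A sketch of how a full lookup composes these macros (lookup_pte is an illustrative name, pte_index() is assumed to have its conventional definition, and locking is elided):

	/* Sketch of a software page-table walk on this two-level layout,
	 * where the pmd level is folded into the pgd. */
	static inline pte_t *lookup_pte(pgd_t *pgd_base, unsigned long address)
	{
		pmd_t *pmd = (pmd_t *)(pgd_base + pgd_index(address));

		if (pmd_none(*pmd) || pmd_bad(*pmd))
			return NULL;

		/* pmd_page_vaddr() now hands back the pte page's kernel
		 * virtual address directly. */
		return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
	}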
@@ -516,16 +508,50 @@ struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
                             unsigned long address, pte_t pte);
 
-/* Encode and de-code a swap entry */
 /*
+ * Encode and decode a swap entry
+ *
+ * Constraints:
+ *     _PAGE_FILE at bit 0
+ *     _PAGE_PRESENT at bit 8
+ *     _PAGE_PROTNONE at bit 9
+ *
+ * For the normal case, we encode the swap type into bits 0:7 and the
+ * swap offset into bits 10:30. For the 64-bit PTE case, we keep the
+ * preserved bits in the low 32-bits and use the upper 32 as the swap
+ * offset (along with a 5-bit type), following the same approach as x86
+ * PAE. This keeps the logic quite simple, and allows for a full 32
+ * PTE_FILE_MAX_BITS, as opposed to the 29 bits we're constrained to
+ * in the pte_low case.
+ *
+ * As is evident from the Alpha code, if we ever get a 64-bit unsigned
+ * long (swp_entry_t) to match up with the 64-bit PTEs, this all becomes
+ * much cleaner.
+ *
  * NOTE: We should set ZEROs at the position of _PAGE_PRESENT
  *       and _PAGE_PROTNONE bits
  */
-#define __swp_type(x)          ((x).val & 0xff)
-#define __swp_offset(x)                ((x).val >> 10)
-#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 10) })
-#define __pte_to_swp_entry(pte)        ((swp_entry_t) { pte_val(pte) >> 1 })
-#define __swp_entry_to_pte(x)  ((pte_t) { (x).val << 1 })
+#ifdef CONFIG_X2TLB
+#define __swp_type(x)                  ((x).val & 0x1f)
+#define __swp_offset(x)                ((x).val >> 5)
+#define __swp_entry(type, offset)      ((swp_entry_t){ (type) | ((offset) << 5) })
+#define __pte_to_swp_entry(pte)        ((swp_entry_t){ (pte).pte_high })
+#define __swp_entry_to_pte(x)          ((pte_t){ 0, (x).val })
+
+/*
+ * Encode and decode a nonlinear file mapping entry
+ */
+#define pte_to_pgoff(pte)              ((pte).pte_high)
+#define pgoff_to_pte(off)              ((pte_t) { _PAGE_FILE, (off) })
+
+#define PTE_FILE_MAX_BITS              32
+#else
+#define __swp_type(x)                  ((x).val & 0xff)
+#define __swp_offset(x)                ((x).val >> 10)
+#define __swp_entry(type, offset)      ((swp_entry_t){ (type) | ((offset) << 10) })
+
+#define __pte_to_swp_entry(pte)        ((swp_entry_t){ pte_val(pte) >> 1 })
+#define __swp_entry_to_pte(x)          ((pte_t){ (x).val << 1 })
 
 /*
  * Encode and decode a nonlinear file mapping entry
@@ -533,6 +559,7 @@ extern void update_mmu_cache(struct vm_area_struct * vma,
 #define PTE_FILE_MAX_BITS      29
 #define pte_to_pgoff(pte)      (pte_val(pte) >> 1)
 #define pgoff_to_pte(off)      ((pte_t) { ((off) << 1) | _PAGE_FILE })
+#endif
 
 typedef pte_t *pte_addr_t;
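A worked example of the two encodings, with concrete (made-up) numbers:

	/* Illustration: swap type 2, offset 0x1234.
	 *
	 * Non-X2TLB, 32-bit pte:
	 *   __swp_entry(2, 0x1234).val = 2 | (0x1234 << 10) = 0x0048d002
	 *   __swp_entry_to_pte(): 0x0048d002 << 1           = 0x0091a004
	 *   Bits 0 (_PAGE_FILE), 8 (_PAGE_PRESENT) and 9 (_PAGE_PROTNONE)
	 *   of the resulting pte are all clear, as the constraints demand.
	 *
	 * X2TLB, 64-bit pte:
	 *   __swp_entry(2, 0x1234).val = 2 | (0x1234 << 5)  = 0x00024682
	 *   __swp_entry_to_pte(): { .pte_low = 0, .pte_high = 0x00024682 }
	 *   pte_low stays zero, so _PAGE_PRESENT/_PAGE_PROTNONE can never
	 *   appear set, and the full 32-bit high word is free for file
	 *   offsets (PTE_FILE_MAX_BITS == 32).
	 */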