Pileus Git - ~andy/linux/blobdiff - arch/s390/lib/uaccess_pt.c
Merge branch 'late/clksrc' into late/cleanup
[~andy/linux] / arch / s390 / lib / uaccess_pt.c
index a70ee84c024126ca562f8d738b52a590e4fd6a2b..466fb3383960442b7624e51990536d2cf0b8c65d 100644 (file)
 #include <asm/futex.h>
 #include "uaccess.h"
 
+#ifndef CONFIG_64BIT
+#define AHI    "ahi"
+#define SLR    "slr"
+#else
+#define AHI    "aghi"
+#define SLR    "slgr"
+#endif
+
+/*
+ * strnlen for KERNEL_DS addresses, implemented with the SRST (search
+ * string) instruction so that a bad address is caught via the extable
+ * entry instead of oopsing.
+ *
+ * Returns the string length *including* the terminating '\0', or 0 if
+ * an exception occurred while scanning (the result register is zeroed
+ * by the SLR before the scan starts).
+ * NOTE(review): if no '\0' occurs within count bytes the result looks
+ * like count + 1 -- confirm callers expect that convention.
+ */
+static size_t strnlen_kernel(size_t count, const char __user *src)
+{
+       /* general register 0 holds the character SRST searches for: '\0' */
+       register unsigned long reg0 asm("0") = 0UL;
+       unsigned long tmp1, tmp2;
+
+       asm volatile(
+               "   la    %2,0(%1)\n"   /* %2 = src (scan start) */
+               "   la    %3,0(%0,%1)\n"        /* %3 = src + count (scan end) */
+               "  "SLR"  %0,%0\n"      /* result = 0 for the fault case */
+               "0: srst  %3,%2\n"      /* scan [%2,%3) for '\0' */
+               "   jo    0b\n"         /* cc 3: scan incomplete, resume */
+               "   la    %0,1(%3)\n"   /* strnlen_kernel results includes \0 */
+               "  "SLR"  %0,%1\n"      /* length = (found address + 1) - src */
+               "1:\n"
+               EX_TABLE(0b,1b)
+               : "+a" (count), "+a" (src), "=a" (tmp1), "=a" (tmp2)
+               : "d" (reg0) : "cc", "memory");
+       return count;
+}
+
+/*
+ * memcpy-style copy for KERNEL_DS accesses with fault handling:
+ * copies count bytes from 'from' to 'to' using MVC in 256-byte chunks,
+ * with the remainder done via EX of the single-byte MVC at label 1.
+ * If a chunked MVC faults, control resumes at label 0 and the last
+ * chunk is retried byte-by-byte, so the copy stops exactly at the
+ * first faulting byte.
+ *
+ * Returns the number of bytes that were NOT copied (0 on success),
+ * following the usual uaccess convention.
+ * NOTE(review): count == 0 does not appear to be caught by the
+ * "AHI -1 / jo" prologue (AHI of 0 + -1 sets cc 1, while jo tests
+ * cc 3) and would reach the EX with a 0xff length byte -- verify that
+ * callers never pass a zero count.
+ */
+static size_t copy_in_kernel(size_t count, void __user *to,
+                            const void __user *from)
+{
+       unsigned long tmp1;
+
+       asm volatile(
+               "  "AHI"  %0,-1\n"      /* count - 1 for the EX length field */
+               "   jo    5f\n"
+               "   bras  %3,3f\n"      /* %3 = address of label 0 */
+               "0:"AHI"  %0,257\n"     /* fault recovery: redo chunk bytewise */
+               "1: mvc   0(1,%1),0(%2)\n"      /* copy a single byte */
+               "   la    %1,1(%1)\n"
+               "   la    %2,1(%2)\n"
+               "  "AHI"  %0,-1\n"
+               "   jnz   1b\n"
+               "   j     5f\n"
+               "2: mvc   0(256,%1),0(%2)\n"    /* copy a 256-byte chunk */
+               "   la    %1,256(%1)\n"
+               "   la    %2,256(%2)\n"
+               "3:"AHI"  %0,-256\n"
+               "   jnm   2b\n"         /* at least 256 bytes left */
+               "4: ex    %0,1b-0b(%3)\n"       /* copy the remainder via EX */
+               "5:"SLR"  %0,%0\n"      /* success: return 0 */
+               "6:\n"
+               EX_TABLE(1b,6b) EX_TABLE(2b,0b) EX_TABLE(4b,0b)
+               : "+a" (count), "+a" (to), "+a" (from), "=a" (tmp1)
+               : : "cc", "memory");
+       return count;
+}
 
 /*
  * Returns kernel address for user virtual address. If the returned address is
  * >= -4095 (IS_ERR_VALUE(x) returns true), a fault has occured and the address
  * contains the (negative) exception code.
  */
-static __always_inline unsigned long follow_table(struct mm_struct *mm,
-                                                 unsigned long addr, int write)
+#ifdef CONFIG_64BIT
+/*
+ * 64 bit page table walk: start at the table level selected by the
+ * address space control element (asce) type and descend level by
+ * level; each switch "case" deliberately falls through to the next
+ * lower level.  On error a (negative) program check code is returned:
+ * 0x39/0x3a/0x3b region-first/-second/-third translation, 0x10
+ * segment translation, 0x11 page translation, 0x04 protection.
+ * NOTE(review): there is no check that the address is small enough
+ * for the configured asce type, so an oversized address indexes a
+ * table with wrapped bits instead of faulting -- verify against the
+ * z/Architecture DAT rules (upstream later added such range checks).
+ */
+static unsigned long follow_table(struct mm_struct *mm,
+                                 unsigned long address, int write)
+{
+       unsigned long *table = (unsigned long *)__pa(mm->pgd);
+
+       switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
+       case _ASCE_TYPE_REGION1:
+               table = table + ((address >> 53) & 0x7ff);
+               if (unlikely(*table & _REGION_ENTRY_INV))
+                       return -0x39UL;
+               table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+               /* fall through to region-second table */
+       case _ASCE_TYPE_REGION2:
+               table = table + ((address >> 42) & 0x7ff);
+               if (unlikely(*table & _REGION_ENTRY_INV))
+                       return -0x3aUL;
+               table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+               /* fall through to region-third table */
+       case _ASCE_TYPE_REGION3:
+               table = table + ((address >> 31) & 0x7ff);
+               if (unlikely(*table & _REGION_ENTRY_INV))
+                       return -0x3bUL;
+               table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
+               /* fall through to segment table */
+       case _ASCE_TYPE_SEGMENT:
+               table = table + ((address >> 20) & 0x7ff);
+               if (unlikely(*table & _SEGMENT_ENTRY_INV))
+                       return -0x10UL;
+               if (unlikely(*table & _SEGMENT_ENTRY_LARGE)) {
+                       /* large page: no page table level below the segment */
+                       if (write && (*table & _SEGMENT_ENTRY_RO))
+                               return -0x04UL;
+                       return (*table & _SEGMENT_ENTRY_ORIGIN_LARGE) +
+                               (address & ~_SEGMENT_ENTRY_ORIGIN_LARGE);
+               }
+               table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+       }
+       /* final level: page table entry */
+       table = table + ((address >> 12) & 0xff);
+       if (unlikely(*table & _PAGE_INVALID))
+               return -0x11UL;
+       if (write && (*table & _PAGE_RO))
+               return -0x04UL;
+       return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
+}
 
-       pgd = pgd_offset(mm, addr);
-       if (pgd_none(*pgd) || unlikely(pgd_bad(*pgd)))
-               return -0x3aUL;
+#else /* CONFIG_64BIT */
 
-       pud = pud_offset(pgd, addr);
-       if (pud_none(*pud) || unlikely(pud_bad(*pud)))
-               return -0x3bUL;
+/*
+ * 31 bit page table walk: only two levels, segment table (11 index
+ * bits taken at address bit 20) and page table (8 index bits at bit
+ * 12).  Returns the kernel address for the user virtual address, or
+ * a (negative) program check code: 0x10 segment translation, 0x11
+ * page translation, 0x04 protection.
+ */
+static unsigned long follow_table(struct mm_struct *mm,
+                                 unsigned long address, int write)
+{
+       unsigned long *table = (unsigned long *)__pa(mm->pgd);

-       pmd = pmd_offset(pud, addr);
-       if (pmd_none(*pmd))
+       table = table + ((address >> 20) & 0x7ff);
+       if (unlikely(*table & _SEGMENT_ENTRY_INV))
                return -0x10UL;
-       if (pmd_large(*pmd)) {
-               if (write && (pmd_val(*pmd) & _SEGMENT_ENTRY_RO))
-                       return -0x04UL;
-               return (pmd_val(*pmd) & HPAGE_MASK) + (addr & ~HPAGE_MASK);
-       }
-       if (unlikely(pmd_bad(*pmd)))
-               return -0x10UL;
-
-       ptep = pte_offset_map(pmd, addr);
-       if (!pte_present(*ptep))
+       table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
+       table = table + ((address >> 12) & 0xff);
+       if (unlikely(*table & _PAGE_INVALID))
                return -0x11UL;
-       if (write && (!pte_write(*ptep) || !pte_dirty(*ptep)))
+       if (write && (*table & _PAGE_RO))
                return -0x04UL;
-
-       return (pte_val(*ptep) & PAGE_MASK) + (addr & ~PAGE_MASK);
+       return (*table & PAGE_MASK) + (address & ~PAGE_MASK);
 }
 
+#endif /* CONFIG_64BIT */
+
 static __always_inline size_t __user_copy_pt(unsigned long uaddr, void *kptr,
                                             size_t n, int write_user)
 {
@@ -123,10 +207,8 @@ size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
 {
        size_t rc;
 
-       if (segment_eq(get_fs(), KERNEL_DS)) {
-               memcpy(to, (void __kernel __force *) from, n);
-               return 0;
-       }
+       if (segment_eq(get_fs(), KERNEL_DS))
+               return copy_in_kernel(n, (void __user *) to, from);
        rc = __user_copy_pt((unsigned long) from, to, n, 0);
        if (unlikely(rc))
                memset(to + n - rc, 0, rc);
@@ -135,30 +217,28 @@ size_t copy_from_user_pt(size_t n, const void __user *from, void *to)
 
 size_t copy_to_user_pt(size_t n, void __user *to, const void *from)
 {
-       if (segment_eq(get_fs(), KERNEL_DS)) {
-               memcpy((void __kernel __force *) to, from, n);
-               return 0;
-       }
+       /* KERNEL_DS: 'to' is really a kernel address; use the
+        * fault-safe copy_in_kernel() instead of a plain memcpy() so a
+        * bad address is caught via the exception table. */
+       if (segment_eq(get_fs(), KERNEL_DS))
+               return copy_in_kernel(n, to, (void __user *) from);
+       /* user space: resolve via the page table walk and copy */
        return __user_copy_pt((unsigned long) to, (void *) from, n, 1);
 }
 
 static size_t clear_user_pt(size_t n, void __user *to)
 {
+       /* clear by copying from the shared zero page, one page at a time */
+       void *zpage = (void *) empty_zero_page;
        long done, size, ret;
 
-       if (segment_eq(get_fs(), KERNEL_DS)) {
-               memset((void __kernel __force *) to, 0, n);
-               return 0;
-       }
        done = 0;
        do {
                if (n - done > PAGE_SIZE)
                        size = PAGE_SIZE;
                else
                        size = n - done;
+               /*
+                * NOTE(review): likely a bug -- the KERNEL_DS branch
+                * passes the total length 'n' instead of the per-chunk
+                * length 'size', so for n > PAGE_SIZE every iteration
+                * writes up to 'n' bytes at 'to' and reads past the end
+                * of empty_zero_page.  Upstream later changed this to
+                * copy_in_kernel(size, to, ...); confirm and fix.
+                */
+               if (segment_eq(get_fs(), KERNEL_DS))
+                       ret = copy_in_kernel(n, to, (void __user *) zpage);
+               else
+                       ret = __user_copy_pt((unsigned long) to, zpage, size, 1);
                done += size;
+               to += size;
                if (ret)
                        return ret + n - done;
        } while (done < n);
@@ -172,8 +252,10 @@ static size_t strnlen_user_pt(size_t count, const char __user *src)
        unsigned long offset, done, len, kaddr;
        size_t len_str;
 
+       if (unlikely(!count))
+               return 0;
        if (segment_eq(get_fs(), KERNEL_DS))
-               return strnlen((const char __kernel __force *) src, count) + 1;
+               return strnlen_kernel(count, src);
        done = 0;
 retry:
        spin_lock(&mm->page_table_lock);
@@ -200,25 +282,27 @@ fault:
 static size_t strncpy_from_user_pt(size_t count, const char __user *src,
                                    char *dst)
 {
-       size_t n = strnlen_user_pt(count, src);
+       size_t done, len, offset, len_str;
 
-       if (!n)
-               return -EFAULT;
-       if (n > count)
-               n = count;
-       if (segment_eq(get_fs(), KERNEL_DS)) {
-               memcpy(dst, (const char __kernel __force *) src, n);
-               if (dst[n-1] == '\0')
-                       return n-1;
-               else
-                       return n;
-       }
-       if (__user_copy_pt((unsigned long) src, dst, n, 0))
-               return -EFAULT;
-       if (dst[n-1] == '\0')
-               return n-1;
-       else
-               return n;
+       /*
+        * Copy page-wise: each chunk is limited so the source never
+        * crosses a page boundary within one copy.  After each chunk,
+        * strnlen() on the freshly copied bytes detects a '\0'; the
+        * loop stops once the terminator is seen (len_str < len) or
+        * count bytes have been copied.  Returns the string length
+        * copied (count if unterminated), 0 for count == 0, or
+        * -EFAULT on a fault.
+        */
+       if (unlikely(!count))
+               return 0;
+       done = 0;
+       do {
+               offset = (size_t)src & ~PAGE_MASK;
+               len = min(count - done, PAGE_SIZE - offset);
+               if (segment_eq(get_fs(), KERNEL_DS)) {
+                       /* KERNEL_DS: fault-safe kernel-to-kernel copy */
+                       if (copy_in_kernel(len, (void __user *) dst, src))
+                               return -EFAULT;
+               } else {
+                       if (__user_copy_pt((unsigned long) src, dst, len, 0))
+                               return -EFAULT;
+               }
+               len_str = strnlen(dst, len);
+               done += len_str;
+               src += len_str;
+               dst += len_str;
+       } while ((len_str == len) && (done < count));
+       return done;
 }
 
 static size_t copy_in_user_pt(size_t n, void __user *to,
@@ -231,10 +315,8 @@ static size_t copy_in_user_pt(size_t n, void __user *to,
        unsigned long kaddr_to, kaddr_from;
        int write_user;
 
-       if (segment_eq(get_fs(), KERNEL_DS)) {
-               memcpy((void __force *) to, (void __force *) from, n);
-               return 0;
-       }
+       if (segment_eq(get_fs(), KERNEL_DS))
+               return copy_in_kernel(n, to, from);
        done = 0;
 retry:
        spin_lock(&mm->page_table_lock);