x86-64, copy_user: Remove zero byte check before copy user buffer.
author Fenghua Yu <fenghua.yu@intel.com>
Sat, 16 Nov 2013 20:37:01 +0000 (12:37 -0800)
committer H. Peter Anvin <hpa@linux.intel.com>
Sun, 17 Nov 2013 02:00:58 +0000 (18:00 -0800)
The rep movsb instruction handles a zero-byte copy by itself: with a count of
zero it simply moves nothing. As pointed out by Linus, there is no need to
check for a zero size in the kernel. Removing this redundant check saves a few
cycles in the copy user functions.
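As a minimal userspace sketch (not part of the commit; the rep_movsb helper
is hypothetical), the following test illustrates the architectural behavior
the patch relies on: rep movsb with RCX == 0 is a no-op, so the removed
andl/jz pair guarded against nothing. Build with gcc on x86-64.

#include <stdio.h>

/* Copy len bytes from src to dst with rep movsb, the same primitive the
 * kernel string-copy variants use. RDI/RSI/RCX are updated by the
 * instruction, hence the "+" read-write constraints. */
static void rep_movsb(void *dst, const void *src, unsigned long len)
{
        asm volatile("rep movsb"
                     : "+D"(dst), "+S"(src), "+c"(len)
                     :
                     : "memory");
}

int main(void)
{
        char dst[8] = "AAAAAAA";
        char src[8] = "BBBBBBB";

        rep_movsb(dst, src, 0);                 /* zero-byte copy: a no-op */
        printf("after 0-byte copy: %s\n", dst); /* still "AAAAAAA" */

        rep_movsb(dst, src, 7);                 /* ordinary copy path */
        printf("after 7-byte copy: %s\n", dst); /* now "BBBBBBB" */
        return 0;
}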

Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Fenghua Yu <fenghua.yu@intel.com>
Link: http://lkml.kernel.org/r/1384634221-6006-1-git-send-email-fenghua.yu@intel.com
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/lib/copy_user_64.S

index a30ca15be21c8a182b3bb937ac612d1863a046ee..ffe4eb9f09eba6a9dac5804df07857b0672da366 100644
@@ -236,8 +236,6 @@ ENDPROC(copy_user_generic_unrolled)
 ENTRY(copy_user_generic_string)
        CFI_STARTPROC
        ASM_STAC
-       andl %edx,%edx
-       jz 4f
        cmpl $8,%edx
        jb 2f           /* less than 8 bytes, go to byte copy loop */
        ALIGN_DESTINATION
@@ -249,7 +247,7 @@ ENTRY(copy_user_generic_string)
 2:     movl %edx,%ecx
 3:     rep
        movsb
-4:     xorl %eax,%eax
+       xorl %eax,%eax
        ASM_CLAC
        ret
 
@@ -279,12 +277,10 @@ ENDPROC(copy_user_generic_string)
 ENTRY(copy_user_enhanced_fast_string)
        CFI_STARTPROC
        ASM_STAC
-       andl %edx,%edx
-       jz 2f
        movl %edx,%ecx
 1:     rep
        movsb
-2:     xorl %eax,%eax
+       xorl %eax,%eax
        ASM_CLAC
        ret