Commit 7c3a804c authored by James Hogan, committed by Sasha Levin

metag/usercopy: Zero rest of buffer from copy_from_user

[ Upstream commit 563ddc10 ]

Currently we try to zero the destination for a failed read from userland
in fixup code in the usercopy.c macros. The rest of the destination
buffer is then zeroed from __copy_user_zeroing(), which is used for both
copy_from_user() and __copy_from_user().

Unfortunately we fail to zero in the fixup code as D1Ar1 is set to 0
before the fixup code entry labels, and __copy_from_user() shouldn't even
be zeroing the rest of the buffer.

Move the zeroing out into copy_from_user() and rename
__copy_user_zeroing() to raw_copy_from_user() since it no longer does
any zeroing. This also conveniently matches the name needed for
RAW_COPY_USER support in a later patch.
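
For reference, the reworked wrapper in uaccess.h (taken from the first hunk
below) now zeroes only the tail that raw_copy_from_user() failed to read:

	static inline unsigned long
	copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;
		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);
		if (unlikely(res))
			memset(to + (n - res), 0, res);	/* zero only the uncopied tail */
		return res;
	}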

Fixes: 373cd784 ("metag: Memory handling")
Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: linux-metag@vger.kernel.org
Cc: stable@vger.kernel.org
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
parent f58b27f0
--- a/arch/metag/include/asm/uaccess.h
+++ b/arch/metag/include/asm/uaccess.h
@@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count);
 
 #define strlen_user(str) strnlen_user(str, 32767)
 
-extern unsigned long __must_check __copy_user_zeroing(void *to,
-						       const void __user *from,
-						       unsigned long n);
+extern unsigned long raw_copy_from_user(void *to, const void __user *from,
+					unsigned long n);
 
 static inline unsigned long
 copy_from_user(void *to, const void __user *from, unsigned long n)
 {
+	unsigned long res = n;
 	if (likely(access_ok(VERIFY_READ, from, n)))
-		return __copy_user_zeroing(to, from, n);
-	memset(to, 0, n);
-	return n;
+		res = raw_copy_from_user(to, from, n);
+	if (unlikely(res))
+		memset(to + (n - res), 0, res);
+	return res;
 }
 
-#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n)
+#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n)
 #define __copy_from_user_inatomic __copy_from_user
 
 extern unsigned long __must_check __copy_user(void __user *to,
--- a/arch/metag/lib/usercopy.c
+++ b/arch/metag/lib/usercopy.c
@@ -29,7 +29,6 @@
 		COPY						\
 		"1:\n"						\
 		"	.section .fixup,\"ax\"\n"		\
-		"	MOV D1Ar1,#0\n"				\
 		FIXUP						\
 		"	MOVT    D1Ar1,#HI(1b)\n"		\
 		"	JUMP    D1Ar1,#LO(1b)\n"		\
@@ -637,16 +636,14 @@ EXPORT_SYMBOL(__copy_user);
 	__asm_copy_user_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"	\
 		"2:	SETB [%0++],D1Ar1\n",	\
-		"3:	ADD  %2,%2,#1\n"	\
-		"	SETB [%0++],D1Ar1\n",	\
+		"3:	ADD  %2,%2,#1\n",	\
 		"	.long 2b,3b\n")
 
 #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_user_cont(to, from, ret,		\
 		"	GETW D1Ar1,[%1++]\n"		\
 		"2:	SETW [%0++],D1Ar1\n" COPY,	\
-		"3:	ADD  %2,%2,#2\n"		\
-		"	SETW [%0++],D1Ar1\n" FIXUP,	\
+		"3:	ADD  %2,%2,#2\n" FIXUP,	\
 		"	.long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_2(to, from, ret) \
@@ -656,32 +653,26 @@ EXPORT_SYMBOL(__copy_user);
 	__asm_copy_from_user_2x_cont(to, from, ret,	\
 		"	GETB D1Ar1,[%1++]\n"	\
 		"4:	SETB [%0++],D1Ar1\n",	\
-		"5:	ADD  %2,%2,#1\n"	\
-		"	SETB [%0++],D1Ar1\n",	\
+		"5:	ADD  %2,%2,#1\n",	\
 		"	.long 4b,5b\n")
 
 #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \
 	__asm_copy_user_cont(to, from, ret,		\
 		"	GETD D1Ar1,[%1++]\n"		\
 		"2:	SETD [%0++],D1Ar1\n" COPY,	\
-		"3:	ADD  %2,%2,#4\n"		\
-		"	SETD [%0++],D1Ar1\n" FIXUP,	\
+		"3:	ADD  %2,%2,#4\n" FIXUP,	\
 		"	.long 2b,3b\n" TENTRY)
 
 #define __asm_copy_from_user_4(to, from, ret) \
 	__asm_copy_from_user_4x_cont(to, from, ret, "", "", "")
 
 #define __asm_copy_from_user_8x64(to, from, ret) \
 	asm volatile (				\
 		"	GETL D0Ar2,D1Ar1,[%1++]\n"	\
 		"2:	SETL [%0++],D0Ar2,D1Ar1\n"	\
 		"1:\n"					\
 		"	.section .fixup,\"ax\"\n"	\
-		"	MOV D1Ar1,#0\n"			\
-		"	MOV D0Ar2,#0\n"			\
 		"3:	ADD  %2,%2,#8\n"		\
-		"	SETL [%0++],D0Ar2,D1Ar1\n"	\
 		"	MOVT    D0Ar2,#HI(1b)\n"	\
 		"	JUMP    D0Ar2,#LO(1b)\n"	\
 		"	.previous\n"			\
@@ -721,11 +712,12 @@ EXPORT_SYMBOL(__copy_user);
 		"SUB	%1, %1, #4\n")
 
 
-/* Copy from user to kernel, zeroing the bytes that were inaccessible in
-   userland.  The return-value is the number of bytes that were
-   inaccessible.  */
-unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
-				  unsigned long n)
+/*
+ * Copy from user to kernel.  The return-value is the number of bytes that were
+ * inaccessible.
+ */
+unsigned long raw_copy_from_user(void *pdst, const void __user *psrc,
+				 unsigned long n)
 {
 	register char *dst asm ("A0.2") = pdst;
 	register const char __user *src asm ("A1.2") = psrc;
@@ -738,7 +730,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 		__asm_copy_from_user_1(dst, src, retn);
 		n--;
 		if (retn)
-			goto copy_exception_bytes;
+			return retn + n;
 	}
 	if ((unsigned long) dst & 1) {
 		/* Worst case - byte copy */
@@ -746,14 +738,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_1(dst, src, retn);
 			n--;
 			if (retn)
-				goto copy_exception_bytes;
+				return retn + n;
 		}
 	}
 	if (((unsigned long) src & 2) && n >= 2) {
 		__asm_copy_from_user_2(dst, src, retn);
 		n -= 2;
 		if (retn)
-			goto copy_exception_bytes;
+			return retn + n;
 	}
 	if ((unsigned long) dst & 2) {
 		/* Second worst case - word copy */
@@ -761,7 +753,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_2(dst, src, retn);
 			n -= 2;
 			if (retn)
-				goto copy_exception_bytes;
+				return retn + n;
 		}
 	}
@@ -777,7 +769,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_8x64(dst, src, retn);
 			n -= 8;
 			if (retn)
-				goto copy_exception_bytes;
+				return retn + n;
 		}
 	}
@@ -793,7 +785,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 			__asm_copy_from_user_8x64(dst, src, retn);
 			n -= 8;
 			if (retn)
-				goto copy_exception_bytes;
+				return retn + n;
 		}
 	}
 #endif
@@ -803,7 +795,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 		n -= 4;
 		if (retn)
-			goto copy_exception_bytes;
+			return retn + n;
 	}
 
 	/* If we get here, there were no memory read faults.  */
@@ -829,21 +821,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc,
 	/* If we get here, retn correctly reflects the number of failing
 	   bytes. */
 	return retn;
-
- copy_exception_bytes:
-	/* We already have "retn" bytes cleared, and need to clear the
-	   remaining "n" bytes.  A non-optimized simple byte-for-byte in-line
-	   memset is preferred here, since this isn't speed-critical code and
-	   we'd rather have this a leaf-function than calling memset.  */
-	{
-		char *endp;
-		for (endp = dst + n; dst < endp; dst++)
-			*dst = 0;
-	}
-	return retn + n;
 }
-EXPORT_SYMBOL(__copy_user_zeroing);
+EXPORT_SYMBOL(raw_copy_from_user);
 
 #define __asm_clear_8x64(to, ret) \
 	asm volatile (			\