sh: update r7780rp interrupt code
diff --git a/include/asm-sh/system_32.h b/include/asm-sh/system_32.h
index ad37e8d5f31e1605b6f14da2d6bf1cb94bfb0230..7ff08d956ba8590ce7ffa4800dc173fd2a6e4cc4 100644
@@ -9,78 +9,80 @@ struct task_struct *__switch_to(struct task_struct *prev,
 /*
  *     switch_to() should switch tasks to task nr n, first
  */
-#define switch_to(prev, next, last)                            \
-do {                                                           \
-       register u32 *__ts1 __asm__ ("r1") = &prev->thread.sp;  \
-       register u32 *__ts2 __asm__ ("r2") = &prev->thread.pc;  \
-       register u32 *__ts4 __asm__ ("r4") = (u32 *)prev;       \
-       register u32 *__ts5 __asm__ ("r5") = (u32 *)next;       \
-       register u32 *__ts6 __asm__ ("r6") = &next->thread.sp;  \
-       register u32 __ts7 __asm__ ("r7") = next->thread.pc;    \
-       struct task_struct *__last;                             \
-                                                               \
-       __asm__ __volatile__ (                                  \
-               ".balign 4\n\t"                                 \
-               "stc.l  gbr, @-r15\n\t"                         \
-               "sts.l  pr, @-r15\n\t"                          \
-               "mov.l  r8, @-r15\n\t"                          \
-               "mov.l  r9, @-r15\n\t"                          \
-               "mov.l  r10, @-r15\n\t"                         \
-               "mov.l  r11, @-r15\n\t"                         \
-               "mov.l  r12, @-r15\n\t"                         \
-               "mov.l  r13, @-r15\n\t"                         \
-               "mov.l  r14, @-r15\n\t"                         \
-               "mov.l  r15, @r1\t! save SP\n\t"                \
-               "mov.l  @r6, r15\t! change to new stack\n\t"    \
-               "mova   1f, %0\n\t"                             \
-               "mov.l  %0, @r2\t! save PC\n\t"                 \
-               "mov.l  2f, %0\n\t"                             \
-               "jmp    @%0\t! call __switch_to\n\t"            \
-               " lds   r7, pr\t!  with return to new PC\n\t"   \
-               ".balign        4\n"                            \
-               "2:\n\t"                                        \
-               ".long  __switch_to\n"                          \
-               "1:\n\t"                                        \
-               "mov.l  @r15+, r14\n\t"                         \
-               "mov.l  @r15+, r13\n\t"                         \
-               "mov.l  @r15+, r12\n\t"                         \
-               "mov.l  @r15+, r11\n\t"                         \
-               "mov.l  @r15+, r10\n\t"                         \
-               "mov.l  @r15+, r9\n\t"                          \
-               "mov.l  @r15+, r8\n\t"                          \
-               "lds.l  @r15+, pr\n\t"                          \
-               "ldc.l  @r15+, gbr\n\t"                         \
-               : "=z" (__last)                                 \
-               : "r" (__ts1), "r" (__ts2), "r" (__ts4),        \
-                 "r" (__ts5), "r" (__ts6), "r" (__ts7)         \
-               : "r3", "t");                                   \
-                                                               \
-       last = __last;                                          \
+#define switch_to(prev, next, last)                                    \
+do {                                                                   \
+       register u32 *__ts1 __asm__ ("r1") = (u32 *)&prev->thread.sp;   \
+       register u32 *__ts2 __asm__ ("r2") = (u32 *)&prev->thread.pc;   \
+       register u32 *__ts4 __asm__ ("r4") = (u32 *)prev;               \
+       register u32 *__ts5 __asm__ ("r5") = (u32 *)next;               \
+       register u32 *__ts6 __asm__ ("r6") = (u32 *)&next->thread.sp;   \
+       register u32 __ts7 __asm__ ("r7") = next->thread.pc;            \
+       struct task_struct *__last;                                     \
+                                                                       \
+       __asm__ __volatile__ (                                          \
+               ".balign 4\n\t"                                         \
+               "stc.l  gbr, @-r15\n\t"                                 \
+               "sts.l  pr, @-r15\n\t"                                  \
+               "mov.l  r8, @-r15\n\t"                                  \
+               "mov.l  r9, @-r15\n\t"                                  \
+               "mov.l  r10, @-r15\n\t"                                 \
+               "mov.l  r11, @-r15\n\t"                                 \
+               "mov.l  r12, @-r15\n\t"                                 \
+               "mov.l  r13, @-r15\n\t"                                 \
+               "mov.l  r14, @-r15\n\t"                                 \
+               "mov.l  r15, @r1\t! save SP\n\t"                        \
+               "mov.l  @r6, r15\t! change to new stack\n\t"            \
+               "mova   1f, %0\n\t"                                     \
+               "mov.l  %0, @r2\t! save PC\n\t"                         \
+               "mov.l  2f, %0\n\t"                                     \
+               "jmp    @%0\t! call __switch_to\n\t"                    \
+               " lds   r7, pr\t!  with return to new PC\n\t"           \
+               ".balign        4\n"                                    \
+               "2:\n\t"                                                \
+               ".long  __switch_to\n"                                  \
+               "1:\n\t"                                                \
+               "mov.l  @r15+, r14\n\t"                                 \
+               "mov.l  @r15+, r13\n\t"                                 \
+               "mov.l  @r15+, r12\n\t"                                 \
+               "mov.l  @r15+, r11\n\t"                                 \
+               "mov.l  @r15+, r10\n\t"                                 \
+               "mov.l  @r15+, r9\n\t"                                  \
+               "mov.l  @r15+, r8\n\t"                                  \
+               "lds.l  @r15+, pr\n\t"                                  \
+               "ldc.l  @r15+, gbr\n\t"                                 \
+               : "=z" (__last)                                         \
+               : "r" (__ts1), "r" (__ts2), "r" (__ts4),                \
+                 "r" (__ts5), "r" (__ts6), "r" (__ts7)                 \
+               : "r3", "t");                                           \
+                                                                       \
+       last = __last;                                                  \
 } while (0)
 
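For context, switch_to() is invoked from the scheduler's context-switch
path. The sketch below is a hypothetical call site, not part of this
patch; it only illustrates the three-argument contract, in which `last`
receives the task that was actually running just before control
returned to `prev`:

#include <linux/sched.h>	/* struct task_struct, switch_to() */

/* Hypothetical call site, for illustration only. */
static void example_context_switch(struct task_struct *prev,
				   struct task_struct *next)
{
	struct task_struct *last;

	switch_to(prev, next, last);

	/*
	 * Execution resumes here only once `prev` is scheduled back
	 * in; `last` then names the task that ran immediately before
	 * us on this CPU, which need not be `next`.
	 */
}
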
+#define __uses_jump_to_uncached __attribute__ ((__section__ (".uncached.text")))
+
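The new __uses_jump_to_uncached annotation places a function in the
.uncached.text section. A minimal sketch of how a cache-management
routine might be tagged (the function name is illustrative, not from
this patch):

/* Illustrative only: the attribute above emits this routine into
 * the .uncached.text section. */
static void __uses_jump_to_uncached example_cache_op(void)
{
	/* cache/TLB manipulation would go here, bracketed by the
	 * jump_to_uncached()/back_to_cached() pair defined below */
}
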
 /*
- * Jump to P2 area.
- * When handling TLB or caches, we need to do it from P2 area.
+ * Jump to uncached area.
+ * When handling TLB or caches, we need to do it from an uncached area.
  */
-#define jump_to_P2()                   \
-do {                                   \
-       unsigned long __dummy;          \
-       __asm__ __volatile__(           \
-               "mov.l  1f, %0\n\t"     \
-               "or     %1, %0\n\t"     \
-               "jmp    @%0\n\t"        \
-               " nop\n\t"              \
-               ".balign 4\n"           \
-               "1:     .long 2f\n"     \
-               "2:"                    \
-               : "=&r" (__dummy)       \
-               : "r" (0x20000000));    \
+#define jump_to_uncached()                     \
+do {                                           \
+       unsigned long __dummy;                  \
+                                               \
+       __asm__ __volatile__(                   \
+               "mova   1f, %0\n\t"             \
+               "add    %1, %0\n\t"             \
+               "jmp    @%0\n\t"                \
+               " nop\n\t"                      \
+               ".balign 4\n"                   \
+               "1:"                            \
+               : "=&z" (__dummy)               \
+               : "r" (cached_to_uncached));    \
 } while (0)
 
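The renamed helpers are used as a bracket around code that must execute
uncached; back_to_cached() (defined just below) undoes the jump. A
hedged sketch of the usual pattern, with an illustrative function body
standing in for a real cache or TLB operation:

/* Illustrative pattern, not from this patch. */
static void __uses_jump_to_uncached example_flush(void)
{
	jump_to_uncached();	/* continue at the uncached alias */

	/* ... poke cache/TLB control registers here ... */

	back_to_cached();	/* resume through the cached mapping */
}
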
 /*
- * Back to P1 area.
+ * Back to cached area.
  */
-#define back_to_P1()                                   \
+#define back_to_cached()                               \
 do {                                                   \
        unsigned long __dummy;                          \
        ctrl_barrier();                                 \