/*  arch/sparc64/kernel/process.c
 *
 *  Copyright (C) 1995, 1996, 2008 David S. Miller (davem@davemloft.net)
 *  Copyright (C) 1996       Eddie C. Dost   (ecd@skynet.be)
 *  Copyright (C) 1997, 1998 Jakub Jelinek   (jj@sunsite.mff.cuni.cz)
 */

/*
 * This file handles the architecture-dependent parts of process handling.
 */

#include <stdarg.h>

#include <linux/errno.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/compat.h>
#include <linux/tick.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/perf_event.h>
#include <linux/elfcore.h>
#include <linux/sysrq.h>
#include <linux/nmi.h>

#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/pstate.h>
#include <asm/elf.h>
#include <asm/fpumacro.h>
#include <asm/head.h>
#include <asm/cpudata.h>
#include <asm/mmu_context.h>
#include <asm/unistd.h>
#include <asm/hypervisor.h>
#include <asm/syscalls.h>
#include <asm/irq_regs.h>
#include <asm/smp.h>
#include <asm/pcr.h>

#include "kstack.h"

/* Idle loop support on sparc64. */
void arch_cpu_idle(void)
{
        if (tlb_type != hypervisor) {
                touch_nmi_watchdog();
        } else {
                unsigned long pstate;

                /* The sun4v sleeping code requires that we have PSTATE.IE cleared over
                 * the cpu sleep hypervisor call.
                 */
                __asm__ __volatile__(
                        "rdpr %%pstate, %0\n\t"
                        "andn %0, %1, %0\n\t"
                        "wrpr %0, %%g0, %%pstate"
                        : "=&r" (pstate)
                        : "i" (PSTATE_IE));

                if (!need_resched() && !cpu_is_offline(smp_processor_id()))
                        sun4v_cpu_yield();

                /* Re-enable interrupts. */
                __asm__ __volatile__(
                        "rdpr %%pstate, %0\n\t"
                        "or %0, %1, %0\n\t"
                        "wrpr %0, %%g0, %%pstate"
                        : "=&r" (pstate)
                        : "i" (PSTATE_IE));
        }
        local_irq_enable();
}

#ifdef CONFIG_HOTPLUG_CPU
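/* Invoked from the generic idle loop once this CPU has been marked
 * offline; parks the CPU via cpu_play_dead() and does not return.
 */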
void arch_cpu_idle_dead(void)
{
        sched_preempt_enable_no_resched();
        cpu_play_dead();
}
#endif

#ifdef CONFIG_COMPAT
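/* Dump the 32-bit register window that a compat task saved at its
 * stack pointer (%o6), fetching it from user space.
 */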
static void show_regwindow32(struct pt_regs *regs)
{
        struct reg_window32 __user *rw;
        struct reg_window32 r_w;
        mm_segment_t old_fs;

        __asm__ __volatile__ ("flushw");
        rw = compat_ptr((unsigned)regs->u_regs[14]);
        old_fs = get_fs();
        set_fs (USER_DS);
        if (copy_from_user (&r_w, rw, sizeof(r_w))) {
                set_fs (old_fs);
                return;
        }

        set_fs (old_fs);
        printk("l0: %08x l1: %08x l2: %08x l3: %08x "
               "l4: %08x l5: %08x l6: %08x l7: %08x\n",
               r_w.locals[0], r_w.locals[1], r_w.locals[2], r_w.locals[3],
               r_w.locals[4], r_w.locals[5], r_w.locals[6], r_w.locals[7]);
        printk("i0: %08x i1: %08x i2: %08x i3: %08x "
               "i4: %08x i5: %08x i6: %08x i7: %08x\n",
               r_w.ins[0], r_w.ins[1], r_w.ins[2], r_w.ins[3],
               r_w.ins[4], r_w.ins[5], r_w.ins[6], r_w.ins[7]);
}
#else
#define show_regwindow32(regs)  do { } while (0)
#endif

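/* Dump the register window at the trapping frame's stack pointer.
 * Kernel frames are read directly, 64-bit user frames are copied in
 * from user space, and 32-bit user tasks are handled by
 * show_regwindow32().
 */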
static void show_regwindow(struct pt_regs *regs)
{
        struct reg_window __user *rw;
        struct reg_window *rwk;
        struct reg_window r_w;
        mm_segment_t old_fs;

        if ((regs->tstate & TSTATE_PRIV) || !(test_thread_flag(TIF_32BIT))) {
                __asm__ __volatile__ ("flushw");
                rw = (struct reg_window __user *)
                        (regs->u_regs[14] + STACK_BIAS);
                rwk = (struct reg_window *)
                        (regs->u_regs[14] + STACK_BIAS);
                if (!(regs->tstate & TSTATE_PRIV)) {
                        old_fs = get_fs();
                        set_fs (USER_DS);
                        if (copy_from_user (&r_w, rw, sizeof(r_w))) {
                                set_fs (old_fs);
                                return;
                        }
                        rwk = &r_w;
                        set_fs (old_fs);
                }
        } else {
                show_regwindow32(regs);
                return;
        }
        printk("l0: %016lx l1: %016lx l2: %016lx l3: %016lx\n",
               rwk->locals[0], rwk->locals[1], rwk->locals[2], rwk->locals[3]);
        printk("l4: %016lx l5: %016lx l6: %016lx l7: %016lx\n",
               rwk->locals[4], rwk->locals[5], rwk->locals[6], rwk->locals[7]);
        printk("i0: %016lx i1: %016lx i2: %016lx i3: %016lx\n",
               rwk->ins[0], rwk->ins[1], rwk->ins[2], rwk->ins[3]);
        printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
               rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
        if (regs->tstate & TSTATE_PRIV)
                printk("I7: <%pS>\n", (void *) rwk->ins[7]);
}

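/* Print the trap state (TSTATE/TPC/TNPC/Y), the global and output
 * registers, the current register window, and a stack backtrace.
 */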
void show_regs(struct pt_regs *regs)
{
        printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x    %s\n", regs->tstate,
               regs->tpc, regs->tnpc, regs->y, print_tainted());
        printk("TPC: <%pS>\n", (void *) regs->tpc);
        printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
               regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
               regs->u_regs[3]);
        printk("g4: %016lx g5: %016lx g6: %016lx g7: %016lx\n",
               regs->u_regs[4], regs->u_regs[5], regs->u_regs[6],
               regs->u_regs[7]);
        printk("o0: %016lx o1: %016lx o2: %016lx o3: %016lx\n",
               regs->u_regs[8], regs->u_regs[9], regs->u_regs[10],
               regs->u_regs[11]);
        printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
               regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
               regs->u_regs[15]);
        printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
        show_regwindow(regs);
        show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
}

union global_cpu_snapshot global_cpu_snapshot[NR_CPUS];
static DEFINE_SPINLOCK(global_cpu_snapshot_lock);

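/* Record this CPU's trap registers into its global_cpu_snapshot slot.
 * Remote CPUs fill in their own slots in response to
 * smp_fetch_global_regs().
 */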
static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs,
                              int this_cpu)
{
        struct global_reg_snapshot *rp;

        flushw_all();

        rp = &global_cpu_snapshot[this_cpu].reg;

        rp->tstate = regs->tstate;
        rp->tpc = regs->tpc;
        rp->tnpc = regs->tnpc;
        rp->o7 = regs->u_regs[UREG_I7];

        if (regs->tstate & TSTATE_PRIV) {
                struct reg_window *rw;

                rw = (struct reg_window *)
                        (regs->u_regs[UREG_FP] + STACK_BIAS);
                if (kstack_valid(tp, (unsigned long) rw)) {
                        rp->i7 = rw->ins[7];
                        rw = (struct reg_window *)
                                (rw->ins[6] + STACK_BIAS);
                        if (kstack_valid(tp, (unsigned long) rw))
                                rp->rpc = rw->ins[7];
                }
        } else {
                rp->i7 = 0;
                rp->rpc = 0;
        }
        rp->thread = tp;
}

/* In order to avoid hangs we do not try to synchronize with the
 * global register dump client cpus.  The last store they make is to
 * the thread pointer, so do a short poll waiting for that to become
 * non-NULL.
 */
static void __global_reg_poll(struct global_reg_snapshot *gp)
{
        int limit = 0;

        while (!gp->thread && ++limit < 100) {
                barrier();
                udelay(1);
        }
}

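/* Collect and print a register snapshot for every online CPU.  The
 * local CPU is sampled directly; the others are asked to dump their
 * state via smp_fetch_global_regs().
 */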
void arch_trigger_all_cpu_backtrace(void)
{
        struct thread_info *tp = current_thread_info();
        struct pt_regs *regs = get_irq_regs();
        unsigned long flags;
        int this_cpu, cpu;

        if (!regs)
                regs = tp->kregs;

        spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

        memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

        this_cpu = raw_smp_processor_id();

        __global_reg_self(tp, regs, this_cpu);

        smp_fetch_global_regs();

        for_each_online_cpu(cpu) {
                struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg;

                __global_reg_poll(gp);

                tp = gp->thread;
                printk("%c CPU[%3d]: TSTATE[%016lx] TPC[%016lx] TNPC[%016lx] TASK[%s:%d]\n",
                       (cpu == this_cpu ? '*' : ' '), cpu,
                       gp->tstate, gp->tpc, gp->tnpc,
                       ((tp && tp->task) ? tp->task->comm : "NULL"),
                       ((tp && tp->task) ? tp->task->pid : -1));

                if (gp->tstate & TSTATE_PRIV) {
                        printk("             TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
                               (void *) gp->tpc,
                               (void *) gp->o7,
                               (void *) gp->i7,
                               (void *) gp->rpc);
                } else {
                        printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
                               gp->tpc, gp->o7, gp->i7, gp->rpc);
                }
        }

        memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

        spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}

#ifdef CONFIG_MAGIC_SYSRQ

static void sysrq_handle_globreg(int key)
{
        arch_trigger_all_cpu_backtrace();
}

static struct sysrq_key_op sparc_globalreg_op = {
        .handler        = sysrq_handle_globreg,
        .help_msg       = "global-regs(Y)",
        .action_msg     = "Show Global CPU Regs",
};

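/* Sample this CPU's performance-counter control (PCR) and counter
 * (PIC) registers.  Niagara-4 and later sun4v chips expose four
 * counter pairs; everything older has just one.
 */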
static void __global_pmu_self(int this_cpu)
{
        struct global_pmu_snapshot *pp;
        int i, num;

        pp = &global_cpu_snapshot[this_cpu].pmu;

        num = 1;
        if (tlb_type == hypervisor &&
            sun4v_chip_type >= SUN4V_CHIP_NIAGARA4)
                num = 4;

        for (i = 0; i < num; i++) {
                pp->pcr[i] = pcr_ops->read_pcr(i);
                pp->pic[i] = pcr_ops->read_pic(i);
        }
}

static void __global_pmu_poll(struct global_pmu_snapshot *pp)
{
        int limit = 0;

        while (!pp->pcr[0] && ++limit < 100) {
                barrier();
                udelay(1);
        }
}

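/* Snapshot and print the PMU registers of every online CPU, using the
 * same lock and polling scheme as the global register dump above.
 */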
static void pmu_snapshot_all_cpus(void)
{
        unsigned long flags;
        int this_cpu, cpu;

        spin_lock_irqsave(&global_cpu_snapshot_lock, flags);

        memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

        this_cpu = raw_smp_processor_id();

        __global_pmu_self(this_cpu);

        smp_fetch_global_pmu();

        for_each_online_cpu(cpu) {
                struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu;

                __global_pmu_poll(pp);

                printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n",
                       (cpu == this_cpu ? '*' : ' '), cpu,
                       pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
                       pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
        }

        memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));

        spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags);
}

static void sysrq_handle_globpmu(int key)
{
        pmu_snapshot_all_cpus();
}

static struct sysrq_key_op sparc_globalpmu_op = {
        .handler        = sysrq_handle_globpmu,
        .help_msg       = "global-pmu(X)",
        .action_msg     = "Show Global PMU Regs",
};

static int __init sparc_sysrq_init(void)
{
        int ret = register_sysrq_key('y', &sparc_globalreg_op);

        if (!ret)
                ret = register_sysrq_key('x', &sparc_globalpmu_op);
        return ret;
}

core_initcall(sparc_sysrq_init);

#endif

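/* Return a saved return address for a sleeping task by walking one
 * frame up from its saved kernel stack pointer; yields 0xdeadbeef if
 * the stack does not look sane.
 */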
unsigned long thread_saved_pc(struct task_struct *tsk)
{
        struct thread_info *ti = task_thread_info(tsk);
        unsigned long ret = 0xdeadbeefUL;

        if (ti && ti->ksp) {
                unsigned long *sp;
                sp = (unsigned long *)(ti->ksp + STACK_BIAS);
                if (((unsigned long)sp & (sizeof(long) - 1)) == 0UL &&
                    sp[14]) {
                        unsigned long *fp;
                        fp = (unsigned long *)(sp[14] + STACK_BIAS);
                        if (((unsigned long)fp & (sizeof(long) - 1)) == 0UL)
                                ret = fp[15];
                }
        }
        return ret;
}

/* Free current thread data structures etc. */
void exit_thread(void)
{
        struct thread_info *t = current_thread_info();

        if (t->utraps) {
                if (t->utraps[0] < 2)
                        kfree (t->utraps);
                else
                        t->utraps[0]--;
        }
}

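/* Called when a task execs a new image: reload the TSB state for the
 * current mm, discard any user windows buffered in thread_info, and
 * clear the saved FPU register state.
 */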
void flush_thread(void)
{
        struct thread_info *t = current_thread_info();
        struct mm_struct *mm;

        mm = t->task->mm;
        if (mm)
                tsb_context_switch(mm);

        set_thread_wsaved(0);

        /* Clear FPU register state. */
        t->fpsaved[0] = 0;
}

/* It's a bit more tricky when 64-bit tasks are involved... */
static unsigned long clone_stackframe(unsigned long csp, unsigned long psp)
{
        bool stack_64bit = test_thread_64bit_stack(psp);
        unsigned long fp, distance, rval;

        if (stack_64bit) {
                csp += STACK_BIAS;
                psp += STACK_BIAS;
                __get_user(fp, &(((struct reg_window __user *)psp)->ins[6]));
                fp += STACK_BIAS;
                if (test_thread_flag(TIF_32BIT))
                        fp &= 0xffffffff;
        } else
                __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6]));

        /* Now align the stack as this is mandatory in the Sparc ABI
         * due to how register windows work.  This hides the
         * restriction from thread libraries etc.
         */
        csp &= ~15UL;

        distance = fp - psp;
        rval = (csp - distance);
        if (copy_in_user((void __user *) rval, (void __user *) psp, distance))
                rval = 0;
        else if (!stack_64bit) {
                if (put_user(((u32)csp),
                             &(((struct reg_window32 __user *)rval)->ins[6])))
                        rval = 0;
        } else {
                if (put_user(((u64)csp - STACK_BIAS),
                             &(((struct reg_window __user *)rval)->ins[6])))
                        rval = 0;
                else
                        rval = rval - STACK_BIAS;
        }

        return rval;
}

/* Standard stuff. */
static inline void shift_window_buffer(int first_win, int last_win,
                                       struct thread_info *t)
{
        int i;

        for (i = first_win; i < last_win; i++) {
                t->rwbuf_stkptrs[i] = t->rwbuf_stkptrs[i+1];
                memcpy(&t->reg_window[i], &t->reg_window[i+1],
                       sizeof(struct reg_window));
        }
}

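/* Try to push any register windows buffered in thread_info back out
 * to the user stack.  Windows that cannot be written (e.g. the stack
 * page is not resident) simply stay buffered.
 */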
void synchronize_user_stack(void)
{
        struct thread_info *t = current_thread_info();
        unsigned long window;

        flush_user_windows();
        if ((window = get_thread_wsaved()) != 0) {
                window -= 1;
                do {
                        struct reg_window *rwin = &t->reg_window[window];
                        int winsize = sizeof(struct reg_window);
                        unsigned long sp;

                        sp = t->rwbuf_stkptrs[window];

                        if (test_thread_64bit_stack(sp))
                                sp += STACK_BIAS;
                        else
                                winsize = sizeof(struct reg_window32);

                        if (!copy_to_user((char __user *)sp, rwin, winsize)) {
                                shift_window_buffer(window, get_thread_wsaved() - 1, t);
                                set_thread_wsaved(get_thread_wsaved() - 1);
                        }
                } while (window--);
        }
}

static void stack_unaligned(unsigned long sp)
{
        siginfo_t info;

        info.si_signo = SIGBUS;
        info.si_errno = 0;
        info.si_code = BUS_ADRALN;
        info.si_addr = (void __user *) sp;
        info.si_trapno = 0;
        force_sig_info(SIGBUS, &info, current);
}

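/* Like synchronize_user_stack(), but failure is not tolerated: an
 * unaligned stack pointer raises SIGBUS, and a window that cannot be
 * written back to the user stack kills the task with SIGILL.
 */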
void fault_in_user_windows(void)
{
        struct thread_info *t = current_thread_info();
        unsigned long window;

        flush_user_windows();
        window = get_thread_wsaved();

        if (likely(window != 0)) {
                window -= 1;
                do {
                        struct reg_window *rwin = &t->reg_window[window];
                        int winsize = sizeof(struct reg_window);
                        unsigned long sp;

                        sp = t->rwbuf_stkptrs[window];

                        if (test_thread_64bit_stack(sp))
                                sp += STACK_BIAS;
                        else
                                winsize = sizeof(struct reg_window32);

                        if (unlikely(sp & 0x7UL))
                                stack_unaligned(sp);

                        if (unlikely(copy_to_user((char __user *)sp,
                                                  rwin, winsize)))
                                goto barf;
                } while (window--);
        }
        set_thread_wsaved(0);
        return;

barf:
        set_thread_wsaved(window + 1);
        do_exit(SIGILL);
}

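/* Arch entry point for the fork family of system calls: pick up the
 * parent/child TID pointers from the trap registers (honoring the
 * compat layout), call do_fork(), and restore the parent's %o1 if the
 * call may be restarted.
 */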
asmlinkage long sparc_do_fork(unsigned long clone_flags,
                              unsigned long stack_start,
                              struct pt_regs *regs,
                              unsigned long stack_size)
{
        int __user *parent_tid_ptr, *child_tid_ptr;
        unsigned long orig_i1 = regs->u_regs[UREG_I1];
        long ret;

#ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_32BIT)) {
                parent_tid_ptr = compat_ptr(regs->u_regs[UREG_I2]);
                child_tid_ptr = compat_ptr(regs->u_regs[UREG_I4]);
        } else
#endif
        {
                parent_tid_ptr = (int __user *) regs->u_regs[UREG_I2];
                child_tid_ptr = (int __user *) regs->u_regs[UREG_I4];
        }

        ret = do_fork(clone_flags, stack_start, stack_size,
                      parent_tid_ptr, child_tid_ptr);

        /* If we get an error and potentially restart the system
         * call, we're screwed because copy_thread() clobbered
         * the parent's %o1.  So detect that case and restore it
         * here.
         */
        if ((unsigned long)ret >= -ERESTART_RESTARTBLOCK)
                regs->u_regs[UREG_I1] = orig_i1;

        return ret;
}

/* Copy a Sparc thread.  The fork() return value conventions
 * under SunOS are nothing short of bletcherous:
 * Parent -->  %o0 == child's  pid, %o1 == 0
 * Child  -->  %o0 == parent's pid, %o1 == 1
 */
int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p)
{
        struct thread_info *t = task_thread_info(p);
        struct pt_regs *regs = current_pt_regs();
        struct sparc_stackf *parent_sf;
        unsigned long child_stack_sz;
        char *child_trap_frame;

        /* Calculate offset to stack_frame & pt_regs */
        child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ);
        child_trap_frame = (task_stack_page(p) +
                            (THREAD_SIZE - child_stack_sz));

        t->new_child = 1;
        t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS;
        t->kregs = (struct pt_regs *) (child_trap_frame +
                                       sizeof(struct sparc_stackf));
        t->fpsaved[0] = 0;

        if (unlikely(p->flags & PF_KTHREAD)) {
                memset(child_trap_frame, 0, child_stack_sz);
                __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
                        (current_pt_regs()->tstate + 1) & TSTATE_CWP;
                t->current_ds = ASI_P;
                t->kregs->u_regs[UREG_G1] = sp; /* function */
                t->kregs->u_regs[UREG_G2] = arg;
                return 0;
        }

        parent_sf = ((struct sparc_stackf *) regs) - 1;
        memcpy(child_trap_frame, parent_sf, child_stack_sz);
        if (t->flags & _TIF_32BIT) {
                sp &= 0x00000000ffffffffUL;
                regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL;
        }
        t->kregs->u_regs[UREG_FP] = sp;
        __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] =
                (regs->tstate + 1) & TSTATE_CWP;
        t->current_ds = ASI_AIUS;
        if (sp != regs->u_regs[UREG_FP]) {
                unsigned long csp;

                csp = clone_stackframe(sp, regs->u_regs[UREG_FP]);
                if (!csp)
                        return -EFAULT;
                t->kregs->u_regs[UREG_FP] = csp;
        }
        if (t->utraps)
                t->utraps[0]++;

        /* Set the return value for the child. */
        t->kregs->u_regs[UREG_I0] = current->pid;
        t->kregs->u_regs[UREG_I1] = 1;

        /* Set the second return value for the parent. */
        regs->u_regs[UREG_I1] = 0;

        if (clone_flags & CLONE_SETTLS)
                t->kregs->u_regs[UREG_G7] = regs->u_regs[UREG_I3];

        return 0;
}

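/* Layout of the FPU register set as it is written into a 32-bit
 * (compat) ELF core dump.
 */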
typedef struct {
        union {
                unsigned int    pr_regs[32];
                unsigned long   pr_dregs[16];
        } pr_fr;
        unsigned int __unused;
        unsigned int    pr_fsr;
        unsigned char   pr_qcnt;
        unsigned char   pr_q_entrysize;
        unsigned char   pr_en;
        unsigned int    pr_q[64];
} elf_fpregset_t32;

/*
 * fill in the fpu structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs)
{
        unsigned long *kfpregs = current_thread_info()->fpregs;
        unsigned long fprs = current_thread_info()->fpsaved[0];

        if (test_thread_flag(TIF_32BIT)) {
                elf_fpregset_t32 *fpregs32 = (elf_fpregset_t32 *)fpregs;

                if (fprs & FPRS_DL)
                        memcpy(&fpregs32->pr_fr.pr_regs[0], kfpregs,
                               sizeof(unsigned int) * 32);
                else
                        memset(&fpregs32->pr_fr.pr_regs[0], 0,
                               sizeof(unsigned int) * 32);
                fpregs32->pr_qcnt = 0;
                fpregs32->pr_q_entrysize = 8;
                memset(&fpregs32->pr_q[0], 0,
                       (sizeof(unsigned int) * 64));
                if (fprs & FPRS_FEF) {
                        fpregs32->pr_fsr = (unsigned int) current_thread_info()->xfsr[0];
                        fpregs32->pr_en = 1;
                } else {
                        fpregs32->pr_fsr = 0;
                        fpregs32->pr_en = 0;
                }
        } else {
                if (fprs & FPRS_DL)
                        memcpy(&fpregs->pr_regs[0], kfpregs,
                               sizeof(unsigned int) * 32);
                else
                        memset(&fpregs->pr_regs[0], 0,
                               sizeof(unsigned int) * 32);
                if (fprs & FPRS_DU)
                        memcpy(&fpregs->pr_regs[16], kfpregs+16,
                               sizeof(unsigned int) * 32);
                else
                        memset(&fpregs->pr_regs[16], 0,
                               sizeof(unsigned int) * 32);
                if (fprs & FPRS_FEF) {
                        fpregs->pr_fsr = current_thread_info()->xfsr[0];
                        fpregs->pr_gsr = current_thread_info()->gsr[0];
                } else {
                        fpregs->pr_fsr = fpregs->pr_gsr = 0;
                }
                fpregs->pr_fprs = fprs;
        }
        return 1;
}
EXPORT_SYMBOL(dump_fpu);

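/* Walk a sleeping task's kernel stack frames (at most 16) and return
 * the first PC that is not inside the scheduler; this is what shows
 * up in /proc/<pid>/wchan.
 */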
unsigned long get_wchan(struct task_struct *task)
{
        unsigned long pc, fp, bias = 0;
        struct thread_info *tp;
        struct reg_window *rw;
        unsigned long ret = 0;
        int count = 0;

        if (!task || task == current ||
            task->state == TASK_RUNNING)
                goto out;

        tp = task_thread_info(task);
        bias = STACK_BIAS;
        fp = task_thread_info(task)->ksp + bias;

        do {
                if (!kstack_valid(tp, fp))
                        break;
                rw = (struct reg_window *) fp;
                pc = rw->ins[7];
                if (!in_sched_functions(pc)) {
                        ret = pc;
                        goto out;
                }
                fp = rw->ins[6] + bias;
        } while (++count < 16);

out:
        return ret;
}