/*
 * Blackfin architecture-dependent process handling
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later
 */
#include <linux/module.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/fs.h>
#include <linux/err.h>

#include <asm/blackfin.h>
#include <asm/fixed_code.h>
#include <asm/mem_map.h>
asmlinkage void ret_from_fork(void);
/* Points to the SDRAM backup memory for the stack that is currently in
 * L1 scratchpad memory.
 */
void *current_l1_stack_save;
/* The number of tasks currently using a L1 stack area.  The SRAM is
 * allocated/deallocated whenever this changes from/to zero.
 */
int nr_l1stack_tasks;

/* Start and length of the area in L1 scratchpad memory which we've allocated
 * for stacks.
 */
void *l1_stack_base;
unsigned long l1_stack_len;
/*
 * Powermanagement idle function, if any..
 */
void (*pm_idle)(void) = NULL;
EXPORT_SYMBOL(pm_idle);

void (*pm_power_off)(void) = NULL;
EXPORT_SYMBOL(pm_power_off);
/*
 * The idle loop on BFIN
 */
#ifdef CONFIG_IDLE_L1
static void default_idle(void)__attribute__((l1_text));
void cpu_idle(void)__attribute__((l1_text));
#endif
/*
 * This is our default idle handler.  We need to disable
 * interrupts here to ensure we don't miss a wakeup call.
 */
static void default_idle(void)
{
#ifdef CONFIG_IPIPE
	ipipe_suspend_domain();
#endif
	hard_local_irq_disable();
	if (!need_resched())
		idle_with_irq_disabled();

	hard_local_irq_enable();
}
/*
 * The idle thread.  We try to conserve power, while trying to keep
 * overall latency low.  The architecture specific idle is passed
 * a value to indicate the level of "idleness" of the system.
 */
void cpu_idle(void)
{
	/* endless idle loop with no priority at all */
	while (1) {
		void (*idle)(void) = pm_idle;

#ifdef CONFIG_HOTPLUG_CPU
		if (cpu_is_offline(smp_processor_id()))
			cpu_die();
#endif
		if (!idle)
			idle = default_idle;
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched())
			idle();
		rcu_idle_exit();
		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
/*
 * Do necessary setup to start up a newly executed thread.
 *
 * pass the data segment into user programs if it exists,
 * it can't hurt anything as far as I can tell
 */
void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
{
	regs->pc = new_ip;
	if (current->mm)
		regs->p5 = current->mm->start_data;
#ifndef CONFIG_SMP
	task_thread_info(current)->l1_task_info.stack_start =
		(void *)current->mm->context.stack_start;
	task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
	memcpy(L1_SCRATCH_TASK_INFO, &task_thread_info(current)->l1_task_info,
	       sizeof(*L1_SCRATCH_TASK_INFO));
#endif
	wrusp(new_sp);
}
EXPORT_SYMBOL_GPL(start_thread);
void flush_thread(void)
{
}
asmlinkage int bfin_clone(unsigned long clone_flags, unsigned long newsp)
{
#ifdef __ARCH_SYNC_CORE_DCACHE
	/* L1 dcache coherency is maintained by software here, so pin the
	 * task to the core it is currently running on */
	if (current->nr_cpus_allowed == num_possible_cpus())
		set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
#endif

	return do_fork(clone_flags, newsp, 0, NULL, NULL);
}
int
copy_thread(unsigned long clone_flags,
	    unsigned long usp, unsigned long topstk,
	    struct task_struct *p)
{
	struct pt_regs *childregs;
	unsigned long *v;

	childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
	v = ((unsigned long *)childregs) - 2;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread: ret_from_fork takes the function/arg from v[] */
		memset(childregs, 0, sizeof(struct pt_regs));
		v[0] = usp;
		v[1] = topstk;
		childregs->orig_p0 = -1;
		childregs->ipend = 0x8000;
		__asm__ __volatile__("%0 = syscfg;":"=da"(childregs->syscfg):);
		p->thread.usp = 0;
	} else {
		/* user thread: child sees a zero return from fork/clone */
		*childregs = *current_pt_regs();
		childregs->r0 = 0;
		p->thread.usp = usp ? : rdusp();
		v[0] = v[1] = 0;
	}

	p->thread.ksp = (unsigned long)v;
	p->thread.pc = (unsigned long)ret_from_fork;

	return 0;
}
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long fp, pc;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)p;
	fp = p->thread.usp;
	do {
		/* 8184 == THREAD_SIZE - 8: highest valid frame address */
		if (fp < stack_page + sizeof(struct thread_info) ||
		    fp >= 8184 + stack_page)
			return 0;
		pc = ((unsigned long *)fp)[1];
		if (!in_sched_functions(pc))
			return pc;
		fp = *(unsigned long *)fp;
	} while (count++ < 16);
	return 0;
}
void finish_atomic_sections (struct pt_regs *regs)
{
	int __user *up0 = (int __user *)regs->p0;

	switch (regs->pc) {
	default:
		/* not in middle of an atomic step, so resume like normal */
		return;

	case ATOMIC_XCHG32 + 2:
		put_user(regs->r1, up0);
		break;

	case ATOMIC_CAS32 + 2:
	case ATOMIC_CAS32 + 4:
		if (regs->r0 == regs->r1)
	case ATOMIC_CAS32 + 6:
			put_user(regs->r2, up0);
		break;

	case ATOMIC_ADD32 + 2:
		regs->r0 = regs->r1 + regs->r0;
		/* fall through */
	case ATOMIC_ADD32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_SUB32 + 2:
		regs->r0 = regs->r1 - regs->r0;
		/* fall through */
	case ATOMIC_SUB32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_IOR32 + 2:
		regs->r0 = regs->r1 | regs->r0;
		/* fall through */
	case ATOMIC_IOR32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_AND32 + 2:
		regs->r0 = regs->r1 & regs->r0;
		/* fall through */
	case ATOMIC_AND32 + 4:
		put_user(regs->r0, up0);
		break;

	case ATOMIC_XOR32 + 2:
		regs->r0 = regs->r1 ^ regs->r0;
		/* fall through */
	case ATOMIC_XOR32 + 4:
		put_user(regs->r0, up0);
		break;
	}

	/*
	 * We've finished the atomic section, and the only thing left for
	 * userspace is to do a RTS, so we might as well handle that too
	 * since we need to update the PC anyways.
	 */
	regs->pc = regs->rets;
}
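
/*
 * For illustration only (a sketch inferred from the fixup cases above, not
 * part of the original file): userspace calls the fixed-code atomics with
 * P0 = address of the 32-bit variable and the operand in R0 (R1 carries the
 * new value for XCHG; R1 the compare value and R2 the new value for CAS);
 * the old/result value comes back in R0.  A hypothetical ATOMIC_ADD32 call
 * looks roughly like:
 *
 *	P1 = <address of ATOMIC_ADD32>;
 *	P0 = <address of counter>;
 *	R0 = 1;
 *	CALL (P1);	// runs: R1 = [P0]; R0 = R1 + R0; [P0] = R0; RTS;
 *
 * If an interrupt lands mid-sequence, finish_atomic_sections() rolls the
 * operation forward here instead of restarting it.
 */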
static inline
int in_mem(unsigned long addr, unsigned long size,
	   unsigned long start, unsigned long end)
{
	return addr >= start && addr + size <= end;
}
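/*
 * Illustrative examples (not from the original source): a transfer may end
 * exactly at 'end' but not run past it:
 *
 *	in_mem(0x1000, 4, 0x1000, 0x2000) -> 1
 *	in_mem(0x1ffc, 4, 0x1000, 0x2000) -> 1	(addr + size == end is OK)
 *	in_mem(0x1ffd, 4, 0x1000, 0x2000) -> 0	(runs past the region)
 */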
static inline
int in_mem_const_off(unsigned long addr, unsigned long size, unsigned long off,
		     unsigned long const_addr, unsigned long const_size)
{
	return const_size &&
	       in_mem(addr, size, const_addr + off, const_addr + const_size);
}
static inline
int in_mem_const(unsigned long addr, unsigned long size,
		 unsigned long const_addr, unsigned long const_size)
{
	return in_mem_const_off(addr, size, 0, const_addr, const_size);
}
#ifdef CONFIG_BF60x
#define ASYNC_ENABLED(bnum, bctlnum)	1
#else
#define ASYNC_ENABLED(bnum, bctlnum) \
({ \
	(bfin_read_EBIU_AMGCTL() & 0xe) < ((bnum + 1) << 1) ? 0 : \
	bfin_read_EBIU_AMBCTL##bctlnum() & B##bnum##RDYEN ? 0 : \
	1; \
})
#endif
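/*
 * Reading the macro (editor's note): the AMBEN field of EBIU_AMGCTL (the
 * 0xe mask) encodes how many async banks are enabled, so a bank is usable
 * only when the enable count covers it; and if the bank's ARDY handshake
 * (BxRDYEN in EBIU_AMBCTLx) is on, a read of idle hardware may never
 * complete, so such banks are treated as unusable as well.
 */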
/*
 * We can't read EBIU banks that aren't enabled or we end up hanging
 * on the access to the async space.  Make sure we validate accesses
 * that cross async banks too.
 *	0 - found, but unusable
 *	1 - found & usable
 *	2 - not found
 */
static
int in_async(unsigned long addr, unsigned long size)
{
	if (addr >= ASYNC_BANK0_BASE && addr < ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE) {
		if (!ASYNC_ENABLED(0, 0))
			return 0;
		if (addr + size <= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE)
			return 1;
		/* access spills into the next bank; keep checking there */
		size -= ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE - addr;
		addr = ASYNC_BANK0_BASE + ASYNC_BANK0_SIZE;
	}
	if (addr >= ASYNC_BANK1_BASE && addr < ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE) {
		if (!ASYNC_ENABLED(1, 0))
			return 0;
		if (addr + size <= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE)
			return 1;
		size -= ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE - addr;
		addr = ASYNC_BANK1_BASE + ASYNC_BANK1_SIZE;
	}
	if (addr >= ASYNC_BANK2_BASE && addr < ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE) {
		if (!ASYNC_ENABLED(2, 1))
			return 0;
		if (addr + size <= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE)
			return 1;
		size -= ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE - addr;
		addr = ASYNC_BANK2_BASE + ASYNC_BANK2_SIZE;
	}
	if (addr >= ASYNC_BANK3_BASE && addr < ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE) {
		if (!ASYNC_ENABLED(3, 1))
			return 0;
		if (addr + size <= ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE)
			return 1;
		/* runs past the last bank */
		return 0;
	}

	/* not within async bounds */
	return 2;
}
int bfin_mem_access_type(unsigned long addr, unsigned long size)
{
	int cpu = raw_smp_processor_id();

	/* Check that things do not wrap around */
	if (addr > ULONG_MAX - size)
		return -EFAULT;

	if (in_mem(addr, size, FIXED_CODE_START, physical_mem_end))
		return BFIN_MEM_ACCESS_CORE;
	if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, L1_SCRATCH_START, L1_SCRATCH_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
	if (in_mem_const(addr, size, L1_DATA_A_START, L1_DATA_A_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, L1_DATA_B_START, L1_DATA_B_LENGTH))
		return cpu == 0 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#ifdef COREB_L1_CODE_START
	if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_ITEST : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_CORE_ONLY : -EFAULT;
	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
		return cpu == 1 ? BFIN_MEM_ACCESS_CORE : BFIN_MEM_ACCESS_IDMA;
#endif
	if (in_mem_const(addr, size, L2_START, L2_LENGTH))
		return BFIN_MEM_ACCESS_CORE;
	if (addr >= SYSMMR_BASE)
		return BFIN_MEM_ACCESS_CORE_ONLY;

	switch (in_async(addr, size)) {
	case 0: return -EFAULT;
	case 1: return BFIN_MEM_ACCESS_CORE;
	case 2: /* fall through */;
	}

	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
		return BFIN_MEM_ACCESS_CORE;
	if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
		return BFIN_MEM_ACCESS_DMA;

	return -EFAULT;
}
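
/*
 * For illustration (a hypothetical caller, not from this file): the
 * classification above lets users pick a transfer method, e.g.:
 *
 *	int mtype = bfin_mem_access_type(addr, len);
 *	if (mtype < 0)
 *		return mtype;		// -EFAULT: not accessible at all
 *	if (mtype == BFIN_MEM_ACCESS_CORE || mtype == BFIN_MEM_ACCESS_CORE_ONLY)
 *		memcpy(dst, (void *)addr, len);	// only the core may touch it
 *	else
 *		...			// reachable via the DMA/IDMA/ITEST paths
 */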
#if defined(CONFIG_ACCESS_CHECK)
#ifdef CONFIG_ACCESS_OK_L1
__attribute__((l1_text))
#endif
/* Return 1 if access to memory range is OK, 0 otherwise */
int _access_ok(unsigned long addr, unsigned long size)
{
	int aret;

	if (size == 0)
		return 1;
	/* Check that things do not wrap around */
	if (addr > ULONG_MAX - size)
		return 0;
	if (segment_eq(get_fs(), KERNEL_DS))
		return 1;
	/* Both arms below always compile; the constant if() picks the live one. */
#ifdef CONFIG_MTD_UCLINUX
	if (1)
#else
	if (0)
#endif
	{
		if (in_mem(addr, size, memory_start, memory_end))
			return 1;
		if (in_mem(addr, size, memory_mtd_end, physical_mem_end))
			return 1;
# ifndef CONFIG_ROMFS_ON_MTD
		if (0)
# endif
			/* For XIP, allow user space to use pointers within the ROMFS. */
			if (in_mem(addr, size, memory_mtd_start, memory_mtd_end))
				return 1;
	} else {
		if (in_mem(addr, size, memory_start, physical_mem_end))
			return 1;
	}
	if (in_mem(addr, size, (unsigned long)__init_begin, (unsigned long)__init_end))
		return 1;

	if (in_mem_const(addr, size, L1_CODE_START, L1_CODE_LENGTH))
		return 1;
	if (in_mem_const_off(addr, size, _etext_l1 - _stext_l1, L1_CODE_START, L1_CODE_LENGTH))
		return 1;
	if (in_mem_const_off(addr, size, _ebss_l1 - _sdata_l1, L1_DATA_A_START, L1_DATA_A_LENGTH))
		return 1;
	if (in_mem_const_off(addr, size, _ebss_b_l1 - _sdata_b_l1, L1_DATA_B_START, L1_DATA_B_LENGTH))
		return 1;
#ifdef COREB_L1_CODE_START
	if (in_mem_const(addr, size, COREB_L1_CODE_START, COREB_L1_CODE_LENGTH))
		return 1;
	if (in_mem_const(addr, size, COREB_L1_SCRATCH_START, L1_SCRATCH_LENGTH))
		return 1;
	if (in_mem_const(addr, size, COREB_L1_DATA_A_START, COREB_L1_DATA_A_LENGTH))
		return 1;
	if (in_mem_const(addr, size, COREB_L1_DATA_B_START, COREB_L1_DATA_B_LENGTH))
		return 1;
#endif
#ifndef CONFIG_EXCEPTION_L1_SCRATCH
	if (in_mem_const(addr, size, (unsigned long)l1_stack_base, l1_stack_len))
		return 1;
#endif
	aret = in_async(addr, size);
	if (aret < 2)
		return aret;	/* 0/1 from in_async are final answers */

	if (in_mem_const_off(addr, size, _ebss_l2 - _stext_l2, L2_START, L2_LENGTH))
		return 1;
	if (in_mem_const(addr, size, BOOT_ROM_START, BOOT_ROM_LENGTH))
		return 1;
	if (in_mem_const(addr, size, L1_ROM_START, L1_ROM_LENGTH))
		return 1;

	return 0;
}
EXPORT_SYMBOL(_access_ok);
#endif /* CONFIG_ACCESS_CHECK */
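
/*
 * Context, for illustration (an editor's sketch of how the export is used):
 * with CONFIG_ACCESS_CHECK set, the nommu access_ok() resolves to
 * _access_ok(), so copy_to_user()/copy_from_user() style helpers funnel
 * through the range whitelist above, e.g.:
 *
 *	if (copy_to_user(ubuf, kbuf, len))	// fails if !_access_ok(ubuf, len)
 *		return -EFAULT;
 */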