/*
 * Versatile Express Core Tile Cortex A9x4 Support
 */
4 #include <linux/init.h>
6 #include <linux/device.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/platform_device.h>
9 #include <linux/amba/bus.h>
10 #include <linux/amba/clcd.h>
11 #include <linux/clkdev.h>
13 #include <asm/hardware/arm_timer.h>
14 #include <asm/hardware/cache-l2x0.h>
15 #include <asm/hardware/gic.h>
17 #include <asm/smp_scu.h>
18 #include <asm/smp_twd.h>
20 #include <mach/ct-ca9x4.h>
22 #include <asm/hardware/timer-sp.h>
24 #include <asm/mach/map.h>
25 #include <asm/mach/time.h>
29 #include <mach/motherboard.h>
31 #include <plat/clcd.h>
33 static struct map_desc ct_ca9x4_io_desc[] __initdata = {
35 .virtual = V2T_PERIPH,
36 .pfn = __phys_to_pfn(CT_CA9X4_MPIC),
42 static void __init ct_ca9x4_map_io(void)
44 iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc));
#ifdef CONFIG_HAVE_ARM_TWD
static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER);

/*
 * Register the Cortex-A9 per-cpu TWD local timer.  Failure is only
 * logged: the system can still tick from a global timer source.
 * NOTE(review): the "if (err)" line and braces were missing in the
 * extracted source; restored per upstream -- confirm.
 */
static void __init ca9x4_twd_init(void)
{
	int err = twd_local_timer_register(&twd_local_timer);
	if (err)
		pr_err("twd_local_timer_register failed %d\n", err);
}
#else
/* No TWD support configured: make the call site a no-op. */
#define ca9x4_twd_init() do {} while(0)
#endif
60 static void __init ct_ca9x4_init_irq(void)
62 gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K),
63 ioremap(A9_MPCORE_GIC_CPU, SZ_256));
67 static void ct_ca9x4_clcd_enable(struct clcd_fb *fb)
69 u32 site = v2m_get_master_site();
72 * Old firmware was using the "site" component of the command
73 * to control the DVI muxer (while it should be always 0 ie. MB).
74 * Newer firmware uses the data register. Keep both for compatibility.
76 v2m_cfg_write(SYS_CFG_MUXFPGA | SYS_CFG_SITE(site), site);
77 v2m_cfg_write(SYS_CFG_DVIMODE | SYS_CFG_SITE(SYS_CFG_SITE_MB), 2);
80 static int ct_ca9x4_clcd_setup(struct clcd_fb *fb)
82 unsigned long framesize = 1024 * 768 * 2;
84 fb->panel = versatile_clcd_get_panel("XVGA");
88 return versatile_clcd_setup_dma(fb, framesize);
91 static struct clcd_board ct_ca9x4_clcd_data = {
93 .caps = CLCD_CAP_5551 | CLCD_CAP_565,
94 .check = clcdfb_check,
95 .decode = clcdfb_decode,
96 .enable = ct_ca9x4_clcd_enable,
97 .setup = ct_ca9x4_clcd_setup,
98 .mmap = versatile_clcd_mmap_dma,
99 .remove = versatile_clcd_remove_dma,
/* AMBA primecell devices on the core tile: CLCD on AHB; DMC, SMC and GPIO on APB. */
static AMBA_AHB_DEVICE(clcd, "ct:clcd", 0, CT_CA9X4_CLCDC, IRQ_CT_CA9X4_CLCDC, &ct_ca9x4_clcd_data);
static AMBA_APB_DEVICE(dmc, "ct:dmc", 0, CT_CA9X4_DMC, IRQ_CT_CA9X4_DMC, NULL);
static AMBA_APB_DEVICE(smc, "ct:smc", 0, CT_CA9X4_SMC, IRQ_CT_CA9X4_SMC, NULL);
static AMBA_APB_DEVICE(gpio, "ct:gpio", 0, CT_CA9X4_GPIO, IRQ_CT_CA9X4_GPIO, NULL);
107 static struct amba_device *ct_ca9x4_amba_devs[] __initdata = {
115 static long ct_round(struct clk *clk, unsigned long rate)
120 static int ct_set(struct clk *clk, unsigned long rate)
122 u32 site = v2m_get_master_site();
124 return v2m_cfg_write(SYS_CFG_OSC | SYS_CFG_SITE(site) | 1, rate);
127 static const struct clk_ops osc1_clk_ops = {
132 static struct clk osc1_clk = {
133 .ops = &osc1_clk_ops,
137 static struct clk ct_sp804_clk = {
141 static struct clk_lookup lookups[] = {
145 }, { /* SP804 timers */
147 .con_id = "ct-timer0",
148 .clk = &ct_sp804_clk,
149 }, { /* SP804 timers */
151 .con_id = "ct-timer1",
152 .clk = &ct_sp804_clk,
156 static struct resource pmu_resources[] = {
158 .start = IRQ_CT_CA9X4_PMU_CPU0,
159 .end = IRQ_CT_CA9X4_PMU_CPU0,
160 .flags = IORESOURCE_IRQ,
163 .start = IRQ_CT_CA9X4_PMU_CPU1,
164 .end = IRQ_CT_CA9X4_PMU_CPU1,
165 .flags = IORESOURCE_IRQ,
168 .start = IRQ_CT_CA9X4_PMU_CPU2,
169 .end = IRQ_CT_CA9X4_PMU_CPU2,
170 .flags = IORESOURCE_IRQ,
173 .start = IRQ_CT_CA9X4_PMU_CPU3,
174 .end = IRQ_CT_CA9X4_PMU_CPU3,
175 .flags = IORESOURCE_IRQ,
179 static struct platform_device pmu_device = {
181 .id = ARM_PMU_DEVICE_CPU,
182 .num_resources = ARRAY_SIZE(pmu_resources),
183 .resource = pmu_resources,
186 static void __init ct_ca9x4_init_early(void)
188 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
191 static void __init ct_ca9x4_init(void)
195 #ifdef CONFIG_CACHE_L2X0
196 void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K);
198 /* set RAM latencies to 1 cycle for this core tile. */
199 writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL);
200 writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL);
202 l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff);
205 for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++)
206 amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource);
208 platform_device_register(&pmu_device);
212 static void *ct_ca9x4_scu_base __initdata;
214 static void __init ct_ca9x4_init_cpu_map(void)
218 ct_ca9x4_scu_base = ioremap(A9_MPCORE_SCU, SZ_128);
219 if (WARN_ON(!ct_ca9x4_scu_base))
222 ncores = scu_get_core_count(ct_ca9x4_scu_base);
224 if (ncores > nr_cpu_ids) {
225 pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
230 for (i = 0; i < ncores; ++i)
231 set_cpu_possible(i, true);
233 set_smp_cross_call(gic_raise_softirq);
236 static void __init ct_ca9x4_smp_enable(unsigned int max_cpus)
238 scu_enable(ct_ca9x4_scu_base);
242 struct ct_desc ct_ca9x4_desc __initdata = {
245 .map_io = ct_ca9x4_map_io,
246 .init_early = ct_ca9x4_init_early,
247 .init_irq = ct_ca9x4_init_irq,
248 .init_tile = ct_ca9x4_init,
250 .init_cpu_map = ct_ca9x4_init_cpu_map,
251 .smp_enable = ct_ca9x4_smp_enable,