2 * Copyright (C) STMicroelectronics 2009
3 * Copyright (C) ST-Ericsson SA 2010
5 * License Terms: GNU General Public License v2
6 * Author: Kumar Sanghvi <kumar.sanghvi@stericsson.com>
7 * Author: Sundar Iyer <sundar.iyer@stericsson.com>
8 * Author: Mattias Nilsson <mattias.i.nilsson@stericsson.com>
10 * U8500 PRCM Unit interface driver
13 #include <linux/module.h>
14 #include <linux/kernel.h>
15 #include <linux/delay.h>
16 #include <linux/errno.h>
17 #include <linux/err.h>
18 #include <linux/spinlock.h>
20 #include <linux/slab.h>
21 #include <linux/mutex.h>
22 #include <linux/completion.h>
23 #include <linux/irq.h>
24 #include <linux/jiffies.h>
25 #include <linux/bitops.h>
27 #include <linux/platform_device.h>
28 #include <linux/uaccess.h>
29 #include <linux/mfd/core.h>
30 #include <linux/mfd/db8500-prcmu.h>
31 #include <linux/regulator/db8500-prcmu.h>
32 #include <linux/regulator/machine.h>
33 #include <mach/hardware.h>
34 #include <mach/irqs.h>
35 #include <mach/db8500-regs.h>
37 #include "db8500-prcmu-regs.h"
/*
 * Firmware/TCDM layout constants.
 *
 * NOTE(review): this copy of the file has lines elided throughout (the
 * embedded original line numbers are non-contiguous) and every line carries
 * a fused line-number prefix from an extraction step; verify against the
 * upstream db8500-prcmu.c before relying on completeness.
 */
39 /* Offset for the firmware version within the TCPM */
40 #define PRCMU_FW_VERSION_OFFSET 0xA4
42 /* PRCMU project numbers, defined by PRCMU FW */
43 #define PRCMU_PROJECT_ID_8500V1_0 1
44 #define PRCMU_PROJECT_ID_8500V2_0 2
45 #define PRCMU_PROJECT_ID_8400V2_0 3
47 /* Index of different voltages to be used when accessing AVSData */
48 #define PRCM_AVS_BASE 0x2FC
49 #define PRCM_AVS_VBB_RET (PRCM_AVS_BASE + 0x0)
50 #define PRCM_AVS_VBB_MAX_OPP (PRCM_AVS_BASE + 0x1)
51 #define PRCM_AVS_VBB_100_OPP (PRCM_AVS_BASE + 0x2)
52 #define PRCM_AVS_VBB_50_OPP (PRCM_AVS_BASE + 0x3)
53 #define PRCM_AVS_VARM_MAX_OPP (PRCM_AVS_BASE + 0x4)
54 #define PRCM_AVS_VARM_100_OPP (PRCM_AVS_BASE + 0x5)
55 #define PRCM_AVS_VARM_50_OPP (PRCM_AVS_BASE + 0x6)
56 #define PRCM_AVS_VARM_RET (PRCM_AVS_BASE + 0x7)
57 #define PRCM_AVS_VAPE_100_OPP (PRCM_AVS_BASE + 0x8)
58 #define PRCM_AVS_VAPE_50_OPP (PRCM_AVS_BASE + 0x9)
59 #define PRCM_AVS_VMOD_100_OPP (PRCM_AVS_BASE + 0xA)
60 #define PRCM_AVS_VMOD_50_OPP (PRCM_AVS_BASE + 0xB)
61 #define PRCM_AVS_VSAFE (PRCM_AVS_BASE + 0xC)
/* Bit-field layout of a single AVS data byte. */
63 #define PRCM_AVS_VOLTAGE 0
64 #define PRCM_AVS_VOLTAGE_MASK 0x3f
65 #define PRCM_AVS_ISSLOWSTARTUP 6
66 #define PRCM_AVS_ISSLOWSTARTUP_MASK (1 << PRCM_AVS_ISSLOWSTARTUP)
67 #define PRCM_AVS_ISMODEENABLE 7
68 #define PRCM_AVS_ISMODEENABLE_MASK (1 << PRCM_AVS_ISMODEENABLE)
/*
 * Boot/romcode handshake bytes near the top of the TCDM; offsets descend
 * from 0xFFF as each field is placed below the previous one.
 */
70 #define PRCM_BOOT_STATUS 0xFFF
71 #define PRCM_ROMCODE_A2P 0xFFE
72 #define PRCM_ROMCODE_P2A 0xFFD
73 #define PRCM_XP70_CUR_PWR_STATE 0xFFC /* 4 BYTES */
75 #define PRCM_SW_RST_REASON 0xFF8 /* 2 bytes */
/* One header byte per request mailbox, plus the MB0 acknowledge header. */
77 #define _PRCM_MBOX_HEADER 0xFE8 /* 16 bytes */
78 #define PRCM_MBOX_HEADER_REQ_MB0 (_PRCM_MBOX_HEADER + 0x0)
79 #define PRCM_MBOX_HEADER_REQ_MB1 (_PRCM_MBOX_HEADER + 0x1)
80 #define PRCM_MBOX_HEADER_REQ_MB2 (_PRCM_MBOX_HEADER + 0x2)
81 #define PRCM_MBOX_HEADER_REQ_MB3 (_PRCM_MBOX_HEADER + 0x3)
82 #define PRCM_MBOX_HEADER_REQ_MB4 (_PRCM_MBOX_HEADER + 0x4)
83 #define PRCM_MBOX_HEADER_REQ_MB5 (_PRCM_MBOX_HEADER + 0x5)
84 #define PRCM_MBOX_HEADER_ACK_MB0 (_PRCM_MBOX_HEADER + 0x8)
/* Request and acknowledge mailbox buffers in the TCDM. */
87 #define PRCM_REQ_MB0 0xFDC /* 12 bytes */
88 #define PRCM_REQ_MB1 0xFD0 /* 12 bytes */
89 #define PRCM_REQ_MB2 0xFC0 /* 16 bytes */
90 #define PRCM_REQ_MB3 0xE4C /* 372 bytes */
91 #define PRCM_REQ_MB4 0xE48 /* 4 bytes */
92 #define PRCM_REQ_MB5 0xE44 /* 4 bytes */
95 #define PRCM_ACK_MB0 0xE08 /* 52 bytes */
96 #define PRCM_ACK_MB1 0xE04 /* 4 bytes */
97 #define PRCM_ACK_MB2 0xE00 /* 4 bytes */
98 #define PRCM_ACK_MB3 0xDFC /* 4 bytes */
99 #define PRCM_ACK_MB4 0xDF8 /* 4 bytes */
100 #define PRCM_ACK_MB5 0xDF4 /* 4 bytes */
102 /* Mailbox 0 headers */
103 #define MB0H_POWER_STATE_TRANS 0
104 #define MB0H_CONFIG_WAKEUPS_EXE 1
105 #define MB0H_READ_WAKEUP_ACK 3
106 #define MB0H_CONFIG_WAKEUPS_SLEEP 4
/* Acknowledge-side MB0 headers. */
108 #define MB0H_WAKEUP_EXE 2
109 #define MB0H_WAKEUP_SLEEP 5
/* Mailbox 0 request fields (power-state transition and wakeup config). */
112 #define PRCM_REQ_MB0_AP_POWER_STATE (PRCM_REQ_MB0 + 0x0)
113 #define PRCM_REQ_MB0_AP_PLL_STATE (PRCM_REQ_MB0 + 0x1)
114 #define PRCM_REQ_MB0_ULP_CLOCK_STATE (PRCM_REQ_MB0 + 0x2)
115 #define PRCM_REQ_MB0_DO_NOT_WFI (PRCM_REQ_MB0 + 0x3)
116 #define PRCM_REQ_MB0_WAKEUP_8500 (PRCM_REQ_MB0 + 0x4)
117 #define PRCM_REQ_MB0_WAKEUP_4500 (PRCM_REQ_MB0 + 0x8)
/*
 * Mailbox 0 acknowledge fields. The wakeup event words come in two
 * buffers (_0_ and _1_); which one is current is indicated by bit 0 of
 * PRCM_ACK_MB0_READ_POINTER (see prcmu_get_abb_event_buffer()).
 */
120 #define PRCM_ACK_MB0_AP_PWRSTTR_STATUS (PRCM_ACK_MB0 + 0x0)
121 #define PRCM_ACK_MB0_READ_POINTER (PRCM_ACK_MB0 + 0x1)
122 #define PRCM_ACK_MB0_WAKEUP_0_8500 (PRCM_ACK_MB0 + 0x4)
123 #define PRCM_ACK_MB0_WAKEUP_0_4500 (PRCM_ACK_MB0 + 0x8)
124 #define PRCM_ACK_MB0_WAKEUP_1_8500 (PRCM_ACK_MB0 + 0x1C)
125 #define PRCM_ACK_MB0_WAKEUP_1_4500 (PRCM_ACK_MB0 + 0x20)
126 #define PRCM_ACK_MB0_EVENT_4500_NUMBERS 20
128 /* Mailbox 1 headers */
129 #define MB1H_ARM_APE_OPP 0x0
130 #define MB1H_RESET_MODEM 0x2
131 #define MB1H_REQUEST_APE_OPP_100_VOLT 0x3
132 #define MB1H_RELEASE_APE_OPP_100_VOLT 0x4
133 #define MB1H_RELEASE_USB_WAKEUP 0x5
135 /* Mailbox 1 Requests */
136 #define PRCM_REQ_MB1_ARM_OPP (PRCM_REQ_MB1 + 0x0)
137 #define PRCM_REQ_MB1_APE_OPP (PRCM_REQ_MB1 + 0x1)
138 #define PRCM_REQ_MB1_APE_OPP_100_RESTORE (PRCM_REQ_MB1 + 0x4)
139 #define PRCM_REQ_MB1_ARM_OPP_100_RESTORE (PRCM_REQ_MB1 + 0x8)
/* Mailbox 1 acknowledge fields. */
142 #define PRCM_ACK_MB1_CURRENT_ARM_OPP (PRCM_ACK_MB1 + 0x0)
143 #define PRCM_ACK_MB1_CURRENT_APE_OPP (PRCM_ACK_MB1 + 0x1)
144 #define PRCM_ACK_MB1_APE_VOLTAGE_STATUS (PRCM_ACK_MB1 + 0x2)
145 #define PRCM_ACK_MB1_DVFS_STATUS (PRCM_ACK_MB1 + 0x3)
147 /* Mailbox 2 headers */
/* NOTE(review): MB2H_DPS is used by prcmu_set_epod() below but its
 * definition line appears elided in this copy. */
149 #define MB2H_AUTO_PWR 0x1
/* Mailbox 2 request fields: one EPOD state byte per power domain, then
 * the two 32-bit autonomous-PM configuration words. */
152 #define PRCM_REQ_MB2_SVA_MMDSP (PRCM_REQ_MB2 + 0x0)
153 #define PRCM_REQ_MB2_SVA_PIPE (PRCM_REQ_MB2 + 0x1)
154 #define PRCM_REQ_MB2_SIA_MMDSP (PRCM_REQ_MB2 + 0x2)
155 #define PRCM_REQ_MB2_SIA_PIPE (PRCM_REQ_MB2 + 0x3)
156 #define PRCM_REQ_MB2_SGA (PRCM_REQ_MB2 + 0x4)
157 #define PRCM_REQ_MB2_B2R2_MCDE (PRCM_REQ_MB2 + 0x5)
158 #define PRCM_REQ_MB2_ESRAM12 (PRCM_REQ_MB2 + 0x6)
159 #define PRCM_REQ_MB2_ESRAM34 (PRCM_REQ_MB2 + 0x7)
160 #define PRCM_REQ_MB2_AUTO_PM_SLEEP (PRCM_REQ_MB2 + 0x8)
161 #define PRCM_REQ_MB2_AUTO_PM_IDLE (PRCM_REQ_MB2 + 0xC)
/* Mailbox 2 acknowledge: DPS status byte, OK value from the firmware. */
164 #define PRCM_ACK_MB2_DPS_STATUS (PRCM_ACK_MB2 + 0x0)
165 #define HWACC_PWR_ST_OK 0xFE
167 /* Mailbox 3 headers */
169 #define MB3H_SIDETONE 0x1
170 #define MB3H_SYSCLK 0xE
172 /* Mailbox 3 Requests */
173 #define PRCM_REQ_MB3_ANC_FIR_COEFF (PRCM_REQ_MB3 + 0x0)
174 #define PRCM_REQ_MB3_ANC_IIR_COEFF (PRCM_REQ_MB3 + 0x20)
175 #define PRCM_REQ_MB3_ANC_SHIFTER (PRCM_REQ_MB3 + 0x60)
176 #define PRCM_REQ_MB3_ANC_WARP (PRCM_REQ_MB3 + 0x64)
177 #define PRCM_REQ_MB3_SIDETONE_FIR_GAIN (PRCM_REQ_MB3 + 0x68)
178 #define PRCM_REQ_MB3_SIDETONE_FIR_COEFF (PRCM_REQ_MB3 + 0x6C)
179 #define PRCM_REQ_MB3_SYSCLK_MGT (PRCM_REQ_MB3 + 0x16C)
181 /* Mailbox 4 headers */
182 #define MB4H_DDR_INIT 0x0
183 #define MB4H_MEM_ST 0x1
184 #define MB4H_HOTDOG 0x12
185 #define MB4H_HOTMON 0x13
186 #define MB4H_HOT_PERIOD 0x14
/* Note: the MB4 request fields below overlay the same bytes; which layout
 * applies depends on the header sent with the message. */
188 /* Mailbox 4 Requests */
189 #define PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE (PRCM_REQ_MB4 + 0x0)
190 #define PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE (PRCM_REQ_MB4 + 0x1)
191 #define PRCM_REQ_MB4_ESRAM0_ST (PRCM_REQ_MB4 + 0x3)
192 #define PRCM_REQ_MB4_HOTDOG_THRESHOLD (PRCM_REQ_MB4 + 0x0)
193 #define PRCM_REQ_MB4_HOTMON_LOW (PRCM_REQ_MB4 + 0x0)
194 #define PRCM_REQ_MB4_HOTMON_HIGH (PRCM_REQ_MB4 + 0x1)
195 #define PRCM_REQ_MB4_HOTMON_CONFIG (PRCM_REQ_MB4 + 0x2)
196 #define PRCM_REQ_MB4_HOT_PERIOD (PRCM_REQ_MB4 + 0x0)
197 #define HOTMON_CONFIG_LOW BIT(0)
198 #define HOTMON_CONFIG_HIGH BIT(1)
200 /* Mailbox 5 Requests */
201 #define PRCM_REQ_MB5_I2C_SLAVE_OP (PRCM_REQ_MB5 + 0x0)
202 #define PRCM_REQ_MB5_I2C_HW_BITS (PRCM_REQ_MB5 + 0x1)
203 #define PRCM_REQ_MB5_I2C_REG (PRCM_REQ_MB5 + 0x2)
204 #define PRCM_REQ_MB5_I2C_VAL (PRCM_REQ_MB5 + 0x3)
/* Slave-address encoding: address in bits 7:1, bit 0 = read, and on
 * U8500 v2 silicon bit 6 is additionally set (per cpu_is_u8500v2()). */
205 #define PRCMU_I2C_WRITE(slave) \
206 (((slave) << 1) | (cpu_is_u8500v2() ? BIT(6) : 0))
207 #define PRCMU_I2C_READ(slave) \
208 (((slave) << 1) | BIT(0) | (cpu_is_u8500v2() ? BIT(6) : 0))
209 #define PRCMU_I2C_STOP_EN BIT(3)
/* Mailbox 5 acknowledge fields and status codes. */
212 #define PRCM_ACK_MB5_I2C_STATUS (PRCM_ACK_MB5 + 0x1)
213 #define PRCM_ACK_MB5_I2C_VAL (PRCM_ACK_MB5 + 0x3)
214 #define I2C_WR_OK 0x1
215 #define I2C_RD_OK 0x2
/* NOTE(review): MBOX_BIT() and NUM_MB are referenced here but their
 * definitions are not visible in this copy — presumably elided. */
219 #define ALL_MBOX_BITS (MBOX_BIT(NUM_MB) - 1)
/*
 * Wakeup event bits as used in the 32-bit words exchanged with the PRCMU
 * firmware. Bits 16, 21 and 22 are not defined here.
 */
225 #define WAKEUP_BIT_RTC BIT(0)
226 #define WAKEUP_BIT_RTT0 BIT(1)
227 #define WAKEUP_BIT_RTT1 BIT(2)
228 #define WAKEUP_BIT_HSI0 BIT(3)
229 #define WAKEUP_BIT_HSI1 BIT(4)
230 #define WAKEUP_BIT_CA_WAKE BIT(5)
231 #define WAKEUP_BIT_USB BIT(6)
232 #define WAKEUP_BIT_ABB BIT(7)
233 #define WAKEUP_BIT_ABB_FIFO BIT(8)
234 #define WAKEUP_BIT_SYSCLK_OK BIT(9)
235 #define WAKEUP_BIT_CA_SLEEP BIT(10)
236 #define WAKEUP_BIT_AC_WAKE_ACK BIT(11)
237 #define WAKEUP_BIT_SIDE_TONE_OK BIT(12)
238 #define WAKEUP_BIT_ANC_OK BIT(13)
239 #define WAKEUP_BIT_SW_ERROR BIT(14)
240 #define WAKEUP_BIT_AC_SLEEP_ACK BIT(15)
241 #define WAKEUP_BIT_ARM BIT(17)
242 #define WAKEUP_BIT_HOTMON_LOW BIT(18)
243 #define WAKEUP_BIT_HOTMON_HIGH BIT(19)
244 #define WAKEUP_BIT_MODEM_SW_RESET_REQ BIT(20)
245 #define WAKEUP_BIT_GPIO0 BIT(23)
246 #define WAKEUP_BIT_GPIO1 BIT(24)
247 #define WAKEUP_BIT_GPIO2 BIT(25)
248 #define WAKEUP_BIT_GPIO3 BIT(26)
249 #define WAKEUP_BIT_GPIO4 BIT(27)
250 #define WAKEUP_BIT_GPIO5 BIT(28)
251 #define WAKEUP_BIT_GPIO6 BIT(29)
252 #define WAKEUP_BIT_GPIO7 BIT(30)
253 #define WAKEUP_BIT_GPIO8 BIT(31)
256 * This vector maps irq numbers to the bits in the bit field used in
257 * communication with the PRCMU firmware.
259 * The reason for having this is to keep the irq numbers contiguous even though
260 * the bits in the bit field are not. (The bits also have a tendency to move
261 * around, to further complicate matters.)
/* Designated-initializer helpers keyed off the IRQ_PRCMU_* numbering. */
263 #define IRQ_INDEX(_name) ((IRQ_PRCMU_##_name) - IRQ_PRCMU_BASE)
264 #define IRQ_ENTRY(_name)[IRQ_INDEX(_name)] = (WAKEUP_BIT_##_name)
/* NOTE(review): most IRQ_ENTRY() initializers and the closing "};" of this
 * array appear elided in this copy — only three entries are visible. */
265 static u32 prcmu_irq_bit[NUM_PRCMU_WAKEUPS] = {
277 IRQ_ENTRY(HOTMON_LOW),
278 IRQ_ENTRY(HOTMON_HIGH),
279 IRQ_ENTRY(MODEM_SW_RESET_REQ),
/* Mapping from PRCMU_WAKEUP_INDEX_* to the firmware wakeup bit. */
291 #define VALID_WAKEUPS (BIT(NUM_PRCMU_WAKEUP_INDICES) - 1)
292 #define WAKEUP_ENTRY(_name)[PRCMU_WAKEUP_INDEX_##_name] = (WAKEUP_BIT_##_name)
/* NOTE(review): as above, most WAKEUP_ENTRY() initializers and the array
 * terminator appear elided here. */
293 static u32 prcmu_wakeup_bit[NUM_PRCMU_WAKEUP_INDICES] = {
301 WAKEUP_ENTRY(ABB_FIFO),
/*
 * Per-mailbox transfer state (mb0_transfer .. mb5_transfer).
 *
 * NOTE(review): the kernel-doc openers, the "static struct mbX_transfer {"
 * lines, several members and the closing "} mbX_transfer;" lines appear
 * elided in this copy — only fragments of each declaration are visible.
 */
306 * mb0_transfer - state needed for mailbox 0 communication.
307 * @lock: The transaction lock.
308 * @dbb_events_lock: A lock used to handle concurrent access to (parts of)
310 * @mask_work: Work structure used for (un)masking wakeup interrupts.
311 * @req: Request data that need to persist between requests.
315 spinlock_t dbb_irqs_lock;
316 struct work_struct mask_work;
317 struct mutex ac_wake_lock;
318 struct completion ac_wake_work;
327 * mb1_transfer - state needed for mailbox 1 communication.
328 * @lock: The transaction lock.
329 * @work: The transaction completion structure.
330 * @ack: Reply ("acknowledge") data.
334 struct completion work;
339 u8 ape_voltage_status;
344 * mb2_transfer - state needed for mailbox 2 communication.
345 * @lock: The transaction lock.
346 * @work: The transaction completion structure.
347 * @auto_pm_lock: The autonomous power management configuration lock.
348 * @auto_pm_enabled: A flag indicating whether autonomous PM is enabled.
349 * @req: Request data that need to persist between requests.
350 * @ack: Reply ("acknowledge") data.
354 struct completion work;
355 spinlock_t auto_pm_lock;
356 bool auto_pm_enabled;
363 * mb3_transfer - state needed for mailbox 3 communication.
364 * @lock: The request lock.
365 * @sysclk_lock: A lock used to handle concurrent sysclk requests.
366 * @sysclk_work: Work structure used for sysclk requests.
370 struct mutex sysclk_lock;
371 struct completion sysclk_work;
375 * mb4_transfer - state needed for mailbox 4 communication.
376 * @lock: The transaction lock.
377 * @work: The transaction completion structure.
381 struct completion work;
385 * mb5_transfer - state needed for mailbox 5 communication.
386 * @lock: The transaction lock.
387 * @work: The transaction completion structure.
388 * @ack: Reply ("acknowledge") data.
392 struct completion work;
/* Tracks whether an AC wake request is currently outstanding. */
399 static atomic_t ac_wake_req_state = ATOMIC_INIT(0);
/* Protect the CLKOUT and GPIOCR register read-modify-write sequences. */
402 static DEFINE_SPINLOCK(clkout_lock);
403 static DEFINE_SPINLOCK(gpiocr_lock);
405 /* Global var to runtime determine TCDM base for v2 or v1 */
406 static __iomem void *tcdm_base;
/* Serializes all accesses that take the PRCM HW semaphore (clk_mgt regs). */
413 static DEFINE_SPINLOCK(clk_mgt_lock);
/*
 * Table of PRCM clock-management register offsets, indexed by PRCMU clock
 * id. The second initializer (0) is the cached PLLSW bits, saved by
 * request_reg_clock() when a clock is disabled and restored on enable.
 * NOTE(review): the struct clk_mgt definition and the closing "};" of this
 * array appear elided in this copy.
 */
415 #define CLK_MGT_ENTRY(_name)[PRCMU_##_name] = { (PRCM_##_name##_MGT_OFF), 0 }
416 struct clk_mgt clk_mgt[PRCMU_NUM_REG_CLOCKS] = {
417 CLK_MGT_ENTRY(SGACLK),
418 CLK_MGT_ENTRY(UARTCLK),
419 CLK_MGT_ENTRY(MSP02CLK),
420 CLK_MGT_ENTRY(MSP1CLK),
421 CLK_MGT_ENTRY(I2CCLK),
422 CLK_MGT_ENTRY(SDMMCCLK),
423 CLK_MGT_ENTRY(SLIMCLK),
424 CLK_MGT_ENTRY(PER1CLK),
425 CLK_MGT_ENTRY(PER2CLK),
426 CLK_MGT_ENTRY(PER3CLK),
427 CLK_MGT_ENTRY(PER5CLK),
428 CLK_MGT_ENTRY(PER6CLK),
429 CLK_MGT_ENTRY(PER7CLK),
430 CLK_MGT_ENTRY(LCDCLK),
431 CLK_MGT_ENTRY(BMLCLK),
432 CLK_MGT_ENTRY(HSITXCLK),
433 CLK_MGT_ENTRY(HSIRXCLK),
434 CLK_MGT_ENTRY(HDMICLK),
435 CLK_MGT_ENTRY(APEATCLK),
436 CLK_MGT_ENTRY(APETRACECLK),
437 CLK_MGT_ENTRY(MCDECLK),
438 CLK_MGT_ENTRY(IPI2CCLK),
439 CLK_MGT_ENTRY(DSIALTCLK),
440 CLK_MGT_ENTRY(DMACLK),
441 CLK_MGT_ENTRY(B2R2CLK),
442 CLK_MGT_ENTRY(TVCLK),
443 CLK_MGT_ENTRY(SSPCLK),
444 CLK_MGT_ENTRY(RNGCLK),
445 CLK_MGT_ENTRY(UICCCLK),
449 * Used by MCDE to setup all necessary PRCMU registers
451 #define PRCMU_RESET_DSIPLL 0x00004000
452 #define PRCMU_UNCLAMP_DSIPLL 0x00400800
/* Field layout of the PRCM clock-management registers programmed below. */
454 #define PRCMU_CLK_PLL_DIV_SHIFT 0
455 #define PRCMU_CLK_PLL_SW_SHIFT 5
456 #define PRCMU_CLK_38 (1 << 9)
457 #define PRCMU_CLK_38_SRC (1 << 10)
458 #define PRCMU_CLK_38_DIV (1 << 11)
460 /* PLLDIV=12, PLLSW=4 (PLLDDR) */
461 #define PRCMU_DSI_CLOCK_SETTING 0x0000008C
463 /* PLLDIV=8, PLLSW=4 (PLLDDR) */
464 #define PRCMU_DSI_CLOCK_SETTING_U8400 0x00000088
466 /* DPI 50000000 Hz */
467 #define PRCMU_DPI_CLOCK_SETTING ((1 << PRCMU_CLK_PLL_SW_SHIFT) | \
468 (16 << PRCMU_CLK_PLL_DIV_SHIFT))
469 #define PRCMU_DSI_LP_CLOCK_SETTING 0x00000E00
471 /* D=101, N=1, R=4, SELDIV2=0 */
472 #define PRCMU_PLLDSI_FREQ_SETTING 0x00040165
474 /* D=70, N=1, R=3, SELDIV2=0 */
475 #define PRCMU_PLLDSI_FREQ_SETTING_U8400 0x00030146
477 #define PRCMU_ENABLE_PLLDSI 0x00000001
478 #define PRCMU_DISABLE_PLLDSI 0x00000000
479 #define PRCMU_RELEASE_RESET_DSS 0x0000400C
480 #define PRCMU_DSI_PLLOUT_SEL_SETTING 0x00000202
481 /* ESC clk, div0=1, div1=1, div2=3 */
482 #define PRCMU_ENABLE_ESCAPE_CLOCK_DIV 0x07030101
483 #define PRCMU_DISABLE_ESCAPE_CLOCK_DIV 0x00030101
484 #define PRCMU_DSI_RESET_SW 0x00000007
/* Both LOCKP bits must be set for the DSI PLL to be considered locked. */
486 #define PRCMU_PLLDSI_LOCKP_LOCKED 0x3
/*
 * prcmu_enable_dsipll() - power up and lock the DSI PLL.
 * Releases the DSIPLL reset, unclamps the PLL I/Os, programs the PLL
 * frequency (U8400 uses a different setting), enables the escape clocks,
 * enables the PLL, and polls PRCM_PLLDSI_LOCKP up to 10 times for lock.
 * NOTE(review): the opening brace, the declaration of loop counter 'i',
 * the loop exit and the return statements appear elided in this copy —
 * verify against upstream before editing.
 */
496 int prcmu_enable_dsipll(void)
499 unsigned int plldsifreq;
501 /* Clear DSIPLL_RESETN */
502 writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_CLR);
503 /* Unclamp DSIPLL in/out */
504 writel(PRCMU_UNCLAMP_DSIPLL, PRCM_MMIP_LS_CLAMP_CLR);
/* U8400 silicon needs a different PLL divider (see defines above). */
506 if (prcmu_is_u8400())
507 plldsifreq = PRCMU_PLLDSI_FREQ_SETTING_U8400;
509 plldsifreq = PRCMU_PLLDSI_FREQ_SETTING;
510 /* Set DSI PLL FREQ */
511 writel(plldsifreq, PRCM_PLLDSI_FREQ);
512 writel(PRCMU_DSI_PLLOUT_SEL_SETTING, PRCM_DSI_PLLOUT_SEL);
513 /* Enable Escape clocks */
514 writel(PRCMU_ENABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
/* Enable the PLL, then reset the DSI link and wait for PLL lock. */
517 writel(PRCMU_ENABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
519 writel(PRCMU_DSI_RESET_SW, PRCM_DSI_SW_RESET);
520 for (i = 0; i < 10; i++) {
521 if ((readl(PRCM_PLLDSI_LOCKP) & PRCMU_PLLDSI_LOCKP_LOCKED)
522 == PRCMU_PLLDSI_LOCKP_LOCKED)
526 /* Set DSIPLL_RESETN */
527 writel(PRCMU_RESET_DSIPLL, PRCM_APE_RESETN_SET)
/*
 * prcmu_disable_dsipll() - power down the DSI PLL and its escape clocks.
 * NOTE(review): the opening/closing braces and the return statement appear
 * elided in this copy.
 */
531 int prcmu_disable_dsipll(void)
533 /* Disable dsi pll */
534 writel(PRCMU_DISABLE_PLLDSI, PRCM_PLLDSI_ENABLE);
535 /* Disable escapeclock */
536 writel(PRCMU_DISABLE_ESCAPE_CLOCK_DIV, PRCM_DSITVCLK_DIV);
/*
 * prcmu_set_display_clocks() - program the HDMI, TV and LCD clock
 * management registers for the display (MCDE) path.
 * Runs with clk_mgt_lock held and interrupts disabled, and busy-waits for
 * the PRCM hardware semaphore before touching the registers.
 * NOTE(review): local declarations (dsiclk, flags), the semaphore release
 * write and the return statement appear elided in this copy — the
 * "Release the HW semaphore" comment has no visible writel after it.
 */
540 int prcmu_set_display_clocks(void)
545 if (prcmu_is_u8400())
546 dsiclk = PRCMU_DSI_CLOCK_SETTING_U8400;
548 dsiclk = PRCMU_DSI_CLOCK_SETTING;
550 spin_lock_irqsave(&clk_mgt_lock, flags);
552 /* Grab the HW semaphore. */
553 while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
556 writel(dsiclk, PRCM_HDMICLK_MGT);
557 writel(PRCMU_DSI_LP_CLOCK_SETTING, PRCM_TVCLK_MGT);
558 writel(PRCMU_DPI_CLOCK_SETTING, PRCM_LCDCLK_MGT);
560 /* Release the HW semaphore. */
563 spin_unlock_irqrestore(&clk_mgt_lock, flags);
569 * prcmu_enable_spi2 - Enables pin muxing for SPI2 on OtherAlternateC1.
/* Read-modify-write of PRCM_GPIOCR under gpiocr_lock: set the SPI2 select
 * bit. NOTE(review): braces and the declarations of 'reg'/'flags' appear
 * elided in this copy. */
571 void prcmu_enable_spi2(void)
576 spin_lock_irqsave(&gpiocr_lock, flags);
577 reg = readl(PRCM_GPIOCR);
578 writel(reg | PRCM_GPIOCR_SPI2_SELECT, PRCM_GPIOCR);
579 spin_unlock_irqrestore(&gpiocr_lock, flags);
583 * prcmu_disable_spi2 - Disables pin muxing for SPI2 on OtherAlternateC1.
/* Mirror image of prcmu_enable_spi2(): clear the SPI2 select bit. */
585 void prcmu_disable_spi2(void)
590 spin_lock_irqsave(&gpiocr_lock, flags);
591 reg = readl(PRCM_GPIOCR);
592 writel(reg & ~PRCM_GPIOCR_SPI2_SELECT, PRCM_GPIOCR);
593 spin_unlock_irqrestore(&gpiocr_lock, flags);
/*
 * prcmu_has_arm_maxopp() - true if the firmware's AVS data marks the ARM
 * MAX OPP mode as enabled (ISMODEENABLE bit of the VARM_MAX_OPP byte).
 */
596 bool prcmu_has_arm_maxopp(void)
598 return (readb(tcdm_base + PRCM_AVS_VARM_MAX_OPP) &
599 PRCM_AVS_ISMODEENABLE_MASK) == PRCM_AVS_ISMODEENABLE_MASK;
/*
 * prcmu_is_u8400() - true when the firmware project id reports U8400.
 * NOTE(review): 'prcmu_version' is not declared in the visible portion of
 * this file — presumably elided; confirm its definition upstream.
 */
602 bool prcmu_is_u8400(void)
604 return prcmu_version.project_number == PRCMU_PROJECT_ID_8400V2_0;
/* Simple TCDM byte accessors for the boot/romcode handshake area. */
608 * prcmu_get_boot_status - PRCMU boot status checking
609 * Returns: the current PRCMU boot status
611 int prcmu_get_boot_status(void)
613 return readb(tcdm_base + PRCM_BOOT_STATUS);
617 * prcmu_set_rc_a2p - This function is used to run few power state sequences
618 * @val: Value to be set, i.e. transition requested
619 * Returns: 0 on success, -EINVAL on invalid argument
621 * This function is used to run the following power state sequences -
622 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
/* NOTE(review): the -EINVAL return for the range check and the final
 * "return 0;" appear elided in this copy. */
624 int prcmu_set_rc_a2p(enum romcode_write val)
626 if (val < RDY_2_DS || val > RDY_2_XP70_RST)
628 writeb(val, (tcdm_base + PRCM_ROMCODE_A2P));
633 * prcmu_get_rc_p2a - This function is used to get power state sequences
634 * Returns: the power transition that has last happened
636 * This function can return the following transitions-
637 * any state to ApReset, ApDeepSleep to ApExecute, ApExecute to ApDeepSleep
639 enum romcode_read prcmu_get_rc_p2a(void)
641 return readb(tcdm_base + PRCM_ROMCODE_P2A);
645 * prcmu_get_current_mode - Return the current XP70 power mode
646 * Returns: Returns the current AP(ARM) power mode: init,
647 * apBoot, apExecute, apDeepSleep, apSleep, apIdle, apReset
649 enum ap_pwrst prcmu_get_xp70_current_state(void)
651 return readb(tcdm_base + PRCM_XP70_CUR_PWR_STATE);
655 * prcmu_config_clkout - Configure one of the programmable clock outputs.
656 * @clkout: The CLKOUT number (0 or 1).
657 * @source: The clock to be used (one of the PRCMU_CLKSRC_*).
658 * @div: The divider to be applied.
660 * Configures one of the programmable clock outputs (CLKOUTs).
661 * @div should be in the range [1,63] to request a configuration, or 0 to
662 * inform that the configuration is no longer requested.
/*
 * Keeps a per-CLKOUT reference count in 'requests' so that a CLKOUT is
 * only reconfigured when all previous requesters agree, and released when
 * the count drops back to zero.
 * NOTE(review): the switch(clkout) statement, its case labels, local
 * declarations (flags, val, bits, mask, div_mask) and all return paths
 * appear elided in this copy — only the case bodies are visible below.
 */
664 int prcmu_config_clkout(u8 clkout, u8 source, u8 div)
666 static int requests[2];
/* CLKOUT0 cannot source anything beyond PRCMU_CLKSRC_CLK009. */
676 BUG_ON((clkout == 0) && (source > PRCMU_CLKSRC_CLK009));
/* Releasing (div == 0) a CLKOUT that was never requested is a no-op. */
678 if (!div && !requests[clkout])
/* CLKOUT0 field layout. */
683 div_mask = PRCM_CLKOCR_CLKODIV0_MASK;
684 mask = (PRCM_CLKOCR_CLKODIV0_MASK | PRCM_CLKOCR_CLKOSEL0_MASK);
685 bits = ((source << PRCM_CLKOCR_CLKOSEL0_SHIFT) |
686 (div << PRCM_CLKOCR_CLKODIV0_SHIFT));
/* CLKOUT1 field layout (includes the CLK1TYPE bit). */
689 div_mask = PRCM_CLKOCR_CLKODIV1_MASK;
690 mask = (PRCM_CLKOCR_CLKODIV1_MASK | PRCM_CLKOCR_CLKOSEL1_MASK |
691 PRCM_CLKOCR_CLK1TYPE);
692 bits = ((source << PRCM_CLKOCR_CLKOSEL1_SHIFT) |
693 (div << PRCM_CLKOCR_CLKODIV1_SHIFT));
698 spin_lock_irqsave(&clkout_lock, flags);
700 val = readl(PRCM_CLKOCR);
/* If the divider is already set, any new request must match exactly. */
701 if (val & div_mask) {
703 if ((val & mask) != bits) {
705 goto unlock_and_return;
/* Otherwise the source selection (sans divider) must match. */
708 if ((val & mask & ~div_mask) != bits) {
710 goto unlock_and_return;
714 writel((bits | (val & ~mask)), PRCM_CLKOCR);
/* div != 0 adds a requester, div == 0 drops one. */
715 requests[clkout] += (div ? 1 : -1);
718 spin_unlock_irqrestore(&clkout_lock, flags);
/*
 * prcmu_set_power_state() - request an AP power state transition via MB0.
 * @state: target state, must be within [PRCMU_AP_SLEEP, PRCMU_AP_DEEP_IDLE].
 * @keep_ulp_clk: keep the ULP clock running in the target state.
 * @keep_ap_pll: keep the AP PLL running in the target state.
 * Busy-waits until mailbox 0 is free, fills in the request bytes in TCDM
 * and kicks the firmware through PRCM_MBOX_CPU_SET.
 * NOTE(review): braces, the 'flags' declaration and the return statement
 * appear elided in this copy.
 */
723 int prcmu_set_power_state(u8 state, bool keep_ulp_clk, bool keep_ap_pll)
727 BUG_ON((state < PRCMU_AP_SLEEP) || (PRCMU_AP_DEEP_IDLE < state));
729 spin_lock_irqsave(&mb0_transfer.lock, flags);
/* Spin until the firmware has consumed any pending MB0 message. */
731 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
734 writeb(MB0H_POWER_STATE_TRANS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
735 writeb(state, (tcdm_base + PRCM_REQ_MB0_AP_POWER_STATE));
736 writeb((keep_ap_pll ? 1 : 0), (tcdm_base + PRCM_REQ_MB0_AP_PLL_STATE));
737 writeb((keep_ulp_clk ? 1 : 0),
738 (tcdm_base + PRCM_REQ_MB0_ULP_CLOCK_STATE));
739 writeb(0, (tcdm_base + PRCM_REQ_MB0_DO_NOT_WFI));
740 writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
742 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
747 /* This function should only be called while mb0_transfer.lock is held. */
/*
 * config_wakeups() - push the current DBB/ABB wakeup configuration to the
 * firmware, once for the EXE state and once for the SLEEP state.
 * Caches the last-sent values and returns early when nothing changed.
 * NOTE(review): the declarations of dbb_events, abb_events and 'i', plus
 * braces and the early-return line, appear elided in this copy.
 */
748 static void config_wakeups(void)
750 const u8 header[2] = {
751 MB0H_CONFIG_WAKEUPS_EXE,
752 MB0H_CONFIG_WAKEUPS_SLEEP
754 static u32 last_dbb_events;
755 static u32 last_abb_events;
/* The AC wake/sleep acks are always enabled in addition to the
 * configured irqs and wakeups. */
760 dbb_events = mb0_transfer.req.dbb_irqs | mb0_transfer.req.dbb_wakeups;
761 dbb_events |= (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK);
763 abb_events = mb0_transfer.req.abb_events;
765 if ((dbb_events == last_dbb_events) && (abb_events == last_abb_events))
/* Send one MB0 message per header (EXE then SLEEP). */
768 for (i = 0; i < 2; i++) {
769 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
771 writel(dbb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_8500));
772 writel(abb_events, (tcdm_base + PRCM_REQ_MB0_WAKEUP_4500));
773 writeb(header[i], (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
774 writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
776 last_dbb_events = dbb_events;
777 last_abb_events = abb_events;
/*
 * prcmu_enable_wakeups() - translate PRCMU_WAKEUP_INDEX_* bits into the
 * firmware's wakeup-bit encoding and store them under mb0_transfer.lock.
 * NOTE(review): the call into config_wakeups() that presumably follows the
 * store appears elided in this copy, as do the local declarations.
 */
780 void prcmu_enable_wakeups(u32 wakeups)
786 BUG_ON(wakeups != (wakeups & VALID_WAKEUPS));
788 for (i = 0, bits = 0; i < NUM_PRCMU_WAKEUP_INDICES; i++) {
789 if (wakeups & BIT(i))
790 bits |= prcmu_wakeup_bit[i];
793 spin_lock_irqsave(&mb0_transfer.lock, flags);
795 mb0_transfer.req.dbb_wakeups = bits;
798 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
/*
 * prcmu_config_abb_event_readout() - record which ABB (AB8500) events the
 * firmware should report; stored under the same MB0 lock.
 */
801 void prcmu_config_abb_event_readout(u32 abb_events)
805 spin_lock_irqsave(&mb0_transfer.lock, flags);
807 mb0_transfer.req.abb_events = abb_events;
810 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
/*
 * prcmu_get_abb_event_buffer() - return the currently active ABB event
 * buffer; bit 0 of the MB0 read pointer selects buffer 1 vs buffer 0.
 */
813 void prcmu_get_abb_event_buffer(void __iomem **buf)
815 if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
816 *buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_1_4500);
818 *buf = (tcdm_base + PRCM_ACK_MB0_WAKEUP_0_4500);
822 * prcmu_set_arm_opp - set the appropriate ARM OPP
823 * @opp: The new ARM operating point to which transition is to be made
824 * Returns: 0 on success, non-zero on failure
826 * This function sets the the operating point of the ARM.
/*
 * Synchronous MB1 transaction: send ARM OPP (APE unchanged), wait for the
 * completion signalled by the ack handler, then verify the echoed header
 * and OPP. NOTE(review): the result variable 'r', its error assignment and
 * the return statement appear elided in this copy.
 */
828 int prcmu_set_arm_opp(u8 opp)
832 if (opp < ARM_NO_CHANGE || opp > ARM_EXTCLK)
837 mutex_lock(&mb1_transfer.lock);
839 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
842 writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
843 writeb(opp, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
844 writeb(APE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
846 writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
847 wait_for_completion(&mb1_transfer.work);
849 if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
850 (mb1_transfer.ack.arm_opp != opp))
853 mutex_unlock(&mb1_transfer.lock);
859 * prcmu_get_arm_opp - get the current ARM OPP
861 * Returns: the current ARM OPP
/* Reads the last acknowledged ARM OPP directly from the TCDM. */
863 int prcmu_get_arm_opp(void)
865 return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_ARM_OPP);
869 * prcmu_get_ddr_opp - get the current DDR OPP
871 * Returns: the current DDR OPP
873 int prcmu_get_ddr_opp(void)
875 return readb(PRCM_DDR_SUBSYS_APE_MINBW);
879 * set_ddr_opp - set the appropriate DDR OPP
880 * @opp: The new DDR operating point to which transition is to be made
881 * Returns: 0 on success, non-zero on failure
883 * This function sets the operating point of the DDR.
/* NOTE(review): the -EINVAL and success return statements appear elided;
 * the silicon-revision guard skips the write on affected chips. */
885 int prcmu_set_ddr_opp(u8 opp)
887 if (opp < DDR_100_OPP || opp > DDR_25_OPP)
889 /* Changing the DDR OPP can hang the hardware pre-v21 */
890 if (cpu_is_u8500v20_or_later() && !cpu_is_u8500v20())
891 writeb(opp, PRCM_DDR_SUBSYS_APE_MINBW);
896 * set_ape_opp - set the appropriate APE OPP
897 * @opp: The new APE operating point to which transition is to be made
898 * Returns: 0 on success, non-zero on failure
900 * This function sets the operating point of the APE.
/*
 * Mirror of prcmu_set_arm_opp(): synchronous MB1 transaction setting the
 * APE OPP with the ARM OPP unchanged, verified against the ack data.
 * NOTE(review): the result variable and return statements appear elided.
 */
902 int prcmu_set_ape_opp(u8 opp)
906 mutex_lock(&mb1_transfer.lock);
908 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
911 writeb(MB1H_ARM_APE_OPP, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
912 writeb(ARM_NO_CHANGE, (tcdm_base + PRCM_REQ_MB1_ARM_OPP));
913 writeb(opp, (tcdm_base + PRCM_REQ_MB1_APE_OPP));
915 writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
916 wait_for_completion(&mb1_transfer.work);
918 if ((mb1_transfer.ack.header != MB1H_ARM_APE_OPP) ||
919 (mb1_transfer.ack.ape_opp != opp))
922 mutex_unlock(&mb1_transfer.lock);
928 * prcmu_get_ape_opp - get the current APE OPP
930 * Returns: the current APE OPP
/* Reads the last acknowledged APE OPP directly from the TCDM. */
932 int prcmu_get_ape_opp(void)
934 return readb(tcdm_base + PRCM_ACK_MB1_CURRENT_APE_OPP);
938 * prcmu_request_ape_opp_100_voltage - Request APE OPP 100% voltage
939 * @enable: true to request the higher voltage, false to drop a request.
941 * Calls to this function to enable and disable requests must be balanced.
/*
 * Reference-counted MB1 transaction: only the first enable and the last
 * release actually message the firmware; intermediate calls just adjust
 * the static 'requests' counter and return.
 * NOTE(review): the if/else structure around the counter, the counter
 * increments, and the return statements appear elided in this copy —
 * only the goto targets and fall-through bodies are visible.
 */
943 int prcmu_request_ape_opp_100_voltage(bool enable)
947 static unsigned int requests;
949 mutex_lock(&mb1_transfer.lock);
953 goto unlock_and_return;
954 header = MB1H_REQUEST_APE_OPP_100_VOLT;
958 goto unlock_and_return;
959 } else if (1 != requests--) {
960 goto unlock_and_return;
962 header = MB1H_RELEASE_APE_OPP_100_VOLT;
965 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
968 writeb(header, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
970 writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
971 wait_for_completion(&mb1_transfer.work);
/* Bit 0 of the voltage status must be clear on success. */
973 if ((mb1_transfer.ack.header != header) ||
974 ((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
978 mutex_unlock(&mb1_transfer.lock);
984 * prcmu_release_usb_wakeup_state - release the state required by a USB wakeup
986 * This function releases the power state requirements of a USB wakeup.
/*
 * Synchronous MB1 transaction with no payload beyond the header; success
 * is checked via the echoed header and voltage status bit 0.
 * NOTE(review): result variable and return statements appear elided.
 */
988 int prcmu_release_usb_wakeup_state(void)
992 mutex_lock(&mb1_transfer.lock);
994 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
997 writeb(MB1H_RELEASE_USB_WAKEUP,
998 (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
1000 writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
1001 wait_for_completion(&mb1_transfer.work);
1003 if ((mb1_transfer.ack.header != MB1H_RELEASE_USB_WAKEUP) ||
1004 ((mb1_transfer.ack.ape_voltage_status & BIT(0)) != 0))
1007 mutex_unlock(&mb1_transfer.lock);
1013 * prcmu_set_epod - set the state of a EPOD (power domain)
1014 * @epod_id: The EPOD to set
1015 * @epod_state: The new EPOD state
1017 * This function sets the state of a EPOD (power domain). It may not be called
1018 * from interrupt context.
/*
 * Fills the whole MB2 EPOD array with EPOD_STATE_NO_CHANGE, sets only the
 * requested domain, and performs a synchronous MB2 transaction with a 20 s
 * timeout. RAM retention is only a legal target for the DSP/eSRAM domains
 * listed in the switch below.
 * NOTE(review): the "switch (epod_id)" line, local declarations (r, i) and
 * return statements appear elided in this copy.
 */
1020 int prcmu_set_epod(u16 epod_id, u8 epod_state)
1023 bool ram_retention = false;
1026 /* check argument */
1027 BUG_ON(epod_id >= NUM_EPOD_ID);
1029 /* set flag if retention is possible */
1031 case EPOD_ID_SVAMMDSP:
1032 case EPOD_ID_SIAMMDSP:
1033 case EPOD_ID_ESRAM12:
1034 case EPOD_ID_ESRAM34:
1035 ram_retention = true;
1039 /* check argument */
1040 BUG_ON(epod_state > EPOD_STATE_ON);
1041 BUG_ON(epod_state == EPOD_STATE_RAMRET && !ram_retention);
1044 mutex_lock(&mb2_transfer.lock);
1046 /* wait for mailbox */
1047 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(2))
1050 /* fill in mailbox */
1051 for (i = 0; i < NUM_EPOD_ID; i++)
1052 writeb(EPOD_STATE_NO_CHANGE, (tcdm_base + PRCM_REQ_MB2 + i));
1053 writeb(epod_state, (tcdm_base + PRCM_REQ_MB2 + epod_id));
1055 writeb(MB2H_DPS, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB2));
1057 writel(MBOX_BIT(2), PRCM_MBOX_CPU_SET);
1060 * The current firmware version does not handle errors correctly,
1061 * and we cannot recover if there is an error.
1062 * This is expected to change when the firmware is updated.
1064 if (!wait_for_completion_timeout(&mb2_transfer.work,
1065 msecs_to_jiffies(20000))) {
1066 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1069 goto unlock_and_return;
/* Firmware reports HWACC_PWR_ST_OK (0xFE) on success. */
1072 if (mb2_transfer.ack.status != HWACC_PWR_ST_OK)
1076 mutex_unlock(&mb2_transfer.lock);
1081 * prcmu_configure_auto_pm - Configure autonomous power management.
1082 * @sleep: Configuration for ApSleep.
1083 * @idle: Configuration for ApIdle.
/*
 * Packs each config into one 32-bit word (4-bit SVA/SIA enables, 8-bit
 * power-on values, 4-bit policies, most-significant first) and writes both
 * words into the MB2 shared area — no mailbox message is sent; the
 * firmware reads these as shared variables.
 * NOTE(review): declarations of sleep_cfg/idle_cfg appear elided.
 */
1085 void prcmu_configure_auto_pm(struct prcmu_auto_pm_config *sleep,
1086 struct prcmu_auto_pm_config *idle)
1090 unsigned long flags;
1092 BUG_ON((sleep == NULL) || (idle == NULL));
1094 sleep_cfg = (sleep->sva_auto_pm_enable & 0xF);
1095 sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_auto_pm_enable & 0xF));
1096 sleep_cfg = ((sleep_cfg << 8) | (sleep->sva_power_on & 0xFF));
1097 sleep_cfg = ((sleep_cfg << 8) | (sleep->sia_power_on & 0xFF));
1098 sleep_cfg = ((sleep_cfg << 4) | (sleep->sva_policy & 0xF));
1099 sleep_cfg = ((sleep_cfg << 4) | (sleep->sia_policy & 0xF));
1101 idle_cfg = (idle->sva_auto_pm_enable & 0xF);
1102 idle_cfg = ((idle_cfg << 4) | (idle->sia_auto_pm_enable & 0xF));
1103 idle_cfg = ((idle_cfg << 8) | (idle->sva_power_on & 0xFF));
1104 idle_cfg = ((idle_cfg << 8) | (idle->sia_power_on & 0xFF));
1105 idle_cfg = ((idle_cfg << 4) | (idle->sva_policy & 0xF));
1106 idle_cfg = ((idle_cfg << 4) | (idle->sia_policy & 0xF));
1108 spin_lock_irqsave(&mb2_transfer.auto_pm_lock, flags);
1111 * The autonomous power management configuration is done through
1112 * fields in mailbox 2, but these fields are only used as shared
1113 * variables - i.e. there is no need to send a message.
1115 writel(sleep_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_SLEEP));
1116 writel(idle_cfg, (tcdm_base + PRCM_REQ_MB2_AUTO_PM_IDLE));
/* Remember whether any of the four enables is ON, for
 * prcmu_is_auto_pm_enabled(). */
1118 mb2_transfer.auto_pm_enabled =
1119 ((sleep->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1120 (sleep->sia_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1121 (idle->sva_auto_pm_enable == PRCMU_AUTO_PM_ON) ||
1122 (idle->sia_auto_pm_enable == PRCMU_AUTO_PM_ON));
1124 spin_unlock_irqrestore(&mb2_transfer.auto_pm_lock, flags);
1126 EXPORT_SYMBOL(prcmu_configure_auto_pm);
/* Returns the flag cached by prcmu_configure_auto_pm(). */
1128 bool prcmu_is_auto_pm_enabled(void)
1130 return mb2_transfer.auto_pm_enabled;
/*
 * request_sysclk() - enable or disable SysClk via mailbox 3.
 * Serialized by sysclk_lock; the MB3 write itself is additionally done
 * under mb3_transfer.lock with interrupts off. Per the firmware contract
 * noted below, an ACK only arrives for a successful enable, so only the
 * enable path waits (20 s timeout).
 * NOTE(review): the result variable 'r', its assignments and the return
 * statement appear elided in this copy.
 */
1133 static int request_sysclk(bool enable)
1136 unsigned long flags;
1140 mutex_lock(&mb3_transfer.sysclk_lock);
1142 spin_lock_irqsave(&mb3_transfer.lock, flags);
1144 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(3))
1147 writeb((enable ? ON : OFF), (tcdm_base + PRCM_REQ_MB3_SYSCLK_MGT));
1149 writeb(MB3H_SYSCLK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB3));
1150 writel(MBOX_BIT(3), PRCM_MBOX_CPU_SET);
1152 spin_unlock_irqrestore(&mb3_transfer.lock, flags);
1155 * The firmware only sends an ACK if we want to enable the
1156 * SysClk, and it succeeds.
1158 if (enable && !wait_for_completion_timeout(&mb3_transfer.sysclk_work,
1159 msecs_to_jiffies(20000))) {
1160 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1165 mutex_unlock(&mb3_transfer.sysclk_lock);
/*
 * request_timclk() - enable or disable TIMCLK via the PRCM_TCR register.
 * NOTE(review): the "if (!enable)" guard before the STOP_TIMERS line and
 * the return statement appear elided in this copy — as shown, val would
 * unconditionally get PRCM_TCR_STOP_TIMERS; verify against upstream.
 */
1170 static int request_timclk(bool enable)
1172 u32 val = (PRCM_TCR_DOZE_MODE | PRCM_TCR_TENSEL_MASK);
1175 val |= PRCM_TCR_STOP_TIMERS;
1176 writel(val, PRCM_TCR);
/*
 * request_reg_clock() - enable or disable a register-controlled clock.
 * Read-modify-write of the clock's PRCM_*_MGT register under both the
 * clk_mgt_lock spinlock and the PRCM hardware semaphore. On disable the
 * PLLSW selection bits are cached in clk_mgt[clock].pllsw so that the
 * original source can be restored on the next enable.
 * NOTE(review): the if/else around the enable and disable branches and the
 * return statement appear elided in this copy.
 */
1181 static int request_reg_clock(u8 clock, bool enable)
1184 unsigned long flags;
1186 spin_lock_irqsave(&clk_mgt_lock, flags);
1188 /* Grab the HW semaphore. */
1189 while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
1192 val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
/* Enable: set CLKEN and restore the cached PLL source. */
1194 val |= (PRCM_CLK_MGT_CLKEN | clk_mgt[clock].pllsw);
/* Disable: remember the PLL source, then clear CLKEN and PLLSW. */
1196 clk_mgt[clock].pllsw = (val & PRCM_CLK_MGT_CLKPLLSW_MASK);
1197 val &= ~(PRCM_CLK_MGT_CLKEN | PRCM_CLK_MGT_CLKPLLSW_MASK);
1199 writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
1201 /* Release the HW semaphore. */
1202 writel(0, PRCM_SEM);
1204 spin_unlock_irqrestore(&clk_mgt_lock, flags);
1210 * prcmu_request_clock() - Request for a clock to be enabled or disabled.
1211 * @clock: The clock for which the request is made.
1212 * @enable: Whether the clock should be enabled (true) or disabled (false).
1214 * This function should only be used by the clock implementation.
1215 * Do not use it from any other place!
/*
 * Dispatch: register-controlled clocks, TIMCLK and SYSCLK each have their
 * own enable path. NOTE(review): the fall-through error return for an
 * unknown clock id appears elided in this copy.
 */
1217 int prcmu_request_clock(u8 clock, bool enable)
1219 if (clock < PRCMU_NUM_REG_CLOCKS)
1220 return request_reg_clock(clock, enable);
1221 else if (clock == PRCMU_TIMCLK)
1222 return request_timclk(enable);
1223 else if (clock == PRCMU_SYSCLK)
1224 return request_sysclk(enable);
/*
 * prcmu_config_esram0_deep_sleep() - set the eSRAM0 state to use in deep
 * sleep, via a synchronous MB4 MEM_ST message (DDR states are sent along
 * with fixed values). NOTE(review): the -EINVAL and success return
 * statements appear elided in this copy.
 */
1229 int prcmu_config_esram0_deep_sleep(u8 state)
1231 if ((state > ESRAM0_DEEP_SLEEP_STATE_RET) ||
1232 (state < ESRAM0_DEEP_SLEEP_STATE_OFF))
1235 mutex_lock(&mb4_transfer.lock);
1237 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1240 writeb(MB4H_MEM_ST, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1241 writeb(((DDR_PWR_STATE_OFFHIGHLAT << 4) | DDR_PWR_STATE_ON),
1242 (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_SLEEP_IDLE));
1243 writeb(DDR_PWR_STATE_ON,
1244 (tcdm_base + PRCM_REQ_MB4_DDR_ST_AP_DEEP_IDLE));
1245 writeb(state, (tcdm_base + PRCM_REQ_MB4_ESRAM0_ST));
1247 writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
1248 wait_for_completion(&mb4_transfer.work);
1250 mutex_unlock(&mb4_transfer.lock);
/*
 * Configure the "hotdog" over-temperature threshold in the PRCMU firmware
 * (mailbox 4 request MB4H_HOTDOG).  Sleeps; not for atomic context.
 */
1255 int prcmu_config_hotdog(u8 threshold)
1257 mutex_lock(&mb4_transfer.lock);
/* Busy-wait until mailbox 4 is free. */
1259 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1262 writeb(threshold, (tcdm_base + PRCM_REQ_MB4_HOTDOG_THRESHOLD));
1263 writeb(MB4H_HOTDOG, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1265 writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
1266 wait_for_completion(&mb4_transfer.work);
1268 mutex_unlock(&mb4_transfer.lock);
/*
 * Configure the temperature-monitor low/high thresholds in the PRCMU
 * firmware (mailbox 4 request MB4H_HOTMON), enabling both the low and high
 * comparators.  Sleeps; not for atomic context.
 */
1273 int prcmu_config_hotmon(u8 low, u8 high)
1275 mutex_lock(&mb4_transfer.lock);
/* Busy-wait until mailbox 4 is free. */
1277 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1280 writeb(low, (tcdm_base + PRCM_REQ_MB4_HOTMON_LOW));
1281 writeb(high, (tcdm_base + PRCM_REQ_MB4_HOTMON_HIGH));
1282 writeb((HOTMON_CONFIG_LOW | HOTMON_CONFIG_HIGH),
1283 (tcdm_base + PRCM_REQ_MB4_HOTMON_CONFIG));
1284 writeb(MB4H_HOTMON, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1286 writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
1287 wait_for_completion(&mb4_transfer.work);
1289 mutex_unlock(&mb4_transfer.lock);
/*
 * Send a temperature-measurement period (in 32 kHz cycles) to the firmware
 * via mailbox 4 (MB4H_HOT_PERIOD).  Shared helper for start/stop below;
 * 0xFFFF is the reserved "stop" value.
 */
1294 static int config_hot_period(u16 val)
1296 mutex_lock(&mb4_transfer.lock);
/* Busy-wait until mailbox 4 is free. */
1298 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(4))
1301 writew(val, (tcdm_base + PRCM_REQ_MB4_HOT_PERIOD));
1302 writeb(MB4H_HOT_PERIOD, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB4));
1304 writel(MBOX_BIT(4), PRCM_MBOX_CPU_SET);
1305 wait_for_completion(&mb4_transfer.work);
1307 mutex_unlock(&mb4_transfer.lock);
/*
 * Start periodic temperature sensing every @cycles32k 32 kHz cycles.
 * 0xFFFF is reserved as the stop sentinel and therefore rejected (the error
 * return on that path is elided in this view).
 */
1312 int prcmu_start_temp_sense(u16 cycles32k)
1314 if (cycles32k == 0xFFFF)
1317 return config_hot_period(cycles32k);
/* Stop temperature sensing by sending the reserved period value 0xFFFF. */
1320 int prcmu_stop_temp_sense(void)
1322 return config_hot_period(0xFFFF);
1326 * prcmu_set_clock_divider() - Configure the clock divider.
1327 * @clock: The clock for which the request is made.
1328 * @divider: The clock divider. (< 32)
1330 * This function should only be used by the clock implementation.
1331 * Do not use it from any other place!
/* Writes the 5-bit divider field of a register-gated clock's management
 * register under the PRCM hardware semaphore.  Valid dividers are 1..31. */
1333 int prcmu_set_clock_divider(u8 clock, u8 divider)
1336 unsigned long flags;
1338 if ((clock >= PRCMU_NUM_REG_CLOCKS) || (divider < 1) || (31 < divider))
1341 spin_lock_irqsave(&clk_mgt_lock, flags);
1343 /* Grab the HW semaphore (busy-wait; shared with the PRCMU firmware). */
1344 while ((readl(PRCM_SEM) & PRCM_SEM_PRCM_SEM) != 0)
1347 val = readl(_PRCMU_BASE + clk_mgt[clock].offset);
1348 val &= ~(PRCM_CLK_MGT_CLKPLLDIV_MASK);
1349 val |= (u32)divider;
1350 writel(val, (_PRCMU_BASE + clk_mgt[clock].offset));
1352 /* Release the HW semaphore. */
1353 writel(0, PRCM_SEM);
1355 spin_unlock_irqrestore(&clk_mgt_lock, flags);
1361 * prcmu_abb_read() - Read register value(s) from the ABB.
1362 * @slave: The I2C slave address.
1363 * @reg: The (start) register address.
1364 * @value: The read out value(s).
1365 * @size: The number of registers to read.
1367 * Reads register value(s) from the ABB.
1368 * @size has to be 1 for the current firmware version.
/* Issues an I2C read to the analog baseband through the PRCMU firmware
 * (mailbox 5).  Returns 0 on success, -EIO on an I2C error; the 20 s timeout
 * path (return value elided here) indicates a stuck firmware. */
1370 int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size)
1377 mutex_lock(&mb5_transfer.lock);
/* Busy-wait until mailbox 5 is free. */
1379 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
1382 writeb(PRCMU_I2C_READ(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
1383 writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
1384 writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
1385 writeb(0, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
1387 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
1389 if (!wait_for_completion_timeout(&mb5_transfer.work,
1390 msecs_to_jiffies(20000))) {
1391 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1395 r = ((mb5_transfer.ack.status == I2C_RD_OK) ? 0 : -EIO);
/* Copy the value read back by the firmware (only on success, presumably —
 * the surrounding conditional is elided in this view). */
1399 *value = mb5_transfer.ack.value;
1401 mutex_unlock(&mb5_transfer.lock);
1407 * prcmu_abb_write() - Write register value(s) to the ABB.
1408 * @slave: The I2C slave address.
1409 * @reg: The (start) register address.
1410 * @value: The value(s) to write.
1411 * @size: The number of registers to write.
1413 * Writes register value(s) to the ABB.
1414 * @size has to be 1 for the current firmware version.
/* Issues an I2C write to the analog baseband through the PRCMU firmware
 * (mailbox 5).  Returns 0 on success, -EIO on an I2C error. */
1416 int prcmu_abb_write(u8 slave, u8 reg, u8 *value, u8 size)
1423 mutex_lock(&mb5_transfer.lock);
/* Busy-wait until mailbox 5 is free. */
1425 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(5))
1428 writeb(PRCMU_I2C_WRITE(slave), (tcdm_base + PRCM_REQ_MB5_I2C_SLAVE_OP));
1429 writeb(PRCMU_I2C_STOP_EN, (tcdm_base + PRCM_REQ_MB5_I2C_HW_BITS));
1430 writeb(reg, (tcdm_base + PRCM_REQ_MB5_I2C_REG));
1431 writeb(*value, (tcdm_base + PRCM_REQ_MB5_I2C_VAL));
1433 writel(MBOX_BIT(5), PRCM_MBOX_CPU_SET);
1435 if (!wait_for_completion_timeout(&mb5_transfer.work,
1436 msecs_to_jiffies(20000))) {
1437 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1441 r = ((mb5_transfer.ack.status == I2C_WR_OK) ? 0 : -EIO);
1444 mutex_unlock(&mb5_transfer.lock);
1450 * prcmu_ac_wake_req - should be called whenever ARM wants to wakeup Modem
/* Raises the HOSTACCESS_REQ bit and waits (up to 20 s) for the modem-side
 * acknowledgement (WAKEUP_BIT_AC_WAKE_ACK, completed in read_mailbox_0()).
 * Idempotent: returns early if the bit is already set. */
1452 void prcmu_ac_wake_req(void)
1456 mutex_lock(&mb0_transfer.ac_wake_lock);
1458 val = readl(PRCM_HOSTACCESS_REQ);
1459 if (val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ)
1460 goto unlock_and_return;
/* Record the request before kicking the hardware so the state is visible
 * as soon as the ack arrives. */
1462 atomic_set(&ac_wake_req_state, 1);
1464 writel((val | PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ), PRCM_HOSTACCESS_REQ);
1466 if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
1467 msecs_to_jiffies(20000))) {
1468 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
1473 mutex_unlock(&mb0_transfer.ac_wake_lock);
1477 * prcmu_ac_sleep_req - called when ARM no longer needs to talk to modem
/* Clears the HOSTACCESS_REQ bit and waits (up to 20 s) for the modem-side
 * acknowledgement (WAKEUP_BIT_AC_SLEEP_ACK, completed in read_mailbox_0()).
 * Idempotent: returns early if the bit is already clear.
 * Fix: declare with (void) — empty parentheses form an old-style,
 * unprototyped declaration in C, inconsistent with every other function in
 * this file. */
1479 void prcmu_ac_sleep_req(void)
1483 mutex_lock(&mb0_transfer.ac_wake_lock);
1485 val = readl(PRCM_HOSTACCESS_REQ);
1486 if (!(val & PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ))
1487 goto unlock_and_return;
1489 writel((val & ~PRCM_HOSTACCESS_REQ_HOSTACCESS_REQ),
1490 PRCM_HOSTACCESS_REQ);
1492 if (!wait_for_completion_timeout(&mb0_transfer.ac_wake_work,
1493 msecs_to_jiffies(20000))) {
1494 pr_err("prcmu: %s timed out (20 s) waiting for a reply.\n",
/* Only mark the wake request as dropped once the modem has acked. */
1498 atomic_set(&ac_wake_req_state, 0);
1501 mutex_unlock(&mb0_transfer.ac_wake_lock);
/* Returns true while an ARM->modem wake request is outstanding (set in
 * prcmu_ac_wake_req(), cleared in prcmu_ac_sleep_req()). */
1504 bool prcmu_is_ac_wake_requested(void)
1506 return (atomic_read(&ac_wake_req_state) != 0);
1510 * prcmu_system_reset - System reset
1512 * Saves the reset reason code and then sets the APE_SOFRST register which
1513 * fires interrupt to fw
1515 void prcmu_system_reset(u16 reset_code)
/* The reason code is stashed in TCDM so the firmware/bootloader can read it
 * after the reset. */
1517 writew(reset_code, (tcdm_base + PRCM_SW_RST_REASON));
1518 writel(1, PRCM_APE_SOFTRST);
1522 * prcmu_modem_reset - ask the PRCMU to reset modem
/* Sends MB1H_RESET_MODEM via mailbox 1 and waits for the firmware's ack. */
1524 void prcmu_modem_reset(void)
1526 mutex_lock(&mb1_transfer.lock);
/* Busy-wait until mailbox 1 is free. */
1528 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(1))
1531 writeb(MB1H_RESET_MODEM, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB1));
1532 writel(MBOX_BIT(1), PRCM_MBOX_CPU_SET);
1533 wait_for_completion(&mb1_transfer.work);
1536 * No need to check return from PRCMU as modem should go in reset state
1537 * This state is already managed by upper layer
1540 mutex_unlock(&mb1_transfer.lock);
/* Acknowledge a DBB wakeup to the firmware (MB0H_READ_WAKEUP_ACK on
 * mailbox 0).  Uses a spinlock, so it is safe from the IRQ thread. */
1543 static void ack_dbb_wakeup(void)
1545 unsigned long flags;
1547 spin_lock_irqsave(&mb0_transfer.lock, flags);
/* Busy-wait until mailbox 0 is free. */
1549 while (readl(PRCM_MBOX_CPU_VAL) & MBOX_BIT(0))
1552 writeb(MB0H_READ_WAKEUP_ACK, (tcdm_base + PRCM_MBOX_HEADER_REQ_MB0));
1553 writel(MBOX_BIT(0), PRCM_MBOX_CPU_SET);
1555 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
/* Log an unexpected acknowledgement header @header seen in mailbox @n. */
1558 static inline void print_unknown_header_warning(u8 n, u8 header)
1560 pr_warning("prcmu: Unknown message header (%d) in mailbox %d.\n",
/* Service mailbox 0 (wakeup events) from hard-IRQ context.  Reads the event
 * word selected by the firmware's read pointer, completes the AC-wake and
 * SysClk waiters, and dispatches enabled DBB wakeup bits as virtual IRQs.
 * Return value (elided here) tells the handler whether to wake the thread. */
1564 static bool read_mailbox_0(void)
1571 header = readb(tcdm_base + PRCM_MBOX_HEADER_ACK_MB0);
1573 case MB0H_WAKEUP_EXE:
1574 case MB0H_WAKEUP_SLEEP:
/* The firmware double-buffers the wakeup word; bit 0 of the read pointer
 * selects which copy is current. */
1575 if (readb(tcdm_base + PRCM_ACK_MB0_READ_POINTER) & 1)
1576 ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_1_8500);
1578 ev = readl(tcdm_base + PRCM_ACK_MB0_WAKEUP_0_8500);
1580 if (ev & (WAKEUP_BIT_AC_WAKE_ACK | WAKEUP_BIT_AC_SLEEP_ACK))
1581 complete(&mb0_transfer.ac_wake_work);
1582 if (ev & WAKEUP_BIT_SYSCLK_OK)
1583 complete(&mb3_transfer.sysclk_work);
/* Only forward events the client actually enabled. */
1585 ev &= mb0_transfer.req.dbb_irqs;
1587 for (n = 0; n < NUM_PRCMU_WAKEUPS; n++) {
1588 if (ev & prcmu_irq_bit[n])
1589 generic_handle_irq(IRQ_PRCMU_BASE + n);
1594 print_unknown_header_warning(0, header);
1598 writel(MBOX_BIT(0), PRCM_ARM_IT1_CLR);
/* Service mailbox 1: latch the ack (header, current ARM/APE OPPs, APE
 * voltage status) into mb1_transfer.ack, clear the IRQ and wake the waiter. */
1602 static bool read_mailbox_1(void)
1604 mb1_transfer.ack.header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB1);
1605 mb1_transfer.ack.arm_opp = readb(tcdm_base +
1606 PRCM_ACK_MB1_CURRENT_ARM_OPP);
1607 mb1_transfer.ack.ape_opp = readb(tcdm_base +
1608 PRCM_ACK_MB1_CURRENT_APE_OPP);
1609 mb1_transfer.ack.ape_voltage_status = readb(tcdm_base +
1610 PRCM_ACK_MB1_APE_VOLTAGE_STATUS);
1611 writel(MBOX_BIT(1), PRCM_ARM_IT1_CLR);
1612 complete(&mb1_transfer.work);
/* Service mailbox 2: latch the DPS status, clear the IRQ, wake the waiter. */
1616 static bool read_mailbox_2(void)
1618 mb2_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB2_DPS_STATUS);
1619 writel(MBOX_BIT(2), PRCM_ARM_IT1_CLR);
1620 complete(&mb2_transfer.work);
/* Mailbox 3 carries no acknowledgement payload here; just clear the IRQ. */
1624 static bool read_mailbox_3(void)
1626 writel(MBOX_BIT(3), PRCM_ARM_IT1_CLR);
/* Service mailbox 4: the waiter is only completed for headers we recognise
 * (memory-state, hotdog/hotmon, hot-period — some case labels are elided in
 * this view); unknown headers are logged and left uncompleted. */
1630 static bool read_mailbox_4(void)
1633 bool do_complete = true;
1635 header = readb(tcdm_base + PRCM_MBOX_HEADER_REQ_MB4);
1640 case MB4H_HOT_PERIOD:
1643 print_unknown_header_warning(4, header);
1644 do_complete = false;
1648 writel(MBOX_BIT(4), PRCM_ARM_IT1_CLR);
1651 complete(&mb4_transfer.work);
/* Service mailbox 5: latch the I2C status and data byte for the ABB
 * read/write paths, clear the IRQ and wake the waiter. */
1656 static bool read_mailbox_5(void)
1658 mb5_transfer.ack.status = readb(tcdm_base + PRCM_ACK_MB5_I2C_STATUS);
1659 mb5_transfer.ack.value = readb(tcdm_base + PRCM_ACK_MB5_I2C_VAL);
1660 writel(MBOX_BIT(5), PRCM_ARM_IT1_CLR);
1661 complete(&mb5_transfer.work);
/* Mailbox 6 is unused by this driver; acknowledge and drop. */
1665 static bool read_mailbox_6(void)
1667 writel(MBOX_BIT(6), PRCM_ARM_IT1_CLR);
/* Mailbox 7 is unused by this driver; acknowledge and drop. */
1671 static bool read_mailbox_7(void)
1673 writel(MBOX_BIT(7), PRCM_ARM_IT1_CLR);
1677 static bool (* const read_mailbox[NUM_MB])(void) = {
/* Hard-IRQ handler: dispatch each pending mailbox bit in PRCM_ARM_IT1_VAL to
 * its service routine; request the threaded handler if any routine asks. */
1688 static irqreturn_t prcmu_irq_handler(int irq, void *data)
1694 bits = (readl(PRCM_ARM_IT1_VAL) & ALL_MBOX_BITS);
1695 if (unlikely(!bits))
/* Clear each handled bit from the local copy; loop ends when none remain. */
1699 for (n = 0; bits; n++) {
1700 if (bits & MBOX_BIT(n)) {
1701 bits -= MBOX_BIT(n);
1702 if (read_mailbox[n]())
1703 r = IRQ_WAKE_THREAD;
1709 static irqreturn_t prcmu_irq_thread_fn(int irq, void *data)
/* Workqueue function: push the current DBB wakeup-IRQ mask to the firmware
 * (the config call between lock and unlock is elided in this view).  Run via
 * a workqueue because the mask update sleeps and irq_mask/unmask cannot. */
1715 static void prcmu_mask_work(struct work_struct *work)
1717 unsigned long flags;
1719 spin_lock_irqsave(&mb0_transfer.lock, flags);
1723 spin_unlock_irqrestore(&mb0_transfer.lock, flags);
/* irq_chip mask op: clear the wakeup bit for this virtual IRQ and schedule
 * the (sleeping) firmware update.  CA_SLEEP is excluded: its masking must
 * not reach the firmware via this path. */
1726 static void prcmu_irq_mask(struct irq_data *d)
1728 unsigned long flags;
1730 spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
1732 mb0_transfer.req.dbb_irqs &= ~prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE];
1734 spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
1736 if (d->irq != IRQ_PRCMU_CA_SLEEP)
1737 schedule_work(&mb0_transfer.mask_work);
/* irq_chip unmask op: mirror of prcmu_irq_mask() — set the wakeup bit and
 * schedule the firmware update (again skipping CA_SLEEP). */
1740 static void prcmu_irq_unmask(struct irq_data *d)
1742 unsigned long flags;
1744 spin_lock_irqsave(&mb0_transfer.dbb_irqs_lock, flags);
1746 mb0_transfer.req.dbb_irqs |= prcmu_irq_bit[d->irq - IRQ_PRCMU_BASE];
1748 spin_unlock_irqrestore(&mb0_transfer.dbb_irqs_lock, flags);
1750 if (d->irq != IRQ_PRCMU_CA_SLEEP)
1751 schedule_work(&mb0_transfer.mask_work);
1754 static void noop(struct irq_data *d)
/* irq_chip for the PRCMU virtual wakeup IRQs; disable aliases to mask. */
1758 static struct irq_chip prcmu_irq_chip = {
1760 .irq_disable = prcmu_irq_mask,
1762 .irq_mask = prcmu_irq_mask,
1763 .irq_unmask = prcmu_irq_unmask,
/*
 * Early (pre-driver-model) init: map the TCDM for the detected chip
 * version, read and report the firmware version from the TCPM (v2 only),
 * initialise all mailbox locks/completions and register the virtual
 * wakeup IRQs.
 */
1766 void __init prcmu_early_init(void)
1770 if (cpu_is_u8500v1()) {
1771 tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE_V1);
1772 } else if (cpu_is_u8500v2()) {
/* Temporary mapping, only needed to read the FW version word. */
1773 void *tcpm_base = ioremap_nocache(U8500_PRCMU_TCPM_BASE, SZ_4K);
1775 if (tcpm_base != NULL) {
1777 version = readl(tcpm_base + PRCMU_FW_VERSION_OFFSET);
/* Version word layout: project | api | func | errata (one byte each). */
1778 prcmu_version.project_number = version & 0xFF;
1779 prcmu_version.api_version = (version >> 8) & 0xFF;
1780 prcmu_version.func_version = (version >> 16) & 0xFF;
1781 prcmu_version.errata = (version >> 24) & 0xFF;
1782 pr_info("PRCMU firmware version %d.%d.%d\n",
1783 (version >> 8) & 0xFF, (version >> 16) & 0xFF,
1784 (version >> 24) & 0xFF);
1788 tcdm_base = __io_address(U8500_PRCMU_TCDM_BASE);
1790 pr_err("prcmu: Unsupported chip version\n");
/* Mailbox synchronisation primitives, one set per mailbox transfer. */
1794 spin_lock_init(&mb0_transfer.lock);
1795 spin_lock_init(&mb0_transfer.dbb_irqs_lock);
1796 mutex_init(&mb0_transfer.ac_wake_lock);
1797 init_completion(&mb0_transfer.ac_wake_work);
1798 mutex_init(&mb1_transfer.lock);
1799 init_completion(&mb1_transfer.work);
1800 mutex_init(&mb2_transfer.lock);
1801 init_completion(&mb2_transfer.work);
1802 spin_lock_init(&mb2_transfer.auto_pm_lock);
1803 spin_lock_init(&mb3_transfer.lock);
1804 mutex_init(&mb3_transfer.sysclk_lock);
1805 init_completion(&mb3_transfer.sysclk_work);
1806 mutex_init(&mb4_transfer.lock);
1807 init_completion(&mb4_transfer.work);
1808 mutex_init(&mb5_transfer.lock);
1809 init_completion(&mb5_transfer.work);
1811 INIT_WORK(&mb0_transfer.mask_work, prcmu_mask_work);
1813 /* Initialize irqs. */
1814 for (i = 0; i < NUM_PRCMU_WAKEUPS; i++) {
1817 irq = IRQ_PRCMU_BASE + i;
1818 irq_set_chip_and_handler(irq, &prcmu_irq_chip,
1820 set_irq_flags(irq, IRQF_VALID);
1825 * Power domain switches (ePODs) modeled as regulators for the DB8500 SoC
/* Consumers of the VAPE (application-ESRAM/peripheral) supply. */
1827 static struct regulator_consumer_supply db8500_vape_consumers[] = {
1828 REGULATOR_SUPPLY("v-ape", NULL),
1829 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.0"),
1830 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
1831 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
1832 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
1833 /* "v-mmc" changed to "vcore" in the mainline kernel */
1834 REGULATOR_SUPPLY("vcore", "sdi0"),
1835 REGULATOR_SUPPLY("vcore", "sdi1"),
1836 REGULATOR_SUPPLY("vcore", "sdi2"),
1837 REGULATOR_SUPPLY("vcore", "sdi3"),
1838 REGULATOR_SUPPLY("vcore", "sdi4"),
1839 REGULATOR_SUPPLY("v-dma", "dma40.0"),
1840 REGULATOR_SUPPLY("v-ape", "ab8500-usb.0"),
1841 /* "v-uart" changed to "vcore" in the mainline kernel */
1842 REGULATOR_SUPPLY("vcore", "uart0"),
1843 REGULATOR_SUPPLY("vcore", "uart1"),
1844 REGULATOR_SUPPLY("vcore", "uart2"),
1845 REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
/* Consumers of the VSMPS2 (1.8 V) supply. */
1848 static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
1849 /* CG2900 and CW1200 power to off-chip peripherals */
1850 REGULATOR_SUPPLY("gbf_1v8", "cg2900-uart.0"),
1851 REGULATOR_SUPPLY("wlan_1v8", "cw1200.0"),
1852 REGULATOR_SUPPLY("musb_1v8", "ab8500-usb.0"),
1853 /* AV8100 regulator */
1854 REGULATOR_SUPPLY("hdmi_1v8", "0-0070"),
/* Consumers of the shared B2R2/MCDE (graphics/display) ePOD switch. */
1857 static struct regulator_consumer_supply db8500_b2r2_mcde_consumers[] = {
1858 REGULATOR_SUPPLY("vsupply", "b2r2.0"),
1859 REGULATOR_SUPPLY("vsupply", "mcde.0"),
/*
 * regulator_init_data for every DB8500 PRCMU-controlled supply and ePOD
 * power-domain switch, indexed by the DB8500_REGULATOR_* ids.  All entries
 * allow only on/off control (REGULATOR_CHANGE_STATUS); the ePOD switches
 * name "db8500-vape" as their parent supply so VAPE is kept up while any
 * switch is on.
 */
1862 static struct regulator_init_data db8500_regulators[DB8500_NUM_REGULATORS] = {
1863 [DB8500_REGULATOR_VAPE] = {
1865 .name = "db8500-vape",
1866 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1868 .consumer_supplies = db8500_vape_consumers,
1869 .num_consumer_supplies = ARRAY_SIZE(db8500_vape_consumers),
1871 [DB8500_REGULATOR_VARM] = {
1873 .name = "db8500-varm",
1874 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1877 [DB8500_REGULATOR_VMODEM] = {
1879 .name = "db8500-vmodem",
1880 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1883 [DB8500_REGULATOR_VPLL] = {
1885 .name = "db8500-vpll",
1886 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1889 [DB8500_REGULATOR_VSMPS1] = {
1891 .name = "db8500-vsmps1",
1892 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1895 [DB8500_REGULATOR_VSMPS2] = {
1897 .name = "db8500-vsmps2",
1898 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1900 .consumer_supplies = db8500_vsmps2_consumers,
1901 .num_consumer_supplies = ARRAY_SIZE(db8500_vsmps2_consumers),
1903 [DB8500_REGULATOR_VSMPS3] = {
1905 .name = "db8500-vsmps3",
1906 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1909 [DB8500_REGULATOR_VRF1] = {
1911 .name = "db8500-vrf1",
1912 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
/* ePOD power-domain switches below; parented on db8500-vape. */
1915 [DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
1916 .supply_regulator = "db8500-vape",
1918 .name = "db8500-sva-mmdsp",
1919 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1922 [DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
1924 /* "ret" means "retention" */
1925 .name = "db8500-sva-mmdsp-ret",
1926 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1929 [DB8500_REGULATOR_SWITCH_SVAPIPE] = {
1930 .supply_regulator = "db8500-vape",
1932 .name = "db8500-sva-pipe",
1933 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1936 [DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
1937 .supply_regulator = "db8500-vape",
1939 .name = "db8500-sia-mmdsp",
1940 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1943 [DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
1945 .name = "db8500-sia-mmdsp-ret",
1946 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1949 [DB8500_REGULATOR_SWITCH_SIAPIPE] = {
1950 .supply_regulator = "db8500-vape",
1952 .name = "db8500-sia-pipe",
1953 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1956 [DB8500_REGULATOR_SWITCH_SGA] = {
1957 .supply_regulator = "db8500-vape",
1959 .name = "db8500-sga",
1960 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1963 [DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
1964 .supply_regulator = "db8500-vape",
1966 .name = "db8500-b2r2-mcde",
1967 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1969 .consumer_supplies = db8500_b2r2_mcde_consumers,
1970 .num_consumer_supplies = ARRAY_SIZE(db8500_b2r2_mcde_consumers),
1972 [DB8500_REGULATOR_SWITCH_ESRAM12] = {
1973 .supply_regulator = "db8500-vape",
1975 .name = "db8500-esram12",
1976 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1979 [DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
1981 .name = "db8500-esram12-ret",
1982 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1985 [DB8500_REGULATOR_SWITCH_ESRAM34] = {
1986 .supply_regulator = "db8500-vape",
1988 .name = "db8500-esram34",
1989 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
1992 [DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
1994 .name = "db8500-esram34-ret",
1995 .valid_ops_mask = REGULATOR_CHANGE_STATUS,
/* MFD subdevices registered by the probe: the regulator driver (fed the
 * init data above) and the cpufreq driver. */
2000 static struct mfd_cell db8500_prcmu_devs[] = {
2002 .name = "db8500-prcmu-regulators",
2003 .platform_data = &db8500_regulators,
2004 .pdata_size = sizeof(db8500_regulators),
2007 .name = "cpufreq-u8500",
2012 * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
/* Platform probe: clear stale mailbox IRQs, install the threaded handler,
 * set the default eSRAM0 deep-sleep state on v2+ silicon, and register the
 * MFD subdevices. */
2015 static int __init db8500_prcmu_probe(struct platform_device *pdev)
2022 /* Clean up the mailbox interrupts after pre-kernel code. */
2023 writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR)
2025 err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler,
2026 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
2028 pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
2033 if (cpu_is_u8500v20_or_later())
2034 prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
2036 err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
2037 ARRAY_SIZE(db8500_prcmu_devs), NULL,
2041 pr_err("prcmu: Failed to add subdevices\n");
2043 pr_info("DB8500 PRCMU initialized\n");
/* Platform driver; probe is bound via platform_driver_probe() below. */
2049 static struct platform_driver db8500_prcmu_driver = {
2051 .name = "db8500-prcmu",
2052 .owner = THIS_MODULE,
/* Module entry point, run at arch_initcall time (see below). */
2056 static int __init db8500_prcmu_init(void)
2058 return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe);
2061 arch_initcall(db8500_prcmu_init);
2063 MODULE_AUTHOR("Mattias Nilsson <mattias.i.nilsson@stericsson.com>");
2064 MODULE_DESCRIPTION("DB8500 PRCM Unit driver");
2065 MODULE_LICENSE("GPL v2");