/*
 * MMCIF eMMC driver.
 *
 * Copyright (C) 2010 Renesas Solutions Corp.
 * Yusuke Goda <yusuke.goda.sx@renesas.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 *
 * TODO
 *  1. DMA
 *  2. Power management
 *  3. Handle MMC errors better
 *
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/mmc/card.h>
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sh_mmcif.h>
#include <linux/pagemap.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include <linux/module.h>

#define DRIVER_NAME     "sh_mmcif"
#define DRIVER_VERSION  "2010-04-28"

/* CE_CMD_SET */
#define CMD_MASK                0x3f000000
#define CMD_SET_RTYP_NO         ((0 << 23) | (0 << 22))
#define CMD_SET_RTYP_6B         ((0 << 23) | (1 << 22)) /* R1/R1b/R3/R4/R5 */
#define CMD_SET_RTYP_17B        ((1 << 23) | (0 << 22)) /* R2 */
#define CMD_SET_RBSY            (1 << 21) /* R1b */
#define CMD_SET_CCSEN           (1 << 20)
#define CMD_SET_WDAT            (1 << 19) /* 1: on data, 0: no data */
#define CMD_SET_DWEN            (1 << 18) /* 1: write, 0: read */
#define CMD_SET_CMLTE           (1 << 17) /* 1: multi block trans, 0: single */
#define CMD_SET_CMD12EN         (1 << 16) /* 1: CMD12 auto issue */
#define CMD_SET_RIDXC_INDEX     ((0 << 15) | (0 << 14)) /* index check */
#define CMD_SET_RIDXC_BITS      ((0 << 15) | (1 << 14)) /* check bits check */
#define CMD_SET_RIDXC_NO        ((1 << 15) | (0 << 14)) /* no check */
#define CMD_SET_CRC7C           ((0 << 13) | (0 << 12)) /* CRC7 check */
#define CMD_SET_CRC7C_BITS      ((0 << 13) | (1 << 12)) /* check bits check */
#define CMD_SET_CRC7C_INTERNAL  ((1 << 13) | (0 << 12)) /* internal CRC7 check */
#define CMD_SET_CRC16C          (1 << 10) /* 0: CRC16 check */
#define CMD_SET_CRCSTE          (1 << 8) /* 1: not receive CRC status */
#define CMD_SET_TBIT            (1 << 7) /* 1: transmission bit "Low" */
60 #define CMD_SET_OPDM            (1 << 6) /* 1: open/drain */
61 #define CMD_SET_CCSH            (1 << 5)
62 #define CMD_SET_DATW_1          ((0 << 1) | (0 << 0)) /* 1bit */
63 #define CMD_SET_DATW_4          ((0 << 1) | (1 << 0)) /* 4bit */
64 #define CMD_SET_DATW_8          ((1 << 1) | (0 << 0)) /* 8bit */
65
66 /* CE_CMD_CTRL */
67 #define CMD_CTRL_BREAK          (1 << 0)
68
69 /* CE_BLOCK_SET */
70 #define BLOCK_SIZE_MASK         0x0000ffff
71
72 /* CE_INT */
73 #define INT_CCSDE               (1 << 29)
74 #define INT_CMD12DRE            (1 << 26)
75 #define INT_CMD12RBE            (1 << 25)
76 #define INT_CMD12CRE            (1 << 24)
77 #define INT_DTRANE              (1 << 23)
78 #define INT_BUFRE               (1 << 22)
79 #define INT_BUFWEN              (1 << 21)
80 #define INT_BUFREN              (1 << 20)
81 #define INT_CCSRCV              (1 << 19)
82 #define INT_RBSYE               (1 << 17)
83 #define INT_CRSPE               (1 << 16)
84 #define INT_CMDVIO              (1 << 15)
85 #define INT_BUFVIO              (1 << 14)
86 #define INT_WDATERR             (1 << 11)
87 #define INT_RDATERR             (1 << 10)
88 #define INT_RIDXERR             (1 << 9)
89 #define INT_RSPERR              (1 << 8)
90 #define INT_CCSTO               (1 << 5)
91 #define INT_CRCSTO              (1 << 4)
92 #define INT_WDATTO              (1 << 3)
93 #define INT_RDATTO              (1 << 2)
94 #define INT_RBSYTO              (1 << 1)
95 #define INT_RSPTO               (1 << 0)
96 #define INT_ERR_STS             (INT_CMDVIO | INT_BUFVIO | INT_WDATERR |  \
97                                  INT_RDATERR | INT_RIDXERR | INT_RSPERR | \
98                                  INT_CCSTO | INT_CRCSTO | INT_WDATTO |    \
99                                  INT_RDATTO | INT_RBSYTO | INT_RSPTO)
100
101 /* CE_INT_MASK */
102 #define MASK_ALL                0x00000000
103 #define MASK_MCCSDE             (1 << 29)
104 #define MASK_MCMD12DRE          (1 << 26)
105 #define MASK_MCMD12RBE          (1 << 25)
106 #define MASK_MCMD12CRE          (1 << 24)
107 #define MASK_MDTRANE            (1 << 23)
108 #define MASK_MBUFRE             (1 << 22)
109 #define MASK_MBUFWEN            (1 << 21)
110 #define MASK_MBUFREN            (1 << 20)
111 #define MASK_MCCSRCV            (1 << 19)
112 #define MASK_MRBSYE             (1 << 17)
113 #define MASK_MCRSPE             (1 << 16)
114 #define MASK_MCMDVIO            (1 << 15)
115 #define MASK_MBUFVIO            (1 << 14)
116 #define MASK_MWDATERR           (1 << 11)
117 #define MASK_MRDATERR           (1 << 10)
118 #define MASK_MRIDXERR           (1 << 9)
119 #define MASK_MRSPERR            (1 << 8)
120 #define MASK_MCCSTO             (1 << 5)
121 #define MASK_MCRCSTO            (1 << 4)
122 #define MASK_MWDATTO            (1 << 3)
123 #define MASK_MRDATTO            (1 << 2)
124 #define MASK_MRBSYTO            (1 << 1)
125 #define MASK_MRSPTO             (1 << 0)
126
127 /* CE_HOST_STS1 */
128 #define STS1_CMDSEQ             (1 << 31)
129
130 /* CE_HOST_STS2 */
131 #define STS2_CRCSTE             (1 << 31)
132 #define STS2_CRC16E             (1 << 30)
133 #define STS2_AC12CRCE           (1 << 29)
134 #define STS2_RSPCRC7E           (1 << 28)
135 #define STS2_CRCSTEBE           (1 << 27)
136 #define STS2_RDATEBE            (1 << 26)
137 #define STS2_AC12REBE           (1 << 25)
138 #define STS2_RSPEBE             (1 << 24)
139 #define STS2_AC12IDXE           (1 << 23)
140 #define STS2_RSPIDXE            (1 << 22)
141 #define STS2_CCSTO              (1 << 15)
142 #define STS2_RDATTO             (1 << 14)
143 #define STS2_DATBSYTO           (1 << 13)
144 #define STS2_CRCSTTO            (1 << 12)
145 #define STS2_AC12BSYTO          (1 << 11)
146 #define STS2_RSPBSYTO           (1 << 10)
147 #define STS2_AC12RSPTO          (1 << 9)
148 #define STS2_RSPTO              (1 << 8)
149 #define STS2_CRC_ERR            (STS2_CRCSTE | STS2_CRC16E |            \
150                                  STS2_AC12CRCE | STS2_RSPCRC7E | STS2_CRCSTEBE)
151 #define STS2_TIMEOUT_ERR        (STS2_CCSTO | STS2_RDATTO |             \
152                                  STS2_DATBSYTO | STS2_CRCSTTO |         \
153                                  STS2_AC12BSYTO | STS2_RSPBSYTO |       \
154                                  STS2_AC12RSPTO | STS2_RSPTO)
155
156 #define CLKDEV_EMMC_DATA        52000000 /* 52MHz */
157 #define CLKDEV_MMC_DATA         20000000 /* 20MHz */
158 #define CLKDEV_INIT             400000   /* 400 KHz */
159
160 enum mmcif_state {
161         STATE_IDLE,
162         STATE_REQUEST,
163         STATE_IOS,
164 };
165
166 struct sh_mmcif_host {
167         struct mmc_host *mmc;
168         struct mmc_data *data;
169         struct platform_device *pd;
170         struct sh_dmae_slave dma_slave_tx;
171         struct sh_dmae_slave dma_slave_rx;
172         struct clk *hclk;
173         unsigned int clk;
174         int bus_width;
175         bool sd_error;
176         long timeout;
177         void __iomem *addr;
178         struct completion intr_wait;
179         enum mmcif_state state;
180         spinlock_t lock;
181         bool power;
182         bool card_present;
183
184         /* DMA support */
185         struct dma_chan         *chan_rx;
186         struct dma_chan         *chan_tx;
187         struct completion       dma_complete;
188         bool                    dma_active;
189 };
190
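/* Read-modify-write helpers: set or clear bits in an MMCIF register */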
static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,
                                        unsigned int reg, u32 val)
{
        writel(val | readl(host->addr + reg), host->addr + reg);
}

static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
                                        unsigned int reg, u32 val)
{
        writel(~val & readl(host->addr + reg), host->addr + reg);
}

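/* DMA completion callback: unmap the data scatterlist and wake the waiter */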
static void mmcif_dma_complete(void *arg)
{
        struct sh_mmcif_host *host = arg;
        dev_dbg(&host->pd->dev, "Command completed\n");

        if (WARN(!host->data, "%s: NULL data in DMA completion!\n",
                 dev_name(&host->pd->dev)))
                return;

        if (host->data->flags & MMC_DATA_READ)
                dma_unmap_sg(host->chan_rx->device->dev,
                             host->data->sg, host->data->sg_len,
                             DMA_FROM_DEVICE);
        else
                dma_unmap_sg(host->chan_tx->device->dev,
                             host->data->sg, host->data->sg_len,
                             DMA_TO_DEVICE);

        complete(&host->dma_complete);
}

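/*
 * Map the scatterlist and submit a slave-DMA read; on any failure both
 * channels are released and the driver falls back to PIO. The Tx variant
 * below mirrors this path.
 */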
static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host)
{
        struct scatterlist *sg = host->data->sg;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_rx;
        dma_cookie_t cookie = -EINVAL;
        int ret;

        ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
                         DMA_FROM_DEVICE);
        if (ret > 0) {
                host->dma_active = true;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = mmcif_dma_complete;
                desc->callback_param = host;
                cookie = dmaengine_submit(desc);
                sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN);
                dma_async_issue_pending(chan);
        }
        dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
                __func__, host->data->sg_len, ret, cookie);

        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_rx = NULL;
                host->dma_active = false;
                dma_release_channel(chan);
                /* Free the Tx channel too */
                chan = host->chan_tx;
                if (chan) {
                        host->chan_tx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pd->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        }

        dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__,
                desc, cookie, host->data->sg_len);
}

static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host)
{
        struct scatterlist *sg = host->data->sg;
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *chan = host->chan_tx;
        dma_cookie_t cookie = -EINVAL;
        int ret;

        ret = dma_map_sg(chan->device->dev, sg, host->data->sg_len,
                         DMA_TO_DEVICE);
        if (ret > 0) {
                host->dma_active = true;
                desc = chan->device->device_prep_slave_sg(chan, sg, ret,
                        DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        }

        if (desc) {
                desc->callback = mmcif_dma_complete;
                desc->callback_param = host;
                cookie = dmaengine_submit(desc);
                sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN);
                dma_async_issue_pending(chan);
        }
        dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n",
                __func__, host->data->sg_len, ret, cookie);

        if (!desc) {
                /* DMA failed, fall back to PIO */
                if (ret >= 0)
                        ret = -EIO;
                host->chan_tx = NULL;
                host->dma_active = false;
                dma_release_channel(chan);
                /* Free the Rx channel too */
                chan = host->chan_rx;
                if (chan) {
                        host->chan_rx = NULL;
                        dma_release_channel(chan);
                }
                dev_warn(&host->pd->dev,
                         "DMA failed: %d, falling back to PIO\n", ret);
                sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        }

        dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d\n", __func__,
                desc, cookie);
}

static bool sh_mmcif_filter(struct dma_chan *chan, void *arg)
{
        dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg);
        chan->private = arg;
        return true;
}

static void sh_mmcif_request_dma(struct sh_mmcif_host *host,
                                 struct sh_mmcif_plat_data *pdata)
{
        struct sh_dmae_slave *tx, *rx;
        host->dma_active = false;

        /* We can only either use DMA for both Tx and Rx or not use it at all */
        if (pdata->dma) {
                dev_warn(&host->pd->dev,
                         "Update your platform to use embedded DMA slave IDs\n");
                tx = &pdata->dma->chan_priv_tx;
                rx = &pdata->dma->chan_priv_rx;
        } else {
                tx = &host->dma_slave_tx;
                tx->slave_id = pdata->slave_id_tx;
                rx = &host->dma_slave_rx;
                rx->slave_id = pdata->slave_id_rx;
        }
        if (tx->slave_id > 0 && rx->slave_id > 0) {
                dma_cap_mask_t mask;

                dma_cap_zero(mask);
                dma_cap_set(DMA_SLAVE, mask);

                host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, tx);
                dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__,
                        host->chan_tx);

                if (!host->chan_tx)
                        return;

                host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, rx);
                dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__,
                        host->chan_rx);

                if (!host->chan_rx) {
                        dma_release_channel(host->chan_tx);
                        host->chan_tx = NULL;
                        return;
                }

                init_completion(&host->dma_complete);
        }
}

static void sh_mmcif_release_dma(struct sh_mmcif_host *host)
{
        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
        /* Descriptors are freed automatically */
        if (host->chan_tx) {
                struct dma_chan *chan = host->chan_tx;
                host->chan_tx = NULL;
                dma_release_channel(chan);
        }
        if (host->chan_rx) {
                struct dma_chan *chan = host->chan_rx;
                host->chan_rx = NULL;
                dma_release_channel(chan);
        }

        host->dma_active = false;
}

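/*
 * Disable the interface clock, program the divider (or pass the
 * peripheral clock straight through), then re-enable it.
 */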
static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)
{
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

        sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
        sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);

        if (!clk)
                return;
        if (p->sup_pclk && clk == host->clk)
                sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);
        else
                sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR &
                                ((fls(host->clk / clk) - 1) << 16));

        sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);
}

static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)
{
        u32 tmp;

        tmp = 0x010f0000 & sh_mmcif_readl(host->addr, MMCIF_CE_CLK_CTRL);

        sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);
        sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF);
        sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp |
                SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29);
        /* byte swap on */
        sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);
}

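/*
 * Error recovery: if a command sequence is stuck, force-break it and
 * soft-reset the controller; otherwise map the HOST_STS2 error bits
 * to an errno.
 */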
static int sh_mmcif_error_manage(struct sh_mmcif_host *host)
{
        u32 state1, state2;
        int ret, timeout = 10000000;

        host->sd_error = false;

        state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);
        state2 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2);
        dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1);
        dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);

        if (state1 & STS1_CMDSEQ) {
                sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);
                sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK);
                while (1) {
                        timeout--;
                        if (timeout < 0) {
                                dev_err(&host->pd->dev,
                                        "Forced end of command sequence timed out\n");
                                return -EIO;
                        }
                        if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1)
                                                                & STS1_CMDSEQ))
                                break;
                        mdelay(1);
                }
                sh_mmcif_sync_reset(host);
                dev_dbg(&host->pd->dev, "Forced end of command sequence\n");
                return -EIO;
        }

        if (state2 & STS2_CRC_ERR) {
                dev_dbg(&host->pd->dev, "CRC error occurred\n");
                ret = -EIO;
        } else if (state2 & STS2_TIMEOUT_ERR) {
                dev_dbg(&host->pd->dev, "Timeout error occurred\n");
                ret = -ETIMEDOUT;
        } else {
                dev_dbg(&host->pd->dev, "End/Index error occurred\n");
                ret = -EIO;
        }
        return ret;
}

static int sh_mmcif_single_read(struct sh_mmcif_host *host,
                                        struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        long time;
        u32 blocksize, i, *p = sg_virt(data->sg);

        /* buf read enable */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error)
                return sh_mmcif_error_manage(host);

        blocksize = (BLOCK_SIZE_MASK &
                        sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
        for (i = 0; i < blocksize / 4; i++)
                *p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);

        /* buffer read end */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error)
                return sh_mmcif_error_manage(host);

        return 0;
}

static int sh_mmcif_multi_read(struct sh_mmcif_host *host,
                                        struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        long time;
        u32 blocksize, i, j, sec, *p;

        blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
                                                     MMCIF_CE_BLOCK_SET);
        for (j = 0; j < data->sg_len; j++) {
                p = sg_virt(data->sg);
                for (sec = 0; sec < data->sg->length / blocksize; sec++) {
                        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
                        /* buf read enable */
                        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                                host->timeout);

                        if (time <= 0 || host->sd_error)
                                return sh_mmcif_error_manage(host);

                        for (i = 0; i < blocksize / 4; i++)
                                *p++ = sh_mmcif_readl(host->addr,
                                                      MMCIF_CE_DATA);
                }
                if (j < data->sg_len - 1)
                        data->sg++;
        }
        return 0;
}

static int sh_mmcif_single_write(struct sh_mmcif_host *host,
                                        struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        long time;
        u32 blocksize, i, *p = sg_virt(data->sg);

        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);

        /* buf write enable */
        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error)
                return sh_mmcif_error_manage(host);

        blocksize = (BLOCK_SIZE_MASK &
                        sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3;
        for (i = 0; i < blocksize / 4; i++)
                sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);

        /* buffer write end */
        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);

        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error)
                return sh_mmcif_error_manage(host);

        return 0;
}

static int sh_mmcif_multi_write(struct sh_mmcif_host *host,
                                                struct mmc_request *mrq)
{
        struct mmc_data *data = mrq->data;
        long time;
        u32 i, sec, j, blocksize, *p;

        blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr,
                                                     MMCIF_CE_BLOCK_SET);

        for (j = 0; j < data->sg_len; j++) {
                p = sg_virt(data->sg);
                for (sec = 0; sec < data->sg->length / blocksize; sec++) {
                        sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
                        /* buf write enable */
                        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                                host->timeout);

                        if (time <= 0 || host->sd_error)
                                return sh_mmcif_error_manage(host);

                        for (i = 0; i < blocksize / 4; i++)
                                sh_mmcif_writel(host->addr,
                                                MMCIF_CE_DATA, *p++);
                }
                if (j < data->sg_len - 1)
                        data->sg++;
        }
        return 0;
}

static void sh_mmcif_get_response(struct sh_mmcif_host *host,
                                                struct mmc_command *cmd)
{
        if (cmd->flags & MMC_RSP_136) {
                cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP3);
                cmd->resp[1] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP2);
                cmd->resp[2] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP1);
                cmd->resp[3] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
        } else
                cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP0);
}

static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,
                                                struct mmc_command *cmd)
{
        cmd->resp[0] = sh_mmcif_readl(host->addr, MMCIF_CE_RESP_CMD12);
}

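/*
 * Build the CE_CMD_SET word for a command: response type, busy wait,
 * data direction, bus width, multi-block control and index/CRC checks.
 */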
static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,
                struct mmc_request *mrq, struct mmc_command *cmd, u32 opc)
{
        u32 tmp = 0;

        /* Response Type check */
        switch (mmc_resp_type(cmd)) {
        case MMC_RSP_NONE:
                tmp |= CMD_SET_RTYP_NO;
                break;
        case MMC_RSP_R1:
        case MMC_RSP_R1B:
        case MMC_RSP_R3:
                tmp |= CMD_SET_RTYP_6B;
                break;
        case MMC_RSP_R2:
                tmp |= CMD_SET_RTYP_17B;
                break;
        default:
                dev_err(&host->pd->dev, "Unsupported response type.\n");
                break;
        }
        switch (opc) {
        /* RBSY */
        case MMC_SWITCH:
        case MMC_STOP_TRANSMISSION:
        case MMC_SET_WRITE_PROT:
        case MMC_CLR_WRITE_PROT:
        case MMC_ERASE:
        case MMC_GEN_CMD:
                tmp |= CMD_SET_RBSY;
                break;
        }
        /* WDAT / DATW */
        if (host->data) {
                tmp |= CMD_SET_WDAT;
                switch (host->bus_width) {
                case MMC_BUS_WIDTH_1:
                        tmp |= CMD_SET_DATW_1;
                        break;
                case MMC_BUS_WIDTH_4:
                        tmp |= CMD_SET_DATW_4;
                        break;
                case MMC_BUS_WIDTH_8:
                        tmp |= CMD_SET_DATW_8;
                        break;
                default:
                        dev_err(&host->pd->dev, "Unsupported bus width.\n");
                        break;
                }
        }
        /* DWEN */
        if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK)
                tmp |= CMD_SET_DWEN;
        /* CMLTE/CMD12EN */
        if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {
                tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;
                sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET,
                                        mrq->data->blocks << 16);
        }
        /* RIDXC[1:0] check bits */
        if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID ||
            opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
                tmp |= CMD_SET_RIDXC_BITS;
        /* RCRC7C[1:0] check bits */
        if (opc == MMC_SEND_OP_COND)
                tmp |= CMD_SET_CRC7C_BITS;
        /* RCRC7C[1:0] internal CRC7 */
        if (opc == MMC_ALL_SEND_CID ||
                opc == MMC_SEND_CSD || opc == MMC_SEND_CID)
                tmp |= CMD_SET_CRC7C_INTERNAL;

        return (opc << 24) | tmp;
}

static int sh_mmcif_data_trans(struct sh_mmcif_host *host,
                                struct mmc_request *mrq, u32 opc)
{
        int ret;

        switch (opc) {
        case MMC_READ_MULTIPLE_BLOCK:
                ret = sh_mmcif_multi_read(host, mrq);
                break;
        case MMC_WRITE_MULTIPLE_BLOCK:
                ret = sh_mmcif_multi_write(host, mrq);
                break;
        case MMC_WRITE_BLOCK:
                ret = sh_mmcif_single_write(host, mrq);
                break;
        case MMC_READ_SINGLE_BLOCK:
        case MMC_SEND_EXT_CSD:
                ret = sh_mmcif_single_read(host, mrq);
                break;
        default:
                dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc);
                ret = -EINVAL;
                break;
        }
        return ret;
}

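/*
 * Issue a single command: unmask the relevant interrupts, write the
 * argument and command registers, then wait for completion and pick up
 * the response and any data transfer.
 */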
static void sh_mmcif_start_cmd(struct sh_mmcif_host *host,
                        struct mmc_request *mrq, struct mmc_command *cmd)
{
        long time;
        int ret = 0, mask = 0;
        u32 opc = cmd->opcode;

        switch (opc) {
        /* response busy check */
        case MMC_SWITCH:
        case MMC_STOP_TRANSMISSION:
        case MMC_SET_WRITE_PROT:
        case MMC_CLR_WRITE_PROT:
        case MMC_ERASE:
        case MMC_GEN_CMD:
                mask = MASK_MRBSYE;
                break;
        default:
                mask = MASK_MCRSPE;
                break;
        }
        mask |= MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR |
                MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR |
                MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO |
                MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO;

        if (host->data) {
                sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);
                sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,
                                mrq->data->blksz);
        }
        opc = sh_mmcif_set_cmd(host, mrq, cmd, opc);

        sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);
        /* set arg */
        sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg);
        /* set cmd */
        sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc);

        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                host->timeout);
        if (time <= 0) {
                cmd->error = sh_mmcif_error_manage(host);
                return;
        }
        if (host->sd_error) {
                switch (cmd->opcode) {
                case MMC_ALL_SEND_CID:
                case MMC_SELECT_CARD:
                case MMC_APP_CMD:
                        cmd->error = -ETIMEDOUT;
                        break;
                default:
                        dev_dbg(&host->pd->dev, "Cmd(d'%d) err\n",
                                        cmd->opcode);
                        cmd->error = sh_mmcif_error_manage(host);
                        break;
                }
                host->sd_error = false;
                return;
        }
        if (!(cmd->flags & MMC_RSP_PRESENT)) {
                cmd->error = 0;
                return;
        }
        sh_mmcif_get_response(host, cmd);
        if (host->data) {
                if (!host->dma_active) {
                        ret = sh_mmcif_data_trans(host, mrq, cmd->opcode);
                } else {
                        long time =
                                wait_for_completion_interruptible_timeout(&host->dma_complete,
                                                                          host->timeout);
                        if (!time)
                                ret = -ETIMEDOUT;
                        else if (time < 0)
                                ret = time;
                        sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC,
                                        BUF_ACC_DMAREN | BUF_ACC_DMAWEN);
                        host->dma_active = false;
                }
                if (ret < 0)
                        mrq->data->bytes_xfered = 0;
                else
                        mrq->data->bytes_xfered =
                                mrq->data->blocks * mrq->data->blksz;
        }
        cmd->error = ret;
}

static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host,
                struct mmc_request *mrq, struct mmc_command *cmd)
{
        long time;

        if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK)
                sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
        else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK)
                sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
        else {
                dev_err(&host->pd->dev, "unsupported stop cmd\n");
                cmd->error = sh_mmcif_error_manage(host);
                return;
        }

        time = wait_for_completion_interruptible_timeout(&host->intr_wait,
                        host->timeout);
        if (time <= 0 || host->sd_error) {
                cmd->error = sh_mmcif_error_manage(host);
                return;
        }
        sh_mmcif_get_cmd12response(host, cmd);
        cmd->error = 0;
}

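/*
 * The .request handler runs synchronously: SD/SDIO-only opcodes are
 * rejected up front, DMA is armed when channels are available, and a
 * CMD12 stop is issued after multi-block transfers.
 */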
static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state != STATE_IDLE) {
                spin_unlock_irqrestore(&host->lock, flags);
                mrq->cmd->error = -EAGAIN;
                mmc_request_done(mmc, mrq);
                return;
        }

        host->state = STATE_REQUEST;
        spin_unlock_irqrestore(&host->lock, flags);

        switch (mrq->cmd->opcode) {
        /* MMCIF does not support SD/SDIO commands */
        case SD_IO_SEND_OP_COND:
        case MMC_APP_CMD:
                host->state = STATE_IDLE;
                mrq->cmd->error = -ETIMEDOUT;
                mmc_request_done(mmc, mrq);
                return;
        case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */
                if (!mrq->data) {
                        /* send_if_cond cmd (not supported) */
                        host->state = STATE_IDLE;
                        mrq->cmd->error = -ETIMEDOUT;
                        mmc_request_done(mmc, mrq);
                        return;
                }
                break;
        default:
                break;
        }
        host->data = mrq->data;
        if (mrq->data) {
                if (mrq->data->flags & MMC_DATA_READ) {
                        if (host->chan_rx)
                                sh_mmcif_start_dma_rx(host);
                } else {
                        if (host->chan_tx)
                                sh_mmcif_start_dma_tx(host);
                }
        }
        sh_mmcif_start_cmd(host, mrq, mrq->cmd);
        host->data = NULL;

        if (!mrq->cmd->error && mrq->stop)
                sh_mmcif_stop_cmd(host, mrq, mrq->stop);
        host->state = STATE_IDLE;
        mmc_request_done(mmc, mrq);
}

static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;
        unsigned long flags;

        spin_lock_irqsave(&host->lock, flags);
        if (host->state != STATE_IDLE) {
                spin_unlock_irqrestore(&host->lock, flags);
                return;
        }

        host->state = STATE_IOS;
        spin_unlock_irqrestore(&host->lock, flags);

        if (ios->power_mode == MMC_POWER_UP) {
                if (!host->card_present) {
                        /* See if we also get DMA */
                        sh_mmcif_request_dma(host, host->pd->dev.platform_data);
                        host->card_present = true;
                }
        } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {
                /* clock stop */
                sh_mmcif_clock_control(host, 0);
                if (ios->power_mode == MMC_POWER_OFF) {
                        if (host->card_present) {
                                sh_mmcif_release_dma(host);
                                host->card_present = false;
                        }
                }
                if (host->power) {
                        pm_runtime_put(&host->pd->dev);
                        host->power = false;
                        if (p->down_pwr && ios->power_mode == MMC_POWER_OFF)
                                p->down_pwr(host->pd);
                }
                host->state = STATE_IDLE;
                return;
        }

        if (ios->clock) {
                if (!host->power) {
                        if (p->set_pwr)
                                p->set_pwr(host->pd, ios->power_mode);
                        pm_runtime_get_sync(&host->pd->dev);
                        host->power = true;
                        sh_mmcif_sync_reset(host);
                }
                sh_mmcif_clock_control(host, ios->clock);
        }

        host->bus_width = ios->bus_width;
        host->state = STATE_IDLE;
}

static int sh_mmcif_get_cd(struct mmc_host *mmc)
{
        struct sh_mmcif_host *host = mmc_priv(mmc);
        struct sh_mmcif_plat_data *p = host->pd->dev.platform_data;

        if (!p->get_cd)
                return -ENOSYS;
        else
                return p->get_cd(host->pd);
}

static struct mmc_host_ops sh_mmcif_ops = {
        .request        = sh_mmcif_request,
        .set_ios        = sh_mmcif_set_ios,
        .get_cd         = sh_mmcif_get_cd,
};

static void sh_mmcif_detect(struct mmc_host *mmc)
{
        mmc_detect_change(mmc, 0);
}

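/*
 * Interrupt handler: acknowledge and mask the active event, flag any
 * error for the request thread and complete intr_wait.
 */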
static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)
{
        struct sh_mmcif_host *host = dev_id;
        u32 state;
        int err = 0;

        state = sh_mmcif_readl(host->addr, MMCIF_CE_INT);

        if (state & INT_RBSYE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT,
                                ~(INT_RBSYE | INT_CRSPE));
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE);
        } else if (state & INT_CRSPE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE);
        } else if (state & INT_BUFREN) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN);
        } else if (state & INT_BUFWEN) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN);
        } else if (state & INT_CMD12DRE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT,
                        ~(INT_CMD12DRE | INT_CMD12RBE |
                          INT_CMD12CRE | INT_BUFRE));
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE);
        } else if (state & INT_BUFRE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE);
        } else if (state & INT_DTRANE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE);
        } else if (state & INT_CMD12RBE) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT,
                                ~(INT_CMD12RBE | INT_CMD12CRE));
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE);
        } else if (state & INT_ERR_STS) {
                /* err interrupts */
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
                err = 1;
        } else {
                dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state);
                sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state);
                sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state);
                err = 1;
        }
        if (err) {
                host->sd_error = true;
                dev_dbg(&host->pd->dev, "int err state = %08x\n", state);
        }
        if (state & ~(INT_CMD12RBE | INT_CMD12CRE))
                complete(&host->intr_wait);
        else
                dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);

        return IRQ_HANDLED;
}

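/*
 * Probe: map the register window, enable the interface clock, register
 * the MMC host and hook up the error and normal interrupt lines.
 */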
static int __devinit sh_mmcif_probe(struct platform_device *pdev)
{
        int ret = 0, irq[2];
        struct mmc_host *mmc;
        struct sh_mmcif_host *host;
        struct sh_mmcif_plat_data *pd;
        struct resource *res;
        void __iomem *reg;
        char clk_name[8];

        irq[0] = platform_get_irq(pdev, 0);
        irq[1] = platform_get_irq(pdev, 1);
        if (irq[0] < 0 || irq[1] < 0) {
                dev_err(&pdev->dev, "Get irq error\n");
                return -ENXIO;
        }
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res) {
                dev_err(&pdev->dev, "platform_get_resource error.\n");
                return -ENXIO;
        }
        reg = ioremap(res->start, resource_size(res));
        if (!reg) {
                dev_err(&pdev->dev, "ioremap error.\n");
                return -ENOMEM;
        }
        pd = pdev->dev.platform_data;
        if (!pd) {
                dev_err(&pdev->dev, "sh_mmcif plat data error.\n");
                ret = -ENXIO;
                goto clean_up;
        }
        mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);
        if (!mmc) {
                ret = -ENOMEM;
                goto clean_up;
        }
        host            = mmc_priv(mmc);
        host->mmc       = mmc;
        host->addr      = reg;
        host->timeout   = 1000;

        snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id);
        host->hclk = clk_get(&pdev->dev, clk_name);
        if (IS_ERR(host->hclk)) {
                dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name);
                ret = PTR_ERR(host->hclk);
                goto clean_up1;
        }
        clk_enable(host->hclk);
        host->clk = clk_get_rate(host->hclk);
        host->pd = pdev;

        init_completion(&host->intr_wait);
        spin_lock_init(&host->lock);

        mmc->ops = &sh_mmcif_ops;
        mmc->f_max = host->clk;
        /* close to 400kHz */
        if (mmc->f_max < 51200000)
                mmc->f_min = mmc->f_max / 128;
        else if (mmc->f_max < 102400000)
                mmc->f_min = mmc->f_max / 256;
        else
                mmc->f_min = mmc->f_max / 512;
        if (pd->ocr)
                mmc->ocr_avail = pd->ocr;
        mmc->caps = MMC_CAP_MMC_HIGHSPEED;
        if (pd->caps)
                mmc->caps |= pd->caps;
        mmc->max_segs = 32;
        mmc->max_blk_size = 512;
        mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs;
        mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;
        mmc->max_seg_size = mmc->max_req_size;

        sh_mmcif_sync_reset(host);
        platform_set_drvdata(pdev, host);

        pm_runtime_enable(&pdev->dev);
        host->power = false;

        ret = pm_runtime_resume(&pdev->dev);
        if (ret < 0)
                goto clean_up2;

        mmc_add_host(mmc);

        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host);
        if (ret) {
                dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n");
                goto clean_up3;
        }
        ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host);
        if (ret) {
                free_irq(irq[0], host);
                dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n");
                goto clean_up3;
        }

        sh_mmcif_detect(host->mmc);

        dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION);
        dev_dbg(&pdev->dev, "chip ver H'%04x\n",
                sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);
        return ret;

clean_up3:
        mmc_remove_host(mmc);
        pm_runtime_suspend(&pdev->dev);
clean_up2:
        pm_runtime_disable(&pdev->dev);
        clk_disable(host->hclk);
clean_up1:
        mmc_free_host(mmc);
clean_up:
        if (reg)
                iounmap(reg);
        return ret;
}

static int __devexit sh_mmcif_remove(struct platform_device *pdev)
{
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);
        int irq[2];

        pm_runtime_get_sync(&pdev->dev);

        mmc_remove_host(host->mmc);
        sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);

        if (host->addr)
                iounmap(host->addr);

        irq[0] = platform_get_irq(pdev, 0);
        irq[1] = platform_get_irq(pdev, 1);

        free_irq(irq[0], host);
        free_irq(irq[1], host);

        platform_set_drvdata(pdev, NULL);

        clk_disable(host->hclk);
        mmc_free_host(host->mmc);
        pm_runtime_put_sync(&pdev->dev);
        pm_runtime_disable(&pdev->dev);

        return 0;
}

#ifdef CONFIG_PM
static int sh_mmcif_suspend(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);
        int ret = mmc_suspend_host(host->mmc);

        if (!ret) {
                sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL);
                clk_disable(host->hclk);
        }

        return ret;
}

static int sh_mmcif_resume(struct device *dev)
{
        struct platform_device *pdev = to_platform_device(dev);
        struct sh_mmcif_host *host = platform_get_drvdata(pdev);

        clk_enable(host->hclk);

        return mmc_resume_host(host->mmc);
}
#else
#define sh_mmcif_suspend        NULL
#define sh_mmcif_resume         NULL
#endif  /* CONFIG_PM */

static const struct dev_pm_ops sh_mmcif_dev_pm_ops = {
        .suspend = sh_mmcif_suspend,
        .resume = sh_mmcif_resume,
};

static struct platform_driver sh_mmcif_driver = {
        .probe          = sh_mmcif_probe,
        .remove         = sh_mmcif_remove,
        .driver         = {
                .name   = DRIVER_NAME,
                .pm     = &sh_mmcif_dev_pm_ops,
        },
};

module_platform_driver(sh_mmcif_driver);

MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>");