drivers/tty/serial/sirfsoc_uart.c
/*
 * Driver for CSR SiRFprimaII onboard UARTs.
 *
 * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
 *
 * Licensed under GPLv2 or later.
 */

#include <linux/module.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/sysrq.h>
#include <linux/console.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_core.h>
#include <linux/serial.h>
#include <linux/clk.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/of_gpio.h>
#include <linux/dmaengine.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/sirfsoc_dma.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>

#include "sirfsoc_uart.h"

static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count);
static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count);
static struct uart_driver sirfsoc_uart_drv;

static void sirfsoc_uart_tx_dma_complete_callback(void *param);
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port);
static void sirfsoc_uart_rx_dma_complete_callback(void *param);
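/*
 * Pre-computed divisor register values for a set of standard baud rates.
 * They are only consulted when the UART io clock runs at 150 MHz (see
 * sirfsoc_uart_set_termios()); for other clock rates the divisor is
 * calculated at run time.
 */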
static const struct sirfsoc_baudrate_to_regv baudrate_to_regv[] = {
        {4000000, 2359296},
        {3500000, 1310721},
        {3000000, 1572865},
        {2500000, 1245186},
        {2000000, 1572866},
        {1500000, 1245188},
        {1152000, 1638404},
        {1000000, 1572869},
        {921600, 1114120},
        {576000, 1245196},
        {500000, 1245198},
        {460800, 1572876},
        {230400, 1310750},
        {115200, 1310781},
        {57600, 1310843},
        {38400, 1114328},
        {19200, 1114545},
        {9600, 1114979},
};

static struct sirfsoc_uart_port sirfsoc_uart_ports[SIRFSOC_UART_NR] = {
        [0] = {
                .port = {
                        .iotype         = UPIO_MEM,
                        .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 0,
                },
        },
        [1] = {
                .port = {
                        .iotype         = UPIO_MEM,
                        .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 1,
                },
        },
        [2] = {
                .port = {
                        .iotype         = UPIO_MEM,
                        .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 2,
                },
        },
        [3] = {
                .port = {
                        .iotype         = UPIO_MEM,
                        .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 3,
                },
        },
        [4] = {
                .port = {
                        .iotype         = UPIO_MEM,
                        .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 4,
                },
        },
        [5] = {
                .port = {
                        .iotype         = UPIO_MEM,
                        .flags          = UPF_BOOT_AUTOCONF,
                        .line           = 5,
                },
        },
};

static inline struct sirfsoc_uart_port *to_sirfport(struct uart_port *port)
{
        return container_of(port, struct sirfsoc_uart_port, port);
}

static inline unsigned int sirfsoc_uart_tx_empty(struct uart_port *port)
{
        unsigned long reg;
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
        reg = rd_regl(port, ureg->sirfsoc_tx_fifo_status);

        return (reg & ufifo_st->ff_empty(port->line)) ? TIOCSER_TEMT : 0;
}

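/*
 * Modem control lines: only CTS is really reported. On a real UART the
 * CTS state is read from the AFC control register; on a USP based port
 * it comes from the dedicated CTS GPIO.
 */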
static unsigned int sirfsoc_uart_get_mctrl(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
                goto cts_asserted;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (!(rd_regl(port, ureg->sirfsoc_afc_ctrl) &
                                                SIRFUART_AFC_CTS_STATUS))
                        goto cts_asserted;
                else
                        goto cts_deasserted;
        } else {
                if (!gpio_get_value(sirfport->cts_gpio))
                        goto cts_asserted;
                else
                        goto cts_deasserted;
        }
cts_deasserted:
        return TIOCM_CAR | TIOCM_DSR;
cts_asserted:
        return TIOCM_CAR | TIOCM_DSR | TIOCM_CTS;
}

static void sirfsoc_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        unsigned int assert = mctrl & TIOCM_RTS;
        unsigned int val = assert ? SIRFUART_AFC_CTRL_RX_THD : 0x0;
        unsigned int current_val;

        if (!sirfport->hw_flow_ctrl || !sirfport->ms_enabled)
                return;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                current_val = rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0xFF;
                val |= current_val;
                wr_regl(port, ureg->sirfsoc_afc_ctrl, val);
        } else {
                if (!val)
                        gpio_set_value(sirfport->rts_gpio, 1);
                else
                        gpio_set_value(sirfport->rts_gpio, 0);
        }
}

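/*
 * Stop transmission: pause the TX DMA channel if a transfer is running,
 * otherwise just mask the TX FIFO empty interrupt.
 */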
static void sirfsoc_uart_stop_tx(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
                if (sirfport->tx_dma_state == TX_DMA_RUNNING) {
                        dmaengine_pause(sirfport->tx_dma_chan);
                        sirfport->tx_dma_state = TX_DMA_PAUSE;
                } else {
                        if (!sirfport->is_marco)
                                wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~uint_en->sirfsoc_txfifo_empty_en);
                        else
                                wr_regl(port, SIRFUART_INT_EN_CLR,
                                uint_en->sirfsoc_txfifo_empty_en);
                }
        } else {
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~uint_en->sirfsoc_txfifo_empty_en);
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                uint_en->sirfsoc_txfifo_empty_en);
        }
}

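/*
 * Push out pending TX data with DMA. Unaligned leading/trailing bytes are
 * sent by PIO, the 4-byte aligned middle part by the DMA engine (see the
 * comment on the alignment requirement below).
 */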
static void sirfsoc_uart_tx_with_dma(struct sirfsoc_uart_port *sirfport)
{
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct circ_buf *xmit = &port->state->xmit;
        unsigned long tran_size;
        unsigned long tran_start;
        unsigned long pio_tx_size;

        tran_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE);
        tran_start = (unsigned long)(xmit->buf + xmit->tail);
        if (uart_circ_empty(xmit) || uart_tx_stopped(port) ||
                        !tran_size)
                return;
        if (sirfport->tx_dma_state == TX_DMA_PAUSE) {
                dmaengine_resume(sirfport->tx_dma_chan);
                return;
        }
        if (sirfport->tx_dma_state == TX_DMA_RUNNING)
                return;
        if (!sirfport->is_marco)
                wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg)&
                                ~(uint_en->sirfsoc_txfifo_empty_en));
        else
                wr_regl(port, SIRFUART_INT_EN_CLR,
                                uint_en->sirfsoc_txfifo_empty_en);
        /*
         * DMA requires both the buffer address and the buffer length to be
         * 4-byte aligned, so fall back to PIO in two cases:
         * 1. if the address is not 4-byte aligned, send the first 1~3 bytes
         *    by PIO and hand the remaining aligned part to DMA
         * 2. if the buffer length is not 4-byte aligned, send the aligned
         *    part by DMA first and the remaining 1~3 bytes by PIO
         */
        if (tran_size < 4 || BYTES_TO_ALIGN(tran_start)) {
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
                wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
                        rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)|
                        SIRFUART_IO_MODE);
                if (BYTES_TO_ALIGN(tran_start)) {
                        pio_tx_size = sirfsoc_uart_pio_tx_chars(sirfport,
                                BYTES_TO_ALIGN(tran_start));
                        tran_size -= pio_tx_size;
                }
                if (tran_size < 4)
                        sirfsoc_uart_pio_tx_chars(sirfport, tran_size);
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg)|
                                uint_en->sirfsoc_txfifo_empty_en);
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                uint_en->sirfsoc_txfifo_empty_en);
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
        } else {
                /* tx transfer mode switch into dma mode */
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_STOP);
                wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
                        rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl)&
                        ~SIRFUART_IO_MODE);
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
                tran_size &= ~(0x3);

                sirfport->tx_dma_addr = dma_map_single(port->dev,
                        xmit->buf + xmit->tail,
                        tran_size, DMA_TO_DEVICE);
                sirfport->tx_dma_desc = dmaengine_prep_slave_single(
                        sirfport->tx_dma_chan, sirfport->tx_dma_addr,
                        tran_size, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
                if (!sirfport->tx_dma_desc) {
                        dev_err(port->dev, "DMA prep slave single fail\n");
                        return;
                }
                sirfport->tx_dma_desc->callback =
                        sirfsoc_uart_tx_dma_complete_callback;
                sirfport->tx_dma_desc->callback_param = (void *)sirfport;
                sirfport->transfer_size = tran_size;

                dmaengine_submit(sirfport->tx_dma_desc);
                dma_async_issue_pending(sirfport->tx_dma_chan);
                sirfport->tx_dma_state = TX_DMA_RUNNING;
        }
}

static void sirfsoc_uart_start_tx(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
                sirfsoc_uart_tx_with_dma(sirfport);
        else {
                sirfsoc_uart_pio_tx_chars(sirfport, 1);
                wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_START);
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        rd_regl(port, ureg->sirfsoc_int_en_reg)|
                                        uint_en->sirfsoc_txfifo_empty_en);
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        uint_en->sirfsoc_txfifo_empty_en);
        }
}

static void sirfsoc_uart_stop_rx(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
        if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~(SIRFUART_RX_DMA_INT_EN(port, uint_en) |
                                uint_en->sirfsoc_rx_done_en));
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        SIRFUART_RX_DMA_INT_EN(port, uint_en)|
                                        uint_en->sirfsoc_rx_done_en);
                dmaengine_terminate_all(sirfport->rx_dma_chan);
        } else {
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg)&
                                ~(SIRFUART_RX_IO_INT_EN(port, uint_en)));
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        SIRFUART_RX_IO_INT_EN(port, uint_en));
        }
}

static void sirfsoc_uart_disable_ms(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        if (!sirfport->hw_flow_ctrl)
                return;
        sirfport->ms_enabled = false;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                wr_regl(port, ureg->sirfsoc_afc_ctrl,
                                rd_regl(port, ureg->sirfsoc_afc_ctrl) & ~0x3FF);
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        rd_regl(port, ureg->sirfsoc_int_en_reg)&
                                        ~uint_en->sirfsoc_cts_en);
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        uint_en->sirfsoc_cts_en);
        } else
                disable_irq(gpio_to_irq(sirfport->cts_gpio));
}

static irqreturn_t sirfsoc_uart_usp_cts_handler(int irq, void *dev_id)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
        struct uart_port *port = &sirfport->port;
        if (gpio_is_valid(sirfport->cts_gpio) && sirfport->ms_enabled)
                uart_handle_cts_change(port,
                                !gpio_get_value(sirfport->cts_gpio));
        return IRQ_HANDLED;
}

static void sirfsoc_uart_enable_ms(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        if (!sirfport->hw_flow_ctrl)
                return;
        sirfport->ms_enabled = true;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                wr_regl(port, ureg->sirfsoc_afc_ctrl,
                                rd_regl(port, ureg->sirfsoc_afc_ctrl) |
                                SIRFUART_AFC_TX_EN | SIRFUART_AFC_RX_EN);
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        rd_regl(port, ureg->sirfsoc_int_en_reg)
                                        | uint_en->sirfsoc_cts_en);
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        uint_en->sirfsoc_cts_en);
        } else
                enable_irq(gpio_to_irq(sirfport->cts_gpio));
}

static void sirfsoc_uart_break_ctl(struct uart_port *port, int break_state)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                unsigned long ulcon = rd_regl(port, ureg->sirfsoc_line_ctrl);
                if (break_state)
                        ulcon |= SIRFUART_SET_BREAK;
                else
                        ulcon &= ~SIRFUART_SET_BREAK;
                wr_regl(port, ureg->sirfsoc_line_ctrl, ulcon);
        }
}

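/* Drain up to max_rx_count characters from the RX FIFO in PIO mode. */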
static unsigned int
sirfsoc_uart_pio_rx_chars(struct uart_port *port, unsigned int max_rx_count)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
        unsigned int ch, rx_count = 0;
        struct tty_struct *tty;
        tty = tty_port_tty_get(&port->state->port);
        if (!tty)
                return -ENODEV;
        while (!(rd_regl(port, ureg->sirfsoc_rx_fifo_status) &
                                        ufifo_st->ff_empty(port->line))) {
                ch = rd_regl(port, ureg->sirfsoc_rx_fifo_data) |
                        SIRFUART_DUMMY_READ;
                if (unlikely(uart_handle_sysrq_char(port, ch)))
                        continue;
                uart_insert_char(port, 0, 0, ch, TTY_NORMAL);
                rx_count++;
                if (rx_count >= max_rx_count)
                        break;
        }

        sirfport->rx_io_count += rx_count;
        port->icount.rx += rx_count;

        spin_unlock(&port->lock);
        tty_flip_buffer_push(&port->state->port);
        spin_lock(&port->lock);

        return rx_count;
}

static unsigned int
sirfsoc_uart_pio_tx_chars(struct sirfsoc_uart_port *sirfport, int count)
{
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
        struct circ_buf *xmit = &port->state->xmit;
        unsigned int num_tx = 0;
        while (!uart_circ_empty(xmit) &&
                !(rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
                                        ufifo_st->ff_full(port->line)) &&
                count--) {
                wr_regl(port, ureg->sirfsoc_tx_fifo_data,
                                xmit->buf[xmit->tail]);
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
                port->icount.tx++;
                num_tx++;
        }
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
        return num_tx;
}

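/*
 * TX DMA completion: account for the transferred bytes, unmap the DMA
 * buffer and try to start the next transfer.
 */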
static void sirfsoc_uart_tx_dma_complete_callback(void *param)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
        struct uart_port *port = &sirfport->port;
        struct circ_buf *xmit = &port->state->xmit;
        unsigned long flags;

        xmit->tail = (xmit->tail + sirfport->transfer_size) &
                                (UART_XMIT_SIZE - 1);
        port->icount.tx += sirfport->transfer_size;
        if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                uart_write_wakeup(port);
        if (sirfport->tx_dma_addr)
                dma_unmap_single(port->dev, sirfport->tx_dma_addr,
                                sirfport->transfer_size, DMA_TO_DEVICE);
        spin_lock_irqsave(&sirfport->tx_lock, flags);
        sirfport->tx_dma_state = TX_DMA_IDLE;
        sirfsoc_uart_tx_with_dma(sirfport);
        spin_unlock_irqrestore(&sirfport->tx_lock, flags);
}

static void sirfsoc_uart_insert_rx_buf_to_tty(
                struct sirfsoc_uart_port *sirfport, int count)
{
        struct uart_port *port = &sirfport->port;
        struct tty_port *tport = &port->state->port;
        int inserted;

        inserted = tty_insert_flip_string(tport,
                sirfport->rx_dma_items[sirfport->rx_completed].xmit.buf, count);
        port->icount.rx += inserted;
        tty_flip_buffer_push(tport);
}

static void sirfsoc_rx_submit_one_dma_desc(struct uart_port *port, int index)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);

        sirfport->rx_dma_items[index].xmit.tail =
                sirfport->rx_dma_items[index].xmit.head = 0;
        sirfport->rx_dma_items[index].desc =
                dmaengine_prep_slave_single(sirfport->rx_dma_chan,
                sirfport->rx_dma_items[index].dma_addr, SIRFSOC_RX_DMA_BUF_SIZE,
                DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
        if (!sirfport->rx_dma_items[index].desc) {
                dev_err(port->dev, "DMA slave single fail\n");
                return;
        }
        sirfport->rx_dma_items[index].desc->callback =
                sirfsoc_uart_rx_dma_complete_callback;
        sirfport->rx_dma_items[index].desc->callback_param = sirfport;
        sirfport->rx_dma_items[index].cookie =
                dmaengine_submit(sirfport->rx_dma_items[index].desc);
        dma_async_issue_pending(sirfport->rx_dma_chan);
}

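/*
 * RX timeout tasklet: push the completed DMA loop buffers and the partly
 * filled one to the tty layer, collect the last few characters by PIO and
 * then re-arm RX DMA.
 */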
static void sirfsoc_rx_tmo_process_tl(unsigned long param)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
        unsigned int count;
        unsigned long flags;
        struct dma_tx_state tx_state;

        spin_lock_irqsave(&sirfport->rx_lock, flags);
        while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
                sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
                sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
                                        SIRFSOC_RX_DMA_BUF_SIZE);
                sirfport->rx_completed++;
                sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
        }
        count = CIRC_CNT(sirfport->rx_dma_items[sirfport->rx_issued].xmit.head,
                sirfport->rx_dma_items[sirfport->rx_issued].xmit.tail,
                SIRFSOC_RX_DMA_BUF_SIZE);
        if (count > 0)
                sirfsoc_uart_insert_rx_buf_to_tty(sirfport, count);
        wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
                        rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
                        SIRFUART_IO_MODE);
        sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
        spin_unlock_irqrestore(&sirfport->rx_lock, flags);
        if (sirfport->rx_io_count == 4) {
                spin_lock_irqsave(&sirfport->rx_lock, flags);
                sirfport->rx_io_count = 0;
                wr_regl(port, ureg->sirfsoc_int_st_reg,
                                uint_st->sirfsoc_rx_done);
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~(uint_en->sirfsoc_rx_done_en));
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        uint_en->sirfsoc_rx_done_en);
                spin_unlock_irqrestore(&sirfport->rx_lock, flags);

                sirfsoc_uart_start_next_rx_dma(port);
        } else {
                spin_lock_irqsave(&sirfport->rx_lock, flags);
                wr_regl(port, ureg->sirfsoc_int_st_reg,
                                uint_st->sirfsoc_rx_done);
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) |
                                (uint_en->sirfsoc_rx_done_en));
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                        uint_en->sirfsoc_rx_done_en);
                spin_unlock_irqrestore(&sirfport->rx_lock, flags);
        }
}

static void sirfsoc_uart_handle_rx_tmo(struct sirfsoc_uart_port *sirfport)
{
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct dma_tx_state tx_state;
        spin_lock(&sirfport->rx_lock);

        dmaengine_tx_status(sirfport->rx_dma_chan,
                sirfport->rx_dma_items[sirfport->rx_issued].cookie, &tx_state);
        dmaengine_terminate_all(sirfport->rx_dma_chan);
        sirfport->rx_dma_items[sirfport->rx_issued].xmit.head =
                SIRFSOC_RX_DMA_BUF_SIZE - tx_state.residue;
        if (!sirfport->is_marco)
                wr_regl(port, ureg->sirfsoc_int_en_reg,
                        rd_regl(port, ureg->sirfsoc_int_en_reg) &
                        ~(uint_en->sirfsoc_rx_timeout_en));
        else
                wr_regl(port, SIRFUART_INT_EN_CLR,
                                uint_en->sirfsoc_rx_timeout_en);
        spin_unlock(&sirfport->rx_lock);
        tasklet_schedule(&sirfport->rx_tmo_process_tasklet);
}

static void sirfsoc_uart_handle_rx_done(struct sirfsoc_uart_port *sirfport)
{
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;

        sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
        if (sirfport->rx_io_count == 4) {
                sirfport->rx_io_count = 0;
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                ~(uint_en->sirfsoc_rx_done_en));
                else
                        wr_regl(port, SIRFUART_INT_EN_CLR,
                                        uint_en->sirfsoc_rx_done_en);
                wr_regl(port, ureg->sirfsoc_int_st_reg,
                                uint_st->sirfsoc_rx_timeout);
                sirfsoc_uart_start_next_rx_dma(port);
        }
}

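/*
 * Main interrupt handler: handles RX error conditions, CTS changes,
 * RX data (DMA timeout/done or PIO) and TX FIFO empty events.
 */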
static irqreturn_t sirfsoc_uart_isr(int irq, void *dev_id)
{
        unsigned long intr_status;
        unsigned long cts_status;
        unsigned long flag = TTY_NORMAL;
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)dev_id;
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
        struct sirfsoc_int_status *uint_st = &sirfport->uart_reg->uart_int_st;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        struct uart_state *state = port->state;
        struct circ_buf *xmit = &port->state->xmit;
        spin_lock(&port->lock);
        intr_status = rd_regl(port, ureg->sirfsoc_int_st_reg);
        wr_regl(port, ureg->sirfsoc_int_st_reg, intr_status);
        intr_status &= rd_regl(port, ureg->sirfsoc_int_en_reg);
        if (unlikely(intr_status & (SIRFUART_ERR_INT_STAT(port, uint_st)))) {
                if (intr_status & uint_st->sirfsoc_rxd_brk) {
                        port->icount.brk++;
                        if (uart_handle_break(port))
                                goto recv_char;
                }
                if (intr_status & uint_st->sirfsoc_rx_oflow)
                        port->icount.overrun++;
                if (intr_status & uint_st->sirfsoc_frm_err) {
                        port->icount.frame++;
                        flag = TTY_FRAME;
                }
                if (intr_status & uint_st->sirfsoc_parity_err)
                        flag = TTY_PARITY;
                wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
                wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
                wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
                intr_status &= port->read_status_mask;
                uart_insert_char(port, intr_status,
                                        uint_en->sirfsoc_rx_oflow_en, 0, flag);
                tty_flip_buffer_push(&state->port);
        }
recv_char:
        if ((sirfport->uart_reg->uart_type == SIRF_REAL_UART) &&
                        (intr_status & SIRFUART_CTS_INT_ST(uint_st)) &&
                        !sirfport->tx_dma_state) {
                cts_status = rd_regl(port, ureg->sirfsoc_afc_ctrl) &
                                        SIRFUART_AFC_CTS_STATUS;
                if (cts_status != 0)
                        cts_status = 0;
                else
                        cts_status = 1;
                uart_handle_cts_change(port, cts_status);
                wake_up_interruptible(&state->port.delta_msr_wait);
        }
        if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
                if (intr_status & uint_st->sirfsoc_rx_timeout)
                        sirfsoc_uart_handle_rx_tmo(sirfport);
                if (intr_status & uint_st->sirfsoc_rx_done)
                        sirfsoc_uart_handle_rx_done(sirfport);
        } else {
                if (intr_status & SIRFUART_RX_IO_INT_ST(uint_st))
                        sirfsoc_uart_pio_rx_chars(port,
                                        SIRFSOC_UART_IO_RX_MAX_CNT);
        }
        if (intr_status & uint_st->sirfsoc_txfifo_empty) {
                if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
                        sirfsoc_uart_tx_with_dma(sirfport);
                else {
                        if (uart_circ_empty(xmit) || uart_tx_stopped(port)) {
                                spin_unlock(&port->lock);
                                return IRQ_HANDLED;
                        } else {
                                sirfsoc_uart_pio_tx_chars(sirfport,
                                        SIRFSOC_UART_IO_TX_REASONABLE_CNT);
                                if ((uart_circ_empty(xmit)) &&
                                (rd_regl(port, ureg->sirfsoc_tx_fifo_status) &
                                ufifo_st->ff_empty(port->line)))
                                        sirfsoc_uart_stop_tx(port);
                        }
                }
        }
        spin_unlock(&port->lock);
        return IRQ_HANDLED;
}

static void sirfsoc_uart_rx_dma_complete_tl(unsigned long param)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
        struct uart_port *port = &sirfport->port;
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        unsigned long flags;
        struct dma_tx_state tx_state;
        spin_lock_irqsave(&sirfport->rx_lock, flags);
        while (DMA_COMPLETE == dmaengine_tx_status(sirfport->rx_dma_chan,
                        sirfport->rx_dma_items[sirfport->rx_completed].cookie, &tx_state)) {
                sirfsoc_uart_insert_rx_buf_to_tty(sirfport,
                                        SIRFSOC_RX_DMA_BUF_SIZE);
                if (rd_regl(port, ureg->sirfsoc_int_en_reg) &
                                uint_en->sirfsoc_rx_timeout_en)
                        sirfsoc_rx_submit_one_dma_desc(port,
                                        sirfport->rx_completed++);
                else
                        sirfport->rx_completed++;
                sirfport->rx_completed %= SIRFSOC_RX_LOOP_BUF_CNT;
        }
        spin_unlock_irqrestore(&sirfport->rx_lock, flags);
}

static void sirfsoc_uart_rx_dma_complete_callback(void *param)
{
        struct sirfsoc_uart_port *sirfport = (struct sirfsoc_uart_port *)param;
        spin_lock(&sirfport->rx_lock);
        sirfport->rx_issued++;
        sirfport->rx_issued %= SIRFSOC_RX_LOOP_BUF_CNT;
        spin_unlock(&sirfport->rx_lock);
        tasklet_schedule(&sirfport->rx_dma_complete_tasklet);
}

/*
 * Re-arm RX DMA: submit descriptors for all loop buffers to the dmaengine
 * and enable the RX DMA interrupts.
 */
static void sirfsoc_uart_start_next_rx_dma(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        unsigned long flags;
        int i;
        spin_lock_irqsave(&sirfport->rx_lock, flags);
        sirfport->rx_io_count = 0;
        wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
                rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) &
                ~SIRFUART_IO_MODE);
        spin_unlock_irqrestore(&sirfport->rx_lock, flags);
        for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
                sirfsoc_rx_submit_one_dma_desc(port, i);
        sirfport->rx_completed = sirfport->rx_issued = 0;
        spin_lock_irqsave(&sirfport->rx_lock, flags);
        if (!sirfport->is_marco)
                wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) |
                                SIRFUART_RX_DMA_INT_EN(port, uint_en));
        else
                wr_regl(port, ureg->sirfsoc_int_en_reg,
                        SIRFUART_RX_DMA_INT_EN(port, uint_en));
        spin_unlock_irqrestore(&sirfport->rx_lock, flags);
}

static void sirfsoc_uart_start_rx(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;

        sirfport->rx_io_count = 0;
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_START);
        if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
                sirfsoc_uart_start_next_rx_dma(port);
        else {
                if (!sirfport->is_marco)
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                rd_regl(port, ureg->sirfsoc_int_en_reg) |
                                SIRFUART_RX_IO_INT_EN(port, uint_en));
                else
                        wr_regl(port, ureg->sirfsoc_int_en_reg,
                                SIRFUART_RX_IO_INT_EN(port, uint_en));
        }
}

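/*
 * For USP based ports: find the sample divisor and io clock divisor that
 * give the smallest deviation from the requested rate. The sample divisor
 * is returned through *sample_reg, the io clock divisor as return value.
 */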
static unsigned int
sirfsoc_usp_calc_sample_div(unsigned long set_rate,
                unsigned long ioclk_rate, unsigned long *sample_reg)
{
        unsigned long min_delta = ~0UL;
        unsigned short sample_div;
        unsigned long ioclk_div = 0;
        long temp_delta; /* signed: the rounded product may exceed ioclk_rate */

        for (sample_div = SIRF_MIN_SAMPLE_DIV;
                        sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
                temp_delta = ioclk_rate -
                (ioclk_rate + (set_rate * sample_div) / 2)
                / (set_rate * sample_div) * set_rate * sample_div;

                temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
                if (temp_delta < min_delta) {
                        ioclk_div = (2 * ioclk_rate /
                                (set_rate * sample_div) + 1) / 2 - 1;
                        if (ioclk_div > SIRF_IOCLK_DIV_MAX)
                                continue;
                        min_delta = temp_delta;
                        *sample_reg = sample_div;
                        if (!temp_delta)
                                break;
                }
        }
        return ioclk_div;
}

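/*
 * For real UARTs: compute the divisor register value for the requested
 * baud rate and report the baud rate actually achieved through *set_baud.
 */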
static unsigned int
sirfsoc_uart_calc_sample_div(unsigned long baud_rate,
                        unsigned long ioclk_rate, unsigned long *set_baud)
{
        unsigned long min_delta = ~0UL;
        unsigned short sample_div;
        unsigned int regv = 0;
        unsigned long ioclk_div;
        unsigned long baud_tmp;
        int temp_delta;

        for (sample_div = SIRF_MIN_SAMPLE_DIV;
                        sample_div <= SIRF_MAX_SAMPLE_DIV; sample_div++) {
                ioclk_div = (ioclk_rate / (baud_rate * (sample_div + 1))) - 1;
                if (ioclk_div > SIRF_IOCLK_DIV_MAX)
                        continue;
                baud_tmp = ioclk_rate / ((ioclk_div + 1) * (sample_div + 1));
                temp_delta = baud_tmp - baud_rate;
                temp_delta = (temp_delta > 0) ? temp_delta : -temp_delta;
                if (temp_delta < min_delta) {
                        regv = regv & (~SIRF_IOCLK_DIV_MASK);
                        regv = regv | ioclk_div;
                        regv = regv & (~SIRF_SAMPLE_DIV_MASK);
                        regv = regv | (sample_div << SIRF_SAMPLE_DIV_SHIFT);
                        min_delta = temp_delta;
                        *set_baud = baud_tmp;
                }
        }
        return regv;
}

static void sirfsoc_uart_set_termios(struct uart_port *port,
                                       struct ktermios *termios,
                                       struct ktermios *old)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
        struct sirfsoc_int_en *uint_en = &sirfport->uart_reg->uart_int_en;
        unsigned long   config_reg = 0;
        unsigned long   baud_rate;
        unsigned long   set_baud;
        unsigned long   flags;
        unsigned long   ic;
        unsigned int    clk_div_reg = 0;
        unsigned long   txfifo_op_reg, ioclk_rate;
        unsigned long   rx_time_out;
        int             threshold_div;
        u32             data_bit_len, stop_bit_len, len_val;
        unsigned long   sample_div_reg = 0xf;
        ioclk_rate      = port->uartclk;

        switch (termios->c_cflag & CSIZE) {
        default:
        case CS8:
                data_bit_len = 8;
                config_reg |= SIRFUART_DATA_BIT_LEN_8;
                break;
        case CS7:
                data_bit_len = 7;
                config_reg |= SIRFUART_DATA_BIT_LEN_7;
                break;
        case CS6:
                data_bit_len = 6;
                config_reg |= SIRFUART_DATA_BIT_LEN_6;
                break;
        case CS5:
                data_bit_len = 5;
                config_reg |= SIRFUART_DATA_BIT_LEN_5;
                break;
        }
        if (termios->c_cflag & CSTOPB) {
                config_reg |= SIRFUART_STOP_BIT_LEN_2;
                stop_bit_len = 2;
        } else
                stop_bit_len = 1;

        spin_lock_irqsave(&port->lock, flags);
        port->read_status_mask = uint_en->sirfsoc_rx_oflow_en;
        port->ignore_status_mask = 0;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (termios->c_iflag & INPCK)
                        port->read_status_mask |= uint_en->sirfsoc_frm_err_en |
                                uint_en->sirfsoc_parity_err_en;
        } else {
                if (termios->c_iflag & INPCK)
                        port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
        }
        if (termios->c_iflag & (BRKINT | PARMRK))
                        port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (termios->c_iflag & IGNPAR)
                        port->ignore_status_mask |=
                                uint_en->sirfsoc_frm_err_en |
                                uint_en->sirfsoc_parity_err_en;
                if (termios->c_cflag & PARENB) {
                        if (termios->c_cflag & CMSPAR) {
                                if (termios->c_cflag & PARODD)
                                        config_reg |= SIRFUART_STICK_BIT_MARK;
                                else
                                        config_reg |= SIRFUART_STICK_BIT_SPACE;
                        } else if (termios->c_cflag & PARODD) {
                                config_reg |= SIRFUART_STICK_BIT_ODD;
                        } else {
                                config_reg |= SIRFUART_STICK_BIT_EVEN;
                        }
                }
        } else {
                if (termios->c_iflag & IGNPAR)
                        port->ignore_status_mask |=
                                uint_en->sirfsoc_frm_err_en;
                if (termios->c_cflag & PARENB)
                        dev_warn(port->dev,
                                        "USP-UART does not support parity\n");
        }
        if (termios->c_iflag & IGNBRK) {
                port->ignore_status_mask |=
                        uint_en->sirfsoc_rxd_brk_en;
                if (termios->c_iflag & IGNPAR)
                        port->ignore_status_mask |=
                                uint_en->sirfsoc_rx_oflow_en;
        }
        if ((termios->c_cflag & CREAD) == 0)
                port->ignore_status_mask |= SIRFUART_DUMMY_READ;
        /* Hardware Flow Control Settings */
        if (UART_ENABLE_MS(port, termios->c_cflag)) {
                if (!sirfport->ms_enabled)
                        sirfsoc_uart_enable_ms(port);
        } else {
                if (sirfport->ms_enabled)
                        sirfsoc_uart_disable_ms(port);
        }
        baud_rate = uart_get_baud_rate(port, termios, old, 0, 4000000);
        if (ioclk_rate == 150000000) {
                for (ic = 0; ic < SIRF_BAUD_RATE_SUPPORT_NR; ic++)
                        if (baud_rate == baudrate_to_regv[ic].baud_rate)
                                clk_div_reg = baudrate_to_regv[ic].reg_val;
        }
        set_baud = baud_rate;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (unlikely(clk_div_reg == 0))
                        clk_div_reg = sirfsoc_uart_calc_sample_div(baud_rate,
                                        ioclk_rate, &set_baud);
                wr_regl(port, ureg->sirfsoc_divisor, clk_div_reg);
        } else {
                clk_div_reg = sirfsoc_usp_calc_sample_div(baud_rate,
                                ioclk_rate, &sample_div_reg);
                sample_div_reg--;
                set_baud = ((ioclk_rate / (clk_div_reg+1) - 1) /
                                (sample_div_reg + 1));
                /* setting usp mode 2 */
                len_val = ((1 << SIRFSOC_USP_MODE2_RXD_DELAY_OFFSET) |
                                (1 << SIRFSOC_USP_MODE2_TXD_DELAY_OFFSET));
                len_val |= ((clk_div_reg & SIRFSOC_USP_MODE2_CLK_DIVISOR_MASK)
                                << SIRFSOC_USP_MODE2_CLK_DIVISOR_OFFSET);
                wr_regl(port, ureg->sirfsoc_mode2, len_val);
        }
        if (tty_termios_baud_rate(termios))
                tty_termios_encode_baud_rate(termios, set_baud, set_baud);
        /* set receive timeout && data bits len */
        rx_time_out = SIRFSOC_UART_RX_TIMEOUT(set_baud, 20000);
        rx_time_out = SIRFUART_RECV_TIMEOUT_VALUE(rx_time_out);
        txfifo_op_reg = rd_regl(port, ureg->sirfsoc_tx_fifo_op);
        wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_STOP);
        wr_regl(port, ureg->sirfsoc_tx_fifo_op,
                        (txfifo_op_reg & ~SIRFUART_FIFO_START));
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                config_reg |= SIRFUART_RECV_TIMEOUT(port, rx_time_out);
                wr_regl(port, ureg->sirfsoc_line_ctrl, config_reg);
        } else {
                /*tx frame ctrl*/
                len_val = (data_bit_len - 1) << SIRFSOC_USP_TX_DATA_LEN_OFFSET;
                len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
                                SIRFSOC_USP_TX_FRAME_LEN_OFFSET;
                len_val |= ((data_bit_len - 1) <<
                                SIRFSOC_USP_TX_SHIFTER_LEN_OFFSET);
                len_val |= (((clk_div_reg & 0xc00) >> 10) <<
                                SIRFSOC_USP_TX_CLK_DIVISOR_OFFSET);
                wr_regl(port, ureg->sirfsoc_tx_frame_ctrl, len_val);
                /*rx frame ctrl*/
                len_val = (data_bit_len - 1) << SIRFSOC_USP_RX_DATA_LEN_OFFSET;
                len_val |= (data_bit_len + 1 + stop_bit_len - 1) <<
                                SIRFSOC_USP_RX_FRAME_LEN_OFFSET;
                len_val |= (data_bit_len - 1) <<
                                SIRFSOC_USP_RX_SHIFTER_LEN_OFFSET;
                len_val |= (((clk_div_reg & 0xf000) >> 12) <<
                                SIRFSOC_USP_RX_CLK_DIVISOR_OFFSET);
                wr_regl(port, ureg->sirfsoc_rx_frame_ctrl, len_val);
                /*async param*/
                wr_regl(port, ureg->sirfsoc_async_param_reg,
                        (SIRFUART_RECV_TIMEOUT(port, rx_time_out)) |
                        (sample_div_reg & SIRFSOC_USP_ASYNC_DIV2_MASK) <<
                        SIRFSOC_USP_ASYNC_DIV2_OFFSET);
        }
        if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no))
                wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_DMA_MODE);
        else
                wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl, SIRFUART_IO_MODE);
        if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
                wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_DMA_MODE);
        else
                wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl, SIRFUART_IO_MODE);
        /* Reset Rx/Tx FIFO Threshold level for proper baudrate */
        if (set_baud < 1000000)
                threshold_div = 1;
        else
                threshold_div = 2;
        wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl,
                                SIRFUART_FIFO_THD(port) / threshold_div);
        wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl,
                                SIRFUART_FIFO_THD(port) / threshold_div);
        txfifo_op_reg |= SIRFUART_FIFO_START;
        wr_regl(port, ureg->sirfsoc_tx_fifo_op, txfifo_op_reg);
        uart_update_timeout(port, termios->c_cflag, set_baud);
        sirfsoc_uart_start_rx(port);
        wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_TX_EN | SIRFUART_RX_EN);
        spin_unlock_irqrestore(&port->lock, flags);
}

static void sirfsoc_uart_pm(struct uart_port *port, unsigned int state,
                              unsigned int oldstate)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        if (!state)
                clk_prepare_enable(sirfport->clk);
        else
                clk_disable_unprepare(sirfport->clk);
}

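/* Request the SiRF DMA channel used for TX and apply the slave config. */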
static unsigned int sirfsoc_uart_init_tx_dma(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        dma_cap_mask_t dma_mask;
        struct dma_slave_config tx_slv_cfg = {
                .dst_maxburst = 2,
        };

        dma_cap_zero(dma_mask);
        dma_cap_set(DMA_SLAVE, dma_mask);
        sirfport->tx_dma_chan = dma_request_channel(dma_mask,
                (dma_filter_fn)sirfsoc_dma_filter_id,
                (void *)sirfport->tx_dma_no);
        if (!sirfport->tx_dma_chan) {
                dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
                                        sirfport->tx_dma_no);
                return -EPROBE_DEFER;
        }
        dmaengine_slave_config(sirfport->tx_dma_chan, &tx_slv_cfg);

        return 0;
}

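/*
 * Request the RX DMA channel and allocate the coherent loop buffers that
 * the RX descriptors cycle through.
 */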
static unsigned int sirfsoc_uart_init_rx_dma(struct uart_port *port)
{
        struct sirfsoc_uart_port *sirfport = to_sirfport(port);
        dma_cap_mask_t dma_mask;
        int ret;
        int i, j;
        struct dma_slave_config slv_cfg = {
                .src_maxburst = 2,
        };

        dma_cap_zero(dma_mask);
        dma_cap_set(DMA_SLAVE, dma_mask);
        sirfport->rx_dma_chan = dma_request_channel(dma_mask,
                                        (dma_filter_fn)sirfsoc_dma_filter_id,
                                        (void *)sirfport->rx_dma_no);
        if (!sirfport->rx_dma_chan) {
                dev_err(port->dev, "Uart Request Dma Channel Fail %d\n",
                                sirfport->rx_dma_no);
                ret = -EPROBE_DEFER;
                goto request_err;
        }
        for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++) {
                sirfport->rx_dma_items[i].xmit.buf =
                        dma_alloc_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
                        &sirfport->rx_dma_items[i].dma_addr, GFP_KERNEL);
                if (!sirfport->rx_dma_items[i].xmit.buf) {
                        dev_err(port->dev, "Uart alloc buffer failed\n");
                        ret = -ENOMEM;
                        goto alloc_coherent_err;
                }
                sirfport->rx_dma_items[i].xmit.head =
                        sirfport->rx_dma_items[i].xmit.tail = 0;
        }
        dmaengine_slave_config(sirfport->rx_dma_chan, &slv_cfg);

        return 0;
alloc_coherent_err:
        for (j = 0; j < i; j++)
                dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
                                sirfport->rx_dma_items[j].xmit.buf,
                                sirfport->rx_dma_items[j].dma_addr);
        dma_release_channel(sirfport->rx_dma_chan);
request_err:
        return ret;
}

static void sirfsoc_uart_uninit_tx_dma(struct sirfsoc_uart_port *sirfport)
{
        dmaengine_terminate_all(sirfport->tx_dma_chan);
        dma_release_channel(sirfport->tx_dma_chan);
}

static void sirfsoc_uart_uninit_rx_dma(struct sirfsoc_uart_port *sirfport)
{
        int i;
        struct uart_port *port = &sirfport->port;
        dmaengine_terminate_all(sirfport->rx_dma_chan);
        dma_release_channel(sirfport->rx_dma_chan);
        for (i = 0; i < SIRFSOC_RX_LOOP_BUF_CNT; i++)
                dma_free_coherent(port->dev, SIRFSOC_RX_DMA_BUF_SIZE,
                                sirfport->rx_dma_items[i].xmit.buf,
                                sirfport->rx_dma_items[i].dma_addr);
}

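/*
 * Bring the port up: request the IRQ, program the FIFO and DMA related
 * defaults, set up the RX/TX DMA paths when channels are configured and,
 * for USP ports with hardware flow control, hook up the CTS GPIO interrupt.
 */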
1137 static int sirfsoc_uart_startup(struct uart_port *port)
1138 {
1139         struct sirfsoc_uart_port *sirfport      = to_sirfport(port);
1140         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1141         unsigned int index                      = port->line;
1142         int ret;
1143         set_irq_flags(port->irq, IRQF_VALID | IRQF_NOAUTOEN);
1144         ret = request_irq(port->irq,
1145                                 sirfsoc_uart_isr,
1146                                 0,
1147                                 SIRFUART_PORT_NAME,
1148                                 sirfport);
1149         if (ret != 0) {
1150                 dev_err(port->dev, "UART%d request IRQ line (%d) failed.\n",
1151                                                         index, port->irq);
1152                 goto irq_err;
1153         }
1154
1155         /* initial hardware settings */
1156         wr_regl(port, ureg->sirfsoc_tx_dma_io_ctrl,
1157                 rd_regl(port, ureg->sirfsoc_tx_dma_io_ctrl) |
1158                 SIRFUART_IO_MODE);
1159         wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
1160                 rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
1161                 SIRFUART_IO_MODE);
1162         wr_regl(port, ureg->sirfsoc_tx_dma_io_len, 0);
1163         wr_regl(port, ureg->sirfsoc_rx_dma_io_len, 0);
1164         wr_regl(port, ureg->sirfsoc_tx_rx_en, SIRFUART_RX_EN | SIRFUART_TX_EN);
1165         if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
1166                 wr_regl(port, ureg->sirfsoc_mode1,
1167                         SIRFSOC_USP_ENDIAN_CTRL_LSBF |
1168                         SIRFSOC_USP_EN);
1169         wr_regl(port, ureg->sirfsoc_tx_fifo_op, SIRFUART_FIFO_RESET);
1170         wr_regl(port, ureg->sirfsoc_tx_fifo_op, 0);
1171         wr_regl(port, ureg->sirfsoc_rx_fifo_op, SIRFUART_FIFO_RESET);
1172         wr_regl(port, ureg->sirfsoc_rx_fifo_op, 0);
1173         wr_regl(port, ureg->sirfsoc_tx_fifo_ctrl, SIRFUART_FIFO_THD(port));
1174         wr_regl(port, ureg->sirfsoc_rx_fifo_ctrl, SIRFUART_FIFO_THD(port));
1175
1176         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no)) {
1177                 ret = sirfsoc_uart_init_rx_dma(port);
1178                 if (ret)
1179                         goto init_rx_err;
1180                 wr_regl(port, ureg->sirfsoc_rx_fifo_level_chk,
1181                                 SIRFUART_RX_FIFO_CHK_SC(port->line, 0x4) |
1182                                 SIRFUART_RX_FIFO_CHK_LC(port->line, 0xe) |
1183                                 SIRFUART_RX_FIFO_CHK_HC(port->line, 0x1b));
1184         }
1185         if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
1186                 sirfsoc_uart_init_tx_dma(port);
1187                 sirfport->tx_dma_state = TX_DMA_IDLE;
1188                 wr_regl(port, ureg->sirfsoc_tx_fifo_level_chk,
1189                                 SIRFUART_TX_FIFO_CHK_SC(port->line, 0x1b) |
1190                                 SIRFUART_TX_FIFO_CHK_LC(port->line, 0xe) |
1191                                 SIRFUART_TX_FIFO_CHK_HC(port->line, 0x4));
1192         }
1193         sirfport->ms_enabled = false;
1194         if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
1195                 sirfport->hw_flow_ctrl) {
1196                 set_irq_flags(gpio_to_irq(sirfport->cts_gpio),
1197                         IRQF_VALID | IRQF_NOAUTOEN);
1198                 ret = request_irq(gpio_to_irq(sirfport->cts_gpio),
1199                         sirfsoc_uart_usp_cts_handler, IRQF_TRIGGER_FALLING |
1200                         IRQF_TRIGGER_RISING, "usp_cts_irq", sirfport);
1201                 if (ret != 0) {
1202                         dev_err(port->dev, "UART-USP: request gpio irq failed\n");
1203                         goto init_rx_err;
1204                 }
1205         }
1206
1207         enable_irq(port->irq);
1208
1209         return 0;
1210 init_rx_err:
1211         free_irq(port->irq, sirfport);
1212 irq_err:
1213         return ret;
1214 }
1215
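/*
 * Port close path: mask the interrupt enables (marco parts use a
 * dedicated clear register), free the UART and CTS GPIO IRQs, and tear
 * down any RX/TX DMA state created in startup().
 */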
1216 static void sirfsoc_uart_shutdown(struct uart_port *port)
1217 {
1218         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1219         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1220         if (!sirfport->is_marco)
1221                 wr_regl(port, ureg->sirfsoc_int_en_reg, 0);
1222         else
1223                 wr_regl(port, SIRFUART_INT_EN_CLR, ~0UL);
1224
1225         free_irq(port->irq, sirfport);
1226         if (sirfport->ms_enabled)
1227                 sirfsoc_uart_disable_ms(port);
1228         if (sirfport->uart_reg->uart_type == SIRF_USP_UART &&
1229                         sirfport->hw_flow_ctrl) {
1230                 gpio_set_value(sirfport->rts_gpio, 1);
1231                 free_irq(gpio_to_irq(sirfport->cts_gpio), sirfport);
1232         }
1233         if (IS_DMA_CHAN_VALID(sirfport->rx_dma_no))
1234                 sirfsoc_uart_uninit_rx_dma(sirfport);
1235         if (IS_DMA_CHAN_VALID(sirfport->tx_dma_no)) {
1236                 sirfsoc_uart_uninit_tx_dma(sirfport);
1237                 sirfport->tx_dma_state = TX_DMA_IDLE;
1238         }
1239 }
1240
1241 static const char *sirfsoc_uart_type(struct uart_port *port)
1242 {
1243         return port->type == SIRFSOC_PORT_TYPE ? SIRFUART_PORT_NAME : NULL;
1244 }
1245
1246 static int sirfsoc_uart_request_port(struct uart_port *port)
1247 {
1248         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1249         struct sirfsoc_uart_param *uart_param = &sirfport->uart_reg->uart_param;
1250         struct resource *res;
1251         res = request_mem_region(port->mapbase,
1252                 SIRFUART_MAP_SIZE, uart_param->port_name);
1253         return res ? 0 : -EBUSY;
1254 }
1255
1256 static void sirfsoc_uart_release_port(struct uart_port *port)
1257 {
1258         release_mem_region(port->mapbase, SIRFUART_MAP_SIZE);
1259 }
1260
1261 static void sirfsoc_uart_config_port(struct uart_port *port, int flags)
1262 {
1263         if (flags & UART_CONFIG_TYPE) {
1264                 port->type = SIRFSOC_PORT_TYPE;
1265                 sirfsoc_uart_request_port(port);
1266         }
1267 }
1268
1269 static const struct uart_ops sirfsoc_uart_ops = {
1270         .tx_empty       = sirfsoc_uart_tx_empty,
1271         .get_mctrl      = sirfsoc_uart_get_mctrl,
1272         .set_mctrl      = sirfsoc_uart_set_mctrl,
1273         .stop_tx        = sirfsoc_uart_stop_tx,
1274         .start_tx       = sirfsoc_uart_start_tx,
1275         .stop_rx        = sirfsoc_uart_stop_rx,
1276         .enable_ms      = sirfsoc_uart_enable_ms,
1277         .break_ctl      = sirfsoc_uart_break_ctl,
1278         .startup        = sirfsoc_uart_startup,
1279         .shutdown       = sirfsoc_uart_shutdown,
1280         .set_termios    = sirfsoc_uart_set_termios,
1281         .pm             = sirfsoc_uart_pm,
1282         .type           = sirfsoc_uart_type,
1283         .release_port   = sirfsoc_uart_release_port,
1284         .request_port   = sirfsoc_uart_request_port,
1285         .config_port    = sirfsoc_uart_config_port,
1286 };
1287
1288 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
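/*
 * Console support: output polls the TX FIFO "full" status bit and writes
 * bytes one at a time; the console path never uses DMA (both channel
 * numbers are forced to UNVALID_DMA_CHAN in the setup hook below).
 */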
1289 static int __init
1290 sirfsoc_uart_console_setup(struct console *co, char *options)
1291 {
1292         unsigned int baud = 115200;
1293         unsigned int bits = 8;
1294         unsigned int parity = 'n';
1295         unsigned int flow = 'n';
1296         struct uart_port *port;
1297         struct sirfsoc_uart_port *sirfport;
1298         struct sirfsoc_register *ureg;
1299         if (co->index < 0 || co->index >= SIRFSOC_UART_NR)
1300                 return -EINVAL;
1301         port = &sirfsoc_uart_ports[co->index].port;
1302         sirfport = to_sirfport(port);
1303         ureg = &sirfport->uart_reg->uart_reg;

        if (!port->mapbase)
                return -ENODEV;
1304
1305         /* enable usp in mode1 register */
1306         if (sirfport->uart_reg->uart_type == SIRF_USP_UART)
1307                 wr_regl(port, ureg->sirfsoc_mode1, SIRFSOC_USP_EN |
1308                                 SIRFSOC_USP_ENDIAN_CTRL_LSBF);
1309         if (options)
1310                 uart_parse_options(options, &baud, &parity, &bits, &flow);
1311         port->cons = co;
1312
1313         /* the console defaults to PIO (I/O mode) for tx/rx transfers */
1314         sirfport->rx_dma_no = UNVALID_DMA_CHAN;
1315         sirfport->tx_dma_no = UNVALID_DMA_CHAN;
1316         return uart_set_options(port, co, baud, parity, bits, flow);
1317 }
1318
1319 static void sirfsoc_uart_console_putchar(struct uart_port *port, int ch)
1320 {
1321         struct sirfsoc_uart_port *sirfport = to_sirfport(port);
1322         struct sirfsoc_register *ureg = &sirfport->uart_reg->uart_reg;
1323         struct sirfsoc_fifo_status *ufifo_st = &sirfport->uart_reg->fifo_status;
1324         while (rd_regl(port,
1325                 ureg->sirfsoc_tx_fifo_status) & ufifo_st->ff_full(port->line))
1326                 cpu_relax();
1327         wr_regb(port, ureg->sirfsoc_tx_fifo_data, ch);
1328 }
1329
1330 static void sirfsoc_uart_console_write(struct console *co, const char *s,
1331                                                         unsigned int count)
1332 {
1333         struct uart_port *port = &sirfsoc_uart_ports[co->index].port;
1334         uart_console_write(port, s, count, sirfsoc_uart_console_putchar);
1335 }
1336
1337 static struct console sirfsoc_uart_console = {
1338         .name           = SIRFSOC_UART_NAME,
1339         .device         = uart_console_device,
1340         .flags          = CON_PRINTBUFFER,
1341         .index          = -1,
1342         .write          = sirfsoc_uart_console_write,
1343         .setup          = sirfsoc_uart_console_setup,
1344         .data           = &sirfsoc_uart_drv,
1345 };
1346
1347 static int __init sirfsoc_uart_console_init(void)
1348 {
1349         register_console(&sirfsoc_uart_console);
1350         return 0;
1351 }
1352 console_initcall(sirfsoc_uart_console_init);
1353 #endif
1354
1355 static struct uart_driver sirfsoc_uart_drv = {
1356         .owner          = THIS_MODULE,
1357         .driver_name    = SIRFUART_PORT_NAME,
1358         .nr             = SIRFSOC_UART_NR,
1359         .dev_name       = SIRFSOC_UART_NAME,
1360         .major          = SIRFSOC_UART_MAJOR,
1361         .minor          = SIRFSOC_UART_MINOR,
1362 #ifdef CONFIG_SERIAL_SIRFSOC_CONSOLE
1363         .cons                   = &sirfsoc_uart_console,
1364 #else
1365         .cons                   = NULL,
1366 #endif
1367 };
1368
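/*
 * DT match table: each compatible selects a register/parameter descriptor
 * (struct sirfsoc_uart_register) for that flavour of port.
 */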
1369 static const struct of_device_id sirfsoc_uart_ids[] = {
1370         { .compatible = "sirf,prima2-uart", .data = &sirfsoc_uart, },
1371         { .compatible = "sirf,marco-uart", .data = &sirfsoc_uart, },
1372         { .compatible = "sirf,prima2-usp-uart", .data = &sirfsoc_usp, },
1373         {}
1374 };
1375 MODULE_DEVICE_TABLE(of, sirfsoc_uart_ids);
1376
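/*
 * Probe: the port index comes from the "cell-index" DT property (USP
 * ports are offset past the plain UARTs), optional DMA channels and
 * RTS/CTS GPIOs are read from the device tree, the register window,
 * IRQ and clock are acquired, and the port is registered with the
 * serial core.
 *
 * A purely illustrative device-tree node consuming these properties
 * (addresses and numbers are made up, not taken from a real dtsi)
 * might look like:
 *
 *	uart1: uart@b0060000 {
 *		compatible = "sirf,prima2-uart";
 *		reg = <0xb0060000 0x1000>;
 *		interrupts = <18>;
 *		cell-index = <1>;
 *		fifosize = <32>;
 *		sirf,uart-dma-rx-channel = <5>;
 *		sirf,uart-dma-tx-channel = <6>;
 *	};
 */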
1377 static int sirfsoc_uart_probe(struct platform_device *pdev)
1378 {
1379         struct sirfsoc_uart_port *sirfport;
1380         struct uart_port *port;
1381         struct resource *res;
1382         int ret;
1383         const struct of_device_id *match;
1384
1385         match = of_match_node(sirfsoc_uart_ids, pdev->dev.of_node);
1386         if (of_property_read_u32(pdev->dev.of_node, "cell-index", &pdev->id)) {
1387                 dev_err(&pdev->dev,
1388                         "Unable to find cell-index in uart node.\n");
1389                 ret = -EFAULT;
1390                 goto err;
1391         }
1392         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart"))
1393                 pdev->id += ((struct sirfsoc_uart_register *)
1394                                 match->data)->uart_param.register_uart_nr;
1395         sirfport = &sirfsoc_uart_ports[pdev->id];
1396         port = &sirfport->port;
1397         port->dev = &pdev->dev;
1398         port->private_data = sirfport;
1399         sirfport->uart_reg = (struct sirfsoc_uart_register *)match->data;
1400
1401         sirfport->hw_flow_ctrl = of_property_read_bool(pdev->dev.of_node,
1402                 "sirf,uart-has-rtscts");
1403         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-uart")) {
1404                 sirfport->uart_reg->uart_type = SIRF_REAL_UART;
1405                 if (of_property_read_u32(pdev->dev.of_node,
1406                                 "sirf,uart-dma-rx-channel",
1407                                 &sirfport->rx_dma_no))
1408                         sirfport->rx_dma_no = UNVALID_DMA_CHAN;
1409                 if (of_property_read_u32(pdev->dev.of_node,
1410                                 "sirf,uart-dma-tx-channel",
1411                                 &sirfport->tx_dma_no))
1412                         sirfport->tx_dma_no = UNVALID_DMA_CHAN;
1413         }
1414         if (of_device_is_compatible(pdev->dev.of_node, "sirf,prima2-usp-uart")) {
1415                 sirfport->uart_reg->uart_type = SIRF_USP_UART;
1416                 if (of_property_read_u32(pdev->dev.of_node,
1417                                 "sirf,usp-dma-rx-channel",
1418                                 &sirfport->rx_dma_no))
1419                         sirfport->rx_dma_no = UNVALID_DMA_CHAN;
1420                 if (of_property_read_u32(pdev->dev.of_node,
1421                                 "sirf,usp-dma-tx-channel",
1422                                 &sirfport->tx_dma_no))
1423                         sirfport->tx_dma_no = UNVALID_DMA_CHAN;
1424                 if (!sirfport->hw_flow_ctrl)
1425                         goto usp_no_flow_control;
1426                 if (of_find_property(pdev->dev.of_node, "cts-gpios", NULL))
1427                         sirfport->cts_gpio = of_get_named_gpio(
1428                                         pdev->dev.of_node, "cts-gpios", 0);
1429                 else
1430                         sirfport->cts_gpio = -1;
1431                 if (of_find_property(pdev->dev.of_node, "rts-gpios", NULL))
1432                         sirfport->rts_gpio = of_get_named_gpio(
1433                                         pdev->dev.of_node, "rts-gpios", 0);
1434                 else
1435                         sirfport->rts_gpio = -1;
1436
1437                 if ((!gpio_is_valid(sirfport->cts_gpio) ||
1438                          !gpio_is_valid(sirfport->rts_gpio))) {
1439                         ret = -EINVAL;
1440                         dev_err(&pdev->dev,
1441                                 "USP flow control requires both cts and rts gpios\n");
1442                         goto err;
1443                 }
1444                 ret = devm_gpio_request(&pdev->dev, sirfport->cts_gpio,
1445                                 "usp-cts-gpio");
1446                 if (ret) {
1447                         dev_err(&pdev->dev, "Unable to request cts gpio\n");
1448                         goto err;
1449                 }
1450                 gpio_direction_input(sirfport->cts_gpio);
1451                 ret = devm_gpio_request(&pdev->dev, sirfport->rts_gpio,
1452                                 "usp-rts-gpio");
1453                 if (ret) {
1454                         dev_err(&pdev->dev, "Unable to request rts gpio\n");
1455                         goto err;
1456                 }
1457                 gpio_direction_output(sirfport->rts_gpio, 1);
1458         }
1459 usp_no_flow_control:
1460         if (of_device_is_compatible(pdev->dev.of_node, "sirf,marco-uart"))
1461                 sirfport->is_marco = true;
1462
1463         if (of_property_read_u32(pdev->dev.of_node,
1464                         "fifosize",
1465                         &port->fifosize)) {
1466                 dev_err(&pdev->dev,
1467                         "Unable to find fifosize in uart node.\n");
1468                 ret = -EFAULT;
1469                 goto err;
1470         }
1471
1472         res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1473         if (res == NULL) {
1474                 dev_err(&pdev->dev, "Unable to find the IOMEM resource.\n");
1475                 ret = -EFAULT;
1476                 goto err;
1477         }
1478         spin_lock_init(&sirfport->rx_lock);
1479         spin_lock_init(&sirfport->tx_lock);
1480         tasklet_init(&sirfport->rx_dma_complete_tasklet,
1481                         sirfsoc_uart_rx_dma_complete_tl, (unsigned long)sirfport);
1482         tasklet_init(&sirfport->rx_tmo_process_tasklet,
1483                         sirfsoc_rx_tmo_process_tl, (unsigned long)sirfport);
1484         port->mapbase = res->start;
1485         port->membase = devm_ioremap(&pdev->dev, res->start, resource_size(res));
1486         if (!port->membase) {
1487                 dev_err(&pdev->dev, "Cannot remap resource.\n");
1488                 ret = -ENOMEM;
1489                 goto err;
1490         }
1491         res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1492         if (res == NULL) {
1493                 dev_err(&pdev->dev, "Unable to find the IRQ resource.\n");
1494                 ret = -EFAULT;
1495                 goto err;
1496         }
1497         port->irq = res->start;
1498
1499         sirfport->clk = clk_get(&pdev->dev, NULL);
1500         if (IS_ERR(sirfport->clk)) {
1501                 ret = PTR_ERR(sirfport->clk);
1502                 goto err;
1503         }
1504         port->uartclk = clk_get_rate(sirfport->clk);
1505
1506         port->ops = &sirfsoc_uart_ops;
1507         spin_lock_init(&port->lock);
1508
1509         platform_set_drvdata(pdev, sirfport);
1510         ret = uart_add_one_port(&sirfsoc_uart_drv, port);
1511         if (ret != 0) {
1512                 dev_err(&pdev->dev, "Cannot add UART port(%d).\n", pdev->id);
1513                 goto port_err;
1514         }
1515
1516         return 0;
1517
1518 port_err:
1519         clk_put(sirfport->clk);
1520 err:
1521         return ret;
1522 }
1523
1524 static int sirfsoc_uart_remove(struct platform_device *pdev)
1525 {
1526         struct sirfsoc_uart_port *sirfport = platform_get_drvdata(pdev);
1527         struct uart_port *port = &sirfport->port;
1528         uart_remove_one_port(&sirfsoc_uart_drv, port);
1529         clk_put(sirfport->clk);
1530         return 0;
1531 }
1532
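/*
 * System sleep hooks just defer to the serial core helpers, which
 * quiesce and later restore the port state around suspend/resume.
 */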
1533 #ifdef CONFIG_PM_SLEEP
1534 static int
1535 sirfsoc_uart_suspend(struct device *pdev)
1536 {
1537         struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
1538         struct uart_port *port = &sirfport->port;
1539         uart_suspend_port(&sirfsoc_uart_drv, port);
1540         return 0;
1541 }
1542
1543 static int sirfsoc_uart_resume(struct device *pdev)
1544 {
1545         struct sirfsoc_uart_port *sirfport = dev_get_drvdata(pdev);
1546         struct uart_port *port = &sirfport->port;
1547         uart_resume_port(&sirfsoc_uart_drv, port);
1548         return 0;
1549 }
1550 #endif
1551
1552 static const struct dev_pm_ops sirfsoc_uart_pm_ops = {
1553         SET_SYSTEM_SLEEP_PM_OPS(sirfsoc_uart_suspend, sirfsoc_uart_resume)
1554 };
1555
1556 static struct platform_driver sirfsoc_uart_driver = {
1557         .probe          = sirfsoc_uart_probe,
1558         .remove         = sirfsoc_uart_remove,
1559         .driver         = {
1560                 .name   = SIRFUART_PORT_NAME,
1561                 .owner  = THIS_MODULE,
1562                 .of_match_table = sirfsoc_uart_ids,
1563                 .pm     = &sirfsoc_uart_pm_ops,
1564         },
1565 };
1566
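/*
 * Module init/exit: register the uart_driver with the serial core first,
 * then the platform driver; if the platform driver fails to register,
 * the uart_driver registration is unwound.
 */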
1567 static int __init sirfsoc_uart_init(void)
1568 {
1569         int ret = 0;
1570
1571         ret = uart_register_driver(&sirfsoc_uart_drv);
1572         if (ret)
1573                 goto out;
1574
1575         ret = platform_driver_register(&sirfsoc_uart_driver);
1576         if (ret)
1577                 uart_unregister_driver(&sirfsoc_uart_drv);
1578 out:
1579         return ret;
1580 }
1581 module_init(sirfsoc_uart_init);
1582
1583 static void __exit sirfsoc_uart_exit(void)
1584 {
1585         platform_driver_unregister(&sirfsoc_uart_driver);
1586         uart_unregister_driver(&sirfsoc_uart_drv);
1587 }
1588 module_exit(sirfsoc_uart_exit);
1589
1590 MODULE_LICENSE("GPL v2");
1591 MODULE_AUTHOR("Bin Shi <Bin.Shi@csr.com>, Rong Wang <Rong.Wang@csr.com>");
1592 MODULE_DESCRIPTION("CSR SiRFprimaII UART driver");