drivers/net/ethernet/ti/cpsw.c
1 /*
2  * Texas Instruments Ethernet Switch Driver
3  *
4  * Copyright (C) 2012 Texas Instruments
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation version 2.
9  *
10  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11  * kind, whether express or implied; without even the implied warranty
12  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15
16 #include <linux/kernel.h>
17 #include <linux/io.h>
18 #include <linux/clk.h>
19 #include <linux/timer.h>
20 #include <linux/module.h>
21 #include <linux/platform_device.h>
22 #include <linux/irqreturn.h>
23 #include <linux/interrupt.h>
24 #include <linux/if_ether.h>
25 #include <linux/etherdevice.h>
26 #include <linux/netdevice.h>
27 #include <linux/phy.h>
28 #include <linux/workqueue.h>
29 #include <linux/delay.h>
30 #include <linux/pm_runtime.h>
31 #include <linux/of.h>
32 #include <linux/of_net.h>
33 #include <linux/of_device.h>
34
35 #include <linux/platform_data/cpsw.h>
36
37 #include "cpsw_ale.h"
38 #include "davinci_cpdma.h"
39
40 #define CPSW_DEBUG      (NETIF_MSG_HW           | NETIF_MSG_WOL         | \
41                          NETIF_MSG_DRV          | NETIF_MSG_LINK        | \
42                          NETIF_MSG_IFUP         | NETIF_MSG_INTR        | \
43                          NETIF_MSG_PROBE        | NETIF_MSG_TIMER       | \
44                          NETIF_MSG_IFDOWN       | NETIF_MSG_RX_ERR      | \
45                          NETIF_MSG_TX_ERR       | NETIF_MSG_TX_DONE     | \
46                          NETIF_MSG_PKTDATA      | NETIF_MSG_TX_QUEUED   | \
47                          NETIF_MSG_RX_STATUS)
48
49 #define cpsw_info(priv, type, format, ...)              \
50 do {                                                            \
51         if (netif_msg_##type(priv) && net_ratelimit())          \
52                 dev_info(priv->dev, format, ## __VA_ARGS__);    \
53 } while (0)
54
55 #define cpsw_err(priv, type, format, ...)               \
56 do {                                                            \
57         if (netif_msg_##type(priv) && net_ratelimit())          \
58                 dev_err(priv->dev, format, ## __VA_ARGS__);     \
59 } while (0)
60
61 #define cpsw_dbg(priv, type, format, ...)               \
62 do {                                                            \
63         if (netif_msg_##type(priv) && net_ratelimit())          \
64                 dev_dbg(priv->dev, format, ## __VA_ARGS__);     \
65 } while (0)
66
67 #define cpsw_notice(priv, type, format, ...)            \
68 do {                                                            \
69         if (netif_msg_##type(priv) && net_ratelimit())          \
70                 dev_notice(priv->dev, format, ## __VA_ARGS__);  \
71 } while (0)
72
73 #define ALE_ALL_PORTS           0x7
74
75 #define CPSW_MAJOR_VERSION(reg)         (reg >> 8 & 0x7)
76 #define CPSW_MINOR_VERSION(reg)         (reg & 0xff)
77 #define CPSW_RTL_VERSION(reg)           ((reg >> 11) & 0x1f)
78
79 #define CPSW_VERSION_1          0x19010a
80 #define CPSW_VERSION_2          0x19010c
81 #define CPDMA_RXTHRESH          0x0c0
82 #define CPDMA_RXFREE            0x0e0
83 #define CPDMA_TXHDP             0x00
84 #define CPDMA_RXHDP             0x20
85 #define CPDMA_TXCP              0x40
86 #define CPDMA_RXCP              0x60
87
88 #define cpsw_dma_regs(base, offset)             \
89         (void __iomem *)((base) + (offset))
90 #define cpsw_dma_rxthresh(base, offset)         \
91         (void __iomem *)((base) + (offset) + CPDMA_RXTHRESH)
92 #define cpsw_dma_rxfree(base, offset)           \
93         (void __iomem *)((base) + (offset) + CPDMA_RXFREE)
94 #define cpsw_dma_txhdp(base, offset)            \
95         (void __iomem *)((base) + (offset) + CPDMA_TXHDP)
96 #define cpsw_dma_rxhdp(base, offset)            \
97         (void __iomem *)((base) + (offset) + CPDMA_RXHDP)
98 #define cpsw_dma_txcp(base, offset)             \
99         (void __iomem *)((base) + (offset) + CPDMA_TXCP)
100 #define cpsw_dma_rxcp(base, offset)             \
101         (void __iomem *)((base) + (offset) + CPDMA_RXCP)
102
103 #define CPSW_POLL_WEIGHT        64
104 #define CPSW_MIN_PACKET_SIZE    60
105 #define CPSW_MAX_PACKET_SIZE    (1500 + 14 + 4 + 4)
106
107 #define RX_PRIORITY_MAPPING     0x76543210
108 #define TX_PRIORITY_MAPPING     0x33221100
109 #define CPDMA_TX_PRIORITY_MAP   0x76543210
110
111 #define cpsw_enable_irq(priv)   \
112         do {                    \
113                 u32 i;          \
114                 for (i = 0; i < priv->num_irqs; i++) \
115                         enable_irq(priv->irqs_table[i]); \
116         } while (0)
117 #define cpsw_disable_irq(priv)  \
118         do {                    \
119                 u32 i;          \
120                 for (i = 0; i < priv->num_irqs; i++) \
121                         disable_irq_nosync(priv->irqs_table[i]); \
122         } while (0)
123
124 static int debug_level;
125 module_param(debug_level, int, 0);
126 MODULE_PARM_DESC(debug_level, "cpsw debug level (NETIF_MSG bits)");
127
128 static int ale_ageout = 10;
129 module_param(ale_ageout, int, 0);
130 MODULE_PARM_DESC(ale_ageout, "cpsw ale ageout interval (seconds)");
131
132 static int rx_packet_max = CPSW_MAX_PACKET_SIZE;
133 module_param(rx_packet_max, int, 0);
134 MODULE_PARM_DESC(rx_packet_max, "maximum receive packet size (bytes)");
135
136 struct cpsw_wr_regs {
137         u32     id_ver;
138         u32     soft_reset;
139         u32     control;
140         u32     int_control;
141         u32     rx_thresh_en;
142         u32     rx_en;
143         u32     tx_en;
144         u32     misc_en;
145 };
146
147 struct cpsw_ss_regs {
148         u32     id_ver;
149         u32     control;
150         u32     soft_reset;
151         u32     stat_port_en;
152         u32     ptype;
153         u32     soft_idle;
154         u32     thru_rate;
155         u32     gap_thresh;
156         u32     tx_start_wds;
157         u32     flow_control;
158         u32     vlan_ltype;
159         u32     ts_ltype;
160         u32     dlr_ltype;
161 };
162
163 /* CPSW_PORT_V1 */
164 #define CPSW1_MAX_BLKS      0x00 /* Maximum FIFO Blocks */
165 #define CPSW1_BLK_CNT       0x04 /* FIFO Block Usage Count (Read Only) */
166 #define CPSW1_TX_IN_CTL     0x08 /* Transmit FIFO Control */
167 #define CPSW1_PORT_VLAN     0x0c /* VLAN Register */
168 #define CPSW1_TX_PRI_MAP    0x10 /* Tx Header Priority to Switch Pri Mapping */
169 #define CPSW1_TS_CTL        0x14 /* Time Sync Control */
170 #define CPSW1_TS_SEQ_LTYPE  0x18 /* Time Sync Sequence ID Offset and Msg Type */
171 #define CPSW1_TS_VLAN       0x1c /* Time Sync VLAN1 and VLAN2 */
172
173 /* CPSW_PORT_V2 */
174 #define CPSW2_CONTROL       0x00 /* Control Register */
175 #define CPSW2_MAX_BLKS      0x08 /* Maximum FIFO Blocks */
176 #define CPSW2_BLK_CNT       0x0c /* FIFO Block Usage Count (Read Only) */
177 #define CPSW2_TX_IN_CTL     0x10 /* Transmit FIFO Control */
178 #define CPSW2_PORT_VLAN     0x14 /* VLAN Register */
179 #define CPSW2_TX_PRI_MAP    0x18 /* Tx Header Priority to Switch Pri Mapping */
180 #define CPSW2_TS_SEQ_MTYPE  0x1c /* Time Sync Sequence ID Offset and Msg Type */
181
182 /* CPSW_PORT_V1 and V2 */
183 #define SA_LO               0x20 /* CPGMAC_SL Source Address Low */
184 #define SA_HI               0x24 /* CPGMAC_SL Source Address High */
185 #define SEND_PERCENT        0x28 /* Transmit Queue Send Percentages */
186
187 /* CPSW_PORT_V2 only */
188 #define RX_DSCP_PRI_MAP0    0x30 /* Rx DSCP Priority to Rx Packet Mapping */
189 #define RX_DSCP_PRI_MAP1    0x34 /* Rx DSCP Priority to Rx Packet Mapping */
190 #define RX_DSCP_PRI_MAP2    0x38 /* Rx DSCP Priority to Rx Packet Mapping */
191 #define RX_DSCP_PRI_MAP3    0x3c /* Rx DSCP Priority to Rx Packet Mapping */
192 #define RX_DSCP_PRI_MAP4    0x40 /* Rx DSCP Priority to Rx Packet Mapping */
193 #define RX_DSCP_PRI_MAP5    0x44 /* Rx DSCP Priority to Rx Packet Mapping */
194 #define RX_DSCP_PRI_MAP6    0x48 /* Rx DSCP Priority to Rx Packet Mapping */
195 #define RX_DSCP_PRI_MAP7    0x4c /* Rx DSCP Priority to Rx Packet Mapping */
196
197 /* Bit definitions for the CPSW2_CONTROL register */
198 #define PASS_PRI_TAGGED     (1<<24) /* Pass Priority Tagged */
199 #define VLAN_LTYPE2_EN      (1<<21) /* VLAN LTYPE 2 enable */
200 #define VLAN_LTYPE1_EN      (1<<20) /* VLAN LTYPE 1 enable */
201 #define DSCP_PRI_EN         (1<<16) /* DSCP Priority Enable */
202 #define TS_320              (1<<14) /* Time Sync Dest Port 320 enable */
203 #define TS_319              (1<<13) /* Time Sync Dest Port 319 enable */
204 #define TS_132              (1<<12) /* Time Sync Dest IP Addr 132 enable */
205 #define TS_131              (1<<11) /* Time Sync Dest IP Addr 131 enable */
206 #define TS_130              (1<<10) /* Time Sync Dest IP Addr 130 enable */
207 #define TS_129              (1<<9)  /* Time Sync Dest IP Addr 129 enable */
208 #define TS_BIT8             (1<<8)  /* ts_ttl_nonzero? */
209 #define TS_ANNEX_D_EN       (1<<4)  /* Time Sync Annex D enable */
210 #define TS_LTYPE2_EN        (1<<3)  /* Time Sync LTYPE 2 enable */
211 #define TS_LTYPE1_EN        (1<<2)  /* Time Sync LTYPE 1 enable */
212 #define TS_TX_EN            (1<<1)  /* Time Sync Transmit Enable */
213 #define TS_RX_EN            (1<<0)  /* Time Sync Receive Enable */
214
215 #define CTRL_TS_BITS \
216         (TS_320 | TS_319 | TS_132 | TS_131 | TS_130 | TS_129 | TS_BIT8 | \
217          TS_ANNEX_D_EN | TS_LTYPE1_EN)
218
219 #define CTRL_ALL_TS_MASK (CTRL_TS_BITS | TS_TX_EN | TS_RX_EN)
220 #define CTRL_TX_TS_BITS  (CTRL_TS_BITS | TS_TX_EN)
221 #define CTRL_RX_TS_BITS  (CTRL_TS_BITS | TS_RX_EN)
222
223 /* Bit definitions for the CPSW2_TS_SEQ_MTYPE register */
224 #define TS_SEQ_ID_OFFSET_SHIFT   (16)    /* Time Sync Sequence ID Offset */
225 #define TS_SEQ_ID_OFFSET_MASK    (0x3f)
226 #define TS_MSG_TYPE_EN_SHIFT     (0)     /* Time Sync Message Type Enable */
227 #define TS_MSG_TYPE_EN_MASK      (0xffff)
228
229 /* The PTP event messages - Sync, Delay_Req, Pdelay_Req, and Pdelay_Resp. */
230 #define EVENT_MSG_BITS ((1<<0) | (1<<1) | (1<<2) | (1<<3))
231
232 struct cpsw_host_regs {
233         u32     max_blks;
234         u32     blk_cnt;
235         u32     flow_thresh;
236         u32     port_vlan;
237         u32     tx_pri_map;
238         u32     cpdma_tx_pri_map;
239         u32     cpdma_rx_chan_map;
240 };
241
242 struct cpsw_sliver_regs {
243         u32     id_ver;
244         u32     mac_control;
245         u32     mac_status;
246         u32     soft_reset;
247         u32     rx_maxlen;
248         u32     __reserved_0;
249         u32     rx_pause;
250         u32     tx_pause;
251         u32     __reserved_1;
252         u32     rx_pri_map;
253 };
254
255 struct cpsw_slave {
256         void __iomem                    *regs;
257         struct cpsw_sliver_regs __iomem *sliver;
258         int                             slave_num;
259         u32                             mac_control;
260         struct cpsw_slave_data          *data;
261         struct phy_device               *phy;
262 };
263
264 static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
265 {
266         return __raw_readl(slave->regs + offset);
267 }
268
269 static inline void slave_write(struct cpsw_slave *slave, u32 val, u32 offset)
270 {
271         __raw_writel(val, slave->regs + offset);
272 }
273
274 struct cpsw_priv {
275         spinlock_t                      lock;
276         struct platform_device          *pdev;
277         struct net_device               *ndev;
278         struct resource                 *cpsw_res;
279         struct resource                 *cpsw_ss_res;
280         struct napi_struct              napi;
281         struct device                   *dev;
282         struct cpsw_platform_data       data;
283         struct cpsw_ss_regs __iomem     *regs;
284         struct cpsw_wr_regs __iomem     *wr_regs;
285         struct cpsw_host_regs __iomem   *host_port_regs;
286         u32                             msg_enable;
287         u32                             version;
288         struct net_device_stats         stats;
289         int                             rx_packet_max;
290         int                             host_port;
291         struct clk                      *clk;
292         u8                              mac_addr[ETH_ALEN];
293         struct cpsw_slave               *slaves;
294         struct cpdma_ctlr               *dma;
295         struct cpdma_chan               *txch, *rxch;
296         struct cpsw_ale                 *ale;
297         /* snapshot of IRQ numbers */
298         u32 irqs_table[4];
299         u32 num_irqs;
300 };
301
302 #define napi_to_priv(napi)      container_of(napi, struct cpsw_priv, napi)
303 #define for_each_slave(priv, func, arg...)                      \
304         do {                                                    \
305                 int idx;                                        \
306                 for (idx = 0; idx < (priv)->data.slaves; idx++) \
307                         (func)((priv)->slaves + idx, ##arg);    \
308         } while (0)
309
310 static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
311 {
312         struct cpsw_priv *priv = netdev_priv(ndev);
313
314         if (ndev->flags & IFF_PROMISC) {
315                 /* Enable promiscuous mode */
316                 dev_err(priv->dev, "Ignoring Promiscuous mode\n");
317                 return;
318         }
319
320         /* Clear all mcast from ALE */
321         cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
322
323         if (!netdev_mc_empty(ndev)) {
324                 struct netdev_hw_addr *ha;
325
326                 /* program multicast address list into ALE register */
327                 netdev_for_each_mc_addr(ha, ndev) {
328                         cpsw_ale_add_mcast(priv->ale, (u8 *)ha->addr,
329                                 ALE_ALL_PORTS << priv->host_port, 0, 0);
330                 }
331         }
332 }
333
334 static void cpsw_intr_enable(struct cpsw_priv *priv)
335 {
336         __raw_writel(0xFF, &priv->wr_regs->tx_en);
337         __raw_writel(0xFF, &priv->wr_regs->rx_en);
338
339         cpdma_ctlr_int_ctrl(priv->dma, true);
340         return;
341 }
342
343 static void cpsw_intr_disable(struct cpsw_priv *priv)
344 {
345         __raw_writel(0, &priv->wr_regs->tx_en);
346         __raw_writel(0, &priv->wr_regs->rx_en);
347
348         cpdma_ctlr_int_ctrl(priv->dma, false);
349         return;
350 }
351
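/*
 * TX completion callback: restart the queue if it was stopped because
 * the channel was full, account the packet and free the skb.
 */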
352 void cpsw_tx_handler(void *token, int len, int status)
353 {
354         struct sk_buff          *skb = token;
355         struct net_device       *ndev = skb->dev;
356         struct cpsw_priv        *priv = netdev_priv(ndev);
357
358         if (unlikely(netif_queue_stopped(ndev)))
359                 netif_start_queue(ndev);
360         priv->stats.tx_packets++;
361         priv->stats.tx_bytes += len;
362         dev_kfree_skb_any(skb);
363 }
364
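/*
 * RX completion callback: hand the received buffer up the stack, then
 * allocate a fresh skb and resubmit it to the RX channel so the
 * descriptor ring stays full.  If the interface is shutting down the
 * buffer is simply freed instead of being recycled.
 */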
365 void cpsw_rx_handler(void *token, int len, int status)
366 {
367         struct sk_buff          *skb = token;
368         struct net_device       *ndev = skb->dev;
369         struct cpsw_priv        *priv = netdev_priv(ndev);
370         int                     ret = 0;
371
372         /* free and bail if we are shutting down */
373         if (unlikely(!netif_running(ndev)) ||
374                         unlikely(!netif_carrier_ok(ndev))) {
375                 dev_kfree_skb_any(skb);
376                 return;
377         }
378         if (likely(status >= 0)) {
379                 skb_put(skb, len);
380                 skb->protocol = eth_type_trans(skb, ndev);
381                 netif_receive_skb(skb);
382                 priv->stats.rx_bytes += len;
383                 priv->stats.rx_packets++;
384                 skb = NULL;
385         }
386
387         if (unlikely(!netif_running(ndev))) {
388                 if (skb)
389                         dev_kfree_skb_any(skb);
390                 return;
391         }
392
393         if (likely(!skb)) {
394                 skb = netdev_alloc_skb_ip_align(ndev, priv->rx_packet_max);
395                 if (WARN_ON(!skb))
396                         return;
397
398                 ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
399                                         skb_tailroom(skb), GFP_KERNEL);
400         }
401         WARN_ON(ret < 0);
402 }
403
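/*
 * Shared handler for all CPSW interrupt lines: mask the hardware
 * interrupts, leave the IRQ lines disabled and defer the real work to
 * NAPI; cpsw_poll() re-enables both once the queues are drained.
 */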
404 static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
405 {
406         struct cpsw_priv *priv = dev_id;
407
408         if (likely(netif_running(priv->ndev))) {
409                 cpsw_intr_disable(priv);
410                 cpsw_disable_irq(priv);
411                 napi_schedule(&priv->napi);
412         }
413         return IRQ_HANDLED;
414 }
415
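/*
 * Map a slave index to its switch port number: with the host on port 0
 * the slave ports are numbered 1..n, otherwise the index is used as-is.
 */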
416 static inline int cpsw_get_slave_port(struct cpsw_priv *priv, u32 slave_num)
417 {
418         if (priv->host_port == 0)
419                 return slave_num + 1;
420         else
421                 return slave_num;
422 }
423
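/*
 * NAPI poll: service up to 128 TX completions and up to @budget RX
 * packets.  When the RX work fits in the budget, complete NAPI, unmask
 * the hardware interrupts, signal EOI to the CPDMA and re-enable the
 * IRQ lines.
 */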
424 static int cpsw_poll(struct napi_struct *napi, int budget)
425 {
426         struct cpsw_priv        *priv = napi_to_priv(napi);
427         int                     num_tx, num_rx;
428
429         num_tx = cpdma_chan_process(priv->txch, 128);
430         num_rx = cpdma_chan_process(priv->rxch, budget);
431
432         if (num_rx || num_tx)
433                 cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n",
434                          num_rx, num_tx);
435
436         if (num_rx < budget) {
437                 napi_complete(napi);
438                 cpsw_intr_enable(priv);
439                 cpdma_ctlr_eoi(priv->dma);
440                 cpsw_enable_irq(priv);
441         }
442
443         return num_rx;
444 }
445
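/*
 * Write the soft-reset bit and poll until the hardware clears it,
 * warning if that takes longer than about one second.
 */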
446 static inline void soft_reset(const char *module, void __iomem *reg)
447 {
448         unsigned long timeout = jiffies + HZ;
449
450         __raw_writel(1, reg);
451         do {
452                 cpu_relax();
453         } while ((__raw_readl(reg) & 1) && time_after(timeout, jiffies));
454
455         WARN(__raw_readl(reg) & 1, "failed to soft-reset %s\n", module);
456 }
457
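/*
 * Pack a MAC address into the SA_HI (first four octets) and SA_LO
 * (last two octets) register layout used by cpsw_set_slave_mac().
 */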
458 #define mac_hi(mac)     (((mac)[0] << 0) | ((mac)[1] << 8) |    \
459                          ((mac)[2] << 16) | ((mac)[3] << 24))
460 #define mac_lo(mac)     (((mac)[4] << 0) | ((mac)[5] << 8))
461
462 static void cpsw_set_slave_mac(struct cpsw_slave *slave,
463                                struct cpsw_priv *priv)
464 {
465         slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
466         slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
467 }
468
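/*
 * Per-slave link-change handling: on link up, enable forwarding on the
 * ALE port and build the sliver mac_control value (gigabit, full
 * duplex, 100Mbit speed-in); on link down, disable the port again.
 * The register is only rewritten when the value actually changes.
 */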
469 static void _cpsw_adjust_link(struct cpsw_slave *slave,
470                               struct cpsw_priv *priv, bool *link)
471 {
472         struct phy_device       *phy = slave->phy;
473         u32                     mac_control = 0;
474         u32                     slave_port;
475
476         if (!phy)
477                 return;
478
479         slave_port = cpsw_get_slave_port(priv, slave->slave_num);
480
481         if (phy->link) {
482                 mac_control = priv->data.mac_control;
483
484                 /* enable forwarding */
485                 cpsw_ale_control_set(priv->ale, slave_port,
486                                      ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
487
488                 if (phy->speed == 1000)
489                         mac_control |= BIT(7);  /* GIGABITEN    */
490                 if (phy->duplex)
491                         mac_control |= BIT(0);  /* FULLDUPLEXEN */
492
493                 /* set speed_in input in case RMII mode is used in 100Mbps */
494                 if (phy->speed == 100)
495                         mac_control |= BIT(15);
496
497                 *link = true;
498         } else {
499                 mac_control = 0;
500                 /* disable forwarding */
501                 cpsw_ale_control_set(priv->ale, slave_port,
502                                      ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
503         }
504
505         if (mac_control != slave->mac_control) {
506                 phy_print_status(phy);
507                 __raw_writel(mac_control, &slave->sliver->mac_control);
508         }
509
510         slave->mac_control = mac_control;
511 }
512
513 static void cpsw_adjust_link(struct net_device *ndev)
514 {
515         struct cpsw_priv        *priv = netdev_priv(ndev);
516         bool                    link = false;
517
518         for_each_slave(priv, _cpsw_adjust_link, priv, &link);
519
520         if (link) {
521                 netif_carrier_on(ndev);
522                 if (netif_running(ndev))
523                         netif_wake_queue(ndev);
524         } else {
525                 netif_carrier_off(ndev);
526                 netif_stop_queue(ndev);
527         }
528 }
529
530 static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
531 {
532         static char *leader = "........................................";
533
534         if (!val)
535                 return 0;
536         else
537                 return snprintf(buf, maxlen, "%s %s %10d\n", name,
538                                 leader + strlen(name), val);
539 }
540
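/*
 * Bring up one slave port: soft-reset the sliver, program the priority
 * maps, maximum RX length and port MAC address, add a broadcast ALE
 * entry for the port and finally connect and start its PHY.
 */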
541 static void cpsw_slave_open(struct cpsw_slave *slave, struct cpsw_priv *priv)
542 {
543         char name[32];
544         u32 slave_port;
545
546         sprintf(name, "slave-%d", slave->slave_num);
547
548         soft_reset(name, &slave->sliver->soft_reset);
549
550         /* setup priority mapping */
551         __raw_writel(RX_PRIORITY_MAPPING, &slave->sliver->rx_pri_map);
552
553         switch (priv->version) {
554         case CPSW_VERSION_1:
555                 slave_write(slave, TX_PRIORITY_MAPPING, CPSW1_TX_PRI_MAP);
556                 break;
557         case CPSW_VERSION_2:
558                 slave_write(slave, TX_PRIORITY_MAPPING, CPSW2_TX_PRI_MAP);
559                 break;
560         }
561
562         /* setup max packet size, and mac address */
563         __raw_writel(priv->rx_packet_max, &slave->sliver->rx_maxlen);
564         cpsw_set_slave_mac(slave, priv);
565
566         slave->mac_control = 0; /* no link yet */
567
568         slave_port = cpsw_get_slave_port(priv, slave->slave_num);
569
570         cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
571                            1 << slave_port, 0, ALE_MCAST_FWD_2);
572
573         slave->phy = phy_connect(priv->ndev, slave->data->phy_id,
574                                  &cpsw_adjust_link, 0, slave->data->phy_if);
575         if (IS_ERR(slave->phy)) {
576                 dev_err(priv->dev, "phy %s not found on slave %d\n",
577                         slave->data->phy_id, slave->slave_num);
578                 slave->phy = NULL;
579         } else {
580                 dev_info(priv->dev, "phy found: id 0x%x\n",
581                          slave->phy->phy_id);
582                 phy_start(slave->phy);
583         }
584 }
585
586 static void cpsw_init_host_port(struct cpsw_priv *priv)
587 {
588         /* soft reset the controller and initialize ale */
589         soft_reset("cpsw", &priv->regs->soft_reset);
590         cpsw_ale_start(priv->ale);
591
592         /* switch to vlan unaware mode */
593         cpsw_ale_control_set(priv->ale, 0, ALE_VLAN_AWARE, 0);
594
595         /* setup host port priority mapping */
596         __raw_writel(CPDMA_TX_PRIORITY_MAP,
597                      &priv->host_port_regs->cpdma_tx_pri_map);
598         __raw_writel(0, &priv->host_port_regs->cpdma_rx_chan_map);
599
600         cpsw_ale_control_set(priv->ale, priv->host_port,
601                              ALE_PORT_STATE, ALE_PORT_STATE_FORWARD);
602
603         cpsw_ale_add_ucast(priv->ale, priv->mac_addr, priv->host_port, 0);
604         cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
605                            1 << priv->host_port, 0, ALE_MCAST_FWD_2);
606 }
607
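/*
 * ndo_open: reset and configure the switch and all slave ports, prime
 * the RX channel with rx_descs buffers, then start the CPDMA,
 * interrupts and NAPI.
 */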
608 static int cpsw_ndo_open(struct net_device *ndev)
609 {
610         struct cpsw_priv *priv = netdev_priv(ndev);
611         int i, ret;
612         u32 reg;
613
614         cpsw_intr_disable(priv);
615         netif_carrier_off(ndev);
616
617         pm_runtime_get_sync(&priv->pdev->dev);
618
619         reg = __raw_readl(&priv->regs->id_ver);
620         priv->version = reg;
621
622         dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
623                  CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
624                  CPSW_RTL_VERSION(reg));
625
626         /* initialize host and slave ports */
627         cpsw_init_host_port(priv);
628         for_each_slave(priv, cpsw_slave_open, priv);
629
630         /* setup tx dma to fixed prio and zero offset */
631         cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
632         cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);
633
634         /* disable priority elevation */
635         __raw_writel(0, &priv->regs->ptype);
636
637         /* enable statistics collection on all ports (host + both slaves) */
638         __raw_writel(0x7, &priv->regs->stat_port_en);
639
640         if (WARN_ON(!priv->data.rx_descs))
641                 priv->data.rx_descs = 128;
642
643         for (i = 0; i < priv->data.rx_descs; i++) {
644                 struct sk_buff *skb;
645
646                 ret = -ENOMEM;
647                 skb = netdev_alloc_skb_ip_align(priv->ndev,
648                                                 priv->rx_packet_max);
649                 if (!skb)
650                         break;
651                 ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
652                                         skb_tailroom(skb), GFP_KERNEL);
653                 if (WARN_ON(ret < 0))
654                         break;
655         }
656         /* continue even if we didn't manage to submit all receive descs */
657         cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
658
659         cpdma_ctlr_start(priv->dma);
660         cpsw_intr_enable(priv);
661         napi_enable(&priv->napi);
662         cpdma_ctlr_eoi(priv->dma);
663
664         return 0;
665 }
666
667 static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
668 {
669         if (!slave->phy)
670                 return;
671         phy_stop(slave->phy);
672         phy_disconnect(slave->phy);
673         slave->phy = NULL;
674 }
675
676 static int cpsw_ndo_stop(struct net_device *ndev)
677 {
678         struct cpsw_priv *priv = netdev_priv(ndev);
679
680         cpsw_info(priv, ifdown, "shutting down cpsw device\n");
681         cpsw_intr_disable(priv);
682         cpdma_ctlr_int_ctrl(priv->dma, false);
683         cpdma_ctlr_stop(priv->dma);
684         netif_stop_queue(priv->ndev);
685         napi_disable(&priv->napi);
686         netif_carrier_off(priv->ndev);
687         cpsw_ale_stop(priv->ale);
688         for_each_slave(priv, cpsw_slave_stop, priv);
689         pm_runtime_put_sync(&priv->pdev->dev);
690         return 0;
691 }
692
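/*
 * ndo_start_xmit: pad short frames to the 60-byte minimum and submit
 * the skb to the TX CPDMA channel.  If the submit fails the queue is
 * stopped and NETDEV_TX_BUSY is returned so the stack will retry.
 */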
693 static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
694                                        struct net_device *ndev)
695 {
696         struct cpsw_priv *priv = netdev_priv(ndev);
697         int ret;
698
699         ndev->trans_start = jiffies;
700
701         if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
702                 cpsw_err(priv, tx_err, "packet pad failed\n");
703                 priv->stats.tx_dropped++;
704                 return NETDEV_TX_OK;
705         }
706
707         ret = cpdma_chan_submit(priv->txch, skb, skb->data,
708                                 skb->len, GFP_KERNEL);
709         if (unlikely(ret != 0)) {
710                 cpsw_err(priv, tx_err, "desc submit failed\n");
711                 goto fail;
712         }
713
714         return NETDEV_TX_OK;
715 fail:
716         priv->stats.tx_dropped++;
717         netif_stop_queue(ndev);
718         return NETDEV_TX_BUSY;
719 }
720
721 static void cpsw_ndo_change_rx_flags(struct net_device *ndev, int flags)
722 {
723         /*
724          * The switch cannot operate in promiscuous mode without substantial
725          * headache.  For promiscuous mode to work, we would need to put the
726          * ALE in bypass mode and route all traffic to the host port.
727          * Subsequently, the host will need to operate as a "bridge", learn,
728          * and flood as needed.  For now, we simply complain here and
729          * do nothing about it :-)
730          */
731         if ((flags & IFF_PROMISC) && (ndev->flags & IFF_PROMISC))
732                 dev_err(&ndev->dev, "promiscuity ignored!\n");
733
734         /*
735          * The switch cannot filter multicast traffic unless it is configured
736          * in "VLAN Aware" mode.  Unfortunately, VLAN awareness requires a
737          * whole bunch of additional logic that this driver does not implement
738          * at present.
739          */
740         if ((flags & IFF_ALLMULTI) && !(ndev->flags & IFF_ALLMULTI))
741                 dev_err(&ndev->dev, "multicast traffic cannot be filtered!\n");
742 }
743
744 static void cpsw_ndo_tx_timeout(struct net_device *ndev)
745 {
746         struct cpsw_priv *priv = netdev_priv(ndev);
747
748         cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
749         priv->stats.tx_errors++;
750         cpsw_intr_disable(priv);
751         cpdma_ctlr_int_ctrl(priv->dma, false);
752         cpdma_chan_stop(priv->txch);
753         cpdma_chan_start(priv->txch);
754         cpdma_ctlr_int_ctrl(priv->dma, true);
755         cpsw_intr_enable(priv);
756         cpdma_ctlr_eoi(priv->dma);
757 }
758
759 static struct net_device_stats *cpsw_ndo_get_stats(struct net_device *ndev)
760 {
761         struct cpsw_priv *priv = netdev_priv(ndev);
762         return &priv->stats;
763 }
764
765 #ifdef CONFIG_NET_POLL_CONTROLLER
766 static void cpsw_ndo_poll_controller(struct net_device *ndev)
767 {
768         struct cpsw_priv *priv = netdev_priv(ndev);
769
770         cpsw_intr_disable(priv);
771         cpdma_ctlr_int_ctrl(priv->dma, false);
772         cpsw_interrupt(ndev->irq, priv);
773         cpdma_ctlr_int_ctrl(priv->dma, true);
774         cpsw_intr_enable(priv);
775         cpdma_ctlr_eoi(priv->dma);
776 }
777 #endif
778
779 static const struct net_device_ops cpsw_netdev_ops = {
780         .ndo_open               = cpsw_ndo_open,
781         .ndo_stop               = cpsw_ndo_stop,
782         .ndo_start_xmit         = cpsw_ndo_start_xmit,
783         .ndo_change_rx_flags    = cpsw_ndo_change_rx_flags,
784         .ndo_validate_addr      = eth_validate_addr,
785         .ndo_change_mtu         = eth_change_mtu,
786         .ndo_tx_timeout         = cpsw_ndo_tx_timeout,
787         .ndo_get_stats          = cpsw_ndo_get_stats,
788         .ndo_set_rx_mode        = cpsw_ndo_set_rx_mode,
789 #ifdef CONFIG_NET_POLL_CONTROLLER
790         .ndo_poll_controller    = cpsw_ndo_poll_controller,
791 #endif
792 };
793
794 static void cpsw_get_drvinfo(struct net_device *ndev,
795                              struct ethtool_drvinfo *info)
796 {
797         struct cpsw_priv *priv = netdev_priv(ndev);
798         strcpy(info->driver, "TI CPSW Driver v1.0");
799         strcpy(info->version, "1.0");
800         strcpy(info->bus_info, priv->pdev->name);
801 }
802
803 static u32 cpsw_get_msglevel(struct net_device *ndev)
804 {
805         struct cpsw_priv *priv = netdev_priv(ndev);
806         return priv->msg_enable;
807 }
808
809 static void cpsw_set_msglevel(struct net_device *ndev, u32 value)
810 {
811         struct cpsw_priv *priv = netdev_priv(ndev);
812         priv->msg_enable = value;
813 }
814
815 static const struct ethtool_ops cpsw_ethtool_ops = {
816         .get_drvinfo    = cpsw_get_drvinfo,
817         .get_msglevel   = cpsw_get_msglevel,
818         .set_msglevel   = cpsw_set_msglevel,
819         .get_link       = ethtool_op_get_link,
820 };
821
822 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv)
823 {
824         void __iomem            *regs = priv->regs;
825         int                     slave_num = slave->slave_num;
826         struct cpsw_slave_data  *data = priv->data.slave_data + slave_num;
827
828         slave->data     = data;
829         slave->regs     = regs + data->slave_reg_ofs;
830         slave->sliver   = regs + data->sliver_reg_ofs;
831 }
832
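/*
 * Fill cpsw_platform_data from the device tree.  The node must provide
 * slaves, cpdma_channels, host_port_no, cpdma_reg_ofs, cpdma_sram_ofs,
 * ale_reg_ofs, ale_entries, host_port_reg_ofs, hw_stats_reg_ofs,
 * cpts_reg_ofs, bd_ram_ofs, bd_ram_size, rx_descs and mac_control,
 * plus one child node per slave with phy_id, slave_reg_ofs,
 * sliver_reg_ofs and an optional MAC address.
 */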
833 static int cpsw_probe_dt(struct cpsw_platform_data *data,
834                          struct platform_device *pdev)
835 {
836         struct device_node *node = pdev->dev.of_node;
837         struct device_node *slave_node;
838         int i = 0, ret;
839         u32 prop;
840
841         if (!node)
842                 return -EINVAL;
843
844         if (of_property_read_u32(node, "slaves", &prop)) {
845                 pr_err("Missing slaves property in the DT.\n");
846                 return -EINVAL;
847         }
848         data->slaves = prop;
849
850         data->slave_data = kzalloc(sizeof(struct cpsw_slave_data) *
851                                    data->slaves, GFP_KERNEL);
852         if (!data->slave_data) {
853                 pr_err("Could not allocate slave memory.\n");
854                 return -EINVAL;
855         }
856
857         data->no_bd_ram = of_property_read_bool(node, "no_bd_ram");
858
859         if (of_property_read_u32(node, "cpdma_channels", &prop)) {
860                 pr_err("Missing cpdma_channels property in the DT.\n");
861                 ret = -EINVAL;
862                 goto error_ret;
863         }
864         data->channels = prop;
865
866         if (of_property_read_u32(node, "host_port_no", &prop)) {
867                 pr_err("Missing host_port_no property in the DT.\n");
868                 ret = -EINVAL;
869                 goto error_ret;
870         }
871         data->host_port_num = prop;
872
873         if (of_property_read_u32(node, "cpdma_reg_ofs", &prop)) {
874                 pr_err("Missing cpdma_reg_ofs property in the DT.\n");
875                 ret = -EINVAL;
876                 goto error_ret;
877         }
878         data->cpdma_reg_ofs = prop;
879
880         if (of_property_read_u32(node, "cpdma_sram_ofs", &prop)) {
881                 pr_err("Missing cpdma_sram_ofs property in the DT.\n");
882                 ret = -EINVAL;
883                 goto error_ret;
884         }
885         data->cpdma_sram_ofs = prop;
886
887         if (of_property_read_u32(node, "ale_reg_ofs", &prop)) {
888                 pr_err("Missing ale_reg_ofs property in the DT.\n");
889                 ret = -EINVAL;
890                 goto error_ret;
891         }
892         data->ale_reg_ofs = prop;
893
894         if (of_property_read_u32(node, "ale_entries", &prop)) {
895                 pr_err("Missing ale_entries property in the DT.\n");
896                 ret = -EINVAL;
897                 goto error_ret;
898         }
899         data->ale_entries = prop;
900
901         if (of_property_read_u32(node, "host_port_reg_ofs", &prop)) {
902                 pr_err("Missing host_port_reg_ofs property in the DT.\n");
903                 ret = -EINVAL;
904                 goto error_ret;
905         }
906         data->host_port_reg_ofs = prop;
907
908         if (of_property_read_u32(node, "hw_stats_reg_ofs", &prop)) {
909                 pr_err("Missing hw_stats_reg_ofs property in the DT.\n");
910                 ret = -EINVAL;
911                 goto error_ret;
912         }
913         data->hw_stats_reg_ofs = prop;
914
915         if (of_property_read_u32(node, "cpts_reg_ofs", &prop)) {
916                 pr_err("Missing cpts_reg_ofs property in the DT.\n");
917                 ret = -EINVAL;
918                 goto error_ret;
919         }
920         data->cpts_reg_ofs = prop;
921
922         if (of_property_read_u32(node, "bd_ram_ofs", &prop)) {
923                 pr_err("Missing bd_ram_ofs property in the DT.\n");
924                 ret = -EINVAL;
925                 goto error_ret;
926         }
927         data->bd_ram_ofs = prop;
928
929         if (of_property_read_u32(node, "bd_ram_size", &prop)) {
930                 pr_err("Missing bd_ram_size property in the DT.\n");
931                 ret = -EINVAL;
932                 goto error_ret;
933         }
934         data->bd_ram_size = prop;
935
936         if (of_property_read_u32(node, "rx_descs", &prop)) {
937                 pr_err("Missing rx_descs property in the DT.\n");
938                 ret = -EINVAL;
939                 goto error_ret;
940         }
941         data->rx_descs = prop;
942
943         if (of_property_read_u32(node, "mac_control", &prop)) {
944                 pr_err("Missing mac_control property in the DT.\n");
945                 ret = -EINVAL;
946                 goto error_ret;
947         }
948         data->mac_control = prop;
949
950         for_each_child_of_node(node, slave_node) {
951                 struct cpsw_slave_data *slave_data = data->slave_data + i;
952                 const char *phy_id = NULL;
953                 const void *mac_addr = NULL;
954
955                 if (of_property_read_string(slave_node, "phy_id", &phy_id)) {
956                         pr_err("Missing slave[%d] phy_id property\n", i);
957                         ret = -EINVAL;
958                         goto error_ret;
959                 }
960                 slave_data->phy_id = phy_id;
961
962                 if (of_property_read_u32(slave_node, "slave_reg_ofs", &prop)) {
963                         pr_err("Missing slave[%d] slave_reg_ofs property\n", i);
964                         ret = -EINVAL;
965                         goto error_ret;
966                 }
967                 slave_data->slave_reg_ofs = prop;
968
969                 if (of_property_read_u32(slave_node, "sliver_reg_ofs",
970                                          &prop)) {
971                         pr_err("Missing slave[%d] sliver_reg_ofs property\n",
972                                 i);
973                         ret = -EINVAL;
974                         goto error_ret;
975                 }
976                 slave_data->sliver_reg_ofs = prop;
977
978                 mac_addr = of_get_mac_address(slave_node);
979                 if (mac_addr)
980                         memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
981
982                 i++;
983         }
984
985         return 0;
986
987 error_ret:
988         kfree(data->slave_data);
989         return ret;
990 }
991
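/*
 * Probe: allocate the net_device, read the configuration from the
 * device tree, map the CPSW and wrapper register windows, create the
 * CPDMA controller, TX/RX channels and the ALE, request the interrupt
 * lines and register the network device.
 */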
992 static int __devinit cpsw_probe(struct platform_device *pdev)
993 {
994         struct cpsw_platform_data       *data = pdev->dev.platform_data;
995         struct net_device               *ndev;
996         struct cpsw_priv                *priv;
997         struct cpdma_params             dma_params;
998         struct cpsw_ale_params          ale_params;
999         void __iomem                    *regs;
1000         struct resource                 *res;
1001         int ret = 0, i, k = 0;
1002
1003         ndev = alloc_etherdev(sizeof(struct cpsw_priv));
1004         if (!ndev) {
1005                 pr_err("error allocating net_device\n");
1006                 return -ENOMEM;
1007         }
1008
1009         platform_set_drvdata(pdev, ndev);
1010         priv = netdev_priv(ndev);
1011         spin_lock_init(&priv->lock);
1012         priv->pdev = pdev;
1013         priv->ndev = ndev;
1014         priv->dev  = &ndev->dev;
1015         priv->msg_enable = netif_msg_init(debug_level, CPSW_DEBUG);
1016         priv->rx_packet_max = max(rx_packet_max, 128);
1017
1018         if (cpsw_probe_dt(&priv->data, pdev)) {
1019                 pr_err("cpsw: platform data missing\n");
1020                 ret = -ENODEV;
1021                 goto clean_ndev_ret;
1022         }
1023         data = &priv->data;
1024
1025         if (is_valid_ether_addr(data->slave_data[0].mac_addr)) {
1026                 memcpy(priv->mac_addr, data->slave_data[0].mac_addr, ETH_ALEN);
1027                 pr_info("Detected MACID = %pM", priv->mac_addr);
1028         } else {
1029                 eth_random_addr(priv->mac_addr);
1030                 pr_info("Random MACID = %pM", priv->mac_addr);
1031         }
1032
1033         memcpy(ndev->dev_addr, priv->mac_addr, ETH_ALEN);
1034
1035         priv->slaves = kzalloc(sizeof(struct cpsw_slave) * data->slaves,
1036                                GFP_KERNEL);
1037         if (!priv->slaves) {
1038                 ret = -ENOMEM;
1039                 goto clean_ndev_ret;
1040         }
1041         for (i = 0; i < data->slaves; i++)
1042                 priv->slaves[i].slave_num = i;
1043
1044         pm_runtime_enable(&pdev->dev);
1045         priv->clk = clk_get(&pdev->dev, "fck");
1046         if (IS_ERR(priv->clk)) {
1047                 dev_err(&pdev->dev, "fck is not found\n");
1048                 ret = -ENODEV;
1049                 goto clean_slave_ret;
1050         }
1051
1052         priv->cpsw_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1053         if (!priv->cpsw_res) {
1054                 dev_err(priv->dev, "error getting i/o resource\n");
1055                 ret = -ENOENT;
1056                 goto clean_clk_ret;
1057         }
1058
1059         if (!request_mem_region(priv->cpsw_res->start,
1060                                 resource_size(priv->cpsw_res), ndev->name)) {
1061                 dev_err(priv->dev, "failed to request i/o region\n");
1062                 ret = -ENXIO;
1063                 goto clean_clk_ret;
1064         }
1065
1066         regs = ioremap(priv->cpsw_res->start, resource_size(priv->cpsw_res));
1067         if (!regs) {
1068                 dev_err(priv->dev, "unable to map i/o region\n");
                     ret = -ENOMEM;
1069                 goto clean_cpsw_iores_ret;
1070         }
1071         priv->regs = regs;
1072         priv->host_port = data->host_port_num;
1073         priv->host_port_regs = regs + data->host_port_reg_ofs;
1074
1075         priv->cpsw_ss_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1076         if (!priv->cpsw_ss_res) {
1077                 dev_err(priv->dev, "error getting i/o resource\n");
1078                 ret = -ENOENT;
1079                 goto clean_clk_ret;
1080         }
1081
1082         if (!request_mem_region(priv->cpsw_ss_res->start,
1083                         resource_size(priv->cpsw_ss_res), ndev->name)) {
1084                 dev_err(priv->dev, "failed to request i/o region\n");
1085                 ret = -ENXIO;
1086                 goto clean_clk_ret;
1087         }
1088
1089         regs = ioremap(priv->cpsw_ss_res->start,
1090                                 resource_size(priv->cpsw_ss_res));
1091         if (!regs) {
1092                 dev_err(priv->dev, "unable to map i/o region\n");
                     ret = -ENOMEM;
1093                 goto clean_cpsw_ss_iores_ret;
1094         }
1095         priv->wr_regs = regs;
1096
1097         for_each_slave(priv, cpsw_slave_init, priv);
1098
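        /*
         * The CPDMA control registers live at cpdma_reg_ofs inside the
         * CPSW window, while the head-pointer and completion-pointer
         * registers sit in the descriptor SRAM area at cpdma_sram_ofs.
         */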
1099         memset(&dma_params, 0, sizeof(dma_params));
1100         dma_params.dev          = &pdev->dev;
1101         dma_params.dmaregs      = cpsw_dma_regs((u32)priv->regs,
1102                                                 data->cpdma_reg_ofs);
1103         dma_params.rxthresh     = cpsw_dma_rxthresh((u32)priv->regs,
1104                                                     data->cpdma_reg_ofs);
1105         dma_params.rxfree       = cpsw_dma_rxfree((u32)priv->regs,
1106                                                   data->cpdma_reg_ofs);
1107         dma_params.txhdp        = cpsw_dma_txhdp((u32)priv->regs,
1108                                                  data->cpdma_sram_ofs);
1109         dma_params.rxhdp        = cpsw_dma_rxhdp((u32)priv->regs,
1110                                                  data->cpdma_sram_ofs);
1111         dma_params.txcp         = cpsw_dma_txcp((u32)priv->regs,
1112                                                 data->cpdma_sram_ofs);
1113         dma_params.rxcp         = cpsw_dma_rxcp((u32)priv->regs,
1114                                                 data->cpdma_sram_ofs);
1115
1116         dma_params.num_chan             = data->channels;
1117         dma_params.has_soft_reset       = true;
1118         dma_params.min_packet_size      = CPSW_MIN_PACKET_SIZE;
1119         dma_params.desc_mem_size        = data->bd_ram_size;
1120         dma_params.desc_align           = 16;
1121         dma_params.has_ext_regs         = true;
1122         dma_params.desc_mem_phys        = data->no_bd_ram ? 0 :
1123                         (u32 __force)priv->cpsw_res->start + data->bd_ram_ofs;
1124         dma_params.desc_hw_addr         = data->hw_ram_addr ?
1125                         data->hw_ram_addr : dma_params.desc_mem_phys ;
1126
1127         priv->dma = cpdma_ctlr_create(&dma_params);
1128         if (!priv->dma) {
1129                 dev_err(priv->dev, "error initializing dma\n");
1130                 ret = -ENOMEM;
1131                 goto clean_iomap_ret;
1132         }
1133
1134         priv->txch = cpdma_chan_create(priv->dma, tx_chan_num(0),
1135                                        cpsw_tx_handler);
1136         priv->rxch = cpdma_chan_create(priv->dma, rx_chan_num(0),
1137                                        cpsw_rx_handler);
1138
1139         if (WARN_ON(!priv->txch || !priv->rxch)) {
1140                 dev_err(priv->dev, "error initializing dma channels\n");
1141                 ret = -ENOMEM;
1142                 goto clean_dma_ret;
1143         }
1144
1145         memset(&ale_params, 0, sizeof(ale_params));
1146         ale_params.dev                  = &ndev->dev;
1147         ale_params.ale_regs             = (void *)((u32)priv->regs) +
1148                                                 ((u32)data->ale_reg_ofs);
1149         ale_params.ale_ageout           = ale_ageout;
1150         ale_params.ale_entries          = data->ale_entries;
1151         ale_params.ale_ports            = data->slaves;
1152
1153         priv->ale = cpsw_ale_create(&ale_params);
1154         if (!priv->ale) {
1155                 dev_err(priv->dev, "error initializing ale engine\n");
1156                 ret = -ENODEV;
1157                 goto clean_dma_ret;
1158         }
1159
1160         ndev->irq = platform_get_irq(pdev, 0);
1161         if (ndev->irq < 0) {
1162                 dev_err(priv->dev, "error getting irq resource\n");
1163                 ret = -ENOENT;
1164                 goto clean_ale_ret;
1165         }
1166
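        /*
         * Walk every IORESOURCE_IRQ resource, attach the shared handler
         * to each line and record the numbers in irqs_table so NAPI can
         * disable and re-enable them around polling.
         */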
1167         while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
1168                 for (i = res->start; i <= res->end; i++) {
1169                         if (request_irq(i, cpsw_interrupt, IRQF_DISABLED,
1170                                         dev_name(&pdev->dev), priv)) {
1171                                 dev_err(priv->dev, "error attaching irq\n");
1172                                 goto clean_ale_ret;
1173                         }
1174                         priv->irqs_table[k] = i;
1175                         priv->num_irqs = k + 1; /* count, not last index */
1176                 }
1177                 k++;
1178         }
1179
1180         ndev->flags |= IFF_ALLMULTI;    /* see cpsw_ndo_change_rx_flags() */
1181
1182         ndev->netdev_ops = &cpsw_netdev_ops;
1183         SET_ETHTOOL_OPS(ndev, &cpsw_ethtool_ops);
1184         netif_napi_add(ndev, &priv->napi, cpsw_poll, CPSW_POLL_WEIGHT);
1185
1186         /* register the network device */
1187         SET_NETDEV_DEV(ndev, &pdev->dev);
1188         ret = register_netdev(ndev);
1189         if (ret) {
1190                 dev_err(priv->dev, "error registering net device\n");
1191                 ret = -ENODEV;
1192                 goto clean_irq_ret;
1193         }
1194
1195         cpsw_notice(priv, probe, "initialized device (regs %x, irq %d)\n",
1196                   priv->cpsw_res->start, ndev->irq);
1197
1198         return 0;
1199
1200 clean_irq_ret:
1201         free_irq(ndev->irq, priv);
1202 clean_ale_ret:
1203         cpsw_ale_destroy(priv->ale);
1204 clean_dma_ret:
1205         cpdma_chan_destroy(priv->txch);
1206         cpdma_chan_destroy(priv->rxch);
1207         cpdma_ctlr_destroy(priv->dma);
1208 clean_iomap_ret:
1209         iounmap(priv->regs);
1210 clean_cpsw_ss_iores_ret:
1211         release_mem_region(priv->cpsw_ss_res->start,
1212                            resource_size(priv->cpsw_ss_res));
1213 clean_cpsw_iores_ret:
1214         release_mem_region(priv->cpsw_res->start,
1215                            resource_size(priv->cpsw_res));
1216 clean_clk_ret:
1217         clk_put(priv->clk);
1218 clean_slave_ret:
1219         pm_runtime_disable(&pdev->dev);
1220         kfree(priv->slaves);
1221 clean_ndev_ret:
1222         free_netdev(ndev);
1223         return ret;
1224 }
1225
1226 static int __devexit cpsw_remove(struct platform_device *pdev)
1227 {
1228         struct net_device *ndev = platform_get_drvdata(pdev);
1229         struct cpsw_priv *priv = netdev_priv(ndev);
1230
1231         pr_info("removing device\n");
1232         platform_set_drvdata(pdev, NULL);
1233
1234         free_irq(ndev->irq, priv);
1235         cpsw_ale_destroy(priv->ale);
1236         cpdma_chan_destroy(priv->txch);
1237         cpdma_chan_destroy(priv->rxch);
1238         cpdma_ctlr_destroy(priv->dma);
1239         iounmap(priv->regs);
1240         release_mem_region(priv->cpsw_res->start,
1241                            resource_size(priv->cpsw_res));
1242         release_mem_region(priv->cpsw_ss_res->start,
1243                            resource_size(priv->cpsw_ss_res));
1244         pm_runtime_disable(&pdev->dev);
1245         clk_put(priv->clk);
1246         kfree(priv->slaves);
1247         free_netdev(ndev);
1248
1249         return 0;
1250 }
1251
1252 static int cpsw_suspend(struct device *dev)
1253 {
1254         struct platform_device  *pdev = to_platform_device(dev);
1255         struct net_device       *ndev = platform_get_drvdata(pdev);
1256
1257         if (netif_running(ndev))
1258                 cpsw_ndo_stop(ndev);
1259         pm_runtime_put_sync(&pdev->dev);
1260
1261         return 0;
1262 }
1263
1264 static int cpsw_resume(struct device *dev)
1265 {
1266         struct platform_device  *pdev = to_platform_device(dev);
1267         struct net_device       *ndev = platform_get_drvdata(pdev);
1268
1269         pm_runtime_get_sync(&pdev->dev);
1270         if (netif_running(ndev))
1271                 cpsw_ndo_open(ndev);
1272         return 0;
1273 }
1274
1275 static const struct dev_pm_ops cpsw_pm_ops = {
1276         .suspend        = cpsw_suspend,
1277         .resume         = cpsw_resume,
1278 };
1279
1280 static const struct of_device_id cpsw_of_mtable[] = {
1281         { .compatible = "ti,cpsw", },
1282         { /* sentinel */ },
1283 };
1284
1285 static struct platform_driver cpsw_driver = {
1286         .driver = {
1287                 .name    = "cpsw",
1288                 .owner   = THIS_MODULE,
1289                 .pm      = &cpsw_pm_ops,
1290                 .of_match_table = of_match_ptr(cpsw_of_mtable),
1291         },
1292         .probe = cpsw_probe,
1293         .remove = __devexit_p(cpsw_remove),
1294 };
1295
1296 static int __init cpsw_init(void)
1297 {
1298         return platform_driver_register(&cpsw_driver);
1299 }
1300 late_initcall(cpsw_init);
1301
1302 static void __exit cpsw_exit(void)
1303 {
1304         platform_driver_unregister(&cpsw_driver);
1305 }
1306 module_exit(cpsw_exit);
1307
1308 MODULE_LICENSE("GPL");
1309 MODULE_AUTHOR("Cyril Chemparathy <cyril@ti.com>");
1310 MODULE_AUTHOR("Mugunthan V N <mugunthanvnm@ti.com>");
1311 MODULE_DESCRIPTION("TI CPSW Ethernet driver");