Pileus Git - ~andy/linux/blob - drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
net: stmmac: Add support for optional reset control
[~andy/linux] / drivers / net / ethernet / stmicro / stmmac / stmmac_main.c
1 /*******************************************************************************
2   This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
3   ST Ethernet IPs are built around a Synopsys IP Core.
4
5         Copyright(C) 2007-2011 STMicroelectronics Ltd
6
7   This program is free software; you can redistribute it and/or modify it
8   under the terms and conditions of the GNU General Public License,
9   version 2, as published by the Free Software Foundation.
10
11   This program is distributed in the hope it will be useful, but WITHOUT
12   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14   more details.
15
16   You should have received a copy of the GNU General Public License along with
17   this program; if not, write to the Free Software Foundation, Inc.,
18   51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
19
20   The full GNU General Public License is included in this distribution in
21   the file called "COPYING".
22
23   Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
24
25   Documentation available at:
26         http://www.stlinux.com
27   Support available at:
28         https://bugzilla.stlinux.com/
29 *******************************************************************************/
30
31 #include <linux/clk.h>
32 #include <linux/kernel.h>
33 #include <linux/interrupt.h>
34 #include <linux/ip.h>
35 #include <linux/tcp.h>
36 #include <linux/skbuff.h>
37 #include <linux/ethtool.h>
38 #include <linux/if_ether.h>
39 #include <linux/crc32.h>
40 #include <linux/mii.h>
41 #include <linux/if.h>
42 #include <linux/if_vlan.h>
43 #include <linux/dma-mapping.h>
44 #include <linux/slab.h>
45 #include <linux/prefetch.h>
46 #include <linux/pinctrl/consumer.h>
47 #ifdef CONFIG_STMMAC_DEBUG_FS
48 #include <linux/debugfs.h>
49 #include <linux/seq_file.h>
50 #endif /* CONFIG_STMMAC_DEBUG_FS */
51 #include <linux/net_tstamp.h>
52 #include "stmmac_ptp.h"
53 #include "stmmac.h"
54 #include <linux/reset.h>
55
#define STMMAC_ALIGN(x)	L1_CACHE_ALIGN(x)
#define JUMBO_LEN	9000

/* Module parameters.
 * Note: S_IRUGO | S_IWUSR params are writable at runtime via sysfs
 * (/sys/module/...); S_IRUGO-only params are load-time only.
 */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, S_IRUGO);
MODULE_PARM_DESC(phyaddr, "Physical device address");

/* Default number of TX DMA descriptors */
#define DMA_TX_SIZE 256
static int dma_txsize = DMA_TX_SIZE;
module_param(dma_txsize, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dma_txsize, "Number of descriptors in the TX list");

/* Default number of RX DMA descriptors */
#define DMA_RX_SIZE 256
static int dma_rxsize = DMA_RX_SIZE;
module_param(dma_rxsize, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dma_rxsize, "Number of descriptors in the RX list");

static int flow_ctrl = FLOW_OFF;
module_param(flow_ctrl, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");

static int pause = PAUSE_TIME;
module_param(pause, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(pause, "Flow Control Pause Time");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");

#define DMA_BUFFER_SIZE	BUF_SIZE_2KiB
static int buf_sz = DMA_BUFFER_SIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

/* Default netif message level used when "debug" is -1 */
static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
/* Absolute jiffies deadline for the EEE LPI software timer */
#define STMMAC_LPI_T(x) (jiffies + msecs_to_jiffies(x))
110
111 /* By default the driver will use the ring mode to manage tx and rx descriptors
112  * but passing this value so user can force to use the chain instead of the ring
113  */
114 static unsigned int chain_mode;
115 module_param(chain_mode, int, S_IRUGO);
116 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
117
118 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
119
120 #ifdef CONFIG_STMMAC_DEBUG_FS
121 static int stmmac_init_fs(struct net_device *dev);
122 static void stmmac_exit_fs(void);
123 #endif
124
125 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
126
127 /**
128  * stmmac_verify_args - verify the driver parameters.
129  * Description: it verifies if some wrong parameter is passed to the driver.
130  * Note that wrong parameters are replaced with the default values.
131  */
132 static void stmmac_verify_args(void)
133 {
134         if (unlikely(watchdog < 0))
135                 watchdog = TX_TIMEO;
136         if (unlikely(dma_rxsize < 0))
137                 dma_rxsize = DMA_RX_SIZE;
138         if (unlikely(dma_txsize < 0))
139                 dma_txsize = DMA_TX_SIZE;
140         if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
141                 buf_sz = DMA_BUFFER_SIZE;
142         if (unlikely(flow_ctrl > 1))
143                 flow_ctrl = FLOW_AUTO;
144         else if (likely(flow_ctrl < 0))
145                 flow_ctrl = FLOW_OFF;
146         if (unlikely((pause < 0) || (pause > 0xffff)))
147                 pause = PAUSE_TIME;
148         if (eee_timer < 0)
149                 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
150 }
151
152 /**
153  * stmmac_clk_csr_set - dynamically set the MDC clock
154  * @priv: driver private structure
155  * Description: this is to dynamically set the MDC clock according to the csr
156  * clock input.
157  * Note:
158  *      If a specific clk_csr value is passed from the platform
159  *      this means that the CSR Clock Range selection cannot be
160  *      changed at run-time and it is fixed (as reported in the driver
161  *      documentation). Viceversa the driver will try to set the MDC
162  *      clock dynamically according to the actual clock input.
163  */
164 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
165 {
166         u32 clk_rate;
167
168         clk_rate = clk_get_rate(priv->stmmac_clk);
169
170         /* Platform provided default clk_csr would be assumed valid
171          * for all other cases except for the below mentioned ones.
172          * For values higher than the IEEE 802.3 specified frequency
173          * we can not estimate the proper divider as it is not known
174          * the frequency of clk_csr_i. So we do not change the default
175          * divider.
176          */
177         if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
178                 if (clk_rate < CSR_F_35M)
179                         priv->clk_csr = STMMAC_CSR_20_35M;
180                 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
181                         priv->clk_csr = STMMAC_CSR_35_60M;
182                 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
183                         priv->clk_csr = STMMAC_CSR_60_100M;
184                 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
185                         priv->clk_csr = STMMAC_CSR_100_150M;
186                 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
187                         priv->clk_csr = STMMAC_CSR_150_250M;
188                 else if ((clk_rate >= CSR_F_250M) && (clk_rate < CSR_F_300M))
189                         priv->clk_csr = STMMAC_CSR_250_300M;
190         }
191 }
192
/**
 * print_pkt - hex-dump a packet buffer at debug log level
 * @buf: buffer to dump; const-qualified as it is only read
 * @len: number of bytes to print
 *
 * Emits @len bytes of @buf via pr_debug(), 16 bytes per line with a
 * hex offset prefix on each line.
 */
static void print_pkt(const unsigned char *buf, int len)
{
	int j;

	pr_debug("len = %d byte, buf addr: 0x%p", len, buf);
	for (j = 0; j < len; j++) {
		if ((j % 16) == 0)
			pr_debug("\n %03x:", j);
		pr_debug(" %02x", buf[j]);
	}
	pr_debug("\n");
}
204
205 /* minimum number of free TX descriptors required to wake up TX process */
206 #define STMMAC_TX_THRESH(x)     (x->dma_tx_size/4)
207
208 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
209 {
210         return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
211 }
212
213 /**
214  * stmmac_hw_fix_mac_speed: callback for speed selection
215  * @priv: driver private structure
216  * Description: on some platforms (e.g. ST), some HW system configuraton
217  * registers have to be set according to the link speed negotiated.
218  */
219 static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
220 {
221         struct phy_device *phydev = priv->phydev;
222
223         if (likely(priv->plat->fix_mac_speed))
224                 priv->plat->fix_mac_speed(priv->plat->bsp_priv, phydev->speed);
225 }
226
227 /**
228  * stmmac_enable_eee_mode: Check and enter in LPI mode
229  * @priv: driver private structure
230  * Description: this function is to verify and enter in LPI mode for EEE.
231  */
232 static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
233 {
234         /* Check and enter in LPI mode */
235         if ((priv->dirty_tx == priv->cur_tx) &&
236             (priv->tx_path_in_lpi_mode == false))
237                 priv->hw->mac->set_eee_mode(priv->ioaddr);
238 }
239
/**
 * stmmac_disable_eee_mode: disable/exit from EEE
 * @priv: driver private structure
 * Description: this function is to exit and disable EEE in case of
 * LPI state is true. This is called by the xmit.
 */
void stmmac_disable_eee_mode(struct stmmac_priv *priv)
{
	/* Take the MAC out of LPI first, then stop the SW timer that
	 * would otherwise re-enter LPI behind our back.
	 */
	priv->hw->mac->reset_eee_mode(priv->ioaddr);
	del_timer_sync(&priv->eee_ctrl_timer);
	priv->tx_path_in_lpi_mode = false;
}
252
/**
 * stmmac_eee_ctrl_timer: EEE TX SW timer.
 * @arg : data hook (the stmmac_priv pointer, cast to unsigned long)
 * Description:
 *  if there is no data transfer and if we are not in LPI state,
 *  then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(unsigned long arg)
{
	struct stmmac_priv *priv = (struct stmmac_priv *)arg;

	stmmac_enable_eee_mode(priv);
	/* re-arm so the LPI entry check keeps running periodically */
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
}
267
268 /**
269  * stmmac_eee_init: init EEE
270  * @priv: driver private structure
271  * Description:
272  *  If the EEE support has been enabled while configuring the driver,
273  *  if the GMAC actually supports the EEE (from the HW cap reg) and the
274  *  phy can also manage EEE, so enable the LPI state and start the timer
275  *  to verify if the tx path can enter in LPI state.
276  */
277 bool stmmac_eee_init(struct stmmac_priv *priv)
278 {
279         bool ret = false;
280
281         /* Using PCS we cannot dial with the phy registers at this stage
282          * so we do not support extra feature like EEE.
283          */
284         if ((priv->pcs == STMMAC_PCS_RGMII) || (priv->pcs == STMMAC_PCS_TBI) ||
285             (priv->pcs == STMMAC_PCS_RTBI))
286                 goto out;
287
288         /* MAC core supports the EEE feature. */
289         if (priv->dma_cap.eee) {
290                 /* Check if the PHY supports EEE */
291                 if (phy_init_eee(priv->phydev, 1))
292                         goto out;
293
294                 if (!priv->eee_active) {
295                         priv->eee_active = 1;
296                         init_timer(&priv->eee_ctrl_timer);
297                         priv->eee_ctrl_timer.function = stmmac_eee_ctrl_timer;
298                         priv->eee_ctrl_timer.data = (unsigned long)priv;
299                         priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
300                         add_timer(&priv->eee_ctrl_timer);
301
302                         priv->hw->mac->set_eee_timer(priv->ioaddr,
303                                                      STMMAC_DEFAULT_LIT_LS,
304                                                      priv->tx_lpi_timer);
305                 } else
306                         /* Set HW EEE according to the speed */
307                         priv->hw->mac->set_eee_pls(priv->ioaddr,
308                                                    priv->phydev->link);
309
310                 pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
311
312                 ret = true;
313         }
314 out:
315         return ret;
316 }
317
318 /* stmmac_get_tx_hwtstamp: get HW TX timestamps
319  * @priv: driver private structure
320  * @entry : descriptor index to be used.
321  * @skb : the socket buffer
322  * Description :
323  * This function will read timestamp from the descriptor & pass it to stack.
324  * and also perform some sanity checks.
325  */
326 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
327                                    unsigned int entry, struct sk_buff *skb)
328 {
329         struct skb_shared_hwtstamps shhwtstamp;
330         u64 ns;
331         void *desc = NULL;
332
333         if (!priv->hwts_tx_en)
334                 return;
335
336         /* exit if skb doesn't support hw tstamp */
337         if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
338                 return;
339
340         if (priv->adv_ts)
341                 desc = (priv->dma_etx + entry);
342         else
343                 desc = (priv->dma_tx + entry);
344
345         /* check tx tstamp status */
346         if (!priv->hw->desc->get_tx_timestamp_status((struct dma_desc *)desc))
347                 return;
348
349         /* get the valid tstamp */
350         ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
351
352         memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
353         shhwtstamp.hwtstamp = ns_to_ktime(ns);
354         /* pass tstamp to stack */
355         skb_tstamp_tx(skb, &shhwtstamp);
356
357         return;
358 }
359
360 /* stmmac_get_rx_hwtstamp: get HW RX timestamps
361  * @priv: driver private structure
362  * @entry : descriptor index to be used.
363  * @skb : the socket buffer
364  * Description :
365  * This function will read received packet's timestamp from the descriptor
366  * and pass it to stack. It also perform some sanity checks.
367  */
368 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv,
369                                    unsigned int entry, struct sk_buff *skb)
370 {
371         struct skb_shared_hwtstamps *shhwtstamp = NULL;
372         u64 ns;
373         void *desc = NULL;
374
375         if (!priv->hwts_rx_en)
376                 return;
377
378         if (priv->adv_ts)
379                 desc = (priv->dma_erx + entry);
380         else
381                 desc = (priv->dma_rx + entry);
382
383         /* exit if rx tstamp is not valid */
384         if (!priv->hw->desc->get_rx_timestamp_status(desc, priv->adv_ts))
385                 return;
386
387         /* get valid tstamp */
388         ns = priv->hw->desc->get_timestamp(desc, priv->adv_ts);
389         shhwtstamp = skb_hwtstamps(skb);
390         memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
391         shhwtstamp->hwtstamp = ns_to_ktime(ns);
392 }
393
/**
 *  stmmac_hwtstamp_ioctl - control hardware timestamping.
 *  @dev: device pointer.
 *  @ifr: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  Description:
 *  This function configures the MAC to enable/disable both outgoing(TX)
 *  and incoming(RX) packets time stamping based on user input.
 *  Return Value:
 *  0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct hwtstamp_config config;
	struct timespec now;
	u64 temp = 0;
	/* PTP_TCR register bit fields, accumulated per the requested filter */
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;
	u32 value = 0;

	/* Need either the base or the advanced timestamp capability
	 * from the DMA HW capability register.
	 */
	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		netdev_alert(priv->dev, "No support for HW time stamping\n");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (copy_from_user(&config, ifr->ifr_data,
			   sizeof(struct hwtstamp_config)))
		return -EFAULT;

	pr_debug("%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		 __func__, config.flags, config.tx_type, config.rx_filter);

	/* reserved for future extensions */
	if (config.flags)
		return -EINVAL;

	if (config.tx_type != HWTSTAMP_TX_OFF &&
	    config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* With advanced (v2-capable) timestamping the HW can implement
	 * every filter exactly; otherwise (else branch below) anything
	 * other than NONE degrades to PTP v1 L4 event filtering.
	 */
	if (priv->adv_ts) {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config.rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		switch (config.rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config.rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
	priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;

	if (!priv->hwts_tx_en && !priv->hwts_rx_en)
		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, 0);
	else {
		value = (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | PTP_TCR_TSCTRLSSR |
			 tstamp_all | ptp_v2 | ptp_over_ethernet |
			 ptp_over_ipv6_udp | ptp_over_ipv4_udp | ts_event_en |
			 ts_master_en | snap_type_sel);

		priv->hw->ptp->config_hw_tstamping(priv->ioaddr, value);

		/* program Sub Second Increment reg */
		priv->hw->ptp->config_sub_second_increment(priv->ioaddr);

		/* calculate default added value:
		 * formula is :
		 * addend = (2^32)/freq_div_ratio;
		 * where, freq_div_ratio = STMMAC_SYSCLOCK/50MHz
		 * hence, addend = ((2^32) * 50MHz)/STMMAC_SYSCLOCK;
		 * NOTE: STMMAC_SYSCLOCK should be >= 50MHz to
		 *       achieve 20ns accuracy.
		 *
		 * 2^x * y == (y << x), hence
		 * 2^32 * 50000000 ==> (50000000 << 32)
		 */
		temp = (u64) (50000000ULL << 32);
		priv->default_addend = div_u64(temp, STMMAC_SYSCLOCK);
		priv->hw->ptp->config_addend(priv->ioaddr,
					     priv->default_addend);

		/* initialize system time */
		getnstimeofday(&now);
		priv->hw->ptp->init_systime(priv->ioaddr, now.tv_sec,
					    now.tv_nsec);
	}

	/* hand the (possibly downgraded) config back to user space */
	return copy_to_user(ifr->ifr_data, &config,
			    sizeof(struct hwtstamp_config)) ? -EFAULT : 0;
}
614
615 /**
616  * stmmac_init_ptp: init PTP
617  * @priv: driver private structure
618  * Description: this is to verify if the HW supports the PTPv1 or v2.
619  * This is done by looking at the HW cap. register.
620  * Also it registers the ptp driver.
621  */
622 static int stmmac_init_ptp(struct stmmac_priv *priv)
623 {
624         if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
625                 return -EOPNOTSUPP;
626
627         priv->adv_ts = 0;
628         if (priv->dma_cap.atime_stamp && priv->extend_desc)
629                 priv->adv_ts = 1;
630
631         if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
632                 pr_debug("IEEE 1588-2002 Time Stamp supported\n");
633
634         if (netif_msg_hw(priv) && priv->adv_ts)
635                 pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
636
637         priv->hw->ptp = &stmmac_ptp;
638         priv->hwts_tx_en = 0;
639         priv->hwts_rx_en = 0;
640
641         return stmmac_ptp_register(priv);
642 }
643
/* stmmac_release_ptp: undo stmmac_init_ptp() by unregistering the PTP clock */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	stmmac_ptp_unregister(priv);
}
648
/**
 * stmmac_adjust_link
 * @dev: net device structure
 * Description: it adjusts the link parameters; called by the phylib state
 * machine whenever link, speed or duplex changes.
 */
static void stmmac_adjust_link(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;
	unsigned long flags;
	int new_state = 0;
	unsigned int fc = priv->flow_ctrl, pause_time = priv->pause;

	if (phydev == NULL)
		return;

	/* priv->lock guards the cached link state (oldlink/speed/oldduplex)
	 * and the MAC_CTRL_REG read-modify-write below.
	 */
	spin_lock_irqsave(&priv->lock, flags);

	if (phydev->link) {
		u32 ctrl = readl(priv->ioaddr + MAC_CTRL_REG);

		/* Now we make sure that we can be in full duplex mode.
		 * If not, we operate in half-duplex mode.
		 */
		if (phydev->duplex != priv->oldduplex) {
			new_state = 1;
			if (!(phydev->duplex))
				ctrl &= ~priv->hw->link.duplex;
			else
				ctrl |= priv->hw->link.duplex;
			priv->oldduplex = phydev->duplex;
		}
		/* Flow Control operation */
		if (phydev->pause)
			priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
						 fc, pause_time);

		if (phydev->speed != priv->speed) {
			new_state = 1;
			switch (phydev->speed) {
			case 1000:
				/* GMII port bit cleared selects 1000Mbps */
				if (likely(priv->plat->has_gmac))
					ctrl &= ~priv->hw->link.port;
				stmmac_hw_fix_mac_speed(priv);
				break;
			case 100:
			case 10:
				if (priv->plat->has_gmac) {
					ctrl |= priv->hw->link.port;
					if (phydev->speed == SPEED_100) {
						ctrl |= priv->hw->link.speed;
					} else {
						ctrl &= ~(priv->hw->link.speed);
					}
				} else {
					ctrl &= ~priv->hw->link.port;
				}
				stmmac_hw_fix_mac_speed(priv);
				break;
			default:
				if (netif_msg_link(priv))
					pr_warn("%s: Speed (%d) not 10/100\n",
						dev->name, phydev->speed);
				break;
			}

			priv->speed = phydev->speed;
		}

		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

		if (!priv->oldlink) {
			new_state = 1;
			priv->oldlink = 1;
		}
	} else if (priv->oldlink) {
		/* link went down: invalidate the cached speed/duplex so the
		 * next link-up re-programs the MAC.
		 */
		new_state = 1;
		priv->oldlink = 0;
		priv->speed = 0;
		priv->oldduplex = -1;
	}

	if (new_state && netif_msg_link(priv))
		phy_print_status(phydev);

	/* At this stage, it could be needed to setup the EEE or adjust some
	 * MAC related HW registers.
	 */
	priv->eee_enabled = stmmac_eee_init(priv);

	spin_unlock_irqrestore(&priv->lock, flags);
}
740
741 /**
742  * stmmac_check_pcs_mode: verify if RGMII/SGMII is supported
743  * @priv: driver private structure
744  * Description: this is to verify if the HW supports the PCS.
745  * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
746  * configured for the TBI, RTBI, or SGMII PHY interface.
747  */
748 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
749 {
750         int interface = priv->plat->interface;
751
752         if (priv->dma_cap.pcs) {
753                 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
754                     (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
755                     (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
756                     (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
757                         pr_debug("STMMAC: PCS RGMII support enable\n");
758                         priv->pcs = STMMAC_PCS_RGMII;
759                 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
760                         pr_debug("STMMAC: PCS SGMII support enable\n");
761                         priv->pcs = STMMAC_PCS_SGMII;
762                 }
763         }
764 }
765
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 *  Return value:
 *  0 on success (negative errno on attach failure or missing PHY)
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id_fmt[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];
	int interface = priv->plat->interface;
	int max_speed = priv->plat->max_speed;
	/* Reset the cached link state so stmmac_adjust_link() starts clean. */
	priv->oldlink = 0;
	priv->speed = 0;
	priv->oldduplex = -1;

	/* Build the MDIO bus id from the platform-provided bus name, or
	 * fall back to the default "stmmac-<bus_id>" scheme.
	 */
	if (priv->plat->phy_bus_name)
		snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x",
			 priv->plat->phy_bus_name, priv->plat->bus_id);
	else
		snprintf(bus_id, MII_BUS_ID_SIZE, "stmmac-%x",
			 priv->plat->bus_id);

	snprintf(phy_id_fmt, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	pr_debug("stmmac_init_phy:  trying to attach to %s\n", phy_id_fmt);

	/* Attach; stmmac_adjust_link() becomes the link-change callback. */
	phydev = phy_connect(dev, phy_id_fmt, &stmmac_adjust_link, interface);

	if (IS_ERR(phydev)) {
		pr_err("%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/* Stop Advertising 1000BASE Capability if interface is not GMII */
	if ((interface == PHY_INTERFACE_MODE_MII) ||
	    (interface == PHY_INTERFACE_MODE_RMII) ||
		(max_speed < 1000 &&  max_speed > 0))
		phydev->advertising &= ~(SUPPORTED_1000baseT_Half |
					 SUPPORTED_1000baseT_Full);

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}
	pr_debug("stmmac_init_phy:  %s: attached to PHY (UID 0x%x)"
		 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);

	priv->phydev = phydev;

	return 0;
}
829
830 /**
831  * stmmac_display_ring: display ring
832  * @head: pointer to the head of the ring passed.
833  * @size: size of the ring.
834  * @extend_desc: to verify if extended descriptors are used.
835  * Description: display the control/status and buffer descriptors.
836  */
837 static void stmmac_display_ring(void *head, int size, int extend_desc)
838 {
839         int i;
840         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
841         struct dma_desc *p = (struct dma_desc *)head;
842
843         for (i = 0; i < size; i++) {
844                 u64 x;
845                 if (extend_desc) {
846                         x = *(u64 *) ep;
847                         pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
848                                 i, (unsigned int)virt_to_phys(ep),
849                                 (unsigned int)x, (unsigned int)(x >> 32),
850                                 ep->basic.des2, ep->basic.des3);
851                         ep++;
852                 } else {
853                         x = *(u64 *) p;
854                         pr_info("%d [0x%x]: 0x%x 0x%x 0x%x 0x%x",
855                                 i, (unsigned int)virt_to_phys(p),
856                                 (unsigned int)x, (unsigned int)(x >> 32),
857                                 p->des2, p->des3);
858                         p++;
859                 }
860                 pr_info("\n");
861         }
862 }
863
864 static void stmmac_display_rings(struct stmmac_priv *priv)
865 {
866         unsigned int txsize = priv->dma_tx_size;
867         unsigned int rxsize = priv->dma_rx_size;
868
869         if (priv->extend_desc) {
870                 pr_info("Extended RX descriptor ring:\n");
871                 stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
872                 pr_info("Extended TX descriptor ring:\n");
873                 stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
874         } else {
875                 pr_info("RX descriptor ring:\n");
876                 stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
877                 pr_info("TX descriptor ring:\n");
878                 stmmac_display_ring((void *)priv->dma_tx, txsize, 0);
879         }
880 }
881
882 static int stmmac_set_bfsize(int mtu, int bufsize)
883 {
884         int ret = bufsize;
885
886         if (mtu >= BUF_SIZE_4KiB)
887                 ret = BUF_SIZE_8KiB;
888         else if (mtu >= BUF_SIZE_2KiB)
889                 ret = BUF_SIZE_4KiB;
890         else if (mtu >= DMA_BUFFER_SIZE)
891                 ret = BUF_SIZE_2KiB;
892         else
893                 ret = DMA_BUFFER_SIZE;
894
895         return ret;
896 }
897
898 /**
899  * stmmac_clear_descriptors: clear descriptors
900  * @priv: driver private structure
901  * Description: this function is called to clear the tx and rx descriptors
902  * in case of both basic and extended descriptors are used.
903  */
904 static void stmmac_clear_descriptors(struct stmmac_priv *priv)
905 {
906         int i;
907         unsigned int txsize = priv->dma_tx_size;
908         unsigned int rxsize = priv->dma_rx_size;
909
910         /* Clear the Rx/Tx descriptors */
911         for (i = 0; i < rxsize; i++)
912                 if (priv->extend_desc)
913                         priv->hw->desc->init_rx_desc(&priv->dma_erx[i].basic,
914                                                      priv->use_riwt, priv->mode,
915                                                      (i == rxsize - 1));
916                 else
917                         priv->hw->desc->init_rx_desc(&priv->dma_rx[i],
918                                                      priv->use_riwt, priv->mode,
919                                                      (i == rxsize - 1));
920         for (i = 0; i < txsize; i++)
921                 if (priv->extend_desc)
922                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
923                                                      priv->mode,
924                                                      (i == txsize - 1));
925                 else
926                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
927                                                      priv->mode,
928                                                      (i == txsize - 1));
929 }
930
/* Allocate and DMA-map one receive skb and wire its DMA address into
 * descriptor @p at ring index @i.
 * Returns 0 on success, -ENOMEM/-EINVAL on allocation/mapping failure.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
				  int i)
{
	struct sk_buff *skb;

	/* Over-allocate by NET_IP_ALIGN so the IP header can be aligned. */
	skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
				 GFP_KERNEL);
	if (!skb) {
		pr_err("%s: Rx init fails; skb is NULL\n", __func__);
		return -ENOMEM;
	}
	skb_reserve(skb, NET_IP_ALIGN);
	priv->rx_skbuff[i] = skb;
	priv->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data,
						priv->dma_buf_sz,
						DMA_FROM_DEVICE);
	if (dma_mapping_error(priv->device, priv->rx_skbuff_dma[i])) {
		pr_err("%s: DMA mapping error\n", __func__);
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	/* Hand the buffer's DMA address to the descriptor. */
	p->des2 = priv->rx_skbuff_dma[i];

	/* In ring mode with 16KiB buffers des3 needs programming too. */
	if ((priv->mode == STMMAC_RING_MODE) &&
	    (priv->dma_buf_sz == BUF_SIZE_16KiB))
		priv->hw->ring->init_desc3(p);

	return 0;
}
961
962 static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
963 {
964         if (priv->rx_skbuff[i]) {
965                 dma_unmap_single(priv->device, priv->rx_skbuff_dma[i],
966                                  priv->dma_buf_sz, DMA_FROM_DEVICE);
967                 dev_kfree_skb_any(priv->rx_skbuff[i]);
968         }
969         priv->rx_skbuff[i] = NULL;
970 }
971
972 /**
973  * init_dma_desc_rings - init the RX/TX descriptor rings
974  * @dev: net device structure
975  * Description:  this function initializes the DMA RX/TX descriptors
976  * and allocates the socket buffers. It suppors the chained and ring
977  * modes.
978  */
979 static int init_dma_desc_rings(struct net_device *dev)
980 {
981         int i;
982         struct stmmac_priv *priv = netdev_priv(dev);
983         unsigned int txsize = priv->dma_tx_size;
984         unsigned int rxsize = priv->dma_rx_size;
985         unsigned int bfsize = 0;
986         int ret = -ENOMEM;
987
988         /* Set the max buffer size according to the DESC mode
989          * and the MTU. Note that RING mode allows 16KiB bsize.
990          */
991         if (priv->mode == STMMAC_RING_MODE)
992                 bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
993
994         if (bfsize < BUF_SIZE_16KiB)
995                 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
996
997         if (netif_msg_probe(priv))
998                 pr_debug("%s: txsize %d, rxsize %d, bfsize %d\n", __func__,
999                          txsize, rxsize, bfsize);
1000
1001         if (netif_msg_probe(priv)) {
1002                 pr_debug("(%s) dma_rx_phy=0x%08x dma_tx_phy=0x%08x\n", __func__,
1003                          (u32) priv->dma_rx_phy, (u32) priv->dma_tx_phy);
1004
1005                 /* RX INITIALIZATION */
1006                 pr_debug("\tSKB addresses:\nskb\t\tskb data\tdma data\n");
1007         }
1008         for (i = 0; i < rxsize; i++) {
1009                 struct dma_desc *p;
1010                 if (priv->extend_desc)
1011                         p = &((priv->dma_erx + i)->basic);
1012                 else
1013                         p = priv->dma_rx + i;
1014
1015                 ret = stmmac_init_rx_buffers(priv, p, i);
1016                 if (ret)
1017                         goto err_init_rx_buffers;
1018
1019                 if (netif_msg_probe(priv))
1020                         pr_debug("[%p]\t[%p]\t[%x]\n", priv->rx_skbuff[i],
1021                                  priv->rx_skbuff[i]->data,
1022                                  (unsigned int)priv->rx_skbuff_dma[i]);
1023         }
1024         priv->cur_rx = 0;
1025         priv->dirty_rx = (unsigned int)(i - rxsize);
1026         priv->dma_buf_sz = bfsize;
1027         buf_sz = bfsize;
1028
1029         /* Setup the chained descriptor addresses */
1030         if (priv->mode == STMMAC_CHAIN_MODE) {
1031                 if (priv->extend_desc) {
1032                         priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
1033                                               rxsize, 1);
1034                         priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
1035                                               txsize, 1);
1036                 } else {
1037                         priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
1038                                               rxsize, 0);
1039                         priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
1040                                               txsize, 0);
1041                 }
1042         }
1043
1044         /* TX INITIALIZATION */
1045         for (i = 0; i < txsize; i++) {
1046                 struct dma_desc *p;
1047                 if (priv->extend_desc)
1048                         p = &((priv->dma_etx + i)->basic);
1049                 else
1050                         p = priv->dma_tx + i;
1051                 p->des2 = 0;
1052                 priv->tx_skbuff_dma[i] = 0;
1053                 priv->tx_skbuff[i] = NULL;
1054         }
1055
1056         priv->dirty_tx = 0;
1057         priv->cur_tx = 0;
1058
1059         stmmac_clear_descriptors(priv);
1060
1061         if (netif_msg_hw(priv))
1062                 stmmac_display_rings(priv);
1063
1064         return 0;
1065 err_init_rx_buffers:
1066         while (--i >= 0)
1067                 stmmac_free_rx_buffers(priv, i);
1068         return ret;
1069 }
1070
1071 static void dma_free_rx_skbufs(struct stmmac_priv *priv)
1072 {
1073         int i;
1074
1075         for (i = 0; i < priv->dma_rx_size; i++)
1076                 stmmac_free_rx_buffers(priv, i);
1077 }
1078
1079 static void dma_free_tx_skbufs(struct stmmac_priv *priv)
1080 {
1081         int i;
1082
1083         for (i = 0; i < priv->dma_tx_size; i++) {
1084                 if (priv->tx_skbuff[i] != NULL) {
1085                         struct dma_desc *p;
1086                         if (priv->extend_desc)
1087                                 p = &((priv->dma_etx + i)->basic);
1088                         else
1089                                 p = priv->dma_tx + i;
1090
1091                         if (priv->tx_skbuff_dma[i])
1092                                 dma_unmap_single(priv->device,
1093                                                  priv->tx_skbuff_dma[i],
1094                                                  priv->hw->desc->get_tx_len(p),
1095                                                  DMA_TO_DEVICE);
1096                         dev_kfree_skb_any(priv->tx_skbuff[i]);
1097                         priv->tx_skbuff[i] = NULL;
1098                         priv->tx_skbuff_dma[i] = 0;
1099                 }
1100         }
1101 }
1102
1103 static int alloc_dma_desc_resources(struct stmmac_priv *priv)
1104 {
1105         unsigned int txsize = priv->dma_tx_size;
1106         unsigned int rxsize = priv->dma_rx_size;
1107         int ret = -ENOMEM;
1108
1109         priv->rx_skbuff_dma = kmalloc_array(rxsize, sizeof(dma_addr_t),
1110                                             GFP_KERNEL);
1111         if (!priv->rx_skbuff_dma)
1112                 return -ENOMEM;
1113
1114         priv->rx_skbuff = kmalloc_array(rxsize, sizeof(struct sk_buff *),
1115                                         GFP_KERNEL);
1116         if (!priv->rx_skbuff)
1117                 goto err_rx_skbuff;
1118
1119         priv->tx_skbuff_dma = kmalloc_array(txsize, sizeof(dma_addr_t),
1120                                             GFP_KERNEL);
1121         if (!priv->tx_skbuff_dma)
1122                 goto err_tx_skbuff_dma;
1123
1124         priv->tx_skbuff = kmalloc_array(txsize, sizeof(struct sk_buff *),
1125                                         GFP_KERNEL);
1126         if (!priv->tx_skbuff)
1127                 goto err_tx_skbuff;
1128
1129         if (priv->extend_desc) {
1130                 priv->dma_erx = dma_alloc_coherent(priv->device, rxsize *
1131                                                    sizeof(struct
1132                                                           dma_extended_desc),
1133                                                    &priv->dma_rx_phy,
1134                                                    GFP_KERNEL);
1135                 if (!priv->dma_erx)
1136                         goto err_dma;
1137
1138                 priv->dma_etx = dma_alloc_coherent(priv->device, txsize *
1139                                                    sizeof(struct
1140                                                           dma_extended_desc),
1141                                                    &priv->dma_tx_phy,
1142                                                    GFP_KERNEL);
1143                 if (!priv->dma_etx) {
1144                         dma_free_coherent(priv->device, priv->dma_rx_size *
1145                                         sizeof(struct dma_extended_desc),
1146                                         priv->dma_erx, priv->dma_rx_phy);
1147                         goto err_dma;
1148                 }
1149         } else {
1150                 priv->dma_rx = dma_alloc_coherent(priv->device, rxsize *
1151                                                   sizeof(struct dma_desc),
1152                                                   &priv->dma_rx_phy,
1153                                                   GFP_KERNEL);
1154                 if (!priv->dma_rx)
1155                         goto err_dma;
1156
1157                 priv->dma_tx = dma_alloc_coherent(priv->device, txsize *
1158                                                   sizeof(struct dma_desc),
1159                                                   &priv->dma_tx_phy,
1160                                                   GFP_KERNEL);
1161                 if (!priv->dma_tx) {
1162                         dma_free_coherent(priv->device, priv->dma_rx_size *
1163                                         sizeof(struct dma_desc),
1164                                         priv->dma_rx, priv->dma_rx_phy);
1165                         goto err_dma;
1166                 }
1167         }
1168
1169         return 0;
1170
1171 err_dma:
1172         kfree(priv->tx_skbuff);
1173 err_tx_skbuff:
1174         kfree(priv->tx_skbuff_dma);
1175 err_tx_skbuff_dma:
1176         kfree(priv->rx_skbuff);
1177 err_rx_skbuff:
1178         kfree(priv->rx_skbuff_dma);
1179         return ret;
1180 }
1181
1182 static void free_dma_desc_resources(struct stmmac_priv *priv)
1183 {
1184         /* Release the DMA TX/RX socket buffers */
1185         dma_free_rx_skbufs(priv);
1186         dma_free_tx_skbufs(priv);
1187
1188         /* Free DMA regions of consistent memory previously allocated */
1189         if (!priv->extend_desc) {
1190                 dma_free_coherent(priv->device,
1191                                   priv->dma_tx_size * sizeof(struct dma_desc),
1192                                   priv->dma_tx, priv->dma_tx_phy);
1193                 dma_free_coherent(priv->device,
1194                                   priv->dma_rx_size * sizeof(struct dma_desc),
1195                                   priv->dma_rx, priv->dma_rx_phy);
1196         } else {
1197                 dma_free_coherent(priv->device, priv->dma_tx_size *
1198                                   sizeof(struct dma_extended_desc),
1199                                   priv->dma_etx, priv->dma_tx_phy);
1200                 dma_free_coherent(priv->device, priv->dma_rx_size *
1201                                   sizeof(struct dma_extended_desc),
1202                                   priv->dma_erx, priv->dma_rx_phy);
1203         }
1204         kfree(priv->rx_skbuff_dma);
1205         kfree(priv->rx_skbuff);
1206         kfree(priv->tx_skbuff_dma);
1207         kfree(priv->tx_skbuff);
1208 }
1209
/**
 *  stmmac_dma_operation_mode - HW DMA operation mode
 *  @priv: driver private structure
 *  Description: it sets the DMA operation mode: tx/rx DMA thresholds
 *  or Store-And-Forward capability.
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	/* NOTE: 'tc' is a file-scope threshold shared with the interrupt
	 * path (stmmac_dma_interrupt() bumps it on TX hard errors).
	 */
	if (priv->plat->force_thresh_dma_mode)
		priv->hw->dma->dma_mode(priv->ioaddr, tc, tc);
	else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 *    that needs to not insert csum in the TDES.
		 */
		priv->hw->dma->dma_mode(priv->ioaddr, SF_DMA_MODE, SF_DMA_MODE);
		/* Record SF mode so later threshold bumps are skipped. */
		tc = SF_DMA_MODE;
	} else
		priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
}
1233
/**
 * stmmac_tx_clean:
 * @priv: driver private structure
 * Description: it reclaims resources after transmission completes.
 */
static void stmmac_tx_clean(struct stmmac_priv *priv)
{
	unsigned int txsize = priv->dma_tx_size;

	spin_lock(&priv->tx_lock);

	priv->xstats.tx_clean++;

	/* Walk from the oldest unreclaimed entry toward the producer,
	 * stopping at the first descriptor the DMA still owns.
	 */
	while (priv->dirty_tx != priv->cur_tx) {
		int last;
		unsigned int entry = priv->dirty_tx % txsize;
		struct sk_buff *skb = priv->tx_skbuff[entry];
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_etx + entry);
		else
			p = priv->dma_tx + entry;

		/* Check if the descriptor is owned by the DMA. */
		if (priv->hw->desc->get_tx_owner(p))
			break;

		/* Verify tx error by looking at the last segment. */
		last = priv->hw->desc->get_tx_ls(p);
		if (likely(last)) {
			int tx_error =
			    priv->hw->desc->tx_status(&priv->dev->stats,
						      &priv->xstats, p,
						      priv->ioaddr);
			if (likely(tx_error == 0)) {
				priv->dev->stats.tx_packets++;
				priv->xstats.tx_pkt_n++;
			} else
				priv->dev->stats.tx_errors++;

			stmmac_get_tx_hwtstamp(priv, entry, skb);
		}
		if (netif_msg_tx_done(priv))
			pr_debug("%s: curr %d, dirty %d\n", __func__,
				 priv->cur_tx, priv->dirty_tx);

		/* Unmap the buffer the hardware has finished with. */
		if (likely(priv->tx_skbuff_dma[entry])) {
			dma_unmap_single(priv->device,
					 priv->tx_skbuff_dma[entry],
					 priv->hw->desc->get_tx_len(p),
					 DMA_TO_DEVICE);
			priv->tx_skbuff_dma[entry] = 0;
		}
		priv->hw->ring->clean_desc3(priv, p);

		if (likely(skb != NULL)) {
			dev_kfree_skb(skb);
			priv->tx_skbuff[entry] = NULL;
		}

		priv->hw->desc->release_tx_desc(p, priv->mode);

		priv->dirty_tx++;
	}
	/* Restart the queue if enough descriptors were freed; re-check
	 * under the tx lock to avoid racing with the xmit path.
	 */
	if (unlikely(netif_queue_stopped(priv->dev) &&
		     stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv))) {
		netif_tx_lock(priv->dev);
		if (netif_queue_stopped(priv->dev) &&
		    stmmac_tx_avail(priv) > STMMAC_TX_THRESH(priv)) {
			if (netif_msg_tx_done(priv))
				pr_debug("%s: restart transmit\n", __func__);
			netif_wake_queue(priv->dev);
		}
		netif_tx_unlock(priv->dev);
	}

	/* TX path is idle: arm EEE (LPI) entry after the timer expires. */
	if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) {
		stmmac_enable_eee_mode(priv);
		mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(eee_timer));
	}
	spin_unlock(&priv->tx_lock);
}
1317
/* Unmask the DMA interrupts via the dwmac callback. */
static inline void stmmac_enable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->enable_dma_irq(priv->ioaddr);
}
1322
/* Mask the DMA interrupts; stmmac_dma_interrupt() calls this right
 * before scheduling NAPI so the poll loop runs without further irqs.
 */
static inline void stmmac_disable_dma_irq(struct stmmac_priv *priv)
{
	priv->hw->dma->disable_dma_irq(priv->ioaddr);
}
1327
1328 /**
1329  * stmmac_tx_err: irq tx error mng function
1330  * @priv: driver private structure
1331  * Description: it cleans the descriptors and restarts the transmission
1332  * in case of errors.
1333  */
1334 static void stmmac_tx_err(struct stmmac_priv *priv)
1335 {
1336         int i;
1337         int txsize = priv->dma_tx_size;
1338         netif_stop_queue(priv->dev);
1339
1340         priv->hw->dma->stop_tx(priv->ioaddr);
1341         dma_free_tx_skbufs(priv);
1342         for (i = 0; i < txsize; i++)
1343                 if (priv->extend_desc)
1344                         priv->hw->desc->init_tx_desc(&priv->dma_etx[i].basic,
1345                                                      priv->mode,
1346                                                      (i == txsize - 1));
1347                 else
1348                         priv->hw->desc->init_tx_desc(&priv->dma_tx[i],
1349                                                      priv->mode,
1350                                                      (i == txsize - 1));
1351         priv->dirty_tx = 0;
1352         priv->cur_tx = 0;
1353         priv->hw->dma->start_tx(priv->ioaddr);
1354
1355         priv->dev->stats.tx_errors++;
1356         netif_wake_queue(priv->dev);
1357 }
1358
/**
 * stmmac_dma_interrupt: DMA ISR
 * @priv: driver private structure
 * Description: this is the DMA ISR. It is called by the main ISR.
 * It calls the dwmac dma routine to understand which type of interrupt
 * happened. In case of there is a Normal interrupt and either TX or RX
 * interrupt happened so the NAPI is scheduled.
 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	int status;

	status = priv->hw->dma->dma_interrupt(priv->ioaddr, &priv->xstats);
	/* RX/TX work pending: mask DMA irqs and let the NAPI poll run. */
	if (likely((status & handle_rx)) || (status & handle_tx)) {
		if (likely(napi_schedule_prep(&priv->napi))) {
			stmmac_disable_dma_irq(priv);
			__napi_schedule(&priv->napi);
		}
	}
	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		if (unlikely(tc != SF_DMA_MODE) && (tc <= 256)) {
			tc += 64;
			priv->hw->dma->dma_mode(priv->ioaddr, tc, SF_DMA_MODE);
			priv->xstats.threshold = tc;
		}
	} else if (unlikely(status == tx_hard_error))
		/* Unrecoverable TX fault: reset the TX ring and restart. */
		stmmac_tx_err(priv);
}
1388
1389 /**
1390  * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
1391  * @priv: driver private structure
1392  * Description: this masks the MMC irq, in fact, the counters are managed in SW.
1393  */
1394 static void stmmac_mmc_setup(struct stmmac_priv *priv)
1395 {
1396         unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
1397             MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
1398
1399         dwmac_mmc_intr_all_mask(priv->ioaddr);
1400
1401         if (priv->dma_cap.rmon) {
1402                 dwmac_mmc_ctrl(priv->ioaddr, mode);
1403                 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
1404         } else
1405                 pr_info(" No MAC Management Counters available\n");
1406 }
1407
1408 static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
1409 {
1410         u32 hwid = priv->hw->synopsys_uid;
1411
1412         /* Check Synopsys Id (not available on old chips) */
1413         if (likely(hwid)) {
1414                 u32 uid = ((hwid & 0x0000ff00) >> 8);
1415                 u32 synid = (hwid & 0x000000ff);
1416
1417                 pr_info("stmmac - user ID: 0x%x, Synopsys ID: 0x%x\n",
1418                         uid, synid);
1419
1420                 return synid;
1421         }
1422         return 0;
1423 }
1424
1425 /**
1426  * stmmac_selec_desc_mode: to select among: normal/alternate/extend descriptors
1427  * @priv: driver private structure
1428  * Description: select the Enhanced/Alternate or Normal descriptors.
1429  * In case of Enhanced/Alternate, it looks at the extended descriptors are
1430  * supported by the HW cap. register.
1431  */
1432 static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
1433 {
1434         if (priv->plat->enh_desc) {
1435                 pr_info(" Enhanced/Alternate descriptors\n");
1436
1437                 /* GMAC older than 3.50 has no extended descriptors */
1438                 if (priv->synopsys_id >= DWMAC_CORE_3_50) {
1439                         pr_info("\tEnabled extended descriptors\n");
1440                         priv->extend_desc = 1;
1441                 } else
1442                         pr_warn("Extended descriptors not supported\n");
1443
1444                 priv->hw->desc = &enh_desc_ops;
1445         } else {
1446                 pr_info(" Normal descriptors\n");
1447                 priv->hw->desc = &ndesc_ops;
1448         }
1449 }
1450
/**
 * stmmac_get_hw_features: get MAC capabilities from the HW cap. register.
 * @priv: driver private structure
 * Description:
 *  new GMAC chip generations have a new register to indicate the
 *  presence of the optional feature/functions.
 *  This can be also used to override the value passed through the
 *  platform and necessary for old MAC10/100 and GMAC chips.
 *  Returns the raw capability word (0 when the dwmac callback is not
 *  provided); the decoded bits are cached in priv->dma_cap.
 */
static int stmmac_get_hw_features(struct stmmac_priv *priv)
{
	u32 hw_cap = 0;

	if (priv->hw->dma->get_hw_feature) {
		hw_cap = priv->hw->dma->get_hw_feature(priv->ioaddr);

		/* Decode each capability bit into the dma_cap struct. */
		priv->dma_cap.mbps_10_100 = (hw_cap & DMA_HW_FEAT_MIISEL);
		priv->dma_cap.mbps_1000 = (hw_cap & DMA_HW_FEAT_GMIISEL) >> 1;
		priv->dma_cap.half_duplex = (hw_cap & DMA_HW_FEAT_HDSEL) >> 2;
		priv->dma_cap.hash_filter = (hw_cap & DMA_HW_FEAT_HASHSEL) >> 4;
		priv->dma_cap.multi_addr = (hw_cap & DMA_HW_FEAT_ADDMAC) >> 5;
		priv->dma_cap.pcs = (hw_cap & DMA_HW_FEAT_PCSSEL) >> 6;
		priv->dma_cap.sma_mdio = (hw_cap & DMA_HW_FEAT_SMASEL) >> 8;
		priv->dma_cap.pmt_remote_wake_up =
		    (hw_cap & DMA_HW_FEAT_RWKSEL) >> 9;
		priv->dma_cap.pmt_magic_frame =
		    (hw_cap & DMA_HW_FEAT_MGKSEL) >> 10;
		/* MMC */
		priv->dma_cap.rmon = (hw_cap & DMA_HW_FEAT_MMCSEL) >> 11;
		/* IEEE 1588-2002 */
		priv->dma_cap.time_stamp =
		    (hw_cap & DMA_HW_FEAT_TSVER1SEL) >> 12;
		/* IEEE 1588-2008 */
		priv->dma_cap.atime_stamp =
		    (hw_cap & DMA_HW_FEAT_TSVER2SEL) >> 13;
		/* 802.3az - Energy-Efficient Ethernet (EEE) */
		priv->dma_cap.eee = (hw_cap & DMA_HW_FEAT_EEESEL) >> 14;
		priv->dma_cap.av = (hw_cap & DMA_HW_FEAT_AVSEL) >> 15;
		/* TX and RX csum */
		priv->dma_cap.tx_coe = (hw_cap & DMA_HW_FEAT_TXCOESEL) >> 16;
		priv->dma_cap.rx_coe_type1 =
		    (hw_cap & DMA_HW_FEAT_RXTYP1COE) >> 17;
		priv->dma_cap.rx_coe_type2 =
		    (hw_cap & DMA_HW_FEAT_RXTYP2COE) >> 18;
		priv->dma_cap.rxfifo_over_2048 =
		    (hw_cap & DMA_HW_FEAT_RXFIFOSIZE) >> 19;
		/* TX and RX number of channels */
		priv->dma_cap.number_rx_channel =
		    (hw_cap & DMA_HW_FEAT_RXCHCNT) >> 20;
		priv->dma_cap.number_tx_channel =
		    (hw_cap & DMA_HW_FEAT_TXCHCNT) >> 22;
		/* Alternate (enhanced) DESC mode */
		priv->dma_cap.enh_desc = (hw_cap & DMA_HW_FEAT_ENHDESSEL) >> 24;
	}

	return hw_cap;
}
1508
1509 /**
1510  * stmmac_check_ether_addr: check if the MAC addr is valid
1511  * @priv: driver private structure
1512  * Description:
1513  * it is to verify if the MAC address is valid, in case of failures it
1514  * generates a random MAC address
1515  */
1516 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
1517 {
1518         if (!is_valid_ether_addr(priv->dev->dev_addr)) {
1519                 priv->hw->mac->get_umac_addr((void __iomem *)
1520                                              priv->dev->base_addr,
1521                                              priv->dev->dev_addr, 0);
1522                 if (!is_valid_ether_addr(priv->dev->dev_addr))
1523                         eth_hw_addr_random(priv->dev);
1524         }
1525         pr_warn("%s: device MAC address %pM\n", priv->dev->name,
1526                 priv->dev->dev_addr);
1527 }
1528
1529 /**
1530  * stmmac_init_dma_engine: DMA init.
1531  * @priv: driver private structure
1532  * Description:
1533  * It inits the DMA invoking the specific MAC/GMAC callback.
1534  * Some DMA parameters can be passed from the platform;
1535  * in case of these are not passed a default is kept for the MAC or GMAC.
1536  */
1537 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
1538 {
1539         int pbl = DEFAULT_DMA_PBL, fixed_burst = 0, burst_len = 0;
1540         int mixed_burst = 0;
1541         int atds = 0;
1542
1543         if (priv->plat->dma_cfg) {
1544                 pbl = priv->plat->dma_cfg->pbl;
1545                 fixed_burst = priv->plat->dma_cfg->fixed_burst;
1546                 mixed_burst = priv->plat->dma_cfg->mixed_burst;
1547                 burst_len = priv->plat->dma_cfg->burst_len;
1548         }
1549
1550         if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
1551                 atds = 1;
1552
1553         return priv->hw->dma->init(priv->ioaddr, pbl, fixed_burst, mixed_burst,
1554                                    burst_len, priv->dma_tx_phy,
1555                                    priv->dma_rx_phy, atds);
1556 }
1557
1558 /**
1559  * stmmac_tx_timer: mitigation sw timer for tx.
1560  * @data: data pointer
1561  * Description:
1562  * This is the timer handler to directly invoke the stmmac_tx_clean.
1563  */
static void stmmac_tx_timer(unsigned long data)
{
	/* Reclaim completed tx descriptors from timer context. */
	stmmac_tx_clean((struct stmmac_priv *)data);
}
1570
1571 /**
1572  * stmmac_init_tx_coalesce: init tx mitigation options.
1573  * @priv: driver private structure
1574  * Description:
1575  * This inits the transmit coalesce parameters: i.e. timer rate,
1576  * timer handler and default threshold used for enabling the
1577  * interrupt on completion bit.
1578  */
1579 static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
1580 {
1581         priv->tx_coal_frames = STMMAC_TX_FRAMES;
1582         priv->tx_coal_timer = STMMAC_COAL_TX_TIMER;
1583         init_timer(&priv->txtimer);
1584         priv->txtimer.expires = STMMAC_COAL_TIMER(priv->tx_coal_timer);
1585         priv->txtimer.data = (unsigned long)priv;
1586         priv->txtimer.function = stmmac_tx_timer;
1587         add_timer(&priv->txtimer);
1588 }
1589
1590 /**
1591  * stmmac_hw_setup: setup mac in a usable state.
1592  *  @dev : pointer to the device structure.
1593  *  Description:
1594  *  This function sets up the ip in a usable state.
1595  *  Return value:
1596  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1597  *  file on failure.
1598  */
static int stmmac_hw_setup(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	/* Descriptor rings must be ready before the DMA engine touches
	 * them; both failures below are fatal for the open path.
	 */
	ret = init_dma_desc_rings(dev);
	if (ret < 0) {
		pr_err("%s: DMA descriptors initialization failed\n", __func__);
		return ret;
	}
	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		pr_err("%s: DMA engine initialization failed\n", __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW  */
	priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);

	/* If required, perform hw setup of the bus. */
	if (priv->plat->bus_setup)
		priv->plat->bus_setup(priv->ioaddr);

	/* Initialize the MAC Core */
	priv->hw->mac->core_init(priv->ioaddr);

	/* Enable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	/* PTP and debugfs failures are non-fatal: warn and keep going. */
	ret = stmmac_init_ptp(priv);
	if (ret)
		pr_warn("%s: failed PTP initialisation\n", __func__);

#ifdef CONFIG_STMMAC_DEBUG_FS
	ret = stmmac_init_fs(dev);
	if (ret < 0)
		pr_warn("%s: failed debugFS registration\n", __func__);
#endif
	/* Start the ball rolling... */
	pr_debug("%s: DMA RX/TX processes started...\n", dev->name);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

	/* Dump DMA/MAC registers */
	if (netif_msg_hw(priv)) {
		priv->hw->mac->dump_regs(priv->ioaddr);
		priv->hw->dma->dump_regs(priv->ioaddr);
	}
	priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;

	priv->eee_enabled = stmmac_eee_init(priv);

	stmmac_init_tx_coalesce(priv);

	/* Program the rx interrupt watchdog when mitigation is enabled
	 * and the core supports it.
	 */
	if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
		priv->rx_riwt = MAX_DMA_RIWT;
		priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
	}

	/* NOTE(review): ctrl_ane presumably kicks PCS auto-negotiation
	 * when a PCS is in use — confirm against the MAC core code.
	 */
	if (priv->pcs && priv->hw->mac->ctrl_ane)
		priv->hw->mac->ctrl_ane(priv->ioaddr, 0);

	return 0;
}
1669
1670 /**
1671  *  stmmac_open - open entry point of the driver
1672  *  @dev : pointer to the device structure.
1673  *  Description:
1674  *  This function is the open entry point of the driver.
1675  *  Return value:
1676  *  0 on success and an appropriate (-)ve integer as defined in errno.h
1677  *  file on failure.
1678  */
1679 static int stmmac_open(struct net_device *dev)
1680 {
1681         struct stmmac_priv *priv = netdev_priv(dev);
1682         int ret;
1683
1684         stmmac_check_ether_addr(priv);
1685
1686         if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
1687             priv->pcs != STMMAC_PCS_RTBI) {
1688                 ret = stmmac_init_phy(dev);
1689                 if (ret) {
1690                         pr_err("%s: Cannot attach to PHY (error: %d)\n",
1691                                __func__, ret);
1692                         goto phy_error;
1693                 }
1694         }
1695
1696         /* Extra statistics */
1697         memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats));
1698         priv->xstats.threshold = tc;
1699
1700         /* Create and initialize the TX/RX descriptors chains. */
1701         priv->dma_tx_size = STMMAC_ALIGN(dma_txsize);
1702         priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
1703         priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
1704
1705         alloc_dma_desc_resources(priv);
1706         if (ret < 0) {
1707                 pr_err("%s: DMA descriptors allocation failed\n", __func__);
1708                 goto dma_desc_error;
1709         }
1710
1711         ret = stmmac_hw_setup(dev);
1712         if (ret < 0) {
1713                 pr_err("%s: Hw setup failed\n", __func__);
1714                 goto init_error;
1715         }
1716
1717         if (priv->phydev)
1718                 phy_start(priv->phydev);
1719
1720         /* Request the IRQ lines */
1721         ret = request_irq(dev->irq, stmmac_interrupt,
1722                           IRQF_SHARED, dev->name, dev);
1723         if (unlikely(ret < 0)) {
1724                 pr_err("%s: ERROR: allocating the IRQ %d (error: %d)\n",
1725                        __func__, dev->irq, ret);
1726                 goto init_error;
1727         }
1728
1729         /* Request the Wake IRQ in case of another line is used for WoL */
1730         if (priv->wol_irq != dev->irq) {
1731                 ret = request_irq(priv->wol_irq, stmmac_interrupt,
1732                                   IRQF_SHARED, dev->name, dev);
1733                 if (unlikely(ret < 0)) {
1734                         pr_err("%s: ERROR: allocating the WoL IRQ %d (%d)\n",
1735                                __func__, priv->wol_irq, ret);
1736                         goto wolirq_error;
1737                 }
1738         }
1739
1740         /* Request the IRQ lines */
1741         if (priv->lpi_irq != -ENXIO) {
1742                 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED,
1743                                   dev->name, dev);
1744                 if (unlikely(ret < 0)) {
1745                         pr_err("%s: ERROR: allocating the LPI IRQ %d (%d)\n",
1746                                __func__, priv->lpi_irq, ret);
1747                         goto lpiirq_error;
1748                 }
1749         }
1750
1751         napi_enable(&priv->napi);
1752         netif_start_queue(dev);
1753
1754         return 0;
1755
1756 lpiirq_error:
1757         if (priv->wol_irq != dev->irq)
1758                 free_irq(priv->wol_irq, dev);
1759 wolirq_error:
1760         free_irq(dev->irq, dev);
1761
1762 init_error:
1763         free_dma_desc_resources(priv);
1764 dma_desc_error:
1765         if (priv->phydev)
1766                 phy_disconnect(priv->phydev);
1767 phy_error:
1768         clk_disable_unprepare(priv->stmmac_clk);
1769
1770         return ret;
1771 }
1772
1773 /**
1774  *  stmmac_release - close entry point of the driver
1775  *  @dev : device pointer.
1776  *  Description:
1777  *  This is the stop entry point of the driver.
1778  */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Kill the EEE software timer before anything else is torn down. */
	if (priv->eee_enabled)
		del_timer_sync(&priv->eee_ctrl_timer);

	/* Stop and disconnect the PHY */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	netif_stop_queue(dev);

	napi_disable(&priv->napi);

	/* No more tx-mitigation timer callbacks after this point. */
	del_timer_sync(&priv->txtimer);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);
	if (priv->wol_irq != dev->irq)
		free_irq(priv->wol_irq, dev);
	if (priv->lpi_irq != -ENXIO)
		free_irq(priv->lpi_irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_set_mac(priv->ioaddr, false);

	netif_carrier_off(dev);

#ifdef CONFIG_STMMAC_DEBUG_FS
	stmmac_exit_fs();
#endif

	stmmac_release_ptp(priv);

	return 0;
}
1826
1827 /**
1828  *  stmmac_xmit: Tx entry point of the driver
1829  *  @skb : the socket buffer
1830  *  @dev : device pointer
1831  *  Description : this is the tx entry point of the driver.
1832  *  It programs the chain or the ring and supports oversized frames
1833  *  and SG feature.
1834  */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int txsize = priv->dma_tx_size;
	unsigned int entry;
	int i, csum_insertion = 0, is_jumbo = 0;
	int nfrags = skb_shinfo(skb)->nr_frags;
	struct dma_desc *desc, *first;
	unsigned int nopaged_len = skb_headlen(skb);

	/* Need one descriptor for the linear part plus one per fragment;
	 * otherwise stop the queue and ask the stack to requeue.
	 */
	if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
		if (!netif_queue_stopped(dev)) {
			netif_stop_queue(dev);
			/* This is a hard error, log it. */
			pr_err("%s: Tx Ring full when queue awake\n", __func__);
		}
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->tx_lock);

	/* Leave Low Power Idle before programming new descriptors. */
	if (priv->tx_path_in_lpi_mode)
		stmmac_disable_eee_mode(priv);

	entry = priv->cur_tx % txsize;

	csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);

	if (priv->extend_desc)
		desc = (struct dma_desc *)(priv->dma_etx + entry);
	else
		desc = priv->dma_tx + entry;

	/* Remember the first descriptor: its OWN bit is set last so the
	 * DMA cannot start on a half-built chain.
	 */
	first = desc;

	priv->tx_skbuff[entry] = skb;

	/* To program the descriptors according to the size of the frame */
	if (priv->mode == STMMAC_RING_MODE) {
		is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
							priv->plat->enh_desc);
		if (unlikely(is_jumbo))
			entry = priv->hw->ring->jumbo_frm(priv, skb,
							  csum_insertion);
	} else {
		is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
							 priv->plat->enh_desc);
		if (unlikely(is_jumbo))
			entry = priv->hw->chain->jumbo_frm(priv, skb,
							   csum_insertion);
	}
	if (likely(!is_jumbo)) {
		desc->des2 = dma_map_single(priv->device, skb->data,
					    nopaged_len, DMA_TO_DEVICE);
		priv->tx_skbuff_dma[entry] = desc->des2;
		priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
						csum_insertion, priv->mode);
	} else
		desc = first;

	/* One extra descriptor per paged fragment. */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		int len = skb_frag_size(frag);

		entry = (++priv->cur_tx) % txsize;
		if (priv->extend_desc)
			desc = (struct dma_desc *)(priv->dma_etx + entry);
		else
			desc = priv->dma_tx + entry;

		desc->des2 = skb_frag_dma_map(priv->device, frag, 0, len,
					      DMA_TO_DEVICE);
		priv->tx_skbuff_dma[entry] = desc->des2;
		/* Only the first entry keeps the skb pointer for freeing. */
		priv->tx_skbuff[entry] = NULL;
		priv->hw->desc->prepare_tx_desc(desc, 0, len, csum_insertion,
						priv->mode);
		/* Descriptor must be fully written before OWN is granted. */
		wmb();
		priv->hw->desc->set_tx_owner(desc);
		wmb();
	}

	/* Finalize the latest segment. */
	priv->hw->desc->close_tx_desc(desc);

	wmb();
	/* According to the coalesce parameter the IC bit for the latest
	 * segment could be reset and the timer re-started to invoke the
	 * stmmac_tx function. This approach takes care about the fragments.
	 */
	priv->tx_count_frames += nfrags + 1;
	if (priv->tx_coal_frames > priv->tx_count_frames) {
		priv->hw->desc->clear_tx_ic(desc);
		priv->xstats.tx_reset_ic_bit++;
		mod_timer(&priv->txtimer,
			  STMMAC_COAL_TIMER(priv->tx_coal_timer));
	} else
		priv->tx_count_frames = 0;

	/* To avoid a race condition: hand the first descriptor to the
	 * hardware only after the whole chain has been set up.
	 */
	priv->hw->desc->set_tx_owner(first);
	wmb();

	priv->cur_tx++;

	if (netif_msg_pktdata(priv)) {
		pr_debug("%s: curr %d dirty=%d entry=%d, first=%p, nfrags=%d",
			__func__, (priv->cur_tx % txsize),
			(priv->dirty_tx % txsize), entry, first, nfrags);

		if (priv->extend_desc)
			stmmac_display_ring((void *)priv->dma_etx, txsize, 1);
		else
			stmmac_display_ring((void *)priv->dma_tx, txsize, 0);

		pr_debug(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}
	/* Stop the queue early if a max-fragment skb could not fit. */
	if (unlikely(stmmac_tx_avail(priv) <= (MAX_SKB_FRAGS + 1))) {
		if (netif_msg_hw(priv))
			pr_debug("%s: stop transmitted packets\n", __func__);
		netif_stop_queue(dev);
	}

	dev->stats.tx_bytes += skb->len;

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		priv->hw->desc->enable_tx_timestamp(first);
	}

	/* Fall back to software timestamping when HW tstamp is off. */
	if (!priv->hwts_tx_en)
		skb_tx_timestamp(skb);

	priv->hw->dma->enable_dma_transmission(priv->ioaddr);

	spin_unlock(&priv->tx_lock);

	return NETDEV_TX_OK;
}
1976
1977 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
1978 {
1979         struct ethhdr *ehdr;
1980         u16 vlanid;
1981
1982         if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) ==
1983             NETIF_F_HW_VLAN_CTAG_RX &&
1984             !__vlan_get_tag(skb, &vlanid)) {
1985                 /* pop the vlan tag */
1986                 ehdr = (struct ethhdr *)skb->data;
1987                 memmove(skb->data + VLAN_HLEN, ehdr, ETH_ALEN * 2);
1988                 skb_pull(skb, VLAN_HLEN);
1989                 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlanid);
1990         }
1991 }
1992
1993
1994 /**
1995  * stmmac_rx_refill: refill used skb preallocated buffers
1996  * @priv: driver private structure
1997  * Description : this is to reallocate the skb for the reception process
1998  * that is based on zero-copy.
1999  */
static inline void stmmac_rx_refill(struct stmmac_priv *priv)
{
	unsigned int rxsize = priv->dma_rx_size;
	int bfsize = priv->dma_buf_sz;

	/* Walk every descriptor the rx path has consumed and hand it
	 * back to the hardware with a freshly mapped skb.
	 */
	for (; priv->cur_rx - priv->dirty_rx > 0; priv->dirty_rx++) {
		unsigned int entry = priv->dirty_rx % rxsize;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		if (likely(priv->rx_skbuff[entry] == NULL)) {
			struct sk_buff *skb;

			skb = netdev_alloc_skb_ip_align(priv->dev, bfsize);

			/* Out of memory: give up, retry on next refill. */
			if (unlikely(skb == NULL))
				break;

			priv->rx_skbuff[entry] = skb;
			priv->rx_skbuff_dma[entry] =
			    dma_map_single(priv->device, skb->data, bfsize,
					   DMA_FROM_DEVICE);

			p->des2 = priv->rx_skbuff_dma[entry];

			priv->hw->ring->refill_desc3(priv, p);

			if (netif_msg_rx_status(priv))
				pr_debug("\trefill entry #%d\n", entry);
		}
		/* Descriptor must be fully written before ownership is
		 * returned to the DMA engine.
		 */
		wmb();
		priv->hw->desc->set_rx_owner(p);
		wmb();
	}
}
2039
/**
 * stmmac_rx: receive the frames from the remote host
 * @priv: driver private structure
 * @limit: napi budget.
 * Description :  this is the function called by the napi poll method.
 * It gets all the frames inside the ring.
 */
static int stmmac_rx(struct stmmac_priv *priv, int limit)
{
	unsigned int rxsize = priv->dma_rx_size;
	unsigned int entry = priv->cur_rx % rxsize;
	unsigned int next_entry;
	unsigned int count = 0;
	int coe = priv->plat->rx_coe;

	if (netif_msg_rx_status(priv)) {
		pr_debug("%s: descriptor ring:\n", __func__);
		if (priv->extend_desc)
			stmmac_display_ring((void *)priv->dma_erx, rxsize, 1);
		else
			stmmac_display_ring((void *)priv->dma_rx, rxsize, 0);
	}
	/* Process at most 'limit' frames (the napi budget). */
	while (count < limit) {
		int status;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(priv->dma_erx + entry);
		else
			p = priv->dma_rx + entry;

		/* Descriptor still owned by the DMA: ring is drained. */
		if (priv->hw->desc->get_rx_owner(p))
			break;

		count++;

		next_entry = (++priv->cur_rx) % rxsize;
		if (priv->extend_desc)
			prefetch(priv->dma_erx + next_entry);
		else
			prefetch(priv->dma_rx + next_entry);

		/* read the status of the incoming frame */
		status = priv->hw->desc->rx_status(&priv->dev->stats,
						   &priv->xstats, p);
		if ((priv->extend_desc) && (priv->hw->desc->rx_extended_status))
			priv->hw->desc->rx_extended_status(&priv->dev->stats,
							   &priv->xstats,
							   priv->dma_erx +
							   entry);
		if (unlikely(status == discard_frame)) {
			priv->dev->stats.rx_errors++;
			if (priv->hwts_rx_en && !priv->extend_desc) {
				/* DESC2 & DESC3 will be overwritten by device
				 * with timestamp value, hence reinitialize
				 * them in stmmac_rx_refill() function so that
				 * device can reuse it.
				 */
				priv->rx_skbuff[entry] = NULL;
				dma_unmap_single(priv->device,
						 priv->rx_skbuff_dma[entry],
						 priv->dma_buf_sz,
						 DMA_FROM_DEVICE);
			}
		} else {
			struct sk_buff *skb;
			int frame_len;

			frame_len = priv->hw->desc->get_rx_frame_len(p, coe);

			/* ACS is set; GMAC core strips PAD/FCS for IEEE 802.3
			 * Type frames (LLC/LLC-SNAP)
			 */
			if (unlikely(status != llc_snap))
				frame_len -= ETH_FCS_LEN;

			if (netif_msg_rx_status(priv)) {
				pr_debug("\tdesc: %p [entry %d] buff=0x%x\n",
					 p, entry, p->des2);
				if (frame_len > ETH_FRAME_LEN)
					pr_debug("\tframe size %d, COE: %d\n",
						 frame_len, status);
			}
			skb = priv->rx_skbuff[entry];
			if (unlikely(!skb)) {
				pr_err("%s: Inconsistent Rx descriptor chain\n",
				       priv->dev->name);
				priv->dev->stats.rx_dropped++;
				break;
			}
			prefetch(skb->data - NET_IP_ALIGN);
			/* The skb is handed to the stack below; the entry
			 * will be re-populated by stmmac_rx_refill().
			 */
			priv->rx_skbuff[entry] = NULL;

			stmmac_get_rx_hwtstamp(priv, entry, skb);

			skb_put(skb, frame_len);
			dma_unmap_single(priv->device,
					 priv->rx_skbuff_dma[entry],
					 priv->dma_buf_sz, DMA_FROM_DEVICE);

			if (netif_msg_pktdata(priv)) {
				pr_debug("frame received (%dbytes)", frame_len);
				print_pkt(skb->data, frame_len);
			}

			stmmac_rx_vlan(priv->dev, skb);

			skb->protocol = eth_type_trans(skb, priv->dev);

			/* Without rx checksum offload the stack verifies. */
			if (unlikely(!coe))
				skb_checksum_none_assert(skb);
			else
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			napi_gro_receive(&priv->napi, skb);

			priv->dev->stats.rx_packets++;
			priv->dev->stats.rx_bytes += frame_len;
		}
		entry = next_entry;
	}

	/* Give the consumed descriptors back to the hardware. */
	stmmac_rx_refill(priv);

	priv->xstats.rx_pkt_n += count;

	return count;
}
2168
2169 /**
2170  *  stmmac_poll - stmmac poll method (NAPI)
2171  *  @napi : pointer to the napi structure.
2172  *  @budget : maximum number of packets that the current CPU can receive from
2173  *            all interfaces.
2174  *  Description :
2175  *  To look at the incoming frames and clear the tx resources.
2176  */
2177 static int stmmac_poll(struct napi_struct *napi, int budget)
2178 {
2179         struct stmmac_priv *priv = container_of(napi, struct stmmac_priv, napi);
2180         int work_done = 0;
2181
2182         priv->xstats.napi_poll++;
2183         stmmac_tx_clean(priv);
2184
2185         work_done = stmmac_rx(priv, budget);
2186         if (work_done < budget) {
2187                 napi_complete(napi);
2188                 stmmac_enable_dma_irq(priv);
2189         }
2190         return work_done;
2191 }
2192
2193 /**
2194  *  stmmac_tx_timeout
2195  *  @dev : Pointer to net device structure
2196  *  Description: this function is called when a packet transmission fails to
2197  *   complete within a reasonable time. The driver will mark the error in the
2198  *   netdev structure and arrange for the device to be reset to a sane state
2199  *   in order to transmit a new packet.
2200  */
static void stmmac_tx_timeout(struct net_device *dev)
{
	/* Clear Tx resources and restart transmitting again */
	stmmac_tx_err(netdev_priv(dev));
}
2208
2209 /* Configuration changes (passed on by ifconfig) */
/* Configuration changes (passed on by ifconfig) */
static int stmmac_config(struct net_device *dev, struct ifmap *map)
{
	if (dev->flags & IFF_UP)	/* can't act on a running interface */
		return -EBUSY;

	/* Don't allow changing the I/O address */
	if (map->base_addr != dev->base_addr) {
		pr_warn("%s: can't change I/O address\n", dev->name);
		return -EOPNOTSUPP;
	}

	/* Don't allow changing the IRQ */
	if (map->irq != dev->irq) {
		pr_warn("%s: not change IRQ number %d\n", dev->name, dev->irq);
		return -EOPNOTSUPP;
	}

	/* All other ifmap fields are accepted unchanged. */
	return 0;
}
2229
2230 /**
2231  *  stmmac_set_rx_mode - entry point for multicast addressing
2232  *  @dev : pointer to the device structure
2233  *  Description:
2234  *  This function is a driver entry point which gets called by the kernel
2235  *  whenever multicast addresses must be enabled/disabled.
2236  *  Return value:
2237  *  void.
2238  */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Serialize filter programming against other MAC accesses. */
	spin_lock(&priv->lock);
	priv->hw->mac->set_filter(dev, priv->synopsys_id);
	spin_unlock(&priv->lock);
}
2247
2248 /**
2249  *  stmmac_change_mtu - entry point to change MTU size for the device.
2250  *  @dev : device pointer.
2251  *  @new_mtu : the new MTU size for the device.
2252  *  Description: the Maximum Transfer Unit (MTU) is used by the network layer
2253  *  to drive packet transmission. Ethernet has an MTU of 1500 octets
2254  *  (ETH_DATA_LEN). This value can be changed with ifconfig.
2255  *  Return value:
2256  *  0 on success and an appropriate (-)ve integer as defined in errno.h
2257  *  file on failure.
2258  */
2259 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
2260 {
2261         struct stmmac_priv *priv = netdev_priv(dev);
2262         int max_mtu;
2263
2264         if (netif_running(dev)) {
2265                 pr_err("%s: must be stopped to change its MTU\n", dev->name);
2266                 return -EBUSY;
2267         }
2268
2269         if (priv->plat->enh_desc)
2270                 max_mtu = JUMBO_LEN;
2271         else
2272                 max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
2273
2274         if ((new_mtu < 46) || (new_mtu > max_mtu)) {
2275                 pr_err("%s: invalid MTU, max MTU is: %d\n", dev->name, max_mtu);
2276                 return -EINVAL;
2277         }
2278
2279         dev->mtu = new_mtu;
2280         netdev_update_features(dev);
2281
2282         return 0;
2283 }
2284
2285 static netdev_features_t stmmac_fix_features(struct net_device *dev,
2286                                              netdev_features_t features)
2287 {
2288         struct stmmac_priv *priv = netdev_priv(dev);
2289
2290         if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
2291                 features &= ~NETIF_F_RXCSUM;
2292         else if (priv->plat->rx_coe == STMMAC_RX_COE_TYPE1)
2293                 features &= ~NETIF_F_IPV6_CSUM;
2294         if (!priv->plat->tx_coe)
2295                 features &= ~NETIF_F_ALL_CSUM;
2296
2297         /* Some GMAC devices have a bugged Jumbo frame support that
2298          * needs to have the Tx COE disabled for oversized frames
2299          * (due to limited buffer sizes). In this case we disable
2300          * the TX csum insertionin the TDES and not use SF.
2301          */
2302         if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
2303                 features &= ~NETIF_F_ALL_CSUM;
2304
2305         return features;
2306 }
2307
2308 /**
2309  *  stmmac_interrupt - main ISR
2310  *  @irq: interrupt number.
2311  *  @dev_id: to pass the net device pointer.
2312  *  Description: this is the main driver interrupt service routine.
2313  *  It calls the DMA ISR and also the core ISR to manage PMT, MMC, LPI
2314  *  interrupts.
2315  */
2316 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
2317 {
2318         struct net_device *dev = (struct net_device *)dev_id;
2319         struct stmmac_priv *priv = netdev_priv(dev);
2320
2321         if (priv->irq_wake)
2322                 pm_wakeup_event(priv->device, 0);
2323
2324         if (unlikely(!dev)) {
2325                 pr_err("%s: invalid dev pointer\n", __func__);
2326                 return IRQ_NONE;
2327         }
2328
2329         /* To handle GMAC own interrupts */
2330         if (priv->plat->has_gmac) {
2331                 int status = priv->hw->mac->host_irq_status((void __iomem *)
2332                                                             dev->base_addr,
2333                                                             &priv->xstats);
2334                 if (unlikely(status)) {
2335                         /* For LPI we need to save the tx status */
2336                         if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
2337                                 priv->tx_path_in_lpi_mode = true;
2338                         if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
2339                                 priv->tx_path_in_lpi_mode = false;
2340                 }
2341         }
2342
2343         /* To handle DMA interrupts */
2344         stmmac_dma_interrupt(priv);
2345
2346         return IRQ_HANDLED;
2347 }
2348
2349 #ifdef CONFIG_NET_POLL_CONTROLLER
2350 /* Polling receive - used by NETCONSOLE and other diagnostic tools
2351  * to allow network I/O with interrupts disabled.
2352  */
static void stmmac_poll_controller(struct net_device *dev)
{
	/* Run the ISR by hand with the IRQ line masked, so netconsole and
	 * friends can drain the device with interrupts disabled.
	 */
	disable_irq(dev->irq);
	stmmac_interrupt(dev->irq, dev);
	enable_irq(dev->irq);
}
2359 #endif
2360
/**
 *  stmmac_ioctl - Entry point for the Ioctl
 *  @dev: Device pointer.
 *  @rq: An IOCTL specific structure, that can contain a pointer to
 *  a proprietary structure used to pass information to the driver.
 *  @cmd: IOCTL command
 *  Description:
 *  Currently it supports the phy_mii_ioctl(...) and HW time stamping.
 */
2370 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2371 {
2372         struct stmmac_priv *priv = netdev_priv(dev);
2373         int ret = -EOPNOTSUPP;
2374
2375         if (!netif_running(dev))
2376                 return -EINVAL;
2377
2378         switch (cmd) {
2379         case SIOCGMIIPHY:
2380         case SIOCGMIIREG:
2381         case SIOCSMIIREG:
2382                 if (!priv->phydev)
2383                         return -EINVAL;
2384                 ret = phy_mii_ioctl(priv->phydev, rq, cmd);
2385                 break;
2386         case SIOCSHWTSTAMP:
2387                 ret = stmmac_hwtstamp_ioctl(dev, rq);
2388                 break;
2389         default:
2390                 break;
2391         }
2392
2393         return ret;
2394 }
2395
2396 #ifdef CONFIG_STMMAC_DEBUG_FS
2397 static struct dentry *stmmac_fs_dir;
2398 static struct dentry *stmmac_rings_status;
2399 static struct dentry *stmmac_dma_cap;
2400
2401 static void sysfs_display_ring(void *head, int size, int extend_desc,
2402                                struct seq_file *seq)
2403 {
2404         int i;
2405         struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
2406         struct dma_desc *p = (struct dma_desc *)head;
2407
2408         for (i = 0; i < size; i++) {
2409                 u64 x;
2410                 if (extend_desc) {
2411                         x = *(u64 *) ep;
2412                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2413                                    i, (unsigned int)virt_to_phys(ep),
2414                                    (unsigned int)x, (unsigned int)(x >> 32),
2415                                    ep->basic.des2, ep->basic.des3);
2416                         ep++;
2417                 } else {
2418                         x = *(u64 *) p;
2419                         seq_printf(seq, "%d [0x%x]: 0x%x 0x%x 0x%x 0x%x\n",
2420                                    i, (unsigned int)virt_to_phys(ep),
2421                                    (unsigned int)x, (unsigned int)(x >> 32),
2422                                    p->des2, p->des3);
2423                         p++;
2424                 }
2425                 seq_printf(seq, "\n");
2426         }
2427 }
2428
2429 static int stmmac_sysfs_ring_read(struct seq_file *seq, void *v)
2430 {
2431         struct net_device *dev = seq->private;
2432         struct stmmac_priv *priv = netdev_priv(dev);
2433         unsigned int txsize = priv->dma_tx_size;
2434         unsigned int rxsize = priv->dma_rx_size;
2435
2436         if (priv->extend_desc) {
2437                 seq_printf(seq, "Extended RX descriptor ring:\n");
2438                 sysfs_display_ring((void *)priv->dma_erx, rxsize, 1, seq);
2439                 seq_printf(seq, "Extended TX descriptor ring:\n");
2440                 sysfs_display_ring((void *)priv->dma_etx, txsize, 1, seq);
2441         } else {
2442                 seq_printf(seq, "RX descriptor ring:\n");
2443                 sysfs_display_ring((void *)priv->dma_rx, rxsize, 0, seq);
2444                 seq_printf(seq, "TX descriptor ring:\n");
2445                 sysfs_display_ring((void *)priv->dma_tx, txsize, 0, seq);
2446         }
2447
2448         return 0;
2449 }
2450
/* seq_file open hook: bind the ring dumper to the net_device stashed in
 * the debugfs inode private data.
 */
static int stmmac_sysfs_ring_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_ring_read, inode->i_private);
}
2455
/* File operations for the "descriptors_status" debugfs entry. */
static const struct file_operations stmmac_rings_status_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_ring_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2463
2464 static int stmmac_sysfs_dma_cap_read(struct seq_file *seq, void *v)
2465 {
2466         struct net_device *dev = seq->private;
2467         struct stmmac_priv *priv = netdev_priv(dev);
2468
2469         if (!priv->hw_cap_support) {
2470                 seq_printf(seq, "DMA HW features not supported\n");
2471                 return 0;
2472         }
2473
2474         seq_printf(seq, "==============================\n");
2475         seq_printf(seq, "\tDMA HW features\n");
2476         seq_printf(seq, "==============================\n");
2477
2478         seq_printf(seq, "\t10/100 Mbps %s\n",
2479                    (priv->dma_cap.mbps_10_100) ? "Y" : "N");
2480         seq_printf(seq, "\t1000 Mbps %s\n",
2481                    (priv->dma_cap.mbps_1000) ? "Y" : "N");
2482         seq_printf(seq, "\tHalf duple %s\n",
2483                    (priv->dma_cap.half_duplex) ? "Y" : "N");
2484         seq_printf(seq, "\tHash Filter: %s\n",
2485                    (priv->dma_cap.hash_filter) ? "Y" : "N");
2486         seq_printf(seq, "\tMultiple MAC address registers: %s\n",
2487                    (priv->dma_cap.multi_addr) ? "Y" : "N");
2488         seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfatces): %s\n",
2489                    (priv->dma_cap.pcs) ? "Y" : "N");
2490         seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
2491                    (priv->dma_cap.sma_mdio) ? "Y" : "N");
2492         seq_printf(seq, "\tPMT Remote wake up: %s\n",
2493                    (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
2494         seq_printf(seq, "\tPMT Magic Frame: %s\n",
2495                    (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
2496         seq_printf(seq, "\tRMON module: %s\n",
2497                    (priv->dma_cap.rmon) ? "Y" : "N");
2498         seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
2499                    (priv->dma_cap.time_stamp) ? "Y" : "N");
2500         seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp:%s\n",
2501                    (priv->dma_cap.atime_stamp) ? "Y" : "N");
2502         seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE) %s\n",
2503                    (priv->dma_cap.eee) ? "Y" : "N");
2504         seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
2505         seq_printf(seq, "\tChecksum Offload in TX: %s\n",
2506                    (priv->dma_cap.tx_coe) ? "Y" : "N");
2507         seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
2508                    (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
2509         seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
2510                    (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
2511         seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
2512                    (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
2513         seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
2514                    priv->dma_cap.number_rx_channel);
2515         seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
2516                    priv->dma_cap.number_tx_channel);
2517         seq_printf(seq, "\tEnhanced descriptors: %s\n",
2518                    (priv->dma_cap.enh_desc) ? "Y" : "N");
2519
2520         return 0;
2521 }
2522
/* seq_file open hook: bind the DMA capability dumper to the net_device
 * stashed in the debugfs inode private data.
 */
static int stmmac_sysfs_dma_cap_open(struct inode *inode, struct file *file)
{
	return single_open(file, stmmac_sysfs_dma_cap_read, inode->i_private);
}
2527
/* File operations for the "dma_cap" debugfs entry. */
static const struct file_operations stmmac_dma_cap_fops = {
	.owner = THIS_MODULE,
	.open = stmmac_sysfs_dma_cap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
2535
/* Create the driver's debugfs hierarchy: a directory plus the ring-status
 * and DMA-capability files.  On any failure, everything created so far is
 * torn down and -ENOMEM is returned.
 */
static int stmmac_init_fs(struct net_device *dev)
{
	/* Create debugfs entries */
	stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);

	if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
		pr_err("ERROR %s, debugfs create directory failed\n",
		       STMMAC_RESOURCE_NAME);

		return -ENOMEM;
	}

	/* Entry to report DMA RX/TX rings */
	stmmac_rings_status = debugfs_create_file("descriptors_status",
						  S_IRUGO, stmmac_fs_dir, dev,
						  &stmmac_rings_status_fops);

	if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
		pr_info("ERROR creating stmmac ring debugfs file\n");
		debugfs_remove(stmmac_fs_dir);

		return -ENOMEM;
	}

	/* Entry to report the DMA HW features */
	stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
					     dev, &stmmac_dma_cap_fops);

	if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
		pr_info("ERROR creating stmmac MMC debugfs file\n");
		/* Unwind: drop the ring file and the directory created above */
		debugfs_remove(stmmac_rings_status);
		debugfs_remove(stmmac_fs_dir);

		return -ENOMEM;
	}

	return 0;
}
2574
/* Remove all debugfs entries created by stmmac_init_fs(). */
static void stmmac_exit_fs(void)
{
	debugfs_remove(stmmac_rings_status);
	debugfs_remove(stmmac_dma_cap);
	debugfs_remove(stmmac_fs_dir);
}
2581 #endif /* CONFIG_STMMAC_DEBUG_FS */
2582
/* Network device operations implemented by this driver. */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_do_ioctl = stmmac_ioctl,
	.ndo_set_config = stmmac_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = stmmac_poll_controller,
#endif
	.ndo_set_mac_address = eth_mac_addr,
};
2598
/**
 *  stmmac_hw_init - Init the MAC device
 *  @priv: driver private structure
 *  Description: this function detects which MAC device
 *  (GMAC/MAC10-100) has to be attached, checks the HW capability
 *  (if supported) and sets the driver's features (for example
 *  to use the ring or chain mode or support the normal/enh
 *  descriptor structure).
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;
	struct mac_device_info *mac;

	/* Identify the MAC HW device */
	if (priv->plat->has_gmac) {
		priv->dev->priv_flags |= IFF_UNICAST_FLT;
		mac = dwmac1000_setup(priv->ioaddr);
	} else {
		mac = dwmac100_setup(priv->ioaddr);
	}
	if (!mac)
		return -ENOMEM;

	priv->hw = mac;

	/* Get and dump the chip ID */
	priv->synopsys_id = stmmac_get_synopsys_id(priv);

	/* To use the chained or ring mode (module parameter selectable) */
	if (chain_mode) {
		priv->hw->chain = &chain_mode_ops;
		pr_info(" Chain mode enabled\n");
		priv->mode = STMMAC_CHAIN_MODE;
	} else {
		priv->hw->ring = &ring_mode_ops;
		pr_info(" Ring mode enabled\n");
		priv->mode = STMMAC_RING_MODE;
	}

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		pr_info(" DMA HW capability register supported");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up;

		priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* Prefer the stronger type2 RX checksum engine when present */
		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

	} else
		pr_info(" No HW DMA feature register supported");

	/* To use alternate (extended) or normal descriptor structures */
	stmmac_selec_desc_mode(priv);

	/* Enable RX IPC (checksum) in HW; fall back to no RX COE on failure */
	ret = priv->hw->mac->rx_ipc(priv->ioaddr);
	if (!ret) {
		pr_warn(" RX IPC Checksum Offload not configured.\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
	}

	if (priv->plat->rx_coe)
		pr_info(" RX Checksum Offload Engine supported (type %d)\n",
			priv->plat->rx_coe);
	if (priv->plat->tx_coe)
		pr_info(" TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		pr_info(" Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
	}

	return 0;
}
2684
2685 /**
2686  * stmmac_dvr_probe
2687  * @device: device pointer
2688  * @plat_dat: platform data pointer
2689  * @addr: iobase memory address
2690  * Description: this is the main probe function used to
2691  * call the alloc_etherdev, allocate the priv structure.
2692  */
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
				     struct plat_stmmacenet_data *plat_dat,
				     void __iomem *addr)
{
	int ret = 0;
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;

	ndev = alloc_etherdev(sizeof(struct stmmac_priv));
	if (!ndev)
		return NULL;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	ether_setup(ndev);

	stmmac_set_ethtool_ops(ndev);
	priv->pause = pause;
	priv->plat = plat_dat;
	priv->ioaddr = addr;
	priv->dev->base_addr = (unsigned long)addr;

	/* Verify driver arguments */
	stmmac_verify_args();

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	/* CSR clock is mandatory: bail out (and propagate the error) when
	 * it cannot be obtained.
	 */
	priv->stmmac_clk = devm_clk_get(priv->device, STMMAC_RESOURCE_NAME);
	if (IS_ERR(priv->stmmac_clk)) {
		dev_warn(priv->device, "%s: warning: cannot get CSR clock\n",
			 __func__);
		ret = PTR_ERR(priv->stmmac_clk);
		goto error_clk_get;
	}
	clk_prepare_enable(priv->stmmac_clk);

	/* The reset control is optional: only -EPROBE_DEFER is fatal,
	 * any other error simply means "no reset line".
	 */
	priv->stmmac_rst = devm_reset_control_get(priv->device,
						  STMMAC_RESOURCE_NAME);
	if (IS_ERR(priv->stmmac_rst)) {
		if (PTR_ERR(priv->stmmac_rst) == -EPROBE_DEFER) {
			ret = -EPROBE_DEFER;
			goto error_hw_init;
		}
		dev_info(priv->device, "no reset control found\n");
		priv->stmmac_rst = NULL;
	}
	if (priv->stmmac_rst)
		reset_control_deassert(priv->stmmac_rst);

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	if (flow_ctrl)
		priv->flow_ctrl = FLOW_AUTO;	/* RX/TX pause on */

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some cases, for example on bugged HW, this feature
	 * has to be disabled and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
		priv->use_riwt = 1;
		pr_info(" Enable RX Mitigation via HW Watchdog Timer\n");
	}

	netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->tx_lock);

	ret = register_netdev(ndev);
	if (ret) {
		pr_err("%s: ERROR %i registering the device\n", __func__, ret);
		goto error_netdev_register;
	}

	/* If a specific clk_csr value is passed from the platform
	 * this means that the CSR Clock Range selection cannot be
	 * changed at run-time and it is fixed. Viceversa the driver'll try to
	 * set the MDC clock dynamically according to the csr actual
	 * clock input.
	 */
	if (!priv->plat->clk_csr)
		stmmac_clk_csr_set(priv);
	else
		priv->clk_csr = priv->plat->clk_csr;

	stmmac_check_pcs_mode(priv);

	/* MDIO registration is skipped when the PCS handles the PHY layer */
	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
	    priv->pcs != STMMAC_PCS_RTBI) {
		/* MDIO bus Registration */
		ret = stmmac_mdio_register(ndev);
		if (ret < 0) {
			pr_debug("%s: MDIO bus (id: %d) registration failed",
				 __func__, priv->plat->bus_id);
			goto error_mdio_register;
		}
	}

	return priv;

	/* Error unwind: each label undoes everything acquired before it */
error_mdio_register:
	unregister_netdev(ndev);
error_netdev_register:
	netif_napi_del(&priv->napi);
error_hw_init:
	clk_disable_unprepare(priv->stmmac_clk);
error_clk_get:
	free_netdev(ndev);

	return ERR_PTR(ret);
}
2828
2829 /**
2830  * stmmac_dvr_remove
2831  * @ndev: net device pointer
2832  * Description: this function resets the TX/RX processes, disables the MAC RX/TX
2833  * changes the link status, releases the DMA descriptor rings.
2834  */
int stmmac_dvr_remove(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	pr_info("%s:\n\tremoving driver", __func__);

	/* Quiesce the DMA engines before touching the MAC */
	priv->hw->dma->stop_rx(priv->ioaddr);
	priv->hw->dma->stop_tx(priv->ioaddr);

	stmmac_set_mac(priv->ioaddr, false);
	/* MDIO was only registered when the PCS does not drive the PHY */
	if (priv->pcs != STMMAC_PCS_RGMII && priv->pcs != STMMAC_PCS_TBI &&
	    priv->pcs != STMMAC_PCS_RTBI)
		stmmac_mdio_unregister(ndev);
	netif_carrier_off(ndev);
	unregister_netdev(ndev);
	/* Put the IP back into reset (if a reset line exists) and gate
	 * its clock before freeing the netdev.
	 */
	if (priv->stmmac_rst)
		reset_control_assert(priv->stmmac_rst);
	clk_disable_unprepare(priv->stmmac_clk);
	free_netdev(ndev);

	return 0;
}
2857
2858 #ifdef CONFIG_PM
2859 int stmmac_suspend(struct net_device *ndev)
2860 {
2861         struct stmmac_priv *priv = netdev_priv(ndev);
2862         unsigned long flags;
2863
2864         if (!ndev || !netif_running(ndev))
2865                 return 0;
2866
2867         if (priv->phydev)
2868                 phy_stop(priv->phydev);
2869
2870         spin_lock_irqsave(&priv->lock, flags);
2871
2872         netif_device_detach(ndev);
2873         netif_stop_queue(ndev);
2874
2875         napi_disable(&priv->napi);
2876
2877         /* Stop TX/RX DMA */
2878         priv->hw->dma->stop_tx(priv->ioaddr);
2879         priv->hw->dma->stop_rx(priv->ioaddr);
2880
2881         stmmac_clear_descriptors(priv);
2882
2883         /* Enable Power down mode by programming the PMT regs */
2884         if (device_may_wakeup(priv->device)) {
2885                 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
2886                 priv->irq_wake = 1;
2887         } else {
2888                 stmmac_set_mac(priv->ioaddr, false);
2889                 pinctrl_pm_select_sleep_state(priv->device);
2890                 /* Disable clock in case of PWM is off */
2891                 clk_disable_unprepare(priv->stmmac_clk);
2892         }
2893         spin_unlock_irqrestore(&priv->lock, flags);
2894         return 0;
2895 }
2896
int stmmac_resume(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned long flags;

	if (!netif_running(ndev))
		return 0;

	spin_lock_irqsave(&priv->lock, flags);

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console).
	 */
	if (device_may_wakeup(priv->device)) {
		priv->hw->mac->pmt(priv->ioaddr, 0);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* enable the clk previously disabled in stmmac_suspend() */
		clk_prepare_enable(priv->stmmac_clk);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	netif_device_attach(ndev);

	/* NOTE(review): the return value of stmmac_hw_setup() is not
	 * checked here — confirm whether a failure should abort resume.
	 */
	stmmac_hw_setup(ndev);

	napi_enable(&priv->napi);

	netif_start_queue(ndev);

	spin_unlock_irqrestore(&priv->lock, flags);

	if (priv->phydev)
		phy_start(priv->phydev);

	return 0;
}
2940 #endif /* CONFIG_PM */
2941
/* Driver can be configured w/ and w/o both PCI and Platform drivers
 * depending on the configuration selected.
 */
static int __init stmmac_init(void)
{
	int ret;

	/* Register both front-ends; unwind the platform one if the PCI
	 * registration fails.
	 */
	ret = stmmac_register_platform();
	if (ret)
		goto err;
	ret = stmmac_register_pci();
	if (ret)
		goto err_pci;
	return 0;
err_pci:
	stmmac_unregister_platform();
err:
	pr_err("stmmac: driver registration failed\n");
	return ret;
}
2962
/* Module exit: unregister both the platform and the PCI front-ends. */
static void __exit stmmac_exit(void)
{
	stmmac_unregister_platform();
	stmmac_unregister_pci();
}
2968
2969 module_init(stmmac_init);
2970 module_exit(stmmac_exit);
2971
2972 #ifndef MODULE
2973 static int __init stmmac_cmdline_opt(char *str)
2974 {
2975         char *opt;
2976
2977         if (!str || !*str)
2978                 return -EINVAL;
2979         while ((opt = strsep(&str, ",")) != NULL) {
2980                 if (!strncmp(opt, "debug:", 6)) {
2981                         if (kstrtoint(opt + 6, 0, &debug))
2982                                 goto err;
2983                 } else if (!strncmp(opt, "phyaddr:", 8)) {
2984                         if (kstrtoint(opt + 8, 0, &phyaddr))
2985                                 goto err;
2986                 } else if (!strncmp(opt, "dma_txsize:", 11)) {
2987                         if (kstrtoint(opt + 11, 0, &dma_txsize))
2988                                 goto err;
2989                 } else if (!strncmp(opt, "dma_rxsize:", 11)) {
2990                         if (kstrtoint(opt + 11, 0, &dma_rxsize))
2991                                 goto err;
2992                 } else if (!strncmp(opt, "buf_sz:", 7)) {
2993                         if (kstrtoint(opt + 7, 0, &buf_sz))
2994                                 goto err;
2995                 } else if (!strncmp(opt, "tc:", 3)) {
2996                         if (kstrtoint(opt + 3, 0, &tc))
2997                                 goto err;
2998                 } else if (!strncmp(opt, "watchdog:", 9)) {
2999                         if (kstrtoint(opt + 9, 0, &watchdog))
3000                                 goto err;
3001                 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
3002                         if (kstrtoint(opt + 10, 0, &flow_ctrl))
3003                                 goto err;
3004                 } else if (!strncmp(opt, "pause:", 6)) {
3005                         if (kstrtoint(opt + 6, 0, &pause))
3006                                 goto err;
3007                 } else if (!strncmp(opt, "eee_timer:", 10)) {
3008                         if (kstrtoint(opt + 10, 0, &eee_timer))
3009                                 goto err;
3010                 } else if (!strncmp(opt, "chain_mode:", 11)) {
3011                         if (kstrtoint(opt + 11, 0, &chain_mode))
3012                                 goto err;
3013                 }
3014         }
3015         return 0;
3016
3017 err:
3018         pr_err("%s: ERROR broken module parameter conversion", __func__);
3019         return -EINVAL;
3020 }
3021
3022 __setup("stmmaceth=", stmmac_cmdline_opt);
3023 #endif /* MODULE */
3024
3025 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
3026 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
3027 MODULE_LICENSE("GPL");