/*
 * linux/drivers/net/ehea/ehea_main.c
 *
 * eHEA ethernet device driver for IBM eServer System p
 *
 * (C) Copyright IBM Corp. 2006
 *
 * Authors:
 *	Christoph Raisch <raisch@de.ibm.com>
 *	Jan-Bernd Themann <themann@de.ibm.com>
 *	Thomas Klein <tklein@de.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/if_ether.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
#include <linux/memory.h>
#include <asm/kexec.h>
#include <linux/mutex.h>

#include "ehea_phyp.h"

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
MODULE_DESCRIPTION("IBM eServer HEA Driver");
MODULE_VERSION(DRV_VERSION);
static int msg_level = -1;
static int rq1_entries = EHEA_DEF_ENTRIES_RQ1;
static int rq2_entries = EHEA_DEF_ENTRIES_RQ2;
static int rq3_entries = EHEA_DEF_ENTRIES_RQ3;
static int sq_entries = EHEA_DEF_ENTRIES_SQ;
static int use_mcs;
static int use_lro;
static int lro_max_aggr = EHEA_LRO_MAX_AGGR;
static int num_tx_qps = EHEA_NUM_TX_QP;
static int prop_carrier_state;

module_param(msg_level, int, 0);
module_param(rq1_entries, int, 0);
module_param(rq2_entries, int, 0);
module_param(rq3_entries, int, 0);
module_param(sq_entries, int, 0);
module_param(prop_carrier_state, int, 0);
module_param(use_mcs, int, 0);
module_param(use_lro, int, 0);
module_param(lro_max_aggr, int, 0);
module_param(num_tx_qps, int, 0);
MODULE_PARM_DESC(num_tx_qps, "Number of TX-QPS");
MODULE_PARM_DESC(msg_level, "msg_level");
MODULE_PARM_DESC(prop_carrier_state, "Propagate carrier state of physical "
		 "port to stack. 1:yes, 0:no. Default = 0");
MODULE_PARM_DESC(rq3_entries, "Number of entries for Receive Queue 3 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ3));
MODULE_PARM_DESC(rq2_entries, "Number of entries for Receive Queue 2 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ2));
MODULE_PARM_DESC(rq1_entries, "Number of entries for Receive Queue 1 "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_RQ1));
MODULE_PARM_DESC(sq_entries, "Number of entries for the Send Queue "
		 "[2^x - 1], x = [6..14]. Default = "
		 __MODULE_STRING(EHEA_DEF_ENTRIES_SQ));
MODULE_PARM_DESC(use_mcs, "0:NAPI, 1:Multiple receive queues. Default = 0");
MODULE_PARM_DESC(lro_max_aggr, "LRO: Max packets to be aggregated. Default = "
		 __MODULE_STRING(EHEA_LRO_MAX_AGGR));
MODULE_PARM_DESC(use_lro, "Large Receive Offload, 1: enable, 0: disable. "
		 "Default = 0");
static int port_name_cnt;
static LIST_HEAD(adapter_list);
static unsigned long ehea_driver_flags;
static DEFINE_MUTEX(dlpar_mem_lock);
struct ehea_fw_handle_array ehea_fw_handles;
struct ehea_bcmc_reg_array ehea_bcmc_regs;

static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id);

static int __devexit ehea_remove(struct platform_device *dev);

static struct of_device_id ehea_device_table[] = {
	{
		.name = "lhea",
		.compatible = "IBM,lhea",
	},
	{},
};
MODULE_DEVICE_TABLE(of, ehea_device_table);

static struct of_platform_driver ehea_driver = {
	.driver = {
		.name = "ehea",
		.owner = THIS_MODULE,
		.of_match_table = ehea_device_table,
	},
	.probe = ehea_probe_adapter,
	.remove = ehea_remove,
};
void ehea_dump(void *adr, int len, char *msg)
{
	int x;
	unsigned char *deb = adr;

	for (x = 0; x < len; x += 16) {
		pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
			msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
		deb += 16;
	}
}
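/*
 * Example of the dump format produced above: each 16-byte chunk of the
 * buffer yields one line of the form
 *
 *   ehea: <msg> adr=<kernel address> ofs=0010 <first 8 bytes> <next 8 bytes>
 *
 * where the "ehea: " prefix comes from the pr_fmt() definition at the top
 * of this file and ofs advances by 0x10 per line.
 */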
void ehea_schedule_port_reset(struct ehea_port *port)
{
	if (!test_bit(__EHEA_DISABLE_PORT_RESET, &port->flags))
		schedule_work(&port->reset_task);
}
static void ehea_update_firmware_handles(void)
{
	struct ehea_fw_handle_entry *arr = NULL;
	struct ehea_adapter *adapter;
	int num_adapters = 0;
	int num_ports = 0;
	int num_portres = 0;
	int i = 0;
	int num_fw_handles, k, l;

	/* Determine number of handles */
	mutex_lock(&ehea_fw_handles.lock);

	list_for_each_entry(adapter, &adapter_list, list) {
		num_adapters++;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_ports++;
			num_portres += port->num_def_qps + port->num_add_tx_qps;
		}
	}

	num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES +
			 num_ports * EHEA_NUM_PORT_FW_HANDLES +
			 num_portres * EHEA_NUM_PORTRES_FW_HANDLES;

	if (num_fw_handles) {
		arr = kcalloc(num_fw_handles, sizeof(*arr), GFP_KERNEL);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		if (num_adapters == 0)
			break;

		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP) ||
			    (num_ports == 0))
				continue;

			for (l = 0;
			     l < port->num_def_qps + port->num_add_tx_qps;
			     l++) {
				struct ehea_port_res *pr = &port->port_res[l];

				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->qp->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_cq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->eq->fw_handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->send_mr.handle;
				arr[i].adh = adapter->handle;
				arr[i++].fwh = pr->recv_mr.handle;
			}
			arr[i].adh = adapter->handle;
			arr[i++].fwh = port->qp_eq->fw_handle;
			num_ports--;
		}

		arr[i].adh = adapter->handle;
		arr[i++].fwh = adapter->neq->fw_handle;

		if (adapter->mr.handle) {
			arr[i].adh = adapter->handle;
			arr[i++].fwh = adapter->mr.handle;
		}
		num_adapters--;
	}

out_update:
	kfree(ehea_fw_handles.arr);
	ehea_fw_handles.arr = arr;
	ehea_fw_handles.num_entries = i;
out:
	mutex_unlock(&ehea_fw_handles.lock);
}
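/*
 * Bookkeeping note: ehea_update_firmware_handles() takes a flat snapshot of
 * every firmware handle currently in use, sized as
 *
 *   num_fw_handles = num_adapters * EHEA_NUM_ADAPTER_FW_HANDLES
 *                  + num_ports    * EHEA_NUM_PORT_FW_HANDLES
 *                  + num_portres  * EHEA_NUM_PORTRES_FW_HANDLES
 *
 * Each port resource contributes its QP, send/recv CQ, EQ and send/recv MR
 * handles (the six arr[] entries above), each port adds its qp_eq handle,
 * and each adapter adds its neq and, if registered, its MR handle. On
 * allocation failure the previous snapshot is deliberately kept.
 */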
static void ehea_update_bcmc_registrations(void)
{
	unsigned long flags;
	struct ehea_bcmc_reg_entry *arr = NULL;
	struct ehea_adapter *adapter;
	struct ehea_mc_list *mc_entry;
	int num_registrations = 0;
	int i = 0;
	int k;

	spin_lock_irqsave(&ehea_bcmc_regs.lock, flags);

	/* Determine number of registrations */
	list_for_each_entry(adapter, &adapter_list, list)
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			num_registrations += 2;	/* Broadcast registrations */

			list_for_each_entry(mc_entry, &port->mc_list->list, list)
				num_registrations += 2;
		}

	if (num_registrations) {
		arr = kcalloc(num_registrations, sizeof(*arr), GFP_ATOMIC);
		if (!arr)
			goto out;	/* Keep the existing array */
	} else
		goto out_update;

	list_for_each_entry(adapter, &adapter_list, list) {
		for (k = 0; k < EHEA_MAX_PORTS; k++) {
			struct ehea_port *port = adapter->port[k];

			if (!port || (port->state != EHEA_PORT_UP))
				continue;

			if (num_registrations == 0)
				goto out_update;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_UNTAGGED;
			arr[i++].macaddr = port->mac_addr;

			arr[i].adh = adapter->handle;
			arr[i].port_id = port->logical_port_id;
			arr[i].reg_type = EHEA_BCMC_BROADCAST |
					  EHEA_BCMC_VLANID_ALL;
			arr[i++].macaddr = port->mac_addr;
			num_registrations -= 2;

			list_for_each_entry(mc_entry,
					    &port->mc_list->list, list) {
				if (num_registrations == 0)
					goto out_update;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_UNTAGGED;
				arr[i++].macaddr = mc_entry->macaddr;

				arr[i].adh = adapter->handle;
				arr[i].port_id = port->logical_port_id;
				arr[i].reg_type = EHEA_BCMC_SCOPE_ALL |
						  EHEA_BCMC_MULTICAST |
						  EHEA_BCMC_VLANID_ALL;
				arr[i++].macaddr = mc_entry->macaddr;
				num_registrations -= 2;
			}
		}
	}

out_update:
	kfree(ehea_bcmc_regs.arr);
	ehea_bcmc_regs.arr = arr;
	ehea_bcmc_regs.num_entries = i;
out:
	spin_unlock_irqrestore(&ehea_bcmc_regs.lock, flags);
}
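/*
 * Accounting note: every active port needs exactly two broadcast
 * registrations (one EHEA_BCMC_UNTAGGED, one EHEA_BCMC_VLANID_ALL), and
 * every multicast list entry likewise comes in an untagged/all-VLAN pair,
 * hence the "num_registrations += 2" steps above. The array is rebuilt
 * under ehea_bcmc_regs.lock with GFP_ATOMIC because callers may hold
 * spinlocks; as with the firmware handles, a failed allocation keeps the
 * old array.
 */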
static struct net_device_stats *ehea_get_stats(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct net_device_stats *stats = &port->stats;
	struct hcp_ehea_port_cb2 *cb2;
	u64 hret, rx_packets, tx_packets, rx_bytes = 0, tx_bytes = 0;
	int i;

	memset(stats, 0, sizeof(*stats));

	cb2 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb2) {
		netdev_err(dev, "no mem for cb2\n");
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id,
				      H_PORT_CB2, H_PORT_CB2_ALL, cb2);
	if (hret != H_SUCCESS) {
		netdev_err(dev, "query_ehea_port failed\n");
		goto out_herr;
	}

	if (netif_msg_hw(port))
		ehea_dump(cb2, sizeof(*cb2), "net_device_stats");

	rx_packets = 0;
	for (i = 0; i < port->num_def_qps; i++) {
		rx_packets += port->port_res[i].rx_packets;
		rx_bytes += port->port_res[i].rx_bytes;
	}

	tx_packets = 0;
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		tx_packets += port->port_res[i].tx_packets;
		tx_bytes += port->port_res[i].tx_bytes;
	}

	stats->tx_packets = tx_packets;
	stats->multicast = cb2->rxmcp;
	stats->rx_errors = cb2->rxuerr;
	stats->rx_bytes = rx_bytes;
	stats->tx_bytes = tx_bytes;
	stats->rx_packets = rx_packets;

out_herr:
	free_page((unsigned long)cb2);
out:
	return stats;
}
static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int max_index_mask = pr->rq1_skba.len - 1;
	int fill_wqes = pr->rq1_skba.os_skbs + nr_of_wqes;
	int adder = 0;
	int i;

	pr->rq1_skba.os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		pr->rq1_skba.index = index;
		pr->rq1_skba.os_skbs = fill_wqes;
		return;
	}

	for (i = 0; i < fill_wqes; i++) {
		if (!skb_arr_rq1[index]) {
			skb_arr_rq1[index] = netdev_alloc_skb(dev,
							      EHEA_L_PKT_SIZE);
			if (!skb_arr_rq1[index]) {
				netdev_info(dev, "Unable to allocate enough skb in the array\n");
				pr->rq1_skba.os_skbs = fill_wqes - i;
				break;
			}
		}
		index--;
		index &= max_index_mask;
		adder++;
	}

	if (adder == 0)
		return;

	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, adder);
}
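/*
 * Ring-index arithmetic used above: rq1_skba.len is a power of two, so
 * "index &= max_index_mask" wraps the index cheaply. For example, with
 * len = 1024 the mask is 0x3FF; decrementing from index 0 yields -1, and
 * -1 & 0x3FF = 1023, i.e. the walk wraps to the top of the array. Only
 * "adder" entries are announced to the hardware via ehea_update_rq1a().
 */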
static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
{
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct net_device *dev = pr->port->netdev;
	int i;

	if (nr_rq1a > pr->rq1_skba.len) {
		netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
		return;
	}

	for (i = 0; i < nr_rq1a; i++) {
		skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
		if (!skb_arr_rq1[i]) {
			netdev_info(dev, "Not enough memory to allocate skb array\n");
			break;
		}
	}
	/* Ring doorbell */
	ehea_update_rq1a(pr->qp, i);
}
static int ehea_refill_rq_def(struct ehea_port_res *pr,
			      struct ehea_q_skb_arr *q_skba, int rq_nr,
			      int num_wqes, int wqe_type, int packet_size)
{
	struct net_device *dev = pr->port->netdev;
	struct ehea_qp *qp = pr->qp;
	struct sk_buff **skb_arr = q_skba->arr;
	struct ehea_rwqe *rwqe;
	int i, index, max_index_mask, fill_wqes;
	int adder = 0;
	int ret = 0;

	fill_wqes = q_skba->os_skbs + num_wqes;
	q_skba->os_skbs = 0;

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		q_skba->os_skbs = fill_wqes;
		return ret;
	}

	index = q_skba->index;
	max_index_mask = q_skba->len - 1;
	for (i = 0; i < fill_wqes; i++) {
		u64 tmp_addr;
		struct sk_buff *skb;

		skb = netdev_alloc_skb_ip_align(dev, packet_size);
		if (!skb) {
			q_skba->os_skbs = fill_wqes - i;
			if (q_skba->os_skbs == q_skba->len - 2) {
				netdev_info(pr->port->netdev,
					    "rq%i ran dry - no mem for skb\n",
					    rq_nr);
				ret = -ENOMEM;
			}
			break;
		}

		skb_arr[index] = skb;
		tmp_addr = ehea_map_vaddr(skb->data);
		if (tmp_addr == -1) {
			dev_kfree_skb(skb);
			q_skba->os_skbs = fill_wqes - i;
			break;
		}

		rwqe = ehea_get_next_rwqe(qp, rq_nr);
		rwqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, wqe_type)
			    | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, index);
		rwqe->sg_list[0].l_key = pr->recv_mr.lkey;
		rwqe->sg_list[0].vaddr = tmp_addr;
		rwqe->sg_list[0].len = packet_size;
		rwqe->data_segments = 1;

		index++;
		index &= max_index_mask;
		adder++;
	}

	q_skba->index = index;
	if (adder == 0)
		goto out;

	/* Ring doorbell */
	if (rq_nr == 2)
		ehea_update_rq2a(pr->qp, adder);
	else
		ehea_update_rq3a(pr->qp, adder);
out:
	return ret;
}
static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
				  nr_of_wqes, EHEA_RWQE2_TYPE,
				  EHEA_RQ2_PKT_SIZE);
}

static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
{
	return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
				  nr_of_wqes, EHEA_RWQE3_TYPE,
				  EHEA_MAX_PACKET_SIZE);
}
static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
{
	*rq_num = (cqe->type & EHEA_CQE_TYPE_RQ) >> 5;
	if ((cqe->status & EHEA_CQE_STAT_ERR_MASK) == 0)
		return 0;
	if (((cqe->status & EHEA_CQE_STAT_ERR_TCP) != 0) &&
	    (cqe->header_length == 0))
		return 0;
	return -EINVAL;
}
static inline void ehea_fill_skb(struct net_device *dev,
				 struct sk_buff *skb, struct ehea_cqe *cqe)
{
	int length = cqe->num_bytes_transfered - 4;	/* remove CRC */

	skb_put(skb, length);
	skb->protocol = eth_type_trans(skb, dev);

	/* The packet was not an IPV4 packet so a complemented checksum was
	   calculated. The value is found in the Internet Checksum field. */
	if (cqe->status & EHEA_CQE_BLIND_CKSUM) {
		skb->ip_summed = CHECKSUM_COMPLETE;
		skb->csum = csum_unfold(~cqe->inet_checksum_value);
	} else
		skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static inline struct sk_buff *get_skb_by_index(struct sk_buff **skb_array,
					       int arr_len,
					       struct ehea_cqe *cqe)
{
	int skb_index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
	struct sk_buff *skb;
	void *pref;
	int x;

	x = skb_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetch(pref);
	prefetch(pref + EHEA_CACHE_LINE);
	prefetch(pref + EHEA_CACHE_LINE * 2);
	prefetch(pref + EHEA_CACHE_LINE * 3);

	skb = skb_array[skb_index];
	skb_array[skb_index] = NULL;
	return skb;
}
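/*
 * Prefetch rationale for the helper above: while the current skb is being
 * returned, the slot that will most likely be consumed next (skb_index + 1,
 * wrapped to the array length) and the first cache lines of its packet data
 * are pulled into the cache, hiding the memory latency of the per-packet
 * array walk on the hot receive path.
 */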
static inline struct sk_buff *get_skb_by_index_ll(struct sk_buff **skb_array,
						  int arr_len, int wqe_index)
{
	struct sk_buff *skb;
	void *pref;
	int x;

	x = wqe_index + 1;
	x &= (arr_len - 1);

	pref = skb_array[x];
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	pref = (skb_array[x]->data);
	prefetchw(pref);
	prefetchw(pref + EHEA_CACHE_LINE);

	skb = skb_array[wqe_index];
	skb_array[wqe_index] = NULL;
	return skb;
}
static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
				 struct ehea_cqe *cqe, int *processed_rq2,
				 int *processed_rq3)
{
	struct sk_buff *skb;

	if (cqe->status & EHEA_CQE_STAT_ERR_TCP)
		pr->p_stats.err_tcp_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_IP)
		pr->p_stats.err_ip_cksum++;
	if (cqe->status & EHEA_CQE_STAT_ERR_CRC)
		pr->p_stats.err_frame_crc++;

	if (rq == 2) {
		*processed_rq2 += 1;
		skb = get_skb_by_index(pr->rq2_skba.arr, pr->rq2_skba.len, cqe);
		dev_kfree_skb(skb);
	} else if (rq == 3) {
		*processed_rq3 += 1;
		skb = get_skb_by_index(pr->rq3_skba.arr, pr->rq3_skba.len, cqe);
		dev_kfree_skb(skb);
	}

	if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
		if (netif_msg_rx_err(pr->port)) {
			pr_err("Critical receive error for QP %d. Resetting port.\n",
			       pr->qp->init_attr.qp_nr);
			ehea_dump(cqe, sizeof(*cqe), "CQE");
		}
		ehea_schedule_port_reset(pr->port);
		return 1;
	}

	return 0;
}
static int get_skb_hdr(struct sk_buff *skb, void **iphdr,
		       void **tcph, u64 *hdr_flags, void *priv)
{
	struct ehea_cqe *cqe = priv;
	unsigned int ip_len;
	struct iphdr *iph;

	/* non tcp/udp packets */
	if (!cqe->header_length)
		return -1;

	/* non tcp packet */
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	if (iph->protocol != IPPROTO_TCP)
		return -1;

	ip_len = ip_hdrlen(skb);
	skb_set_transport_header(skb, ip_len);
	*tcph = tcp_hdr(skb);

	/* check if ip header and tcp header are complete */
	if (ntohs(iph->tot_len) < ip_len + tcp_hdrlen(skb))
		return -1;

	*hdr_flags = LRO_IPV4 | LRO_TCP;
	*iphdr = iph;

	return 0;
}
static void ehea_proc_skb(struct ehea_port_res *pr, struct ehea_cqe *cqe,
			  struct sk_buff *skb)
{
	int vlan_extracted = ((cqe->status & EHEA_CQE_VLAN_TAG_XTRACT) &&
			      pr->port->vgrp);

	if (skb->dev->features & NETIF_F_LRO) {
		if (vlan_extracted)
			lro_vlan_hwaccel_receive_skb(&pr->lro_mgr, skb,
						     pr->port->vgrp,
						     cqe->vlan_tag,
						     cqe);
		else
			lro_receive_skb(&pr->lro_mgr, skb, cqe);
	} else {
		if (vlan_extracted)
			vlan_hwaccel_receive_skb(skb, pr->port->vgrp,
						 cqe->vlan_tag);
		else
			netif_receive_skb(skb);
	}
}
static int ehea_proc_rwqes(struct net_device *dev,
			   struct ehea_port_res *pr,
			   int budget)
{
	struct ehea_port *port = pr->port;
	struct ehea_qp *qp = pr->qp;
	struct ehea_cqe *cqe;
	struct sk_buff *skb;
	struct sk_buff **skb_arr_rq1 = pr->rq1_skba.arr;
	struct sk_buff **skb_arr_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skb_arr_rq3 = pr->rq3_skba.arr;
	int skb_arr_rq1_len = pr->rq1_skba.len;
	int skb_arr_rq2_len = pr->rq2_skba.len;
	int skb_arr_rq3_len = pr->rq3_skba.len;
	int processed, processed_rq1, processed_rq2, processed_rq3;
	u64 processed_bytes = 0;
	int wqe_index, last_wqe_index, rq, port_reset;

	processed = processed_rq1 = processed_rq2 = processed_rq3 = 0;
	last_wqe_index = 0;

	cqe = ehea_poll_rq1(qp, &wqe_index);
	while ((processed < budget) && cqe) {
		ehea_inc_rq1(qp);
		processed_rq1++;
		processed++;
		if (netif_msg_rx_status(port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		last_wqe_index = wqe_index;
		rmb();
		if (!ehea_check_cqe(cqe, &rq)) {
			if (rq == 1) {
				/* LL RQ1 */
				skb = get_skb_by_index_ll(skb_arr_rq1,
							  skb_arr_rq1_len,
							  wqe_index);
				if (unlikely(!skb)) {
					netif_info(port, rx_err, dev,
						   "LL rq1: skb=NULL\n");

					skb = netdev_alloc_skb(dev,
							       EHEA_L_PKT_SIZE);
					if (!skb) {
						netdev_err(dev, "Not enough memory to allocate skb\n");
						break;
					}
				}
				skb_copy_to_linear_data(skb, ((char *)cqe) + 64,
						 cqe->num_bytes_transfered - 4);
				ehea_fill_skb(dev, skb, cqe);
			} else if (rq == 2) {
				/* RQ2 */
				skb = get_skb_by_index(skb_arr_rq2,
						       skb_arr_rq2_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq2: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq2++;
			} else {
				/* RQ3 */
				skb = get_skb_by_index(skb_arr_rq3,
						       skb_arr_rq3_len, cqe);
				if (unlikely(!skb)) {
					netif_err(port, rx_err, dev,
						  "rq3: skb=NULL\n");
					break;
				}
				ehea_fill_skb(dev, skb, cqe);
				processed_rq3++;
			}

			processed_bytes += skb->len;
			ehea_proc_skb(pr, cqe, skb);
		} else {
			pr->p_stats.poll_receive_errors++;
			port_reset = ehea_treat_poll_error(pr, rq, cqe,
							   &processed_rq2,
							   &processed_rq3);
			if (port_reset)
				break;
		}
		cqe = ehea_poll_rq1(qp, &wqe_index);
	}
	if (dev->features & NETIF_F_LRO)
		lro_flush_all(&pr->lro_mgr);

	pr->rx_packets += processed;
	pr->rx_bytes += processed_bytes;

	ehea_refill_rq1(pr, last_wqe_index, processed_rq1);
	ehea_refill_rq2(pr, processed_rq2);
	ehea_refill_rq3(pr, processed_rq3);

	return processed;
}
#define SWQE_RESTART_CHECK 0xdeadbeaff00d0000ull

static void reset_sq_restart_flag(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		pr->sq_restart_flag = 0;
	}
	wake_up(&port->restart_wq);
}
static void check_sqs(struct ehea_port *port)
{
	struct ehea_swqe *swqe;
	int swqe_index;
	int i, k;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int ret;
		k = 0;
		swqe = ehea_get_swqe(pr->qp, &swqe_index);
		memset(swqe, 0, SWQE_HEADER_SIZE);
		atomic_dec(&pr->swqe_avail);

		swqe->tx_control |= EHEA_SWQE_PURGE;
		swqe->wr_id = SWQE_RESTART_CHECK;
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
		swqe->tx_control |= EHEA_SWQE_IMM_DATA_PRESENT;
		swqe->immediate_data_length = 80;

		ehea_post_swqe(pr->qp, swqe);

		ret = wait_event_timeout(port->restart_wq,
					 pr->sq_restart_flag == 0,
					 msecs_to_jiffies(100));
		if (!ret) {
			pr_err("HW/SW queues out of sync\n");
			ehea_schedule_port_reset(pr->port);
			return;
		}
	}
}
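/*
 * How the probe above works: check_sqs() posts one dummy SWQE per send
 * queue, marked EHEA_SWQE_PURGE and tagged with the SWQE_RESTART_CHECK
 * cookie in wr_id. When ehea_proc_cqes() sees the cookie it sets
 * pr->sq_restart_flag, and reset_sq_restart_flag() (invoked elsewhere in
 * the restart path) clears the flags again and wakes restart_wq. If the
 * flag has not returned to 0 within 100 ms, the hardware and software
 * queues are assumed to have diverged and the port is reset.
 */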
static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
{
	struct sk_buff *skb;
	struct ehea_cq *send_cq = pr->send_cq;
	struct ehea_cqe *cqe;
	int quota = my_quota;
	int cqe_counter = 0;
	int swqe_av = 0;
	int index;
	unsigned long flags;

	cqe = ehea_poll_cq(send_cq);
	while (cqe && (quota > 0)) {
		ehea_inc_cq(send_cq);

		cqe_counter++;
		rmb();

		if (cqe->wr_id == SWQE_RESTART_CHECK) {
			pr->sq_restart_flag = 1;
			swqe_av++;
			break;
		}

		if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
			pr_err("Bad send completion status=0x%04X\n",
			       cqe->status);

			if (netif_msg_tx_err(pr->port))
				ehea_dump(cqe, sizeof(*cqe), "Send CQE");

			if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
				pr_err("Resetting port\n");
				ehea_schedule_port_reset(pr->port);
				break;
			}
		}

		if (netif_msg_tx_done(pr->port))
			ehea_dump(cqe, sizeof(*cqe), "CQE");

		if (likely(EHEA_BMASK_GET(EHEA_WR_ID_TYPE, cqe->wr_id)
			   == EHEA_SWQE2_TYPE)) {

			index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, cqe->wr_id);
			skb = pr->sq_skba.arr[index];
			dev_kfree_skb(skb);
			pr->sq_skba.arr[index] = NULL;
		}

		swqe_av += EHEA_BMASK_GET(EHEA_WR_ID_REFILL, cqe->wr_id);
		quota--;

		cqe = ehea_poll_cq(send_cq);
	}

	ehea_update_feca(send_cq, cqe_counter);
	atomic_add(swqe_av, &pr->swqe_avail);

	spin_lock_irqsave(&pr->netif_queue, flags);

	if (pr->queue_stopped && (atomic_read(&pr->swqe_avail)
				  >= pr->swqe_refill_th)) {
		netif_wake_queue(pr->port->netdev);
		pr->queue_stopped = 0;
	}
	spin_unlock_irqrestore(&pr->netif_queue, flags);
	wake_up(&pr->port->swqe_avail_wq);

	return cqe;
}
#define EHEA_NAPI_POLL_NUM_BEFORE_IRQ 16
#define EHEA_POLL_MAX_CQES 65535

static int ehea_poll(struct napi_struct *napi, int budget)
{
	struct ehea_port_res *pr = container_of(napi, struct ehea_port_res,
						napi);
	struct net_device *dev = pr->port->netdev;
	struct ehea_cqe *cqe;
	struct ehea_cqe *cqe_skb = NULL;
	int force_irq, wqe_index;
	int rx = 0;

	force_irq = (pr->poll_counter > EHEA_NAPI_POLL_NUM_BEFORE_IRQ);
	cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);

	if (!force_irq)
		rx += ehea_proc_rwqes(dev, pr, budget - rx);

	while ((rx != budget) || force_irq) {
		pr->poll_counter = 0;
		force_irq = 0;
		napi_complete(napi);
		ehea_reset_cq_ep(pr->recv_cq);
		ehea_reset_cq_ep(pr->send_cq);
		ehea_reset_cq_n1(pr->recv_cq);
		ehea_reset_cq_n1(pr->send_cq);

		cqe = ehea_poll_rq1(pr->qp, &wqe_index);
		cqe_skb = ehea_poll_cq(pr->send_cq);

		if (!cqe && !cqe_skb)
			return rx;

		if (!napi_reschedule(napi))
			return rx;

		cqe_skb = ehea_proc_cqes(pr, EHEA_POLL_MAX_CQES);
		rx += ehea_proc_rwqes(dev, pr, budget - rx);
	}

	pr->poll_counter++;
	return rx;
}
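/*
 * Re-arm race note for ehea_poll(): once the budget is not exhausted (or a
 * forced-IRQ pass is requested), CQ event generation is re-enabled via the
 * ehea_reset_cq_ep()/ehea_reset_cq_n1() calls and both queues are polled
 * one more time. If a CQE slipped in between the last poll and the
 * re-enable, napi_reschedule() pulls the context back into polling mode
 * instead of losing the event; otherwise the handler returns with
 * interrupts armed. EHEA_NAPI_POLL_NUM_BEFORE_IRQ bounds how many
 * consecutive poll rounds run before an IRQ-driven pass is forced.
 */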
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ehea_netpoll(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	int i;

	for (i = 0; i < port->num_def_qps; i++)
		napi_schedule(&port->port_res[i].napi);
}
#endif

static irqreturn_t ehea_recv_irq_handler(int irq, void *param)
{
	struct ehea_port_res *pr = param;

	napi_schedule(&pr->napi);

	return IRQ_HANDLED;
}
static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
{
	struct ehea_port *port = param;
	struct ehea_qp *qp;
	struct ehea_eqe *eqe;
	u32 qp_token;
	int reset_port = 0;
	u64 resource_type, aer, aerr;

	eqe = ehea_poll_eq(port->qp_eq);

	while (eqe) {
		qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
		pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
		       eqe->entry, qp_token);

		qp = port->port_res[qp_token].qp;

		resource_type = ehea_error_data(port->adapter, qp->fw_handle,
						&aer, &aerr);

		if (resource_type == EHEA_AER_RESTYPE_QP) {
			if ((aer & EHEA_AER_RESET_MASK) ||
			    (aerr & EHEA_AERR_RESET_MASK))
				reset_port = 1;
		} else
			reset_port = 1;	/* Reset in case of CQ or EQ error */

		eqe = ehea_poll_eq(port->qp_eq);
	}

	if (reset_port) {
		pr_err("Resetting port\n");
		ehea_schedule_port_reset(port);
	}

	return IRQ_HANDLED;
}
static struct ehea_port *ehea_get_port(struct ehea_adapter *adapter,
				       int logical_port)
{
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i])
			if (adapter->port[i]->logical_port_id == logical_port)
				return adapter->port[i];
	return NULL;
}
int ehea_sense_port_attr(struct ehea_port *port)
{
	int ret;
	u64 hret;
	struct hcp_ehea_port_cb0 *cb0;

	/* may be called via ehea_neq_tasklet() */
	cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_port(port->adapter->handle,
				      port->logical_port_id, H_PORT_CB0,
				      EHEA_BMASK_SET(H_PORT_CB0_ALL, 0xFFFF),
				      cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	/* MAC address */
	port->mac_addr = cb0->port_mac_addr << 16;

	if (!is_valid_ether_addr((u8 *)&port->mac_addr)) {
		ret = -EADDRNOTAVAIL;
		goto out_free;
	}

	/* Port speed */
	switch (cb0->port_speed) {
	case H_SPEED_10M_H:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 0;
		break;
	case H_SPEED_10M_F:
		port->port_speed = EHEA_SPEED_10M;
		port->full_duplex = 1;
		break;
	case H_SPEED_100M_H:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 0;
		break;
	case H_SPEED_100M_F:
		port->port_speed = EHEA_SPEED_100M;
		port->full_duplex = 1;
		break;
	case H_SPEED_1G_F:
		port->port_speed = EHEA_SPEED_1G;
		port->full_duplex = 1;
		break;
	case H_SPEED_10G_F:
		port->port_speed = EHEA_SPEED_10G;
		port->full_duplex = 1;
		break;
	default:
		port->port_speed = 0;
		port->full_duplex = 0;
		break;
	}

	port->autoneg = 1;
	port->num_mcs = cb0->num_default_qps;

	/* Number of default QPs */
	if (use_mcs)
		port->num_def_qps = cb0->num_default_qps;
	else
		port->num_def_qps = 1;

	if (!port->num_def_qps) {
		ret = -EINVAL;
		goto out_free;
	}

	port->num_tx_qps = num_tx_qps;

	if (port->num_def_qps >= port->num_tx_qps)
		port->num_add_tx_qps = 0;
	else
		port->num_add_tx_qps = port->num_tx_qps - port->num_def_qps;

	ret = 0;
out_free:
	if (ret || netif_msg_probe(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_sense_port_attr");
	free_page((unsigned long)cb0);
out:
	return ret;
}
int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
		goto out;
	}

	cb4->port_speed = port_speed;

	netif_carrier_off(port->netdev);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB4, H_PORT_CB4_SPEED, cb4);
	if (hret == H_SUCCESS) {
		port->autoneg = port_speed == EHEA_SPEED_AUTONEG ? 1 : 0;

		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4, H_PORT_CB4_SPEED,
					      cb4);
		if (hret == H_SUCCESS) {
			switch (cb4->port_speed) {
			case H_SPEED_10M_H:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 0;
				break;
			case H_SPEED_10M_F:
				port->port_speed = EHEA_SPEED_10M;
				port->full_duplex = 1;
				break;
			case H_SPEED_100M_H:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 0;
				break;
			case H_SPEED_100M_F:
				port->port_speed = EHEA_SPEED_100M;
				port->full_duplex = 1;
				break;
			case H_SPEED_1G_F:
				port->port_speed = EHEA_SPEED_1G;
				port->full_duplex = 1;
				break;
			case H_SPEED_10G_F:
				port->port_speed = EHEA_SPEED_10G;
				port->full_duplex = 1;
				break;
			default:
				port->port_speed = 0;
				port->full_duplex = 0;
				break;
			}
		} else {
			pr_err("Failed sensing port speed\n");
			ret = -EIO;
		}
	} else {
		if (hret == H_AUTHORITY) {
			pr_info("Hypervisor denied setting port speed\n");
			ret = -EPERM;
		} else {
			ret = -EIO;
			pr_err("Failed setting port speed\n");
		}
	}
	if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
		netif_carrier_on(port->netdev);

	free_page((unsigned long)cb4);
out:
	return ret;
}
static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
{
	int ret;
	u8 ec;
	u8 portnum;
	struct ehea_port *port;
	struct net_device *dev;

	ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
	portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
	port = ehea_get_port(adapter, portnum);
	dev = port->netdev;

	switch (ec) {
	case EHEA_EC_PORTSTATE_CHG:	/* port state change */

		if (!port) {
			netdev_err(dev, "unknown portnum %x\n", portnum);
			break;
		}

		if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
			if (!netif_carrier_ok(dev)) {
				ret = ehea_sense_port_attr(port);
				if (ret) {
					netdev_err(dev, "failed resensing port attributes\n");
					break;
				}

				netif_info(port, link, dev,
					   "Logical port up: %dMbps %s Duplex\n",
					   port->port_speed,
					   port->full_duplex == 1 ?
					   "Full" : "Half");

				netif_carrier_on(dev);
				netif_wake_queue(dev);
			}
		} else
			if (netif_carrier_ok(dev)) {
				netif_info(port, link, dev,
					   "Logical port down\n");
				netif_carrier_off(dev);
				netif_stop_queue(dev);
			}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
			port->phy_link = EHEA_PHY_LINK_UP;
			netif_info(port, link, dev,
				   "Physical port up\n");
			if (prop_carrier_state)
				netif_carrier_on(dev);
		} else {
			port->phy_link = EHEA_PHY_LINK_DOWN;
			netif_info(port, link, dev,
				   "Physical port down\n");
			if (prop_carrier_state)
				netif_carrier_off(dev);
		}

		if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
			netdev_info(dev,
				    "External switch port is primary port\n");
		else
			netdev_info(dev,
				    "External switch port is backup port\n");

		break;
	case EHEA_EC_ADAPTER_MALFUNC:
		netdev_err(dev, "Adapter malfunction\n");
		break;
	case EHEA_EC_PORT_MALFUNC:
		netdev_info(dev, "Port malfunction\n");
		netif_carrier_off(dev);
		netif_stop_queue(dev);
		break;
	default:
		netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
		break;
	}
}
static void ehea_neq_tasklet(unsigned long data)
{
	struct ehea_adapter *adapter = (struct ehea_adapter *)data;
	struct ehea_eqe *eqe;
	u64 event_mask;

	eqe = ehea_poll_eq(adapter->neq);
	pr_debug("eqe=%p\n", eqe);

	while (eqe) {
		pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
		ehea_parse_eqe(adapter, eqe->entry);
		eqe = ehea_poll_eq(adapter->neq);
		pr_debug("next eqe=%p\n", eqe);
	}

	event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
		   | EHEA_BMASK_SET(NELR_ADAPTER_MALFUNC, 1)
		   | EHEA_BMASK_SET(NELR_PORT_MALFUNC, 1);

	ehea_h_reset_events(adapter->handle,
			    adapter->neq->fw_handle, event_mask);
}

static irqreturn_t ehea_interrupt_neq(int irq, void *param)
{
	struct ehea_adapter *adapter = param;
	tasklet_hi_schedule(&adapter->neq_tasklet);
	return IRQ_HANDLED;
}
static int ehea_fill_port_res(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;

	ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1
			       - init_attr->act_nr_rwqes_rq2
			       - init_attr->act_nr_rwqes_rq3 - 1);

	ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);

	ret |= ehea_refill_rq3(pr, init_attr->act_nr_rwqes_rq3 - 1);

	return ret;
}
static int ehea_reg_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i, ret;

	snprintf(port->int_aff_name, EHEA_IRQ_NAME_SIZE - 1, "%s-aff",
		 dev->name);

	ret = ibmebus_request_irq(port->qp_eq->attr.ist1,
				  ehea_qp_aff_irq_handler,
				  IRQF_DISABLED, port->int_aff_name, port);
	if (ret) {
		netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
			   port->qp_eq->attr.ist1);
		goto out_free_qpeq;
	}

	netif_info(port, ifup, dev,
		   "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
		   port->qp_eq->attr.ist1);

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		snprintf(pr->int_send_name, EHEA_IRQ_NAME_SIZE - 1,
			 "%s-queue%d", dev->name, i);
		ret = ibmebus_request_irq(pr->eq->attr.ist1,
					  ehea_recv_irq_handler,
					  IRQF_DISABLED, pr->int_send_name,
					  pr);
		if (ret) {
			netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
				   i, pr->eq->attr.ist1);
			goto out_free_req;
		}
		netif_info(port, ifup, dev,
			   "irq_handle 0x%X for function ehea_queue_int %d registered\n",
			   pr->eq->attr.ist1, i);
	}
out:
	return ret;

out_free_req:
	while (--i >= 0) {
		u32 ist = port->port_res[i].eq->attr.ist1;
		ibmebus_free_irq(ist, &port->port_res[i]);
	}

out_free_qpeq:
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	i = port->num_def_qps;

	goto out;
}
static void ehea_free_interrupts(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_port_res *pr;
	int i;

	/* send */
	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		pr = &port->port_res[i];
		ibmebus_free_irq(pr->eq->attr.ist1, pr);
		netif_info(port, intr, dev,
			   "free send irq for res %d with handle 0x%X\n",
			   i, pr->eq->attr.ist1);
	}

	/* associated events */
	ibmebus_free_irq(port->qp_eq->attr.ist1, port);
	netif_info(port, intr, dev,
		   "associated event interrupt for handle 0x%X freed\n",
		   port->qp_eq->attr.ist1);
}
static int ehea_configure_port(struct ehea_port *port)
{
	int ret, i;
	u64 hret, mask;
	struct hcp_ehea_port_cb0 *cb0;

	ret = -ENOMEM;
	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0)
		goto out;

	cb0->port_rc = EHEA_BMASK_SET(PXLY_RC_VALID, 1)
		     | EHEA_BMASK_SET(PXLY_RC_IP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_TCP_UDP_CHKSUM, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_XTRACT, 1)
		     | EHEA_BMASK_SET(PXLY_RC_VLAN_TAG_FILTER,
				      PXLY_RC_VLAN_FILTER)
		     | EHEA_BMASK_SET(PXLY_RC_JUMBO_FRAME, 1);

	for (i = 0; i < port->num_mcs; i++)
		if (use_mcs)
			cb0->default_qpn_arr[i] =
				port->port_res[i].qp->init_attr.qp_nr;
		else
			cb0->default_qpn_arr[i] =
				port->port_res[0].qp->init_attr.qp_nr;

	if (netif_msg_ifup(port))
		ehea_dump(cb0, sizeof(*cb0), "ehea_configure_port");

	mask = EHEA_BMASK_SET(H_PORT_CB0_PRC, 1)
	     | EHEA_BMASK_SET(H_PORT_CB0_DEFQPNARRAY, 1);

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB0, mask, cb0);
	ret = -EIO;
	if (hret != H_SUCCESS)
		goto out_free;

	ret = 0;

out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}
int ehea_gen_smrs(struct ehea_port_res *pr)
{
	int ret;
	struct ehea_adapter *adapter = pr->port->adapter;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->send_mr);
	if (ret)
		goto out;

	ret = ehea_gen_smr(adapter, &adapter->mr, &pr->recv_mr);
	if (ret)
		goto out_free;

	return 0;

out_free:
	ehea_rem_mr(&pr->send_mr);
out:
	pr_err("Generating SMRS failed\n");
	return -EIO;
}

int ehea_rem_smrs(struct ehea_port_res *pr)
{
	if ((ehea_rem_mr(&pr->send_mr)) ||
	    (ehea_rem_mr(&pr->recv_mr)))
		return -EIO;
	else
		return 0;
}
static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
{
	int arr_size = sizeof(void *) * max_q_entries;

	q_skba->arr = vzalloc(arr_size);
	if (!q_skba->arr)
		return -ENOMEM;

	q_skba->len = max_q_entries;
	q_skba->index = 0;
	q_skba->os_skbs = 0;

	return 0;
}
static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
			      struct port_res_cfg *pr_cfg, int queue_token)
{
	struct ehea_adapter *adapter = port->adapter;
	enum ehea_eq_type eq_type = EHEA_EQ;
	struct ehea_qp_init_attr *init_attr = NULL;
	int ret = -EIO;
	u64 tx_bytes, rx_bytes, tx_packets, rx_packets;

	tx_bytes = pr->tx_bytes;
	tx_packets = pr->tx_packets;
	rx_bytes = pr->rx_bytes;
	rx_packets = pr->rx_packets;

	memset(pr, 0, sizeof(struct ehea_port_res));

	pr->tx_bytes = tx_bytes;
	pr->tx_packets = tx_packets;
	pr->rx_bytes = rx_bytes;
	pr->rx_packets = rx_packets;

	pr->port = port;
	spin_lock_init(&pr->xmit_lock);
	spin_lock_init(&pr->netif_queue);

	pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
	if (!pr->eq) {
		pr_err("create_eq failed (eq)\n");
		goto out_free;
	}

	pr->recv_cq = ehea_create_cq(adapter, pr_cfg->max_entries_rcq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->recv_cq) {
		pr_err("create_cq failed (cq_recv)\n");
		goto out_free;
	}

	pr->send_cq = ehea_create_cq(adapter, pr_cfg->max_entries_scq,
				     pr->eq->fw_handle,
				     port->logical_port_id);
	if (!pr->send_cq) {
		pr_err("create_cq failed (cq_send)\n");
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
			pr->send_cq->attr.act_nr_of_cqes,
			pr->recv_cq->attr.act_nr_of_cqes);

	init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
	if (!init_attr) {
		ret = -ENOMEM;
		pr_err("no mem for ehea_qp_init_attr\n");
		goto out_free;
	}

	init_attr->low_lat_rq1 = 1;
	init_attr->signalingtype = 1;	/* generate CQE if specified in WQE */
	init_attr->rq_count = 3;
	init_attr->qp_token = queue_token;
	init_attr->max_nr_send_wqes = pr_cfg->max_entries_sq;
	init_attr->max_nr_rwqes_rq1 = pr_cfg->max_entries_rq1;
	init_attr->max_nr_rwqes_rq2 = pr_cfg->max_entries_rq2;
	init_attr->max_nr_rwqes_rq3 = pr_cfg->max_entries_rq3;
	init_attr->wqe_size_enc_sq = EHEA_SG_SQ;
	init_attr->wqe_size_enc_rq1 = EHEA_SG_RQ1;
	init_attr->wqe_size_enc_rq2 = EHEA_SG_RQ2;
	init_attr->wqe_size_enc_rq3 = EHEA_SG_RQ3;
	init_attr->rq2_threshold = EHEA_RQ2_THRESHOLD;
	init_attr->rq3_threshold = EHEA_RQ3_THRESHOLD;
	init_attr->port_nr = port->logical_port_id;
	init_attr->send_cq_handle = pr->send_cq->fw_handle;
	init_attr->recv_cq_handle = pr->recv_cq->fw_handle;
	init_attr->aff_eq_handle = port->qp_eq->fw_handle;

	pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
	if (!pr->qp) {
		pr_err("create_qp failed\n");
		ret = -EIO;
		goto out_free;
	}

	if (netif_msg_ifup(port))
		pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
			init_attr->qp_nr,
			init_attr->act_nr_send_wqes,
			init_attr->act_nr_rwqes_rq1,
			init_attr->act_nr_rwqes_rq2,
			init_attr->act_nr_rwqes_rq3);

	pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;

	ret = ehea_init_q_skba(&pr->sq_skba, pr->sq_skba_size);
	ret |= ehea_init_q_skba(&pr->rq1_skba, init_attr->act_nr_rwqes_rq1 + 1);
	ret |= ehea_init_q_skba(&pr->rq2_skba, init_attr->act_nr_rwqes_rq2 + 1);
	ret |= ehea_init_q_skba(&pr->rq3_skba, init_attr->act_nr_rwqes_rq3 + 1);
	if (ret)
		goto out_free;

	pr->swqe_refill_th = init_attr->act_nr_send_wqes / 10;
	if (ehea_gen_smrs(pr) != 0) {
		ret = -EIO;
		goto out_free;
	}

	atomic_set(&pr->swqe_avail, init_attr->act_nr_send_wqes - 1);

	kfree(init_attr);

	netif_napi_add(pr->port->netdev, &pr->napi, ehea_poll, 64);

	pr->lro_mgr.max_aggr = pr->port->lro_max_aggr;
	pr->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS;
	pr->lro_mgr.lro_arr = pr->lro_desc;
	pr->lro_mgr.get_skb_header = get_skb_hdr;
	pr->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	pr->lro_mgr.dev = port->netdev;
	pr->lro_mgr.ip_summed = CHECKSUM_UNNECESSARY;
	pr->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	ret = 0;
	goto out;

out_free:
	kfree(init_attr);
	vfree(pr->sq_skba.arr);
	vfree(pr->rq1_skba.arr);
	vfree(pr->rq2_skba.arr);
	vfree(pr->rq3_skba.arr);
	ehea_destroy_qp(pr->qp);
	ehea_destroy_cq(pr->send_cq);
	ehea_destroy_cq(pr->recv_cq);
	ehea_destroy_eq(pr->eq);
out:
	return ret;
}
static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
{
	int ret, i;

	if (pr->qp)
		netif_napi_del(&pr->napi);

	ret = ehea_destroy_qp(pr->qp);

	if (!ret) {
		ehea_destroy_cq(pr->send_cq);
		ehea_destroy_cq(pr->recv_cq);
		ehea_destroy_eq(pr->eq);

		for (i = 0; i < pr->rq1_skba.len; i++)
			if (pr->rq1_skba.arr[i])
				dev_kfree_skb(pr->rq1_skba.arr[i]);

		for (i = 0; i < pr->rq2_skba.len; i++)
			if (pr->rq2_skba.arr[i])
				dev_kfree_skb(pr->rq2_skba.arr[i]);

		for (i = 0; i < pr->rq3_skba.len; i++)
			if (pr->rq3_skba.arr[i])
				dev_kfree_skb(pr->rq3_skba.arr[i]);

		for (i = 0; i < pr->sq_skba.len; i++)
			if (pr->sq_skba.arr[i])
				dev_kfree_skb(pr->sq_skba.arr[i]);

		vfree(pr->rq1_skba.arr);
		vfree(pr->rq2_skba.arr);
		vfree(pr->rq3_skba.arr);
		vfree(pr->sq_skba.arr);
		ret = ehea_rem_smrs(pr);
	}
	return ret;
}
/*
 * The write_* functions store information in swqe which is used by
 * the hardware to calculate the ip/tcp/udp checksum
 */

static inline void write_ip_start_end(struct ehea_swqe *swqe,
				      const struct sk_buff *skb)
{
	swqe->ip_start = skb_network_offset(skb);
	swqe->ip_end = (u8)(swqe->ip_start + ip_hdrlen(skb) - 1);
}

static inline void write_tcp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct tcphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}

static inline void write_udp_offset_end(struct ehea_swqe *swqe,
					const struct sk_buff *skb)
{
	swqe->tcp_offset =
		(u8)(swqe->ip_end + 1 + offsetof(struct udphdr, check));

	swqe->tcp_end = (u16)skb->len - 1;
}
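/*
 * Field-reuse note: the SWQE carries a single checksum offset/end pair, so
 * write_udp_offset_end() stores the UDP values in the same tcp_offset and
 * tcp_end fields that the TCP variant uses. Only the offset of the checksum
 * field inside the L4 header differs: offsetof(struct tcphdr, check) is 16,
 * while offsetof(struct udphdr, check) is 6.
 */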
static void write_swqe2_TSO(struct sk_buff *skb,
			    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	int skb_data_size = skb_headlen(skb);
	int headersize;

	/* Packet is TCP with TSO enabled */
	swqe->tx_control |= EHEA_SWQE_TSO;
	swqe->mss = skb_shinfo(skb)->gso_size;
	/* copy only eth/ip/tcp headers to immediate data and
	 * the rest of skb->data to sg1entry
	 */
	headersize = ETH_HLEN + ip_hdrlen(skb) + tcp_hdrlen(skb);

	skb_data_size = skb_headlen(skb);

	if (skb_data_size >= headersize) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, headersize);
		swqe->immediate_data_length = headersize;

		if (skb_data_size > headersize) {
			/* set sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - headersize;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + headersize);
			swqe->descriptors++;
		}
	} else
		pr_err("cannot handle fragmented headers\n");
}
static void write_swqe2_nonTSO(struct sk_buff *skb,
			       struct ehea_swqe *swqe, u32 lkey)
{
	int skb_data_size = skb_headlen(skb);
	u8 *imm_data = &swqe->u.immdata_desc.immediate_data[0];
	struct ehea_vsgentry *sg1entry = &swqe->u.immdata_desc.sg_entry;

	/* Packet is any nonTSO type
	 *
	 * Copy as much as possible skb->data to immediate data and
	 * the rest to sg1entry
	 */
	if (skb_data_size >= SWQE2_MAX_IMM) {
		/* copy immediate data */
		skb_copy_from_linear_data(skb, imm_data, SWQE2_MAX_IMM);

		swqe->immediate_data_length = SWQE2_MAX_IMM;

		if (skb_data_size > SWQE2_MAX_IMM) {
			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = skb_data_size - SWQE2_MAX_IMM;
			sg1entry->vaddr =
				ehea_map_vaddr(skb->data + SWQE2_MAX_IMM);
			swqe->descriptors++;
		}
	} else {
		skb_copy_from_linear_data(skb, imm_data, skb_data_size);
		swqe->immediate_data_length = skb_data_size;
	}
}
static inline void write_swqe2_data(struct sk_buff *skb, struct net_device *dev,
				    struct ehea_swqe *swqe, u32 lkey)
{
	struct ehea_vsgentry *sg_list, *sg1entry, *sgentry;
	skb_frag_t *frag;
	int nfrags, sg1entry_contains_frag_data, i;

	nfrags = skb_shinfo(skb)->nr_frags;
	sg1entry = &swqe->u.immdata_desc.sg_entry;
	sg_list = (struct ehea_vsgentry *)&swqe->u.immdata_desc.sg_list;
	swqe->descriptors = 0;
	sg1entry_contains_frag_data = 0;

	if ((dev->features & NETIF_F_TSO) && skb_shinfo(skb)->gso_size)
		write_swqe2_TSO(skb, swqe, lkey);
	else
		write_swqe2_nonTSO(skb, swqe, lkey);

	/* write descriptors */
	if (nfrags > 0) {
		if (swqe->descriptors == 0) {
			/* sg1entry not yet used */
			frag = &skb_shinfo(skb)->frags[0];

			/* copy sg1entry data */
			sg1entry->l_key = lkey;
			sg1entry->len = frag->size;
			sg1entry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
			sg1entry_contains_frag_data = 1;
		}

		for (i = sg1entry_contains_frag_data; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			sgentry = &sg_list[i - sg1entry_contains_frag_data];

			sgentry->l_key = lkey;
			sgentry->len = frag->size;
			sgentry->vaddr =
				ehea_map_vaddr(page_address(frag->page)
					       + frag->page_offset);
			swqe->descriptors++;
		}
	}
}
static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
{
	int ret = 0;
	u64 hret;
	u8 reg_type;

	/* De/Register untagged packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_UNTAGGED;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (tagged)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
		goto out_herr;
	}

	/* De/Register VLAN packets */
	reg_type = EHEA_BCMC_BROADCAST | EHEA_BCMC_VLANID_ALL;
	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, port->mac_addr, 0, hcallid);
	if (hret != H_SUCCESS) {
		pr_err("%sregistering bc address failed (vlan)\n",
		       hcallid == H_REG_BCMC ? "" : "de");
		ret = -EIO;
	}
out_herr:
	return ret;
}
static int ehea_set_mac_addr(struct net_device *dev, void *sa)
{
	struct ehea_port *port = netdev_priv(dev);
	struct sockaddr *mac_addr = sa;
	struct hcp_ehea_port_cb0 *cb0;
	int ret;
	u64 hret;

	if (!is_valid_ether_addr(mac_addr->sa_data)) {
		ret = -EADDRNOTAVAIL;
		goto out;
	}

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		pr_err("no mem for cb0\n");
		ret = -ENOMEM;
		goto out;
	}

	memcpy(&(cb0->port_mac_addr), &(mac_addr->sa_data[0]), ETH_ALEN);

	cb0->port_mac_addr = cb0->port_mac_addr >> 16;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id, H_PORT_CB0,
				       EHEA_BMASK_SET(H_PORT_CB0_MAC, 1), cb0);
	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_free;
	}

	memcpy(dev->dev_addr, mac_addr->sa_data, dev->addr_len);

	/* Deregister old MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_DEREG_BCMC);
		if (ret)
			goto out_upregs;
	}

	port->mac_addr = cb0->port_mac_addr << 16;

	/* Register new MAC in pHYP */
	if (port->state == EHEA_PORT_UP) {
		ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
		if (ret)
			goto out_upregs;
	}

	ret = 0;

out_upregs:
	ehea_update_bcmc_registrations();
out_free:
	free_page((unsigned long)cb0);
out:
	return ret;
}
static void ehea_promiscuous_error(u64 hret, int enable)
{
	if (hret == H_AUTHORITY)
		pr_info("Hypervisor denied %sabling promiscuous mode\n",
			enable == 1 ? "en" : "dis");
	else
		pr_err("failed %sabling promiscuous mode\n",
		       enable == 1 ? "en" : "dis");
}

static void ehea_promiscuous(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	struct hcp_ehea_port_cb7 *cb7;
	u64 hret;

	if (enable == port->promisc)
		return;

	cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
	if (!cb7) {
		pr_err("no mem for cb7\n");
		goto out;
	}

	/* Modify Pxs_DUCQPN in CB7 */
	cb7->def_uc_qpn = enable == 1 ? port->port_res[0].qp->fw_handle : 0;

	hret = ehea_h_modify_ehea_port(port->adapter->handle,
				       port->logical_port_id,
				       H_PORT_CB7, H_PORT_CB7_DUCQPN, cb7);
	if (hret) {
		ehea_promiscuous_error(hret, enable);
		goto out;
	}

	port->promisc = enable;
out:
	free_page((unsigned long)cb7);
}
static u64 ehea_multicast_reg_helper(struct ehea_port *port, u64 mc_mac_addr,
				     u32 hcallid)
{
	u64 hret;
	u8 reg_type;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_UNTAGGED;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
	if (hret)
		goto out;

	reg_type = EHEA_BCMC_SCOPE_ALL | EHEA_BCMC_MULTICAST
		 | EHEA_BCMC_VLANID_ALL;

	hret = ehea_h_reg_dereg_bcmc(port->adapter->handle,
				     port->logical_port_id,
				     reg_type, mc_mac_addr, 0, hcallid);
out:
	return hret;
}
static int ehea_drop_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_mc_list *mc_entry = port->mc_list;
	struct list_head *pos;
	struct list_head *temp;
	int ret = 0;
	u64 hret;

	list_for_each_safe(pos, temp, &(port->mc_list->list)) {
		mc_entry = list_entry(pos, struct ehea_mc_list, list);

		hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
						 H_DEREG_BCMC);
		if (hret) {
			pr_err("failed deregistering mcast MAC\n");
			ret = -EIO;
		}

		list_del(pos);
		kfree(mc_entry);
	}
	return ret;
}
static void ehea_allmulti(struct net_device *dev, int enable)
{
	struct ehea_port *port = netdev_priv(dev);
	u64 hret;

	if (!port->allmulti) {
		if (enable) {
			/* Enable ALLMULTI */
			ehea_drop_multicast_list(dev);
			hret = ehea_multicast_reg_helper(port, 0, H_REG_BCMC);
			if (!hret)
				port->allmulti = 1;
			else
				netdev_err(dev,
					   "failed enabling IFF_ALLMULTI\n");
		}
	} else
		if (!enable) {
			/* Disable ALLMULTI */
			hret = ehea_multicast_reg_helper(port, 0, H_DEREG_BCMC);
			if (!hret)
				port->allmulti = 0;
			else
				netdev_err(dev,
					   "failed disabling IFF_ALLMULTI\n");
		}
}
static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
{
	struct ehea_mc_list *ehea_mcl_entry;
	u64 hret;

	ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
	if (!ehea_mcl_entry) {
		pr_err("no mem for mcl_entry\n");
		return;
	}

	INIT_LIST_HEAD(&ehea_mcl_entry->list);

	memcpy(&ehea_mcl_entry->macaddr, mc_mac_addr, ETH_ALEN);

	hret = ehea_multicast_reg_helper(port, ehea_mcl_entry->macaddr,
					 H_REG_BCMC);
	if (!hret)
		list_add(&ehea_mcl_entry->list, &port->mc_list->list);
	else {
		pr_err("failed registering mcast MAC\n");
		kfree(ehea_mcl_entry);
	}
}
static void ehea_set_multicast_list(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct netdev_hw_addr *ha;
	int ret;

	if (dev->flags & IFF_PROMISC) {
		ehea_promiscuous(dev, 1);
		return;
	}
	ehea_promiscuous(dev, 0);

	if (dev->flags & IFF_ALLMULTI) {
		ehea_allmulti(dev, 1);
		goto out;
	}
	ehea_allmulti(dev, 0);

	if (!netdev_mc_empty(dev)) {
		ret = ehea_drop_multicast_list(dev);
		if (ret) {
			/* Dropping the current multicast list failed.
			 * Enabling ALL_MULTI is the best we can do.
			 */
			ehea_allmulti(dev, 1);
		}

		if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
			pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
				port->adapter->max_mc_mac);
			goto out;
		}

		netdev_for_each_mc_addr(ha, dev)
			ehea_add_multicast_entry(port, ha->addr);
	}
out:
	ehea_update_bcmc_registrations();
}
static int ehea_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > EHEA_MAX_PACKET_SIZE))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}
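/*
 * The lower bound of 68 above is the minimum IPv4 MTU (RFC 791 requires
 * every host to accept 68-octet datagrams); the upper bound is the
 * jumbo-frame limit EHEA_MAX_PACKET_SIZE, the same size used for RQ3
 * buffers in ehea_refill_rq3().
 */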
static void ehea_xmit2(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe, u32 lkey)
{
	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IP_CHECKSUM
				 | EHEA_SWQE_TCP_CHECKSUM
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;

		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control &= ~EHEA_SWQE_TCP_CHECKSUM;
			else
				write_udp_offset_end(swqe, skb);
		} else if (iph->protocol == IPPROTO_TCP) {
			write_tcp_offset_end(swqe, skb);
		}

		/* icmp (big data) and ip segmentation packets (all other ip
		   packets) do not require any special handling */

	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC
				 | EHEA_SWQE_IMM_DATA_PRESENT
				 | EHEA_SWQE_DESCRIPTORS_PRESENT;
	}

	write_swqe2_data(skb, dev, swqe, lkey);
}
static void ehea_xmit3(struct sk_buff *skb, struct net_device *dev,
		       struct ehea_swqe *swqe)
{
	int nfrags = skb_shinfo(skb)->nr_frags;
	u8 *imm_data = &swqe->u.immdata_nodesc.immediate_data[0];
	skb_frag_t *frag;
	int i;

	if (skb->protocol == htons(ETH_P_IP)) {
		const struct iphdr *iph = ip_hdr(skb);

		/* IPv4 */
		write_ip_start_end(swqe, skb);

		if (iph->protocol == IPPROTO_TCP) {
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_TCP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;

			write_tcp_offset_end(swqe, skb);

		} else if (iph->protocol == IPPROTO_UDP) {
			if ((iph->frag_off & IP_MF) ||
			    (iph->frag_off & IP_OFFSET))
				/* IP fragment, so don't change cs */
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IMM_DATA_PRESENT;
			else {
				swqe->tx_control |= EHEA_SWQE_CRC
						 | EHEA_SWQE_IP_CHECKSUM
						 | EHEA_SWQE_TCP_CHECKSUM
						 | EHEA_SWQE_IMM_DATA_PRESENT;

				write_udp_offset_end(swqe, skb);
			}
		} else {
			/* icmp (big data) and
			   ip segmentation packets (all other ip packets) */
			swqe->tx_control |= EHEA_SWQE_CRC
					 | EHEA_SWQE_IP_CHECKSUM
					 | EHEA_SWQE_IMM_DATA_PRESENT;
		}
	} else {
		/* Other Ethernet Protocol */
		swqe->tx_control |= EHEA_SWQE_CRC | EHEA_SWQE_IMM_DATA_PRESENT;
	}
	/* copy (immediate) data */
	if (nfrags == 0) {
		/* data is in a single piece */
		skb_copy_from_linear_data(skb, imm_data, skb->len);
	} else {
		/* first copy data from the skb->data buffer ... */
		skb_copy_from_linear_data(skb, imm_data,
					  skb_headlen(skb));
		imm_data += skb_headlen(skb);

		/* ... then copy data from the fragments */
		for (i = 0; i < nfrags; i++) {
			frag = &skb_shinfo(skb)->frags[i];
			memcpy(imm_data,
			       page_address(frag->page) + frag->page_offset,
			       frag->size);
			imm_data += frag->size;
		}
	}
	swqe->immediate_data_length = skb->len;
	dev_kfree_skb(skb);
}
static inline int ehea_hash_skb(struct sk_buff *skb, int num_qps)
{
	struct tcphdr *tcp;
	u32 tmp;

	if ((skb->protocol == htons(ETH_P_IP)) &&
	    (ip_hdr(skb)->protocol == IPPROTO_TCP)) {
		tcp = (struct tcphdr *)(skb_network_header(skb) +
					(ip_hdr(skb)->ihl * 4));
		tmp = (tcp->source + (tcp->dest << 16)) % 31;
		tmp += ip_hdr(skb)->daddr % 31;
		return tmp % num_qps;
	} else
		return 0;
}
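/*
 * Worked example for the hash above (raw big-endian field values, which is
 * fine here since only per-flow consistency matters, not portability):
 * with tcp->source = 100, tcp->dest = 200 and daddr = 0x0A000001,
 *
 *   tmp  = (100 + (200 << 16)) % 31 = 13107300 % 31  = 4
 *   tmp += 0x0A000001 % 31          = 167772161 % 31 = 6   ->  tmp = 10
 *
 * so with num_qps = 4 the flow maps to send queue 10 % 4 = 2. Non-TCP
 * traffic always hashes to queue 0.
 */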
static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_swqe *swqe;
	unsigned long flags;
	u32 lkey;
	int swqe_index;
	struct ehea_port_res *pr;

	pr = &port->port_res[ehea_hash_skb(skb, port->num_tx_qps)];

	if (!spin_trylock(&pr->xmit_lock))
		return NETDEV_TX_BUSY;

	if (pr->queue_stopped) {
		spin_unlock(&pr->xmit_lock);
		return NETDEV_TX_BUSY;
	}

	swqe = ehea_get_swqe(pr->qp, &swqe_index);
	memset(swqe, 0, SWQE_HEADER_SIZE);
	atomic_dec(&pr->swqe_avail);

	if (vlan_tx_tag_present(skb)) {
		swqe->tx_control |= EHEA_SWQE_VLAN_INSERT;
		swqe->vlan_tag = vlan_tx_tag_get(skb);
	}

	pr->tx_packets++;
	pr->tx_bytes += skb->len;

	if (skb->len <= SWQE3_MAX_IMM) {
		u32 sig_iv = port->sig_comp_iv;
		u32 swqe_num = pr->swqe_id_counter;
		ehea_xmit3(skb, dev, swqe);
		swqe->wr_id = EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE3_TYPE)
			| EHEA_BMASK_SET(EHEA_WR_ID_COUNT, swqe_num);
		if (pr->swqe_ll_count >= (sig_iv - 1)) {
			swqe->wr_id |= EHEA_BMASK_SET(EHEA_WR_ID_REFILL,
						      sig_iv);
			swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
			pr->swqe_ll_count = 0;
		} else
			pr->swqe_ll_count += 1;
	} else {
		swqe->wr_id =
			EHEA_BMASK_SET(EHEA_WR_ID_TYPE, EHEA_SWQE2_TYPE)
		      | EHEA_BMASK_SET(EHEA_WR_ID_COUNT, pr->swqe_id_counter)
		      | EHEA_BMASK_SET(EHEA_WR_ID_REFILL, 1)
		      | EHEA_BMASK_SET(EHEA_WR_ID_INDEX, pr->sq_skba.index);
		pr->sq_skba.arr[pr->sq_skba.index] = skb;

		pr->sq_skba.index++;
		pr->sq_skba.index &= (pr->sq_skba.len - 1);

		lkey = pr->send_mr.lkey;
		ehea_xmit2(skb, dev, swqe, lkey);
		swqe->tx_control |= EHEA_SWQE_SIGNALLED_COMPLETION;
	}
	pr->swqe_id_counter += 1;

	netif_info(port, tx_queued, dev,
		   "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
	if (netif_msg_tx_queued(port))
		ehea_dump(swqe, 512, "swqe");

	if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
		netif_stop_queue(dev);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}

	ehea_post_swqe(pr->qp, swqe);

	if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
		spin_lock_irqsave(&pr->netif_queue, flags);
		if (unlikely(atomic_read(&pr->swqe_avail) <= 1)) {
			pr->p_stats.queue_stopped++;
			netif_stop_queue(dev);
			pr->queue_stopped = 1;
		}
		spin_unlock_irqrestore(&pr->netif_queue, flags);
	}
	dev->trans_start = jiffies; /* NETIF_F_LLTX driver :( */
	spin_unlock(&pr->xmit_lock);

	return NETDEV_TX_OK;
}
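/*
 * Transmit path summary: frames whose entire length fits into the SWQE3
 * immediate area (skb->len <= SWQE3_MAX_IMM) are copied inline by
 * ehea_xmit3() and the skb is freed immediately; completions for them are
 * only requested every sig_comp_iv frames. Larger frames go through
 * ehea_xmit2() as SWQE2 descriptors that reference the skb data, so the
 * skb is parked in sq_skba and reclaimed in ehea_proc_cqes() when the
 * signalled completion arrives.
 */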
static void ehea_vlan_rx_register(struct net_device *dev,
				  struct vlan_group *grp)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	u64 hret;

	port->vgrp = grp;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		goto out;
	}

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		pr_err("modify_ehea_port failed\n");

	free_page((unsigned long)cb1);
out:
	return;
}
static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] |= ((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		pr_err("modify_ehea_port failed\n");
out:
	free_page((unsigned long)cb1);
	return;
}
static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_ehea_port_cb1 *cb1;
	int index;
	u64 hret;

	vlan_group_set_device(port->vgrp, vid, NULL);

	cb1 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb1) {
		pr_err("no mem for cb1\n");
		goto out;
	}

	hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
				      H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_port failed\n");
		goto out;
	}

	index = (vid / 64);
	cb1->vlan_filter[index] &= ~((u64)(0x8000000000000000 >> (vid & 0x3F)));

	hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
				       H_PORT_CB1, H_PORT_CB1_ALL, cb1);
	if (hret != H_SUCCESS)
		pr_err("modify_ehea_port failed\n");
out:
	free_page((unsigned long)cb1);
}
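/*
 * VLAN filter bit layout used by the two functions above: the 4096
 * possible VIDs are spread over vlan_filter[], 64 bits per word, MSB
 * first, where index = vid / 64 selects the word and the MSB-anchored
 * shift picks the bit. For example, vid 100 selects word 1 and bit
 * (100 & 0x3F) = 36, i.e. the mask 0x8000000000000000 >> 36.
 */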
int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
{
	int ret = -EIO;
	u64 hret;
	u16 dummy16 = 0;
	u64 dummy64 = 0;
	struct hcp_modify_qp_cb0 *cb0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (1)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (1)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (2)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_INITIALIZED;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (2)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (3)\n");
		goto out;
	}

	cb0->qp_ctl_reg = H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND;
	hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
				     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
				     &dummy64, &dummy64, &dummy16, &dummy16);
	if (hret != H_SUCCESS) {
		pr_err("modify_ehea_qp failed (3)\n");
		goto out;
	}

	hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
				    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
	if (hret != H_SUCCESS) {
		pr_err("query_ehea_qp failed (4)\n");
		goto out;
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);
	return ret;
}
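/*
 * QP bring-up sequence performed above, one query/modify pair per step:
 * first H_QP_CR_STATE_INITIALIZED, then H_QP_CR_ENABLED | INITIALIZED,
 * then H_QP_CR_ENABLED | H_QP_CR_STATE_RDY2SND (ready to send, i.e. fully
 * up). Each ehea_h_query_ehea_qp() re-reads the control block so that only
 * the qp_ctl_reg field is changed by the following modify call; the final
 * query merely verifies that the hypervisor accepted the last transition.
 */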
static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
			       int add_tx_qps)
{
	int ret = -EIO;
	int i;
	struct port_res_cfg pr_cfg, pr_cfg_small_rx;
	enum ehea_eq_type eq_type = EHEA_EQ;

	port->qp_eq = ehea_create_eq(port->adapter, eq_type,
				     EHEA_MAX_ENTRIES_EQ, 1);
	if (!port->qp_eq) {
		ret = -EINVAL;
		pr_err("ehea_create_eq failed (qp_eq)\n");
		goto out_kill_eq;
	}

	pr_cfg.max_entries_rcq = rq1_entries + rq2_entries + rq3_entries;
	pr_cfg.max_entries_scq = sq_entries * 2;
	pr_cfg.max_entries_sq = sq_entries;
	pr_cfg.max_entries_rq1 = rq1_entries;
	pr_cfg.max_entries_rq2 = rq2_entries;
	pr_cfg.max_entries_rq3 = rq3_entries;

	pr_cfg_small_rx.max_entries_rcq = 1;
	pr_cfg_small_rx.max_entries_scq = sq_entries;
	pr_cfg_small_rx.max_entries_sq = sq_entries;
	pr_cfg_small_rx.max_entries_rq1 = 1;
	pr_cfg_small_rx.max_entries_rq2 = 1;
	pr_cfg_small_rx.max_entries_rq3 = 1;

	for (i = 0; i < def_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i], &pr_cfg, i);
		if (ret)
			goto out_clean_pr;
	}
	for (i = def_qps; i < def_qps + add_tx_qps; i++) {
		ret = ehea_init_port_res(port, &port->port_res[i],
					 &pr_cfg_small_rx, i);
		if (ret)
			goto out_clean_pr;
	}

	return 0;

out_clean_pr:
	while (--i >= 0)
		ehea_clean_portres(port, &port->port_res[i]);

out_kill_eq:
	ehea_destroy_eq(port->qp_eq);
	return ret;
}

static int ehea_clean_all_portres(struct ehea_port *port)
{
	int ret = 0;
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		ret |= ehea_clean_portres(port, &port->port_res[i]);

	ret |= ehea_destroy_eq(port->qp_eq);

	return ret;
}

static void ehea_remove_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return;

	ehea_rem_mr(&adapter->mr);
}

static int ehea_add_adapter_mr(struct ehea_adapter *adapter)
{
	if (adapter->active_ports)
		return 0;

	return ehea_reg_kernel_mr(adapter, &adapter->mr);
}

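/*
 * Bring the port up: create the port resources, configure the default
 * QP, register interrupts, activate all QPs, pre-fill the receive
 * queues and register the broadcast/multicast MAC. Any failure unwinds
 * in reverse order through the out_* labels.
 */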
static int ehea_up(struct net_device *dev)
{
	int ret, i;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_UP)
		return 0;

	ret = ehea_port_res_setup(port, port->num_def_qps,
				  port->num_add_tx_qps);
	if (ret) {
		netdev_err(dev, "ehea_port_res_setup failed\n");
		goto out;
	}

	/* Set default QP for this port */
	ret = ehea_configure_port(port);
	if (ret) {
		netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	ret = ehea_reg_interrupts(dev);
	if (ret) {
		netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
		goto out_clean_pr;
	}

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
		if (ret) {
			netdev_err(dev, "activate_qp failed\n");
			goto out_free_irqs;
		}
	}

	for (i = 0; i < port->num_def_qps; i++) {
		ret = ehea_fill_port_res(&port->port_res[i]);
		if (ret) {
			netdev_err(dev, "ehea_fill_port_res failed\n");
			goto out_free_irqs;
		}
	}

	ret = ehea_broadcast_reg_helper(port, H_REG_BCMC);
	if (ret) {
		ret = -EIO;
		goto out_free_irqs;
	}

	port->state = EHEA_PORT_UP;

	ret = 0;
	goto out;

out_free_irqs:
	ehea_free_interrupts(dev);

out_clean_pr:
	ehea_clean_all_portres(port);
out:
	if (ret)
		netdev_info(dev, "Failed starting. ret=%i\n", ret);

	ehea_update_bcmc_registrations();
	ehea_update_firmware_handles();

	return ret;
}

static void port_napi_disable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_disable(&port->port_res[i].napi);
}

static void port_napi_enable(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++)
		napi_enable(&port->port_res[i].napi);
}

static int ehea_open(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	mutex_lock(&port->port_lock);

	netif_info(port, ifup, dev, "enabling port\n");

	ret = ehea_up(dev);
	if (!ret) {
		port_napi_enable(port);
		netif_start_queue(dev);
	}

	init_waitqueue_head(&port->swqe_avail_wq);
	init_waitqueue_head(&port->restart_wq);

	mutex_unlock(&port->port_lock);

	return ret;
}

static int ehea_down(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	if (port->state == EHEA_PORT_DOWN)
		return 0;

	ehea_drop_multicast_list(dev);
	ehea_broadcast_reg_helper(port, H_DEREG_BCMC);

	ehea_free_interrupts(dev);

	port->state = EHEA_PORT_DOWN;

	ehea_update_bcmc_registrations();

	ret = ehea_clean_all_portres(port);
	if (ret)
		netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);

	ehea_update_firmware_handles();

	return ret;
}

static int ehea_stop(struct net_device *dev)
{
	int ret;
	struct ehea_port *port = netdev_priv(dev);

	netif_info(port, ifdown, dev, "disabling port\n");

	set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
	cancel_work_sync(&port->reset_task);
	mutex_lock(&port->port_lock);
	netif_stop_queue(dev);
	port_napi_disable(port);
	ret = ehea_down(dev);
	mutex_unlock(&port->port_lock);
	clear_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);

	return ret;
}

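/*
 * Mark every send WQE with the PURGE flag so that its completion can
 * be recognized and discarded while the queue pair is being torn down.
 * Note: this works on a local copy of the QP struct, so the caller's
 * queue pointers are not advanced.
 */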
static void ehea_purge_sq(struct ehea_qp *orig_qp)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_swqe *swqe;
	int wqe_index;
	int i;

	for (i = 0; i < init_attr->act_nr_send_wqes; i++) {
		swqe = ehea_get_swqe(&qp, &wqe_index);
		swqe->tx_control |= EHEA_SWQE_PURGE;
	}
}

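/*
 * Wait for the send queues to drain: swqe_avail should climb back to
 * swqe_max (the SQ size minus two, apparently less the outstanding
 * low-latency WQEs tracked in swqe_ll_count) once all sends have
 * completed. Each queue gets up to 100ms before we warn that the SQ
 * was not flushed completely.
 */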
static void ehea_flush_sq(struct ehea_port *port)
{
	int i;

	for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		int swqe_max = pr->sq_skba_size - 2 - pr->swqe_ll_count;
		int ret;

		ret = wait_event_timeout(port->swqe_avail_wq,
					 atomic_read(&pr->swqe_avail) >= swqe_max,
					 msecs_to_jiffies(100));

		if (!ret) {
			pr_err("WARNING: sq not flushed completely\n");
			break;
		}
	}
}

int ehea_stop_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	struct hcp_modify_qp_cb0 *cb0;
	int ret = -EIO;
	int dret;
	int i;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		/* Purge send queue */
		ehea_purge_sq(qp);

		/* Disable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg &= ~H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			pr_err("modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			pr_err("query_ehea_qp failed (2)\n");
			goto out;
		}

		/* deregister shared memory regions */
		dret = ehea_rem_smrs(pr);
		if (dret) {
			pr_err("unreg shared memory region failed\n");
			goto out;
		}
	}

	ret = 0;
out:
	free_page((unsigned long)cb0);

	return ret;
}

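/*
 * After the memory regions have been re-registered (the lkey has
 * changed), patch every posted RQ2/RQ3 receive WQE so that its
 * scatter-gather entry carries the new lkey and the (possibly
 * remapped) virtual address of its skb data buffer.
 */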
void ehea_update_rqs(struct ehea_qp *orig_qp, struct ehea_port_res *pr)
{
	struct ehea_qp qp = *orig_qp;
	struct ehea_qp_init_attr *init_attr = &qp.init_attr;
	struct ehea_rwqe *rwqe;
	struct sk_buff **skba_rq2 = pr->rq2_skba.arr;
	struct sk_buff **skba_rq3 = pr->rq3_skba.arr;
	struct sk_buff *skb;
	u32 lkey = pr->recv_mr.lkey;

	int i;
	int index;

	for (i = 0; i < init_attr->act_nr_rwqes_rq2 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 2);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq2[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}

	for (i = 0; i < init_attr->act_nr_rwqes_rq3 + 1; i++) {
		rwqe = ehea_get_next_rwqe(&qp, 3);
		rwqe->sg_list[0].l_key = lkey;
		index = EHEA_BMASK_GET(EHEA_WR_ID_INDEX, rwqe->wr_id);
		skb = skba_rq3[index];
		if (skb)
			rwqe->sg_list[0].vaddr = ehea_map_vaddr(skb->data);
	}
}

int ehea_restart_qps(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);
	struct ehea_adapter *adapter = port->adapter;
	int ret = 0;
	int i;

	struct hcp_modify_qp_cb0 *cb0;
	u64 hret;
	u64 dummy64 = 0;
	u16 dummy16 = 0;

	cb0 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb0) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < (port->num_def_qps + port->num_add_tx_qps); i++) {
		struct ehea_port_res *pr = &port->port_res[i];
		struct ehea_qp *qp = pr->qp;

		ret = ehea_gen_smrs(pr);
		if (ret) {
			netdev_err(dev, "creation of shared memory regions failed\n");
			goto out;
		}

		ehea_update_rqs(qp, pr);

		/* Enable queue pair */
		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (1)\n");
			goto out;
		}

		cb0->qp_ctl_reg = (cb0->qp_ctl_reg & H_QP_CR_RES_STATE) << 8;
		cb0->qp_ctl_reg |= H_QP_CR_ENABLED;

		hret = ehea_h_modify_ehea_qp(adapter->handle, 0, qp->fw_handle,
					     EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG,
							    1), cb0, &dummy64,
					     &dummy64, &dummy16, &dummy16);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "modify_ehea_qp failed (1)\n");
			goto out;
		}

		hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
					    EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
					    cb0);
		if (hret != H_SUCCESS) {
			netdev_err(dev, "query_ehea_qp failed (2)\n");
			goto out;
		}

		/* refill entire queue */
		ehea_refill_rq1(pr, pr->rq1_skba.index, 0);
		ehea_refill_rq2(pr, 0);
		ehea_refill_rq3(pr, 0);
	}
out:
	free_page((unsigned long)cb0);

	return ret;
}

static void ehea_reset_port(struct work_struct *work)
{
	int ret;
	struct ehea_port *port =
		container_of(work, struct ehea_port, reset_task);
	struct net_device *dev = port->netdev;

	mutex_lock(&dlpar_mem_lock);
	port->resets++;
	mutex_lock(&port->port_lock);
	netif_stop_queue(dev);

	port_napi_disable(port);

	ehea_down(dev);

	ret = ehea_up(dev);
	if (ret)
		goto out;

	ehea_set_multicast_list(dev);

	netif_info(port, timer, dev, "reset successful\n");

	port_napi_enable(port);

	netif_wake_queue(dev);
out:
	mutex_unlock(&port->port_lock);
	mutex_unlock(&dlpar_mem_lock);
}

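/*
 * LPAR memory layout changed (DLPAR add/remove): quiesce all active
 * ports (stop queues, flush and disable the send QPs), drop the old
 * kernel memory region, register a new one, and restart the QPs with
 * refreshed shared memory regions before waking the queues again.
 */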
static void ehea_rereg_mrs(void)
{
	int ret, i;
	struct ehea_adapter *adapter;

	pr_info("LPAR memory changed - re-initializing driver\n");

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Shutdown all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];
				struct net_device *dev;

				if (!port)
					continue;

				dev = port->netdev;

				if (dev->flags & IFF_UP) {
					mutex_lock(&port->port_lock);
					netif_stop_queue(dev);
					ehea_flush_sq(port);
					ret = ehea_stop_qps(dev);
					if (ret) {
						mutex_unlock(&port->port_lock);
						goto out;
					}
					port_napi_disable(port);
					mutex_unlock(&port->port_lock);
				}
				reset_sq_restart_flag(port);
			}

			/* Unregister old memory region */
			ret = ehea_rem_mr(&adapter->mr);
			if (ret) {
				pr_err("unregister MR failed - driver inoperable!\n");
				goto out;
			}
		}

	clear_bit(__EHEA_STOP_XFER, &ehea_driver_flags);

	list_for_each_entry(adapter, &adapter_list, list)
		if (adapter->active_ports) {
			/* Register new memory region */
			ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
			if (ret) {
				pr_err("register MR failed - driver inoperable!\n");
				goto out;
			}

			/* Restart all ports */
			for (i = 0; i < EHEA_MAX_PORTS; i++) {
				struct ehea_port *port = adapter->port[i];

				if (port) {
					struct net_device *dev = port->netdev;

					if (dev->flags & IFF_UP) {
						mutex_lock(&port->port_lock);
						port_napi_enable(port);
						ret = ehea_restart_qps(dev);
						check_sqs(port);
						if (!ret)
							netif_wake_queue(dev);
						mutex_unlock(&port->port_lock);
					}
				}
			}
		}
	pr_info("re-initializing driver complete\n");
out:
	return;
}

static void ehea_tx_watchdog(struct net_device *dev)
{
	struct ehea_port *port = netdev_priv(dev);

	if (netif_carrier_ok(dev) &&
	    !test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))
		ehea_schedule_port_reset(port);
}

int ehea_sense_adapter_attr(struct ehea_adapter *adapter)
{
	struct hcp_query_ehea *cb;
	u64 hret;
	int ret;

	cb = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb) {
		ret = -ENOMEM;
		goto out;
	}

	hret = ehea_h_query_ehea(adapter->handle, cb);

	if (hret != H_SUCCESS) {
		ret = -EIO;
		goto out_herr;
	}

	adapter->max_mc_mac = cb->max_mc_mac - 1;
	ret = 0;

out_herr:
	free_page((unsigned long)cb);
out:
	return ret;
}

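/*
 * Query the jumbo frame setting through CB4; if jumbo frames are not
 * yet enabled on the port, try to switch them on and report the
 * resulting state through *jumbo.
 */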
int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
{
	struct hcp_ehea_port_cb4 *cb4;
	u64 hret;
	int ret = 0;

	*jumbo = 0;

	/* (Try to) enable *jumbo frames */
	cb4 = (void *)get_zeroed_page(GFP_KERNEL);
	if (!cb4) {
		pr_err("no mem for cb4\n");
		ret = -ENOMEM;
	} else {
		hret = ehea_h_query_ehea_port(port->adapter->handle,
					      port->logical_port_id,
					      H_PORT_CB4,
					      H_PORT_CB4_JUMBO, cb4);
		if (hret == H_SUCCESS) {
			if (cb4->jumbo_frame)
				*jumbo = 1;
			else {
				cb4->jumbo_frame = 1;
				hret = ehea_h_modify_ehea_port(
						port->adapter->handle,
						port->logical_port_id,
						H_PORT_CB4,
						H_PORT_CB4_JUMBO, cb4);
				if (hret == H_SUCCESS)
					*jumbo = 1;
			}
		} else
			ret = -EINVAL;

		free_page((unsigned long)cb4);
	}

	return ret;
}

static ssize_t ehea_show_port_id(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	return sprintf(buf, "%d", port->logical_port_id);
}

static DEVICE_ATTR(log_port_id, S_IRUSR | S_IRGRP | S_IROTH, ehea_show_port_id,
		   NULL);

static void __devinit logical_port_release(struct device *dev)
{
	struct ehea_port *port = container_of(dev, struct ehea_port, ofdev.dev);
	of_node_put(port->ofdev.dev.of_node);
}

static struct device *ehea_register_port(struct ehea_port *port,
					 struct device_node *dn)
{
	int ret;

	port->ofdev.dev.of_node = of_node_get(dn);
	port->ofdev.dev.parent = &port->adapter->ofdev->dev;
	port->ofdev.dev.bus = &ibmebus_bus_type;

	dev_set_name(&port->ofdev.dev, "port%d", port_name_cnt++);
	port->ofdev.dev.release = logical_port_release;

	ret = of_device_register(&port->ofdev);
	if (ret) {
		pr_err("failed to register device. ret=%d\n", ret);
		goto out;
	}

	ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
	if (ret) {
		pr_err("failed to register attributes, ret=%d\n", ret);
		goto out_unreg_of_dev;
	}

	return &port->ofdev.dev;

out_unreg_of_dev:
	of_device_unregister(&port->ofdev);
out:
	return NULL;
}

static void ehea_unregister_port(struct ehea_port *port)
{
	device_remove_file(&port->ofdev.dev, &dev_attr_log_port_id);
	of_device_unregister(&port->ofdev);
}

static const struct net_device_ops ehea_netdev_ops = {
	.ndo_open		= ehea_open,
	.ndo_stop		= ehea_stop,
	.ndo_start_xmit		= ehea_start_xmit,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ehea_netpoll,
#endif
	.ndo_get_stats		= ehea_get_stats,
	.ndo_set_mac_address	= ehea_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_multicast_list	= ehea_set_multicast_list,
	.ndo_change_mtu		= ehea_change_mtu,
	.ndo_vlan_rx_register	= ehea_vlan_rx_register,
	.ndo_vlan_rx_add_vid	= ehea_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= ehea_vlan_rx_kill_vid,
	.ndo_tx_timeout		= ehea_tx_watchdog,
};

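/*
 * Create one logical port: allocate the net_device, initialize the
 * port state and multicast list, register the port as an OF device
 * (which creates the log_port_id sysfs attribute), wire up the
 * netdev/ethtool ops and features, and finally register_netdev().
 * The error paths unwind in the opposite order.
 */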
struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
					 u32 logical_port_id,
					 struct device_node *dn)
{
	int ret;
	struct net_device *dev;
	struct ehea_port *port;
	struct device *port_dev;
	int jumbo;

	/* allocate memory for the port structures */
	dev = alloc_etherdev(sizeof(struct ehea_port));

	if (!dev) {
		pr_err("no mem for net_device\n");
		ret = -ENOMEM;
		goto out_err;
	}

	port = netdev_priv(dev);

	mutex_init(&port->port_lock);
	port->state = EHEA_PORT_DOWN;
	port->sig_comp_iv = sq_entries / 10;

	port->adapter = adapter;
	port->netdev = dev;
	port->logical_port_id = logical_port_id;

	port->msg_enable = netif_msg_init(msg_level, EHEA_MSG_DEFAULT);

	port->mc_list = kzalloc(sizeof(struct ehea_mc_list), GFP_KERNEL);
	if (!port->mc_list) {
		ret = -ENOMEM;
		goto out_free_ethdev;
	}

	INIT_LIST_HEAD(&port->mc_list->list);

	ret = ehea_sense_port_attr(port);
	if (ret)
		goto out_free_mc_list;

	port_dev = ehea_register_port(port, dn);
	if (!port_dev)
		goto out_free_mc_list;

	SET_NETDEV_DEV(dev, port_dev);

	/* initialize net_device structure */
	memcpy(dev->dev_addr, &port->mac_addr, ETH_ALEN);

	dev->netdev_ops = &ehea_netdev_ops;
	ehea_set_ethtool_ops(dev);

	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO
		      | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX
		      | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER
		      | NETIF_F_LLTX;
	dev->watchdog_timeo = EHEA_WATCH_DOG_TIMEOUT;

	if (use_lro)
		dev->features |= NETIF_F_LRO;

	INIT_WORK(&port->reset_task, ehea_reset_port);

	ret = register_netdev(dev);
	if (ret) {
		pr_err("register_netdev failed. ret=%d\n", ret);
		goto out_unreg_port;
	}

	port->lro_max_aggr = lro_max_aggr;

	ret = ehea_get_jumboframe_status(port, &jumbo);
	if (ret)
		netdev_err(dev, "failed determining jumbo frame status\n");

	netdev_info(dev, "Jumbo frames are %sabled\n",
		    jumbo == 1 ? "en" : "dis");

	adapter->active_ports++;

	return port;

out_unreg_port:
	ehea_unregister_port(port);

out_free_mc_list:
	kfree(port->mc_list);

out_free_ethdev:
	free_netdev(dev);

out_err:
	pr_err("setting up logical port with id=%d failed, ret=%d\n",
	       logical_port_id, ret);
	return NULL;
}

static void ehea_shutdown_single_port(struct ehea_port *port)
{
	struct ehea_adapter *adapter = port->adapter;

	cancel_work_sync(&port->reset_task);
	unregister_netdev(port->netdev);
	ehea_unregister_port(port);
	kfree(port->mc_list);
	free_netdev(port->netdev);
	adapter->active_ports--;
}

static int ehea_setup_ports(struct ehea_adapter *adapter)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;

	const u32 *dn_log_port_id;
	int i = 0;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (!dn_log_port_id) {
			pr_err("bad device node: eth_dn name=%s\n",
			       eth_dn->full_name);
			continue;
		}

		if (ehea_add_adapter_mr(adapter)) {
			pr_err("creating MR failed\n");
			of_node_put(eth_dn);
			return -EIO;
		}

		adapter->port[i] = ehea_setup_single_port(adapter,
							  *dn_log_port_id,
							  eth_dn);
		if (adapter->port[i])
			netdev_info(adapter->port[i]->netdev,
				    "logical port id #%d\n", *dn_log_port_id);
		else
			ehea_remove_adapter_mr(adapter);

		i++;
	}
	return 0;
}

static struct device_node *ehea_get_eth_dn(struct ehea_adapter *adapter,
					   u32 logical_port_id)
{
	struct device_node *lhea_dn;
	struct device_node *eth_dn = NULL;
	const u32 *dn_log_port_id;

	lhea_dn = adapter->ofdev->dev.of_node;
	while ((eth_dn = of_get_next_child(lhea_dn, eth_dn))) {

		dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
						 NULL);
		if (dn_log_port_id)
			if (*dn_log_port_id == logical_port_id)
				return eth_dn;
	}

	return NULL;
}

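/*
 * sysfs hooks for DLPAR port add/remove. Writing a logical port id to
 * the adapter's probe_port attribute hot-adds that port; writing it to
 * remove_port tears it down again, roughly (the device path may vary
 * by system):
 *
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/probe_port
 *	echo 1 > /sys/bus/ibmebus/devices/<adapter>/remove_port
 */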
static ssize_t ehea_probe_port(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	struct device_node *eth_dn = NULL;
	int i;

	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
			    logical_port_id);
		return -EINVAL;
	}

	eth_dn = ehea_get_eth_dn(adapter, logical_port_id);

	if (!eth_dn) {
		pr_info("no logical port with id %d found\n", logical_port_id);
		return -EINVAL;
	}

	if (ehea_add_adapter_mr(adapter)) {
		pr_err("creating MR failed\n");
		return -EIO;
	}

	port = ehea_setup_single_port(adapter, logical_port_id, eth_dn);

	of_node_put(eth_dn);

	if (port) {
		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (!adapter->port[i]) {
				adapter->port[i] = port;
				break;
			}

		netdev_info(port->netdev, "added: (logical port id=%d)\n",
			    logical_port_id);
	} else {
		ehea_remove_adapter_mr(adapter);
		return -EIO;
	}

	return (ssize_t) count;
}

static ssize_t ehea_remove_port(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct ehea_adapter *adapter = dev_get_drvdata(dev);
	struct ehea_port *port;
	int i;
	u32 logical_port_id;

	sscanf(buf, "%d", &logical_port_id);

	port = ehea_get_port(adapter, logical_port_id);

	if (port) {
		netdev_info(port->netdev, "removed: (logical port id=%d)\n",
			    logical_port_id);

		ehea_shutdown_single_port(port);

		for (i = 0; i < EHEA_MAX_PORTS; i++)
			if (adapter->port[i] == port) {
				adapter->port[i] = NULL;
				break;
			}
	} else {
		pr_err("removing port with logical port id=%d failed. port not configured.\n",
		       logical_port_id);
		return -EINVAL;
	}

	ehea_remove_adapter_mr(adapter);

	return (ssize_t) count;
}

static DEVICE_ATTR(probe_port, S_IWUSR, NULL, ehea_probe_port);
static DEVICE_ATTR(remove_port, S_IWUSR, NULL, ehea_remove_port);

int ehea_create_device_sysfs(struct platform_device *dev)
{
	int ret = device_create_file(&dev->dev, &dev_attr_probe_port);
	if (ret)
		goto out;

	ret = device_create_file(&dev->dev, &dev_attr_remove_port);
out:
	return ret;
}

void ehea_remove_device_sysfs(struct platform_device *dev)
{
	device_remove_file(&dev->dev, &dev_attr_probe_port);
	device_remove_file(&dev->dev, &dev_attr_remove_port);
}

static int __devinit ehea_probe_adapter(struct platform_device *dev,
					const struct of_device_id *id)
{
	struct ehea_adapter *adapter;
	const u64 *adapter_handle;
	int ret;

	if (!dev || !dev->dev.of_node) {
		pr_err("Invalid ibmebus device probed\n");
		return -EINVAL;
	}

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		ret = -ENOMEM;
		dev_err(&dev->dev, "no mem for ehea_adapter\n");
		goto out;
	}

	list_add(&adapter->list, &adapter_list);

	adapter->ofdev = dev;

	adapter_handle = of_get_property(dev->dev.of_node, "ibm,hea-handle",
					 NULL);
	if (adapter_handle)
		adapter->handle = *adapter_handle;

	if (!adapter->handle) {
		dev_err(&dev->dev, "failed getting handle for adapter"
			" '%s'\n", dev->dev.of_node->full_name);
		ret = -ENODEV;
		goto out_free_ad;
	}

	adapter->pd = EHEA_PD_ID;

	dev_set_drvdata(&dev->dev, adapter);

	/* initialize adapter and ports */
	/* get adapter properties */
	ret = ehea_sense_adapter_attr(adapter);
	if (ret) {
		dev_err(&dev->dev, "sense_adapter_attr failed: %d\n", ret);
		goto out_free_ad;
	}

	adapter->neq = ehea_create_eq(adapter,
				      EHEA_NEQ, EHEA_MAX_ENTRIES_EQ, 1);
	if (!adapter->neq) {
		ret = -EIO;
		dev_err(&dev->dev, "NEQ creation failed\n");
		goto out_free_ad;
	}

	tasklet_init(&adapter->neq_tasklet, ehea_neq_tasklet,
		     (unsigned long)adapter);

	ret = ibmebus_request_irq(adapter->neq->attr.ist1,
				  ehea_interrupt_neq, IRQF_DISABLED,
				  "ehea_neq", adapter);
	if (ret) {
		dev_err(&dev->dev, "requesting NEQ IRQ failed\n");
		goto out_kill_eq;
	}

	ret = ehea_create_device_sysfs(dev);
	if (ret)
		goto out_free_irq;

	ret = ehea_setup_ports(adapter);
	if (ret) {
		dev_err(&dev->dev, "setup_ports failed\n");
		goto out_rem_dev_sysfs;
	}

	ret = 0;
	goto out;

out_rem_dev_sysfs:
	ehea_remove_device_sysfs(dev);

out_free_irq:
	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);

out_kill_eq:
	ehea_destroy_eq(adapter->neq);

out_free_ad:
	list_del(&adapter->list);
	kfree(adapter);

out:
	ehea_update_firmware_handles();

	return ret;
}

static int __devexit ehea_remove(struct platform_device *dev)
{
	struct ehea_adapter *adapter = dev_get_drvdata(&dev->dev);
	int i;

	for (i = 0; i < EHEA_MAX_PORTS; i++)
		if (adapter->port[i]) {
			ehea_shutdown_single_port(adapter->port[i]);
			adapter->port[i] = NULL;
		}

	ehea_remove_device_sysfs(dev);

	ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
	tasklet_kill(&adapter->neq_tasklet);

	ehea_destroy_eq(adapter->neq);
	ehea_remove_adapter_mr(adapter);
	list_del(&adapter->list);
	kfree(adapter);

	ehea_update_firmware_handles();

	return 0;
}

void ehea_crash_handler(void)
{
	int i;

	if (ehea_fw_handles.arr)
		for (i = 0; i < ehea_fw_handles.num_entries; i++)
			ehea_h_free_resource(ehea_fw_handles.arr[i].adh,
					     ehea_fw_handles.arr[i].fwh,
					     FORCE_FREE);

	if (ehea_bcmc_regs.arr)
		for (i = 0; i < ehea_bcmc_regs.num_entries; i++)
			ehea_h_reg_dereg_bcmc(ehea_bcmc_regs.arr[i].adh,
					      ehea_bcmc_regs.arr[i].port_id,
					      ehea_bcmc_regs.arr[i].reg_type,
					      ehea_bcmc_regs.arr[i].macaddr,
					      0, H_DEREG_BCMC);
}

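/*
 * Memory hotplug notifier: keep the driver's section bitmap in sync
 * with memory going online/offline and re-register the memory regions
 * (ehea_rereg_mrs) so the firmware mappings stay valid. Runs under
 * dlpar_mem_lock to serialize against port resets.
 */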
static int ehea_mem_notifier(struct notifier_block *nb,
			     unsigned long action, void *data)
{
	int ret = NOTIFY_BAD;
	struct memory_notify *arg = data;

	mutex_lock(&dlpar_mem_lock);

	switch (action) {
	case MEM_CANCEL_OFFLINE:
		pr_info("memory offlining canceled\n");
		/* Re-add canceled memory block */
	case MEM_ONLINE:
		pr_info("memory is going online\n");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	case MEM_GOING_OFFLINE:
		pr_info("memory is going offline\n");
		set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
		if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
			goto out_unlock;
		ehea_rereg_mrs();
		break;
	default:
		break;
	}

	ehea_update_firmware_handles();
	ret = NOTIFY_OK;

out_unlock:
	mutex_unlock(&dlpar_mem_lock);
	return ret;
}

static struct notifier_block ehea_mem_nb = {
	.notifier_call = ehea_mem_notifier,
};

static int ehea_reboot_notifier(struct notifier_block *nb,
				unsigned long action, void *unused)
{
	if (action == SYS_RESTART) {
		pr_info("Reboot: freeing all eHEA resources\n");
		ibmebus_unregister_driver(&ehea_driver);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ehea_reboot_nb = {
	.notifier_call = ehea_reboot_notifier,
};

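/*
 * Validate the queue size module parameters against the supported
 * ranges. Per the parameter descriptions, entries are meant to be
 * 2^x - 1 with x in [6..14]; only the min/max bounds are enforced
 * here, e.g. rq1_entries must lie within
 * [EHEA_MIN_ENTRIES_QP, EHEA_MAX_ENTRIES_RQ1].
 */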
static int check_module_parm(void)
{
	int ret = 0;

	if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
		pr_info("Bad parameter: rq1_entries\n");
		ret = -EINVAL;
	}
	if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
		pr_info("Bad parameter: rq2_entries\n");
		ret = -EINVAL;
	}
	if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
	    (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
		pr_info("Bad parameter: rq3_entries\n");
		ret = -EINVAL;
	}
	if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
	    (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
		pr_info("Bad parameter: sq_entries\n");
		ret = -EINVAL;
	}

	return ret;
}

static ssize_t ehea_show_capabilities(struct device_driver *drv,
				      char *buf)
{
	return sprintf(buf, "%d", EHEA_CAPABILITIES);
}

static DRIVER_ATTR(capabilities, S_IRUSR | S_IRGRP | S_IROTH,
		   ehea_show_capabilities, NULL);

int __init ehea_module_init(void)
{
	int ret;

	pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);

	memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
	memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));

	mutex_init(&ehea_fw_handles.lock);
	spin_lock_init(&ehea_bcmc_regs.lock);

	ret = check_module_parm();
	if (ret)
		goto out;

	ret = ehea_create_busmap();
	if (ret)
		goto out;

	ret = register_reboot_notifier(&ehea_reboot_nb);
	if (ret)
		pr_info("failed registering reboot notifier\n");

	ret = register_memory_notifier(&ehea_mem_nb);
	if (ret)
		pr_info("failed registering memory remove notifier\n");

	ret = crash_shutdown_register(ehea_crash_handler);
	if (ret)
		pr_info("failed registering crash handler\n");

	ret = ibmebus_register_driver(&ehea_driver);
	if (ret) {
		pr_err("failed registering eHEA device driver on ebus\n");
		goto out2;
	}

	ret = driver_create_file(&ehea_driver.driver,
				 &driver_attr_capabilities);
	if (ret) {
		pr_err("failed to register capabilities attribute, ret=%d\n",
		       ret);
		goto out3;
	}

	return ret;

out3:
	ibmebus_unregister_driver(&ehea_driver);
out2:
	unregister_memory_notifier(&ehea_mem_nb);
	unregister_reboot_notifier(&ehea_reboot_nb);
	crash_shutdown_unregister(ehea_crash_handler);
out:
	return ret;
}

static void __exit ehea_module_exit(void)
{
	int ret;

	driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
	ibmebus_unregister_driver(&ehea_driver);
	unregister_reboot_notifier(&ehea_reboot_nb);
	ret = crash_shutdown_unregister(ehea_crash_handler);
	if (ret)
		pr_info("failed unregistering crash handler\n");
	unregister_memory_notifier(&ehea_mem_nb);
	kfree(ehea_fw_handles.arr);
	kfree(ehea_bcmc_regs.arr);
	ehea_destroy_busmap();
}

module_init(ehea_module_init);
module_exit(ehea_module_exit);