/**************************************************************************/
/*                                                                        */
/* IBM eServer i/pSeries Virtual Ethernet Device Driver                   */
/* Copyright (C) 2003 IBM Corp.                                           */
/*  Originally written by Dave Larson (larson1@us.ibm.com)                */
/*  Maintained by Santiago Leon (santil@us.ibm.com)                       */
/*                                                                        */
/*  This program is free software; you can redistribute it and/or modify  */
/*  it under the terms of the GNU General Public License as published by  */
/*  the Free Software Foundation; either version 2 of the License, or     */
/*  (at your option) any later version.                                   */
/*                                                                        */
/*  This program is distributed in the hope that it will be useful,       */
/*  but WITHOUT ANY WARRANTY; without even the implied warranty of        */
/*  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the         */
/*  GNU General Public License for more details.                          */
/*                                                                        */
/*  You should have received a copy of the GNU General Public License     */
/*  along with this program; if not, write to the Free Software           */
/*  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307   */
/*  USA                                                                   */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/pSeries LPAR Linux. It utilizes the logical LAN     */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/**************************************************************************/
/* TODO:
 - add support for sysfs
 - possibly remove procfs support
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <asm/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/uaccess.h>
#include <asm/firmware.h>
#include <linux/seq_file.h>

#include "ibmveth.h"
#undef DEBUG

#define ibmveth_printk(fmt, args...) \
	printk(KERN_DEBUG "%s: " fmt, __FILE__, ## args)

#define ibmveth_error_printk(fmt, args...) \
	printk(KERN_ERR "(%s:%3.3d ua:%x) ERROR: " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)

#ifdef DEBUG
#define ibmveth_debug_printk_no_adapter(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d): " fmt, __FILE__, __LINE__ , ## args)
#define ibmveth_debug_printk(fmt, args...) \
	printk(KERN_DEBUG "(%s:%3.3d ua:%x): " fmt, __FILE__, __LINE__ , adapter->vdev->unit_address, ## args)
#define ibmveth_assert(expr) \
	if (!(expr)) {                                   \
		printk(KERN_DEBUG "assertion failed (%s:%3.3d ua:%x): %s\n", __FILE__, __LINE__, adapter->vdev->unit_address, #expr); \
		BUG(); \
	}
#else
#define ibmveth_debug_printk_no_adapter(fmt, args...)
#define ibmveth_debug_printk(fmt, args...)
#define ibmveth_assert(expr)
#endif
static int ibmveth_open(struct net_device *dev);
static int ibmveth_close(struct net_device *dev);
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
static int ibmveth_poll(struct napi_struct *napi, int budget);
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb, struct net_device *dev);
static void ibmveth_set_multicast_list(struct net_device *dev);
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu);
static void ibmveth_proc_register_driver(void);
static void ibmveth_proc_unregister_driver(void);
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter);
static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter);
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
static struct kobj_type ktype_veth_pool;
#ifdef CONFIG_PROC_FS
#define IBMVETH_PROC_DIR "ibmveth"
static struct proc_dir_entry *ibmveth_proc_dir;
#endif
static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM i/pSeries Virtual Ethernet Driver";
#define ibmveth_driver_version "1.03"

MODULE_AUTHOR("Santiago Leon <santil@us.ibm.com>");
MODULE_DESCRIPTION("IBM i/pSeries Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);
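
/*
 * Both copybreak thresholds below trade one memcpy against an IOMMU
 * map/unmap pair: packets shorter than the threshold are copied (tx
 * into the pre-mapped bounce buffer, rx into a freshly allocated skb)
 * so no TCE has to be inserted and removed for them.  Being 0644
 * module parameters they should also be tunable at runtime, e.g.
 * through /sys/module/ibmveth/parameters/tx_copybreak (illustrative
 * path).
 */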
static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");
struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
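
/*
 * Each entry below pairs an ethtool string with the byte offset of a
 * u64 counter inside struct ibmveth_adapter; IBMVETH_GET_STAT then
 * fetches the counter with plain pointer arithmetic.  For example,
 *
 *	IBMVETH_GET_STAT(adapter, IBMVETH_STAT_OFF(tx_send_failed))
 *
 * expands to *(u64 *)((unsigned long)adapter +
 * offsetof(struct ibmveth_adapter, tx_send_failed)).
 */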
struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure", IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success", IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
};
/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off;
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >> IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle);
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID);
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK);
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return (adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD);
}
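
/*
 * How the rx queue is consumed: the queue is a ring shared with the
 * hypervisor, and there is no head/tail pointer.  Instead every entry
 * carries a toggle bit, and the driver keeps the value it expects in
 * rx_queue.toggle; an entry is "pending" while the two match.  Each
 * time the driver wraps past the end of the ring (see
 * ibmveth_rxq_recycle_buffer/ibmveth_rxq_harvest_buffer below) it
 * inverts its expected toggle, so entries left over from the previous
 * pass no longer appear pending.
 */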
/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool, u32 pool_index, u32 pool_size, u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8; /* replenish once 1/8 of the pool is in flight */
	pool->active = pool_active;
}
/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

	if(!pool->free_map) {
		return -1;
	}

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if(!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

	if(!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;

		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for(i = 0; i < pool->size; ++i) {
		pool->free_map[i] = i;
	}

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}
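
/*
 * free_map is a ring of buffer indices: ibmveth_replenish_buffer_pool
 * takes free slots at consumer_index and hands the buffers to the
 * hypervisor, while ibmveth_remove_buffer_from_pool returns indices at
 * producer_index.  A slot whose buffer is currently owned by the
 * hypervisor holds IBM_VETH_INVALID_MAP.
 */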
/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for(i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);

		if(!skb) {
			ibmveth_debug_printk("replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		ibmveth_assert(index != IBM_VETH_INVALID_MAP);
		ibmveth_assert(pool->skbuff[index] == NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64*)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

		if (lpar_rc != H_SUCCESS)
			goto failure;
		else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
		                 pool->dma_addr[index], pool->buff_size,
		                 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}
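
/*
 * The correlator written into the first 8 bytes of each buffer (and
 * echoed back by the hypervisor in the rx queue entry) encodes where
 * the buffer came from: bits 63..32 hold the pool index and bits 31..0
 * the slot within the pool, so pool 2 slot 5 travels as
 * 0x0000000200000005.  ibmveth_remove_buffer_from_pool() and
 * ibmveth_rxq_get_buffer() below split it apart again.
 */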
/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IbmVethNumBufferPools - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);
}
/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter, struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if(pool->skbuff && pool->dma_addr) {
		for(i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if(skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if(pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if(pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}
/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter, u64 correlator)
{
	unsigned int pool  = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	ibmveth_assert(skb != NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}
/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}
/* recycle the current buffer on the rx queue */
static void ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;

	ibmveth_assert(pool < IbmVethNumBufferPools);
	ibmveth_assert(index < adapter->rx_buff_pool[pool].size);

	if(!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		return;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if(lpar_rc != H_SUCCESS) {
		ibmveth_debug_printk("h_add_logical_lan_buffer failed during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
	}

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
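
/*
 * Recycling (above) hands the same mapped buffer straight back to the
 * hypervisor; harvesting (below) removes it from the pool for good,
 * which is what we want when the skb is being passed up the stack or
 * the pool has been deactivated through sysfs.
 */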
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if(++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if(adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if(adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if(adapter->rx_queue.queue_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
			dma_unmap_single(dev,
					 adapter->rx_queue.queue_dma,
					 adapter->rx_queue.queue_len,
					 DMA_BIDIRECTIONAL);
			adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->rx_queue.queue_addr);
		adapter->rx_queue.queue_addr = NULL;
	}

	for(i = 0; i<IbmVethNumBufferPools; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->netdev->mtu + IBMVETH_BUFF_OH,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}
static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/* After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once. */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}
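
/*
 * Opening the device registers three DMA-mapped areas with the
 * hypervisor: a 4K buffer list page, a 4K multicast filter list page
 * and the receive queue itself.  It then allocates the active rx
 * buffer pools and fires the interrupt handler once by hand so the
 * initial replenish cycle runs before any traffic arrives.
 */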
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	ibmveth_debug_printk("open starting\n");

	napi_enable(&adapter->napi);

	for(i = 0; i<IbmVethNumBufferPools; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

	if(!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	dev = &adapter->vdev->dev;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if(lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENONET;
	}

	for(i = 0; i<IbmVethNumBufferPools; i++) {
		if(!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			ibmveth_error_printk("unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			ibmveth_cleanup(adapter);
			napi_disable(&adapter->napi);
			return -ENOMEM;
		}
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	if((rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return rc;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		ibmveth_error_printk("unable to allocate bounce buffer\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		ibmveth_error_printk("unable to map bounce buffer\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	ibmveth_debug_printk("initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	ibmveth_debug_printk("open complete\n");

	return 0;
}
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	ibmveth_debug_printk("close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if(lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_free_logical_lan failed with %lx, continuing with close\n",
				     lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	adapter->rx_no_buffer = *(u64*)(((char*)adapter->buffer_list_addr) + 4096 - 8);

	ibmveth_cleanup(adapter);

	ibmveth_debug_printk("close complete\n");

	return 0;
}
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) {
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg | SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg | ADVERTISED_FIBRE);
	cmd->speed = SPEED_1000;
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 1;
	return 0;
}
static void netdev_get_drvinfo (struct net_device *dev, struct ethtool_drvinfo *info) {
	strncpy(info->driver, ibmveth_driver_name, sizeof(info->driver) - 1);
	strncpy(info->version, ibmveth_driver_version, sizeof(info->version) - 1);
}

static u32 netdev_get_link(struct net_device *dev) {
	return 1;
}
static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	if (data)
		adapter->rx_csum = 1;
	else {
		/*
		 * Since the ibmveth firmware interface does not have the concept of
		 * separate tx/rx checksum offload enable, if rx checksum is disabled
		 * we also have to disable tx checksum offload. Once we disable rx
		 * checksum offload, we are no longer allowed to send tx buffers that
		 * are not properly checksummed.
		 */
		adapter->rx_csum = 0;
		dev->features &= ~NETIF_F_IP_CSUM;
	}
}
static void ibmveth_set_tx_csum_flags(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_IP_CSUM;
		adapter->rx_csum = 1;
	} else
		dev->features &= ~NETIF_F_IP_CSUM;
}
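
/*
 * Checksum offload is negotiated with firmware through
 * h_illan_attributes(unit_address, clr_mask, set_mask, &ret_attr); the
 * bit is only toggled when the adapter is not a trunk and the partner
 * supports padded-packet checksums.  If the change fails, a second
 * h_illan_attributes call with the set and clear masks swapped rolls
 * the attempted change back.
 */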
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data,
				    void (*done) (struct net_device *, u32))
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	long ret;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;

	if (data)
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
	else
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					 set_attr, &ret_attr);

		if (ret != H_SUCCESS) {
			rc1 = -EIO;
			ibmveth_error_printk("unable to change checksum offload settings."
					     " %d rc=%ld\n", data, ret);

			ret = h_illan_attributes(adapter->vdev->unit_address,
						 set_attr, clr_attr, &ret_attr);
		} else
			done(dev, data);
	} else {
		rc1 = -EIO;
		ibmveth_error_printk("unable to change checksum offload settings."
				     " %d rc=%ld ret_attr=%lx\n", data, ret, ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}
static int ibmveth_set_rx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	if ((data && adapter->rx_csum) || (!data && !adapter->rx_csum))
		return 0;

	return ibmveth_set_csum_offload(dev, data, ibmveth_set_rx_csum_flags);
}
static int ibmveth_set_tx_csum(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rc = 0;

	if (data && (dev->features & NETIF_F_IP_CSUM))
		return 0;
	if (!data && !(dev->features & NETIF_F_IP_CSUM))
		return 0;

	if (data && !adapter->rx_csum)
		rc = ibmveth_set_csum_offload(dev, data, ibmveth_set_tx_csum_flags);
	else
		ibmveth_set_tx_csum_flags(dev, data);

	return rc;
}
static u32 ibmveth_get_rx_csum(struct net_device *dev)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	return adapter->rx_csum;
}
static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}
static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}
static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}
static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= netdev_get_link,
	.set_tx_csum		= ibmveth_set_tx_csum,
	.get_rx_csum		= ibmveth_get_rx_csum,
	.set_rx_csum		= ibmveth_set_rx_csum,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
	.set_sg			= ethtool_op_set_sg,
};
static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		ibmveth_error_printk("tx: h_send_logical_lan failed with "
				     "rc=%ld\n", ret);
		return 1;
	}

	return 0;
}
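
/*
 * Transmit strategy: a small linear skb is copied into the single
 * pre-mapped bounce buffer (one descriptor, no TCE churn); anything
 * larger gets its header plus up to five fragments mapped into the six
 * descriptors that h_send_logical_lan accepts.  If any mapping fails
 * we linearize the skb and retry once through the bounce path.
 */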
static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ip_hdr(skb)->protocol != IPPROTO_TCP && skb_checksum_help(skb)) {
		ibmveth_error_printk("tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	descs[0].fields.address = dma_map_single(&adapter->vdev->dev, skb->data,
						 skb_headlen(skb),
						 DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, descs[0].fields.address))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		unsigned long dma_addr;
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = dma_map_page(&adapter->vdev->dev, frag->page,
					frag->page_offset, frag->size,
					DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | frag->size;
		descs[i+1].fields.address = dma_addr;
	}

	if (ibmveth_send(adapter, descs)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	for (i = 0; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 0; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		ibmveth_error_printk("tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	skb_linearize(skb);
	force_bounce = 1;
	goto retry_bounce;
}
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter = container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;

restart_poll:
	do {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			ibmveth_debug_printk("recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);

			skb = ibmveth_rxq_get_buffer(adapter);

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				skb = new_skb;
				ibmveth_rxq_recycle_buffer(adapter);
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good)
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			netif_receive_skb(skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	} while (frames_processed < budget);

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		ibmveth_assert(lpar_rc == H_SUCCESS);

		napi_complete(napi);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}
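
/*
 * Interrupt scheme: the virtual interrupt only kicks off NAPI.
 * ibmveth_interrupt masks the source with h_vio_signal and schedules
 * the poll loop; ibmveth_poll above unmasks it again once it has
 * drained the queue below budget, re-checking for late arrivals to
 * close the race.
 */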
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		ibmveth_assert(lpar_rc == H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}

	return IRQ_HANDLED;
}
static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when attempting to clear filter table\n", lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			unsigned long mcast_addr = 0;
			memcpy(((char *)&mcast_addr)+2, ha->addr, 6);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if(lpar_rc != H_SUCCESS) {
				ibmveth_error_printk("h_multicast_ctrl rc=%ld when adding an entry to the filter table\n", lpar_rc);
			}
		}

		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if(lpar_rc != H_SUCCESS) {
			ibmveth_error_printk("h_multicast_ctrl rc=%ld when enabling filtering\n", lpar_rc);
		}
	}
}
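
/*
 * An MTU is viable as long as some buffer pool can hold new_mtu plus
 * IBMVETH_BUFF_OH bytes of receive overhead (the Ethernet header plus
 * the 8-byte correlator).  The loop below activates pools in
 * increasing size order up to the first one that fits the new MTU.
 */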
static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MAX_MTU)
		return -EINVAL;

	for (i = 0; i < IbmVethNumBufferPools; i++)
		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IbmVethNumBufferPools)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	   only the buffer pools necessary to hold the new MTU */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for(i = 0; i<IbmVethNumBufferPools; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart) {
				return ibmveth_open(adapter->netdev);
			}
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif
/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	/* netdev inits at probe time along with the structures we need below*/
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu);

	for (i = 0; i < IbmVethNumBufferPools; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret += adapter->rx_buff_pool[i].size *
			       IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].buff_size);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(rxqentries * sizeof(struct ibmveth_rx_q_entry));

	return ret;
}
static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_multicast_list	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};
static int __devinit ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i;
	long ret;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned long set_attr, ret_attr;

	unsigned char *mac_addr_p;
	unsigned int *mcastFilterSize_p;

	ibmveth_debug_printk_no_adapter("entering ibmveth_probe for UA 0x%x\n",
					dev->unit_address);

	mac_addr_p = (unsigned char *) vio_get_attribute(dev,
						VETH_MAC_ADDR, NULL);
	if(!mac_addr_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find VETH_MAC_ADDR "
				"attribute\n", __FILE__, __LINE__);
		return 0;
	}

	mcastFilterSize_p = (unsigned int *) vio_get_attribute(dev,
						VETH_MCAST_FILTER_SIZE, NULL);
	if(!mcastFilterSize_p) {
		printk(KERN_ERR "(%s:%3.3d) ERROR: Can't find "
				"VETH_MCAST_FILTER_SIZE attribute\n",
				__FILE__, __LINE__);
		return 0;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if(!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize= *mcastFilterSize_p;
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	/* 	Some older boxes running PHYP non-natively have an OF that
		returns a 8-byte local-mac-address field (and the first
		2 bytes have to be ignored) while newer boxes' OF return
		a 6-byte field. Note that IEEE 1275 specifies that
		local-mac-address must be a 6-byte field.
		The RPA doc specifies that the first byte must be 10b, so
		we'll just look for it to solve this 8 vs. 6 byte field issue */

	if ((*mac_addr_p & 0x3) != 0x02)
		mac_addr_p += 2;

	adapter->mac_addr = 0;
	memcpy(&adapter->mac_addr, mac_addr_p, 6);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->features |= NETIF_F_SG;

	memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);

	for(i = 0; i<IbmVethNumBufferPools; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	ibmveth_debug_printk("adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	ibmveth_debug_printk("registering netdev...\n");

	ret = h_illan_attributes(dev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;

		ret = h_illan_attributes(dev->unit_address, 0, set_attr, &ret_attr);

		if (ret == H_SUCCESS) {
			adapter->rx_csum = 1;
			netdev->features |= NETIF_F_IP_CSUM;
		} else
			ret = h_illan_attributes(dev->unit_address, set_attr, 0, &ret_attr);
	}

	rc = register_netdev(netdev);

	if(rc) {
		ibmveth_debug_printk("failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	ibmveth_debug_printk("registered\n");

	ibmveth_proc_register_adapter(adapter);

	return 0;
}
static int __devexit ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for(i = 0; i<IbmVethNumBufferPools; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	ibmveth_proc_unregister_adapter(adapter);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}
#ifdef CONFIG_PROC_FS
static void ibmveth_proc_register_driver(void)
{
	ibmveth_proc_dir = proc_mkdir(IBMVETH_PROC_DIR, init_net.proc_net);
}

static void ibmveth_proc_unregister_driver(void)
{
	remove_proc_entry(IBMVETH_PROC_DIR, init_net.proc_net);
}
static int ibmveth_show(struct seq_file *seq, void *v)
{
	struct ibmveth_adapter *adapter = seq->private;
	char *current_mac = (char *) adapter->netdev->dev_addr;
	char *firmware_mac = (char *) &adapter->mac_addr;

	seq_printf(seq, "%s %s\n\n", ibmveth_driver_string, ibmveth_driver_version);

	seq_printf(seq, "Unit Address:    0x%x\n", adapter->vdev->unit_address);
	seq_printf(seq, "Current MAC:     %pM\n", current_mac);
	seq_printf(seq, "Firmware MAC:    %pM\n", firmware_mac);

	seq_printf(seq, "\nAdapter Statistics:\n");
	seq_printf(seq, "  TX:  vio_map_single failures:      %lld\n", adapter->tx_map_failed);
	seq_printf(seq, "       send failures:                %lld\n", adapter->tx_send_failed);
	seq_printf(seq, "  RX:  replenish task cycles:        %lld\n", adapter->replenish_task_cycles);
	seq_printf(seq, "       alloc_skb_failures:           %lld\n", adapter->replenish_no_mem);
	seq_printf(seq, "       add buffer failures:          %lld\n", adapter->replenish_add_buff_failure);
	seq_printf(seq, "       invalid buffers:              %lld\n", adapter->rx_invalid_buffer);
	seq_printf(seq, "       no buffers:                   %lld\n", adapter->rx_no_buffer);

	return 0;
}

static int ibmveth_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, ibmveth_show, PDE(inode)->data);
}
static const struct file_operations ibmveth_proc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ibmveth_proc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = single_release,
};
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
	struct proc_dir_entry *entry;
	if (ibmveth_proc_dir) {
		char u_addr[10];
		sprintf(u_addr, "%x", adapter->vdev->unit_address);
		entry = proc_create_data(u_addr, S_IFREG, ibmveth_proc_dir,
					 &ibmveth_proc_fops, adapter);
		if (!entry)
			ibmveth_error_printk("Cannot create adapter proc entry");
	}
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
	if (ibmveth_proc_dir) {
		char u_addr[10];
		sprintf(u_addr, "%x", adapter->vdev->unit_address);
		remove_proc_entry(u_addr, ibmveth_proc_dir);
	}
}
#else /* CONFIG_PROC_FS */
static void ibmveth_proc_register_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_unregister_adapter(struct ibmveth_adapter *adapter)
{
}

static void ibmveth_proc_register_driver(void)
{
}

static void ibmveth_proc_unregister_driver(void)
{
}
#endif /* CONFIG_PROC_FS */
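
/*
 * Every receive buffer pool is also exposed as a sysfs kobject named
 * pool<N> under the vio device's directory, with three writable
 * attributes: "active", "num" (buffers in the pool) and "size" (bytes
 * per buffer).  Writing any of them on a running interface closes and
 * reopens the device so the pools can be rebuilt; e.g. (illustrative
 * path, the unit address varies):
 *
 *	echo 1 > /sys/devices/vio/30000002/pool3/active
 */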
static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

static ssize_t veth_pool_show(struct kobject * kobj,
			      struct attribute * attr, char * buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}
static ssize_t veth_pool_store(struct kobject * kobj, struct attribute * attr,
			       const char * buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if(ibmveth_alloc_buffer_pool(pool)) {
					ibmveth_error_printk("unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->active = 1;
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			   can hold a packet of the size of the MTU */
			for (i = 0; i < IbmVethNumBufferPools; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IbmVethNumBufferPools) {
				ibmveth_error_printk("no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
			return -EINVAL;
		else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->size = value;
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
			return -EINVAL;
		else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else
				pool->buff_size = value;
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}
#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);
static struct attribute * veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};
static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}
static struct vio_device_id ibmveth_device_table[] __devinitdata= {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};

MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.driver		= {
		.name	= ibmveth_driver_name,
		.owner	= THIS_MODULE,
		.pm	= &ibmveth_pm_ops,
	}
};
static int __init ibmveth_module_init(void)
{
	ibmveth_printk("%s: %s %s\n", ibmveth_driver_name, ibmveth_driver_string, ibmveth_driver_version);

	ibmveth_proc_register_driver();

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
	ibmveth_proc_unregister_driver();
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);