/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *	    Santiago Leon <santil@linux.vnet.ibm.com>
 *	    Brian King <brking@linux.vnet.ibm.com>
 *	    Robert Jennings <rcj@linux.vnet.ibm.com>
 *	    Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
#include <net/tcp.h>
#include <net/ip6_checksum.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

static struct kobj_type ktype_veth_pool;

static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.06"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
	"Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
	"Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, S_IRUGO);
MODULE_PARM_DESC(old_large_send,
	"Use old large send method on firmware that supports the new method");

struct ibmveth_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))

struct ibmveth_stat ibmveth_stats[] = {
	{ "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
	{ "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
	{ "replenish_add_buff_failure",
			IBMVETH_STAT_OFF(replenish_add_buff_failure) },
	{ "replenish_add_buff_success",
			IBMVETH_STAT_OFF(replenish_add_buff_success) },
	{ "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
	{ "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
	{ "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
	{ "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
	{ "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
	{ "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
	{ "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
	{ "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
	{ "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
	return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
			IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
	return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
	return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
				     u32 pool_index, u32 pool_size,
				     u32 buff_size, u32 pool_active)
{
	pool->size = pool_size;
	pool->index = pool_index;
	pool->buff_size = buff_size;
	pool->threshold = pool_size * 7 / 8;
	pool->active = pool_active;
}

/* allocate and setup a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
	int i;

	pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
	if (!pool->free_map)
		return -1;

	pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
	if (!pool->dma_addr) {
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
	if (!pool->skbuff) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
		kfree(pool->free_map);
		pool->free_map = NULL;
		return -1;
	}

	memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->producer_index = 0;
	pool->consumer_index = 0;

	return 0;
}

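/* Flush a receive buffer from the data cache, one cache line at a time.
 * Used when the rx_flush module parameter is set, before a buffer is
 * handed to firmware and before copied data is passed up the stack.
 */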
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
	unsigned long offset;

	for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
		asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool. note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
					  struct ibmveth_buff_pool *pool)
{
	u32 i;
	u32 count = pool->size - atomic_read(&pool->available);
	u32 buffers_added = 0;
	struct sk_buff *skb;
	unsigned int free_index, index;
	u64 correlator;
	unsigned long lpar_rc;
	dma_addr_t dma_addr;

	mb();

	for (i = 0; i < count; ++i) {
		union ibmveth_buf_desc desc;

		skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
		if (!skb) {
			netdev_dbg(adapter->netdev,
				   "replenish: unable to allocate skb\n");
			adapter->replenish_no_mem++;
			break;
		}

		free_index = pool->consumer_index;
		pool->consumer_index++;
		if (pool->consumer_index >= pool->size)
			pool->consumer_index = 0;
		index = pool->free_map[free_index];

		BUG_ON(index == IBM_VETH_INVALID_MAP);
		BUG_ON(pool->skbuff[index] != NULL);

		dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
					  pool->buff_size, DMA_FROM_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto failure;

		pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
		pool->dma_addr[index] = dma_addr;
		pool->skbuff[index] = skb;

		correlator = ((u64)pool->index << 32) | index;
		*(u64 *)skb->data = correlator;

		desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
		desc.fields.address = dma_addr;

		if (rx_flush) {
			unsigned int len = min(pool->buff_size,
					       adapter->netdev->mtu +
					       IBMVETH_BUFF_OH);
			ibmveth_flush_buffer(skb->data, len);
		}
		lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
						   desc.desc);

		if (lpar_rc != H_SUCCESS) {
			goto failure;
		} else {
			buffers_added++;
			adapter->replenish_add_buff_success++;
		}
	}

	mb();
	atomic_add(buffers_added, &(pool->available));
	return;

failure:
	pool->free_map[free_index] = index;
	pool->skbuff[index] = NULL;
	if (pool->consumer_index == 0)
		pool->consumer_index = pool->size - 1;
	else
		pool->consumer_index--;
	if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
		dma_unmap_single(&adapter->vdev->dev,
				 pool->dma_addr[index], pool->buff_size,
				 DMA_FROM_DEVICE);
	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;

	mb();
	atomic_add(buffers_added, &(pool->available));
}

/*
 * The final 8 bytes of the buffer list is a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
	__be64 *p = adapter->buffer_list_addr + 4096 - 8;

	adapter->rx_no_buffer = be64_to_cpup(p);
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
	int i;

	adapter->replenish_task_cycles++;

	for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
		struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

		if (pool->active &&
		    (atomic_read(&pool->available) < pool->threshold))
			ibmveth_replenish_buffer_pool(adapter, pool);
	}

	ibmveth_update_rx_no_buffer(adapter);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
				     struct ibmveth_buff_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (pool->skbuff && pool->dma_addr) {
		for (i = 0; i < pool->size; ++i) {
			struct sk_buff *skb = pool->skbuff[i];
			if (skb) {
				dma_unmap_single(&adapter->vdev->dev,
						 pool->dma_addr[i],
						 pool->buff_size,
						 DMA_FROM_DEVICE);
				dev_kfree_skb_any(skb);
				pool->skbuff[i] = NULL;
			}
		}
	}

	if (pool->dma_addr) {
		kfree(pool->dma_addr);
		pool->dma_addr = NULL;
	}

	if (pool->skbuff) {
		kfree(pool->skbuff);
		pool->skbuff = NULL;
	}
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
					    u64 correlator)
{
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	unsigned int free_index;
	struct sk_buff *skb;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	skb = adapter->rx_buff_pool[pool].skbuff[index];

	BUG_ON(skb == NULL);

	adapter->rx_buff_pool[pool].skbuff[index] = NULL;

	dma_unmap_single(&adapter->vdev->dev,
			 adapter->rx_buff_pool[pool].dma_addr[index],
			 adapter->rx_buff_pool[pool].buff_size,
			 DMA_FROM_DEVICE);

	free_index = adapter->rx_buff_pool[pool].producer_index;
	adapter->rx_buff_pool[pool].producer_index++;
	if (adapter->rx_buff_pool[pool].producer_index >=
	    adapter->rx_buff_pool[pool].size)
		adapter->rx_buff_pool[pool].producer_index = 0;
	adapter->rx_buff_pool[pool].free_map[free_index] = index;

	mb();

	atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
	u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
	u32 q_index = adapter->rx_queue.index;
	u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
	unsigned int pool = correlator >> 32;
	unsigned int index = correlator & 0xffffffffUL;
	union ibmveth_buf_desc desc;
	unsigned long lpar_rc;
	int ret = 1;

	BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
	BUG_ON(index >= adapter->rx_buff_pool[pool].size);

	if (!adapter->rx_buff_pool[pool].active) {
		ibmveth_rxq_harvest_buffer(adapter);
		ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
		goto out;
	}

	desc.fields.flags_len = IBMVETH_BUF_VALID |
		adapter->rx_buff_pool[pool].buff_size;
	desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

	lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

	if (lpar_rc != H_SUCCESS) {
		netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
			   "during recycle rc=%ld", lpar_rc);
		ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
		ret = 0;
	}

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}

out:
	return ret;
}

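/* remove the current buffer from the rx queue and advance the rxq index,
 * flipping the toggle bit when the queue wraps
 */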
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
	ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

	if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
		adapter->rx_queue.index = 0;
		adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
	}
}

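/* free the buffer list, filter list, rx queue, bounce buffer and any
 * active buffer pools; used from close and from error paths in open
 */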
static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
	int i;
	struct device *dev = &adapter->vdev->dev;

	if (adapter->buffer_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
			dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->buffer_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->buffer_list_addr);
		adapter->buffer_list_addr = NULL;
	}

	if (adapter->filter_list_addr != NULL) {
		if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
			dma_unmap_single(dev, adapter->filter_list_dma, 4096,
					 DMA_BIDIRECTIONAL);
			adapter->filter_list_dma = DMA_ERROR_CODE;
		}
		free_page((unsigned long)adapter->filter_list_addr);
		adapter->filter_list_addr = NULL;
	}

	if (adapter->rx_queue.queue_addr != NULL) {
		dma_free_coherent(dev, adapter->rx_queue.queue_len,
				  adapter->rx_queue.queue_addr,
				  adapter->rx_queue.queue_dma);
		adapter->rx_queue.queue_addr = NULL;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (adapter->rx_buff_pool[i].active)
			ibmveth_free_buffer_pool(adapter,
						 &adapter->rx_buff_pool[i]);

	if (adapter->bounce_buffer != NULL) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->netdev->mtu + IBMVETH_BUFF_OH,
					 DMA_BIDIRECTIONAL);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
	union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
	int rc, try_again = 1;

	/*
	 * After a kexec the adapter will still be open, so our attempt to
	 * open it will fail. So if we get a failure we free the adapter and
	 * try again, but only once.
	 */
retry:
	rc = h_register_logical_lan(adapter->vdev->unit_address,
				    adapter->buffer_list_dma, rxq_desc.desc,
				    adapter->filter_list_dma, mac_address);

	if (rc != H_SUCCESS && try_again) {
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		try_again = 0;
		goto retry;
	}

	return rc;
}

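/* pack the six bytes of an Ethernet address into the low bytes of a u64,
 * which is the form the hypervisor calls expect
 */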
static u64 ibmveth_encode_mac_addr(u8 *mac)
{
	int i;
	u64 encoded = 0;

	for (i = 0; i < ETH_ALEN; i++)
		encoded = (encoded << 8) | mac[i];

	return encoded;
}

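/* open the device: allocate the buffer and filter list pages and the
 * receive queue, register the logical LAN with the hypervisor, allocate
 * the active buffer pools and the bounce buffer, request the irq and
 * kick off the initial replenish cycle
 */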
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr =
		dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
				   &adapter->rx_queue.queue_dma, GFP_KERNEL);
	if (!adapter->rx_queue.queue_addr) {
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
			   adapter->buffer_list_dma,
			   adapter->filter_list_dma,
			   adapter->rx_queue.queue_dma, mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}

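/* close the device: free the logical LAN with the hypervisor, release the
 * irq and tear down everything allocated in ibmveth_open
 */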
static int ibmveth_close(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long lpar_rc;

	netdev_dbg(netdev, "close starting\n");

	napi_disable(&adapter->napi);

	if (!adapter->pool_config)
		netif_stop_queue(netdev);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	do {
		lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
	} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_free_logical_lan failed with %lx, "
			   "continuing with close\n", lpar_rc);
	}

	free_irq(netdev->irq, netdev);

	ibmveth_update_rx_no_buffer(adapter);

	ibmveth_cleanup(adapter);

	netdev_dbg(netdev, "close complete\n");

	return 0;
}

static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
				SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
				ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;

	return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
			       struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
	strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	/*
	 * Since the ibmveth firmware interface does not have the
	 * concept of separate tx/rx checksum offload enable, if rx
	 * checksum is disabled we also have to disable tx checksum
	 * offload. Once we disable rx checksum offload, we are no
	 * longer allowed to send tx buffers that are not properly
	 * checksummed.
	 */
	if (!(features & NETIF_F_RXCSUM))
		features &= ~NETIF_F_ALL_CSUM;

	return features;
}

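/* enable or disable IPv4/IPv6 TCP checksum offload via H_ILLAN_ATTRIBUTES.
 * The attributes can only be changed while the adapter is deregistered,
 * so a running interface is closed and reopened around the calls.
 */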
static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	unsigned long set_attr6, clr_attr6;
	long ret, ret4, ret6;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	set_attr6 = 0;
	clr_attr6 = 0;

	if (data) {
		set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	} else {
		clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
		clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
	}

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
	    !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
	    (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
		ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret4 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv4 checksum "
				   "offload settings. %d rc=%ld\n",
				   data, ret4);
			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);
			if (data == 1)
				dev->features &= ~NETIF_F_IP_CSUM;
		} else {
			adapter->fw_ipv4_csum_support = data;
		}

		ret6 = h_illan_attributes(adapter->vdev->unit_address,
					  clr_attr6, set_attr6, &ret_attr);

		if (ret6 != H_SUCCESS) {
			netdev_err(dev, "unable to change IPv6 checksum "
				   "offload settings. %d rc=%ld\n",
				   data, ret6);
			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr6, clr_attr6, &ret_attr);
			if (data == 1)
				dev->features &= ~NETIF_F_IPV6_CSUM;
		} else {
			adapter->fw_ipv6_csum_support = data;
		}

		if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
			adapter->rx_csum = data;
		else
			rc1 = -EIO;
	} else {
		rc1 = -EIO;
		netdev_err(dev, "unable to change checksum offload settings."
			   " %d rc=%ld ret_attr=%lx\n", data, ret,
			   ret_attr);
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

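/* enable or disable large send (TSO). On firmware that supports it (and
 * unless old_large_send is set) this is negotiated through
 * H_ILLAN_ATTRIBUTES; otherwise the older largesend method is used and
 * only IPv4 TSO is offered.
 */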
static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	unsigned long set_attr, clr_attr, ret_attr;
	long ret1, ret2;
	int rc1 = 0, rc2 = 0;
	int restart = 0;

	if (netif_running(dev)) {
		restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(dev);
		adapter->pool_config = 0;
	}

	set_attr = 0;
	clr_attr = 0;
	if (data)
		set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
	else
		clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

	ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
					  set_attr, &ret_attr);

		if (ret2 != H_SUCCESS) {
			netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
				   data, ret2);
			h_illan_attributes(adapter->vdev->unit_address,
					   set_attr, clr_attr, &ret_attr);
			if (data == 1)
				dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
			rc1 = -EIO;
		} else {
			adapter->fw_large_send_support = data;
			adapter->large_send = data;
		}
	} else {
		/* Older firmware versions of large send offload do not
		 * support tcp6/ipv6.
		 */
		if (data == 1) {
			dev->features &= ~NETIF_F_TSO6;
			netdev_info(dev, "TSO feature requires all partitions to have updated driver");
		}
		adapter->large_send = data;
	}

	if (restart)
		rc2 = ibmveth_open(dev);

	return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
	netdev_features_t features)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	int rx_csum = !!(features & NETIF_F_RXCSUM);
	int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
	int rc1 = 0, rc2 = 0;

	if (rx_csum != adapter->rx_csum) {
		rc1 = ibmveth_set_csum_offload(dev, rx_csum);
		if (rc1 && !adapter->rx_csum)
			dev->features =
				features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
	}

	if (large_send != adapter->large_send) {
		rc2 = ibmveth_set_tso(dev, large_send);
		if (rc2 && !adapter->large_send)
			dev->features =
				features & ~(NETIF_F_TSO | NETIF_F_TSO6);
	}

	return rc1 ? rc1 : rc2;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmveth_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	int i;
	struct ibmveth_adapter *adapter = netdev_priv(dev);

	for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
		data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
	.get_drvinfo		= netdev_get_drvinfo,
	.get_settings		= netdev_get_settings,
	.get_link		= ethtool_op_get_link,
	.get_strings		= ibmveth_get_strings,
	.get_sset_count		= ibmveth_get_sset_count,
	.get_ethtool_stats	= ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	return -EOPNOTSUPP;
}

#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

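/* hand up to six descriptors to the hypervisor in one H_SEND_LOGICAL_LAN
 * call, retrying while the hypervisor reports H_BUSY
 */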
static int ibmveth_send(struct ibmveth_adapter *adapter,
			union ibmveth_buf_desc *descs, unsigned long mss)
{
	unsigned long correlator;
	unsigned int retry_count;
	unsigned long ret;

	/*
	 * The retry count sets a maximum for the number of broadcast and
	 * multicast destinations within the system.
	 */
	retry_count = 1024;
	correlator = 0;
	do {
		ret = h_send_logical_lan(adapter->vdev->unit_address,
					 descs[0].desc, descs[1].desc,
					 descs[2].desc, descs[3].desc,
					 descs[4].desc, descs[5].desc,
					 correlator, &correlator, mss,
					 adapter->fw_large_send_support);
	} while ((ret == H_BUSY) && (retry_count--));

	if (ret != H_SUCCESS && ret != H_DROPPED) {
		netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
			   "with rc=%ld\n", ret);
		return 1;
	}

	return 0;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
				      struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned int desc_flags;
	union ibmveth_buf_desc descs[6];
	int last, i;
	int force_bounce = 0;
	dma_addr_t dma_addr;
	unsigned long mss = 0;

	/*
	 * veth handles a maximum of 6 segments including the header, so
	 * we have to linearize the skb if there are more than this.
	 */
	if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}

	/* veth can't checksum offload UDP */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ((skb->protocol == htons(ETH_P_IP) &&
	      ip_hdr(skb)->protocol != IPPROTO_TCP) ||
	     (skb->protocol == htons(ETH_P_IPV6) &&
	      ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
	    skb_checksum_help(skb)) {

		netdev_err(netdev, "tx: failed to checksum packet\n");
		netdev->stats.tx_dropped++;
		goto out;
	}

	desc_flags = IBMVETH_BUF_VALID;

	if (skb_is_gso(skb) && adapter->fw_large_send_support)
		desc_flags |= IBMVETH_BUF_LRG_SND;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		unsigned char *buf = skb_transport_header(skb) +
						skb->csum_offset;

		desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

		/* Need to zero out the checksum */
		buf[0] = 0;
		buf[1] = 0;
	}

retry_bounce:
	memset(descs, 0, sizeof(descs));

	/*
	 * If a linear packet is below the rx threshold then
	 * copy it into the static bounce buffer. This avoids the
	 * cost of a TCE insert and remove.
	 */
	if (force_bounce || (!skb_is_nonlinear(skb) &&
				(skb->len < tx_copybreak))) {
		skb_copy_from_linear_data(skb, adapter->bounce_buffer,
					  skb->len);

		descs[0].fields.flags_len = desc_flags | skb->len;
		descs[0].fields.address = adapter->bounce_buffer_dma;

		if (ibmveth_send(adapter, descs, 0)) {
			adapter->tx_send_failed++;
			netdev->stats.tx_dropped++;
		} else {
			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += skb->len;
		}

		goto out;
	}

	/* Map the header */
	dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
				  skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
		goto map_failed;

	descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
	descs[0].fields.address = dma_addr;

	/* Map the frags */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
					    skb_frag_size(frag), DMA_TO_DEVICE);

		if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
			goto map_failed_frags;

		descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
		descs[i+1].fields.address = dma_addr;
	}

	if (skb_is_gso(skb)) {
		if (adapter->fw_large_send_support) {
			mss = (unsigned long)skb_shinfo(skb)->gso_size;
			adapter->tx_large_packets++;
		} else if (!skb_is_gso_v6(skb)) {
			/* Put -1 in the IP checksum to tell phyp it
			 * is a largesend packet. Put the mss in
			 * the TCP checksum.
			 */
			ip_hdr(skb)->check = 0xffff;
			tcp_hdr(skb)->check =
				cpu_to_be16(skb_shinfo(skb)->gso_size);
			adapter->tx_large_packets++;
		}
	}

	if (ibmveth_send(adapter, descs, mss)) {
		adapter->tx_send_failed++;
		netdev->stats.tx_dropped++;
	} else {
		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += skb->len;
	}

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);

	for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

out:
	dev_consume_skb_any(skb);
	return NETDEV_TX_OK;

map_failed_frags:
	last = i+1;
	for (i = 1; i < last; i++)
		dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
			       descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			       DMA_TO_DEVICE);

	dma_unmap_single(&adapter->vdev->dev,
			 descs[0].fields.address,
			 descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
			 DMA_TO_DEVICE);
map_failed:
	if (!firmware_has_feature(FW_FEATURE_CMO))
		netdev_err(netdev, "tx: unable to map xmit buffer\n");
	adapter->tx_map_failed++;
	if (skb_linearize(skb)) {
		netdev->stats.tx_dropped++;
		goto out;
	}
	force_bounce = 1;
	goto retry_bounce;
}

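/* recover gso_size and gso_type for an aggregated ("large") receive
 * packet, either from the mss passed by the hypervisor or from the TCP
 * checksum field, where firmware without the Large Packet bit stashes it
 */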
static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
{
	struct tcphdr *tcph;
	int offset = 0;
	int hdr_len;

	/* only TCP packets will be aggregated */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_TCP) {
			offset = iph->ihl * 4;
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
		} else {
			return;
		}
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;

		if (iph6->nexthdr == IPPROTO_TCP) {
			offset = sizeof(struct ipv6hdr);
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
		} else {
			return;
		}
	} else {
		return;
	}
	/* if mss is not set through Large Packet bit/mss in rx buffer,
	 * expect that the mss will be written to the tcp header checksum.
	 */
	tcph = (struct tcphdr *)(skb->data + offset);
	if (lrg_pkt) {
		skb_shinfo(skb)->gso_size = mss;
	} else if (offset) {
		skb_shinfo(skb)->gso_size = ntohs(tcph->check);
		tcph->check = 0;
	}

	if (skb_shinfo(skb)->gso_size) {
		hdr_len = offset + tcph->doff * 4;
		skb_shinfo(skb)->gso_segs =
				DIV_ROUND_UP(skb->len - hdr_len,
					     skb_shinfo(skb)->gso_size);
	}
}

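/* NAPI poll: recycle or harvest rx queue entries, pass frames up with
 * napi_gro_receive, replenish the pools, and re-enable interrupts once
 * fewer than budget frames were processed
 */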
static int ibmveth_poll(struct napi_struct *napi, int budget)
{
	struct ibmveth_adapter *adapter =
			container_of(napi, struct ibmveth_adapter, napi);
	struct net_device *netdev = adapter->netdev;
	int frames_processed = 0;
	unsigned long lpar_rc;
	struct iphdr *iph;
	u16 mss = 0;

restart_poll:
	while (frames_processed < budget) {
		if (!ibmveth_rxq_pending_buffer(adapter))
			break;

		smp_rmb();
		if (!ibmveth_rxq_buffer_valid(adapter)) {
			wmb(); /* suggested by larson1 */
			adapter->rx_invalid_buffer++;
			netdev_dbg(netdev, "recycling invalid buffer\n");
			ibmveth_rxq_recycle_buffer(adapter);
		} else {
			struct sk_buff *skb, *new_skb;
			int length = ibmveth_rxq_frame_length(adapter);
			int offset = ibmveth_rxq_frame_offset(adapter);
			int csum_good = ibmveth_rxq_csum_good(adapter);
			int lrg_pkt = ibmveth_rxq_large_packet(adapter);
			__sum16 iph_check = 0;

			skb = ibmveth_rxq_get_buffer(adapter);

			/* if the large packet bit is set in the rx queue
			 * descriptor, the mss will be written by PHYP eight
			 * bytes from the start of the rx buffer, which is
			 * skb->data at this stage
			 */
			if (lrg_pkt) {
				__be64 *rxmss = (__be64 *)(skb->data + 8);

				mss = (u16)be64_to_cpu(*rxmss);
			}

			new_skb = NULL;
			if (length < rx_copybreak)
				new_skb = netdev_alloc_skb(netdev, length);

			if (new_skb) {
				skb_copy_to_linear_data(new_skb,
							skb->data + offset,
							length);
				if (rx_flush)
					ibmveth_flush_buffer(skb->data,
						length + offset);
				if (!ibmveth_rxq_recycle_buffer(adapter))
					kfree_skb(skb);
				skb = new_skb;
			} else {
				ibmveth_rxq_harvest_buffer(adapter);
				skb_reserve(skb, offset);
			}

			skb_put(skb, length);
			skb->protocol = eth_type_trans(skb, netdev);

			if (csum_good) {
				skb->ip_summed = CHECKSUM_UNNECESSARY;
				if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
					iph = (struct iphdr *)skb->data;

					/* If the IP checksum is not offloaded
					 * and if the packet is large send, the
					 * checksum must be rebuilt.
					 */
					if (iph->check == 0xffff) {
						iph->check = 0;
						iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
					}
				}
			}

			/* PHYP without PLSO support places a -1 in the ip
			 * checksum for large send frames.
			 */
			if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
				struct iphdr *iph = (struct iphdr *)skb->data;

				iph_check = iph->check;
			}

			if ((length > netdev->mtu + ETH_HLEN) ||
			    lrg_pkt || iph_check == 0xffff) {
				ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
				adapter->rx_large_packets++;
			}

			napi_gro_receive(napi, skb);	/* send it up */

			netdev->stats.rx_packets++;
			netdev->stats.rx_bytes += length;
			frames_processed++;
		}
	}

	ibmveth_replenish_task(adapter);

	if (frames_processed < budget) {
		napi_complete(napi);

		/* We think we are done - reenable interrupts,
		 * then check once more to make sure we are done.
		 */
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_ENABLE);

		BUG_ON(lpar_rc != H_SUCCESS);

		if (ibmveth_rxq_pending_buffer(adapter) &&
		    napi_reschedule(napi)) {
			lpar_rc = h_vio_signal(adapter->vdev->unit_address,
					       VIO_IRQ_DISABLE);
			goto restart_poll;
		}
	}

	return frames_processed;
}

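/* interrupt handler: disable further interrupts and hand processing off
 * to NAPI
 */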
static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
{
	struct net_device *netdev = dev_instance;
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if (napi_schedule_prep(&adapter->napi)) {
		lpar_rc = h_vio_signal(adapter->vdev->unit_address,
				       VIO_IRQ_DISABLE);
		BUG_ON(lpar_rc != H_SUCCESS);
		__napi_schedule(&adapter->napi);
	}
	return IRQ_HANDLED;
}

static void ibmveth_set_multicast_list(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	unsigned long lpar_rc;

	if ((netdev->flags & IFF_PROMISC) ||
	    (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "entering promisc mode\n", lpar_rc);
		}
	} else {
		struct netdev_hw_addr *ha;
		/* clear the filter table & disable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableRecv |
					   IbmVethMcastDisableFiltering |
					   IbmVethMcastClearFilterTable,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "attempting to clear filter table\n",
				   lpar_rc);
		}
		/* add the addresses to the filter table */
		netdev_for_each_mc_addr(ha, netdev) {
			/* add the multicast address to the filter table */
			u64 mcast_addr;
			mcast_addr = ibmveth_encode_mac_addr(ha->addr);
			lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
						   IbmVethMcastAddFilter,
						   mcast_addr);
			if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev, "h_multicast_ctrl rc=%ld "
					   "when adding an entry to the filter "
					   "table\n", lpar_rc);
			}
		}
		/* re-enable filtering */
		lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
					   IbmVethMcastEnableFiltering,
					   0);
		if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
				   "enabling filtering\n", lpar_rc);
		}
	}
}

static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct vio_dev *viodev = adapter->vdev;
	int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
	int i, rc;
	int need_restart = 0;

	if (new_mtu < IBMVETH_MIN_MTU)
		return -EINVAL;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
			break;

	if (i == IBMVETH_NUM_BUFF_POOLS)
		return -EINVAL;

	/* Deactivate all the buffer pools so that the next loop can activate
	 * only the buffer pools necessary to hold the new MTU.
	 */
	if (netif_running(adapter->netdev)) {
		need_restart = 1;
		adapter->pool_config = 1;
		ibmveth_close(adapter->netdev);
		adapter->pool_config = 0;
	}

	/* Look for an active buffer pool that can hold the new MTU */
	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		adapter->rx_buff_pool[i].active = 1;

		if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
			dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma
						(viodev));
			if (need_restart)
				return ibmveth_open(adapter->netdev);
			return 0;
		}
	}

	if (need_restart && (rc = ibmveth_open(adapter->netdev)))
		return rc;

	return -EINVAL;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmveth_poll_controller(struct net_device *dev)
{
	ibmveth_replenish_task(netdev_priv(dev));
	ibmveth_interrupt(dev->irq, dev);
}
#endif

/**
 * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
 *
 * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
 *
 * Return value:
 *	Number of bytes of IO data the driver will need to perform well.
 */
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
{
	struct net_device *netdev = dev_get_drvdata(&vdev->dev);
	struct ibmveth_adapter *adapter;
	struct iommu_table *tbl;
	unsigned long ret;
	int i;
	int rxqentries = 1;

	tbl = get_iommu_table_base(&vdev->dev);

	/* netdev inits at probe time along with the structures we need below */
	if (netdev == NULL)
		return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);

	adapter = netdev_priv(netdev);

	ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
	ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		/* add the size of the active receive buffers */
		if (adapter->rx_buff_pool[i].active)
			ret +=
			    adapter->rx_buff_pool[i].size *
			    IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
					     buff_size, tbl);
		rxqentries += adapter->rx_buff_pool[i].size;
	}
	/* add the size of the receive queue entries */
	ret += IOMMU_PAGE_ALIGN(
		rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);

	return ret;
}

static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
{
	struct ibmveth_adapter *adapter = netdev_priv(dev);
	struct sockaddr *addr = p;
	u64 mac_address;
	int rc;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	mac_address = ibmveth_encode_mac_addr(addr->sa_data);
	rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
	if (rc) {
		netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
		return rc;
	}

	ether_addr_copy(dev->dev_addr, addr->sa_data);

	return 0;
}

static const struct net_device_ops ibmveth_netdev_ops = {
	.ndo_open		= ibmveth_open,
	.ndo_stop		= ibmveth_close,
	.ndo_start_xmit		= ibmveth_start_xmit,
	.ndo_set_rx_mode	= ibmveth_set_multicast_list,
	.ndo_do_ioctl		= ibmveth_ioctl,
	.ndo_change_mtu		= ibmveth_change_mtu,
	.ndo_fix_features	= ibmveth_fix_features,
	.ndo_set_features	= ibmveth_set_features,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= ibmveth_set_mac_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmveth_poll_controller,
#endif
};

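/* probe: read the MAC address and multicast filter size from the device
 * tree, allocate and initialize the netdev and its buffer pools, probe
 * firmware checksum/large-send support and register the netdev
 */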
static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
{
	int rc, i, mac_len;
	struct net_device *netdev;
	struct ibmveth_adapter *adapter;
	unsigned char *mac_addr_p;
	__be32 *mcastFilterSize_p;
	long ret;
	unsigned long ret_attr;

	dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
		dev->unit_address);

	mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
							&mac_len);
	if (!mac_addr_p) {
		dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
		return -EINVAL;
	}
	/* Workaround for old/broken pHyp */
	if (mac_len == 8)
		mac_addr_p += 2;
	else if (mac_len != 6) {
		dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
			mac_len);
		return -EINVAL;
	}

	mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
							VETH_MCAST_FILTER_SIZE,
							NULL);
	if (!mcastFilterSize_p) {
		dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
			"attribute\n");
		return -EINVAL;
	}

	netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));

	if (!netdev)
		return -ENOMEM;

	adapter = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, netdev);

	adapter->vdev = dev;
	adapter->netdev = netdev;
	adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
	adapter->pool_config = 0;

	netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);

	netdev->irq = dev->irq;
	netdev->netdev_ops = &ibmveth_netdev_ops;
	netdev->ethtool_ops = &netdev_ethtool_ops;
	SET_NETDEV_DEV(netdev, &dev->dev);
	netdev->hw_features = NETIF_F_SG;
	if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
				       NETIF_F_RXCSUM;
	}

	netdev->features |= netdev->hw_features;

	ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

	/* If running older firmware, TSO should not be enabled by default */
	if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
	    !old_large_send) {
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		netdev->features |= netdev->hw_features;
	} else {
		netdev->hw_features |= NETIF_F_TSO;
	}

	memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);

	if (firmware_has_feature(FW_FEATURE_CMO))
		memcpy(pool_count, pool_count_cmo, sizeof(pool_count));

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
		int error;

		ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
					 pool_count[i], pool_size[i],
					 pool_active[i]);
		error = kobject_init_and_add(kobj, &ktype_veth_pool,
					     &dev->dev.kobj, "pool%d", i);
		if (!error)
			kobject_uevent(kobj, KOBJ_ADD);
	}

	netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);

	adapter->buffer_list_dma = DMA_ERROR_CODE;
	adapter->filter_list_dma = DMA_ERROR_CODE;
	adapter->rx_queue.queue_dma = DMA_ERROR_CODE;

	netdev_dbg(netdev, "registering netdev...\n");

	ibmveth_set_features(netdev, netdev->features);

	rc = register_netdev(netdev);

	if (rc) {
		netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
		free_netdev(netdev);
		return rc;
	}

	netdev_dbg(netdev, "registered\n");

	return 0;
}

static int ibmveth_remove(struct vio_dev *dev)
{
	struct net_device *netdev = dev_get_drvdata(&dev->dev);
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	int i;

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		kobject_put(&adapter->rx_buff_pool[i].kobj);

	unregister_netdev(netdev);

	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);

	return 0;
}

static struct attribute veth_active_attr;
static struct attribute veth_num_attr;
static struct attribute veth_size_attr;

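/* sysfs interface to the buffer pools: each pool exposes active, num and
 * size attributes under a pool%d kobject created at probe time
 */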
static ssize_t veth_pool_show(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);

	if (attr == &veth_active_attr)
		return sprintf(buf, "%d\n", pool->active);
	else if (attr == &veth_num_attr)
		return sprintf(buf, "%d\n", pool->size);
	else if (attr == &veth_size_attr)
		return sprintf(buf, "%d\n", pool->buff_size);
	return 0;
}

static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
			       const char *buf, size_t count)
{
	struct ibmveth_buff_pool *pool = container_of(kobj,
						      struct ibmveth_buff_pool,
						      kobj);
	struct net_device *netdev = dev_get_drvdata(
	    container_of(kobj->parent, struct device, kobj));
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value = simple_strtol(buf, NULL, 10);
	long rc;

	if (attr == &veth_active_attr) {
		if (value && !pool->active) {
			if (netif_running(netdev)) {
				if (ibmveth_alloc_buffer_pool(pool)) {
					netdev_err(netdev,
						   "unable to alloc pool\n");
					return -ENOMEM;
				}
				pool->active = 1;
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->active = 1;
			}
		} else if (!value && pool->active) {
			int mtu = netdev->mtu + IBMVETH_BUFF_OH;
			int i;
			/* Make sure there is a buffer pool with buffers that
			 * can hold a packet of the size of the MTU.
			 */
			for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
				if (pool == &adapter->rx_buff_pool[i])
					continue;
				if (!adapter->rx_buff_pool[i].active)
					continue;
				if (mtu <= adapter->rx_buff_pool[i].buff_size)
					break;
			}

			if (i == IBMVETH_NUM_BUFF_POOLS) {
				netdev_err(netdev, "no active pool >= MTU\n");
				return -EPERM;
			}

			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				pool->active = 0;
				adapter->pool_config = 0;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			}
			pool->active = 0;
		}
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->size = value;
			}
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
			return -EINVAL;
		} else {
			if (netif_running(netdev)) {
				adapter->pool_config = 1;
				ibmveth_close(netdev);
				adapter->pool_config = 0;
				pool->buff_size = value;
				if ((rc = ibmveth_open(netdev)))
					return rc;
			} else {
				pool->buff_size = value;
			}
		}
	}

	/* kick the interrupt handler to allocate/deallocate pools */
	ibmveth_interrupt(netdev->irq, netdev);
	return count;
}

#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
	.name = __stringify(_name), .mode = _mode,	\
	};

static ATTR(active, 0644);
static ATTR(num, 0644);
static ATTR(size, 0644);

static struct attribute *veth_pool_attrs[] = {
	&veth_active_attr,
	&veth_num_attr,
	&veth_size_attr,
	NULL,
};

static const struct sysfs_ops veth_pool_ops = {
	.show   = veth_pool_show,
	.store  = veth_pool_store,
};

static struct kobj_type ktype_veth_pool = {
	.release        = NULL,
	.sysfs_ops      = &veth_pool_ops,
	.default_attrs  = veth_pool_attrs,
};

static int ibmveth_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	ibmveth_interrupt(netdev->irq, netdev);
	return 0;
}

static struct vio_device_id ibmveth_device_table[] = {
	{ "network", "IBM,l-lan"},
	{ "", "" }
};
MODULE_DEVICE_TABLE(vio, ibmveth_device_table);

static struct dev_pm_ops ibmveth_pm_ops = {
	.resume = ibmveth_resume
};

static struct vio_driver ibmveth_driver = {
	.id_table	= ibmveth_device_table,
	.probe		= ibmveth_probe,
	.remove		= ibmveth_remove,
	.get_desired_dma = ibmveth_get_desired_dma,
	.name		= ibmveth_driver_name,
	.pm		= &ibmveth_pm_ops,
};

static int __init ibmveth_module_init(void)
{
	printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
	       ibmveth_driver_string, ibmveth_driver_version);

	return vio_register_driver(&ibmveth_driver);
}

static void __exit ibmveth_module_exit(void)
{
	vio_unregister_driver(&ibmveth_driver);
}

module_init(ibmveth_module_init);
module_exit(ibmveth_module_exit);