/*
 * IBM Power Virtual Ethernet Device Driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) IBM Corporation, 2003, 2010
 *
 * Authors: Dave Larson <larson1@us.ibm.com>
 *          Santiago Leon <santil@linux.vnet.ibm.com>
 *          Brian King <brking@linux.vnet.ibm.com>
 *          Robert Jennings <rcj@linux.vnet.ibm.com>
 *          Anton Blanchard <anton@au.ibm.com>
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/ethtool.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/slab.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/vio.h>
#include <asm/iommu.h>
#include <asm/firmware.h>

#include "ibmveth.h"

static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);

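/*
 * Forward declaration: kobj_type backing the per-buffer-pool sysfs
 * attributes; its definition appears further down in the driver.
 */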
static struct kobj_type ktype_veth_pool;


static const char ibmveth_driver_name[] = "ibmveth";
static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
#define ibmveth_driver_version "1.06"

MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(ibmveth_driver_version);

static unsigned int tx_copybreak __read_mostly = 128;
module_param(tx_copybreak, uint, 0644);
MODULE_PARM_DESC(tx_copybreak,
        "Maximum size of packet that is copied to a new buffer on transmit");

static unsigned int rx_copybreak __read_mostly = 128;
module_param(rx_copybreak, uint, 0644);
MODULE_PARM_DESC(rx_copybreak,
        "Maximum size of packet that is copied to a new buffer on receive");

static unsigned int rx_flush __read_mostly = 0;
module_param(rx_flush, uint, 0644);
MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");

static bool old_large_send __read_mostly;
module_param(old_large_send, bool, S_IRUGO);
MODULE_PARM_DESC(old_large_send,
        "Use old large send method on firmware that supports the new method");
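
/*
 * Illustrative only: these parameters can be set at module load time,
 * e.g.
 *   modprobe ibmveth tx_copybreak=256 rx_copybreak=256 rx_flush=1
 * and the 0644-mode ones can also be changed at runtime through
 * /sys/module/ibmveth/parameters/.
 */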

struct ibmveth_stat {
        char name[ETH_GSTRING_LEN];
        int offset;
};

#define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
#define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
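
/*
 * Each ethtool statistic is described by its name and its byte offset
 * within struct ibmveth_adapter; IBMVETH_GET_STAT() reads the u64
 * counter at that offset from a given adapter instance.
 */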

struct ibmveth_stat ibmveth_stats[] = {
        { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
        { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
        { "replenish_add_buff_failure",
                        IBMVETH_STAT_OFF(replenish_add_buff_failure) },
        { "replenish_add_buff_success",
                        IBMVETH_STAT_OFF(replenish_add_buff_success) },
        { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
        { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
        { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
        { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
        { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
        { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
        { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
        { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
        { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
};

/* simple methods of getting data from the current rxq entry */
static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
{
        return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
}

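/*
 * The receive queue is a ring shared with the hypervisor. Each entry
 * carries a toggle bit that flips on every pass around the ring; an
 * entry is pending when its toggle matches the value the driver
 * expects (rx_queue.toggle, flipped whenever the ring index wraps).
 */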
static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
{
        return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
                        IBMVETH_RXQ_TOGGLE_SHIFT;
}

static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
}

static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
}

static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
}

static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
}

static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
{
        return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
}

static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
{
        return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
}

/* setup the initial settings for a buffer pool */
static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
                                     u32 pool_index, u32 pool_size,
                                     u32 buff_size, u32 pool_active)
{
        pool->size = pool_size;
        pool->index = pool_index;
        pool->buff_size = buff_size;
        pool->threshold = pool_size * 7 / 8;
        pool->active = pool_active;
}

/* allocate and set up a buffer pool - called during open */
static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
{
        int i;

        pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);

        if (!pool->free_map)
                return -1;

        pool->dma_addr = kcalloc(pool->size, sizeof(dma_addr_t), GFP_KERNEL);
        if (!pool->dma_addr) {
                kfree(pool->free_map);
                pool->free_map = NULL;
                return -1;
        }

        pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);

        if (!pool->skbuff) {
                kfree(pool->dma_addr);
                pool->dma_addr = NULL;

                kfree(pool->free_map);
                pool->free_map = NULL;
                return -1;
        }

        for (i = 0; i < pool->size; ++i)
                pool->free_map[i] = i;

        atomic_set(&pool->available, 0);
        pool->producer_index = 0;
        pool->consumer_index = 0;

        return 0;
}

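/*
 * Flush the buffer out of the CPU data cache one cache block at a time
 * (dcbfl is a PowerPC data-cache-block-flush variant). Only used when
 * the rx_flush module parameter is set.
 */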
static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
{
        unsigned long offset;

        for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
                asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
}

/* replenish the buffers for a pool.  note that we don't need to
 * skb_reserve these since they are used for incoming...
 */
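/*
 * Each buffer is stamped with a 64-bit correlator (pool index in the
 * upper 32 bits, buffer index in the lower 32) before being handed to
 * the hypervisor via h_add_logical_lan_buffer(); the correlator comes
 * back with the received frame so the buffer can be located again.
 */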
static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
                                          struct ibmveth_buff_pool *pool)
{
        u32 i;
        u32 count = pool->size - atomic_read(&pool->available);
        u32 buffers_added = 0;
        struct sk_buff *skb;
        unsigned int free_index, index;
        u64 correlator;
        unsigned long lpar_rc;
        dma_addr_t dma_addr;

        mb();

        for (i = 0; i < count; ++i) {
                union ibmveth_buf_desc desc;

                skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);

                if (!skb) {
                        netdev_dbg(adapter->netdev,
                                   "replenish: unable to allocate skb\n");
                        adapter->replenish_no_mem++;
                        break;
                }

                free_index = pool->consumer_index;
                pool->consumer_index++;
                if (pool->consumer_index >= pool->size)
                        pool->consumer_index = 0;
                index = pool->free_map[free_index];

                BUG_ON(index == IBM_VETH_INVALID_MAP);
                BUG_ON(pool->skbuff[index] != NULL);

                dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
                                pool->buff_size, DMA_FROM_DEVICE);

                if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                        goto failure;

                pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
                pool->dma_addr[index] = dma_addr;
                pool->skbuff[index] = skb;

                correlator = ((u64)pool->index << 32) | index;
                *(u64 *)skb->data = correlator;

                desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
                desc.fields.address = dma_addr;

                if (rx_flush) {
                        unsigned int len = min(pool->buff_size,
                                                adapter->netdev->mtu +
                                                IBMVETH_BUFF_OH);
                        ibmveth_flush_buffer(skb->data, len);
                }
                lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
                                                   desc.desc);

                if (lpar_rc != H_SUCCESS) {
                        goto failure;
                } else {
                        buffers_added++;
                        adapter->replenish_add_buff_success++;
                }
        }

        mb();
        atomic_add(buffers_added, &(pool->available));
        return;

failure:
        pool->free_map[free_index] = index;
        pool->skbuff[index] = NULL;
        if (pool->consumer_index == 0)
                pool->consumer_index = pool->size - 1;
        else
                pool->consumer_index--;
        if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
                dma_unmap_single(&adapter->vdev->dev,
                                 pool->dma_addr[index], pool->buff_size,
                                 DMA_FROM_DEVICE);
        dev_kfree_skb_any(skb);
        adapter->replenish_add_buff_failure++;

        mb();
        atomic_add(buffers_added, &(pool->available));
}

/*
 * The final 8 bytes of the buffer list is a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame.
 */
static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
{
        __be64 *p = adapter->buffer_list_addr + 4096 - 8;

        adapter->rx_no_buffer = be64_to_cpup(p);
}

/* replenish routine */
static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
{
        int i;

        adapter->replenish_task_cycles++;

        for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
                struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];

                if (pool->active &&
                    (atomic_read(&pool->available) < pool->threshold))
                        ibmveth_replenish_buffer_pool(adapter, pool);
        }

        ibmveth_update_rx_no_buffer(adapter);
}

/* empty and free a buffer pool - also used to do cleanup in error paths */
static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
                                     struct ibmveth_buff_pool *pool)
{
        int i;

        kfree(pool->free_map);
        pool->free_map = NULL;

        if (pool->skbuff && pool->dma_addr) {
                for (i = 0; i < pool->size; ++i) {
                        struct sk_buff *skb = pool->skbuff[i];
                        if (skb) {
                                dma_unmap_single(&adapter->vdev->dev,
                                                 pool->dma_addr[i],
                                                 pool->buff_size,
                                                 DMA_FROM_DEVICE);
                                dev_kfree_skb_any(skb);
                                pool->skbuff[i] = NULL;
                        }
                }
        }

        if (pool->dma_addr) {
                kfree(pool->dma_addr);
                pool->dma_addr = NULL;
        }

        if (pool->skbuff) {
                kfree(pool->skbuff);
                pool->skbuff = NULL;
        }
}

/* remove a buffer from a pool */
static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
                                            u64 correlator)
{
        unsigned int pool  = correlator >> 32;
        unsigned int index = correlator & 0xffffffffUL;
        unsigned int free_index;
        struct sk_buff *skb;

        BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
        BUG_ON(index >= adapter->rx_buff_pool[pool].size);

        skb = adapter->rx_buff_pool[pool].skbuff[index];

        BUG_ON(skb == NULL);

        adapter->rx_buff_pool[pool].skbuff[index] = NULL;

        dma_unmap_single(&adapter->vdev->dev,
                         adapter->rx_buff_pool[pool].dma_addr[index],
                         adapter->rx_buff_pool[pool].buff_size,
                         DMA_FROM_DEVICE);

        free_index = adapter->rx_buff_pool[pool].producer_index;
        adapter->rx_buff_pool[pool].producer_index++;
        if (adapter->rx_buff_pool[pool].producer_index >=
            adapter->rx_buff_pool[pool].size)
                adapter->rx_buff_pool[pool].producer_index = 0;
        adapter->rx_buff_pool[pool].free_map[free_index] = index;

        mb();

        atomic_dec(&(adapter->rx_buff_pool[pool].available));
}

/* get the current buffer on the rx queue */
static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
{
        u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
        unsigned int pool = correlator >> 32;
        unsigned int index = correlator & 0xffffffffUL;

        BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
        BUG_ON(index >= adapter->rx_buff_pool[pool].size);

        return adapter->rx_buff_pool[pool].skbuff[index];
}

/* recycle the current buffer on the rx queue */
static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
{
        u32 q_index = adapter->rx_queue.index;
        u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
        unsigned int pool = correlator >> 32;
        unsigned int index = correlator & 0xffffffffUL;
        union ibmveth_buf_desc desc;
        unsigned long lpar_rc;
        int ret = 1;

        BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
        BUG_ON(index >= adapter->rx_buff_pool[pool].size);

        if (!adapter->rx_buff_pool[pool].active) {
                ibmveth_rxq_harvest_buffer(adapter);
                ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
                goto out;
        }

        desc.fields.flags_len = IBMVETH_BUF_VALID |
                adapter->rx_buff_pool[pool].buff_size;
        desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];

        lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);

        if (lpar_rc != H_SUCCESS) {
                netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
                           "during recycle rc=%ld", lpar_rc);
                ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
                ret = 0;
        }

        if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
                adapter->rx_queue.index = 0;
                adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
        }

out:
        return ret;
}

static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
{
        ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);

        if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
                adapter->rx_queue.index = 0;
                adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
        }
}

static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
{
        int i;
        struct device *dev = &adapter->vdev->dev;

        if (adapter->buffer_list_addr != NULL) {
                if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
                        dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
                                        DMA_BIDIRECTIONAL);
                        adapter->buffer_list_dma = DMA_ERROR_CODE;
                }
                free_page((unsigned long)adapter->buffer_list_addr);
                adapter->buffer_list_addr = NULL;
        }

        if (adapter->filter_list_addr != NULL) {
                if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
                        dma_unmap_single(dev, adapter->filter_list_dma, 4096,
                                        DMA_BIDIRECTIONAL);
                        adapter->filter_list_dma = DMA_ERROR_CODE;
                }
                free_page((unsigned long)adapter->filter_list_addr);
                adapter->filter_list_addr = NULL;
        }

        if (adapter->rx_queue.queue_addr != NULL) {
                dma_free_coherent(dev, adapter->rx_queue.queue_len,
                                  adapter->rx_queue.queue_addr,
                                  adapter->rx_queue.queue_dma);
                adapter->rx_queue.queue_addr = NULL;
        }

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
                if (adapter->rx_buff_pool[i].active)
                        ibmveth_free_buffer_pool(adapter,
                                                 &adapter->rx_buff_pool[i]);

        if (adapter->bounce_buffer != NULL) {
                if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
                        dma_unmap_single(&adapter->vdev->dev,
                                        adapter->bounce_buffer_dma,
                                        adapter->netdev->mtu + IBMVETH_BUFF_OH,
                                        DMA_BIDIRECTIONAL);
                        adapter->bounce_buffer_dma = DMA_ERROR_CODE;
                }
                kfree(adapter->bounce_buffer);
                adapter->bounce_buffer = NULL;
        }
}

static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
        union ibmveth_buf_desc rxq_desc, u64 mac_address)
{
        int rc, try_again = 1;

        /*
         * After a kexec the adapter will still be open, so our attempt to
         * open it will fail. So if we get a failure we free the adapter and
         * try again, but only once.
         */
retry:
        rc = h_register_logical_lan(adapter->vdev->unit_address,
                                    adapter->buffer_list_dma, rxq_desc.desc,
                                    adapter->filter_list_dma, mac_address);

        if (rc != H_SUCCESS && try_again) {
                do {
                        rc = h_free_logical_lan(adapter->vdev->unit_address);
                } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

                try_again = 0;
                goto retry;
        }

        return rc;
}

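/*
 * Pack the 6-byte Ethernet address into the low 48 bits of a u64, the
 * layout expected by the firmware calls (h_register_logical_lan,
 * h_multicast_ctrl).
 */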
static u64 ibmveth_encode_mac_addr(u8 *mac)
{
        int i;
        u64 encoded = 0;

        for (i = 0; i < ETH_ALEN; i++)
                encoded = (encoded << 8) | mac[i];

        return encoded;
}

static int ibmveth_open(struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        u64 mac_address;
        int rxq_entries = 1;
        unsigned long lpar_rc;
        int rc;
        union ibmveth_buf_desc rxq_desc;
        int i;
        struct device *dev;

        netdev_dbg(netdev, "open starting\n");

        napi_enable(&adapter->napi);

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
                rxq_entries += adapter->rx_buff_pool[i].size;

        adapter->buffer_list_addr = (void *) get_zeroed_page(GFP_KERNEL);
        adapter->filter_list_addr = (void *) get_zeroed_page(GFP_KERNEL);

        if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
                netdev_err(netdev, "unable to allocate filter or buffer list "
                           "pages\n");
                rc = -ENOMEM;
                goto err_out;
        }

        dev = &adapter->vdev->dev;

        adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
                                                rxq_entries;
        adapter->rx_queue.queue_addr =
                dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
                                   &adapter->rx_queue.queue_dma, GFP_KERNEL);
        if (!adapter->rx_queue.queue_addr) {
                rc = -ENOMEM;
                goto err_out;
        }

        adapter->buffer_list_dma = dma_map_single(dev,
                        adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
        adapter->filter_list_dma = dma_map_single(dev,
                        adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);

        if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
            (dma_mapping_error(dev, adapter->filter_list_dma))) {
                netdev_err(netdev, "unable to map filter or buffer list "
                           "pages\n");
                rc = -ENOMEM;
                goto err_out;
        }

        adapter->rx_queue.index = 0;
        adapter->rx_queue.num_slots = rxq_entries;
        adapter->rx_queue.toggle = 1;

        mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);

        rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
                                        adapter->rx_queue.queue_len;
        rxq_desc.fields.address = adapter->rx_queue.queue_dma;

        netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
        netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
        netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

        h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

        lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

        if (lpar_rc != H_SUCCESS) {
                netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
                           lpar_rc);
                netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
                           "desc:0x%llx MAC:0x%llx\n",
                                     adapter->buffer_list_dma,
                                     adapter->filter_list_dma,
                                     rxq_desc.desc,
                                     mac_address);
                rc = -ENONET;
                goto err_out;
        }

        for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
                if (!adapter->rx_buff_pool[i].active)
                        continue;
                if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
                        netdev_err(netdev, "unable to alloc pool\n");
                        adapter->rx_buff_pool[i].active = 0;
                        rc = -ENOMEM;
                        goto err_out;
                }
        }

        netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
        rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
                         netdev);
        if (rc != 0) {
                netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
                           netdev->irq, rc);
                do {
                        lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
                } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

                goto err_out;
        }

        adapter->bounce_buffer =
            kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
        if (!adapter->bounce_buffer) {
                rc = -ENOMEM;
                goto err_out_free_irq;
        }
        adapter->bounce_buffer_dma =
            dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
                           netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
                netdev_err(netdev, "unable to map bounce buffer\n");
                rc = -ENOMEM;
                goto err_out_free_irq;
        }

        netdev_dbg(netdev, "initial replenish cycle\n");
        ibmveth_interrupt(netdev->irq, netdev);

        netif_start_queue(netdev);

        netdev_dbg(netdev, "open complete\n");

        return 0;

err_out_free_irq:
        free_irq(netdev->irq, netdev);
err_out:
        ibmveth_cleanup(adapter);
        napi_disable(&adapter->napi);
        return rc;
}

static int ibmveth_close(struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        long lpar_rc;

        netdev_dbg(netdev, "close starting\n");

        napi_disable(&adapter->napi);

        if (!adapter->pool_config)
                netif_stop_queue(netdev);

        h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

        do {
                lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
        } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

        if (lpar_rc != H_SUCCESS) {
                netdev_err(netdev, "h_free_logical_lan failed with %lx, "
                           "continuing with close\n", lpar_rc);
        }

        free_irq(netdev->irq, netdev);

        ibmveth_update_rx_no_buffer(adapter);

        ibmveth_cleanup(adapter);

        netdev_dbg(netdev, "close complete\n");

        return 0;
}

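/*
 * This is a virtual adapter with no physical link, so the values
 * reported here (1Gb/s, full duplex, fibre) are fixed placeholders
 * rather than negotiated settings.
 */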
static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
                                SUPPORTED_FIBRE);
        cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
                                ADVERTISED_FIBRE);
        ethtool_cmd_speed_set(cmd, SPEED_1000);
        cmd->duplex = DUPLEX_FULL;
        cmd->port = PORT_FIBRE;
        cmd->phy_address = 0;
        cmd->transceiver = XCVR_INTERNAL;
        cmd->autoneg = AUTONEG_ENABLE;
        cmd->maxtxpkt = 0;
        cmd->maxrxpkt = 1;
        return 0;
}

static void netdev_get_drvinfo(struct net_device *dev,
                               struct ethtool_drvinfo *info)
{
        strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
        strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
}

static netdev_features_t ibmveth_fix_features(struct net_device *dev,
        netdev_features_t features)
{
        /*
         * Since the ibmveth firmware interface does not have the
         * concept of separate tx/rx checksum offload enable, if rx
         * checksum is disabled we also have to disable tx checksum
         * offload. Once we disable rx checksum offload, we are no
         * longer allowed to send tx buffers that are not properly
         * checksummed.
         */

        if (!(features & NETIF_F_RXCSUM))
                features &= ~NETIF_F_CSUM_MASK;

        return features;
}

static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        unsigned long set_attr, clr_attr, ret_attr;
        unsigned long set_attr6, clr_attr6;
        long ret, ret4, ret6;
        int rc1 = 0, rc2 = 0;
        int restart = 0;

        if (netif_running(dev)) {
                restart = 1;
                adapter->pool_config = 1;
                ibmveth_close(dev);
                adapter->pool_config = 0;
        }

        set_attr = 0;
        clr_attr = 0;
        set_attr6 = 0;
        clr_attr6 = 0;

        if (data) {
                set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
                set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
        } else {
                clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
                clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
        }

        ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

        if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
            !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
            (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
                ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
                                         set_attr, &ret_attr);

                if (ret4 != H_SUCCESS) {
                        netdev_err(dev, "unable to change IPv4 checksum "
                                        "offload settings. %d rc=%ld\n",
                                        data, ret4);

                        h_illan_attributes(adapter->vdev->unit_address,
                                           set_attr, clr_attr, &ret_attr);

                        if (data == 1)
                                dev->features &= ~NETIF_F_IP_CSUM;

                } else {
                        adapter->fw_ipv4_csum_support = data;
                }

                ret6 = h_illan_attributes(adapter->vdev->unit_address,
                                         clr_attr6, set_attr6, &ret_attr);

                if (ret6 != H_SUCCESS) {
                        netdev_err(dev, "unable to change IPv6 checksum "
                                        "offload settings. %d rc=%ld\n",
                                        data, ret6);

                        h_illan_attributes(adapter->vdev->unit_address,
                                           set_attr6, clr_attr6, &ret_attr);

                        if (data == 1)
                                dev->features &= ~NETIF_F_IPV6_CSUM;

                } else
                        adapter->fw_ipv6_csum_support = data;

                if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
                        adapter->rx_csum = data;
                else
                        rc1 = -EIO;
        } else {
                rc1 = -EIO;
                netdev_err(dev, "unable to change checksum offload settings."
                                     " %d rc=%ld ret_attr=%lx\n", data, ret,
                                     ret_attr);
        }

        if (restart)
                rc2 = ibmveth_open(dev);

        return rc1 ? rc1 : rc2;
}

static int ibmveth_set_tso(struct net_device *dev, u32 data)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        unsigned long set_attr, clr_attr, ret_attr;
        long ret1, ret2;
        int rc1 = 0, rc2 = 0;
        int restart = 0;

        if (netif_running(dev)) {
                restart = 1;
                adapter->pool_config = 1;
                ibmveth_close(dev);
                adapter->pool_config = 0;
        }

        set_attr = 0;
        clr_attr = 0;

        if (data)
                set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
        else
                clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;

        ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);

        if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
            !old_large_send) {
                ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
                                          set_attr, &ret_attr);

                if (ret2 != H_SUCCESS) {
                        netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
                                   data, ret2);

                        h_illan_attributes(adapter->vdev->unit_address,
                                           set_attr, clr_attr, &ret_attr);

                        if (data == 1)
                                dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
                        rc1 = -EIO;

                } else {
                        adapter->fw_large_send_support = data;
                        adapter->large_send = data;
                }
        } else {
                /* Older firmware version of large send offload does not
                 * support tcp6/ipv6
                 */
                if (data == 1) {
                        dev->features &= ~NETIF_F_TSO6;
                        netdev_info(dev, "TSO feature requires all partitions to have updated driver");
                }
                adapter->large_send = data;
        }

        if (restart)
                rc2 = ibmveth_open(dev);

        return rc1 ? rc1 : rc2;
}

static int ibmveth_set_features(struct net_device *dev,
        netdev_features_t features)
{
        struct ibmveth_adapter *adapter = netdev_priv(dev);
        int rx_csum = !!(features & NETIF_F_RXCSUM);
        int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
        int rc1 = 0, rc2 = 0;

        if (rx_csum != adapter->rx_csum) {
                rc1 = ibmveth_set_csum_offload(dev, rx_csum);
                if (rc1 && !adapter->rx_csum)
                        dev->features =
                                features & ~(NETIF_F_CSUM_MASK |
                                             NETIF_F_RXCSUM);
        }

        if (large_send != adapter->large_send) {
                rc2 = ibmveth_set_tso(dev, large_send);
                if (rc2 && !adapter->large_send)
                        dev->features =
                                features & ~(NETIF_F_TSO | NETIF_F_TSO6);
        }

        return rc1 ? rc1 : rc2;
}

static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
        int i;

        if (stringset != ETH_SS_STATS)
                return;

        for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
                memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmveth_get_sset_count(struct net_device *dev, int sset)
{
        switch (sset) {
        case ETH_SS_STATS:
                return ARRAY_SIZE(ibmveth_stats);
        default:
                return -EOPNOTSUPP;
        }
}

static void ibmveth_get_ethtool_stats(struct net_device *dev,
                                      struct ethtool_stats *stats, u64 *data)
{
        int i;
        struct ibmveth_adapter *adapter = netdev_priv(dev);

        for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
                data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
}

static const struct ethtool_ops netdev_ethtool_ops = {
        .get_drvinfo            = netdev_get_drvinfo,
        .get_settings           = netdev_get_settings,
        .get_link               = ethtool_op_get_link,
        .get_strings            = ibmveth_get_strings,
        .get_sset_count         = ibmveth_get_sset_count,
        .get_ethtool_stats      = ibmveth_get_ethtool_stats,
};

static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        return -EOPNOTSUPP;
}

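/* byte offset of an address within its (4K) page */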
#define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))

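/*
 * Hand a prepared set of descriptors to the hypervisor, retrying while
 * it reports H_BUSY (bounded by retry_count below). Returns 0 on
 * success and 1 on failure so the caller can account the drop.
 */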
static int ibmveth_send(struct ibmveth_adapter *adapter,
                        union ibmveth_buf_desc *descs, unsigned long mss)
{
        unsigned long correlator;
        unsigned int retry_count;
        unsigned long ret;

        /*
         * The retry count sets a maximum for the number of broadcast and
         * multicast destinations within the system.
         */
        retry_count = 1024;
        correlator = 0;
        do {
                ret = h_send_logical_lan(adapter->vdev->unit_address,
                                             descs[0].desc, descs[1].desc,
                                             descs[2].desc, descs[3].desc,
                                             descs[4].desc, descs[5].desc,
                                             correlator, &correlator, mss,
                                             adapter->fw_large_send_support);
        } while ((ret == H_BUSY) && (retry_count--));

        if (ret != H_SUCCESS && ret != H_DROPPED) {
                netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
                           "with rc=%ld\n", ret);
                return 1;
        }

        return 0;
}

static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
                                      struct net_device *netdev)
{
        struct ibmveth_adapter *adapter = netdev_priv(netdev);
        unsigned int desc_flags;
        union ibmveth_buf_desc descs[6];
        int last, i;
        int force_bounce = 0;
        dma_addr_t dma_addr;
        unsigned long mss = 0;

        /*
         * veth handles a maximum of 6 segments including the header, so
         * we have to linearize the skb if there are more than this.
         */
        if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
                netdev->stats.tx_dropped++;
                goto out;
        }

        /* veth can't checksum offload UDP */
        if (skb->ip_summed == CHECKSUM_PARTIAL &&
            ((skb->protocol == htons(ETH_P_IP) &&
              ip_hdr(skb)->protocol != IPPROTO_TCP) ||
             (skb->protocol == htons(ETH_P_IPV6) &&
              ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
            skb_checksum_help(skb)) {

                netdev_err(netdev, "tx: failed to checksum packet\n");
                netdev->stats.tx_dropped++;
                goto out;
        }

        desc_flags = IBMVETH_BUF_VALID;

        if (skb_is_gso(skb) && adapter->fw_large_send_support)
                desc_flags |= IBMVETH_BUF_LRG_SND;

        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                unsigned char *buf = skb_transport_header(skb) +
                                                skb->csum_offset;

                desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);

                /* Need to zero out the checksum */
                buf[0] = 0;
                buf[1] = 0;
        }

retry_bounce:
        memset(descs, 0, sizeof(descs));

        /*
         * If a linear packet is below the rx threshold then
         * copy it into the static bounce buffer. This avoids the
         * cost of a TCE insert and remove.
         */
        if (force_bounce || (!skb_is_nonlinear(skb) &&
                                (skb->len < tx_copybreak))) {
                skb_copy_from_linear_data(skb, adapter->bounce_buffer,
                                          skb->len);

                descs[0].fields.flags_len = desc_flags | skb->len;
                descs[0].fields.address = adapter->bounce_buffer_dma;

                if (ibmveth_send(adapter, descs, 0)) {
                        adapter->tx_send_failed++;
                        netdev->stats.tx_dropped++;
                } else {
                        netdev->stats.tx_packets++;
                        netdev->stats.tx_bytes += skb->len;
                }

                goto out;
        }

        /* Map the header */
        dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
                                  skb_headlen(skb), DMA_TO_DEVICE);
        if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                goto map_failed;

        descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
        descs[0].fields.address = dma_addr;

        /* Map the frags */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
                                            skb_frag_size(frag), DMA_TO_DEVICE);

                if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                        goto map_failed_frags;

                descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
                descs[i+1].fields.address = dma_addr;
        }

        if (skb_is_gso(skb)) {
                if (adapter->fw_large_send_support) {
                        mss = (unsigned long)skb_shinfo(skb)->gso_size;
                        adapter->tx_large_packets++;
                } else if (!skb_is_gso_v6(skb)) {
                        /* Put -1 in the IP checksum to tell phyp it
                         * is a largesend packet. Put the mss in
                         * the TCP checksum.
                         */
                        ip_hdr(skb)->check = 0xffff;
                        tcp_hdr(skb)->check =
                                cpu_to_be16(skb_shinfo(skb)->gso_size);
                        adapter->tx_large_packets++;
                }
        }

        if (ibmveth_send(adapter, descs, mss)) {
                adapter->tx_send_failed++;
                netdev->stats.tx_dropped++;
        } else {
                netdev->stats.tx_packets++;
                netdev->stats.tx_bytes += skb->len;
        }

        dma_unmap_single(&adapter->vdev->dev,
                         descs[0].fields.address,
                         descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                         DMA_TO_DEVICE);

        for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
                dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
                               descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                               DMA_TO_DEVICE);

out:
        dev_consume_skb_any(skb);
        return NETDEV_TX_OK;

map_failed_frags:
        last = i+1;
        for (i = 1; i < last; i++)
                dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
                               descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                               DMA_TO_DEVICE);

        dma_unmap_single(&adapter->vdev->dev,
                         descs[0].fields.address,
                         descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
                         DMA_TO_DEVICE);
map_failed:
        if (!firmware_has_feature(FW_FEATURE_CMO))
                netdev_err(netdev, "tx: unable to map xmit buffer\n");
        adapter->tx_map_failed++;
        if (skb_linearize(skb)) {
                netdev->stats.tx_dropped++;
                goto out;
        }
        force_bounce = 1;
        goto retry_bounce;
}

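/*
 * Reconstruct GSO metadata for a received large (aggregated) packet so
 * the stack can resegment it: the MSS comes either from the value
 * passed in (when the large-packet bit was set) or from the TCP
 * checksum field, where older firmware stashes it.
 */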
1186 static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
1187 {
1188         struct tcphdr *tcph;
1189         int offset = 0;
1190         int hdr_len;
1191
1192         /* only TCP packets will be aggregated */
1193         if (skb->protocol == htons(ETH_P_IP)) {
1194                 struct iphdr *iph = (struct iphdr *)skb->data;
1195
1196                 if (iph->protocol == IPPROTO_TCP) {
1197                         offset = iph->ihl * 4;
1198                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1199                 } else {
1200                         return;
1201                 }
1202         } else if (skb->protocol == htons(ETH_P_IPV6)) {
1203                 struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;
1204
1205                 if (iph6->nexthdr == IPPROTO_TCP) {
1206                         offset = sizeof(struct ipv6hdr);
1207                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1208                 } else {
1209                         return;
1210                 }
1211         } else {
1212                 return;
1213         }
1214         /* if mss is not set through Large Packet bit/mss in rx buffer,
1215          * expect that the mss will be written to the tcp header checksum.
1216          */
1217         tcph = (struct tcphdr *)(skb->data + offset);
1218         if (lrg_pkt) {
1219                 skb_shinfo(skb)->gso_size = mss;
1220         } else if (offset) {
1221                 skb_shinfo(skb)->gso_size = ntohs(tcph->check);
1222                 tcph->check = 0;
1223         }
1224
1225         if (skb_shinfo(skb)->gso_size) {
1226                 hdr_len = offset + tcph->doff * 4;
1227                 skb_shinfo(skb)->gso_segs =
1228                                 DIV_ROUND_UP(skb->len - hdr_len,
1229                                              skb_shinfo(skb)->gso_size);
1230         }
1231 }
1232
1233 static int ibmveth_poll(struct napi_struct *napi, int budget)
1234 {
1235         struct ibmveth_adapter *adapter =
1236                         container_of(napi, struct ibmveth_adapter, napi);
1237         struct net_device *netdev = adapter->netdev;
1238         int frames_processed = 0;
1239         unsigned long lpar_rc;
1240         struct iphdr *iph;
1241         u16 mss = 0;
1242
1243         while (frames_processed < budget) {
1244                 if (!ibmveth_rxq_pending_buffer(adapter))
1245                         break;
1246
1247                 smp_rmb();
1248                 if (!ibmveth_rxq_buffer_valid(adapter)) {
1249                         wmb(); /* suggested by larson1 */
1250                         adapter->rx_invalid_buffer++;
1251                         netdev_dbg(netdev, "recycling invalid buffer\n");
1252                         ibmveth_rxq_recycle_buffer(adapter);
1253                 } else {
1254                         struct sk_buff *skb, *new_skb;
1255                         int length = ibmveth_rxq_frame_length(adapter);
1256                         int offset = ibmveth_rxq_frame_offset(adapter);
1257                         int csum_good = ibmveth_rxq_csum_good(adapter);
1258                         int lrg_pkt = ibmveth_rxq_large_packet(adapter);
1259                         __sum16 iph_check = 0;
1260
1261                         skb = ibmveth_rxq_get_buffer(adapter);
1262
1263                         /* if the large packet bit is set in the rx queue
1264                          * descriptor, the mss will be written by PHYP eight
1265                          * bytes from the start of the rx buffer, which is
1266                          * skb->data at this stage
1267                          */
1268                         if (lrg_pkt) {
1269                                 __be64 *rxmss = (__be64 *)(skb->data + 8);
1270
1271                                 mss = (u16)be64_to_cpu(*rxmss);
1272                         }
1273
1274                         new_skb = NULL;
1275                         if (length < rx_copybreak)
1276                                 new_skb = netdev_alloc_skb(netdev, length);
1277
1278                         if (new_skb) {
1279                                 skb_copy_to_linear_data(new_skb,
1280                                                         skb->data + offset,
1281                                                         length);
1282                                 if (rx_flush)
1283                                         ibmveth_flush_buffer(skb->data,
1284                                                 length + offset);
1285                                 if (!ibmveth_rxq_recycle_buffer(adapter))
1286                                         kfree_skb(skb);
1287                                 skb = new_skb;
1288                         } else {
1289                                 ibmveth_rxq_harvest_buffer(adapter);
1290                                 skb_reserve(skb, offset);
1291                         }
1292
1293                         skb_put(skb, length);
1294                         skb->protocol = eth_type_trans(skb, netdev);
1295
1296                         if (csum_good) {
1297                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1298                                 if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
1299                                         iph = (struct iphdr *)skb->data;
1300
1301                                         /* If the IP checksum is not offloaded and if the packet
1302                                          *  is large send, the checksum must be rebuilt.
1303                                          */
1304                                         if (iph->check == 0xffff) {
1305                                                 iph->check = 0;
1306                                                 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1307                                         }
1308                                 }
1309                         }
1310
1311                         /* PHYP without PLSO support places a -1 in the ip
1312                          * checksum for large send frames.
1313                          */
1314                         if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
1315                                 struct iphdr *iph = (struct iphdr *)skb->data;
1316
1317                                 iph_check = iph->check;
1318                         }
1319
1320                         if ((length > netdev->mtu + ETH_HLEN) ||
1321                             lrg_pkt || iph_check == 0xffff) {
1322                                 ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
1323                                 adapter->rx_large_packets++;
1324                         }
1325
1326                         napi_gro_receive(napi, skb);    /* send it up */
1327
1328                         netdev->stats.rx_packets++;
1329                         netdev->stats.rx_bytes += length;
1330                         frames_processed++;
1331                 }
1332         }
1333
1334         ibmveth_replenish_task(adapter);
1335
1336         if (frames_processed < budget) {
1337                 napi_complete(napi);
1338
1339                 /* We think we are done - reenable interrupts,
1340                  * then check once more to make sure we are done.
1341                  */
1342                 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1343                                        VIO_IRQ_ENABLE);
1344
1345                 BUG_ON(lpar_rc != H_SUCCESS);
1346
1347                 if (ibmveth_rxq_pending_buffer(adapter) &&
1348                     napi_reschedule(napi)) {
1349                         lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1350                                                VIO_IRQ_DISABLE);
1351                 }
1352         }
1353
1354         return frames_processed;
1355 }
1356
1357 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1358 {
1359         struct net_device *netdev = dev_instance;
1360         struct ibmveth_adapter *adapter = netdev_priv(netdev);
1361         unsigned long lpar_rc;
1362
1363         if (napi_schedule_prep(&adapter->napi)) {
1364                 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1365                                        VIO_IRQ_DISABLE);
1366                 BUG_ON(lpar_rc != H_SUCCESS);
1367                 __napi_schedule(&adapter->napi);
1368         }
1369         return IRQ_HANDLED;
1370 }
1371
1372 static void ibmveth_set_multicast_list(struct net_device *netdev)
1373 {
1374         struct ibmveth_adapter *adapter = netdev_priv(netdev);
1375         unsigned long lpar_rc;
1376
1377         if ((netdev->flags & IFF_PROMISC) ||
1378             (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
1379                 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1380                                            IbmVethMcastEnableRecv |
1381                                            IbmVethMcastDisableFiltering,
1382                                            0);
1383                 if (lpar_rc != H_SUCCESS) {
1384                         netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1385                                    "entering promisc mode\n", lpar_rc);
1386                 }
1387         } else {
1388                 struct netdev_hw_addr *ha;
1389                 /* clear the filter table & disable filtering */
1390                 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1391                                            IbmVethMcastEnableRecv |
1392                                            IbmVethMcastDisableFiltering |
1393                                            IbmVethMcastClearFilterTable,
1394                                            0);
1395                 if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev,
				   "h_multicast_ctrl rc=%ld when attempting to clear filter table\n",
				   lpar_rc);
1399                 }
1400                 /* add the addresses to the filter table */
1401                 netdev_for_each_mc_addr(ha, netdev) {
1402                         /* add the multicast address to the filter table */
1403                         u64 mcast_addr;
1404                         mcast_addr = ibmveth_encode_mac_addr(ha->addr);
1405                         lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1406                                                    IbmVethMcastAddFilter,
1407                                                    mcast_addr);
1408                         if (lpar_rc != H_SUCCESS) {
				netdev_err(netdev,
					   "h_multicast_ctrl rc=%ld when adding an entry to the filter table\n",
					   lpar_rc);
1412                         }
1413                 }
1414
1415                 /* re-enable filtering */
1416                 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1417                                            IbmVethMcastEnableFiltering,
1418                                            0);
1419                 if (lpar_rc != H_SUCCESS) {
			netdev_err(netdev,
				   "h_multicast_ctrl rc=%ld when enabling filtering\n",
				   lpar_rc);
1422                 }
1423         }
1424 }
1425
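/* An MTU change only succeeds if some receive buffer pool can hold a
 * frame of the new size plus IBMVETH_BUFF_OH of overhead; if the device
 * is running it is briefly closed and reopened so the pools can be
 * reallocated.
 */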
1426 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1427 {
1428         struct ibmveth_adapter *adapter = netdev_priv(dev);
1429         struct vio_dev *viodev = adapter->vdev;
1430         int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
1431         int i, rc;
1432         int need_restart = 0;
1433
1434         if (new_mtu < IBMVETH_MIN_MTU)
1435                 return -EINVAL;
1436
1437         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1438                 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
1439                         break;
1440
1441         if (i == IBMVETH_NUM_BUFF_POOLS)
1442                 return -EINVAL;
1443
	/* Deactivate all the buffer pools so that the next loop can activate
	 * only the buffer pools necessary to hold the new MTU
	 */
1446         if (netif_running(adapter->netdev)) {
1447                 need_restart = 1;
1448                 adapter->pool_config = 1;
1449                 ibmveth_close(adapter->netdev);
1450                 adapter->pool_config = 0;
1451         }
1452
	/* Activate buffer pools in order until one can hold the new MTU */
1454         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1455                 adapter->rx_buff_pool[i].active = 1;
1456
1457                 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
1458                         dev->mtu = new_mtu;
			vio_cmo_set_dev_desired(viodev,
						ibmveth_get_desired_dma(viodev));
			if (need_restart)
				return ibmveth_open(adapter->netdev);
1465                         return 0;
1466                 }
1467         }
1468
	if (need_restart) {
		rc = ibmveth_open(adapter->netdev);
		if (rc)
			return rc;
	}
1471
1472         return -EINVAL;
1473 }
1474
1475 #ifdef CONFIG_NET_POLL_CONTROLLER
1476 static void ibmveth_poll_controller(struct net_device *dev)
1477 {
1478         ibmveth_replenish_task(netdev_priv(dev));
1479         ibmveth_interrupt(dev->irq, dev);
1480 }
1481 #endif
1482
1483 /**
1484  * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
1485  *
1486  * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
1487  *
1488  * Return value:
1489  *      Number of bytes of IO data the driver will need to perform well.
1490  */
1491 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1492 {
1493         struct net_device *netdev = dev_get_drvdata(&vdev->dev);
1494         struct ibmveth_adapter *adapter;
1495         struct iommu_table *tbl;
1496         unsigned long ret;
1497         int i;
1498         int rxqentries = 1;
1499
1500         tbl = get_iommu_table_base(&vdev->dev);
1501
	/* netdev is initialized at probe time along with the structures
	 * we need below
	 */
	if (!netdev)
1504                 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
1505
1506         adapter = netdev_priv(netdev);
1507
1508         ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1509         ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
1510
1511         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1512                 /* add the size of the active receive buffers */
1513                 if (adapter->rx_buff_pool[i].active)
			ret += adapter->rx_buff_pool[i].size *
			       IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].buff_size,
						tbl);
1518                 rxqentries += adapter->rx_buff_pool[i].size;
1519         }
1520         /* add the size of the receive queue entries */
1521         ret += IOMMU_PAGE_ALIGN(
1522                 rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
1523
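	/* Illustrative sizing (hypothetical numbers, assuming 4 KB IOMMU
	 * pages and a 16-byte struct ibmveth_rx_q_entry): with MTU 1500
	 * and one active pool of 512 buffers of 2048 bytes,
	 *
	 *   ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE
	 *       + IOMMU_PAGE_ALIGN(1500)        ->    4 KB
	 *       + 512 * IOMMU_PAGE_ALIGN(2048)  -> 2048 KB
	 *       + IOMMU_PAGE_ALIGN(513 * 16)    ->   12 KB  (rxq entries)
	 */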
1524         return ret;
1525 }
1526
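/* ndo_set_mac_address handler: the new address must be accepted by the
 * hypervisor via h_change_logical_lan_mac() before the netdev copy is
 * updated, so a failed hcall leaves the old address in place.
 */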
1527 static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
1528 {
1529         struct ibmveth_adapter *adapter = netdev_priv(dev);
1530         struct sockaddr *addr = p;
1531         u64 mac_address;
1532         int rc;
1533
1534         if (!is_valid_ether_addr(addr->sa_data))
1535                 return -EADDRNOTAVAIL;
1536
1537         mac_address = ibmveth_encode_mac_addr(addr->sa_data);
1538         rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
1539         if (rc) {
1540                 netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
1541                 return rc;
1542         }
1543
1544         ether_addr_copy(dev->dev_addr, addr->sa_data);
1545
1546         return 0;
1547 }
1548
1549 static const struct net_device_ops ibmveth_netdev_ops = {
1550         .ndo_open               = ibmveth_open,
1551         .ndo_stop               = ibmveth_close,
1552         .ndo_start_xmit         = ibmveth_start_xmit,
1553         .ndo_set_rx_mode        = ibmveth_set_multicast_list,
1554         .ndo_do_ioctl           = ibmveth_ioctl,
1555         .ndo_change_mtu         = ibmveth_change_mtu,
1556         .ndo_fix_features       = ibmveth_fix_features,
1557         .ndo_set_features       = ibmveth_set_features,
1558         .ndo_validate_addr      = eth_validate_addr,
1559         .ndo_set_mac_address    = ibmveth_set_mac_addr,
1560 #ifdef CONFIG_NET_POLL_CONTROLLER
1561         .ndo_poll_controller    = ibmveth_poll_controller,
1562 #endif
1563 };
1564
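/* Bus probe: read the MAC address and multicast filter size from the
 * device tree, set up the receive buffer pools and their sysfs kobjects
 * (CMO firmware gets its own pool-count defaults), and register the
 * net device.
 */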
1565 static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1566 {
1567         int rc, i, mac_len;
1568         struct net_device *netdev;
1569         struct ibmveth_adapter *adapter;
1570         unsigned char *mac_addr_p;
1571         __be32 *mcastFilterSize_p;
1572         long ret;
1573         unsigned long ret_attr;
1574
1575         dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
1576                 dev->unit_address);
1577
1578         mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
1579                                                         &mac_len);
1580         if (!mac_addr_p) {
1581                 dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
1582                 return -EINVAL;
1583         }
	/* Workaround for old/broken pHyp: an 8-byte VETH_MAC_ADDR holds
	 * the MAC in its last six bytes, so skip the two leading pad bytes
	 */
1585         if (mac_len == 8)
1586                 mac_addr_p += 2;
1587         else if (mac_len != 6) {
1588                 dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
1589                         mac_len);
1590                 return -EINVAL;
1591         }
1592
1593         mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
1594                                                         VETH_MCAST_FILTER_SIZE,
1595                                                         NULL);
1596         if (!mcastFilterSize_p) {
		dev_err(&dev->dev,
			"Can't find VETH_MCAST_FILTER_SIZE attribute\n");
1599                 return -EINVAL;
1600         }
1601
1602         netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
1603
1604         if (!netdev)
1605                 return -ENOMEM;
1606
1607         adapter = netdev_priv(netdev);
1608         dev_set_drvdata(&dev->dev, netdev);
1609
1610         adapter->vdev = dev;
1611         adapter->netdev = netdev;
1612         adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
1613         adapter->pool_config = 0;
1614
1615         netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
1616
1617         netdev->irq = dev->irq;
1618         netdev->netdev_ops = &ibmveth_netdev_ops;
1619         netdev->ethtool_ops = &netdev_ethtool_ops;
1620         SET_NETDEV_DEV(netdev, &dev->dev);
1621         netdev->hw_features = NETIF_F_SG;
1622         if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
1623                 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1624                                        NETIF_F_RXCSUM;
1625         }
1626
1627         netdev->features |= netdev->hw_features;
1628
1629         ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
1630
	/* On firmware without the new large-send method (or when
	 * old_large_send is set), keep TSO in hw_features only so it
	 * stays off unless explicitly enabled through ethtool
	 */
1632         if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
1633             !old_large_send) {
1634                 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1635                 netdev->features |= netdev->hw_features;
1636         } else {
1637                 netdev->hw_features |= NETIF_F_TSO;
1638         }
1639
1640         memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
1641
1642         if (firmware_has_feature(FW_FEATURE_CMO))
1643                 memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
1644
1645         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1646                 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1647                 int error;
1648
1649                 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
1650                                          pool_count[i], pool_size[i],
1651                                          pool_active[i]);
1652                 error = kobject_init_and_add(kobj, &ktype_veth_pool,
1653                                              &dev->dev.kobj, "pool%d", i);
1654                 if (!error)
1655                         kobject_uevent(kobj, KOBJ_ADD);
1656         }
1657
1658         netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
1659
1660         adapter->buffer_list_dma = DMA_ERROR_CODE;
1661         adapter->filter_list_dma = DMA_ERROR_CODE;
1662         adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
1663
1664         netdev_dbg(netdev, "registering netdev...\n");
1665
1666         ibmveth_set_features(netdev, netdev->features);
1667
1668         rc = register_netdev(netdev);
1669
1670         if (rc) {
1671                 netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
1672                 free_netdev(netdev);
1673                 return rc;
1674         }
1675
1676         netdev_dbg(netdev, "registered\n");
1677
1678         return 0;
1679 }
1680
1681 static int ibmveth_remove(struct vio_dev *dev)
1682 {
1683         struct net_device *netdev = dev_get_drvdata(&dev->dev);
1684         struct ibmveth_adapter *adapter = netdev_priv(netdev);
1685         int i;
1686
1687         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1688                 kobject_put(&adapter->rx_buff_pool[i].kobj);
1689
1690         unregister_netdev(netdev);
1691
1692         free_netdev(netdev);
1693         dev_set_drvdata(&dev->dev, NULL);
1694
1695         return 0;
1696 }
1697
1698 static struct attribute veth_active_attr;
1699 static struct attribute veth_num_attr;
1700 static struct attribute veth_size_attr;
1701
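/* sysfs show/store for the per-pool "active", "num" and "size"
 * attributes created under the vio device in ibmveth_probe().
 */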
1702 static ssize_t veth_pool_show(struct kobject *kobj,
1703                               struct attribute *attr, char *buf)
1704 {
1705         struct ibmveth_buff_pool *pool = container_of(kobj,
1706                                                       struct ibmveth_buff_pool,
1707                                                       kobj);
1708
1709         if (attr == &veth_active_attr)
1710                 return sprintf(buf, "%d\n", pool->active);
1711         else if (attr == &veth_num_attr)
1712                 return sprintf(buf, "%d\n", pool->size);
1713         else if (attr == &veth_size_attr)
1714                 return sprintf(buf, "%d\n", pool->buff_size);
1715         return 0;
1716 }
1717
1718 static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
1719                                const char *buf, size_t count)
1720 {
1721         struct ibmveth_buff_pool *pool = container_of(kobj,
1722                                                       struct ibmveth_buff_pool,
1723                                                       kobj);
	struct net_device *netdev =
		dev_get_drvdata(kobj_to_dev(kobj->parent));
1726         struct ibmveth_adapter *adapter = netdev_priv(netdev);
	long value;
	long rc;

	/* kstrtol rejects malformed input that simple_strtol would
	 * have silently truncated
	 */
	if (kstrtol(buf, 10, &value))
		return -EINVAL;
1729
1730         if (attr == &veth_active_attr) {
1731                 if (value && !pool->active) {
1732                         if (netif_running(netdev)) {
1733                                 if (ibmveth_alloc_buffer_pool(pool)) {
1734                                         netdev_err(netdev,
1735                                                    "unable to alloc pool\n");
1736                                         return -ENOMEM;
1737                                 }
1738                                 pool->active = 1;
1739                                 adapter->pool_config = 1;
1740                                 ibmveth_close(netdev);
1741                                 adapter->pool_config = 0;
				rc = ibmveth_open(netdev);
				if (rc)
					return rc;
1744                         } else {
1745                                 pool->active = 1;
1746                         }
1747                 } else if (!value && pool->active) {
1748                         int mtu = netdev->mtu + IBMVETH_BUFF_OH;
1749                         int i;
			/* Make sure there is a buffer pool with buffers that
			 * can hold a packet of the size of the MTU
			 */
1752                         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1753                                 if (pool == &adapter->rx_buff_pool[i])
1754                                         continue;
1755                                 if (!adapter->rx_buff_pool[i].active)
1756                                         continue;
1757                                 if (mtu <= adapter->rx_buff_pool[i].buff_size)
1758                                         break;
1759                         }
1760
1761                         if (i == IBMVETH_NUM_BUFF_POOLS) {
1762                                 netdev_err(netdev, "no active pool >= MTU\n");
1763                                 return -EPERM;
1764                         }
1765
1766                         if (netif_running(netdev)) {
1767                                 adapter->pool_config = 1;
1768                                 ibmveth_close(netdev);
1769                                 pool->active = 0;
1770                                 adapter->pool_config = 0;
				rc = ibmveth_open(netdev);
				if (rc)
					return rc;
1773                         }
1774                         pool->active = 0;
1775                 }
	} else if (attr == &veth_num_attr) {
		if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT)
			return -EINVAL;
		if (netif_running(netdev)) {
			adapter->pool_config = 1;
			ibmveth_close(netdev);
			adapter->pool_config = 0;
			pool->size = value;
			rc = ibmveth_open(netdev);
			if (rc)
				return rc;
		} else {
			pool->size = value;
		}
	} else if (attr == &veth_size_attr) {
		if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE)
			return -EINVAL;
		if (netif_running(netdev)) {
			adapter->pool_config = 1;
			ibmveth_close(netdev);
			adapter->pool_config = 0;
			pool->buff_size = value;
			rc = ibmveth_open(netdev);
			if (rc)
				return rc;
		} else {
			pool->buff_size = value;
		}
	}
1807
1808         /* kick the interrupt handler to allocate/deallocate pools */
1809         ibmveth_interrupt(netdev->irq, netdev);
1810         return count;
1811 }
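/* Example (illustrative only; the unit address 30000002 is hypothetical):
 * tuning receive pool 2 from userspace through the attributes above:
 *
 *   echo 768  > /sys/devices/vio/30000002/pool2/num     # buffer count
 *   echo 9216 > /sys/devices/vio/30000002/pool2/size    # buffer size
 *   echo 1    > /sys/devices/vio/30000002/pool2/active  # enable pool
 */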
1812
#define ATTR(_name, _mode)				\
	struct attribute veth_##_name##_attr = {	\
		.name = __stringify(_name),		\
		.mode = _mode,				\
	}
1818
1819 static ATTR(active, 0644);
1820 static ATTR(num, 0644);
1821 static ATTR(size, 0644);
1822
1823 static struct attribute *veth_pool_attrs[] = {
1824         &veth_active_attr,
1825         &veth_num_attr,
1826         &veth_size_attr,
1827         NULL,
1828 };
1829
1830 static const struct sysfs_ops veth_pool_ops = {
1831         .show   = veth_pool_show,
1832         .store  = veth_pool_store,
1833 };
1834
1835 static struct kobj_type ktype_veth_pool = {
1836         .release        = NULL,
1837         .sysfs_ops      = &veth_pool_ops,
1838         .default_attrs  = veth_pool_attrs,
1839 };
1840
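/* PM resume: poke the interrupt handler so NAPI runs once, replenishing
 * the buffer pools and harvesting anything the hypervisor queued while
 * the partition was suspended.
 */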
1841 static int ibmveth_resume(struct device *dev)
1842 {
1843         struct net_device *netdev = dev_get_drvdata(dev);
1844         ibmveth_interrupt(netdev->irq, netdev);
1845         return 0;
1846 }
1847
static const struct vio_device_id ibmveth_device_table[] = {
1849         { "network", "IBM,l-lan"},
1850         { "", "" }
1851 };
1852 MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
1853
static const struct dev_pm_ops ibmveth_pm_ops = {
1855         .resume = ibmveth_resume
1856 };
1857
1858 static struct vio_driver ibmveth_driver = {
1859         .id_table       = ibmveth_device_table,
1860         .probe          = ibmveth_probe,
1861         .remove         = ibmveth_remove,
1862         .get_desired_dma = ibmveth_get_desired_dma,
1863         .name           = ibmveth_driver_name,
1864         .pm             = &ibmveth_pm_ops,
1865 };
1866
1867 static int __init ibmveth_module_init(void)
1868 {
1869         printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
1870                ibmveth_driver_string, ibmveth_driver_version);
1871
1872         return vio_register_driver(&ibmveth_driver);
1873 }
1874
1875 static void __exit ibmveth_module_exit(void)
1876 {
1877         vio_unregister_driver(&ibmveth_driver);
1878 }
1879
1880 module_init(ibmveth_module_init);
1881 module_exit(ibmveth_module_exit);