GNU Linux-libre 4.4.297-gnu1: drivers/net/ethernet/ibm/ibmveth.c
1 /*
2  * IBM Power Virtual Ethernet Device Driver
3  *
4  * This program is free software; you can redistribute it and/or modify
5  * it under the terms of the GNU General Public License as published by
6  * the Free Software Foundation; either version 2 of the License, or
7  * (at your option) any later version.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  *
14  * You should have received a copy of the GNU General Public License
15  * along with this program; if not, see <http://www.gnu.org/licenses/>.
16  *
17  * Copyright (C) IBM Corporation, 2003, 2010
18  *
19  * Authors: Dave Larson <larson1@us.ibm.com>
20  *          Santiago Leon <santil@linux.vnet.ibm.com>
21  *          Brian King <brking@linux.vnet.ibm.com>
22  *          Robert Jennings <rcj@linux.vnet.ibm.com>
23  *          Anton Blanchard <anton@au.ibm.com>
24  */
25
26 #include <linux/module.h>
27 #include <linux/moduleparam.h>
28 #include <linux/types.h>
29 #include <linux/errno.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/kernel.h>
32 #include <linux/netdevice.h>
33 #include <linux/etherdevice.h>
34 #include <linux/skbuff.h>
35 #include <linux/init.h>
36 #include <linux/interrupt.h>
37 #include <linux/mm.h>
38 #include <linux/pm.h>
39 #include <linux/ethtool.h>
40 #include <linux/in.h>
41 #include <linux/ip.h>
42 #include <linux/ipv6.h>
43 #include <linux/slab.h>
44 #include <asm/hvcall.h>
45 #include <linux/atomic.h>
46 #include <asm/vio.h>
47 #include <asm/iommu.h>
48 #include <asm/firmware.h>
49
50 #include "ibmveth.h"
51
52 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance);
53 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter);
54 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev);
55
56 static struct kobj_type ktype_veth_pool;
57
58
59 static const char ibmveth_driver_name[] = "ibmveth";
60 static const char ibmveth_driver_string[] = "IBM Power Virtual Ethernet Driver";
61 #define ibmveth_driver_version "1.06"
62
63 MODULE_AUTHOR("Santiago Leon <santil@linux.vnet.ibm.com>");
64 MODULE_DESCRIPTION("IBM Power Virtual Ethernet Driver");
65 MODULE_LICENSE("GPL");
66 MODULE_VERSION(ibmveth_driver_version);
67
68 static unsigned int tx_copybreak __read_mostly = 128;
69 module_param(tx_copybreak, uint, 0644);
70 MODULE_PARM_DESC(tx_copybreak,
71         "Maximum size of packet that is copied to a new buffer on transmit");
72
73 static unsigned int rx_copybreak __read_mostly = 128;
74 module_param(rx_copybreak, uint, 0644);
75 MODULE_PARM_DESC(rx_copybreak,
76         "Maximum size of packet that is copied to a new buffer on receive");
77
78 static unsigned int rx_flush __read_mostly = 0;
79 module_param(rx_flush, uint, 0644);
80 MODULE_PARM_DESC(rx_flush, "Flush receive buffers before use");
81
82 static bool old_large_send __read_mostly;
83 module_param(old_large_send, bool, S_IRUGO);
84 MODULE_PARM_DESC(old_large_send,
85         "Use old large send method on firmware that supports the new method");
86
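/* ethtool statistics: each entry names a u64 counter in struct
 * ibmveth_adapter and records its byte offset so the value can be
 * read directly out of the adapter structure.
 */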
87 struct ibmveth_stat {
88         char name[ETH_GSTRING_LEN];
89         int offset;
90 };
91
92 #define IBMVETH_STAT_OFF(stat) offsetof(struct ibmveth_adapter, stat)
93 #define IBMVETH_GET_STAT(a, off) *((u64 *)(((unsigned long)(a)) + off))
94
95 struct ibmveth_stat ibmveth_stats[] = {
96         { "replenish_task_cycles", IBMVETH_STAT_OFF(replenish_task_cycles) },
97         { "replenish_no_mem", IBMVETH_STAT_OFF(replenish_no_mem) },
98         { "replenish_add_buff_failure",
99                         IBMVETH_STAT_OFF(replenish_add_buff_failure) },
100         { "replenish_add_buff_success",
101                         IBMVETH_STAT_OFF(replenish_add_buff_success) },
102         { "rx_invalid_buffer", IBMVETH_STAT_OFF(rx_invalid_buffer) },
103         { "rx_no_buffer", IBMVETH_STAT_OFF(rx_no_buffer) },
104         { "tx_map_failed", IBMVETH_STAT_OFF(tx_map_failed) },
105         { "tx_send_failed", IBMVETH_STAT_OFF(tx_send_failed) },
106         { "fw_enabled_ipv4_csum", IBMVETH_STAT_OFF(fw_ipv4_csum_support) },
107         { "fw_enabled_ipv6_csum", IBMVETH_STAT_OFF(fw_ipv6_csum_support) },
108         { "tx_large_packets", IBMVETH_STAT_OFF(tx_large_packets) },
109         { "rx_large_packets", IBMVETH_STAT_OFF(rx_large_packets) },
110         { "fw_enabled_large_send", IBMVETH_STAT_OFF(fw_large_send_support) }
111 };
112
113 /* simple methods of getting data from the current rxq entry */
114 static inline u32 ibmveth_rxq_flags(struct ibmveth_adapter *adapter)
115 {
116         return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].flags_off);
117 }
118
119 static inline int ibmveth_rxq_toggle(struct ibmveth_adapter *adapter)
120 {
121         return (ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_TOGGLE) >>
122                         IBMVETH_RXQ_TOGGLE_SHIFT;
123 }
124
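/* An rx queue entry is ready for the driver when its toggle bit matches
 * the driver's current toggle value; the driver flips its toggle each
 * time the queue index wraps, so entries left over from the previous
 * pass around the ring are not treated as new.
 */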
125 static inline int ibmveth_rxq_pending_buffer(struct ibmveth_adapter *adapter)
126 {
127         return ibmveth_rxq_toggle(adapter) == adapter->rx_queue.toggle;
128 }
129
130 static inline int ibmveth_rxq_buffer_valid(struct ibmveth_adapter *adapter)
131 {
132         return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_VALID;
133 }
134
135 static inline int ibmveth_rxq_frame_offset(struct ibmveth_adapter *adapter)
136 {
137         return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_OFF_MASK;
138 }
139
140 static inline int ibmveth_rxq_large_packet(struct ibmveth_adapter *adapter)
141 {
142         return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_LRG_PKT;
143 }
144
145 static inline int ibmveth_rxq_frame_length(struct ibmveth_adapter *adapter)
146 {
147         return be32_to_cpu(adapter->rx_queue.queue_addr[adapter->rx_queue.index].length);
148 }
149
150 static inline int ibmveth_rxq_csum_good(struct ibmveth_adapter *adapter)
151 {
152         return ibmveth_rxq_flags(adapter) & IBMVETH_RXQ_CSUM_GOOD;
153 }
154
155 /* setup the initial settings for a buffer pool */
156 static void ibmveth_init_buffer_pool(struct ibmveth_buff_pool *pool,
157                                      u32 pool_index, u32 pool_size,
158                                      u32 buff_size, u32 pool_active)
159 {
160         pool->size = pool_size;
161         pool->index = pool_index;
162         pool->buff_size = buff_size;
163         pool->threshold = pool_size * 7 / 8;
164         pool->active = pool_active;
165 }
166
167 /* allocate and set up a buffer pool - called during open */
168 static int ibmveth_alloc_buffer_pool(struct ibmveth_buff_pool *pool)
169 {
170         int i;
171
172         pool->free_map = kmalloc(sizeof(u16) * pool->size, GFP_KERNEL);
173
174         if (!pool->free_map)
175                 return -1;
176
177         pool->dma_addr = kmalloc(sizeof(dma_addr_t) * pool->size, GFP_KERNEL);
178         if (!pool->dma_addr) {
179                 kfree(pool->free_map);
180                 pool->free_map = NULL;
181                 return -1;
182         }
183
184         pool->skbuff = kcalloc(pool->size, sizeof(void *), GFP_KERNEL);
185
186         if (!pool->skbuff) {
187                 kfree(pool->dma_addr);
188                 pool->dma_addr = NULL;
189
190                 kfree(pool->free_map);
191                 pool->free_map = NULL;
192                 return -1;
193         }
194
195         memset(pool->dma_addr, 0, sizeof(dma_addr_t) * pool->size);
196
197         for (i = 0; i < pool->size; ++i)
198                 pool->free_map[i] = i;
199
200         atomic_set(&pool->available, 0);
201         pool->producer_index = 0;
202         pool->consumer_index = 0;
203
204         return 0;
205 }
206
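/* Flush a buffer out of the data cache one cache line at a time using
 * the Power dcbfl (data cache block flush) instruction.
 */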
207 static inline void ibmveth_flush_buffer(void *addr, unsigned long length)
208 {
209         unsigned long offset;
210
211         for (offset = 0; offset < length; offset += SMP_CACHE_BYTES)
212                 asm("dcbfl %0,%1" :: "b" (addr), "r" (offset));
213 }
214
215 /* replenish the buffers for a pool.  note that we don't need to
216  * skb_reserve these since they are used for incoming frames.
217  */
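/* Free slots are tracked through free_map: free_map[consumer_index]
 * yields the next free buffer slot to hand to firmware, and a slot
 * returned by firmware is recorded at free_map[producer_index].
 */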
218 static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter,
219                                           struct ibmveth_buff_pool *pool)
220 {
221         u32 i;
222         u32 count = pool->size - atomic_read(&pool->available);
223         u32 buffers_added = 0;
224         struct sk_buff *skb;
225         unsigned int free_index, index;
226         u64 correlator;
227         unsigned long lpar_rc;
228         dma_addr_t dma_addr;
229
230         mb();
231
232         for (i = 0; i < count; ++i) {
233                 union ibmveth_buf_desc desc;
234
235                 skb = netdev_alloc_skb(adapter->netdev, pool->buff_size);
236
237                 if (!skb) {
238                         netdev_dbg(adapter->netdev,
239                                    "replenish: unable to allocate skb\n");
240                         adapter->replenish_no_mem++;
241                         break;
242                 }
243
244                 free_index = pool->consumer_index;
245                 pool->consumer_index++;
246                 if (pool->consumer_index >= pool->size)
247                         pool->consumer_index = 0;
248                 index = pool->free_map[free_index];
249
250                 BUG_ON(index == IBM_VETH_INVALID_MAP);
251                 BUG_ON(pool->skbuff[index] != NULL);
252
253                 dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
254                                 pool->buff_size, DMA_FROM_DEVICE);
255
256                 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
257                         goto failure;
258
259                 pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
260                 pool->dma_addr[index] = dma_addr;
261                 pool->skbuff[index] = skb;
262
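                /* Pack the pool number and buffer index into the correlator
                 * and stash it at the start of the buffer; the rx queue entry
                 * echoes it back so the buffer can be found again on receive.
                 */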
263                 correlator = ((u64)pool->index << 32) | index;
264                 *(u64 *)skb->data = correlator;
265
266                 desc.fields.flags_len = IBMVETH_BUF_VALID | pool->buff_size;
267                 desc.fields.address = dma_addr;
268
269                 if (rx_flush) {
270                         unsigned int len = min(pool->buff_size,
271                                                 adapter->netdev->mtu +
272                                                 IBMVETH_BUFF_OH);
273                         ibmveth_flush_buffer(skb->data, len);
274                 }
275                 lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address,
276                                                    desc.desc);
277
278                 if (lpar_rc != H_SUCCESS) {
279                         goto failure;
280                 } else {
281                         buffers_added++;
282                         adapter->replenish_add_buff_success++;
283                 }
284         }
285
286         mb();
287         atomic_add(buffers_added, &(pool->available));
288         return;
289
290 failure:
291         pool->free_map[free_index] = index;
292         pool->skbuff[index] = NULL;
293         if (pool->consumer_index == 0)
294                 pool->consumer_index = pool->size - 1;
295         else
296                 pool->consumer_index--;
297         if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
298                 dma_unmap_single(&adapter->vdev->dev,
299                                  pool->dma_addr[index], pool->buff_size,
300                                  DMA_FROM_DEVICE);
301         dev_kfree_skb_any(skb);
302         adapter->replenish_add_buff_failure++;
303
304         mb();
305         atomic_add(buffers_added, &(pool->available));
306 }
307
308 /*
309  * The final 8 bytes of the buffer list is a counter of frames dropped
310  * because there was not a buffer in the buffer list capable of holding
311  * the frame.
312  */
313 static void ibmveth_update_rx_no_buffer(struct ibmveth_adapter *adapter)
314 {
315         __be64 *p = adapter->buffer_list_addr + 4096 - 8;
316
317         adapter->rx_no_buffer = be64_to_cpup(p);
318 }
319
320 /* replenish routine */
321 static void ibmveth_replenish_task(struct ibmveth_adapter *adapter)
322 {
323         int i;
324
325         adapter->replenish_task_cycles++;
326
327         for (i = (IBMVETH_NUM_BUFF_POOLS - 1); i >= 0; i--) {
328                 struct ibmveth_buff_pool *pool = &adapter->rx_buff_pool[i];
329
330                 if (pool->active &&
331                     (atomic_read(&pool->available) < pool->threshold))
332                         ibmveth_replenish_buffer_pool(adapter, pool);
333         }
334
335         ibmveth_update_rx_no_buffer(adapter);
336 }
337
338 /* empty and free a buffer pool - also used to do cleanup in error paths */
339 static void ibmveth_free_buffer_pool(struct ibmveth_adapter *adapter,
340                                      struct ibmveth_buff_pool *pool)
341 {
342         int i;
343
344         kfree(pool->free_map);
345         pool->free_map = NULL;
346
347         if (pool->skbuff && pool->dma_addr) {
348                 for (i = 0; i < pool->size; ++i) {
349                         struct sk_buff *skb = pool->skbuff[i];
350                         if (skb) {
351                                 dma_unmap_single(&adapter->vdev->dev,
352                                                  pool->dma_addr[i],
353                                                  pool->buff_size,
354                                                  DMA_FROM_DEVICE);
355                                 dev_kfree_skb_any(skb);
356                                 pool->skbuff[i] = NULL;
357                         }
358                 }
359         }
360
361         if (pool->dma_addr) {
362                 kfree(pool->dma_addr);
363                 pool->dma_addr = NULL;
364         }
365
366         if (pool->skbuff) {
367                 kfree(pool->skbuff);
368                 pool->skbuff = NULL;
369         }
370 }
371
372 /* remove a buffer from a pool */
373 static void ibmveth_remove_buffer_from_pool(struct ibmveth_adapter *adapter,
374                                             u64 correlator)
375 {
376         unsigned int pool  = correlator >> 32;
377         unsigned int index = correlator & 0xffffffffUL;
378         unsigned int free_index;
379         struct sk_buff *skb;
380
381         BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
382         BUG_ON(index >= adapter->rx_buff_pool[pool].size);
383
384         skb = adapter->rx_buff_pool[pool].skbuff[index];
385
386         BUG_ON(skb == NULL);
387
388         adapter->rx_buff_pool[pool].skbuff[index] = NULL;
389
390         dma_unmap_single(&adapter->vdev->dev,
391                          adapter->rx_buff_pool[pool].dma_addr[index],
392                          adapter->rx_buff_pool[pool].buff_size,
393                          DMA_FROM_DEVICE);
394
395         free_index = adapter->rx_buff_pool[pool].producer_index;
396         adapter->rx_buff_pool[pool].producer_index++;
397         if (adapter->rx_buff_pool[pool].producer_index >=
398             adapter->rx_buff_pool[pool].size)
399                 adapter->rx_buff_pool[pool].producer_index = 0;
400         adapter->rx_buff_pool[pool].free_map[free_index] = index;
401
402         mb();
403
404         atomic_dec(&(adapter->rx_buff_pool[pool].available));
405 }
406
407 /* get the current buffer on the rx queue */
408 static inline struct sk_buff *ibmveth_rxq_get_buffer(struct ibmveth_adapter *adapter)
409 {
410         u64 correlator = adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator;
411         unsigned int pool = correlator >> 32;
412         unsigned int index = correlator & 0xffffffffUL;
413
414         BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
415         BUG_ON(index >= adapter->rx_buff_pool[pool].size);
416
417         return adapter->rx_buff_pool[pool].skbuff[index];
418 }
419
420 /* recycle the current buffer on the rx queue */
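/* Returns 0 when h_add_logical_lan_buffer fails and the buffer has been
 * pulled back out of its pool, in which case the caller still owns the
 * skb; otherwise returns 1.
 */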
421 static int ibmveth_rxq_recycle_buffer(struct ibmveth_adapter *adapter)
422 {
423         u32 q_index = adapter->rx_queue.index;
424         u64 correlator = adapter->rx_queue.queue_addr[q_index].correlator;
425         unsigned int pool = correlator >> 32;
426         unsigned int index = correlator & 0xffffffffUL;
427         union ibmveth_buf_desc desc;
428         unsigned long lpar_rc;
429         int ret = 1;
430
431         BUG_ON(pool >= IBMVETH_NUM_BUFF_POOLS);
432         BUG_ON(index >= adapter->rx_buff_pool[pool].size);
433
434         if (!adapter->rx_buff_pool[pool].active) {
435                 ibmveth_rxq_harvest_buffer(adapter);
436                 ibmveth_free_buffer_pool(adapter, &adapter->rx_buff_pool[pool]);
437                 goto out;
438         }
439
440         desc.fields.flags_len = IBMVETH_BUF_VALID |
441                 adapter->rx_buff_pool[pool].buff_size;
442         desc.fields.address = adapter->rx_buff_pool[pool].dma_addr[index];
443
444         lpar_rc = h_add_logical_lan_buffer(adapter->vdev->unit_address, desc.desc);
445
446         if (lpar_rc != H_SUCCESS) {
447                 netdev_dbg(adapter->netdev, "h_add_logical_lan_buffer failed "
448                            "during recycle rc=%ld", lpar_rc);
449                 ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
450                 ret = 0;
451         }
452
453         if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
454                 adapter->rx_queue.index = 0;
455                 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
456         }
457
458 out:
459         return ret;
460 }
461
462 static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
463 {
464         ibmveth_remove_buffer_from_pool(adapter, adapter->rx_queue.queue_addr[adapter->rx_queue.index].correlator);
465
466         if (++adapter->rx_queue.index == adapter->rx_queue.num_slots) {
467                 adapter->rx_queue.index = 0;
468                 adapter->rx_queue.toggle = !adapter->rx_queue.toggle;
469         }
470 }
471
472 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
473 {
474         int i;
475         struct device *dev = &adapter->vdev->dev;
476
477         if (adapter->buffer_list_addr != NULL) {
478                 if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
479                         dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
480                                         DMA_BIDIRECTIONAL);
481                         adapter->buffer_list_dma = DMA_ERROR_CODE;
482                 }
483                 free_page((unsigned long)adapter->buffer_list_addr);
484                 adapter->buffer_list_addr = NULL;
485         }
486
487         if (adapter->filter_list_addr != NULL) {
488                 if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
489                         dma_unmap_single(dev, adapter->filter_list_dma, 4096,
490                                         DMA_BIDIRECTIONAL);
491                         adapter->filter_list_dma = DMA_ERROR_CODE;
492                 }
493                 free_page((unsigned long)adapter->filter_list_addr);
494                 adapter->filter_list_addr = NULL;
495         }
496
497         if (adapter->rx_queue.queue_addr != NULL) {
498                 dma_free_coherent(dev, adapter->rx_queue.queue_len,
499                                   adapter->rx_queue.queue_addr,
500                                   adapter->rx_queue.queue_dma);
501                 adapter->rx_queue.queue_addr = NULL;
502         }
503
504         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
505                 if (adapter->rx_buff_pool[i].active)
506                         ibmveth_free_buffer_pool(adapter,
507                                                  &adapter->rx_buff_pool[i]);
508
509         if (adapter->bounce_buffer != NULL) {
510                 if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
511                         dma_unmap_single(&adapter->vdev->dev,
512                                         adapter->bounce_buffer_dma,
513                                         adapter->netdev->mtu + IBMVETH_BUFF_OH,
514                                         DMA_BIDIRECTIONAL);
515                         adapter->bounce_buffer_dma = DMA_ERROR_CODE;
516                 }
517                 kfree(adapter->bounce_buffer);
518                 adapter->bounce_buffer = NULL;
519         }
520 }
521
522 static int ibmveth_register_logical_lan(struct ibmveth_adapter *adapter,
523         union ibmveth_buf_desc rxq_desc, u64 mac_address)
524 {
525         int rc, try_again = 1;
526
527         /*
528          * After a kexec the adapter will still be open, so our attempt to
529          * open it will fail. So if we get a failure we free the adapter and
530          * try again, but only once.
531          */
532 retry:
533         rc = h_register_logical_lan(adapter->vdev->unit_address,
534                                     adapter->buffer_list_dma, rxq_desc.desc,
535                                     adapter->filter_list_dma, mac_address);
536
537         if (rc != H_SUCCESS && try_again) {
538                 do {
539                         rc = h_free_logical_lan(adapter->vdev->unit_address);
540                 } while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));
541
542                 try_again = 0;
543                 goto retry;
544         }
545
546         return rc;
547 }
548
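/* Pack the six MAC address bytes, most significant byte first, into the
 * low 48 bits of a u64 for the logical LAN hypervisor calls.
 */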
549 static u64 ibmveth_encode_mac_addr(u8 *mac)
550 {
551         int i;
552         u64 encoded = 0;
553
554         for (i = 0; i < ETH_ALEN; i++)
555                 encoded = (encoded << 8) | mac[i];
556
557         return encoded;
558 }
559
560 static int ibmveth_open(struct net_device *netdev)
561 {
562         struct ibmveth_adapter *adapter = netdev_priv(netdev);
563         u64 mac_address;
564         int rxq_entries = 1;
565         unsigned long lpar_rc;
566         int rc;
567         union ibmveth_buf_desc rxq_desc;
568         int i;
569         struct device *dev;
570
571         netdev_dbg(netdev, "open starting\n");
572
573         napi_enable(&adapter->napi);
574
575         for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
576                 rxq_entries += adapter->rx_buff_pool[i].size;
577
578         adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
579         adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
580
581         if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
582                 netdev_err(netdev, "unable to allocate filter or buffer list "
583                            "pages\n");
584                 rc = -ENOMEM;
585                 goto err_out;
586         }
587
588         dev = &adapter->vdev->dev;
589
590         adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
591                                                 rxq_entries;
592         adapter->rx_queue.queue_addr =
593                 dma_alloc_coherent(dev, adapter->rx_queue.queue_len,
594                                    &adapter->rx_queue.queue_dma, GFP_KERNEL);
595         if (!adapter->rx_queue.queue_addr) {
596                 rc = -ENOMEM;
597                 goto err_out;
598         }
599
600         adapter->buffer_list_dma = dma_map_single(dev,
601                         adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
602         adapter->filter_list_dma = dma_map_single(dev,
603                         adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
604
605         if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
606             (dma_mapping_error(dev, adapter->filter_list_dma))) {
607                 netdev_err(netdev, "unable to map filter or buffer list "
608                            "pages\n");
609                 rc = -ENOMEM;
610                 goto err_out;
611         }
612
613         adapter->rx_queue.index = 0;
614         adapter->rx_queue.num_slots = rxq_entries;
615         adapter->rx_queue.toggle = 1;
616
617         mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
618
619         rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
620                                         adapter->rx_queue.queue_len;
621         rxq_desc.fields.address = adapter->rx_queue.queue_dma;
622
623         netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
624         netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
625         netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);
626
627         h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
628
629         lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);
630
631         if (lpar_rc != H_SUCCESS) {
632                 netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
633                            lpar_rc);
634                 netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
635                            "desc:0x%llx MAC:0x%llx\n",
636                                      adapter->buffer_list_dma,
637                                      adapter->filter_list_dma,
638                                      rxq_desc.desc,
639                                      mac_address);
640                 rc = -ENONET;
641                 goto err_out;
642         }
643
644         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
645                 if (!adapter->rx_buff_pool[i].active)
646                         continue;
647                 if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
648                         netdev_err(netdev, "unable to alloc pool\n");
649                         adapter->rx_buff_pool[i].active = 0;
650                         rc = -ENOMEM;
651                         goto err_out;
652                 }
653         }
654
655         netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
656         rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
657                          netdev);
658         if (rc != 0) {
659                 netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
660                            netdev->irq, rc);
661                 do {
662                         lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
663                 } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
664
665                 goto err_out;
666         }
667
668         adapter->bounce_buffer =
669             kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
670         if (!adapter->bounce_buffer) {
671                 rc = -ENOMEM;
672                 goto err_out_free_irq;
673         }
674         adapter->bounce_buffer_dma =
675             dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
676                            netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
677         if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
678                 netdev_err(netdev, "unable to map bounce buffer\n");
679                 rc = -ENOMEM;
680                 goto err_out_free_irq;
681         }
682
683         netdev_dbg(netdev, "initial replenish cycle\n");
684         ibmveth_interrupt(netdev->irq, netdev);
685
686         netif_start_queue(netdev);
687
688         netdev_dbg(netdev, "open complete\n");
689
690         return 0;
691
692 err_out_free_irq:
693         free_irq(netdev->irq, netdev);
694 err_out:
695         ibmveth_cleanup(adapter);
696         napi_disable(&adapter->napi);
697         return rc;
698 }
699
700 static int ibmveth_close(struct net_device *netdev)
701 {
702         struct ibmveth_adapter *adapter = netdev_priv(netdev);
703         long lpar_rc;
704
705         netdev_dbg(netdev, "close starting\n");
706
707         napi_disable(&adapter->napi);
708
709         if (!adapter->pool_config)
710                 netif_stop_queue(netdev);
711
712         h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);
713
714         do {
715                 lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
716         } while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));
717
718         if (lpar_rc != H_SUCCESS) {
719                 netdev_err(netdev, "h_free_logical_lan failed with %lx, "
720                            "continuing with close\n", lpar_rc);
721         }
722
723         free_irq(netdev->irq, netdev);
724
725         ibmveth_update_rx_no_buffer(adapter);
726
727         ibmveth_cleanup(adapter);
728
729         netdev_dbg(netdev, "close complete\n");
730
731         return 0;
732 }
733
734 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
735 {
736         cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
737                                 SUPPORTED_FIBRE);
738         cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
739                                 ADVERTISED_FIBRE);
740         ethtool_cmd_speed_set(cmd, SPEED_1000);
741         cmd->duplex = DUPLEX_FULL;
742         cmd->port = PORT_FIBRE;
743         cmd->phy_address = 0;
744         cmd->transceiver = XCVR_INTERNAL;
745         cmd->autoneg = AUTONEG_ENABLE;
746         cmd->maxtxpkt = 0;
747         cmd->maxrxpkt = 1;
748         return 0;
749 }
750
751 static void netdev_get_drvinfo(struct net_device *dev,
752                                struct ethtool_drvinfo *info)
753 {
754         strlcpy(info->driver, ibmveth_driver_name, sizeof(info->driver));
755         strlcpy(info->version, ibmveth_driver_version, sizeof(info->version));
756 }
757
758 static netdev_features_t ibmveth_fix_features(struct net_device *dev,
759         netdev_features_t features)
760 {
761         /*
762          * Since the ibmveth firmware interface does not have the
763          * concept of separate tx/rx checksum offload enable, if rx
764          * checksum is disabled we also have to disable tx checksum
765          * offload. Once we disable rx checksum offload, we are no
766          * longer allowed to send tx buffers that are not properly
767          * checksummed.
768          */
769
770         if (!(features & NETIF_F_RXCSUM))
771                 features &= ~NETIF_F_ALL_CSUM;
772
773         return features;
774 }
775
776 static int ibmveth_set_csum_offload(struct net_device *dev, u32 data)
777 {
778         struct ibmveth_adapter *adapter = netdev_priv(dev);
779         unsigned long set_attr, clr_attr, ret_attr;
780         unsigned long set_attr6, clr_attr6;
781         long ret, ret4, ret6;
782         int rc1 = 0, rc2 = 0;
783         int restart = 0;
784
785         if (netif_running(dev)) {
786                 restart = 1;
787                 adapter->pool_config = 1;
788                 ibmveth_close(dev);
789                 adapter->pool_config = 0;
790         }
791
792         set_attr = 0;
793         clr_attr = 0;
794         set_attr6 = 0;
795         clr_attr6 = 0;
796
797         if (data) {
798                 set_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
799                 set_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
800         } else {
801                 clr_attr = IBMVETH_ILLAN_IPV4_TCP_CSUM;
802                 clr_attr6 = IBMVETH_ILLAN_IPV6_TCP_CSUM;
803         }
804
805         ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
806
807         if (ret == H_SUCCESS && !(ret_attr & IBMVETH_ILLAN_ACTIVE_TRUNK) &&
808             !(ret_attr & IBMVETH_ILLAN_TRUNK_PRI_MASK) &&
809             (ret_attr & IBMVETH_ILLAN_PADDED_PKT_CSUM)) {
810                 ret4 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
811                                          set_attr, &ret_attr);
812
813                 if (ret4 != H_SUCCESS) {
814                         netdev_err(dev, "unable to change IPv4 checksum "
815                                         "offload settings. %d rc=%ld\n",
816                                         data, ret4);
817
818                         h_illan_attributes(adapter->vdev->unit_address,
819                                            set_attr, clr_attr, &ret_attr);
820
821                         if (data == 1)
822                                 dev->features &= ~NETIF_F_IP_CSUM;
823
824                 } else {
825                         adapter->fw_ipv4_csum_support = data;
826                 }
827
828                 ret6 = h_illan_attributes(adapter->vdev->unit_address,
829                                          clr_attr6, set_attr6, &ret_attr);
830
831                 if (ret6 != H_SUCCESS) {
832                         netdev_err(dev, "unable to change IPv6 checksum "
833                                         "offload settings. %d rc=%ld\n",
834                                         data, ret6);
835
836                         h_illan_attributes(adapter->vdev->unit_address,
837                                            set_attr6, clr_attr6, &ret_attr);
838
839                         if (data == 1)
840                                 dev->features &= ~NETIF_F_IPV6_CSUM;
841
842                 } else
843                         adapter->fw_ipv6_csum_support = data;
844
845                 if (ret4 == H_SUCCESS || ret6 == H_SUCCESS)
846                         adapter->rx_csum = data;
847                 else
848                         rc1 = -EIO;
849         } else {
850                 rc1 = -EIO;
851                 netdev_err(dev, "unable to change checksum offload settings."
852                                      " %d rc=%ld ret_attr=%lx\n", data, ret,
853                                      ret_attr);
854         }
855
856         if (restart)
857                 rc2 = ibmveth_open(dev);
858
859         return rc1 ? rc1 : rc2;
860 }
861
862 static int ibmveth_set_tso(struct net_device *dev, u32 data)
863 {
864         struct ibmveth_adapter *adapter = netdev_priv(dev);
865         unsigned long set_attr, clr_attr, ret_attr;
866         long ret1, ret2;
867         int rc1 = 0, rc2 = 0;
868         int restart = 0;
869
870         if (netif_running(dev)) {
871                 restart = 1;
872                 adapter->pool_config = 1;
873                 ibmveth_close(dev);
874                 adapter->pool_config = 0;
875         }
876
877         set_attr = 0;
878         clr_attr = 0;
879
880         if (data)
881                 set_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
882         else
883                 clr_attr = IBMVETH_ILLAN_LRG_SR_ENABLED;
884
885         ret1 = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
886
887         if (ret1 == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
888             !old_large_send) {
889                 ret2 = h_illan_attributes(adapter->vdev->unit_address, clr_attr,
890                                           set_attr, &ret_attr);
891
892                 if (ret2 != H_SUCCESS) {
893                         netdev_err(dev, "unable to change tso settings. %d rc=%ld\n",
894                                    data, ret2);
895
896                         h_illan_attributes(adapter->vdev->unit_address,
897                                            set_attr, clr_attr, &ret_attr);
898
899                         if (data == 1)
900                                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
901                         rc1 = -EIO;
902
903                 } else {
904                         adapter->fw_large_send_support = data;
905                         adapter->large_send = data;
906                 }
907         } else {
908                 /* Older firmware version of large send offload does not
909                  * support tcp6/ipv6
910                  */
911                 if (data == 1) {
912                         dev->features &= ~NETIF_F_TSO6;
913                         netdev_info(dev, "TSO feature requires all partitions to have updated driver");
914                 }
915                 adapter->large_send = data;
916         }
917
918         if (restart)
919                 rc2 = ibmveth_open(dev);
920
921         return rc1 ? rc1 : rc2;
922 }
923
924 static int ibmveth_set_features(struct net_device *dev,
925         netdev_features_t features)
926 {
927         struct ibmveth_adapter *adapter = netdev_priv(dev);
928         int rx_csum = !!(features & NETIF_F_RXCSUM);
929         int large_send = !!(features & (NETIF_F_TSO | NETIF_F_TSO6));
930         int rc1 = 0, rc2 = 0;
931
932         if (rx_csum != adapter->rx_csum) {
933                 rc1 = ibmveth_set_csum_offload(dev, rx_csum);
934                 if (rc1 && !adapter->rx_csum)
935                         dev->features =
936                                 features & ~(NETIF_F_ALL_CSUM | NETIF_F_RXCSUM);
937         }
938
939         if (large_send != adapter->large_send) {
940                 rc2 = ibmveth_set_tso(dev, large_send);
941                 if (rc2 && !adapter->large_send)
942                         dev->features =
943                                 features & ~(NETIF_F_TSO | NETIF_F_TSO6);
944         }
945
946         return rc1 ? rc1 : rc2;
947 }
948
949 static void ibmveth_get_strings(struct net_device *dev, u32 stringset, u8 *data)
950 {
951         int i;
952
953         if (stringset != ETH_SS_STATS)
954                 return;
955
956         for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++, data += ETH_GSTRING_LEN)
957                 memcpy(data, ibmveth_stats[i].name, ETH_GSTRING_LEN);
958 }
959
960 static int ibmveth_get_sset_count(struct net_device *dev, int sset)
961 {
962         switch (sset) {
963         case ETH_SS_STATS:
964                 return ARRAY_SIZE(ibmveth_stats);
965         default:
966                 return -EOPNOTSUPP;
967         }
968 }
969
970 static void ibmveth_get_ethtool_stats(struct net_device *dev,
971                                       struct ethtool_stats *stats, u64 *data)
972 {
973         int i;
974         struct ibmveth_adapter *adapter = netdev_priv(dev);
975
976         for (i = 0; i < ARRAY_SIZE(ibmveth_stats); i++)
977                 data[i] = IBMVETH_GET_STAT(adapter, ibmveth_stats[i].offset);
978 }
979
980 static const struct ethtool_ops netdev_ethtool_ops = {
981         .get_drvinfo            = netdev_get_drvinfo,
982         .get_settings           = netdev_get_settings,
983         .get_link               = ethtool_op_get_link,
984         .get_strings            = ibmveth_get_strings,
985         .get_sset_count         = ibmveth_get_sset_count,
986         .get_ethtool_stats      = ibmveth_get_ethtool_stats,
987 };
988
989 static int ibmveth_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
990 {
991         return -EOPNOTSUPP;
992 }
993
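/* Byte offset of an address within a 4KB page. */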
994 #define page_offset(v) ((unsigned long)(v) & ((1 << 12) - 1))
995
996 static int ibmveth_send(struct ibmveth_adapter *adapter,
997                         union ibmveth_buf_desc *descs, unsigned long mss)
998 {
999         unsigned long correlator;
1000         unsigned int retry_count;
1001         unsigned long ret;
1002
1003         /*
1004          * The retry count sets a maximum for the number of broadcast and
1005          * multicast destinations within the system.
1006          */
1007         retry_count = 1024;
1008         correlator = 0;
1009         do {
1010                 ret = h_send_logical_lan(adapter->vdev->unit_address,
1011                                              descs[0].desc, descs[1].desc,
1012                                              descs[2].desc, descs[3].desc,
1013                                              descs[4].desc, descs[5].desc,
1014                                              correlator, &correlator, mss,
1015                                              adapter->fw_large_send_support);
1016         } while ((ret == H_BUSY) && (retry_count--));
1017
1018         if (ret != H_SUCCESS && ret != H_DROPPED) {
1019                 netdev_err(adapter->netdev, "tx: h_send_logical_lan failed "
1020                            "with rc=%ld\n", ret);
1021                 return 1;
1022         }
1023
1024         return 0;
1025 }
1026
1027 static netdev_tx_t ibmveth_start_xmit(struct sk_buff *skb,
1028                                       struct net_device *netdev)
1029 {
1030         struct ibmveth_adapter *adapter = netdev_priv(netdev);
1031         unsigned int desc_flags;
1032         union ibmveth_buf_desc descs[6];
1033         int last, i;
1034         int force_bounce = 0;
1035         dma_addr_t dma_addr;
1036         unsigned long mss = 0;
1037
1038         /*
1039          * veth handles a maximum of 6 segments including the header, so
1040          * we have to linearize the skb if there are more than this.
1041          */
1042         if (skb_shinfo(skb)->nr_frags > 5 && __skb_linearize(skb)) {
1043                 netdev->stats.tx_dropped++;
1044                 goto out;
1045         }
1046
1047         /* veth can't checksum offload UDP */
1048         if (skb->ip_summed == CHECKSUM_PARTIAL &&
1049             ((skb->protocol == htons(ETH_P_IP) &&
1050               ip_hdr(skb)->protocol != IPPROTO_TCP) ||
1051              (skb->protocol == htons(ETH_P_IPV6) &&
1052               ipv6_hdr(skb)->nexthdr != IPPROTO_TCP)) &&
1053             skb_checksum_help(skb)) {
1054
1055                 netdev_err(netdev, "tx: failed to checksum packet\n");
1056                 netdev->stats.tx_dropped++;
1057                 goto out;
1058         }
1059
1060         desc_flags = IBMVETH_BUF_VALID;
1061
1062         if (skb_is_gso(skb) && adapter->fw_large_send_support)
1063                 desc_flags |= IBMVETH_BUF_LRG_SND;
1064
1065         if (skb->ip_summed == CHECKSUM_PARTIAL) {
1066                 unsigned char *buf = skb_transport_header(skb) +
1067                                                 skb->csum_offset;
1068
1069                 desc_flags |= (IBMVETH_BUF_NO_CSUM | IBMVETH_BUF_CSUM_GOOD);
1070
1071                 /* Need to zero out the checksum */
1072                 buf[0] = 0;
1073                 buf[1] = 0;
1074         }
1075
1076 retry_bounce:
1077         memset(descs, 0, sizeof(descs));
1078
1079         /*
1080          * If a linear packet is below the rx threshold then
1081          * copy it into the static bounce buffer. This avoids the
1082          * cost of a TCE insert and remove.
1083          */
1084         if (force_bounce || (!skb_is_nonlinear(skb) &&
1085                                 (skb->len < tx_copybreak))) {
1086                 skb_copy_from_linear_data(skb, adapter->bounce_buffer,
1087                                           skb->len);
1088
1089                 descs[0].fields.flags_len = desc_flags | skb->len;
1090                 descs[0].fields.address = adapter->bounce_buffer_dma;
1091
1092                 if (ibmveth_send(adapter, descs, 0)) {
1093                         adapter->tx_send_failed++;
1094                         netdev->stats.tx_dropped++;
1095                 } else {
1096                         netdev->stats.tx_packets++;
1097                         netdev->stats.tx_bytes += skb->len;
1098                 }
1099
1100                 goto out;
1101         }
1102
1103         /* Map the header */
1104         dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
1105                                   skb_headlen(skb), DMA_TO_DEVICE);
1106         if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
1107                 goto map_failed;
1108
1109         descs[0].fields.flags_len = desc_flags | skb_headlen(skb);
1110         descs[0].fields.address = dma_addr;
1111
1112         /* Map the frags */
1113         for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1114                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1115
1116                 dma_addr = skb_frag_dma_map(&adapter->vdev->dev, frag, 0,
1117                                             skb_frag_size(frag), DMA_TO_DEVICE);
1118
1119                 if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
1120                         goto map_failed_frags;
1121
1122                 descs[i+1].fields.flags_len = desc_flags | skb_frag_size(frag);
1123                 descs[i+1].fields.address = dma_addr;
1124         }
1125
1126         if (skb_is_gso(skb)) {
1127                 if (adapter->fw_large_send_support) {
1128                         mss = (unsigned long)skb_shinfo(skb)->gso_size;
1129                         adapter->tx_large_packets++;
1130                 } else if (!skb_is_gso_v6(skb)) {
1131                         /* Put -1 in the IP checksum to tell phyp it
1132                          * is a largesend packet. Put the mss in
1133                          * the TCP checksum.
1134                          */
1135                         ip_hdr(skb)->check = 0xffff;
1136                         tcp_hdr(skb)->check =
1137                                 cpu_to_be16(skb_shinfo(skb)->gso_size);
1138                         adapter->tx_large_packets++;
1139                 }
1140         }
1141
1142         if (ibmveth_send(adapter, descs, mss)) {
1143                 adapter->tx_send_failed++;
1144                 netdev->stats.tx_dropped++;
1145         } else {
1146                 netdev->stats.tx_packets++;
1147                 netdev->stats.tx_bytes += skb->len;
1148         }
1149
1150         dma_unmap_single(&adapter->vdev->dev,
1151                          descs[0].fields.address,
1152                          descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1153                          DMA_TO_DEVICE);
1154
1155         for (i = 1; i < skb_shinfo(skb)->nr_frags + 1; i++)
1156                 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
1157                                descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1158                                DMA_TO_DEVICE);
1159
1160 out:
1161         dev_consume_skb_any(skb);
1162         return NETDEV_TX_OK;
1163
1164 map_failed_frags:
1165         last = i+1;
1166         for (i = 1; i < last; i++)
1167                 dma_unmap_page(&adapter->vdev->dev, descs[i].fields.address,
1168                                descs[i].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1169                                DMA_TO_DEVICE);
1170
1171         dma_unmap_single(&adapter->vdev->dev,
1172                          descs[0].fields.address,
1173                          descs[0].fields.flags_len & IBMVETH_BUF_LEN_MASK,
1174                          DMA_TO_DEVICE);
1175 map_failed:
1176         if (!firmware_has_feature(FW_FEATURE_CMO))
1177                 netdev_err(netdev, "tx: unable to map xmit buffer\n");
1178         adapter->tx_map_failed++;
1179         skb_linearize(skb);
1180         force_bounce = 1;
1181         goto retry_bounce;
1182 }
1183
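/* Recover gso_size for a TCP frame that firmware aggregated into a
 * large receive: either from the mss reported alongside the rx buffer
 * (large packet bit set) or, with older firmware, from the value the
 * sending partition stashed in the TCP checksum field.
 */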
1184 static void ibmveth_rx_mss_helper(struct sk_buff *skb, u16 mss, int lrg_pkt)
1185 {
1186         struct tcphdr *tcph;
1187         int offset = 0;
1188         int hdr_len;
1189
1190         /* only TCP packets will be aggregated */
1191         if (skb->protocol == htons(ETH_P_IP)) {
1192                 struct iphdr *iph = (struct iphdr *)skb->data;
1193
1194                 if (iph->protocol == IPPROTO_TCP) {
1195                         offset = iph->ihl * 4;
1196                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1197                 } else {
1198                         return;
1199                 }
1200         } else if (skb->protocol == htons(ETH_P_IPV6)) {
1201                 struct ipv6hdr *iph6 = (struct ipv6hdr *)skb->data;
1202
1203                 if (iph6->nexthdr == IPPROTO_TCP) {
1204                         offset = sizeof(struct ipv6hdr);
1205                         skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1206                 } else {
1207                         return;
1208                 }
1209         } else {
1210                 return;
1211         }
1212         /* if mss is not set through Large Packet bit/mss in rx buffer,
1213          * expect that the mss will be written to the tcp header checksum.
1214          */
1215         tcph = (struct tcphdr *)(skb->data + offset);
1216         if (lrg_pkt) {
1217                 skb_shinfo(skb)->gso_size = mss;
1218         } else if (offset) {
1219                 skb_shinfo(skb)->gso_size = ntohs(tcph->check);
1220                 tcph->check = 0;
1221         }
1222
1223         if (skb_shinfo(skb)->gso_size) {
1224                 hdr_len = offset + tcph->doff * 4;
1225                 skb_shinfo(skb)->gso_segs =
1226                                 DIV_ROUND_UP(skb->len - hdr_len,
1227                                              skb_shinfo(skb)->gso_size);
1228         }
1229 }
1230
1231 static int ibmveth_poll(struct napi_struct *napi, int budget)
1232 {
1233         struct ibmveth_adapter *adapter =
1234                         container_of(napi, struct ibmveth_adapter, napi);
1235         struct net_device *netdev = adapter->netdev;
1236         int frames_processed = 0;
1237         unsigned long lpar_rc;
1238         struct iphdr *iph;
1239         u16 mss = 0;
1240
1241         while (frames_processed < budget) {
1242                 if (!ibmveth_rxq_pending_buffer(adapter))
1243                         break;
1244
1245                 smp_rmb();
1246                 if (!ibmveth_rxq_buffer_valid(adapter)) {
1247                         wmb(); /* suggested by larson1 */
1248                         adapter->rx_invalid_buffer++;
1249                         netdev_dbg(netdev, "recycling invalid buffer\n");
1250                         ibmveth_rxq_recycle_buffer(adapter);
1251                 } else {
1252                         struct sk_buff *skb, *new_skb;
1253                         int length = ibmveth_rxq_frame_length(adapter);
1254                         int offset = ibmveth_rxq_frame_offset(adapter);
1255                         int csum_good = ibmveth_rxq_csum_good(adapter);
1256                         int lrg_pkt = ibmveth_rxq_large_packet(adapter);
1257                         __sum16 iph_check = 0;
1258
1259                         skb = ibmveth_rxq_get_buffer(adapter);
1260
1261                         /* if the large packet bit is set in the rx queue
1262                          * descriptor, the mss will be written by PHYP eight
1263                          * bytes from the start of the rx buffer, which is
1264                          * skb->data at this stage
1265                          */
1266                         if (lrg_pkt) {
1267                                 __be64 *rxmss = (__be64 *)(skb->data + 8);
1268
1269                                 mss = (u16)be64_to_cpu(*rxmss);
1270                         }
1271
1272                         new_skb = NULL;
1273                         if (length < rx_copybreak)
1274                                 new_skb = netdev_alloc_skb(netdev, length);
1275
1276                         if (new_skb) {
1277                                 skb_copy_to_linear_data(new_skb,
1278                                                         skb->data + offset,
1279                                                         length);
1280                                 if (rx_flush)
1281                                         ibmveth_flush_buffer(skb->data,
1282                                                 length + offset);
1283                                 if (!ibmveth_rxq_recycle_buffer(adapter))
1284                                         kfree_skb(skb);
1285                                 skb = new_skb;
1286                         } else {
1287                                 ibmveth_rxq_harvest_buffer(adapter);
1288                                 skb_reserve(skb, offset);
1289                         }
1290
1291                         skb_put(skb, length);
1292                         skb->protocol = eth_type_trans(skb, netdev);
1293
1294                         if (csum_good) {
1295                                 skb->ip_summed = CHECKSUM_UNNECESSARY;
1296                                 if (be16_to_cpu(skb->protocol) == ETH_P_IP) {
1297                                         iph = (struct iphdr *)skb->data;
1298
1299                                         /* If the IP checksum is not offloaded and if the packet
1300                          * is a large send frame, the checksum must be rebuilt.
1301                                          */
1302                                         if (iph->check == 0xffff) {
1303                                                 iph->check = 0;
1304                                                 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
1305                                         }
1306                                 }
1307                         }
1308
1309                         /* PHYP without PLSO support places a -1 in the ip
1310                          * checksum for large send frames.
1311                          */
1312                         if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
1313                                 struct iphdr *iph = (struct iphdr *)skb->data;
1314
1315                                 iph_check = iph->check;
1316                         }
1317
1318                         if ((length > netdev->mtu + ETH_HLEN) ||
1319                             lrg_pkt || iph_check == 0xffff) {
1320                                 ibmveth_rx_mss_helper(skb, mss, lrg_pkt);
1321                                 adapter->rx_large_packets++;
1322                         }
1323
1324                         napi_gro_receive(napi, skb);    /* send it up */
1325
1326                         netdev->stats.rx_packets++;
1327                         netdev->stats.rx_bytes += length;
1328                         frames_processed++;
1329                 }
1330         }
1331
1332         ibmveth_replenish_task(adapter);
1333
1334         if (frames_processed < budget) {
1335                 napi_complete(napi);
1336
1337                 /* We think we are done - reenable interrupts,
1338                  * then check once more to make sure we are done.
1339                  */
1340                 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1341                                        VIO_IRQ_ENABLE);
1342
1343                 BUG_ON(lpar_rc != H_SUCCESS);
1344
1345                 if (ibmveth_rxq_pending_buffer(adapter) &&
1346                     napi_reschedule(napi)) {
1347                         lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1348                                                VIO_IRQ_DISABLE);
1349                 }
1350         }
1351
1352         return frames_processed;
1353 }
1354
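/* Receive interrupt handler: mask further VIO interrupts and defer the
 * real work to NAPI polling.
 */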
1355 static irqreturn_t ibmveth_interrupt(int irq, void *dev_instance)
1356 {
1357         struct net_device *netdev = dev_instance;
1358         struct ibmveth_adapter *adapter = netdev_priv(netdev);
1359         unsigned long lpar_rc;
1360
1361         if (napi_schedule_prep(&adapter->napi)) {
1362                 lpar_rc = h_vio_signal(adapter->vdev->unit_address,
1363                                        VIO_IRQ_DISABLE);
1364                 BUG_ON(lpar_rc != H_SUCCESS);
1365                 __napi_schedule(&adapter->napi);
1366         }
1367         return IRQ_HANDLED;
1368 }
1369
1370 static void ibmveth_set_multicast_list(struct net_device *netdev)
1371 {
1372         struct ibmveth_adapter *adapter = netdev_priv(netdev);
1373         unsigned long lpar_rc;
1374
1375         if ((netdev->flags & IFF_PROMISC) ||
1376             (netdev_mc_count(netdev) > adapter->mcastFilterSize)) {
1377                 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1378                                            IbmVethMcastEnableRecv |
1379                                            IbmVethMcastDisableFiltering,
1380                                            0);
1381                 if (lpar_rc != H_SUCCESS) {
1382                         netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1383                                    "entering promisc mode\n", lpar_rc);
1384                 }
1385         } else {
1386                 struct netdev_hw_addr *ha;
1387                 /* clear the filter table & disable filtering */
1388                 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1389                                            IbmVethMcastEnableRecv |
1390                                            IbmVethMcastDisableFiltering |
1391                                            IbmVethMcastClearFilterTable,
1392                                            0);
1393                 if (lpar_rc != H_SUCCESS) {
1394                         netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1395                                    "attempting to clear filter table\n",
1396                                    lpar_rc);
1397                 }
1398                 /* add the addresses to the filter table */
1399                 netdev_for_each_mc_addr(ha, netdev) {
1401                         u64 mcast_addr;
1402                         mcast_addr = ibmveth_encode_mac_addr(ha->addr);
1403                         lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1404                                                    IbmVethMcastAddFilter,
1405                                                    mcast_addr);
1406                         if (lpar_rc != H_SUCCESS) {
1407                                 netdev_err(netdev, "h_multicast_ctrl rc=%ld "
1408                                            "when adding an entry to the filter "
1409                                            "table\n", lpar_rc);
1410                         }
1411                 }
1412
1413                 /* re-enable filtering */
1414                 lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
1415                                            IbmVethMcastEnableFiltering,
1416                                            0);
1417                 if (lpar_rc != H_SUCCESS) {
1418                         netdev_err(netdev, "h_multicast_ctrl rc=%ld when "
1419                                    "enabling filtering\n", lpar_rc);
1420                 }
1421         }
1422 }
1423
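/* An MTU change is accepted only if new_mtu plus IBMVETH_BUFF_OH fits in
 * at least one receive buffer pool; a running interface is closed and
 * reopened so the required pools can be activated.
 */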
1424 static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
1425 {
1426         struct ibmveth_adapter *adapter = netdev_priv(dev);
1427         struct vio_dev *viodev = adapter->vdev;
1428         int new_mtu_oh = new_mtu + IBMVETH_BUFF_OH;
1429         int i, rc;
1430         int need_restart = 0;
1431
1432         if (new_mtu < IBMVETH_MIN_MTU)
1433                 return -EINVAL;
1434
1435         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1436                 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
1437                         break;
1438
1439         if (i == IBMVETH_NUM_BUFF_POOLS)
1440                 return -EINVAL;
1441
1442         /* Deactivate all the buffer pools so that the next loop can
1443          * activate only the buffer pools necessary to hold the new MTU */
1444         if (netif_running(adapter->netdev)) {
1445                 need_restart = 1;
1446                 adapter->pool_config = 1;
1447                 ibmveth_close(adapter->netdev);
1448                 adapter->pool_config = 0;
1449         }
1450
1451         /* Look for an active buffer pool that can hold the new MTU */
1452         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1453                 adapter->rx_buff_pool[i].active = 1;
1454
1455                 if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
1456                         dev->mtu = new_mtu;
1457                         vio_cmo_set_dev_desired(viodev,
1458                                                 ibmveth_get_desired_dma
1459                                                 (viodev));
1460                         if (need_restart) {
1461                                 return ibmveth_open(adapter->netdev);
1462                         }
1463                         return 0;
1464                 }
1465         }
1466
1467         if (need_restart && (rc = ibmveth_open(adapter->netdev)))
1468                 return rc;
1469
1470         return -EINVAL;
1471 }
1472
1473 #ifdef CONFIG_NET_POLL_CONTROLLER
1474 static void ibmveth_poll_controller(struct net_device *dev)
1475 {
1476         ibmveth_replenish_task(netdev_priv(dev));
1477         ibmveth_interrupt(dev->irq, dev);
1478 }
1479 #endif
1480
1481 /**
1482  * ibmveth_get_desired_dma - Calculate IO memory desired by the driver
1483  *
1484  * @vdev: struct vio_dev for the device whose desired IO mem is to be returned
1485  *
1486  * Return value:
1487  *      Number of bytes of IO data the driver will need to perform well.
1488  */
1489 static unsigned long ibmveth_get_desired_dma(struct vio_dev *vdev)
1490 {
1491         struct net_device *netdev = dev_get_drvdata(&vdev->dev);
1492         struct ibmveth_adapter *adapter;
1493         struct iommu_table *tbl;
1494         unsigned long ret;
1495         int i;
1496         int rxqentries = 1;
1497
1498         tbl = get_iommu_table_base(&vdev->dev);
1499
1500         /* netdev is set up at probe time with the structures we need below */
1501         if (netdev == NULL)
1502                 return IOMMU_PAGE_ALIGN(IBMVETH_IO_ENTITLEMENT_DEFAULT, tbl);
1503
1504         adapter = netdev_priv(netdev);
1505
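        /* Entitlement: the buffer and filter lists, an MTU-sized mapping,
         * every active receive buffer pool, and the receive queue entries.
         */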
1506         ret = IBMVETH_BUFF_LIST_SIZE + IBMVETH_FILT_LIST_SIZE;
1507         ret += IOMMU_PAGE_ALIGN(netdev->mtu, tbl);
1508
1509         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1510                 /* add the size of the active receive buffers */
1511                 if (adapter->rx_buff_pool[i].active)
1512                         ret +=
1513                             adapter->rx_buff_pool[i].size *
1514                             IOMMU_PAGE_ALIGN(adapter->rx_buff_pool[i].
1515                                              buff_size, tbl);
1516                 rxqentries += adapter->rx_buff_pool[i].size;
1517         }
1518         /* add the size of the receive queue entries */
1519         ret += IOMMU_PAGE_ALIGN(
1520                 rxqentries * sizeof(struct ibmveth_rx_q_entry), tbl);
1521
1522         return ret;
1523 }
1524
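/* Set a new MAC address: validate it, ask the hypervisor to switch the
 * logical LAN MAC via h_change_logical_lan_mac(), and update dev_addr
 * only once the hcall succeeds.
 */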
1525 static int ibmveth_set_mac_addr(struct net_device *dev, void *p)
1526 {
1527         struct ibmveth_adapter *adapter = netdev_priv(dev);
1528         struct sockaddr *addr = p;
1529         u64 mac_address;
1530         int rc;
1531
1532         if (!is_valid_ether_addr(addr->sa_data))
1533                 return -EADDRNOTAVAIL;
1534
1535         mac_address = ibmveth_encode_mac_addr(addr->sa_data);
1536         rc = h_change_logical_lan_mac(adapter->vdev->unit_address, mac_address);
1537         if (rc) {
1538                 netdev_err(adapter->netdev, "h_change_logical_lan_mac failed with rc=%d\n", rc);
1539                 return rc;
1540         }
1541
1542         ether_addr_copy(dev->dev_addr, addr->sa_data);
1543
1544         return 0;
1545 }
1546
1547 static const struct net_device_ops ibmveth_netdev_ops = {
1548         .ndo_open               = ibmveth_open,
1549         .ndo_stop               = ibmveth_close,
1550         .ndo_start_xmit         = ibmveth_start_xmit,
1551         .ndo_set_rx_mode        = ibmveth_set_multicast_list,
1552         .ndo_do_ioctl           = ibmveth_ioctl,
1553         .ndo_change_mtu         = ibmveth_change_mtu,
1554         .ndo_fix_features       = ibmveth_fix_features,
1555         .ndo_set_features       = ibmveth_set_features,
1556         .ndo_validate_addr      = eth_validate_addr,
1557         .ndo_set_mac_address    = ibmveth_set_mac_addr,
1558 #ifdef CONFIG_NET_POLL_CONTROLLER
1559         .ndo_poll_controller    = ibmveth_poll_controller,
1560 #endif
1561 };
1562
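/* Probe: read the MAC address and multicast filter size from the VIO
 * device attributes, allocate and set up the netdev (NAPI, offload
 * features, receive buffer pools and their sysfs kobjects), then
 * register it.
 */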
1563 static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
1564 {
1565         int rc, i, mac_len;
1566         struct net_device *netdev;
1567         struct ibmveth_adapter *adapter;
1568         unsigned char *mac_addr_p;
1569         __be32 *mcastFilterSize_p;
1570         long ret;
1571         unsigned long ret_attr;
1572
1573         dev_dbg(&dev->dev, "entering ibmveth_probe for UA 0x%x\n",
1574                 dev->unit_address);
1575
1576         mac_addr_p = (unsigned char *)vio_get_attribute(dev, VETH_MAC_ADDR,
1577                                                         &mac_len);
1578         if (!mac_addr_p) {
1579                 dev_err(&dev->dev, "Can't find VETH_MAC_ADDR attribute\n");
1580                 return -EINVAL;
1581         }
1582         /* Workaround for old/broken pHyp */
1583         if (mac_len == 8)
1584                 mac_addr_p += 2;
1585         else if (mac_len != 6) {
1586                 dev_err(&dev->dev, "VETH_MAC_ADDR attribute wrong len %d\n",
1587                         mac_len);
1588                 return -EINVAL;
1589         }
1590
1591         mcastFilterSize_p = (__be32 *)vio_get_attribute(dev,
1592                                                         VETH_MCAST_FILTER_SIZE,
1593                                                         NULL);
1594         if (!mcastFilterSize_p) {
1595                 dev_err(&dev->dev, "Can't find VETH_MCAST_FILTER_SIZE "
1596                         "attribute\n");
1597                 return -EINVAL;
1598         }
1599
1600         netdev = alloc_etherdev(sizeof(struct ibmveth_adapter));
1601
1602         if (!netdev)
1603                 return -ENOMEM;
1604
1605         adapter = netdev_priv(netdev);
1606         dev_set_drvdata(&dev->dev, netdev);
1607
1608         adapter->vdev = dev;
1609         adapter->netdev = netdev;
1610         adapter->mcastFilterSize = be32_to_cpu(*mcastFilterSize_p);
1611         adapter->pool_config = 0;
1612
1613         netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
1614
1615         netdev->irq = dev->irq;
1616         netdev->netdev_ops = &ibmveth_netdev_ops;
1617         netdev->ethtool_ops = &netdev_ethtool_ops;
1618         SET_NETDEV_DEV(netdev, &dev->dev);
1619         netdev->hw_features = NETIF_F_SG;
1620         if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
1621                 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1622                                        NETIF_F_RXCSUM;
1623         }
1624
1625         netdev->features |= netdev->hw_features;
1626
1627         ret = h_illan_attributes(adapter->vdev->unit_address, 0, 0, &ret_attr);
1628
1629         /* If running older firmware, TSO should not be enabled by default */
1630         if (ret == H_SUCCESS && (ret_attr & IBMVETH_ILLAN_LRG_SND_SUPPORT) &&
1631             !old_large_send) {
1632                 netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
1633                 netdev->features |= netdev->hw_features;
1634         } else {
1635                 netdev->hw_features |= NETIF_F_TSO;
1636         }
1637
1638         memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
1639
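        /* Use the CMO default pool counts on CMO-capable firmware, then set
         * up each receive buffer pool and expose it as a poolN sysfs kobject.
         */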
1640         if (firmware_has_feature(FW_FEATURE_CMO))
1641                 memcpy(pool_count, pool_count_cmo, sizeof(pool_count));
1642
1643         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1644                 struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
1645                 int error;
1646
1647                 ibmveth_init_buffer_pool(&adapter->rx_buff_pool[i], i,
1648                                          pool_count[i], pool_size[i],
1649                                          pool_active[i]);
1650                 error = kobject_init_and_add(kobj, &ktype_veth_pool,
1651                                              &dev->dev.kobj, "pool%d", i);
1652                 if (!error)
1653                         kobject_uevent(kobj, KOBJ_ADD);
1654         }
1655
1656         netdev_dbg(netdev, "adapter @ 0x%p\n", adapter);
1657
1658         adapter->buffer_list_dma = DMA_ERROR_CODE;
1659         adapter->filter_list_dma = DMA_ERROR_CODE;
1660         adapter->rx_queue.queue_dma = DMA_ERROR_CODE;
1661
1662         netdev_dbg(netdev, "registering netdev...\n");
1663
1664         ibmveth_set_features(netdev, netdev->features);
1665
1666         rc = register_netdev(netdev);
1667
1668         if (rc) {
1669                 netdev_dbg(netdev, "failed to register netdev rc=%d\n", rc);
1670                 free_netdev(netdev);
1671                 return rc;
1672         }
1673
1674         netdev_dbg(netdev, "registered\n");
1675
1676         return 0;
1677 }
1678
1679 static int ibmveth_remove(struct vio_dev *dev)
1680 {
1681         struct net_device *netdev = dev_get_drvdata(&dev->dev);
1682         struct ibmveth_adapter *adapter = netdev_priv(netdev);
1683         int i;
1684
1685         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
1686                 kobject_put(&adapter->rx_buff_pool[i].kobj);
1687
1688         unregister_netdev(netdev);
1689
1690         free_netdev(netdev);
1691         dev_set_drvdata(&dev->dev, NULL);
1692
1693         return 0;
1694 }
1695
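/* Per-pool sysfs interface: each receive buffer pool is exposed as a
 * poolN kobject under the VIO device with "active", "num" and "size"
 * attributes, handled by veth_pool_show() and veth_pool_store() below.
 */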
1696 static struct attribute veth_active_attr;
1697 static struct attribute veth_num_attr;
1698 static struct attribute veth_size_attr;
1699
1700 static ssize_t veth_pool_show(struct kobject *kobj,
1701                               struct attribute *attr, char *buf)
1702 {
1703         struct ibmveth_buff_pool *pool = container_of(kobj,
1704                                                       struct ibmveth_buff_pool,
1705                                                       kobj);
1706
1707         if (attr == &veth_active_attr)
1708                 return sprintf(buf, "%d\n", pool->active);
1709         else if (attr == &veth_num_attr)
1710                 return sprintf(buf, "%d\n", pool->size);
1711         else if (attr == &veth_size_attr)
1712                 return sprintf(buf, "%d\n", pool->buff_size);
1713         return 0;
1714 }
1715
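/* Writing "active" allocates and enables a pool, or disables it provided
 * another active pool can still hold an MTU-sized packet; "num" and
 * "size" are range-checked against IBMVETH_MAX_POOL_COUNT and
 * IBMVETH_MAX_BUF_SIZE.  Changes on a running interface close and reopen
 * the device.  Note the value is parsed with simple_strtol(), so
 * non-numeric input reads as 0.
 */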
1716 static ssize_t veth_pool_store(struct kobject *kobj, struct attribute *attr,
1717                                const char *buf, size_t count)
1718 {
1719         struct ibmveth_buff_pool *pool = container_of(kobj,
1720                                                       struct ibmveth_buff_pool,
1721                                                       kobj);
1722         struct net_device *netdev = dev_get_drvdata(
1723             container_of(kobj->parent, struct device, kobj));
1724         struct ibmveth_adapter *adapter = netdev_priv(netdev);
1725         long value = simple_strtol(buf, NULL, 10);
1726         long rc;
1727
1728         if (attr == &veth_active_attr) {
1729                 if (value && !pool->active) {
1730                         if (netif_running(netdev)) {
1731                                 if (ibmveth_alloc_buffer_pool(pool)) {
1732                                         netdev_err(netdev,
1733                                                    "unable to alloc pool\n");
1734                                         return -ENOMEM;
1735                                 }
1736                                 pool->active = 1;
1737                                 adapter->pool_config = 1;
1738                                 ibmveth_close(netdev);
1739                                 adapter->pool_config = 0;
1740                                 if ((rc = ibmveth_open(netdev)))
1741                                         return rc;
1742                         } else {
1743                                 pool->active = 1;
1744                         }
1745                 } else if (!value && pool->active) {
1746                         int mtu = netdev->mtu + IBMVETH_BUFF_OH;
1747                         int i;
1748                         /* Before deactivating this pool, make sure another
1749                          * active pool can hold an MTU-sized packet */
1750                         for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
1751                                 if (pool == &adapter->rx_buff_pool[i])
1752                                         continue;
1753                                 if (!adapter->rx_buff_pool[i].active)
1754                                         continue;
1755                                 if (mtu <= adapter->rx_buff_pool[i].buff_size)
1756                                         break;
1757                         }
1758
1759                         if (i == IBMVETH_NUM_BUFF_POOLS) {
1760                                 netdev_err(netdev, "no active pool >= MTU\n");
1761                                 return -EPERM;
1762                         }
1763
1764                         if (netif_running(netdev)) {
1765                                 adapter->pool_config = 1;
1766                                 ibmveth_close(netdev);
1767                                 pool->active = 0;
1768                                 adapter->pool_config = 0;
1769                                 if ((rc = ibmveth_open(netdev)))
1770                                         return rc;
1771                         }
1772                         pool->active = 0;
1773                 }
1774         } else if (attr == &veth_num_attr) {
1775                 if (value <= 0 || value > IBMVETH_MAX_POOL_COUNT) {
1776                         return -EINVAL;
1777                 } else {
1778                         if (netif_running(netdev)) {
1779                                 adapter->pool_config = 1;
1780                                 ibmveth_close(netdev);
1781                                 adapter->pool_config = 0;
1782                                 pool->size = value;
1783                                 if ((rc = ibmveth_open(netdev)))
1784                                         return rc;
1785                         } else {
1786                                 pool->size = value;
1787                         }
1788                 }
1789         } else if (attr == &veth_size_attr) {
1790                 if (value <= IBMVETH_BUFF_OH || value > IBMVETH_MAX_BUF_SIZE) {
1791                         return -EINVAL;
1792                 } else {
1793                         if (netif_running(netdev)) {
1794                                 adapter->pool_config = 1;
1795                                 ibmveth_close(netdev);
1796                                 adapter->pool_config = 0;
1797                                 pool->buff_size = value;
1798                                 if ((rc = ibmveth_open(netdev)))
1799                                         return rc;
1800                         } else {
1801                                 pool->buff_size = value;
1802                         }
1803                 }
1804         }
1805
1806         /* kick the interrupt handler to allocate/deallocate pools */
1807         ibmveth_interrupt(netdev->irq, netdev);
1808         return count;
1809 }
1810
1811
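/* ATTR(name, mode) declares the struct attribute veth_<name>_attr used in
 * veth_pool_attrs[] and matched by pointer in the show/store handlers.
 */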
1812 #define ATTR(_name, _mode)                              \
1813         struct attribute veth_##_name##_attr = {        \
1814         .name = __stringify(_name), .mode = _mode,      \
1815         };
1816
1817 static ATTR(active, 0644);
1818 static ATTR(num, 0644);
1819 static ATTR(size, 0644);
1820
1821 static struct attribute *veth_pool_attrs[] = {
1822         &veth_active_attr,
1823         &veth_num_attr,
1824         &veth_size_attr,
1825         NULL,
1826 };
1827
1828 static const struct sysfs_ops veth_pool_ops = {
1829         .show   = veth_pool_show,
1830         .store  = veth_pool_store,
1831 };
1832
1833 static struct kobj_type ktype_veth_pool = {
1834         .release        = NULL,
1835         .sysfs_ops      = &veth_pool_ops,
1836         .default_attrs  = veth_pool_attrs,
1837 };
1838
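/* Resume kicks the interrupt handler so NAPI is scheduled and the receive
 * queue and buffer pools are serviced again.
 */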
1839 static int ibmveth_resume(struct device *dev)
1840 {
1841         struct net_device *netdev = dev_get_drvdata(dev);
1842         ibmveth_interrupt(netdev->irq, netdev);
1843         return 0;
1844 }
1845
1846 static struct vio_device_id ibmveth_device_table[] = {
1847         { "network", "IBM,l-lan"},
1848         { "", "" }
1849 };
1850 MODULE_DEVICE_TABLE(vio, ibmveth_device_table);
1851
1852 static struct dev_pm_ops ibmveth_pm_ops = {
1853         .resume = ibmveth_resume
1854 };
1855
1856 static struct vio_driver ibmveth_driver = {
1857         .id_table       = ibmveth_device_table,
1858         .probe          = ibmveth_probe,
1859         .remove         = ibmveth_remove,
1860         .get_desired_dma = ibmveth_get_desired_dma,
1861         .name           = ibmveth_driver_name,
1862         .pm             = &ibmveth_pm_ops,
1863 };
1864
1865 static int __init ibmveth_module_init(void)
1866 {
1867         printk(KERN_DEBUG "%s: %s %s\n", ibmveth_driver_name,
1868                ibmveth_driver_string, ibmveth_driver_version);
1869
1870         return vio_register_driver(&ibmveth_driver);
1871 }
1872
1873 static void __exit ibmveth_module_exit(void)
1874 {
1875         vio_unregister_driver(&ibmveth_driver);
1876 }
1877
1878 module_init(ibmveth_module_init);
1879 module_exit(ibmveth_module_exit);