GNU Linux-libre 5.4.200-gnu1
[releases.git] / drivers / s390 / net / qeth_core_main.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *    Copyright IBM Corp. 2007, 2009
4  *    Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
5  *               Frank Pavlic <fpavlic@de.ibm.com>,
6  *               Thomas Spatzier <tspat@de.ibm.com>,
7  *               Frank Blaschka <frank.blaschka@de.ibm.com>
8  */
9
10 #define KMSG_COMPONENT "qeth"
11 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
12
13 #include <linux/compat.h>
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/kernel.h>
19 #include <linux/log2.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/mii.h>
23 #include <linux/mm.h>
24 #include <linux/kthread.h>
25 #include <linux/slab.h>
26 #include <linux/if_vlan.h>
27 #include <linux/netdevice.h>
28 #include <linux/netdev_features.h>
29 #include <linux/skbuff.h>
30 #include <linux/vmalloc.h>
31
32 #include <net/iucv/af_iucv.h>
33 #include <net/dsfield.h>
34 #include <net/sock.h>
35
36 #include <asm/ebcdic.h>
37 #include <asm/chpid.h>
38 #include <asm/io.h>
39 #include <asm/sysinfo.h>
40 #include <asm/diag.h>
41 #include <asm/cio.h>
42 #include <asm/ccwdev.h>
43 #include <asm/cpcmd.h>
44
45 #include "qeth_core.h"
46
/* s390 debug-feature (s390dbf) area definitions, one entry per QETH_DBF_*
 * id. Exported so that other qeth modules can log into the same areas.
 */
struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS] = {
	/* define dbf - Name, Pages, Areas, Maxlen, Level, View, Handle */
	/*                   N  P  A    M  L  V                      H  */
	[QETH_DBF_SETUP] = {"qeth_setup",
				8, 1,   8, 5, &debug_hex_ascii_view, NULL},
	[QETH_DBF_MSG]   = {"qeth_msg", 8, 1, 11 * sizeof(long), 3,
			    &debug_sprintf_view, NULL},
	[QETH_DBF_CTRL]  = {"qeth_control",
		8, 1, QETH_DBF_CTRL_LEN, 5, &debug_hex_ascii_view, NULL},
};
EXPORT_SYMBOL_GPL(qeth_dbf);
58
/* slab cache for qeth protocol headers; shared with the discipline code */
struct kmem_cache *qeth_core_header_cache;
EXPORT_SYMBOL_GPL(qeth_core_header_cache);
/* slab cache for TX buffer descriptors (see qeth_cleanup_handled_pending()) */
static struct kmem_cache *qeth_qdio_outbuf_cache;

/* root device that qeth devices hang off of - NOTE(review): created
 * elsewhere in this file, confirm exact role against the init code */
static struct device *qeth_core_root_dev;
/* lockdep class key for the per-TX-queue skb queue locks */
static struct lock_class_key qdio_out_skb_queue_key;
65
66 static void qeth_issue_next_read_cb(struct qeth_card *card,
67                                     struct qeth_cmd_buffer *iob,
68                                     unsigned int data_length);
69 static void qeth_free_buffer_pool(struct qeth_card *);
70 static int qeth_qdio_establish(struct qeth_card *);
71 static void qeth_free_qdio_queues(struct qeth_card *card);
72 static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
73                 struct qeth_qdio_out_buffer *buf,
74                 enum iucv_tx_notify notification);
75 static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
76                                  int budget);
77 static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
78
/* Worker for card->close_dev_work: takes the ccwgroup device offline.
 * Scheduled from qeth_check_ipa_data() when the adapter signals a
 * VEPA-to-VEB transition via STOPLAN.
 */
static void qeth_close_dev_handler(struct work_struct *work)
{
	struct qeth_card *card;

	card = container_of(work, struct qeth_card, close_dev_work);
	QETH_CARD_TEXT(card, 2, "cldevhdl");
	ccwgroup_set_offline(card->gdev);
}
87
88 static const char *qeth_get_cardname(struct qeth_card *card)
89 {
90         if (IS_VM_NIC(card)) {
91                 switch (card->info.type) {
92                 case QETH_CARD_TYPE_OSD:
93                         return " Virtual NIC QDIO";
94                 case QETH_CARD_TYPE_IQD:
95                         return " Virtual NIC Hiper";
96                 case QETH_CARD_TYPE_OSM:
97                         return " Virtual NIC QDIO - OSM";
98                 case QETH_CARD_TYPE_OSX:
99                         return " Virtual NIC QDIO - OSX";
100                 default:
101                         return " unknown";
102                 }
103         } else {
104                 switch (card->info.type) {
105                 case QETH_CARD_TYPE_OSD:
106                         return " OSD Express";
107                 case QETH_CARD_TYPE_IQD:
108                         return " HiperSockets";
109                 case QETH_CARD_TYPE_OSN:
110                         return " OSN QDIO";
111                 case QETH_CARD_TYPE_OSM:
112                         return " OSM QDIO";
113                 case QETH_CARD_TYPE_OSX:
114                         return " OSX QDIO";
115                 default:
116                         return " unknown";
117                 }
118         }
119         return " n/a";
120 }
121
122 /* max length to be returned: 14 */
123 const char *qeth_get_cardname_short(struct qeth_card *card)
124 {
125         if (IS_VM_NIC(card)) {
126                 switch (card->info.type) {
127                 case QETH_CARD_TYPE_OSD:
128                         return "Virt.NIC QDIO";
129                 case QETH_CARD_TYPE_IQD:
130                         return "Virt.NIC Hiper";
131                 case QETH_CARD_TYPE_OSM:
132                         return "Virt.NIC OSM";
133                 case QETH_CARD_TYPE_OSX:
134                         return "Virt.NIC OSX";
135                 default:
136                         return "unknown";
137                 }
138         } else {
139                 switch (card->info.type) {
140                 case QETH_CARD_TYPE_OSD:
141                         switch (card->info.link_type) {
142                         case QETH_LINK_TYPE_FAST_ETH:
143                                 return "OSD_100";
144                         case QETH_LINK_TYPE_HSTR:
145                                 return "HSTR";
146                         case QETH_LINK_TYPE_GBIT_ETH:
147                                 return "OSD_1000";
148                         case QETH_LINK_TYPE_10GBIT_ETH:
149                                 return "OSD_10GIG";
150                         case QETH_LINK_TYPE_25GBIT_ETH:
151                                 return "OSD_25GIG";
152                         case QETH_LINK_TYPE_LANE_ETH100:
153                                 return "OSD_FE_LANE";
154                         case QETH_LINK_TYPE_LANE_TR:
155                                 return "OSD_TR_LANE";
156                         case QETH_LINK_TYPE_LANE_ETH1000:
157                                 return "OSD_GbE_LANE";
158                         case QETH_LINK_TYPE_LANE:
159                                 return "OSD_ATM_LANE";
160                         default:
161                                 return "OSD_Express";
162                         }
163                 case QETH_CARD_TYPE_IQD:
164                         return "HiperSockets";
165                 case QETH_CARD_TYPE_OSN:
166                         return "OSN";
167                 case QETH_CARD_TYPE_OSM:
168                         return "OSM_1000";
169                 case QETH_CARD_TYPE_OSX:
170                         return "OSX_10GIG";
171                 default:
172                         return "unknown";
173                 }
174         }
175         return "n/a";
176 }
177
/**
 * qeth_set_allowed_threads() - set which recovery threads may be started.
 * @card: card whose thread masks are updated.
 * @threads: new value for thread_allowed_mask.
 * @clear_start_mask: if non-zero, also strip now-disallowed threads from
 *	thread_start_mask.
 *
 * Wakes card->wait_q so that waiters re-evaluate the new masks.
 */
void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads,
			 int clear_start_mask)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	card->thread_allowed_mask = threads;
	if (clear_start_mask)
		card->thread_start_mask &= threads;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_set_allowed_threads);
191
192 int qeth_threads_running(struct qeth_card *card, unsigned long threads)
193 {
194         unsigned long flags;
195         int rc = 0;
196
197         spin_lock_irqsave(&card->thread_mask_lock, flags);
198         rc = (card->thread_running_mask & threads);
199         spin_unlock_irqrestore(&card->thread_mask_lock, flags);
200         return rc;
201 }
202 EXPORT_SYMBOL_GPL(qeth_threads_running);
203
/* Unlink all entries from the working (in-use) RX buffer pool list.
 * The entries themselves are not freed here: each one is also linked on
 * the init pool via ->init_list (see qeth_alloc_buffer_pool()), which
 * owns the memory.
 */
void qeth_clear_working_pool_list(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry, *tmp;

	QETH_CARD_TEXT(card, 5, "clwrklst");
	list_for_each_entry_safe(pool_entry, tmp,
			    &card->qdio.in_buf_pool.entry_list, list){
			list_del(&pool_entry->list);
	}
}
EXPORT_SYMBOL_GPL(qeth_clear_working_pool_list);
215
/* Allocate the inbound buffer pool: init_pool.buf_count entries, each
 * holding QETH_MAX_BUFFER_ELEMENTS(card) pages. On any allocation failure
 * the partially built pool is torn down again and -ENOMEM is returned.
 */
static int qeth_alloc_buffer_pool(struct qeth_card *card)
{
	struct qeth_buffer_pool_entry *pool_entry;
	void *ptr;
	int i, j;

	QETH_CARD_TEXT(card, 5, "alocpool");
	for (i = 0; i < card->qdio.init_pool.buf_count; ++i) {
		pool_entry = kzalloc(sizeof(*pool_entry), GFP_KERNEL);
		if (!pool_entry) {
			qeth_free_buffer_pool(card);
			return -ENOMEM;
		}
		for (j = 0; j < QETH_MAX_BUFFER_ELEMENTS(card); ++j) {
			ptr = (void *) __get_free_page(GFP_KERNEL);
			if (!ptr) {
				/* roll back this entry's pages first, then
				 * drop the entries built in earlier
				 * iterations:
				 */
				while (j > 0)
					free_page((unsigned long)
						  pool_entry->elements[--j]);
				kfree(pool_entry);
				qeth_free_buffer_pool(card);
				return -ENOMEM;
			}
			pool_entry->elements[j] = ptr;
		}
		list_add(&pool_entry->init_list,
			 &card->qdio.init_pool.entry_list);
	}
	return 0;
}
246
/* Resize the inbound buffer pool to @bufcnt entries. Only permitted while
 * the card is offline (CARD_STATE_DOWN); the old pool is dropped entirely
 * and a fresh one allocated.
 */
int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
{
	QETH_CARD_TEXT(card, 2, "realcbp");

	if (card->state != CARD_STATE_DOWN)
		return -EPERM;

	/* TODO: steal/add buffers from/to a running card's buffer pool (?) */
	qeth_clear_working_pool_list(card);
	qeth_free_buffer_pool(card);
	card->qdio.in_buf_pool.buf_count = bufcnt;
	card->qdio.init_pool.buf_count = bufcnt;
	return qeth_alloc_buffer_pool(card);
}
EXPORT_SYMBOL_GPL(qeth_realloc_buffer_pool);
262
/* Free a queue allocated by qeth_alloc_qdio_queue(); NULL is a no-op. */
static void qeth_free_qdio_queue(struct qeth_qdio_q *q)
{
	if (!q)
		return;

	qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
	kfree(q);
}
271
/* Allocate a qdio queue struct plus its QDIO buffers, and point each
 * buffer descriptor at the matching qdio_buffer. Returns NULL on OOM.
 */
static struct qeth_qdio_q *qeth_alloc_qdio_queue(void)
{
	struct qeth_qdio_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
	int i;

	if (!q)
		return NULL;

	if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
		kfree(q);
		return NULL;
	}

	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i)
		q->bufs[i].buffer = q->qdio_bufs[i];

	QETH_DBF_HEX(SETUP, 2, &q, sizeof(void *));
	return q;
}
291
292 static int qeth_cq_init(struct qeth_card *card)
293 {
294         int rc;
295
296         if (card->options.cq == QETH_CQ_ENABLED) {
297                 QETH_CARD_TEXT(card, 2, "cqinit");
298                 qdio_reset_buffers(card->qdio.c_q->qdio_bufs,
299                                    QDIO_MAX_BUFFERS_PER_Q);
300                 card->qdio.c_q->next_buf_to_init = 127;
301                 rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT,
302                              card->qdio.no_in_queues - 1, 0,
303                              127);
304                 if (rc) {
305                         QETH_CARD_TEXT_(card, 2, "1err%d", rc);
306                         goto out;
307                 }
308         }
309         rc = 0;
310 out:
311         return rc;
312 }
313
314 static int qeth_alloc_cq(struct qeth_card *card)
315 {
316         int rc;
317
318         if (card->options.cq == QETH_CQ_ENABLED) {
319                 int i;
320                 struct qdio_outbuf_state *outbuf_states;
321
322                 QETH_CARD_TEXT(card, 2, "cqon");
323                 card->qdio.c_q = qeth_alloc_qdio_queue();
324                 if (!card->qdio.c_q) {
325                         rc = -1;
326                         goto kmsg_out;
327                 }
328                 card->qdio.no_in_queues = 2;
329                 card->qdio.out_bufstates =
330                         kcalloc(card->qdio.no_out_queues *
331                                         QDIO_MAX_BUFFERS_PER_Q,
332                                 sizeof(struct qdio_outbuf_state),
333                                 GFP_KERNEL);
334                 outbuf_states = card->qdio.out_bufstates;
335                 if (outbuf_states == NULL) {
336                         rc = -1;
337                         goto free_cq_out;
338                 }
339                 for (i = 0; i < card->qdio.no_out_queues; ++i) {
340                         card->qdio.out_qs[i]->bufstates = outbuf_states;
341                         outbuf_states += QDIO_MAX_BUFFERS_PER_Q;
342                 }
343         } else {
344                 QETH_CARD_TEXT(card, 2, "nocq");
345                 card->qdio.c_q = NULL;
346                 card->qdio.no_in_queues = 1;
347         }
348         QETH_CARD_TEXT_(card, 2, "iqc%d", card->qdio.no_in_queues);
349         rc = 0;
350 out:
351         return rc;
352 free_cq_out:
353         qeth_free_qdio_queue(card->qdio.c_q);
354         card->qdio.c_q = NULL;
355 kmsg_out:
356         dev_err(&card->gdev->dev, "Failed to create completion queue\n");
357         goto out;
358 }
359
/* Tear down the completion queue (if any) and the shared out-buffer state
 * array; counterpart to qeth_alloc_cq().
 */
static void qeth_free_cq(struct qeth_card *card)
{
	if (card->qdio.c_q) {
		--card->qdio.no_in_queues;
		qeth_free_qdio_queue(card->qdio.c_q);
		card->qdio.c_q = NULL;
	}
	kfree(card->qdio.out_bufstates);
	card->qdio.out_bufstates = NULL;
}
370
371 static enum iucv_tx_notify qeth_compute_cq_notification(int sbalf15,
372                                                         int delayed)
373 {
374         enum iucv_tx_notify n;
375
376         switch (sbalf15) {
377         case 0:
378                 n = delayed ? TX_NOTIFY_DELAYED_OK : TX_NOTIFY_OK;
379                 break;
380         case 4:
381         case 16:
382         case 17:
383         case 18:
384                 n = delayed ? TX_NOTIFY_DELAYED_UNREACHABLE :
385                         TX_NOTIFY_UNREACHABLE;
386                 break;
387         default:
388                 n = delayed ? TX_NOTIFY_DELAYED_GENERALERROR :
389                         TX_NOTIFY_GENERALERROR;
390                 break;
391         }
392
393         return n;
394 }
395
/* Walk the chain of replacement buffers hanging off q->bufs[bidx] via
 * ->next_pending and free every buffer whose async completion has been
 * fully handled (QETH_QDIO_BUF_HANDLED_DELAYED) - or all of them when
 * @forced_cleanup is set. Only relevant when a completion queue is used.
 */
static void qeth_cleanup_handled_pending(struct qeth_qdio_out_q *q, int bidx,
					 int forced_cleanup)
{
	if (q->card->options.cq != QETH_CQ_ENABLED)
		return;

	if (q->bufs[bidx]->next_pending != NULL) {
		struct qeth_qdio_out_buffer *head = q->bufs[bidx];
		struct qeth_qdio_out_buffer *c = q->bufs[bidx]->next_pending;

		while (c) {
			if (forced_cleanup ||
			    atomic_read(&c->state) ==
			      QETH_QDIO_BUF_HANDLED_DELAYED) {
				struct qeth_qdio_out_buffer *f = c;
				QETH_CARD_TEXT(f->q->card, 5, "fp");
				QETH_CARD_TEXT_(f->q->card, 5, "%lx", (long) f);
				/* release here to avoid interleaving between
				   outbound tasklet and inbound tasklet
				   regarding notifications and lifecycle */
				qeth_tx_complete_buf(c, forced_cleanup, 0);

				/* unlink f while keeping the chain intact: */
				c = f->next_pending;
				WARN_ON_ONCE(head->next_pending != f);
				head->next_pending = c;
				kmem_cache_free(qeth_qdio_outbuf_cache, f);
			} else {
				/* not yet handled - keep it and move on */
				head = c;
				c = c->next_pending;
			}

		}
	}
}
430
431
/* Process one asynchronous-completion AOB delivered through the CQ.
 * Frees header-cache elements still referenced by the buffer, derives the
 * buffer's new state from the AOB return code and - depending on who wins
 * the race against the regular TX completion path - either issues the
 * IUCV TX notification itself or leaves that to the TX completion code.
 */
static void qeth_qdio_handle_aob(struct qeth_card *card,
				 unsigned long phys_aob_addr)
{
	enum qeth_qdio_out_buffer_state new_state = QETH_QDIO_BUF_QAOB_OK;
	struct qaob *aob;
	struct qeth_qdio_out_buffer *buffer;
	enum iucv_tx_notify notification;
	unsigned int i;

	aob = (struct qaob *) phys_to_virt(phys_aob_addr);
	QETH_CARD_TEXT(card, 5, "haob");
	QETH_CARD_TEXT_(card, 5, "%lx", phys_aob_addr);
	/* user1 carries the buffer address - presumably stored when the AOB
	 * was armed; NOTE(review): confirm against the TX setup path.
	 */
	buffer = (struct qeth_qdio_out_buffer *) aob->user1;
	QETH_CARD_TEXT_(card, 5, "%lx", aob->user1);

	/* Free dangling allocations. The attached skbs are handled by
	 * qeth_cleanup_handled_pending().
	 */
	for (i = 0;
	     i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
	     i++) {
		if (aob->sba[i] && buffer->is_header[i])
			kmem_cache_free(qeth_core_header_cache,
					(void *) aob->sba[i]);
	}

	/* a non-zero AOB return code marks the transmission as failed: */
	if (aob->aorc) {
		QETH_CARD_TEXT_(card, 2, "aorc%02X", aob->aorc);
		new_state = QETH_QDIO_BUF_QAOB_ERROR;
	}

	switch (atomic_xchg(&buffer->state, new_state)) {
	case QETH_QDIO_BUF_PRIMED:
		/* Faster than TX completion code. */
		notification = qeth_compute_cq_notification(aob->aorc, 0);
		qeth_notify_skbs(buffer->q, buffer, notification);
		atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
		break;
	case QETH_QDIO_BUF_PENDING:
		/* TX completion code is active and will handle the async
		 * completion for us.
		 */
		break;
	case QETH_QDIO_BUF_NEED_QAOB:
		/* TX completion code is already finished. */
		notification = qeth_compute_cq_notification(aob->aorc, 1);
		qeth_notify_skbs(buffer->q, buffer, notification);
		atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
		break;
	default:
		WARN_ON_ONCE(1);
	}

	qdio_release_aob(aob);
}
487
488 static inline int qeth_is_cq(struct qeth_card *card, unsigned int queue)
489 {
490         return card->options.cq == QETH_CQ_ENABLED &&
491             card->qdio.c_q != NULL &&
492             queue != 0 &&
493             queue == card->qdio.no_in_queues - 1;
494 }
495
/* Fill in a channel command word: @cmd_code/@flags/@len describe the
 * operation, @data is the buffer whose physical address becomes the CDA.
 * CCW_FLAG_SLI (suppress incorrect-length indication) is always set.
 */
static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
			   void *data)
{
	ccw->cmd_code = cmd_code;
	ccw->flags = flags | CCW_FLAG_SLI;
	ccw->count = len;
	ccw->cda = (__u32) __pa(data);
}
504
/* (Re-)start the long-running READ channel program on the control channel.
 * Caller must hold the ccwdev lock of the read device (see
 * qeth_issue_next_read()). On success the iob stays referenced as
 * channel->active_cmd; on failure the extra reference is dropped and
 * recovery is scheduled.
 */
static int __qeth_issue_next_read(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob = card->read_cmd;
	struct qeth_channel *channel = iob->channel;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	int rc;

	QETH_CARD_TEXT(card, 5, "issnxrd");
	if (channel->state != CH_STATE_UP)
		return -EIO;

	memset(iob->data, 0, iob->length);
	qeth_setup_ccw(ccw, CCW_CMD_READ, 0, iob->length, iob->data);
	iob->callback = qeth_issue_next_read_cb;
	/* keep the cmd alive after completion: */
	qeth_get_cmd(iob);

	QETH_CARD_TEXT(card, 6, "noirqpnd");
	rc = ccw_device_start(channel->ccwdev, ccw, (addr_t) iob, 0, 0);
	if (!rc) {
		channel->active_cmd = iob;
	} else {
		QETH_DBF_MESSAGE(2, "error %i on device %x when starting next read ccw!\n",
				 rc, CARD_DEVID(card));
		/* undo the extra reference and kick off recovery: */
		atomic_set(&channel->irq_pending, 0);
		qeth_put_cmd(iob);
		card->read_or_write_problem = 1;
		qeth_schedule_recovery(card);
		wake_up(&card->wait_q);
	}
	return rc;
}
537
/* Lock-taking wrapper around __qeth_issue_next_read(). */
static int qeth_issue_next_read(struct qeth_card *card)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(CARD_RDEV(card)));
	ret = __qeth_issue_next_read(card);
	spin_unlock_irq(get_ccwdev_lock(CARD_RDEV(card)));

	return ret;
}
548
/* Add @iob to the list of commands waiting for a reply from the device. */
static void qeth_enqueue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_add_tail(&iob->list, &card->cmd_waiter_list);
	spin_unlock_irq(&card->lock);
}
556
/* Remove @iob from the cmd_waiter_list again; counterpart to
 * qeth_enqueue_cmd().
 */
static void qeth_dequeue_cmd(struct qeth_card *card,
			     struct qeth_cmd_buffer *iob)
{
	spin_lock_irq(&card->lock);
	list_del(&iob->list);
	spin_unlock_irq(&card->lock);
}
564
/* Complete @iob with result @reason and wake up anybody waiting on it. */
void qeth_notify_cmd(struct qeth_cmd_buffer *iob, int reason)
{
	iob->rc = reason;
	complete(&iob->done);
}
EXPORT_SYMBOL_GPL(qeth_notify_cmd);
571
572 static void qeth_issue_ipa_msg(struct qeth_ipa_cmd *cmd, int rc,
573                 struct qeth_card *card)
574 {
575         const char *ipa_name;
576         int com = cmd->hdr.command;
577         ipa_name = qeth_get_ipa_cmd_name(com);
578
579         if (rc)
580                 QETH_DBF_MESSAGE(2, "IPA: %s(%#x) for device %x returned %#x \"%s\"\n",
581                                  ipa_name, com, CARD_DEVID(card), rc,
582                                  qeth_get_ipa_msg(rc));
583         else
584                 QETH_DBF_MESSAGE(5, "IPA: %s(%#x) for device %x succeeded\n",
585                                  ipa_name, com, CARD_DEVID(card));
586 }
587
/* Inspect an IPA PDU received on the READ channel.
 * Replies are logged and handed back to the caller for matching against a
 * pending command. Unsolicited events (link down/up, bridgeport events,
 * address-change notifications, ...) are handled here; returning NULL
 * tells the caller that no further processing is needed.
 */
static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card,
						struct qeth_ipa_cmd *cmd)
{
	QETH_CARD_TEXT(card, 5, "chkipad");

	if (IS_IPA_REPLY(cmd)) {
		/* skip the generic result logging for CCID and diag cmds: */
		if (cmd->hdr.command != IPA_CMD_SETCCID &&
		    cmd->hdr.command != IPA_CMD_DELCCID &&
		    cmd->hdr.command != IPA_CMD_MODCCID &&
		    cmd->hdr.command != IPA_CMD_SET_DIAG_ASS)
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
		return cmd;
	}

	/* handle unsolicited event: */
	switch (cmd->hdr.command) {
	case IPA_CMD_STOPLAN:
		if (cmd->hdr.return_code == IPA_RC_VEPA_TO_VEB_TRANSITION) {
			/* permanent condition - take the interface down: */
			dev_err(&card->gdev->dev,
				"Interface %s is down because the adjacent port is no longer in reflective relay mode\n",
				QETH_CARD_IFNAME(card));
			schedule_work(&card->close_dev_work);
		} else {
			/* transient link loss - just report carrier off: */
			dev_warn(&card->gdev->dev,
				 "The link for interface %s on CHPID 0x%X failed\n",
				 QETH_CARD_IFNAME(card), card->info.chpid);
			qeth_issue_ipa_msg(cmd, cmd->hdr.return_code, card);
			netif_carrier_off(card->dev);
		}
		return NULL;
	case IPA_CMD_STARTLAN:
		dev_info(&card->gdev->dev,
			 "The link for %s on CHPID 0x%X has been restored\n",
			 QETH_CARD_IFNAME(card), card->info.chpid);
		if (card->info.hwtrap)
			card->info.hwtrap = 2;
		qeth_schedule_recovery(card);
		return NULL;
	case IPA_CMD_SETBRIDGEPORT_IQD:
	case IPA_CMD_SETBRIDGEPORT_OSA:
	case IPA_CMD_ADDRESS_CHANGE_NOTIF:
		/* let the discipline (l2/l3) decide whether to consume it: */
		if (card->discipline->control_event_handler(card, cmd))
			return cmd;
		return NULL;
	case IPA_CMD_MODCCID:
		return cmd;
	case IPA_CMD_REGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "irla");
		return NULL;
	case IPA_CMD_UNREGISTER_LOCAL_ADDR:
		QETH_CARD_TEXT(card, 3, "urla");
		return NULL;
	default:
		QETH_DBF_MESSAGE(2, "Received data is IPA but not a reply!\n");
		return cmd;
	}
}
645
/* Fail all commands still waiting for a reply with -EIO, e.g. when the
 * channel goes down. The entries are only notified, not unlinked - the
 * issuing path dequeues its own iob.
 */
void qeth_clear_ipacmd_list(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;
	unsigned long flags;

	QETH_CARD_TEXT(card, 4, "clipalst");

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(iob, &card->cmd_waiter_list, list)
		qeth_notify_cmd(iob, -EIO);
	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL_GPL(qeth_clear_ipacmd_list);
659
/* Check a control-channel buffer for an IDX TERMINATE indication.
 * Returns 0 for a normal buffer, -EPROTONOSUPPORT when the cause code
 * (buffer[4]) indicates an unsupported transport mode, -EIO for any other
 * termination cause.
 */
static int qeth_check_idx_response(struct qeth_card *card,
	unsigned char *buffer)
{
	QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN);
	if ((buffer[2] & QETH_IDX_TERMINATE_MASK) == QETH_IDX_TERMINATE) {
		QETH_DBF_MESSAGE(2, "received an IDX TERMINATE with cause code %#04x\n",
				 buffer[4]);
		QETH_CARD_TEXT(card, 2, "ckidxres");
		QETH_CARD_TEXT(card, 2, " idxterm");
		QETH_CARD_TEXT_(card, 2, "rc%x", buffer[4]);
		if (buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT ||
		    buffer[4] == QETH_IDX_TERM_BAD_TRANSPORT_VM) {
			dev_err(&card->gdev->dev,
				"The device does not support the configured transport mode\n");
			return -EPROTONOSUPPORT;
		}
		return -EIO;
	}
	return 0;
}
680
/* Drop one reference to @iob; frees it when the last reference is gone. */
void qeth_put_cmd(struct qeth_cmd_buffer *iob)
{
	if (refcount_dec_and_test(&iob->ref_count)) {
		kfree(iob->data);
		kfree(iob);
	}
}
EXPORT_SYMBOL_GPL(qeth_put_cmd);
689
/* Generic iob completion callback: just drop the reference. */
static void qeth_release_buffer_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	qeth_put_cmd(iob);
}
696
/* Fail @iob with @rc (waking its waiter) and release the reference. */
static void qeth_cancel_cmd(struct qeth_cmd_buffer *iob, int rc)
{
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
702
/**
 * qeth_alloc_cmd() - allocate a command buffer for @channel.
 * @channel: channel the command will be issued on.
 * @length: size of the data area, at most QETH_BUFSIZE.
 * @ccws: number of CCWs to reserve behind the (8-byte aligned) data area.
 * @timeout: completion timeout for the command.
 *
 * Returns a refcounted iob (one reference owned by the caller), or NULL
 * on invalid length or allocation failure.
 */
struct qeth_cmd_buffer *qeth_alloc_cmd(struct qeth_channel *channel,
				       unsigned int length, unsigned int ccws,
				       long timeout)
{
	struct qeth_cmd_buffer *iob;

	if (length > QETH_BUFSIZE)
		return NULL;

	iob = kzalloc(sizeof(*iob), GFP_KERNEL);
	if (!iob)
		return NULL;

	/* data area first, then the CCW chain; GFP_DMA because the CCW's
	 * CDA is a 32-bit address (see qeth_setup_ccw()).
	 */
	iob->data = kzalloc(ALIGN(length, 8) + ccws * sizeof(struct ccw1),
			    GFP_KERNEL | GFP_DMA);
	if (!iob->data) {
		kfree(iob);
		return NULL;
	}

	init_completion(&iob->done);
	spin_lock_init(&iob->lock);
	INIT_LIST_HEAD(&iob->list);
	refcount_set(&iob->ref_count, 1);
	iob->channel = channel;
	iob->timeout = timeout;
	iob->length = length;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_alloc_cmd);
733
/* Completion callback of the long-running READ on the control channel.
 * Classifies the received buffer (IDX status, IPA reply, unsolicited
 * event), matches it against the pending commands, invokes the matching
 * command's reply callback, and finally restarts the READ.
 */
static void qeth_issue_next_read_cb(struct qeth_card *card,
				    struct qeth_cmd_buffer *iob,
				    unsigned int data_length)
{
	struct qeth_cmd_buffer *request = NULL;
	struct qeth_ipa_cmd *cmd = NULL;
	struct qeth_reply *reply = NULL;
	struct qeth_cmd_buffer *tmp;
	unsigned long flags;
	int rc = 0;

	QETH_CARD_TEXT(card, 4, "sndctlcb");
	rc = qeth_check_idx_response(card, iob->data);
	switch (rc) {
	case 0:
		break;
	case -EIO:
		qeth_schedule_recovery(card);
		/* fall through */
	default:
		/* any IDX termination fails all pending commands: */
		qeth_clear_ipacmd_list(card);
		goto out;
	}

	if (IS_IPA(iob->data)) {
		cmd = (struct qeth_ipa_cmd *) PDU_ENCAPSULATION(iob->data);
		cmd = qeth_check_ipa_data(card, cmd);
		if (!cmd)
			goto out;
		/* OSN gets all IPA data except STARTLAN via its own cb: */
		if (IS_OSN(card) && card->osn_info.assist_cb &&
		    cmd->hdr.command != IPA_CMD_STARTLAN) {
			card->osn_info.assist_cb(card->dev, cmd);
			goto out;
		}
	} else {
		/* non-IPA commands should only flow during initialization */
		if (card->state != CARD_STATE_DOWN)
			goto out;
	}

	/* match against pending cmd requests */
	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(tmp, &card->cmd_waiter_list, list) {
		/* a non-IPA waiter matches anything; otherwise match by
		 * IPA seqno. NOTE(review): cmd is NULL when the received
		 * buffer is non-IPA - this relies on all waiters being
		 * non-IPA too during setup; confirm.
		 */
		if (!IS_IPA(tmp->data) ||
		    __ipa_cmd(tmp)->hdr.seqno == cmd->hdr.seqno) {
			request = tmp;
			/* take the object outside the lock */
			qeth_get_cmd(request);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (!request)
		goto out;

	reply = &request->reply;
	if (!reply->callback) {
		rc = 0;
		goto no_callback;
	}

	spin_lock_irqsave(&request->lock, flags);
	if (request->rc)
		/* Bail out when the requestor has already left: */
		rc = request->rc;
	else
		rc = reply->callback(card, reply, cmd ? (unsigned long)cmd :
							(unsigned long)iob);
	spin_unlock_irqrestore(&request->lock, flags);

no_callback:
	/* rc > 0 means the callback expects more data; keep waiting */
	if (rc <= 0)
		qeth_notify_cmd(request, rc);
	qeth_put_cmd(request);
out:
	/* remember the sequence number we have to acknowledge: */
	memcpy(&card->seqno.pdu_hdr_ack,
		QETH_PDU_HEADER_SEQ_NO(iob->data),
		QETH_SEQ_NO_LENGTH);
	qeth_put_cmd(iob);
	__qeth_issue_next_read(card);
}
816
/* Request that recovery thread @thread be started. Fails with -EPERM when
 * the thread is not in the allowed mask or a start is already pending.
 */
static int qeth_set_thread_start_bit(struct qeth_card *card,
		unsigned long thread)
{
	unsigned long flags;

	spin_lock_irqsave(&card->thread_mask_lock, flags);
	if (!(card->thread_allowed_mask & thread) ||
	      (card->thread_start_mask & thread)) {
		spin_unlock_irqrestore(&card->thread_mask_lock, flags);
		return -EPERM;
	}
	card->thread_start_mask |= thread;
	spin_unlock_irqrestore(&card->thread_mask_lock, flags);
	return 0;
}
832
/* Withdraw a pending start request for @thread and wake up anyone
 * waiting for the thread bitmasks to change.
 */
void qeth_clear_thread_start_bit(struct qeth_card *card, unsigned long thread)
{
        unsigned long flags;

        spin_lock_irqsave(&card->thread_mask_lock, flags);
        card->thread_start_mask &= ~thread;
        spin_unlock_irqrestore(&card->thread_mask_lock, flags);
        wake_up(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_start_bit);
843
/* Mark @thread as no longer running and wake *all* waiters, since
 * multiple parties may be waiting for a running thread to finish.
 */
void qeth_clear_thread_running_bit(struct qeth_card *card, unsigned long thread)
{
        unsigned long flags;

        spin_lock_irqsave(&card->thread_mask_lock, flags);
        card->thread_running_mask &= ~thread;
        spin_unlock_irqrestore(&card->thread_mask_lock, flags);
        wake_up_all(&card->wait_q);
}
EXPORT_SYMBOL_GPL(qeth_clear_thread_running_bit);
854
/* Atomically try to move @thread from "start requested" to "running".
 *
 * Returns 1 when the caller may run the thread (the start bit was
 * consumed and the running bit set), 0 when no start was requested, and
 * -EPERM when a start is pending but the thread is currently forbidden
 * or already running.
 */
static int __qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&card->thread_mask_lock, flags);
        if (card->thread_start_mask & thread) {
                if ((card->thread_allowed_mask & thread) &&
                    !(card->thread_running_mask & thread)) {
                        rc = 1;
                        card->thread_start_mask &= ~thread;
                        card->thread_running_mask |= thread;
                } else
                        rc = -EPERM;
        }
        spin_unlock_irqrestore(&card->thread_mask_lock, flags);
        return rc;
}
873
/* Sleep until __qeth_do_run_thread() yields a non-negative verdict:
 * returns 1 when the caller should run @thread, 0 when no start is
 * pending. A -EPERM verdict just re-arms the wait.
 */
int qeth_do_run_thread(struct qeth_card *card, unsigned long thread)
{
        int rc = 0;

        wait_event(card->wait_q,
                   (rc = __qeth_do_run_thread(card, thread)) >= 0);
        return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_run_thread);
883
/* Kick off asynchronous recovery. The worker is only scheduled when the
 * recovery thread's start bit could be set (thread allowed and no start
 * already pending).
 */
void qeth_schedule_recovery(struct qeth_card *card)
{
        QETH_CARD_TEXT(card, 2, "startrec");
        if (qeth_set_thread_start_bit(card, QETH_RECOVER_THREAD) == 0)
                schedule_work(&card->kernel_thread_starter);
}
EXPORT_SYMBOL_GPL(qeth_schedule_recovery);
891
892 static int qeth_get_problem(struct qeth_card *card, struct ccw_device *cdev,
893                             struct irb *irb)
894 {
895         int dstat, cstat;
896         char *sense;
897
898         sense = (char *) irb->ecw;
899         cstat = irb->scsw.cmd.cstat;
900         dstat = irb->scsw.cmd.dstat;
901
902         if (cstat & (SCHN_STAT_CHN_CTRL_CHK | SCHN_STAT_INTF_CTRL_CHK |
903                      SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK |
904                      SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) {
905                 QETH_CARD_TEXT(card, 2, "CGENCHK");
906                 dev_warn(&cdev->dev, "The qeth device driver "
907                         "failed to recover an error on the device\n");
908                 QETH_DBF_MESSAGE(2, "check on channel %x with dstat=%#x, cstat=%#x\n",
909                                  CCW_DEVID(cdev), dstat, cstat);
910                 print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET,
911                                 16, 1, irb, 64, 1);
912                 return -EIO;
913         }
914
915         if (dstat & DEV_STAT_UNIT_CHECK) {
916                 if (sense[SENSE_RESETTING_EVENT_BYTE] &
917                     SENSE_RESETTING_EVENT_FLAG) {
918                         QETH_CARD_TEXT(card, 2, "REVIND");
919                         return -EIO;
920                 }
921                 if (sense[SENSE_COMMAND_REJECT_BYTE] &
922                     SENSE_COMMAND_REJECT_FLAG) {
923                         QETH_CARD_TEXT(card, 2, "CMDREJi");
924                         return -EIO;
925                 }
926                 if ((sense[2] == 0xaf) && (sense[3] == 0xfe)) {
927                         QETH_CARD_TEXT(card, 2, "AFFE");
928                         return -EIO;
929                 }
930                 if ((!sense[0]) && (!sense[1]) && (!sense[2]) && (!sense[3])) {
931                         QETH_CARD_TEXT(card, 2, "ZEROSEN");
932                         return 0;
933                 }
934                 QETH_CARD_TEXT(card, 2, "DGENCHK");
935                         return -EIO;
936         }
937         return 0;
938 }
939
940 static int qeth_check_irb_error(struct qeth_card *card, struct ccw_device *cdev,
941                                 struct irb *irb)
942 {
943         if (!IS_ERR(irb))
944                 return 0;
945
946         switch (PTR_ERR(irb)) {
947         case -EIO:
948                 QETH_DBF_MESSAGE(2, "i/o-error on channel %x\n",
949                                  CCW_DEVID(cdev));
950                 QETH_CARD_TEXT(card, 2, "ckirberr");
951                 QETH_CARD_TEXT_(card, 2, "  rc%d", -EIO);
952                 return -EIO;
953         case -ETIMEDOUT:
954                 dev_warn(&cdev->dev, "A hardware operation timed out"
955                         " on the device\n");
956                 QETH_CARD_TEXT(card, 2, "ckirberr");
957                 QETH_CARD_TEXT_(card, 2, "  rc%d", -ETIMEDOUT);
958                 return -ETIMEDOUT;
959         default:
960                 QETH_DBF_MESSAGE(2, "unknown error %ld on channel %x\n",
961                                  PTR_ERR(irb), CCW_DEVID(cdev));
962                 QETH_CARD_TEXT(card, 2, "ckirberr");
963                 QETH_CARD_TEXT(card, 2, "  rc???");
964                 return PTR_ERR(irb);
965         }
966 }
967
/* Interrupt handler shared by the read/write/data subchannels of a card.
 * Runs with the ccwdev lock held, so the drvdata pointers resolved below
 * stay valid for the duration of the handler.
 */
static void qeth_irq(struct ccw_device *cdev, unsigned long intparm,
                struct irb *irb)
{
        int rc;
        int cstat, dstat;
        struct qeth_cmd_buffer *iob = NULL;
        struct ccwgroup_device *gdev;
        struct qeth_channel *channel;
        struct qeth_card *card;

        /* while we hold the ccwdev lock, this stays valid: */
        gdev = dev_get_drvdata(&cdev->dev);
        card = dev_get_drvdata(&gdev->dev);
        if (!card)
                return;

        QETH_CARD_TEXT(card, 5, "irq");

        /* Map the interrupting subchannel to one of the card's channels. */
        if (card->read.ccwdev == cdev) {
                channel = &card->read;
                QETH_CARD_TEXT(card, 5, "read");
        } else if (card->write.ccwdev == cdev) {
                channel = &card->write;
                QETH_CARD_TEXT(card, 5, "write");
        } else {
                channel = &card->data;
                QETH_CARD_TEXT(card, 5, "data");
        }

        if (intparm == 0) {
                /* unsolicited interrupt - no cmd attached */
                QETH_CARD_TEXT(card, 5, "irqunsol");
        } else if ((addr_t)intparm != (addr_t)channel->active_cmd) {
                /* interrupt for a cmd we no longer consider active */
                QETH_CARD_TEXT(card, 5, "irqunexp");

                dev_err(&cdev->dev,
                        "Received IRQ with intparm %lx, expected %px\n",
                        intparm, channel->active_cmd);
                if (channel->active_cmd)
                        qeth_cancel_cmd(channel->active_cmd, -EIO);
        } else {
                iob = (struct qeth_cmd_buffer *) (addr_t)intparm;
        }

        channel->active_cmd = NULL;

        rc = qeth_check_irb_error(card, cdev, irb);
        if (rc) {
                /* IO was terminated, free its resources. */
                if (iob)
                        qeth_cancel_cmd(iob, rc);
                atomic_set(&channel->irq_pending, 0);
                wake_up(&card->wait_q);
                return;
        }

        atomic_set(&channel->irq_pending, 0);

        if (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC))
                channel->state = CH_STATE_STOPPED;

        if (irb->scsw.cmd.fctl & (SCSW_FCTL_HALT_FUNC))
                channel->state = CH_STATE_HALTED;

        /* a halt/clear function terminates any in-flight cmd */
        if (iob && (irb->scsw.cmd.fctl & (SCSW_FCTL_CLEAR_FUNC |
                                          SCSW_FCTL_HALT_FUNC))) {
                qeth_cancel_cmd(iob, -ECANCELED);
                iob = NULL;
        }

        cstat = irb->scsw.cmd.cstat;
        dstat = irb->scsw.cmd.dstat;

        if ((dstat & DEV_STAT_UNIT_EXCEP) ||
            (dstat & DEV_STAT_UNIT_CHECK) ||
            (cstat)) {
                if (irb->esw.esw0.erw.cons) {
                        dev_warn(&channel->ccwdev->dev,
                                "The qeth device driver failed to recover "
                                "an error on the device\n");
                        QETH_DBF_MESSAGE(2, "sense data available on channel %x: cstat %#X dstat %#X\n",
                                         CCW_DEVID(channel->ccwdev), cstat,
                                         dstat);
                        print_hex_dump(KERN_WARNING, "qeth: irb ",
                                DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1);
                        print_hex_dump(KERN_WARNING, "qeth: sense data ",
                                DUMP_PREFIX_OFFSET, 16, 1, irb->ecw, 32, 1);
                }

                rc = qeth_get_problem(card, cdev, irb);
                if (rc) {
                        /* unrecoverable: fail all pending cmds, trigger recovery */
                        card->read_or_write_problem = 1;
                        if (iob)
                                qeth_cancel_cmd(iob, rc);
                        qeth_clear_ipacmd_list(card);
                        qeth_schedule_recovery(card);
                        goto out;
                }
        }

        if (iob) {
                /* sanity check: */
                if (irb->scsw.cmd.count > iob->length) {
                        qeth_cancel_cmd(iob, -EIO);
                        goto out;
                }
                /* residual count gives the actually transferred length */
                if (iob->callback)
                        iob->callback(card, iob,
                                      iob->length - irb->scsw.cmd.count);
        }

out:
        wake_up(&card->wait_q);
        return;
}
1082
/* Deliver a TX @notification for every skb in @buf that is owned by an
 * AF_IUCV socket; other skbs are skipped.
 */
static void qeth_notify_skbs(struct qeth_qdio_out_q *q,
                struct qeth_qdio_out_buffer *buf,
                enum iucv_tx_notify notification)
{
        struct sk_buff *skb;

        skb_queue_walk(&buf->skb_list, skb) {
                QETH_CARD_TEXT_(q->card, 5, "skbn%d", notification);
                QETH_CARD_TEXT_(q->card, 5, "%lx", (long) skb);
                if (skb->sk && skb->sk->sk_family == PF_IUCV)
                        iucv_sk(skb->sk)->sk_txnotify(skb, notification);
        }
}
1096
/* Account and release every skb attached to a completed TX buffer.
 * @error:  whether the buffer completed with an error (statistics only).
 * @budget: NAPI budget, passed through to napi_consume_skb().
 */
static void qeth_tx_complete_buf(struct qeth_qdio_out_buffer *buf, bool error,
                                 int budget)
{
        struct qeth_qdio_out_q *queue = buf->q;
        struct sk_buff *skb;

        /* PENDING buffers get a general-error notification here —
         * presumably because no regular completion will follow for them.
         */
        if (atomic_read(&buf->state) == QETH_QDIO_BUF_PENDING)
                qeth_notify_skbs(queue, buf, TX_NOTIFY_GENERALERROR);

        /* Empty buffer? */
        if (buf->next_element_to_fill == 0)
                return;

        QETH_TXQ_STAT_INC(queue, bufs);
        QETH_TXQ_STAT_ADD(queue, buf_elements, buf->next_element_to_fill);
        while ((skb = __skb_dequeue(&buf->skb_list)) != NULL) {
                unsigned int bytes = qdisc_pkt_len(skb);
                bool is_tso = skb_is_gso(skb);
                unsigned int packets;

                /* a TSO skb counts for all of its segments */
                packets = is_tso ? skb_shinfo(skb)->gso_segs : 1;
                if (error) {
                        QETH_TXQ_STAT_ADD(queue, tx_errors, packets);
                } else {
                        QETH_TXQ_STAT_ADD(queue, tx_packets, packets);
                        QETH_TXQ_STAT_ADD(queue, tx_bytes, bytes);
                        if (skb->ip_summed == CHECKSUM_PARTIAL)
                                QETH_TXQ_STAT_ADD(queue, skbs_csum, packets);
                        if (skb_is_nonlinear(skb))
                                QETH_TXQ_STAT_INC(queue, skbs_sg);
                        if (is_tso) {
                                QETH_TXQ_STAT_INC(queue, skbs_tso);
                                QETH_TXQ_STAT_ADD(queue, tso_bytes, bytes);
                        }
                }

                napi_consume_skb(skb, budget);
        }
}
1136
/* Return a TX buffer to the EMPTY state: complete its skbs, free any
 * HW-header allocations, and scrub the QDIO buffer descriptor.
 */
static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
                                     struct qeth_qdio_out_buffer *buf,
                                     bool error, int budget)
{
        int i;

        /* is PCI flag set on buffer? */
        if (buf->buffer->element[0].sflags & SBAL_SFLAGS0_PCI_REQ)
                atomic_dec(&queue->set_pci_flags_count);

        qeth_tx_complete_buf(buf, error, budget);

        /* only elements flagged as headers came from the header cache */
        for (i = 0; i < queue->max_elements; ++i) {
                if (buf->buffer->element[i].addr && buf->is_header[i])
                        kmem_cache_free(qeth_core_header_cache,
                                buf->buffer->element[i].addr);
                buf->is_header[i] = 0;
        }

        qeth_scrub_qdio_buffer(buf->buffer, queue->max_elements);
        buf->next_element_to_fill = 0;
        buf->bytes = 0;
        atomic_set(&buf->state, QETH_QDIO_BUF_EMPTY);
}
1161
/* Complete (as errored) every buffer on @q; with @free set, also release
 * the buffer descriptors themselves.
 */
static void qeth_drain_output_queue(struct qeth_qdio_out_q *q, bool free)
{
        int j;

        for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
                if (!q->bufs[j])
                        continue;
                qeth_cleanup_handled_pending(q, j, 1);
                qeth_clear_output_buffer(q, q->bufs[j], true, 0);
                if (free) {
                        kmem_cache_free(qeth_qdio_outbuf_cache, q->bufs[j]);
                        q->bufs[j] = NULL;
                }
        }
}
1177
1178 void qeth_drain_output_queues(struct qeth_card *card)
1179 {
1180         int i;
1181
1182         QETH_CARD_TEXT(card, 2, "clearqdbf");
1183         /* clear outbound buffers to free skbs */
1184         for (i = 0; i < card->qdio.no_out_queues; ++i) {
1185                 if (card->qdio.out_qs[i])
1186                         qeth_drain_output_queue(card->qdio.out_qs[i], false);
1187         }
1188 }
1189 EXPORT_SYMBOL_GPL(qeth_drain_output_queues);
1190
1191 static void qeth_free_buffer_pool(struct qeth_card *card)
1192 {
1193         struct qeth_buffer_pool_entry *pool_entry, *tmp;
1194         int i = 0;
1195         list_for_each_entry_safe(pool_entry, tmp,
1196                                  &card->qdio.init_pool.entry_list, init_list){
1197                 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i)
1198                         free_page((unsigned long)pool_entry->elements[i]);
1199                 list_del(&pool_entry->init_list);
1200                 kfree(pool_entry);
1201         }
1202 }
1203
/* Detach qeth's IRQ handler from the channel's ccw device. */
static void qeth_clean_channel(struct qeth_channel *channel)
{
        struct ccw_device *cdev = channel->ccwdev;

        QETH_DBF_TEXT(SETUP, 2, "freech");

        spin_lock_irq(get_ccwdev_lock(cdev));
        cdev->handler = NULL;
        spin_unlock_irq(get_ccwdev_lock(cdev));
}
1214
/* Initialize a channel's state and attach qeth's IRQ handler to its ccw
 * device.
 */
static void qeth_setup_channel(struct qeth_channel *channel)
{
        struct ccw_device *cdev = channel->ccwdev;

        QETH_DBF_TEXT(SETUP, 2, "setupch");

        channel->state = CH_STATE_DOWN;
        atomic_set(&channel->irq_pending, 0);

        spin_lock_irq(get_ccwdev_lock(cdev));
        cdev->handler = qeth_irq;
        spin_unlock_irq(get_ccwdev_lock(cdev));
}
1228
/* Adjust an OSA device's TX queue count.
 * @single: restrict the device to one TX queue (no priority queueing).
 *
 * When the count changes, any established QDIO queues are freed so they
 * are re-allocated later with the new count.
 * Returns 0 on success or the netif_set_real_num_tx_queues() error.
 */
static int qeth_osa_set_output_queues(struct qeth_card *card, bool single)
{
        unsigned int count = single ? 1 : card->dev->num_tx_queues;
        int rc;

        rtnl_lock();
        rc = netif_set_real_num_tx_queues(card->dev, count);
        rtnl_unlock();

        if (rc)
                return rc;

        if (card->qdio.no_out_queues == count)
                return 0;

        if (atomic_read(&card->qdio.state) != QETH_QDIO_UNINITIALIZED)
                qeth_free_qdio_queues(card);

        if (count == 1)
                dev_info(&card->gdev->dev, "Priority Queueing not supported\n");

        card->qdio.no_out_queues = count;
        return 0;
}
1253
/* Read the channel-path descriptor of the DATA device and derive the
 * function level and, for OSD/OSX, the supported TX queue count.
 * Returns 0 on success, -ENOMEM when the descriptor cannot be obtained,
 * or the error from qeth_osa_set_output_queues().
 */
static int qeth_update_from_chp_desc(struct qeth_card *card)
{
        struct ccw_device *ccwdev;
        struct channel_path_desc_fmt0 *chp_dsc;
        int rc = 0;

        QETH_CARD_TEXT(card, 2, "chp_desc");

        ccwdev = card->data.ccwdev;
        chp_dsc = ccw_device_get_chp_desc(ccwdev, 0);
        if (!chp_dsc)
                return -ENOMEM;

        card->info.func_level = 0x4100 + chp_dsc->desc;

        if (IS_OSD(card) || IS_OSX(card))
                /* CHPP field bit 6 == 1 -> single queue */
                rc = qeth_osa_set_output_queues(card, chp_dsc->chpp & 0x02);

        kfree(chp_dsc);
        QETH_CARD_TEXT_(card, 2, "nr:%x", card->qdio.no_out_queues);
        QETH_CARD_TEXT_(card, 2, "lvl:%02x", card->info.func_level);
        return rc;
}
1278
1279 static void qeth_init_qdio_info(struct qeth_card *card)
1280 {
1281         QETH_CARD_TEXT(card, 4, "intqdinf");
1282         atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
1283         card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT;
1284         card->qdio.default_out_queue = QETH_DEFAULT_QUEUE;
1285
1286         /* inbound */
1287         card->qdio.no_in_queues = 1;
1288         card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT;
1289         if (IS_IQD(card))
1290                 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_HSDEFAULT;
1291         else
1292                 card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT;
1293         card->qdio.in_buf_pool.buf_count = card->qdio.init_pool.buf_count;
1294         INIT_LIST_HEAD(&card->qdio.in_buf_pool.entry_list);
1295         INIT_LIST_HEAD(&card->qdio.init_pool.entry_list);
1296 }
1297
/* Install the default per-card option values used before the user (or
 * discipline detection) overrides them.
 */
static void qeth_set_initial_options(struct qeth_card *card)
{
        card->options.route4.type = NO_ROUTER;
        card->options.route6.type = NO_ROUTER;
        card->options.rx_sg_cb = QETH_RX_SG_CB;
        card->options.isolation = ISOLATION_MODE_NONE;
        card->options.cq = QETH_CQ_DISABLED;
        card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
}
1307
/* Test, under the mask lock, whether a start of @thread was requested.
 * Returns the masked start bit (non-zero when the thread should be
 * started).
 */
static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
{
        unsigned long flags;
        int rc = 0;

        spin_lock_irqsave(&card->thread_mask_lock, flags);
        QETH_CARD_TEXT_(card, 4, "  %02x%02x%02x",
                        (u8) card->thread_start_mask,
                        (u8) card->thread_allowed_mask,
                        (u8) card->thread_running_mask);
        rc = (card->thread_start_mask & thread);
        spin_unlock_irqrestore(&card->thread_mask_lock, flags);
        return rc;
}
1322
/* Worker for card->kernel_thread_starter: spawn the recovery kthread
 * when its start bit is set. Bails out when both control channels are
 * down; on kthread_run() failure the start/running bits are cleared so
 * a later recovery attempt is possible.
 */
static void qeth_start_kernel_thread(struct work_struct *work)
{
        struct task_struct *ts;
        struct qeth_card *card = container_of(work, struct qeth_card,
                                        kernel_thread_starter);
        QETH_CARD_TEXT(card , 2, "strthrd");

        if (card->read.state != CH_STATE_UP &&
            card->write.state != CH_STATE_UP)
                return;
        if (qeth_do_start_thread(card, QETH_RECOVER_THREAD)) {
                ts = kthread_run(card->discipline->recover, (void *)card,
                                "qeth_recover");
                if (IS_ERR(ts)) {
                        qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD);
                        qeth_clear_thread_running_bit(card,
                                QETH_RECOVER_THREAD);
                }
        }
}
1343
static void qeth_buffer_reclaim_work(struct work_struct *);
/* One-time initialization of a freshly allocated &struct qeth_card:
 * locks, mutexes, list heads, wait queue, default options and the
 * deferred work items.
 */
static void qeth_setup_card(struct qeth_card *card)
{
        QETH_CARD_TEXT(card, 2, "setupcrd");

        card->info.type = CARD_RDEV(card)->id.driver_info;
        card->state = CARD_STATE_DOWN;
        spin_lock_init(&card->lock);
        spin_lock_init(&card->thread_mask_lock);
        mutex_init(&card->conf_mutex);
        mutex_init(&card->discipline_mutex);
        INIT_WORK(&card->kernel_thread_starter, qeth_start_kernel_thread);
        INIT_LIST_HEAD(&card->cmd_waiter_list);
        init_waitqueue_head(&card->wait_q);
        qeth_set_initial_options(card);
        /* IP address takeover */
        INIT_LIST_HEAD(&card->ipato.entries);
        qeth_init_qdio_info(card);
        INIT_DELAYED_WORK(&card->buffer_reclaim_work, qeth_buffer_reclaim_work);
        INIT_WORK(&card->close_dev_work, qeth_close_dev_handler);
}
1365
/* seq_file hook (registered via register_service_level()) that reports
 * the card's microcode level, when one is known.
 */
static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr)
{
        struct qeth_card *card = container_of(slr, struct qeth_card,
                                        qeth_service_level);
        if (card->info.mcl_level[0])
                seq_printf(m, "qeth: %s firmware level %s\n",
                        CARD_BUS_ID(card), card->info.mcl_level);
}
1374
/* Allocate and minimally initialize a qeth card for @gdev.
 *
 * Binds the three ccw devices (read/write/data), creates the per-card
 * ordered event workqueue and the pre-allocated READ command buffer,
 * and registers the service-level reporting hook.
 *
 * Returns the new card, or NULL on any failure; partial allocations are
 * unwound through the out_* labels.
 */
static struct qeth_card *qeth_alloc_card(struct ccwgroup_device *gdev)
{
        struct qeth_card *card;

        QETH_DBF_TEXT(SETUP, 2, "alloccrd");
        card = kzalloc(sizeof(*card), GFP_KERNEL);
        if (!card)
                goto out;
        QETH_DBF_HEX(SETUP, 2, &card, sizeof(void *));

        card->gdev = gdev;
        dev_set_drvdata(&gdev->dev, card);
        CARD_RDEV(card) = gdev->cdev[0];
        CARD_WDEV(card) = gdev->cdev[1];
        CARD_DDEV(card) = gdev->cdev[2];

        card->event_wq = alloc_ordered_workqueue("%s_event", 0,
                                                 dev_name(&gdev->dev));
        if (!card->event_wq)
                goto out_wq;

        card->read_cmd = qeth_alloc_cmd(&card->read, QETH_BUFSIZE, 1, 0);
        if (!card->read_cmd)
                goto out_read_cmd;

        qeth_setup_channel(&card->read);
        qeth_setup_channel(&card->write);
        qeth_setup_channel(&card->data);
        card->qeth_service_level.seq_print = qeth_core_sl_print;
        register_service_level(&card->qeth_service_level);
        return card;

out_read_cmd:
        destroy_workqueue(card->event_wq);
out_wq:
        dev_set_drvdata(&gdev->dev, NULL);
        kfree(card);
out:
        return NULL;
}
1415
/* Issue a CLEAR on the channel's subchannel and wait (up to
 * QETH_TIMEOUT) for qeth_irq() to move the channel to STOPPED, then mark
 * it DOWN.
 * Returns 0 on success, -ERESTARTSYS when interrupted, -ETIME on
 * timeout, or the ccw_device_clear() error.
 */
static int qeth_clear_channel(struct qeth_card *card,
                              struct qeth_channel *channel)
{
        int rc;

        QETH_CARD_TEXT(card, 3, "clearch");
        spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
        rc = ccw_device_clear(channel->ccwdev, (addr_t)channel->active_cmd);
        spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

        if (rc)
                return rc;
        rc = wait_event_interruptible_timeout(card->wait_q,
                        channel->state == CH_STATE_STOPPED, QETH_TIMEOUT);
        if (rc == -ERESTARTSYS)
                return rc;
        /* timeout is detected via the channel state, not the wait rc */
        if (channel->state != CH_STATE_STOPPED)
                return -ETIME;
        channel->state = CH_STATE_DOWN;
        return 0;
}
1437
/* Issue a HALT on the channel's subchannel and wait (up to QETH_TIMEOUT)
 * for qeth_irq() to move the channel to HALTED. Unlike
 * qeth_clear_channel(), the channel is left in the HALTED state.
 * Returns 0 on success, -ERESTARTSYS when interrupted, -ETIME on
 * timeout, or the ccw_device_halt() error.
 */
static int qeth_halt_channel(struct qeth_card *card,
                             struct qeth_channel *channel)
{
        int rc;

        QETH_CARD_TEXT(card, 3, "haltch");
        spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
        rc = ccw_device_halt(channel->ccwdev, (addr_t)channel->active_cmd);
        spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));

        if (rc)
                return rc;
        rc = wait_event_interruptible_timeout(card->wait_q,
                        channel->state == CH_STATE_HALTED, QETH_TIMEOUT);
        if (rc == -ERESTARTSYS)
                return rc;
        if (channel->state != CH_STATE_HALTED)
                return -ETIME;
        return 0;
}
1458
/* Set the channel's ccw device offline. A command still marked active at
 * this point can no longer complete; warn about it and drop the
 * reference.
 */
int qeth_stop_channel(struct qeth_channel *channel)
{
        struct ccw_device *cdev = channel->ccwdev;
        int rc;

        rc = ccw_device_set_offline(cdev);

        spin_lock_irq(get_ccwdev_lock(cdev));
        if (channel->active_cmd) {
                dev_err(&cdev->dev, "Stopped channel while cmd %px was still active\n",
                        channel->active_cmd);
                channel->active_cmd = NULL;
        }
        spin_unlock_irq(get_ccwdev_lock(cdev));

        return rc;
}
EXPORT_SYMBOL_GPL(qeth_stop_channel);
1477
1478 static int qeth_halt_channels(struct qeth_card *card)
1479 {
1480         int rc1 = 0, rc2 = 0, rc3 = 0;
1481
1482         QETH_CARD_TEXT(card, 3, "haltchs");
1483         rc1 = qeth_halt_channel(card, &card->read);
1484         rc2 = qeth_halt_channel(card, &card->write);
1485         rc3 = qeth_halt_channel(card, &card->data);
1486         if (rc1)
1487                 return rc1;
1488         if (rc2)
1489                 return rc2;
1490         return rc3;
1491 }
1492
1493 static int qeth_clear_channels(struct qeth_card *card)
1494 {
1495         int rc1 = 0, rc2 = 0, rc3 = 0;
1496
1497         QETH_CARD_TEXT(card, 3, "clearchs");
1498         rc1 = qeth_clear_channel(card, &card->read);
1499         rc2 = qeth_clear_channel(card, &card->write);
1500         rc3 = qeth_clear_channel(card, &card->data);
1501         if (rc1)
1502                 return rc1;
1503         if (rc2)
1504                 return rc2;
1505         return rc3;
1506 }
1507
/* Optionally halt, then clear, all channels of the card. A halt failure
 * aborts before the clear is attempted.
 */
static int qeth_clear_halt_card(struct qeth_card *card, int halt)
{
        QETH_CARD_TEXT(card, 3, "clhacrd");

        if (halt) {
                int rc = qeth_halt_channels(card);

                if (rc)
                        return rc;
        }
        return qeth_clear_channels(card);
}
1520
/* Shut down the QDIO queues (when established) and then clear/halt the
 * ccw channels, leaving the card in DOWN state.
 * @use_halt: also issue HALT before CLEAR on the channels.
 *
 * The cmpxchg on qdio.state serializes concurrent cleanups: if another
 * caller is already CLEANING, return immediately without touching the
 * channels.
 */
int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
{
        int rc = 0;

        QETH_CARD_TEXT(card, 3, "qdioclr");
        switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
                QETH_QDIO_CLEANING)) {
        case QETH_QDIO_ESTABLISHED:
                if (IS_IQD(card))
                        rc = qdio_shutdown(CARD_DDEV(card),
                                QDIO_FLAG_CLEANUP_USING_HALT);
                else
                        rc = qdio_shutdown(CARD_DDEV(card),
                                QDIO_FLAG_CLEANUP_USING_CLEAR);
                if (rc)
                        QETH_CARD_TEXT_(card, 3, "1err%d", rc);
                atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
                break;
        case QETH_QDIO_CLEANING:
                return rc;
        default:
                break;
        }
        rc = qeth_clear_halt_card(card, use_halt);
        if (rc)
                QETH_CARD_TEXT_(card, 3, "2err%d", rc);
        card->state = CARD_STATE_DOWN;
        return rc;
}
EXPORT_SYMBOL_GPL(qeth_qdio_clear_card);
1551
/* Query z/VM (DIAG 0x26c, VNIC info) for the layer mode of a VM NIC so
 * the discipline can be chosen automatically.
 *
 * Returns the detected discipline, or QETH_DISCIPLINE_UNDETERMINED when
 * the query fails or reports an unrecognized protocol.
 */
static enum qeth_discipline_id qeth_vm_detect_layer(struct qeth_card *card)
{
        enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
        struct diag26c_vnic_resp *response = NULL;
        struct diag26c_vnic_req *request = NULL;
        struct ccw_dev_id id;
        char userid[80];
        int rc = 0;

        QETH_CARD_TEXT(card, 2, "vmlayer");

        cpcmd("QUERY USERID", userid, sizeof(userid), &rc);
        if (rc)
                goto out;

        /* diag26c buffers must live below 2GB, hence GFP_DMA */
        request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
        response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
        if (!request || !response) {
                rc = -ENOMEM;
                goto out;
        }

        ccw_device_get_id(CARD_RDEV(card), &id);
        request->resp_buf_len = sizeof(*response);
        request->resp_version = DIAG26C_VERSION6_VM65918;
        request->req_format = DIAG26C_VNIC_INFO;
        /* the hypervisor expects the user ID in EBCDIC */
        ASCEBC(userid, 8);
        memcpy(&request->sys_name, userid, 8);
        request->devno = id.devno;

        QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
        rc = diag26c(request, response, DIAG26C_PORT_VNIC);
        QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
        if (rc)
                goto out;
        QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));

        /* reject truncated or version-mismatched responses */
        if (request->resp_buf_len < sizeof(*response) ||
            response->version != request->resp_version) {
                rc = -EIO;
                goto out;
        }

        if (response->protocol == VNIC_INFO_PROT_L2)
                disc = QETH_DISCIPLINE_LAYER2;
        else if (response->protocol == VNIC_INFO_PROT_L3)
                disc = QETH_DISCIPLINE_LAYER3;

out:
        kfree(response);
        kfree(request);
        if (rc)
                QETH_CARD_TEXT_(card, 2, "err%x", rc);
        return disc;
}
1607
1608 /* Determine whether the device requires a specific layer discipline */
1609 static enum qeth_discipline_id qeth_enforce_discipline(struct qeth_card *card)
1610 {
1611         enum qeth_discipline_id disc = QETH_DISCIPLINE_UNDETERMINED;
1612
1613         if (IS_OSM(card) || IS_OSN(card))
1614                 disc = QETH_DISCIPLINE_LAYER2;
1615         else if (IS_VM_NIC(card))
1616                 disc = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
1617                                       qeth_vm_detect_layer(card);
1618
1619         switch (disc) {
1620         case QETH_DISCIPLINE_LAYER2:
1621                 QETH_CARD_TEXT(card, 3, "force l2");
1622                 break;
1623         case QETH_DISCIPLINE_LAYER3:
1624                 QETH_CARD_TEXT(card, 3, "force l3");
1625                 break;
1626         default:
1627                 QETH_CARD_TEXT(card, 3, "force no");
1628         }
1629
1630         return disc;
1631 }
1632
1633 static void qeth_set_blkt_defaults(struct qeth_card *card)
1634 {
1635         QETH_CARD_TEXT(card, 2, "cfgblkt");
1636
1637         if (card->info.use_v1_blkt) {
1638                 card->info.blkt.time_total = 0;
1639                 card->info.blkt.inter_packet = 0;
1640                 card->info.blkt.inter_packet_jumbo = 0;
1641         } else {
1642                 card->info.blkt.time_total = 250;
1643                 card->info.blkt.inter_packet = 5;
1644                 card->info.blkt.inter_packet_jumbo = 15;
1645         }
1646 }
1647
1648 static void qeth_init_tokens(struct qeth_card *card)
1649 {
1650         card->token.issuer_rm_w = 0x00010103UL;
1651         card->token.cm_filter_w = 0x00010108UL;
1652         card->token.cm_connection_w = 0x0001010aUL;
1653         card->token.ulp_filter_w = 0x0001010bUL;
1654         card->token.ulp_connection_w = 0x0001010dUL;
1655 }
1656
1657 static void qeth_init_func_level(struct qeth_card *card)
1658 {
1659         switch (card->info.type) {
1660         case QETH_CARD_TYPE_IQD:
1661                 card->info.func_level = QETH_IDX_FUNC_LEVEL_IQD;
1662                 break;
1663         case QETH_CARD_TYPE_OSD:
1664         case QETH_CARD_TYPE_OSN:
1665                 card->info.func_level = QETH_IDX_FUNC_LEVEL_OSD;
1666                 break;
1667         default:
1668                 break;
1669         }
1670 }
1671
/* Stamp the transport-header sequence number into an IDX command just
 * before it is started on the channel.
 */
static void qeth_idx_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	memcpy(QETH_TRANSPORT_HEADER_SEQ_NO(iob->data), &card->seqno.trans_hdr,
	       QETH_SEQ_NO_LENGTH);
	/* only commands on the write channel consume a sequence number */
	if (iob->channel == &card->write)
		card->seqno.trans_hdr++;
}
1680
/* Translate our local IDX function level into the value the peer is
 * expected to report back in its IDX_ACTIVATE reply.
 */
static int qeth_peer_func_level(int level)
{
	int low = level & 0xff;

	if (low == 8)
		return low + 0x400;
	if (((level >> 8) & 3) == 1)
		return low + 0x200;
	return level;
}
1689
/* Finalize an MPC command: stamp the transport-header seqno, then the
 * PDU header seqno and the ack for the last PDU we received.
 */
static void qeth_mpc_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_idx_finalize_cmd(card, iob);

	/* copy the current pdu_hdr seqno, then advance it for the next cmd */
	memcpy(QETH_PDU_HEADER_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr, QETH_SEQ_NO_LENGTH);
	card->seqno.pdu_hdr++;
	memcpy(QETH_PDU_HEADER_ACK_SEQ_NO(iob->data),
	       &card->seqno.pdu_hdr_ack, QETH_SEQ_NO_LENGTH);

	/* MPC commands need no reply parsing, just release the buffer */
	iob->callback = qeth_release_buffer_cb;
}
1703
/* Allocate a command buffer on the write channel, preloaded with an MPC
 * command template of @data_length bytes. Returns NULL on allocation
 * failure; the caller owns the returned iob.
 */
static struct qeth_cmd_buffer *qeth_mpc_alloc_cmd(struct qeth_card *card,
						  void *data,
						  unsigned int data_length)
{
	struct qeth_cmd_buffer *iob;

	iob = qeth_alloc_cmd(&card->write, data_length, 1, QETH_TIMEOUT);
	if (!iob)
		return NULL;

	memcpy(iob->data, data, data_length);
	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, data_length,
		       iob->data);
	/* seqno stamping is deferred until the command is actually sent */
	iob->finalize = qeth_mpc_finalize_cmd;
	return iob;
}
1720
1721 /**
1722  * qeth_send_control_data() -   send control command to the card
1723  * @card:                       qeth_card structure pointer
1724  * @iob:                        qeth_cmd_buffer pointer
1725  * @reply_cb:                   callback function pointer
1726  * @cb_card:                    pointer to the qeth_card structure
1727  * @cb_reply:                   pointer to the qeth_reply structure
1728  * @cb_cmd:                     pointer to the original iob for non-IPA
1729  *                              commands, or to the qeth_ipa_cmd structure
1730  *                              for the IPA commands.
1731  * @reply_param:                private pointer passed to the callback
1732  *
1733  * Callback function gets called one or more times, with cb_cmd
1734  * pointing to the response returned by the hardware. Callback
1735  * function must return
1736  *   > 0 if more reply blocks are expected,
1737  *     0 if the last or only reply block is received, and
1738  *   < 0 on error.
1739  * Callback function can get the value of the reply_param pointer from the
1740  * field 'param' of the structure qeth_reply.
1741  */
1742
1743 static int qeth_send_control_data(struct qeth_card *card,
1744                                   struct qeth_cmd_buffer *iob,
1745                                   int (*reply_cb)(struct qeth_card *cb_card,
1746                                                   struct qeth_reply *cb_reply,
1747                                                   unsigned long cb_cmd),
1748                                   void *reply_param)
1749 {
1750         struct qeth_channel *channel = iob->channel;
1751         struct qeth_reply *reply = &iob->reply;
1752         long timeout = iob->timeout;
1753         int rc;
1754
1755         QETH_CARD_TEXT(card, 2, "sendctl");
1756
1757         reply->callback = reply_cb;
1758         reply->param = reply_param;
1759
1760         timeout = wait_event_interruptible_timeout(card->wait_q,
1761                                                    qeth_trylock_channel(channel),
1762                                                    timeout);
1763         if (timeout <= 0) {
1764                 qeth_put_cmd(iob);
1765                 return (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
1766         }
1767
1768         if (iob->finalize)
1769                 iob->finalize(card, iob);
1770         QETH_DBF_HEX(CTRL, 2, iob->data, min(iob->length, QETH_DBF_CTRL_LEN));
1771
1772         qeth_enqueue_cmd(card, iob);
1773
1774         /* This pairs with iob->callback, and keeps the iob alive after IO: */
1775         qeth_get_cmd(iob);
1776
1777         QETH_CARD_TEXT(card, 6, "noirqpnd");
1778         spin_lock_irq(get_ccwdev_lock(channel->ccwdev));
1779         rc = ccw_device_start_timeout(channel->ccwdev, __ccw_from_cmd(iob),
1780                                       (addr_t) iob, 0, 0, timeout);
1781         if (!rc)
1782                 channel->active_cmd = iob;
1783         spin_unlock_irq(get_ccwdev_lock(channel->ccwdev));
1784         if (rc) {
1785                 QETH_DBF_MESSAGE(2, "qeth_send_control_data on device %x: ccw_device_start rc = %i\n",
1786                                  CARD_DEVID(card), rc);
1787                 QETH_CARD_TEXT_(card, 2, " err%d", rc);
1788                 qeth_dequeue_cmd(card, iob);
1789                 qeth_put_cmd(iob);
1790                 atomic_set(&channel->irq_pending, 0);
1791                 wake_up(&card->wait_q);
1792                 goto out;
1793         }
1794
1795         timeout = wait_for_completion_interruptible_timeout(&iob->done,
1796                                                             timeout);
1797         if (timeout <= 0)
1798                 rc = (timeout == -ERESTARTSYS) ? -EINTR : -ETIME;
1799
1800         qeth_dequeue_cmd(card, iob);
1801
1802         if (reply_cb) {
1803                 /* Wait until the callback for a late reply has completed: */
1804                 spin_lock_irq(&iob->lock);
1805                 if (rc)
1806                         /* Zap any callback that's still pending: */
1807                         iob->rc = rc;
1808                 spin_unlock_irq(&iob->lock);
1809         }
1810
1811         if (!rc)
1812                 rc = iob->rc;
1813
1814 out:
1815         qeth_put_cmd(iob);
1816         return rc;
1817 }
1818
/* layout of the Read-Configuration-Data response: three consecutive
 * node descriptors, parsed in qeth_read_conf_data_cb()
 */
struct qeth_node_desc {
	struct node_descriptor nd1;
	struct node_descriptor nd2;
	struct node_descriptor nd3;
};
1824
/* Completion callback for the RCD command: extract unit addressing and
 * adapter properties from the returned node descriptors.
 */
static void qeth_read_conf_data_cb(struct qeth_card *card,
				   struct qeth_cmd_buffer *iob,
				   unsigned int data_length)
{
	struct qeth_node_desc *nd = (struct qeth_node_desc *) iob->data;
	int rc = 0;
	u8 *tag;

	QETH_CARD_TEXT(card, 2, "cfgunit");

	/* reject short responses before touching any descriptor field */
	if (data_length < sizeof(*nd)) {
		rc = -EINVAL;
		goto out;
	}

	/* plant code "VM" (in EBCDIC) identifies a z/VM virtual NIC */
	card->info.is_vm_nic = nd->nd1.plant[0] == _ascebc['V'] &&
			       nd->nd1.plant[1] == _ascebc['M'];
	tag = (u8 *)&nd->nd1.tag;
	card->info.chpid = tag[0];
	card->info.unit_addr2 = tag[1];

	tag = (u8 *)&nd->nd2.tag;
	card->info.cula = tag[1];

	/* EBCDIC model "001".."004" (0xF0 0xF0 0xF1..0xF4) needs v1 BLKT */
	card->info.use_v1_blkt = nd->nd3.model[0] == 0xF0 &&
				 nd->nd3.model[1] == 0xF0 &&
				 nd->nd3.model[2] >= 0xF1 &&
				 nd->nd3.model[2] <= 0xF4;

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
1858
/* Issue a Read-Configuration-Data command on the data channel.
 * Returns 0 on success, -EOPNOTSUPP if the device offers no RCD
 * command, -EINVAL if its response would be too short, -ENOMEM on
 * allocation failure, or the error from qeth_send_control_data().
 */
static int qeth_read_conf_data(struct qeth_card *card)
{
	struct qeth_channel *channel = &card->data;
	struct qeth_cmd_buffer *iob;
	struct ciw *ciw;

	/* scan for RCD command in extended SenseID data */
	ciw = ccw_device_get_ciw(channel->ccwdev, CIW_TYPE_RCD);
	if (!ciw || ciw->cmd == 0)
		return -EOPNOTSUPP;
	if (ciw->count < sizeof(struct qeth_node_desc))
		return -EINVAL;

	/* buffer is sized per the device's advertised RCD response length */
	iob = qeth_alloc_cmd(channel, ciw->count, 1, QETH_RCD_TIMEOUT);
	if (!iob)
		return -ENOMEM;

	iob->callback = qeth_read_conf_data_cb;
	qeth_setup_ccw(__ccw_from_cmd(iob), ciw->cmd, 0, iob->length,
		       iob->data);

	return qeth_send_control_data(card, iob, NULL, NULL);
}
1882
/* Validate an IDX_ACTIVATE reply and map a negative reply's cause code
 * to an errno. Returns 0 for a positive reply.
 */
static int qeth_idx_check_activate_response(struct qeth_card *card,
					    struct qeth_channel *channel,
					    struct qeth_cmd_buffer *iob)
{
	int rc;

	rc = qeth_check_idx_response(card, iob->data);
	if (rc)
		return rc;

	if (QETH_IS_IDX_ACT_POS_REPLY(iob->data))
		return 0;

	/* negative reply: */
	QETH_CARD_TEXT_(card, 2, "idxneg%c",
			QETH_IDX_ACT_CAUSE_CODE(iob->data));

	switch (QETH_IDX_ACT_CAUSE_CODE(iob->data)) {
	case QETH_IDX_ACT_ERR_EXCL:
		dev_err(&channel->ccwdev->dev,
			"The adapter is used exclusively by another host\n");
		return -EBUSY;
	case QETH_IDX_ACT_ERR_AUTH:
	case QETH_IDX_ACT_ERR_AUTH_USER:
		dev_err(&channel->ccwdev->dev,
			"Setting the device online failed because of insufficient authorization\n");
		return -EPERM;
	default:
		/* unrecognized cause code, only trace it */
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: negative reply\n",
				 CCW_DEVID(channel->ccwdev));
		return -EIO;
	}
}
1916
/* Completion callback for IDX_ACTIVATE on the read channel: verify the
 * reply, check the peer's function level and save its tokens.
 */
static void qeth_idx_activate_read_channel_cb(struct qeth_card *card,
					      struct qeth_cmd_buffer *iob,
					      unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxrdcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	/* the peer must echo a function level matching our own */
	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if (peer_level != qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
		goto out;
	}

	/* remember the issuer token and microcode level for later cmds */
	memcpy(&card->token.issuer_rm_r,
	       QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	memcpy(&card->info.mcl_level[0],
	       QETH_IDX_REPLY_LEVEL(iob->data), QETH_MCL_LENGTH);

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
1950
/* Completion callback for IDX_ACTIVATE on the write channel: verify the
 * reply and the peer's function level (no tokens to save here).
 */
static void qeth_idx_activate_write_channel_cb(struct qeth_card *card,
					       struct qeth_cmd_buffer *iob,
					       unsigned int data_length)
{
	struct qeth_channel *channel = iob->channel;
	u16 peer_level;
	int rc;

	QETH_CARD_TEXT(card, 2, "idxwrcb");

	rc = qeth_idx_check_activate_response(card, channel, iob);
	if (rc)
		goto out;

	/* bit 0x0100 in the reply is masked out before comparing --
	 * presumably an informational flag; TODO confirm its meaning
	 */
	memcpy(&peer_level, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2);
	if ((peer_level & ~0x0100) !=
	    qeth_peer_func_level(card->info.func_level)) {
		QETH_DBF_MESSAGE(2, "IDX_ACTIVATE on channel %x: function level mismatch (sent: %#x, received: %#x)\n",
				 CCW_DEVID(channel->ccwdev),
				 card->info.func_level, peer_level);
		rc = -EINVAL;
	}

out:
	qeth_notify_cmd(iob, rc);
	qeth_put_cmd(iob);
}
1978
/* Fill in the common fields of an IDX_ACTIVATE command: two chained
 * CCWs (write the request, then read the reply into the same buffer),
 * plus port, tokens, function level and device addressing.
 */
static void qeth_idx_setup_activate_cmd(struct qeth_card *card,
					struct qeth_cmd_buffer *iob)
{
	u16 addr = (card->info.cula << 8) + card->info.unit_addr2;
	/* 0x80 is OR'ed into the port number -- presumably a "port valid"
	 * flag in the IDX protocol; TODO confirm against the MPC spec
	 */
	u8 port = ((u8)card->dev->dev_port) | 0x80;
	struct ccw1 *ccw = __ccw_from_cmd(iob);
	struct ccw_dev_id dev_id;

	/* CCW_FLAG_CC chains the write CCW to the following read CCW */
	qeth_setup_ccw(&ccw[0], CCW_CMD_WRITE, CCW_FLAG_CC, IDX_ACTIVATE_SIZE,
		       iob->data);
	qeth_setup_ccw(&ccw[1], CCW_CMD_READ, 0, iob->length, iob->data);
	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	iob->finalize = qeth_idx_finalize_cmd;

	memcpy(QETH_IDX_ACT_PNO(iob->data), &port, 1);
	memcpy(QETH_IDX_ACT_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data),
	       &card->info.func_level, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &dev_id.devno, 2);
	memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &addr, 2);
}
2001
2002 static int qeth_idx_activate_read_channel(struct qeth_card *card)
2003 {
2004         struct qeth_channel *channel = &card->read;
2005         struct qeth_cmd_buffer *iob;
2006         int rc;
2007
2008         QETH_CARD_TEXT(card, 2, "idxread");
2009
2010         iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2011         if (!iob)
2012                 return -ENOMEM;
2013
2014         memcpy(iob->data, IDX_ACTIVATE_READ, IDX_ACTIVATE_SIZE);
2015         qeth_idx_setup_activate_cmd(card, iob);
2016         iob->callback = qeth_idx_activate_read_channel_cb;
2017
2018         rc = qeth_send_control_data(card, iob, NULL, NULL);
2019         if (rc)
2020                 return rc;
2021
2022         channel->state = CH_STATE_UP;
2023         return 0;
2024 }
2025
2026 static int qeth_idx_activate_write_channel(struct qeth_card *card)
2027 {
2028         struct qeth_channel *channel = &card->write;
2029         struct qeth_cmd_buffer *iob;
2030         int rc;
2031
2032         QETH_CARD_TEXT(card, 2, "idxwrite");
2033
2034         iob = qeth_alloc_cmd(channel, QETH_BUFSIZE, 2, QETH_TIMEOUT);
2035         if (!iob)
2036                 return -ENOMEM;
2037
2038         memcpy(iob->data, IDX_ACTIVATE_WRITE, IDX_ACTIVATE_SIZE);
2039         qeth_idx_setup_activate_cmd(card, iob);
2040         iob->callback = qeth_idx_activate_write_channel_cb;
2041
2042         rc = qeth_send_control_data(card, iob, NULL, NULL);
2043         if (rc)
2044                 return rc;
2045
2046         channel->state = CH_STATE_UP;
2047         return 0;
2048 }
2049
2050 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
2051                 unsigned long data)
2052 {
2053         struct qeth_cmd_buffer *iob;
2054
2055         QETH_CARD_TEXT(card, 2, "cmenblcb");
2056
2057         iob = (struct qeth_cmd_buffer *) data;
2058         memcpy(&card->token.cm_filter_r,
2059                QETH_CM_ENABLE_RESP_FILTER_TOKEN(iob->data),
2060                QETH_MPC_TOKEN_LENGTH);
2061         return 0;
2062 }
2063
/* Send the CM_ENABLE command: advertise our issuer and filter tokens so
 * the card can enable the control-module connection.
 */
static int qeth_cm_enable(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmenable");

	iob = qeth_mpc_alloc_cmd(card, CM_ENABLE, CM_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	/* issuer_rm_r was learned from the IDX_ACTIVATE reply */
	memcpy(QETH_CM_ENABLE_ISSUER_RM_TOKEN(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_w, QETH_MPC_TOKEN_LENGTH);

	return qeth_send_control_data(card, iob, qeth_cm_enable_cb, NULL);
}
2081
2082 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
2083                 unsigned long data)
2084 {
2085         struct qeth_cmd_buffer *iob;
2086
2087         QETH_CARD_TEXT(card, 2, "cmsetpcb");
2088
2089         iob = (struct qeth_cmd_buffer *) data;
2090         memcpy(&card->token.cm_connection_r,
2091                QETH_CM_SETUP_RESP_DEST_ADDR(iob->data),
2092                QETH_MPC_TOKEN_LENGTH);
2093         return 0;
2094 }
2095
/* Send the CM_SETUP command: establish the control-module connection
 * using the tokens exchanged during IDX_ACTIVATE and CM_ENABLE.
 */
static int qeth_cm_setup(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "cmsetup");

	iob = qeth_mpc_alloc_cmd(card, CM_SETUP, CM_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_CM_SETUP_DEST_ADDR(iob->data),
	       &card->token.issuer_rm_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_CM_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.cm_connection_w, QETH_MPC_TOKEN_LENGTH);
	/* cm_filter_r was learned from the CM_ENABLE reply */
	memcpy(QETH_CM_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.cm_filter_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, qeth_cm_setup_cb, NULL);
}
2114
/* Apply a new maximum MTU reported by the card to the net_device,
 * adjusting the current MTU and (for IQD) the RX buffer size.
 * @max_mtu == 0 means the card did not report one.
 */
static int qeth_update_max_mtu(struct qeth_card *card, unsigned int max_mtu)
{
	struct net_device *dev = card->dev;
	unsigned int new_mtu;

	if (!max_mtu) {
		/* IQD needs accurate max MTU to set up its RX buffers: */
		if (IS_IQD(card))
			return -EINVAL;
		/* tolerate quirky HW: */
		max_mtu = ETH_MAX_MTU;
	}

	/* dev->mtu / dev->max_mtu are only touched under the RTNL lock */
	rtnl_lock();
	if (IS_IQD(card)) {
		/* move any device with default MTU to new max MTU: */
		new_mtu = (dev->mtu == dev->max_mtu) ? max_mtu : dev->mtu;

		/* adjust RX buffer size to new max MTU: */
		card->qdio.in_buf_size = max_mtu + 2 * PAGE_SIZE;
		if (dev->max_mtu && dev->max_mtu != max_mtu)
			/* buffer size changed: existing queues must be rebuilt */
			qeth_free_qdio_queues(card);
	} else {
		if (dev->mtu)
			new_mtu = dev->mtu;
		/* default MTUs for first setup: */
		else if (IS_LAYER2(card))
			new_mtu = ETH_DATA_LEN;
		else
			new_mtu = ETH_DATA_LEN - 8; /* allow for LLC + SNAP */
	}

	dev->max_mtu = max_mtu;
	dev->mtu = min(new_mtu, max_mtu);
	rtnl_unlock();
	return 0;
}
2152
/* Translate the IQD frame-size code from the ULP_ENABLE reply into an
 * MTU in bytes; unknown codes map to 0.
 */
static int qeth_get_mtu_outof_framesize(int framesize)
{
	if (framesize == 0x4000)
		return 8192;
	if (framesize == 0x6000)
		return 16384;
	if (framesize == 0xa000)
		return 32768;
	if (framesize == 0xffff)
		return 57344;
	return 0;
}
2168
/* Reply callback for ULP_ENABLE: save the filter token, extract the max
 * MTU (returned via reply->param, a u16 *) and the link type.
 */
static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	__u16 mtu, framesize;
	__u16 len;
	__u8 link_type;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpenacb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_filter_r,
	       QETH_ULP_ENABLE_RESP_FILTER_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (IS_IQD(card)) {
		/* IQD reports a frame-size code instead of a plain MTU */
		memcpy(&framesize, QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data), 2);
		mtu = qeth_get_mtu_outof_framesize(framesize);
	} else {
		mtu = *(__u16 *)QETH_ULP_ENABLE_RESP_MAX_MTU(iob->data);
	}
	*(u16 *)reply->param = mtu;

	/* link type is only present when the DIF info block is long enough */
	memcpy(&len, QETH_ULP_ENABLE_RESP_DIFINFO_LEN(iob->data), 2);
	if (len >= QETH_MPC_DIFINFO_LEN_INDICATES_LINK_TYPE) {
		memcpy(&link_type,
		       QETH_ULP_ENABLE_RESP_LINK_TYPE(iob->data), 1);
		card->info.link_type = link_type;
	} else
		card->info.link_type = 0;
	QETH_CARD_TEXT_(card, 2, "link%d", card->info.link_type);
	return 0;
}
2201
2202 static u8 qeth_mpc_select_prot_type(struct qeth_card *card)
2203 {
2204         if (IS_OSN(card))
2205                 return QETH_PROT_OSN2;
2206         return IS_LAYER2(card) ? QETH_PROT_LAYER2 : QETH_PROT_TCPIP;
2207 }
2208
/* Send the ULP_ENABLE command, then propagate the max MTU the card
 * reported to the net_device.
 */
static int qeth_ulp_enable(struct qeth_card *card)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	struct qeth_cmd_buffer *iob;
	u16 max_mtu;
	int rc;

	QETH_CARD_TEXT(card, 2, "ulpenabl");

	iob = qeth_mpc_alloc_cmd(card, ULP_ENABLE, ULP_ENABLE_SIZE);
	if (!iob)
		return -ENOMEM;

	*(QETH_ULP_ENABLE_LINKNUM(iob->data)) = (u8) card->dev->dev_port;
	memcpy(QETH_ULP_ENABLE_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_ULP_ENABLE_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_ENABLE_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_w, QETH_MPC_TOKEN_LENGTH);
	/* max_mtu is filled in by qeth_ulp_enable_cb() via reply->param */
	rc = qeth_send_control_data(card, iob, qeth_ulp_enable_cb, &max_mtu);
	if (rc)
		return rc;
	return qeth_update_max_mtu(card, max_mtu);
}
2233
/* Reply callback for ULP_SETUP: save the connection token; a token
 * starting with "00S" signals the OLM connection limit was hit.
 */
static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply,
		unsigned long data)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "ulpstpcb");

	iob = (struct qeth_cmd_buffer *) data;
	memcpy(&card->token.ulp_connection_r,
	       QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
	       QETH_MPC_TOKEN_LENGTH);
	if (!strncmp("00S", QETH_ULP_SETUP_RESP_CONNECTION_TOKEN(iob->data),
		     3)) {
		QETH_CARD_TEXT(card, 2, "olmlimit");
		dev_err(&card->gdev->dev, "A connection could not be "
			"established because of an OLM limit\n");
		return -EMLINK;
	}
	return 0;
}
2254
/* Send the ULP_SETUP command: establish the ULP connection, identifying
 * the QDIO data device by its device number and real address.
 */
static int qeth_ulp_setup(struct qeth_card *card)
{
	__u16 temp;
	struct qeth_cmd_buffer *iob;
	struct ccw_dev_id dev_id;

	QETH_CARD_TEXT(card, 2, "ulpsetup");

	iob = qeth_mpc_alloc_cmd(card, ULP_SETUP, ULP_SETUP_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_ULP_SETUP_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_ULP_SETUP_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_w, QETH_MPC_TOKEN_LENGTH);
	/* ulp_filter_r was learned from the ULP_ENABLE reply */
	memcpy(QETH_ULP_SETUP_FILTER_TOKEN(iob->data),
	       &card->token.ulp_filter_r, QETH_MPC_TOKEN_LENGTH);

	ccw_device_get_id(CARD_DDEV(card), &dev_id);
	memcpy(QETH_ULP_SETUP_CUA(iob->data), &dev_id.devno, 2);
	/* real device address = CULA in the high byte, unit addr in the low */
	temp = (card->info.cula << 8) + card->info.unit_addr2;
	memcpy(QETH_ULP_SETUP_REAL_DEVADDR(iob->data), &temp, 2);
	return qeth_send_control_data(card, iob, qeth_ulp_setup_cb, NULL);
}
2280
/* Allocate and initialize the tracking structure for one outbound QDIO
 * buffer slot. The previous occupant of the slot (if any) is kept
 * reachable via next_pending so its completion can still be processed.
 */
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *q, int bidx)
{
	struct qeth_qdio_out_buffer *newbuf;

	/* GFP_ATOMIC: may run from a context that must not sleep */
	newbuf = kmem_cache_zalloc(qeth_qdio_outbuf_cache, GFP_ATOMIC);
	if (!newbuf)
		return -ENOMEM;

	newbuf->buffer = q->qdio_bufs[bidx];
	skb_queue_head_init(&newbuf->skb_list);
	lockdep_set_class(&newbuf->skb_list.lock, &qdio_out_skb_queue_key);
	newbuf->q = q;
	/* chain the old buffer so a pending completion isn't lost */
	newbuf->next_pending = q->bufs[bidx];
	atomic_set(&newbuf->state, QETH_QDIO_BUF_EMPTY);
	q->bufs[bidx] = newbuf;
	return 0;
}
2298
2299 static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
2300 {
2301         if (!q)
2302                 return;
2303
2304         qeth_drain_output_queue(q, true);
2305         qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2306         kfree(q);
2307 }
2308
2309 static struct qeth_qdio_out_q *qeth_alloc_output_queue(void)
2310 {
2311         struct qeth_qdio_out_q *q = kzalloc(sizeof(*q), GFP_KERNEL);
2312
2313         if (!q)
2314                 return NULL;
2315
2316         if (qdio_alloc_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q)) {
2317                 kfree(q);
2318                 return NULL;
2319         }
2320         return q;
2321 }
2322
2323 static void qeth_tx_completion_timer(struct timer_list *timer)
2324 {
2325         struct qeth_qdio_out_q *queue = from_timer(queue, timer, timer);
2326
2327         napi_schedule(&queue->napi);
2328         QETH_TXQ_STAT_INC(queue, completion_timer);
2329 }
2330
/* Allocate all QDIO resources: the inbound queue, the buffer pool, all
 * outbound queues with their buffer trackers, and the completion queue.
 * On any failure everything already allocated is unwound via the goto
 * chain and -ENOMEM is returned. Idempotent via the qdio.state cmpxchg.
 */
static int qeth_alloc_qdio_queues(struct qeth_card *card)
{
	int i, j;

	QETH_CARD_TEXT(card, 2, "allcqdbf");

	/* already allocated (or further along)? then nothing to do */
	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
		QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
		return 0;

	QETH_CARD_TEXT(card, 2, "inq");
	card->qdio.in_q = qeth_alloc_qdio_queue();
	if (!card->qdio.in_q)
		goto out_nomem;

	/* inbound buffer pool */
	if (qeth_alloc_buffer_pool(card))
		goto out_freeinq;

	/* outbound */
	for (i = 0; i < card->qdio.no_out_queues; ++i) {
		struct qeth_qdio_out_q *queue;

		queue = qeth_alloc_output_queue();
		if (!queue)
			goto out_freeoutq;
		QETH_CARD_TEXT_(card, 2, "outq %i", i);
		QETH_CARD_HEX(card, 2, &queue, sizeof(void *));
		card->qdio.out_qs[i] = queue;
		queue->card = card;
		queue->queue_no = i;
		timer_setup(&queue->timer, qeth_tx_completion_timer, 0);

		/* give outbound qeth_qdio_buffers their qdio_buffers */
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
			WARN_ON(queue->bufs[j]);
			if (qeth_init_qdio_out_buf(queue, j))
				goto out_freeoutqbufs;
		}
	}

	/* completion */
	if (qeth_alloc_cq(card))
		goto out_freeoutq;

	return 0;

out_freeoutqbufs:
	/* free the buffers of the partially-initialized queue i; the queue
	 * itself is then released by the out_freeoutq loop below
	 */
	while (j > 0) {
		--j;
		kmem_cache_free(qeth_qdio_outbuf_cache,
				card->qdio.out_qs[i]->bufs[j]);
		card->qdio.out_qs[i]->bufs[j] = NULL;
	}
out_freeoutq:
	while (i > 0) {
		qeth_free_output_queue(card->qdio.out_qs[--i]);
		card->qdio.out_qs[i] = NULL;
	}
	qeth_free_buffer_pool(card);
out_freeinq:
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
out_nomem:
	atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
	return -ENOMEM;
}
2398
/* Release all QDIO resources allocated by qeth_alloc_qdio_queues().
 * No-op if nothing is allocated (guarded by the qdio.state xchg).
 */
static void qeth_free_qdio_queues(struct qeth_card *card)
{
	int i, j;

	if (atomic_xchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED) ==
		QETH_QDIO_UNINITIALIZED)
		return;

	qeth_free_cq(card);
	/* make sure no reclaim work touches the buffers while we free them */
	cancel_delayed_work_sync(&card->buffer_reclaim_work);
	for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j) {
		if (card->qdio.in_q->bufs[j].rx_skb)
			dev_kfree_skb_any(card->qdio.in_q->bufs[j].rx_skb);
	}
	qeth_free_qdio_queue(card->qdio.in_q);
	card->qdio.in_q = NULL;
	/* inbound buffer pool */
	qeth_free_buffer_pool(card);
	/* free outbound qdio_qs */
	for (i = 0; i < card->qdio.no_out_queues; i++) {
		qeth_free_output_queue(card->qdio.out_qs[i]);
		card->qdio.out_qs[i] = NULL;
	}
}
2423
/* Fill bytes 0..15 of the QIB parameter field with the "PCIT" (PCI
 * thresholds) entry: an EBCDIC tag followed by three 32-bit values.
 */
static void qeth_create_qib_param_field(struct qeth_card *card,
		char *param_field)
{

	param_field[0] = _ascebc['P'];
	param_field[1] = _ascebc['C'];
	param_field[2] = _ascebc['I'];
	param_field[3] = _ascebc['T'];
	/* NOTE(review): raw int stores through a char buffer -- relies on
	 * the param field being suitably aligned for 4-byte accesses
	 */
	*((unsigned int *) (&param_field[4])) = QETH_PCI_THRESHOLD_A(card);
	*((unsigned int *) (&param_field[8])) = QETH_PCI_THRESHOLD_B(card);
	*((unsigned int *) (&param_field[12])) = QETH_PCI_TIMER_VALUE(card);
}
2436
/* Fill bytes 16..31 of the QIB parameter field with the "BLKT"
 * (blocking thresholds) entry: an EBCDIC tag plus three 32-bit values
 * taken from card->info.blkt.
 */
static void qeth_create_qib_param_field_blkt(struct qeth_card *card,
		char *param_field)
{
	param_field[16] = _ascebc['B'];
	param_field[17] = _ascebc['L'];
	param_field[18] = _ascebc['K'];
	param_field[19] = _ascebc['T'];
	*((unsigned int *) (&param_field[20])) = card->info.blkt.time_total;
	*((unsigned int *) (&param_field[24])) = card->info.blkt.inter_packet;
	*((unsigned int *) (&param_field[28])) =
		card->info.blkt.inter_packet_jumbo;
}
2449
/* Activate QDIO on the data device; thin wrapper around qdio_activate(). */
static int qeth_qdio_activate(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 3, "qdioact");
	return qdio_activate(CARD_DDEV(card));
}
2455
/* Send the DM_ACT command to activate the data-mover for the
 * established ULP connection.
 */
static int qeth_dm_act(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "dmact");

	iob = qeth_mpc_alloc_cmd(card, DM_ACT, DM_ACT_SIZE);
	if (!iob)
		return -ENOMEM;

	memcpy(QETH_DM_ACT_DEST_ADDR(iob->data),
	       &card->token.cm_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_DM_ACT_CONNECTION_TOKEN(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	return qeth_send_control_data(card, iob, NULL, NULL);
}
2472
/*
 * Run the MPC bring-up sequence: start the read channel, enable and set up
 * the CM and ULP connections, then allocate, establish and activate the
 * QDIO queues, and finally activate data transfer (DM_ACT).
 * Returns 0 on success or the error of the failing step. Each step logs a
 * unique "Nerr" trace label so a failing trace pinpoints the step.
 */
static int qeth_mpc_initialize(struct qeth_card *card)
{
	int rc;

	QETH_CARD_TEXT(card, 2, "mpcinit");

	rc = qeth_issue_next_read(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		return rc;
	}
	rc = qeth_cm_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "2err%d", rc);
		return rc;
	}
	rc = qeth_cm_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		return rc;
	}
	rc = qeth_ulp_enable(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		return rc;
	}
	rc = qeth_ulp_setup(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		return rc;
	}
	rc = qeth_alloc_qdio_queues(card);
	if (rc) {
		/* previously logged as "5err" too, which made the trace
		 * ambiguous with the qeth_ulp_setup() failure above
		 */
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		return rc;
	}
	rc = qeth_qdio_establish(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "7err%d", rc);
		qeth_free_qdio_queues(card);
		return rc;
	}
	rc = qeth_qdio_activate(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "8err%d", rc);
		return rc;
	}
	rc = qeth_dm_act(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "9err%d", rc);
		return rc;
	}

	return 0;
}
2528
/* Log an info message describing the card type and its microcode level
 * (MCL), normalizing the level string per card type first.
 */
void qeth_print_status_message(struct qeth_card *card)
{
	switch (card->info.type) {
	case QETH_CARD_TYPE_OSD:
	case QETH_CARD_TYPE_OSM:
	case QETH_CARD_TYPE_OSX:
		/* VM will use a non-zero first character
		 * to indicate a HiperSockets like reporting
		 * of the level OSA sets the first character to zero
		 * */
		if (!card->info.mcl_level[0]) {
			/* OSA: render level bytes 2 and 3 as hex digits */
			sprintf(card->info.mcl_level, "%02x%02x",
				card->info.mcl_level[2],
				card->info.mcl_level[3]);
			break;
		}
		/* fallthrough */
	case QETH_CARD_TYPE_IQD:
		if (IS_VM_NIC(card) || (card->info.mcl_level[0] & 0x80)) {
			/* convert the EBCDIC level string to ASCII in place
			 * and NUL-terminate it
			 */
			card->info.mcl_level[0] = (char) _ebcasc[(__u8)
				card->info.mcl_level[0]];
			card->info.mcl_level[1] = (char) _ebcasc[(__u8)
				card->info.mcl_level[1]];
			card->info.mcl_level[2] = (char) _ebcasc[(__u8)
				card->info.mcl_level[2]];
			card->info.mcl_level[3] = (char) _ebcasc[(__u8)
				card->info.mcl_level[3]];
			card->info.mcl_level[QETH_MCL_LENGTH] = 0;
		}
		break;
	default:
		/* unknown type: no level information available */
		memset(&card->info.mcl_level[0], 0, QETH_MCL_LENGTH + 1);
	}
	dev_info(&card->gdev->dev,
		 "Device is a%s card%s%s%s\nwith link type %s.\n",
		 qeth_get_cardname(card),
		 (card->info.mcl_level[0]) ? " (level: " : "",
		 (card->info.mcl_level[0]) ? card->info.mcl_level : "",
		 (card->info.mcl_level[0]) ? ")" : "",
		 qeth_get_cardname_short(card));
}
EXPORT_SYMBOL_GPL(qeth_print_status_message);
2571
2572 static void qeth_initialize_working_pool_list(struct qeth_card *card)
2573 {
2574         struct qeth_buffer_pool_entry *entry;
2575
2576         QETH_CARD_TEXT(card, 5, "inwrklst");
2577
2578         list_for_each_entry(entry,
2579                             &card->qdio.init_pool.entry_list, init_list) {
2580                 qeth_put_buffer_pool_entry(card, entry);
2581         }
2582 }
2583
2584 static struct qeth_buffer_pool_entry *qeth_find_free_buffer_pool_entry(
2585                                         struct qeth_card *card)
2586 {
2587         struct list_head *plh;
2588         struct qeth_buffer_pool_entry *entry;
2589         int i, free;
2590         struct page *page;
2591
2592         if (list_empty(&card->qdio.in_buf_pool.entry_list))
2593                 return NULL;
2594
2595         list_for_each(plh, &card->qdio.in_buf_pool.entry_list) {
2596                 entry = list_entry(plh, struct qeth_buffer_pool_entry, list);
2597                 free = 1;
2598                 for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2599                         if (page_count(virt_to_page(entry->elements[i])) > 1) {
2600                                 free = 0;
2601                                 break;
2602                         }
2603                 }
2604                 if (free) {
2605                         list_del_init(&entry->list);
2606                         return entry;
2607                 }
2608         }
2609
2610         /* no free buffer in pool so take first one and swap pages */
2611         entry = list_entry(card->qdio.in_buf_pool.entry_list.next,
2612                         struct qeth_buffer_pool_entry, list);
2613         for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2614                 if (page_count(virt_to_page(entry->elements[i])) > 1) {
2615                         page = alloc_page(GFP_ATOMIC);
2616                         if (!page) {
2617                                 return NULL;
2618                         } else {
2619                                 free_page((unsigned long)entry->elements[i]);
2620                                 entry->elements[i] = page_address(page);
2621                                 QETH_CARD_STAT_INC(card, rx_sg_alloc_page);
2622                         }
2623                 }
2624         }
2625         list_del_init(&entry->list);
2626         return entry;
2627 }
2628
2629 static int qeth_init_input_buffer(struct qeth_card *card,
2630                 struct qeth_qdio_buffer *buf)
2631 {
2632         struct qeth_buffer_pool_entry *pool_entry;
2633         int i;
2634
2635         if ((card->options.cq == QETH_CQ_ENABLED) && (!buf->rx_skb)) {
2636                 buf->rx_skb = netdev_alloc_skb(card->dev,
2637                                                QETH_RX_PULL_LEN + ETH_HLEN);
2638                 if (!buf->rx_skb)
2639                         return -ENOMEM;
2640         }
2641
2642         pool_entry = qeth_find_free_buffer_pool_entry(card);
2643         if (!pool_entry)
2644                 return -ENOBUFS;
2645
2646         /*
2647          * since the buffer is accessed only from the input_tasklet
2648          * there shouldn't be a need to synchronize; also, since we use
2649          * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run  out off
2650          * buffers
2651          */
2652
2653         buf->pool_entry = pool_entry;
2654         for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
2655                 buf->buffer->element[i].length = PAGE_SIZE;
2656                 buf->buffer->element[i].addr =  pool_entry->elements[i];
2657                 if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
2658                         buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
2659                 else
2660                         buf->buffer->element[i].eflags = 0;
2661                 buf->buffer->element[i].sflags = 0;
2662         }
2663         return 0;
2664 }
2665
2666 int qeth_init_qdio_queues(struct qeth_card *card)
2667 {
2668         unsigned int i;
2669         int rc;
2670
2671         QETH_CARD_TEXT(card, 2, "initqdqs");
2672
2673         /* inbound queue */
2674         qdio_reset_buffers(card->qdio.in_q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2675         memset(&card->rx, 0, sizeof(struct qeth_rx));
2676
2677         qeth_initialize_working_pool_list(card);
2678         /*give only as many buffers to hardware as we have buffer pool entries*/
2679         for (i = 0; i < card->qdio.in_buf_pool.buf_count - 1; i++) {
2680                 rc = qeth_init_input_buffer(card, &card->qdio.in_q->bufs[i]);
2681                 if (rc)
2682                         return rc;
2683         }
2684
2685         card->qdio.in_q->next_buf_to_init =
2686                 card->qdio.in_buf_pool.buf_count - 1;
2687         rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0, 0,
2688                      card->qdio.in_buf_pool.buf_count - 1);
2689         if (rc) {
2690                 QETH_CARD_TEXT_(card, 2, "1err%d", rc);
2691                 return rc;
2692         }
2693
2694         /* completion */
2695         rc = qeth_cq_init(card);
2696         if (rc) {
2697                 return rc;
2698         }
2699
2700         /* outbound queue */
2701         for (i = 0; i < card->qdio.no_out_queues; ++i) {
2702                 struct qeth_qdio_out_q *queue = card->qdio.out_qs[i];
2703
2704                 qdio_reset_buffers(queue->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
2705                 queue->max_elements = QETH_MAX_BUFFER_ELEMENTS(card);
2706                 queue->next_buf_to_fill = 0;
2707                 queue->do_pack = 0;
2708                 queue->prev_hdr = NULL;
2709                 queue->bulk_start = 0;
2710                 atomic_set(&queue->used_buffers, 0);
2711                 atomic_set(&queue->set_pci_flags_count, 0);
2712                 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
2713                 netdev_tx_reset_queue(netdev_get_tx_queue(card->dev, i));
2714         }
2715         return 0;
2716 }
2717 EXPORT_SYMBOL_GPL(qeth_init_qdio_queues);
2718
/* Finalize an IPA command buffer: apply the common MPC finalization, then
 * stamp the next IPA sequence number into the command header.
 */
static void qeth_ipa_finalize_cmd(struct qeth_card *card,
				  struct qeth_cmd_buffer *iob)
{
	qeth_mpc_finalize_cmd(card, iob);

	/* override with IPA-specific values: */
	__ipa_cmd(iob)->hdr.seqno = card->seqno.ipa++;
}
2727
/* Build the channel program and IPA PDU header for an IPA command.
 * @cmd_length: length of the IPA command itself (without the PDU header).
 */
void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
			  u16 cmd_length)
{
	u8 prot_type = qeth_mpc_select_prot_type(card);
	u16 total_length = iob->length;

	qeth_setup_ccw(__ccw_from_cmd(iob), CCW_CMD_WRITE, 0, total_length,
		       iob->data);
	iob->finalize = qeth_ipa_finalize_cmd;

	/* start from the PDU template, then patch in the variable fields */
	memcpy(iob->data, IPA_PDU_HEADER, IPA_PDU_HEADER_SIZE);
	memcpy(QETH_IPA_PDU_LEN_TOTAL(iob->data), &total_length, 2);
	memcpy(QETH_IPA_CMD_PROT_TYPE(iob->data), &prot_type, 1);
	memcpy(QETH_IPA_PDU_LEN_PDU1(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_PDU_LEN_PDU2(iob->data), &cmd_length, 2);
	memcpy(QETH_IPA_CMD_DEST_ADDR(iob->data),
	       &card->token.ulp_connection_r, QETH_MPC_TOKEN_LENGTH);
	memcpy(QETH_IPA_PDU_LEN_PDU3(iob->data), &cmd_length, 2);
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
2748
2749 struct qeth_cmd_buffer *qeth_ipa_alloc_cmd(struct qeth_card *card,
2750                                            enum qeth_ipa_cmds cmd_code,
2751                                            enum qeth_prot_versions prot,
2752                                            unsigned int data_length)
2753 {
2754         enum qeth_link_types link_type = card->info.link_type;
2755         struct qeth_cmd_buffer *iob;
2756         struct qeth_ipacmd_hdr *hdr;
2757
2758         data_length += offsetof(struct qeth_ipa_cmd, data);
2759         iob = qeth_alloc_cmd(&card->write, IPA_PDU_HEADER_SIZE + data_length, 1,
2760                              QETH_IPA_TIMEOUT);
2761         if (!iob)
2762                 return NULL;
2763
2764         qeth_prepare_ipa_cmd(card, iob, data_length);
2765
2766         hdr = &__ipa_cmd(iob)->hdr;
2767         hdr->command = cmd_code;
2768         hdr->initiator = IPA_CMD_INITIATOR_HOST;
2769         /* hdr->seqno is set by qeth_send_control_data() */
2770         hdr->adapter_type = (link_type == QETH_LINK_TYPE_HSTR) ? 2 : 1;
2771         hdr->rel_adapter_no = (u8) card->dev->dev_port;
2772         hdr->prim_version_no = IS_LAYER2(card) ? 2 : 1;
2773         hdr->param_count = 1;
2774         hdr->prot_version = prot;
2775         return iob;
2776 }
2777 EXPORT_SYMBOL_GPL(qeth_ipa_alloc_cmd);
2778
2779 static int qeth_send_ipa_cmd_cb(struct qeth_card *card,
2780                                 struct qeth_reply *reply, unsigned long data)
2781 {
2782         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2783
2784         return (cmd->hdr.return_code) ? -EIO : 0;
2785 }
2786
2787 /**
2788  * qeth_send_ipa_cmd() - send an IPA command
2789  *
2790  * See qeth_send_control_data() for explanation of the arguments.
2791  */
2792
2793 int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
2794                 int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
2795                         unsigned long),
2796                 void *reply_param)
2797 {
2798         int rc;
2799
2800         QETH_CARD_TEXT(card, 4, "sendipa");
2801
2802         if (card->read_or_write_problem) {
2803                 qeth_put_cmd(iob);
2804                 return -EIO;
2805         }
2806
2807         if (reply_cb == NULL)
2808                 reply_cb = qeth_send_ipa_cmd_cb;
2809         rc = qeth_send_control_data(card, iob, reply_cb, reply_param);
2810         if (rc == -ETIME) {
2811                 qeth_clear_ipacmd_list(card);
2812                 qeth_schedule_recovery(card);
2813         }
2814         return rc;
2815 }
2816 EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
2817
2818 static int qeth_send_startlan_cb(struct qeth_card *card,
2819                                  struct qeth_reply *reply, unsigned long data)
2820 {
2821         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
2822
2823         if (cmd->hdr.return_code == IPA_RC_LAN_OFFLINE)
2824                 return -ENETDOWN;
2825
2826         return (cmd->hdr.return_code) ? -EIO : 0;
2827 }
2828
/* Issue IPA_CMD_STARTLAN to bring the LAN up; the callback maps an offline
 * LAN to -ENETDOWN.
 */
static int qeth_send_startlan(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "strtlan");

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_STARTLAN, QETH_PROT_NONE, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_send_startlan_cb, NULL);
}
2840
2841 static int qeth_setadpparms_inspect_rc(struct qeth_ipa_cmd *cmd)
2842 {
2843         if (!cmd->hdr.return_code)
2844                 cmd->hdr.return_code =
2845                         cmd->data.setadapterparms.hdr.return_code;
2846         return cmd->hdr.return_code;
2847 }
2848
/* Callback for QUERY_COMMANDS_SUPPORTED: caches the reported link type and
 * the mask of supported SETADAPTERPARMS sub-commands.
 */
static int qeth_query_setadapterparms_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;

	QETH_CARD_TEXT(card, 3, "quyadpcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	/* NOTE(review): only the low 7 bits are checked here, yet the full
	 * byte is stored — presumably the top bit is not a link type; confirm.
	 */
	if (cmd->data.setadapterparms.data.query_cmds_supp.lan_type & 0x7f) {
		card->info.link_type =
		      cmd->data.setadapterparms.data.query_cmds_supp.lan_type;
		QETH_CARD_TEXT_(card, 2, "lnk %d", card->info.link_type);
	}
	card->options.adp.supported_funcs =
		cmd->data.setadapterparms.data.query_cmds_supp.supported_cmds;
	return 0;
}
2867
/* Allocate an IPA_CMD_SETADAPTERPARMS command buffer and fill in the
 * setadapterparms sub-header for @adp_cmd with @data_length bytes of
 * sub-command payload. Returns NULL on allocation failure.
 */
static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
						    enum qeth_ipa_setadp_cmd adp_cmd,
						    unsigned int data_length)
{
	struct qeth_ipacmd_setadpparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETADAPTERPARMS, QETH_PROT_IPV4,
				 data_length +
				 offsetof(struct qeth_ipacmd_setadpparms,
					  data));
	if (!iob)
		return NULL;

	hdr = &__ipa_cmd(iob)->data.setadapterparms.hdr;
	hdr->cmdlength = sizeof(*hdr) + data_length;
	hdr->command_code = adp_cmd;
	/* single-frame command: one frame in total, sequence number 1 */
	hdr->used_total = 1;
	hdr->seq_no = 1;
	return iob;
}
2889
2890 static int qeth_query_setadapterparms(struct qeth_card *card)
2891 {
2892         int rc;
2893         struct qeth_cmd_buffer *iob;
2894
2895         QETH_CARD_TEXT(card, 3, "queryadp");
2896         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
2897                                    SETADP_DATA_SIZEOF(query_cmds_supp));
2898         if (!iob)
2899                 return -ENOMEM;
2900         rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
2901         return rc;
2902 }
2903
/* Callback for IPA_CMD_QIPASSIST: caches the supported and enabled IPA
 * assist masks for the queried protocol version.
 */
static int qeth_query_ipassists_cb(struct qeth_card *card,
		struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "qipasscb");

	cmd = (struct qeth_ipa_cmd *) data;

	switch (cmd->hdr.return_code) {
	case IPA_RC_SUCCESS:
		break;
	case IPA_RC_NOTSUPP:
	case IPA_RC_L2_UNSUPPORTED_CMD:
		QETH_CARD_TEXT(card, 2, "ipaunsup");
		/* device doesn't know QIPASSIST; record SETADAPTERPARMS as
		 * supported anyway — presumably so the adapter-parms path
		 * still works on such devices (confirm)
		 */
		card->options.ipa4.supported_funcs |= IPA_SETADAPTERPARMS;
		card->options.ipa6.supported_funcs |= IPA_SETADAPTERPARMS;
		return -EOPNOTSUPP;
	default:
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Unhandled rc=%#x\n",
				 CARD_DEVID(card), cmd->hdr.return_code);
		return -EIO;
	}

	if (cmd->hdr.prot_version == QETH_PROT_IPV4) {
		card->options.ipa4.supported_funcs = cmd->hdr.ipa_supported;
		card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
	} else if (cmd->hdr.prot_version == QETH_PROT_IPV6) {
		card->options.ipa6.supported_funcs = cmd->hdr.ipa_supported;
		card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
	} else
		QETH_DBF_MESSAGE(1, "IPA_CMD_QIPASSIST on device %x: Flawed LIC detected\n",
				 CARD_DEVID(card));
	return 0;
}
2939
2940 static int qeth_query_ipassists(struct qeth_card *card,
2941                                 enum qeth_prot_versions prot)
2942 {
2943         int rc;
2944         struct qeth_cmd_buffer *iob;
2945
2946         QETH_CARD_TEXT_(card, 2, "qipassi%i", prot);
2947         iob = qeth_ipa_alloc_cmd(card, IPA_CMD_QIPASSIST, prot, 0);
2948         if (!iob)
2949                 return -ENOMEM;
2950         rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
2951         return rc;
2952 }
2953
/* Callback for QUERY_SWITCH_ATTRIBUTES: copies capabilities and settings
 * into the qeth_switch_info passed via reply->param.
 */
static int qeth_query_switch_attributes_cb(struct qeth_card *card,
				struct qeth_reply *reply, unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_query_switch_attributes *attrs;
	struct qeth_switch_info *sw_info;

	QETH_CARD_TEXT(card, 2, "qswiatcb");
	if (qeth_setadpparms_inspect_rc(cmd))
		return -EIO;

	sw_info = (struct qeth_switch_info *)reply->param;
	attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
	sw_info->capabilities = attrs->capabilities;
	sw_info->settings = attrs->settings;
	QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
			sw_info->settings);
	return 0;
}
2973
/* Query the attributes of the attached switch port into *sw_info.
 * Returns -EOPNOTSUPP when the adapter lacks the sub-command and -ENOMEDIUM
 * when the link is down.
 */
int qeth_query_switch_attributes(struct qeth_card *card,
				 struct qeth_switch_info *sw_info)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qswiattr");
	if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
		return -EOPNOTSUPP;
	if (!netif_carrier_ok(card->dev))
		return -ENOMEDIUM;
	iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob,
				qeth_query_switch_attributes_cb, sw_info);
}
2990
/* Allocate an IPA_CMD_SET_DIAG_ASS command buffer for diagnostic sub-command
 * @sub_cmd with @data_length bytes of payload. Returns NULL on allocation
 * failure.
 */
struct qeth_cmd_buffer *qeth_get_diag_cmd(struct qeth_card *card,
					  enum qeth_diags_cmds sub_cmd,
					  unsigned int data_length)
{
	struct qeth_ipacmd_diagass *cmd;
	struct qeth_cmd_buffer *iob;

	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SET_DIAG_ASS, QETH_PROT_NONE,
				 DIAG_HDR_LEN + data_length);
	if (!iob)
		return NULL;

	cmd = &__ipa_cmd(iob)->data.diagass;
	cmd->subcmd_len = DIAG_SUB_HDR_LEN + data_length;
	cmd->subcmd = sub_cmd;
	return iob;
}
EXPORT_SYMBOL_GPL(qeth_get_diag_cmd);
3009
3010 static int qeth_query_setdiagass_cb(struct qeth_card *card,
3011                 struct qeth_reply *reply, unsigned long data)
3012 {
3013         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3014         u16 rc = cmd->hdr.return_code;
3015
3016         if (rc) {
3017                 QETH_CARD_TEXT_(card, 2, "diagq:%x", rc);
3018                 return -EIO;
3019         }
3020
3021         card->info.diagass_support = cmd->data.diagass.ext;
3022         return 0;
3023 }
3024
/* Query which diagnostic assists the device supports; the callback caches
 * the result in card->info.diagass_support.
 */
static int qeth_query_setdiagass(struct qeth_card *card)
{
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 2, "qdiagass");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_QUERY, 0);
	if (!iob)
		return -ENOMEM;
	return qeth_send_ipa_cmd(card, iob, qeth_query_setdiagass_cb, NULL);
}
3035
3036 static void qeth_get_trap_id(struct qeth_card *card, struct qeth_trap_id *tid)
3037 {
3038         unsigned long info = get_zeroed_page(GFP_KERNEL);
3039         struct sysinfo_2_2_2 *info222 = (struct sysinfo_2_2_2 *)info;
3040         struct sysinfo_3_2_2 *info322 = (struct sysinfo_3_2_2 *)info;
3041         struct ccw_dev_id ccwid;
3042         int level;
3043
3044         tid->chpid = card->info.chpid;
3045         ccw_device_get_id(CARD_RDEV(card), &ccwid);
3046         tid->ssid = ccwid.ssid;
3047         tid->devno = ccwid.devno;
3048         if (!info)
3049                 return;
3050         level = stsi(NULL, 0, 0, 0);
3051         if ((level >= 2) && (stsi(info222, 2, 2, 2) == 0))
3052                 tid->lparnr = info222->lpar_number;
3053         if ((level >= 3) && (stsi(info322, 3, 2, 2) == 0)) {
3054                 EBCASC(info322->vm[0].name, sizeof(info322->vm[0].name));
3055                 memcpy(tid->vmname, info322->vm[0].name, sizeof(tid->vmname));
3056         }
3057         free_page(info);
3058         return;
3059 }
3060
3061 static int qeth_hw_trap_cb(struct qeth_card *card,
3062                 struct qeth_reply *reply, unsigned long data)
3063 {
3064         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
3065         u16 rc = cmd->hdr.return_code;
3066
3067         if (rc) {
3068                 QETH_CARD_TEXT_(card, 2, "trapc:%x", rc);
3069                 return -EIO;
3070         }
3071         return 0;
3072 }
3073
/* Arm, disarm or trigger a hardware diagnostics trap on the device. */
int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
{
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_cmd *cmd;

	QETH_CARD_TEXT(card, 2, "diagtrap");
	iob = qeth_get_diag_cmd(card, QETH_DIAGS_CMD_TRAP, 64);
	if (!iob)
		return -ENOMEM;
	cmd = __ipa_cmd(iob);
	cmd->data.diagass.type = 1;
	cmd->data.diagass.action = action;
	switch (action) {
	case QETH_DIAGS_TRAP_ARM:
		/* NOTE(review): magic option/ext encodings; ext appears to
		 * carry the length of the appended trap id — confirm.
		 */
		cmd->data.diagass.options = 0x0003;
		cmd->data.diagass.ext = 0x00010000 +
			sizeof(struct qeth_trap_id);
		qeth_get_trap_id(card,
			(struct qeth_trap_id *)cmd->data.diagass.cdata);
		break;
	case QETH_DIAGS_TRAP_DISARM:
		cmd->data.diagass.options = 0x0001;
		break;
	case QETH_DIAGS_TRAP_CAPTURE:
		break;
	}
	return qeth_send_ipa_cmd(card, iob, qeth_hw_trap_cb, NULL);
}
EXPORT_SYMBOL_GPL(qeth_hw_trap);
3103
3104 static int qeth_check_qdio_errors(struct qeth_card *card,
3105                                   struct qdio_buffer *buf,
3106                                   unsigned int qdio_error,
3107                                   const char *dbftext)
3108 {
3109         if (qdio_error) {
3110                 QETH_CARD_TEXT(card, 2, dbftext);
3111                 QETH_CARD_TEXT_(card, 2, " F15=%02X",
3112                                buf->element[15].sflags);
3113                 QETH_CARD_TEXT_(card, 2, " F14=%02X",
3114                                buf->element[14].sflags);
3115                 QETH_CARD_TEXT_(card, 2, " qerr=%X", qdio_error);
3116                 if ((buf->element[15].sflags) == 0x12) {
3117                         QETH_CARD_STAT_INC(card, rx_dropped);
3118                         return 0;
3119                 } else
3120                         return 1;
3121         }
3122         return 0;
3123 }
3124
/* Refill processed inbound buffers and hand them back to the hardware once
 * enough have accumulated (batching SIGAs). On memory shortage, switch to
 * plain skb allocation; if no buffer could be refilled at all, schedule the
 * reclaim worker to retry later.
 */
static void qeth_queue_input_buffer(struct qeth_card *card, int index)
{
	struct qeth_qdio_q *queue = card->qdio.in_q;
	struct list_head *lh;
	int count;
	int i;
	int rc;
	int newcount = 0;

	/* number of buffers between next_buf_to_init and 'index', modulo the
	 * ring size
	 */
	count = (index < queue->next_buf_to_init)?
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init - index) :
		card->qdio.in_buf_pool.buf_count -
		(queue->next_buf_to_init + QDIO_MAX_BUFFERS_PER_Q - index);
	/* only requeue at a certain threshold to avoid SIGAs */
	if (count >= QETH_IN_BUF_REQUEUE_THRESHOLD(card)) {
		for (i = queue->next_buf_to_init;
		     i < queue->next_buf_to_init + count; ++i) {
			if (qeth_init_input_buffer(card,
				&queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q])) {
				break;
			} else {
				newcount++;
			}
		}

		if (newcount < count) {
			/* we are in memory shortage so we switch back to
			   traditional skb allocation and drop packages */
			atomic_set(&card->force_alloc_skb, 3);
			count = newcount;
		} else {
			atomic_add_unless(&card->force_alloc_skb, -1, 0);
		}

		if (!count) {
			/* nothing refilled: if the whole pool is still
			 * present (all pages referenced), defer to the
			 * reclaim worker
			 */
			i = 0;
			list_for_each(lh, &card->qdio.in_buf_pool.entry_list)
				i++;
			if (i == card->qdio.in_buf_pool.buf_count) {
				QETH_CARD_TEXT(card, 2, "qsarbw");
				card->reclaim_index = index;
				schedule_delayed_work(
					&card->buffer_reclaim_work,
					QETH_RECLAIM_WORK_TIME);
			}
			return;
		}

		/*
		 * according to old code it should be avoided to requeue all
		 * 128 buffers in order to benefit from PCI avoidance.
		 * this function keeps at least one buffer (the buffer at
		 * 'index') un-requeued -> this buffer is the first buffer that
		 * will be requeued the next time
		 */
		rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, 0,
			     queue->next_buf_to_init, count);
		if (rc) {
			QETH_CARD_TEXT(card, 2, "qinberr");
		}
		queue->next_buf_to_init = (queue->next_buf_to_init + count) %
					  QDIO_MAX_BUFFERS_PER_Q;
	}
}
3190
/* Delayed worker that retries the inbound-buffer refill from the point
 * recorded in card->reclaim_index.
 */
static void qeth_buffer_reclaim_work(struct work_struct *work)
{
	struct qeth_card *card = container_of(work, struct qeth_card,
		buffer_reclaim_work.work);

	QETH_CARD_TEXT_(card, 2, "brw:%x", card->reclaim_index);
	qeth_queue_input_buffer(card, card->reclaim_index);
}
3199
/* Inspect the QDIO error state of a completed TX buffer and emit trace
 * entries for unexpected failures.
 */
static void qeth_handle_send_error(struct qeth_card *card,
		struct qeth_qdio_out_buffer *buffer, unsigned int qdio_err)
{
	int sbalf15 = buffer->buffer->element[15].sflags;

	QETH_CARD_TEXT(card, 6, "hdsnderr");
	qeth_check_qdio_errors(card, buffer->buffer, qdio_err, "qouterr");

	if (!qdio_err)
		return;

	/* NOTE(review): sbalf15 values 15..31 appear to be tolerated error
	 * codes that do not indicate a link failure — confirm against the
	 * QDIO/SBAL documentation.
	 */
	if ((sbalf15 >= 15) && (sbalf15 <= 31))
		return;

	QETH_CARD_TEXT(card, 1, "lnkfail");
	QETH_CARD_TEXT_(card, 1, "%04x %02x",
		       (u16)qdio_err, (u8)sbalf15);
}
3218
3219 /**
3220  * qeth_prep_flush_pack_buffer - Prepares flushing of a packing buffer.
3221  * @queue: queue to check for packing buffer
3222  *
3223  * Returns number of buffers that were prepared for flush.
3224  */
3225 static int qeth_prep_flush_pack_buffer(struct qeth_qdio_out_q *queue)
3226 {
3227         struct qeth_qdio_out_buffer *buffer;
3228
3229         buffer = queue->bufs[queue->next_buf_to_fill];
3230         if ((atomic_read(&buffer->state) == QETH_QDIO_BUF_EMPTY) &&
3231             (buffer->next_element_to_fill > 0)) {
3232                 /* it's a packing buffer */
3233                 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3234                 queue->next_buf_to_fill =
3235                         (queue->next_buf_to_fill + 1) % QDIO_MAX_BUFFERS_PER_Q;
3236                 return 1;
3237         }
3238         return 0;
3239 }
3240
3241 /*
3242  * Switched to packing state if the number of used buffers on a queue
3243  * reaches a certain limit.
3244  */
3245 static void qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue)
3246 {
3247         if (!queue->do_pack) {
3248                 if (atomic_read(&queue->used_buffers)
3249                     >= QETH_HIGH_WATERMARK_PACK){
3250                         /* switch non-PACKING -> PACKING */
3251                         QETH_CARD_TEXT(queue->card, 6, "np->pack");
3252                         QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3253                         queue->do_pack = 1;
3254                 }
3255         }
3256 }
3257
3258 /*
3259  * Switches from packing to non-packing mode. If there is a packing
3260  * buffer on the queue this buffer will be prepared to be flushed.
3261  * In that case 1 is returned to inform the caller. If no buffer
3262  * has to be flushed, zero is returned.
3263  */
3264 static int qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue)
3265 {
3266         if (queue->do_pack) {
3267                 if (atomic_read(&queue->used_buffers)
3268                     <= QETH_LOW_WATERMARK_PACK) {
3269                         /* switch PACKING -> non-PACKING */
3270                         QETH_CARD_TEXT(queue->card, 6, "pack->np");
3271                         QETH_TXQ_STAT_INC(queue, packing_mode_switch);
3272                         queue->do_pack = 0;
3273                         return qeth_prep_flush_pack_buffer(queue);
3274                 }
3275         }
3276         return 0;
3277 }
3278
/* Hand the buffers [index, index+count) of @queue to the device via
 * do_QDIO(). For non-IQD devices this also decides whether to request a
 * PCI (completion) interrupt via SBAL_SFLAGS0_PCI_REQ; for IQD the TX
 * completion is driven by NAPI instead. A do_QDIO() failure other than
 * -ENOBUFS triggers recovery.
 */
3279 static void qeth_flush_buffers(struct qeth_qdio_out_q *queue, int index,
3280                                int count)
3281 {
3282         struct qeth_card *card = queue->card;
3283         struct qeth_qdio_out_buffer *buf;
3284         int rc;
3285         int i;
3286         unsigned int qdio_flags;
3287
3288         for (i = index; i < index + count; ++i) {
3289                 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3290                 buf = queue->bufs[bidx];
3291                 buf->buffer->element[buf->next_element_to_fill - 1].eflags |=
3292                                 SBAL_EFLAGS_LAST_ENTRY;
3293
3294                 if (queue->bufstates)
3295                         queue->bufstates[bidx].user = buf;
3296
3297                 if (IS_IQD(queue->card))
3298                         continue;
3299
3300                 if (!queue->do_pack) {
3301                         if ((atomic_read(&queue->used_buffers) >=
3302                                 (QETH_HIGH_WATERMARK_PACK -
3303                                  QETH_WATERMARK_PACK_FUZZ)) &&
3304                             !atomic_read(&queue->set_pci_flags_count)) {
3305                                 /* it's likely that we'll go to packing
3306                                  * mode soon */
3307                                 atomic_inc(&queue->set_pci_flags_count);
3308                                 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3309                         }
3310                 } else {
3311                         if (!atomic_read(&queue->set_pci_flags_count)) {
3312                                 /*
3313                                  * there's no outstanding PCI any more, so we
3314                                  * have to request a PCI to be sure that the PCI
3315                                  * will wake at some time in the future then we
3316                                  * can flush packed buffers that might still be
3317                                  * hanging around, which can happen if no
3318                                  * further send was requested by the stack
3319                                  */
3320                                 atomic_inc(&queue->set_pci_flags_count);
3321                                 buf->buffer->element[0].sflags |= SBAL_SFLAGS0_PCI_REQ;
3322                         }
3323                 }
3324         }
3325
3326         qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
3327         if (atomic_read(&queue->set_pci_flags_count))
3328                 qdio_flags |= QDIO_FLAG_PCI_OUT;
3329         rc = do_QDIO(CARD_DDEV(queue->card), qdio_flags,
3330                      queue->queue_no, index, count);
3331
3332         /* Fake the TX completion interrupt: */
3333         if (IS_IQD(card))
3334                 napi_schedule(&queue->napi);
3335
3336         if (rc) {
3337                 /* ignore temporary SIGA errors without busy condition */
3338                 if (rc == -ENOBUFS)
3339                         return;
3340                 QETH_CARD_TEXT(queue->card, 2, "flushbuf");
3341                 QETH_CARD_TEXT_(queue->card, 2, " q%d", queue->queue_no);
3342                 QETH_CARD_TEXT_(queue->card, 2, " idx%d", index);
3343                 QETH_CARD_TEXT_(queue->card, 2, " c%d", count);
3344                 QETH_CARD_TEXT_(queue->card, 2, " err%d", rc);
3345
3346                 /* this must not happen under normal circumstances. if it
3347                  * happens something is really wrong -> recover */
3348                 qeth_schedule_recovery(queue->card);
3349                 return;
3350         }
3351 }
3352
/* Flush the single buffer at the queue's bulk_start position, advance the
 * bulk cursor and reset prev_hdr, so the next skb cannot be bulked into a
 * buffer that was already handed to the device.
 */
3353 static void qeth_flush_queue(struct qeth_qdio_out_q *queue)
3354 {
3355         qeth_flush_buffers(queue, queue->bulk_start, 1);
3356
3357         queue->bulk_start = QDIO_BUFNR(queue->bulk_start + 1);
3358         queue->prev_hdr = NULL;
3359 }
3360
/* Called from the TX completion path: if no concurrent sender holds the
 * queue lock, opportunistically switch out of packing mode and/or flush a
 * pending packing buffer to keep a PCI request outstanding.
 */
3361 static void qeth_check_outbound_queue(struct qeth_qdio_out_q *queue)
3362 {
3363         int index;
3364         int flush_cnt = 0;
3365         int q_was_packing = 0;
3366
3367         /*
3368          * check if we have to switch to non-packing mode or if
3369          * we have to get a pci flag out on the queue
3370          */
3371         if ((atomic_read(&queue->used_buffers) <= QETH_LOW_WATERMARK_PACK) ||
3372             !atomic_read(&queue->set_pci_flags_count)) {
3373                 if (atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH) ==
3374                                 QETH_OUT_Q_UNLOCKED) {
3375                         /*
3376                          * If we get in here, there was no action in
3377                          * do_send_packet. So, we check if there is a
3378                          * packing buffer to be flushed here.
3379                          */
3380                         index = queue->next_buf_to_fill;
3381                         q_was_packing = queue->do_pack;
3382                         /* queue->do_pack may change */
3383                         barrier();
3384                         flush_cnt += qeth_switch_to_nonpacking_if_needed(queue);
3385                         if (!flush_cnt &&
3386                             !atomic_read(&queue->set_pci_flags_count))
3387                                 flush_cnt += qeth_prep_flush_pack_buffer(queue);
3388                         if (q_was_packing)
3389                                 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_cnt);
3390                         if (flush_cnt)
3391                                 qeth_flush_buffers(queue, index, flush_cnt);
3392                         atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3393                 }
3394         }
3395 }
3396
3397 static void qeth_qdio_start_poll(struct ccw_device *ccwdev, int queue,
3398                                  unsigned long card_ptr)
3399 {
3400         struct qeth_card *card = (struct qeth_card *)card_ptr;
3401
3402         if (card->dev->flags & IFF_UP)
3403                 napi_schedule(&card->napi);
3404 }
3405
3406 int qeth_configure_cq(struct qeth_card *card, enum qeth_cq cq)
3407 {
3408         int rc;
3409
3410         if (card->options.cq ==  QETH_CQ_NOTAVAILABLE) {
3411                 rc = -1;
3412                 goto out;
3413         } else {
3414                 if (card->options.cq == cq) {
3415                         rc = 0;
3416                         goto out;
3417                 }
3418
3419                 qeth_free_qdio_queues(card);
3420                 card->options.cq = cq;
3421                 rc = 0;
3422         }
3423 out:
3424         return rc;
3425
3426 }
3427 EXPORT_SYMBOL_GPL(qeth_configure_cq);
3428
/* Completion-Queue interrupt handler: walk the signalled CQ buffers, hand
 * each queued aob (asynchronous operation block) address to
 * qeth_qdio_handle_aob(), then scrub the buffers and give them back to
 * the device as input buffers. A qdio error stops TX and recovers.
 */
3429 static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
3430                                  unsigned int queue, int first_element,
3431                                  int count)
3432 {
3433         struct qeth_qdio_q *cq = card->qdio.c_q;
3434         int i;
3435         int rc;
3436
3437         if (!qeth_is_cq(card, queue))
3438                 return;
3439
3440         QETH_CARD_TEXT_(card, 5, "qcqhe%d", first_element);
3441         QETH_CARD_TEXT_(card, 5, "qcqhc%d", count);
3442         QETH_CARD_TEXT_(card, 5, "qcqherr%d", qdio_err);
3443
3444         if (qdio_err) {
3445                 netif_tx_stop_all_queues(card->dev);
3446                 qeth_schedule_recovery(card);
3447                 return;
3448         }
3449
3450         for (i = first_element; i < first_element + count; ++i) {
3451                 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3452                 struct qdio_buffer *buffer = cq->qdio_bufs[bidx];
3453                 int e = 0;
3454
3455                 while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
3456                        buffer->element[e].addr) {
3457                         unsigned long phys_aob_addr;
3458
3459                         phys_aob_addr = (unsigned long) buffer->element[e].addr;
3460                         qeth_qdio_handle_aob(card, phys_aob_addr);
3461                         ++e;
3462                 }
3463                 qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
3464         }
3465         rc = do_QDIO(CARD_DDEV(card), QDIO_FLAG_SYNC_INPUT, queue,
3466                     card->qdio.c_q->next_buf_to_init,
3467                     count);
3468         if (rc) {
3469                 dev_warn(&card->gdev->dev,
3470                         "QDIO reported an error, rc=%i\n", rc);
3471                 QETH_CARD_TEXT(card, 2, "qcqherr");
3472         }
3473         card->qdio.c_q->next_buf_to_init = (card->qdio.c_q->next_buf_to_init
3474                                    + count) % QDIO_MAX_BUFFERS_PER_Q;
3475 }
3476
/* qdio input interrupt dispatcher: route Completion-Queue notifications
 * to the CQ handler; for any other input queue only react to errors by
 * scheduling recovery.
 */
3477 static void qeth_qdio_input_handler(struct ccw_device *ccwdev,
3478                                     unsigned int qdio_err, int queue,
3479                                     int first_elem, int count,
3480                                     unsigned long card_ptr)
3481 {
3482         struct qeth_card *card = (struct qeth_card *)card_ptr;
3483
3484         QETH_CARD_TEXT_(card, 2, "qihq%d", queue);
3485         QETH_CARD_TEXT_(card, 2, "qiec%d", qdio_err);
3486
3487         if (qeth_is_cq(card, queue))
3488                 qeth_qdio_cq_handler(card, qdio_err, queue, first_elem, count);
3489         else if (qdio_err)
3490                 qeth_schedule_recovery(card);
3491 }
3492
/* qdio output interrupt handler: on a fatal error stop TX and recover.
 * Otherwise check each completed buffer for send errors, release it,
 * update the used-buffer count and wake the matching txq if it had been
 * stopped on a full ring.
 */
3493 static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
3494                                      unsigned int qdio_error, int __queue,
3495                                      int first_element, int count,
3496                                      unsigned long card_ptr)
3497 {
3498         struct qeth_card *card        = (struct qeth_card *) card_ptr;
3499         struct qeth_qdio_out_q *queue = card->qdio.out_qs[__queue];
3500         struct qeth_qdio_out_buffer *buffer;
3501         struct net_device *dev = card->dev;
3502         struct netdev_queue *txq;
3503         int i;
3504
3505         QETH_CARD_TEXT(card, 6, "qdouhdl");
3506         if (qdio_error & QDIO_ERROR_FATAL) {
3507                 QETH_CARD_TEXT(card, 2, "achkcond");
3508                 netif_tx_stop_all_queues(dev);
3509                 qeth_schedule_recovery(card);
3510                 return;
3511         }
3512
3513         for (i = first_element; i < (first_element + count); ++i) {
3514                 int bidx = i % QDIO_MAX_BUFFERS_PER_Q;
3515                 buffer = queue->bufs[bidx];
3516                 qeth_handle_send_error(card, buffer, qdio_error);
3517                 qeth_clear_output_buffer(queue, buffer, qdio_error, 0);
3518         }
3519
3520         atomic_sub(count, &queue->used_buffers);
3521         qeth_check_outbound_queue(queue);
3522
3523         txq = netdev_get_tx_queue(dev, __queue);
3524         /* xmit may have observed the full-condition, but not yet stopped the
3525          * txq. In which case the code below won't trigger. So before returning,
3526          * xmit will re-check the txq's fill level and wake it up if needed.
3527          */
3528         if (netif_tx_queue_stopped(txq) && !qeth_out_queue_is_full(queue))
3529                 netif_tx_wake_queue(txq);
3530 }
3531
3532 /*
3533  * Note: qeth_get_priority_queue() assumes that we have 4 outbound queues.
3534  */
/* Pick the outbound queue (0 = highest priority) for @skb according to
 * the card's prio-queueing mode, falling back to the default queue when
 * no mapping applies.
 */
3535 int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb)
3536 {
3537         struct vlan_ethhdr *veth = vlan_eth_hdr(skb);
3538         u8 tos;
3539
3540         switch (card->qdio.do_prio_queueing) {
3541         case QETH_PRIO_Q_ING_TOS:
3542         case QETH_PRIO_Q_ING_PREC:
3543                 switch (qeth_get_ip_version(skb)) {
3544                 case 4:
3545                         tos = ipv4_get_dsfield(ip_hdr(skb));
3546                         break;
3547                 case 6:
3548                         tos = ipv6_get_dsfield(ipv6_hdr(skb));
3549                         break;
3550                 default:
3551                         return card->qdio.default_out_queue;
3552                 }
                /* queue = 3 - (tos >> 6): a higher 2-bit precedence value
                 * yields a lower (= higher-priority) queue number.
                 */
3553                 if (card->qdio.do_prio_queueing == QETH_PRIO_Q_ING_PREC)
3554                         return ~tos >> 6 & 3;
3555                 if (tos & IPTOS_MINCOST)
3556                         return 3;
3557                 if (tos & IPTOS_RELIABILITY)
3558                         return 2;
3559                 if (tos & IPTOS_THROUGHPUT)
3560                         return 1;
3561                 if (tos & IPTOS_LOWDELAY)
3562                         return 0;
3563                 break;
3564         case QETH_PRIO_Q_ING_SKB:
3565                 if (skb->priority > 5)
3566                         return 0;
                /* queue = 3 - (priority >> 1): priorities 0..5 map to
                 * queues 3..1, anything above 5 got queue 0 above.
                 */
3567                 return ~skb->priority >> 1 & 3;
3568         case QETH_PRIO_Q_ING_VLAN:
                /* queue = 3 - (vlan_prio >> 1), same inverse mapping as
                 * for skb->priority.
                 */
3569                 if (veth->h_vlan_proto == htons(ETH_P_8021Q))
3570                         return ~ntohs(veth->h_vlan_TCI) >>
3571                                (VLAN_PRIO_SHIFT + 1) & 3;
3572                 break;
3573         default:
3574                 break;
3575         }
3576         return card->qdio.default_out_queue;
3577 }
3578 EXPORT_SYMBOL_GPL(qeth_get_priority_queue);
3579
3580 /**
3581  * qeth_get_elements_for_frags() -      find number of SBALEs for skb frags.
3582  * @skb:                                SKB address
3583  *
3584  * Returns the number of pages, and thus QDIO buffer elements, needed to cover
3585  * fragmented part of the SKB. Returns zero for linear SKB.
3586  */
3587 static int qeth_get_elements_for_frags(struct sk_buff *skb)
3588 {
3589         int cnt, elements = 0;
3590
3591         for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3592                 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3593
3594                 elements += qeth_get_elements_for_range(
3595                         (addr_t)skb_frag_address(frag),
3596                         (addr_t)skb_frag_address(frag) + skb_frag_size(frag));
3597         }
3598         return elements;
3599 }
3600
3601 /**
3602  * qeth_count_elements() -      Counts the number of QDIO buffer elements needed
3603  *                              to transmit an skb.
3604  * @skb:                        the skb to operate on.
3605  * @data_offset:                skip this part of the skb's linear data
3606  *
3607  * Returns the number of pages, and thus QDIO buffer elements, needed to map the
3608  * skb's data (both its linear part and paged fragments).
3609  */
3610 unsigned int qeth_count_elements(struct sk_buff *skb, unsigned int data_offset)
3611 {
3612         unsigned int elements = qeth_get_elements_for_frags(skb);
3613         addr_t end = (addr_t)skb->data + skb_headlen(skb);
3614         addr_t start = (addr_t)skb->data + data_offset;
3615
3616         if (start != end)
3617                 elements += qeth_get_elements_for_range(start, end);
3618         return elements;
3619 }
3620 EXPORT_SYMBOL_GPL(qeth_count_elements);
3621
/* Size of a cache-allocated TX header object: the TSO header itself plus
 * room to copy up to MAX_TCP_HEADER bytes of protocol headers behind it
 * (see qeth_add_hw_header()).
 */
3622 #define QETH_HDR_CACHE_OBJ_SIZE         (sizeof(struct qeth_hdr_tso) + \
3623                                          MAX_TCP_HEADER)
3624
3625 /**
3626  * qeth_add_hw_header() - add a HW header to an skb.
 * @queue: TX queue that the skb will be sent on.
3627  * @skb: skb that the HW header should be added to.
3628  * @hdr: double pointer to a qeth_hdr. When returning with >= 0,
3629  *       it contains a valid pointer to a qeth_hdr.
3630  * @hdr_len: length of the HW header.
3631  * @proto_len: length of protocol headers that need to be in same page as the
3632  *             HW header.
 * @elements: returns the number of buffer elements needed for the skb.
3633  *
3634  * Returns the pushed length. If the header can't be pushed on
3635  * (eg. because it would cross a page boundary), it is allocated from
3636  * the cache instead and 0 is returned.
3637  * The number of needed buffer elements is returned in @elements.
3638  * Error to create the hdr is indicated by returning with < 0.
3639  */
3640 static int qeth_add_hw_header(struct qeth_qdio_out_q *queue,
3641                               struct sk_buff *skb, struct qeth_hdr **hdr,
3642                               unsigned int hdr_len, unsigned int proto_len,
3643                               unsigned int *elements)
3644 {
3645         const unsigned int contiguous = proto_len ? proto_len : 1;
3646         const unsigned int max_elements = queue->max_elements;
3647         unsigned int __elements;
3648         addr_t start, end;
3649         bool push_ok;
3650         int rc;
3651
3652 check_layout:
        /* Candidate placement for the pushed header, just before skb->data: */
3653         start = (addr_t)skb->data - hdr_len;
3654         end = (addr_t)skb->data;
3655
3656         if (qeth_get_elements_for_range(start, end + contiguous) == 1) {
3657                 /* Push HW header into same page as first protocol header. */
3658                 push_ok = true;
3659                 /* ... but TSO always needs a separate element for headers: */
3660                 if (skb_is_gso(skb))
3661                         __elements = 1 + qeth_count_elements(skb, proto_len);
3662                 else
3663                         __elements = qeth_count_elements(skb, 0);
3664         } else if (!proto_len && PAGE_ALIGNED(skb->data)) {
3665                 /* Push HW header into preceding page, flush with skb->data. */
3666                 push_ok = true;
3667                 __elements = 1 + qeth_count_elements(skb, 0);
3668         } else {
3669                 /* Use header cache, copy protocol headers up. */
3670                 push_ok = false;
3671                 __elements = 1 + qeth_count_elements(skb, proto_len);
3672         }
3673
3674         /* Compress skb to fit into one IO buffer: */
3675         if (__elements > max_elements) {
3676                 if (!skb_is_nonlinear(skb)) {
3677                         /* Drop it, no easy way of shrinking it further. */
3678                         QETH_DBF_MESSAGE(2, "Dropped an oversized skb (Max Elements=%u / Actual=%u / Length=%u).\n",
3679                                          max_elements, __elements, skb->len);
3680                         return -E2BIG;
3681                 }
3682
3683                 rc = skb_linearize(skb);
3684                 if (rc) {
3685                         QETH_TXQ_STAT_INC(queue, skbs_linearized_fail);
3686                         return rc;
3687                 }
3688
3689                 QETH_TXQ_STAT_INC(queue, skbs_linearized);
3690                 /* Linearization changed the layout, re-evaluate: */
3691                 goto check_layout;
3692         }
3693
3694         *elements = __elements;
3695         /* Add the header: */
3696         if (push_ok) {
3697                 *hdr = skb_push(skb, hdr_len);
3698                 return hdr_len;
3699         }
3700         /* fall back */
3701         if (hdr_len + proto_len > QETH_HDR_CACHE_OBJ_SIZE)
3702                 return -E2BIG;
3703         *hdr = kmem_cache_alloc(qeth_core_header_cache, GFP_ATOMIC);
3704         if (!*hdr)
3705                 return -ENOMEM;
3706         /* Copy protocol headers behind HW header: */
3707         skb_copy_from_linear_data(skb, ((char *)*hdr) + hdr_len, proto_len);
3708         return 0;
3709 }
3710
/* Decide whether @curr_skb may be appended to the partially filled TX
 * buffer @buffer. All packets sharing a buffer must have the same target:
 * identical destination MAC and VLAN for layer 2, identical next hop and
 * VLAN for layer 3. An empty buffer (no prev_hdr) accepts anything.
 */
3711 static bool qeth_iqd_may_bulk(struct qeth_qdio_out_q *queue,
3712                               struct qeth_qdio_out_buffer *buffer,
3713                               struct sk_buff *curr_skb,
3714                               struct qeth_hdr *curr_hdr)
3715 {
3716         struct qeth_hdr *prev_hdr = queue->prev_hdr;
3717
3718         if (!prev_hdr)
3719                 return true;
3720
3721         /* All packets must have the same target: */
3722         if (curr_hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) {
3723                 struct sk_buff *prev_skb = skb_peek(&buffer->skb_list);
3724
3725                 return ether_addr_equal(eth_hdr(prev_skb)->h_dest,
3726                                         eth_hdr(curr_skb)->h_dest) &&
3727                        qeth_l2_same_vlan(&prev_hdr->hdr.l2, &curr_hdr->hdr.l2);
3728         }
3729
3730         return qeth_l3_same_next_hop(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3) &&
3731                qeth_l3_iqd_same_vlan(&prev_hdr->hdr.l3, &curr_hdr->hdr.l3);
3732 }
3733
/* Map the skb's data (linear part from @offset onward, then all paged
 * fragments) into the qdio buffer elements of @buf, splitting runs at
 * page boundaries and setting the FIRST/MIDDLE/LAST_FRAG element flags.
 * Returns the index of the next free buffer element.
 */
3734 static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
3735                                        struct qeth_qdio_out_buffer *buf,
3736                                        bool is_first_elem, unsigned int offset)
3737 {
3738         struct qdio_buffer *buffer = buf->buffer;
3739         int element = buf->next_element_to_fill;
3740         int length = skb_headlen(skb) - offset;
3741         char *data = skb->data + offset;
3742         unsigned int elem_length, cnt;
3743
3744         /* map linear part into buffer element(s) */
3745         while (length > 0) {
3746                 elem_length = min_t(unsigned int, length,
3747                                     PAGE_SIZE - offset_in_page(data));
3748
3749                 buffer->element[element].addr = data;
3750                 buffer->element[element].length = elem_length;
3751                 length -= elem_length;
3752                 if (is_first_elem) {
3753                         is_first_elem = false;
3754                         if (length || skb_is_nonlinear(skb))
3755                                 /* skb needs additional elements */
3756                                 buffer->element[element].eflags =
3757                                         SBAL_EFLAGS_FIRST_FRAG;
3758                         else
3759                                 buffer->element[element].eflags = 0;
3760                 } else {
3761                         buffer->element[element].eflags =
3762                                 SBAL_EFLAGS_MIDDLE_FRAG;
3763                 }
3764
3765                 data += elem_length;
3766                 element++;
3767         }
3768
3769         /* map page frags into buffer element(s) */
3770         for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) {
3771                 skb_frag_t *frag = &skb_shinfo(skb)->frags[cnt];
3772
3773                 data = skb_frag_address(frag);
3774                 length = skb_frag_size(frag);
3775                 while (length > 0) {
3776                         elem_length = min_t(unsigned int, length,
3777                                             PAGE_SIZE - offset_in_page(data));
3778
3779                         buffer->element[element].addr = data;
3780                         buffer->element[element].length = elem_length;
3781                         buffer->element[element].eflags =
3782                                 SBAL_EFLAGS_MIDDLE_FRAG;
3783
3784                         length -= elem_length;
3785                         data += elem_length;
3786                         element++;
3787                 }
3788         }
3789
        /* Re-tag the last used element, unless it is a lone first element: */
3790         if (buffer->element[element - 1].eflags)
3791                 buffer->element[element - 1].eflags = SBAL_EFLAGS_LAST_FRAG;
3792         buf->next_element_to_fill = element;
3793         return element;
3794 }
3795
3796 /**
3797  * qeth_fill_buffer() - map skb into an output buffer
3798  * @buf:        buffer to transport the skb
3799  * @skb:        skb to map into the buffer
3800  * @hdr:        qeth_hdr for this skb. Either at skb->data, or allocated
3801  *              from qeth_core_header_cache.
3802  * @offset:     when mapping the skb, start at skb->data + offset
3803  * @hd_len:     if > 0, build a dedicated header element of this size
 *
 * Returns the index of the next free buffer element.
3804  */
3805 static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
3806                                      struct sk_buff *skb, struct qeth_hdr *hdr,
3807                                      unsigned int offset, unsigned int hd_len)
3808 {
3809         struct qdio_buffer *buffer = buf->buffer;
3810         bool is_first_elem = true;
3811
3812         __skb_queue_tail(&buf->skb_list, skb);
3813
3814         /* build dedicated header element */
3815         if (hd_len) {
3816                 int element = buf->next_element_to_fill;
3817                 is_first_elem = false;
3818
3819                 buffer->element[element].addr = hdr;
3820                 buffer->element[element].length = hd_len;
3821                 buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
3822                 /* remember to free cache-allocated qeth_hdr: */
3823                 buf->is_header[element] = ((void *)hdr != skb->data);
3824                 buf->next_element_to_fill++;
3825         }
3826
3827         return __qeth_fill_buffer(skb, buf, is_first_elem, offset);
3828 }
3829
/* Append @skb to the TX buffer at the queue's bulk_start position,
 * flushing beforehand if the skb doesn't fit or can't be bulked with the
 * buffer's current content, and flushing afterwards when the buffer is
 * full or the stack ends the xmit-more batch. Stops the txq when the
 * last free buffer gets taken. Returns 0 on success, -EBUSY if no free
 * buffer is available.
 */
3830 static int __qeth_xmit(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3831                        struct sk_buff *skb, unsigned int elements,
3832                        struct qeth_hdr *hdr, unsigned int offset,
3833                        unsigned int hd_len)
3834 {
3835         struct qeth_qdio_out_buffer *buffer = queue->bufs[queue->bulk_start];
3836         unsigned int bytes = qdisc_pkt_len(skb);
3837         unsigned int next_element;
3838         struct netdev_queue *txq;
3839         bool stopped = false;
3840         bool flush;
3841
3842         txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
3843
3844         /* Just a sanity check, the wake/stop logic should ensure that we always
3845          * get a free buffer.
3846          */
3847         if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3848                 return -EBUSY;
3849
3850         if ((buffer->next_element_to_fill + elements > queue->max_elements) ||
3851             !qeth_iqd_may_bulk(queue, buffer, skb, hdr)) {
3852                 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3853                 qeth_flush_queue(queue);
3854                 buffer = queue->bufs[queue->bulk_start];
3855
3856                 /* Sanity-check again: */
3857                 if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY)
3858                         return -EBUSY;
3859         }
3860
3861         if (buffer->next_element_to_fill == 0 &&
3862             atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
3863                 /* If a TX completion happens right _here_ and misses to wake
3864                  * the txq, then our re-check below will catch the race.
3865                  */
3866                 QETH_TXQ_STAT_INC(queue, stopped);
3867                 netif_tx_stop_queue(txq);
3868                 stopped = true;
3869         }
3870
3871         next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
3872         buffer->bytes += bytes;
3873         queue->prev_hdr = hdr;
3874
3875         flush = __netdev_tx_sent_queue(txq, bytes,
3876                                        !stopped && netdev_xmit_more());
3877
3878         if (flush || next_element >= queue->max_elements) {
3879                 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3880                 qeth_flush_queue(queue);
3881         }
3882
3883         if (stopped && !qeth_out_queue_is_full(queue))
3884                 netif_tx_start_queue(txq);
3885         return 0;
3886 }
3887
3888 int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue,
3889                         struct sk_buff *skb, struct qeth_hdr *hdr,
3890                         unsigned int offset, unsigned int hd_len,
3891                         int elements_needed)
3892 {
3893         struct qeth_qdio_out_buffer *buffer;
3894         unsigned int next_element;
3895         struct netdev_queue *txq;
3896         bool stopped = false;
3897         int start_index;
3898         int flush_count = 0;
3899         int do_pack = 0;
3900         int tmp;
3901         int rc = 0;
3902
3903         /* spin until we get the queue ... */
3904         while (atomic_cmpxchg(&queue->state, QETH_OUT_Q_UNLOCKED,
3905                               QETH_OUT_Q_LOCKED) != QETH_OUT_Q_UNLOCKED);
3906         start_index = queue->next_buf_to_fill;
3907         buffer = queue->bufs[queue->next_buf_to_fill];
3908
3909         /* Just a sanity check, the wake/stop logic should ensure that we always
3910          * get a free buffer.
3911          */
3912         if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) {
3913                 atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED);
3914                 return -EBUSY;
3915         }
3916
3917         txq = netdev_get_tx_queue(card->dev, skb_get_queue_mapping(skb));
3918
3919         /* check if we need to switch packing state of this queue */
3920         qeth_switch_to_packing_if_needed(queue);
3921         if (queue->do_pack) {
3922                 do_pack = 1;
3923                 /* does packet fit in current buffer? */
3924                 if (buffer->next_element_to_fill + elements_needed >
3925                     queue->max_elements) {
3926                         /* ... no -> set state PRIMED */
3927                         atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3928                         flush_count++;
3929                         queue->next_buf_to_fill =
3930                                 (queue->next_buf_to_fill + 1) %
3931                                 QDIO_MAX_BUFFERS_PER_Q;
3932                         buffer = queue->bufs[queue->next_buf_to_fill];
3933
3934                         /* We stepped forward, so sanity-check again: */
3935                         if (atomic_read(&buffer->state) !=
3936                             QETH_QDIO_BUF_EMPTY) {
3937                                 qeth_flush_buffers(queue, start_index,
3938                                                            flush_count);
3939                                 atomic_set(&queue->state,
3940                                                 QETH_OUT_Q_UNLOCKED);
3941                                 rc = -EBUSY;
3942                                 goto out;
3943                         }
3944                 }
3945         }
3946
3947         if (buffer->next_element_to_fill == 0 &&
3948             atomic_inc_return(&queue->used_buffers) >= QDIO_MAX_BUFFERS_PER_Q) {
3949                 /* If a TX completion happens right _here_ and misses to wake
3950                  * the txq, then our re-check below will catch the race.
3951                  */
3952                 QETH_TXQ_STAT_INC(queue, stopped);
3953                 netif_tx_stop_queue(txq);
3954                 stopped = true;
3955         }
3956
3957         next_element = qeth_fill_buffer(buffer, skb, hdr, offset, hd_len);
3958
3959         if (queue->do_pack)
3960                 QETH_TXQ_STAT_INC(queue, skbs_pack);
3961         if (!queue->do_pack || stopped || next_element >= queue->max_elements) {
3962                 flush_count++;
3963                 atomic_set(&buffer->state, QETH_QDIO_BUF_PRIMED);
3964                 queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) %
3965                                           QDIO_MAX_BUFFERS_PER_Q;
3966         }
3967
3968         if (flush_count)
3969                 qeth_flush_buffers(queue, start_index, flush_count);
3970         else if (!atomic_read(&queue->set_pci_flags_count))
3971                 atomic_xchg(&queue->state, QETH_OUT_Q_LOCKED_FLUSH);
3972         /*
3973          * queue->state will go from LOCKED -> UNLOCKED or from
3974          * LOCKED_FLUSH -> LOCKED if output_handler wanted to 'notify' us
3975          * (switch packing state or flush buffer to get another pci flag out).
3976          * In that case we will enter this loop
3977          */
3978         while (atomic_dec_return(&queue->state)) {
3979                 start_index = queue->next_buf_to_fill;
3980                 /* check if we can go back to non-packing state */
3981                 tmp = qeth_switch_to_nonpacking_if_needed(queue);
3982                 /*
3983                  * check if we need to flush a packing buffer to get a pci
3984                  * flag out on the queue
3985                  */
3986                 if (!tmp && !atomic_read(&queue->set_pci_flags_count))
3987                         tmp = qeth_prep_flush_pack_buffer(queue);
3988                 if (tmp) {
3989                         qeth_flush_buffers(queue, start_index, tmp);
3990                         flush_count += tmp;
3991                 }
3992         }
3993 out:
3994         /* at this point the queue is UNLOCKED again */
3995         if (do_pack)
3996                 QETH_TXQ_STAT_ADD(queue, bufs_pack, flush_count);
3997
3998         if (stopped && !qeth_out_queue_is_full(queue))
3999                 netif_tx_start_queue(txq);
4000         return rc;
4001 }
4002 EXPORT_SYMBOL_GPL(qeth_do_send_packet);
4003
4004 static void qeth_fill_tso_ext(struct qeth_hdr_tso *hdr,
4005                               unsigned int payload_len, struct sk_buff *skb,
4006                               unsigned int proto_len)
4007 {
4008         struct qeth_hdr_ext_tso *ext = &hdr->ext;
4009
4010         ext->hdr_tot_len = sizeof(*ext);
4011         ext->imb_hdr_no = 1;
4012         ext->hdr_type = 1;
4013         ext->hdr_version = 1;
4014         ext->hdr_len = 28;
4015         ext->payload_len = payload_len;
4016         ext->mss = skb_shinfo(skb)->gso_size;
4017         ext->dg_hdr_len = proto_len;
4018 }
4019
/**
 * qeth_xmit() - build the HW header for @skb and hand it to the TX path
 * @card: the qeth card to transmit on
 * @skb: the packet to send
 * @queue: the output queue to use
 * @ipv: IP protocol version, passed through to @fill_header
 * @fill_header: L2/L3-specific callback that fills the base HW header
 *
 * Returns 0 on success, or a negative errno from header allocation /
 * the underlying send path.
 */
int qeth_xmit(struct qeth_card *card, struct sk_buff *skb,
              struct qeth_qdio_out_q *queue, int ipv,
              void (*fill_header)(struct qeth_qdio_out_q *queue,
                                  struct qeth_hdr *hdr, struct sk_buff *skb,
                                  int ipv, unsigned int data_len))
{
	unsigned int proto_len, hw_hdr_len;
	unsigned int frame_len = skb->len;
	bool is_tso = skb_is_gso(skb);
	unsigned int data_offset = 0;
	struct qeth_hdr *hdr = NULL;
	unsigned int hd_len = 0;
	unsigned int elements;
	int push_len, rc;

	/* TSO needs the larger header, and the protocol headers travel
	 * alongside it; otherwise only IQD-layer2 carries the Ethernet
	 * header in the HW header element.
	 */
	if (is_tso) {
		hw_hdr_len = sizeof(struct qeth_hdr_tso);
		proto_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
	} else {
		hw_hdr_len = sizeof(struct qeth_hdr);
		proto_len = (IS_IQD(card) && IS_LAYER2(card)) ? ETH_HLEN : 0;
	}

	/* Make sure there is writable headroom for pushing the HW header. */
	rc = skb_cow_head(skb, hw_hdr_len);
	if (rc)
		return rc;

	/* Either pushes the header into the skb (push_len > 0), or
	 * allocates it from qeth_core_header_cache (push_len == 0).
	 */
	push_len = qeth_add_hw_header(queue, skb, &hdr, hw_hdr_len, proto_len,
				      &elements);
	if (push_len < 0)
		return push_len;
	if (is_tso || !push_len) {
		/* HW header needs its own buffer element. */
		hd_len = hw_hdr_len + proto_len;
		data_offset = push_len + proto_len;
	}
	memset(hdr, 0, hw_hdr_len);
	fill_header(queue, hdr, skb, ipv, frame_len);
	if (is_tso)
		qeth_fill_tso_ext((struct qeth_hdr_tso *) hdr,
				  frame_len - proto_len, skb, proto_len);

	if (IS_IQD(card)) {
		rc = __qeth_xmit(card, queue, skb, elements, hdr, data_offset,
				 hd_len);
	} else {
		/* TODO: drop skb_orphan() once TX completion is fast enough */
		skb_orphan(skb);
		rc = qeth_do_send_packet(card, queue, skb, hdr, data_offset,
					 hd_len, elements);
	}

	/* A separately allocated header is only consumed on success. */
	if (rc && !push_len)
		kmem_cache_free(qeth_core_header_cache, hdr);

	return rc;
}
EXPORT_SYMBOL_GPL(qeth_xmit);
4078
4079 static int qeth_setadp_promisc_mode_cb(struct qeth_card *card,
4080                 struct qeth_reply *reply, unsigned long data)
4081 {
4082         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4083         struct qeth_ipacmd_setadpparms *setparms;
4084
4085         QETH_CARD_TEXT(card, 4, "prmadpcb");
4086
4087         setparms = &(cmd->data.setadapterparms);
4088         if (qeth_setadpparms_inspect_rc(cmd)) {
4089                 QETH_CARD_TEXT_(card, 4, "prmrc%x", cmd->hdr.return_code);
4090                 setparms->data.mode = SET_PROMISC_MODE_OFF;
4091         }
4092         card->info.promisc_mode = setparms->data.mode;
4093         return (cmd->hdr.return_code) ? -EIO : 0;
4094 }
4095
4096 void qeth_setadp_promisc_mode(struct qeth_card *card, bool enable)
4097 {
4098         enum qeth_ipa_promisc_modes mode = enable ? SET_PROMISC_MODE_ON :
4099                                                     SET_PROMISC_MODE_OFF;
4100         struct qeth_cmd_buffer *iob;
4101         struct qeth_ipa_cmd *cmd;
4102
4103         QETH_CARD_TEXT(card, 4, "setprom");
4104         QETH_CARD_TEXT_(card, 4, "mode:%x", mode);
4105
4106         iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
4107                                    SETADP_DATA_SIZEOF(mode));
4108         if (!iob)
4109                 return;
4110         cmd = __ipa_cmd(iob);
4111         cmd->data.setadapterparms.data.mode = mode;
4112         qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
4113 }
4114 EXPORT_SYMBOL_GPL(qeth_setadp_promisc_mode);
4115
4116 static int qeth_setadpparms_change_macaddr_cb(struct qeth_card *card,
4117                 struct qeth_reply *reply, unsigned long data)
4118 {
4119         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4120         struct qeth_ipacmd_setadpparms *adp_cmd;
4121
4122         QETH_CARD_TEXT(card, 4, "chgmaccb");
4123         if (qeth_setadpparms_inspect_rc(cmd))
4124                 return -EIO;
4125
4126         adp_cmd = &cmd->data.setadapterparms;
4127         if (!is_valid_ether_addr(adp_cmd->data.change_addr.addr))
4128                 return -EADDRNOTAVAIL;
4129
4130         if (IS_LAYER2(card) && IS_OSD(card) && !IS_VM_NIC(card) &&
4131             !(adp_cmd->hdr.flags & QETH_SETADP_FLAGS_VIRTUAL_MAC))
4132                 return -EADDRNOTAVAIL;
4133
4134         ether_addr_copy(card->dev->dev_addr, adp_cmd->data.change_addr.addr);
4135         return 0;
4136 }
4137
4138 int qeth_setadpparms_change_macaddr(struct qeth_card *card)
4139 {
4140         int rc;
4141         struct qeth_cmd_buffer *iob;
4142         struct qeth_ipa_cmd *cmd;
4143
4144         QETH_CARD_TEXT(card, 4, "chgmac");
4145
4146         iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
4147                                    SETADP_DATA_SIZEOF(change_addr));
4148         if (!iob)
4149                 return -ENOMEM;
4150         cmd = __ipa_cmd(iob);
4151         cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
4152         cmd->data.setadapterparms.data.change_addr.addr_size = ETH_ALEN;
4153         ether_addr_copy(cmd->data.setadapterparms.data.change_addr.addr,
4154                         card->dev->dev_addr);
4155         rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_change_macaddr_cb,
4156                                NULL);
4157         return rc;
4158 }
4159 EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
4160
4161 static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
4162                 struct qeth_reply *reply, unsigned long data)
4163 {
4164         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
4165         struct qeth_set_access_ctrl *access_ctrl_req;
4166         int fallback = *(int *)reply->param;
4167
4168         QETH_CARD_TEXT(card, 4, "setaccb");
4169
4170         access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4171         QETH_CARD_TEXT_(card, 2, "rc=%d",
4172                         cmd->data.setadapterparms.hdr.return_code);
4173         if (cmd->data.setadapterparms.hdr.return_code !=
4174                                                 SET_ACCESS_CTRL_RC_SUCCESS)
4175                 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%#x) on device %x: %#x\n",
4176                                  access_ctrl_req->subcmd_code, CARD_DEVID(card),
4177                                  cmd->data.setadapterparms.hdr.return_code);
4178         switch (qeth_setadpparms_inspect_rc(cmd)) {
4179         case SET_ACCESS_CTRL_RC_SUCCESS:
4180                 if (card->options.isolation == ISOLATION_MODE_NONE) {
4181                         dev_info(&card->gdev->dev,
4182                             "QDIO data connection isolation is deactivated\n");
4183                 } else {
4184                         dev_info(&card->gdev->dev,
4185                             "QDIO data connection isolation is activated\n");
4186                 }
4187                 break;
4188         case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
4189                 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already deactivated\n",
4190                                  CARD_DEVID(card));
4191                 if (fallback)
4192                         card->options.isolation = card->options.prev_isolation;
4193                 break;
4194         case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
4195                 QETH_DBF_MESSAGE(2, "QDIO data connection isolation on device %x already activated\n",
4196                                  CARD_DEVID(card));
4197                 if (fallback)
4198                         card->options.isolation = card->options.prev_isolation;
4199                 break;
4200         case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
4201                 dev_err(&card->gdev->dev, "Adapter does not "
4202                         "support QDIO data connection isolation\n");
4203                 break;
4204         case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
4205                 dev_err(&card->gdev->dev,
4206                         "Adapter is dedicated. "
4207                         "QDIO data connection isolation not supported\n");
4208                 if (fallback)
4209                         card->options.isolation = card->options.prev_isolation;
4210                 break;
4211         case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
4212                 dev_err(&card->gdev->dev,
4213                         "TSO does not permit QDIO data connection isolation\n");
4214                 if (fallback)
4215                         card->options.isolation = card->options.prev_isolation;
4216                 break;
4217         case SET_ACCESS_CTRL_RC_REFLREL_UNSUPPORTED:
4218                 dev_err(&card->gdev->dev, "The adjacent switch port does not "
4219                         "support reflective relay mode\n");
4220                 if (fallback)
4221                         card->options.isolation = card->options.prev_isolation;
4222                 break;
4223         case SET_ACCESS_CTRL_RC_REFLREL_FAILED:
4224                 dev_err(&card->gdev->dev, "The reflective relay mode cannot be "
4225                                         "enabled at the adjacent switch port");
4226                 if (fallback)
4227                         card->options.isolation = card->options.prev_isolation;
4228                 break;
4229         case SET_ACCESS_CTRL_RC_REFLREL_DEACT_FAILED:
4230                 dev_warn(&card->gdev->dev, "Turning off reflective relay mode "
4231                                         "at the adjacent switch failed\n");
4232                 break;
4233         default:
4234                 /* this should never happen */
4235                 if (fallback)
4236                         card->options.isolation = card->options.prev_isolation;
4237                 break;
4238         }
4239         return (cmd->hdr.return_code) ? -EIO : 0;
4240 }
4241
4242 static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
4243                 enum qeth_ipa_isolation_modes isolation, int fallback)
4244 {
4245         int rc;
4246         struct qeth_cmd_buffer *iob;
4247         struct qeth_ipa_cmd *cmd;
4248         struct qeth_set_access_ctrl *access_ctrl_req;
4249
4250         QETH_CARD_TEXT(card, 4, "setacctl");
4251
4252         iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
4253                                    SETADP_DATA_SIZEOF(set_access_ctrl));
4254         if (!iob)
4255                 return -ENOMEM;
4256         cmd = __ipa_cmd(iob);
4257         access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
4258         access_ctrl_req->subcmd_code = isolation;
4259
4260         rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
4261                                &fallback);
4262         QETH_CARD_TEXT_(card, 2, "rc=%d", rc);
4263         return rc;
4264 }
4265
4266 int qeth_set_access_ctrl_online(struct qeth_card *card, int fallback)
4267 {
4268         int rc = 0;
4269
4270         QETH_CARD_TEXT(card, 4, "setactlo");
4271
4272         if ((IS_OSD(card) || IS_OSX(card)) &&
4273             qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
4274                 rc = qeth_setadpparms_set_access_ctrl(card,
4275                         card->options.isolation, fallback);
4276                 if (rc) {
4277                         QETH_DBF_MESSAGE(3, "IPA(SET_ACCESS_CTRL(%d) on device %x: sent failed\n",
4278                                          rc, CARD_DEVID(card));
4279                         rc = -EOPNOTSUPP;
4280                 }
4281         } else if (card->options.isolation != ISOLATION_MODE_NONE) {
4282                 card->options.isolation = ISOLATION_MODE_NONE;
4283
4284                 dev_err(&card->gdev->dev, "Adapter does not "
4285                         "support QDIO data connection isolation\n");
4286                 rc = -EOPNOTSUPP;
4287         }
4288         return rc;
4289 }
4290 EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
4291
4292 void qeth_tx_timeout(struct net_device *dev)
4293 {
4294         struct qeth_card *card;
4295
4296         card = dev->ml_priv;
4297         QETH_CARD_TEXT(card, 4, "txtimeo");
4298         qeth_schedule_recovery(card);
4299 }
4300 EXPORT_SYMBOL_GPL(qeth_tx_timeout);
4301
4302 static int qeth_mdio_read(struct net_device *dev, int phy_id, int regnum)
4303 {
4304         struct qeth_card *card = dev->ml_priv;
4305         int rc = 0;
4306
4307         switch (regnum) {
4308         case MII_BMCR: /* Basic mode control register */
4309                 rc = BMCR_FULLDPLX;
4310                 if ((card->info.link_type != QETH_LINK_TYPE_GBIT_ETH) &&
4311                     (card->info.link_type != QETH_LINK_TYPE_OSN) &&
4312                     (card->info.link_type != QETH_LINK_TYPE_10GBIT_ETH) &&
4313                     (card->info.link_type != QETH_LINK_TYPE_25GBIT_ETH))
4314                         rc |= BMCR_SPEED100;
4315                 break;
4316         case MII_BMSR: /* Basic mode status register */
4317                 rc = BMSR_ERCAP | BMSR_ANEGCOMPLETE | BMSR_LSTATUS |
4318                      BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | BMSR_100FULL |
4319                      BMSR_100BASE4;
4320                 break;
4321         case MII_PHYSID1: /* PHYS ID 1 */
4322                 rc = (dev->dev_addr[0] << 16) | (dev->dev_addr[1] << 8) |
4323                      dev->dev_addr[2];
4324                 rc = (rc >> 5) & 0xFFFF;
4325                 break;
4326         case MII_PHYSID2: /* PHYS ID 2 */
4327                 rc = (dev->dev_addr[2] << 10) & 0xFFFF;
4328                 break;
4329         case MII_ADVERTISE: /* Advertisement control reg */
4330                 rc = ADVERTISE_ALL;
4331                 break;
4332         case MII_LPA: /* Link partner ability reg */
4333                 rc = LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL |
4334                      LPA_100BASE4 | LPA_LPACK;
4335                 break;
4336         case MII_EXPANSION: /* Expansion register */
4337                 break;
4338         case MII_DCOUNTER: /* disconnect counter */
4339                 break;
4340         case MII_FCSCOUNTER: /* false carrier counter */
4341                 break;
4342         case MII_NWAYTEST: /* N-way auto-neg test register */
4343                 break;
4344         case MII_RERRCOUNTER: /* rx error counter */
4345                 rc = card->stats.rx_errors;
4346                 break;
4347         case MII_SREVISION: /* silicon revision */
4348                 break;
4349         case MII_RESV1: /* reserved 1 */
4350                 break;
4351         case MII_LBRERROR: /* loopback, rx, bypass error */
4352                 break;
4353         case MII_PHYADDR: /* physical address */
4354                 break;
4355         case MII_RESV2: /* reserved 2 */
4356                 break;
4357         case MII_TPISTATUS: /* TPI status for 10mbps */
4358                 break;
4359         case MII_NCONFIG: /* network interface config */
4360                 break;
4361         default:
4362                 break;
4363         }
4364         return rc;
4365 }
4366
/* Reply callback for SET_SNMP_CONTROL: copy each reply fragment into the
 * kernel staging buffer (qinfo->udata) that qeth_snmp_command() later
 * copies to userspace. Returns 1 while more fragments are expected,
 * 0 when complete, or a negative errno.
 */
static int qeth_snmp_command_cb(struct qeth_card *card,
                                struct qeth_reply *reply, unsigned long data)
{
        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
        struct qeth_arp_query_info *qinfo = reply->param;
        struct qeth_ipacmd_setadpparms *adp_cmd;
        unsigned int data_len;
        void *snmp_data;

        QETH_CARD_TEXT(card, 3, "snpcmdcb");

        if (cmd->hdr.return_code) {
                QETH_CARD_TEXT_(card, 4, "scer1%x", cmd->hdr.return_code);
                return -EIO;
        }
        if (cmd->data.setadapterparms.hdr.return_code) {
                cmd->hdr.return_code =
                        cmd->data.setadapterparms.hdr.return_code;
                QETH_CARD_TEXT_(card, 4, "scer2%x", cmd->hdr.return_code);
                return -EIO;
        }

        /* Payload length = adapter-parms command length minus its header. */
        adp_cmd = &cmd->data.setadapterparms;
        data_len = adp_cmd->hdr.cmdlength - sizeof(adp_cmd->hdr);
        if (adp_cmd->hdr.seq_no == 1) {
                /* First fragment carries the full snmp command incl. header. */
                snmp_data = &adp_cmd->data.snmp;
        } else {
                /* Follow-on fragments carry only the request portion. */
                snmp_data = &adp_cmd->data.snmp.request;
                data_len -= offsetof(struct qeth_snmp_cmd, request);
        }

        /* check if there is enough room in userspace */
        if ((qinfo->udata_len - qinfo->udata_offset) < data_len) {
                QETH_CARD_TEXT_(card, 4, "scer3%i", -ENOSPC);
                return -ENOSPC;
        }
        QETH_CARD_TEXT_(card, 4, "snore%i",
                        cmd->data.setadapterparms.hdr.used_total);
        QETH_CARD_TEXT_(card, 4, "sseqn%i",
                        cmd->data.setadapterparms.hdr.seq_no);
        /*copy entries to user buffer*/
        memcpy(qinfo->udata + qinfo->udata_offset, snmp_data, data_len);
        qinfo->udata_offset += data_len;

        /* Non-zero keeps the reply alive for the remaining fragments. */
        if (cmd->data.setadapterparms.hdr.seq_no <
            cmd->data.setadapterparms.hdr.used_total)
                return 1;
        return 0;
}
4416
/* ioctl backend: forward an SNMP request from userspace to the adapter
 * and copy the (possibly multi-fragment) reply back.
 * @udata points to a struct qeth_snmp_ureq in userspace.
 */
static int qeth_snmp_command(struct qeth_card *card, char __user *udata)
{
        struct qeth_snmp_ureq __user *ureq;
        struct qeth_cmd_buffer *iob;
        unsigned int req_len;
        struct qeth_arp_query_info qinfo = {0, };
        int rc = 0;

        QETH_CARD_TEXT(card, 3, "snmpcmd");

        if (IS_VM_NIC(card))
                return -EOPNOTSUPP;

        if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) &&
            IS_LAYER3(card))
                return -EOPNOTSUPP;

        ureq = (struct qeth_snmp_ureq __user *) udata;
        if (get_user(qinfo.udata_len, &ureq->hdr.data_len) ||
            get_user(req_len, &ureq->hdr.req_len))
                return -EFAULT;

        /* Sanitize user input, to avoid overflows in iob size calculation: */
        /* NOTE(review): the bound ignores the IPA / adapter-parms header
         * overhead that qeth_get_adapter_cmd() adds on top of req_len —
         * confirm the cmd buffer still fits for req_len close to
         * QETH_BUFSIZE.
         */
        if (req_len > QETH_BUFSIZE)
                return -EINVAL;

        iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL, req_len);
        if (!iob)
                return -ENOMEM;

        /* Copy the raw SNMP request payload into the command buffer. */
        if (copy_from_user(&__ipa_cmd(iob)->data.setadapterparms.data.snmp,
                           &ureq->cmd, req_len)) {
                qeth_put_cmd(iob);
                return -EFAULT;
        }

        /* Kernel staging buffer for the reply; sized by the caller. */
        qinfo.udata = kzalloc(qinfo.udata_len, GFP_KERNEL);
        if (!qinfo.udata) {
                qeth_put_cmd(iob);
                return -ENOMEM;
        }
        /* Reply data is written after the ureq header area. */
        qinfo.udata_offset = sizeof(struct qeth_snmp_ureq_hdr);

        rc = qeth_send_ipa_cmd(card, iob, qeth_snmp_command_cb, &qinfo);
        if (rc)
                QETH_DBF_MESSAGE(2, "SNMP command failed on device %x: (%#x)\n",
                                 CARD_DEVID(card), rc);
        else {
                if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
                        rc = -EFAULT;
        }

        kfree(qinfo.udata);
        return rc;
}
4472
/* Reply callback for QUERY_OAT: append each reply fragment to the
 * caller-provided buffer (struct qeth_qoat_priv via reply->param).
 * Returns 1 while more fragments are expected, 0 when done.
 */
static int qeth_setadpparms_query_oat_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
{
        struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
        struct qeth_qoat_priv *priv;
        char *resdata;
        int resdatalen;

        QETH_CARD_TEXT(card, 3, "qoatcb");
        if (qeth_setadpparms_inspect_rc(cmd))
                return -EIO;

        priv = (struct qeth_qoat_priv *)reply->param;
        resdatalen = cmd->data.setadapterparms.hdr.cmdlength;
        /* NOTE(review): 28 appears to be the combined IPA + adapter-parms
         * header size being skipped to reach the OAT payload — confirm
         * against the qeth_ipa_cmd layout.
         */
        resdata = (char *)data + 28;

        if (resdatalen > (priv->buffer_len - priv->response_len))
                return -ENOSPC;

        memcpy((priv->buffer + priv->response_len), resdata,
                resdatalen);
        priv->response_len += resdatalen;

        /* Non-zero keeps the reply alive for the remaining fragments. */
        if (cmd->data.setadapterparms.hdr.seq_no <
            cmd->data.setadapterparms.hdr.used_total)
                return 1;
        return 0;
}
4501
/* ioctl backend: run a QUERY_OAT adapter command and copy the result to
 * the user buffer described by struct qeth_query_oat_data at @udata.
 * Handles 32-bit compat callers via compat_ptr().
 */
static int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
{
        int rc = 0;
        struct qeth_cmd_buffer *iob;
        struct qeth_ipa_cmd *cmd;
        struct qeth_query_oat *oat_req;
        struct qeth_query_oat_data oat_data;
        struct qeth_qoat_priv priv;
        void __user *tmp;

        QETH_CARD_TEXT(card, 3, "qoatcmd");

        if (!qeth_adp_supported(card, IPA_SETADP_QUERY_OAT)) {
                rc = -EOPNOTSUPP;
                goto out;
        }

        if (copy_from_user(&oat_data, udata,
            sizeof(struct qeth_query_oat_data))) {
                        rc = -EFAULT;
                        goto out;
        }

        /* Kernel staging buffer, sized by the (user-supplied) buffer_len.
         * NOTE(review): buffer_len is not range-checked here beyond what
         * vzalloc() itself rejects — confirm oversized/zero requests are
         * acceptable to fail with -ENOMEM.
         */
        priv.buffer_len = oat_data.buffer_len;
        priv.response_len = 0;
        priv.buffer = vzalloc(oat_data.buffer_len);
        if (!priv.buffer) {
                rc = -ENOMEM;
                goto out;
        }

        iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
                                   SETADP_DATA_SIZEOF(query_oat));
        if (!iob) {
                rc = -ENOMEM;
                goto out_free;
        }
        cmd = __ipa_cmd(iob);
        oat_req = &cmd->data.setadapterparms.data.query_oat;
        oat_req->subcmd_code = oat_data.command;

        rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_query_oat_cb,
                               &priv);
        if (!rc) {
                /* oat_data.ptr is a user pointer stored as u64. */
                if (is_compat_task())
                        tmp = compat_ptr(oat_data.ptr);
                else
                        tmp = (void __user *)(unsigned long)oat_data.ptr;

                if (copy_to_user(tmp, priv.buffer,
                    priv.response_len)) {
                        rc = -EFAULT;
                        goto out_free;
                }

                oat_data.response_len = priv.response_len;

                /* Write back the actual response length to the caller. */
                if (copy_to_user(udata, &oat_data,
                    sizeof(struct qeth_query_oat_data)))
                        rc = -EFAULT;
        }

out_free:
        vfree(priv.buffer);
out:
        return rc;
}
4569
4570 static int qeth_query_card_info_cb(struct qeth_card *card,
4571                                    struct qeth_reply *reply, unsigned long data)
4572 {
4573         struct carrier_info *carrier_info = (struct carrier_info *)reply->param;
4574         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *)data;
4575         struct qeth_query_card_info *card_info;
4576
4577         QETH_CARD_TEXT(card, 2, "qcrdincb");
4578         if (qeth_setadpparms_inspect_rc(cmd))
4579                 return -EIO;
4580
4581         card_info = &cmd->data.setadapterparms.data.card_info;
4582         carrier_info->card_type = card_info->card_type;
4583         carrier_info->port_mode = card_info->port_mode;
4584         carrier_info->port_speed = card_info->port_speed;
4585         return 0;
4586 }
4587
4588 int qeth_query_card_info(struct qeth_card *card,
4589                          struct carrier_info *carrier_info)
4590 {
4591         struct qeth_cmd_buffer *iob;
4592
4593         QETH_CARD_TEXT(card, 2, "qcrdinfo");
4594         if (!qeth_adp_supported(card, IPA_SETADP_QUERY_CARD_INFO))
4595                 return -EOPNOTSUPP;
4596         iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO, 0);
4597         if (!iob)
4598                 return -ENOMEM;
4599         return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
4600                                         (void *)carrier_info);
4601 }
4602
4603 /**
4604  * qeth_vm_request_mac() - Request a hypervisor-managed MAC address
4605  * @card: pointer to a qeth_card
4606  *
4607  * Returns
4608  *      0, if a MAC address has been set for the card's netdevice
4609  *      a return code, for various error conditions
4610  */
4611 int qeth_vm_request_mac(struct qeth_card *card)
4612 {
4613         struct diag26c_mac_resp *response;
4614         struct diag26c_mac_req *request;
4615         struct ccw_dev_id id;
4616         int rc;
4617
4618         QETH_CARD_TEXT(card, 2, "vmreqmac");
4619
4620         request = kzalloc(sizeof(*request), GFP_KERNEL | GFP_DMA);
4621         response = kzalloc(sizeof(*response), GFP_KERNEL | GFP_DMA);
4622         if (!request || !response) {
4623                 rc = -ENOMEM;
4624                 goto out;
4625         }
4626
4627         ccw_device_get_id(CARD_DDEV(card), &id);
4628         request->resp_buf_len = sizeof(*response);
4629         request->resp_version = DIAG26C_VERSION2;
4630         request->op_code = DIAG26C_GET_MAC;
4631         request->devno = id.devno;
4632
4633         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4634         rc = diag26c(request, response, DIAG26C_MAC_SERVICES);
4635         QETH_DBF_HEX(CTRL, 2, request, sizeof(*request));
4636         if (rc)
4637                 goto out;
4638         QETH_DBF_HEX(CTRL, 2, response, sizeof(*response));
4639
4640         if (request->resp_buf_len < sizeof(*response) ||
4641             response->version != request->resp_version) {
4642                 rc = -EIO;
4643                 QETH_CARD_TEXT(card, 2, "badresp");
4644                 QETH_CARD_HEX(card, 2, &request->resp_buf_len,
4645                               sizeof(request->resp_buf_len));
4646         } else if (!is_valid_ether_addr(response->mac)) {
4647                 rc = -EINVAL;
4648                 QETH_CARD_TEXT(card, 2, "badmac");
4649                 QETH_CARD_HEX(card, 2, response->mac, ETH_ALEN);
4650         } else {
4651                 ether_addr_copy(card->dev->dev_addr, response->mac);
4652         }
4653
4654 out:
4655         kfree(response);
4656         kfree(request);
4657         return rc;
4658 }
4659 EXPORT_SYMBOL_GPL(qeth_vm_request_mac);
4660
/* Probe the data channel for device capabilities: reads the config data
 * and the QDIO SSQD descriptor, and decides whether a Completion Queue
 * is available. Temporarily sets the data device online if it was
 * offline, and restores that state before returning.
 */
static void qeth_determine_capabilities(struct qeth_card *card)
{
        struct qeth_channel *channel = &card->data;
        struct ccw_device *ddev = channel->ccwdev;
        int rc;
        int ddev_offline = 0;

        QETH_CARD_TEXT(card, 2, "detcapab");
        if (!ddev->online) {
                /* Remember we brought it up, so we can take it down again. */
                ddev_offline = 1;
                rc = ccw_device_set_online(ddev);
                if (rc) {
                        QETH_CARD_TEXT_(card, 2, "3err%d", rc);
                        goto out;
                }
        }

        rc = qeth_read_conf_data(card);
        if (rc) {
                QETH_DBF_MESSAGE(2, "qeth_read_conf_data on device %x returned %i\n",
                                 CARD_DEVID(card), rc);
                QETH_CARD_TEXT_(card, 2, "5err%d", rc);
                goto out_offline;
        }

        /* A failed SSQD read is not fatal; card->ssqd stays zeroed. */
        rc = qdio_get_ssqd_desc(ddev, &card->ssqd);
        if (rc)
                QETH_CARD_TEXT_(card, 2, "6err%d", rc);

        QETH_CARD_TEXT_(card, 2, "qfmt%d", card->ssqd.qfmt);
        QETH_CARD_TEXT_(card, 2, "ac1:%02x", card->ssqd.qdioac1);
        QETH_CARD_TEXT_(card, 2, "ac2:%04x", card->ssqd.qdioac2);
        QETH_CARD_TEXT_(card, 2, "ac3:%04x", card->ssqd.qdioac3);
        QETH_CARD_TEXT_(card, 2, "icnt%d", card->ssqd.icnt);
        /* CQ needs IQDIO queue format plus the INITIATE_INPUTQ (AC1) and
         * FORMAT2_CQ_AVAILABLE (AC3) facility bits; the condition below is
         * the negation of "any of these missing".
         */
        if (!((card->ssqd.qfmt != QDIO_IQDIO_QFMT) ||
            ((card->ssqd.qdioac1 & CHSC_AC1_INITIATE_INPUTQ) == 0) ||
            ((card->ssqd.qdioac3 & CHSC_AC3_FORMAT2_CQ_AVAILABLE) == 0))) {
                dev_info(&card->gdev->dev,
                        "Completion Queueing supported\n");
        } else {
                card->options.cq = QETH_CQ_NOTAVAILABLE;
        }


out_offline:
        if (ddev_offline == 1)
                qeth_stop_channel(channel);
out:
        return;
}
4711
4712 static void qeth_qdio_establish_cq(struct qeth_card *card,
4713                                    struct qdio_buffer **in_sbal_ptrs,
4714                                    void (**queue_start_poll)
4715                                         (struct ccw_device *, int,
4716                                          unsigned long))
4717 {
4718         int i;
4719
4720         if (card->options.cq == QETH_CQ_ENABLED) {
4721                 int offset = QDIO_MAX_BUFFERS_PER_Q *
4722                              (card->qdio.no_in_queues - 1);
4723
4724                 for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
4725                         in_sbal_ptrs[offset + i] =
4726                                 card->qdio.c_q->bufs[i].buffer;
4727
4728                 queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
4729         }
4730 }
4731
/* Allocate and establish the QDIO queues for @card.
 *
 * Builds the flat SBAL pointer arrays for all input queues (including the
 * optional completion queue) and all output queues, fills a struct
 * qdio_initialize and hands it to the qdio layer.  The allocate/establish
 * step itself only runs when the card transitions from QETH_QDIO_ALLOCATED
 * to QETH_QDIO_ESTABLISHED; on failure the state is rolled back.
 *
 * Returns 0 on success or a negative errno.
 */
static int qeth_qdio_establish(struct qeth_card *card)
{
	struct qdio_initialize init_data;
	char *qib_param_field;
	struct qdio_buffer **in_sbal_ptrs;
	void (**queue_start_poll) (struct ccw_device *, int, unsigned long);
	struct qdio_buffer **out_sbal_ptrs;
	int i, j, k;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "qdioest");

	/* QIB parameter area handed to the device at establish time */
	qib_param_field = kzalloc(FIELD_SIZEOF(struct qib, parm), GFP_KERNEL);
	if (!qib_param_field) {
		rc =  -ENOMEM;
		goto out_free_nothing;
	}

	qeth_create_qib_param_field(card, qib_param_field);
	qeth_create_qib_param_field_blkt(card, qib_param_field);

	/* one SBAL pointer per buffer, for every input queue */
	in_sbal_ptrs = kcalloc(card->qdio.no_in_queues * QDIO_MAX_BUFFERS_PER_Q,
			       sizeof(void *),
			       GFP_KERNEL);
	if (!in_sbal_ptrs) {
		rc = -ENOMEM;
		goto out_free_qib_param;
	}

	/* queue 0 is the regular RX queue */
	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
		in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;

	queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
				   GFP_KERNEL);
	if (!queue_start_poll) {
		rc = -ENOMEM;
		goto out_free_in_sbals;
	}
	for (i = 0; i < card->qdio.no_in_queues; ++i)
		queue_start_poll[i] = qeth_qdio_start_poll;

	/* with CQ enabled, this appends the CQ's SBALs and clears its
	 * start-poll hook
	 */
	qeth_qdio_establish_cq(card, in_sbal_ptrs, queue_start_poll);

	out_sbal_ptrs =
		kcalloc(card->qdio.no_out_queues * QDIO_MAX_BUFFERS_PER_Q,
			sizeof(void *),
			GFP_KERNEL);
	if (!out_sbal_ptrs) {
		rc = -ENOMEM;
		goto out_free_queue_start_poll;
	}

	for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
		for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++)
			out_sbal_ptrs[k] =
				card->qdio.out_qs[i]->bufs[j]->buffer;

	memset(&init_data, 0, sizeof(struct qdio_initialize));
	init_data.cdev                   = CARD_DDEV(card);
	init_data.q_format               = IS_IQD(card) ? QDIO_IQDIO_QFMT :
							  QDIO_QETH_QFMT;
	init_data.qib_param_field_format = 0;
	init_data.qib_param_field        = qib_param_field;
	init_data.no_input_qs            = card->qdio.no_in_queues;
	init_data.no_output_qs           = card->qdio.no_out_queues;
	init_data.input_handler          = qeth_qdio_input_handler;
	init_data.output_handler         = qeth_qdio_output_handler;
	init_data.queue_start_poll_array = queue_start_poll;
	init_data.int_parm               = (unsigned long) card;
	init_data.input_sbal_addr_array  = in_sbal_ptrs;
	init_data.output_sbal_addr_array = out_sbal_ptrs;
	init_data.output_sbal_state_array = card->qdio.out_bufstates;
	/* IQD: no scan threshold, TX completion is polled (see qeth_tx_poll) */
	init_data.scan_threshold         = IS_IQD(card) ? 0 : 32;

	/* only the caller who wins the ALLOCATED -> ESTABLISHED transition
	 * actually talks to the qdio layer
	 */
	if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED,
		QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) {
		rc = qdio_allocate(&init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			goto out;
		}
		rc = qdio_establish(&init_data);
		if (rc) {
			atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
			qdio_free(CARD_DDEV(card));
		}
	}

	switch (card->options.cq) {
	case QETH_CQ_ENABLED:
		dev_info(&card->gdev->dev, "Completion Queue support enabled");
		break;
	case QETH_CQ_DISABLED:
		dev_info(&card->gdev->dev, "Completion Queue support disabled");
		break;
	default:
		break;
	}
out:
	/* the arrays are only needed for the establish call itself */
	kfree(out_sbal_ptrs);
out_free_queue_start_poll:
	kfree(queue_start_poll);
out_free_in_sbals:
	kfree(in_sbal_ptrs);
out_free_qib_param:
	kfree(qib_param_field);
out_free_nothing:
	return rc;
}
4841
/* Final teardown of a qeth card: release the CCW channels, the cached
 * read command, the event workqueue and the qdio queues, then free the
 * card structure itself.  The gdev's drvdata link is cleared first so
 * nothing can reach the card through it after the kfree().
 */
static void qeth_core_free_card(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "freecrd");
	qeth_clean_channel(&card->read);
	qeth_clean_channel(&card->write);
	qeth_clean_channel(&card->data);
	qeth_put_cmd(card->read_cmd);
	destroy_workqueue(card->event_wq);
	qeth_free_qdio_queues(card);
	unregister_service_level(&card->qeth_service_level);
	dev_set_drvdata(&card->gdev->dev, NULL);
	kfree(card);
}
4855
/* Dump the card's IPA (v4/v6), adapter-parameter and diag-assist feature
 * masks into the trace area for later inspection via the s390 debug
 * feature.
 */
void qeth_trace_features(struct qeth_card *card)
{
	QETH_CARD_TEXT(card, 2, "features");
	QETH_CARD_HEX(card, 2, &card->options.ipa4, sizeof(card->options.ipa4));
	QETH_CARD_HEX(card, 2, &card->options.ipa6, sizeof(card->options.ipa6));
	QETH_CARD_HEX(card, 2, &card->options.adp, sizeof(card->options.adp));
	QETH_CARD_HEX(card, 2, &card->info.diagass_support,
		      sizeof(card->info.diagass_support));
}
4865 EXPORT_SYMBOL_GPL(qeth_trace_features);
4866
/* CCW device IDs served by this driver: the control-unit type/model plus
 * device type/model pair selects the card flavor stored in driver_info
 * (OSD, IQD/HiperSockets, OSN, OSM, OSX).
 */
static struct ccw_device_id qeth_ids[] = {
	{CCW_DEVICE_DEVTYPE(0x1731, 0x01, 0x1732, 0x01),
					.driver_info = QETH_CARD_TYPE_OSD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x05, 0x1732, 0x05),
					.driver_info = QETH_CARD_TYPE_IQD},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x06, 0x1732, 0x06),
					.driver_info = QETH_CARD_TYPE_OSN},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x03),
					.driver_info = QETH_CARD_TYPE_OSM},
	{CCW_DEVICE_DEVTYPE(0x1731, 0x02, 0x1732, 0x02),
					.driver_info = QETH_CARD_TYPE_OSX},
	{},
};
4880 MODULE_DEVICE_TABLE(ccw, qeth_ids);
4881
/* CCW driver for the individual qeth subchannels; probe and remove are
 * delegated to the ccwgroup helpers, which assemble the read/write/data
 * subchannels into one grouped device.
 */
static struct ccw_driver qeth_ccw_driver = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ids = qeth_ids,
	.probe = ccwgroup_probe_ccwdev,
	.remove = ccwgroup_remove_ccwdev,
};
4891
/* Bring a qeth device through IDX activation and MPC initialization.
 *
 * Clears and restarts the read/write/data channels, runs the IDX activate
 * sequences, initializes MPC, issues STARTLAN, and then re-queries the
 * IPA assists and adapter parameters from scratch.  Transient failures
 * are retried up to three times; -ERESTARTSYS / -EINTR abort immediately.
 * @carrier_ok is set according to whether STARTLAN found the LAN up.
 *
 * Returns 0 on success or a negative errno.
 */
int qeth_core_hardsetup_card(struct qeth_card *card, bool *carrier_ok)
{
	int retries = 3;
	int rc;

	QETH_CARD_TEXT(card, 2, "hrdsetup");
	atomic_set(&card->force_alloc_skb, 0);
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		return rc;
retry:
	if (retries < 3)
		QETH_DBF_MESSAGE(2, "Retrying to do IDX activates on device %x.\n",
				 CARD_DEVID(card));
	/* NOTE(review): this rc is overwritten by ccw_device_set_online()
	 * below before it is ever checked — the clear appears to be
	 * deliberate best-effort; confirm before "fixing".
	 */
	rc = qeth_qdio_clear_card(card, !IS_IQD(card));
	/* start from a clean slate: take all channels offline first */
	qeth_stop_channel(&card->data);
	qeth_stop_channel(&card->write);
	qeth_stop_channel(&card->read);
	qdio_free(CARD_DDEV(card));
	rc = ccw_device_set_online(CARD_RDEV(card));
	if (rc)
		goto retriable;
	rc = ccw_device_set_online(CARD_WDEV(card));
	if (rc)
		goto retriable;
	rc = ccw_device_set_online(CARD_DDEV(card));
	if (rc)
		goto retriable;
retriable:
	if (rc == -ERESTARTSYS) {
		QETH_CARD_TEXT(card, 2, "break1");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "1err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	qeth_determine_capabilities(card);
	qeth_init_tokens(card);
	qeth_init_func_level(card);

	rc = qeth_idx_activate_read_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break2");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "3err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}

	rc = qeth_idx_activate_write_channel(card);
	if (rc == -EINTR) {
		QETH_CARD_TEXT(card, 2, "break3");
		return rc;
	} else if (rc) {
		QETH_CARD_TEXT_(card, 2, "4err%d", rc);
		if (--retries < 0)
			goto out;
		else
			goto retry;
	}
	card->read_or_write_problem = 0;
	rc = qeth_mpc_initialize(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "5err%d", rc);
		goto out;
	}

	rc = qeth_send_startlan(card);
	if (rc) {
		QETH_CARD_TEXT_(card, 2, "6err%d", rc);
		if (rc == -ENETDOWN) {
			/* not fatal: the device comes up without carrier */
			dev_warn(&card->gdev->dev, "The LAN is offline\n");
			*carrier_ok = false;
		} else {
			goto out;
		}
	} else {
		*carrier_ok = true;
	}

	/* forget previously negotiated capabilities and re-query them */
	card->options.ipa4.supported_funcs = 0;
	card->options.ipa6.supported_funcs = 0;
	card->options.adp.supported_funcs = 0;
	card->options.sbp.supported_funcs = 0;
	card->info.diagass_support = 0;
	rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
	if (rc == -ENOMEM)
		goto out;
	if (qeth_is_supported(card, IPA_IPV6)) {
		rc = qeth_query_ipassists(card, QETH_PROT_IPV6);
		if (rc == -ENOMEM)
			goto out;
	}
	if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
		rc = qeth_query_setadapterparms(card);
		if (rc < 0) {
			QETH_CARD_TEXT_(card, 2, "7err%d", rc);
			goto out;
		}
	}
	if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
		rc = qeth_query_setdiagass(card);
		if (rc)
			/* diag assist failure is only logged, not fatal */
			QETH_CARD_TEXT_(card, 2, "8err%d", rc);
	}
	return 0;
out:
	dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
		"an error on the device\n");
	QETH_DBF_MESSAGE(2, "Initialization for device %x failed in hardsetup! rc=%d\n",
			 CARD_DEVID(card), rc);
	return rc;
}
5011 EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card);
5012
/* Append one QDIO buffer element's payload to @skb.
 *
 * While the skb is still empty, copy as much as fits into the linear part
 * first (bounded by the skb's tailroom); any remainder is attached as a
 * page fragment.  A page reference is taken for the fragment so the
 * element's backing page stays valid while the skb is in flight.
 */
static void qeth_create_skb_frag(struct qdio_buffer_element *element,
				 struct sk_buff *skb, int offset, int data_len)
{
	struct page *page = virt_to_page(element->addr);
	unsigned int next_frag;

	/* first fill the linear space */
	if (!skb->len) {
		unsigned int linear = min(data_len, skb_tailroom(skb));

		skb_put_data(skb, element->addr + offset, linear);
		data_len -= linear;
		if (!data_len)
			return;
		offset += linear;
		/* fall through to add page frag for remaining data */
	}

	next_frag = skb_shinfo(skb)->nr_frags;
	get_page(page);
	skb_add_rx_frag(skb, next_frag, page, offset, data_len, data_len);
}
5035
/* Returns non-zero if @sbale carries the last-entry flag of its SBAL. */
static inline int qeth_is_last_sbale(struct qdio_buffer_element *sbale)
{
	return (sbale->eflags & SBAL_EFLAGS_LAST_ENTRY);
}
5040
/* Extract the next packet from a received QDIO buffer.
 *
 * @qethbuffer:	the input buffer being parsed
 * @__element:	in/out cursor: current buffer element
 * @__offset:	in/out cursor: byte offset within *__element
 * @hdr:	on success, set to the packet's qeth_hdr inside the buffer
 *
 * Depending on packet size and card options the payload is either copied
 * into a freshly allocated skb or attached as page fragments (rx_sg path).
 * Returns the skb, or NULL when no further packet is available, the
 * buffer is malformed, or allocation failed.
 */
struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card,
		struct qeth_qdio_buffer *qethbuffer,
		struct qdio_buffer_element **__element, int *__offset,
		struct qeth_hdr **hdr)
{
	struct qdio_buffer_element *element = *__element;
	struct qdio_buffer *buffer = qethbuffer->buffer;
	int offset = *__offset;
	struct sk_buff *skb;
	int skb_len = 0;
	void *data_ptr;
	int data_len;
	int headroom = 0;
	int use_rx_sg = 0;

	/* qeth_hdr must not cross element boundaries */
	while (element->length < offset + sizeof(struct qeth_hdr)) {
		if (qeth_is_last_sbale(element))
			return NULL;
		element++;
		offset = 0;
	}
	*hdr = element->addr + offset;

	offset += sizeof(struct qeth_hdr);
	/* packet length and required headroom depend on the header type */
	switch ((*hdr)->hdr.l2.id) {
	case QETH_HEADER_TYPE_LAYER2:
		skb_len = (*hdr)->hdr.l2.pkt_length;
		break;
	case QETH_HEADER_TYPE_LAYER3:
		skb_len = (*hdr)->hdr.l3.length;
		headroom = ETH_HLEN;
		break;
	case QETH_HEADER_TYPE_OSN:
		skb_len = (*hdr)->hdr.osn.pdu_length;
		headroom = sizeof(struct qeth_hdr);
		break;
	default:
		break;
	}

	if (!skb_len)
		return NULL;

	/* scatter-gather for big packets (unless forced off), and always
	 * when Completion Queueing is enabled
	 */
	if (((skb_len >= card->options.rx_sg_cb) &&
	     !IS_OSN(card) &&
	     (!atomic_read(&card->force_alloc_skb))) ||
	    (card->options.cq == QETH_CQ_ENABLED))
		use_rx_sg = 1;

	if (use_rx_sg && qethbuffer->rx_skb) {
		/* QETH_CQ_ENABLED only: */
		skb = qethbuffer->rx_skb;
		qethbuffer->rx_skb = NULL;
	} else {
		unsigned int linear = (use_rx_sg) ? QETH_RX_PULL_LEN : skb_len;

		skb = napi_alloc_skb(&card->napi, linear + headroom);
	}
	if (!skb)
		goto no_mem;
	if (headroom)
		skb_reserve(skb, headroom);

	/* walk the buffer elements, copying or attaching the payload */
	data_ptr = element->addr + offset;
	while (skb_len) {
		data_len = min(skb_len, (int)(element->length - offset));
		if (data_len) {
			if (use_rx_sg)
				qeth_create_skb_frag(element, skb, offset,
						     data_len);
			else
				skb_put_data(skb, data_ptr, data_len);
		}
		skb_len -= data_len;
		if (skb_len) {
			/* packet claims more data than the buffer holds */
			if (qeth_is_last_sbale(element)) {
				QETH_CARD_TEXT(card, 4, "unexeob");
				QETH_CARD_HEX(card, 2, buffer, sizeof(void *));
				dev_kfree_skb_any(skb);
				QETH_CARD_STAT_INC(card, rx_errors);
				return NULL;
			}
			element++;
			offset = 0;
			data_ptr = element->addr;
		} else {
			offset += data_len;
		}
	}
	/* advance the caller's cursor past this packet */
	*__element = element;
	*__offset = offset;
	if (use_rx_sg) {
		QETH_CARD_STAT_INC(card, rx_sg_skbs);
		QETH_CARD_STAT_ADD(card, rx_sg_frags,
				   skb_shinfo(skb)->nr_frags);
	}
	return skb;
no_mem:
	if (net_ratelimit()) {
		QETH_CARD_TEXT(card, 2, "noskbmem");
	}
	QETH_CARD_STAT_INC(card, rx_dropped);
	return NULL;
}
5146 EXPORT_SYMBOL_GPL(qeth_core_get_next_skb);
5147
/* NAPI poll handler for the RX path.
 *
 * Pulls completed input buffers from the qdio layer, hands each one to
 * the discipline's process_rx_buffer() unless it carries a qdio error,
 * and returns fully processed buffers to the hardware.  Stops once
 * @budget packets were handled; otherwise completes NAPI and re-arms the
 * qdio interrupt (re-scheduling itself if work arrived in between).
 */
int qeth_poll(struct napi_struct *napi, int budget)
{
	struct qeth_card *card = container_of(napi, struct qeth_card, napi);
	int work_done = 0;
	struct qeth_qdio_buffer *buffer;
	int done;
	int new_budget = budget;

	while (1) {
		/* refill the window of completed RX buffers */
		if (!card->rx.b_count) {
			card->rx.qdio_err = 0;
			card->rx.b_count = qdio_get_next_buffers(
				card->data.ccwdev, 0, &card->rx.b_index,
				&card->rx.qdio_err);
			if (card->rx.b_count <= 0) {
				card->rx.b_count = 0;
				break;
			}
			card->rx.b_element =
				&card->qdio.in_q->bufs[card->rx.b_index]
				.buffer->element[0];
			card->rx.e_offset = 0;
		}

		while (card->rx.b_count) {
			buffer = &card->qdio.in_q->bufs[card->rx.b_index];
			/* buffers with qdio errors are dropped, not parsed */
			if (!(card->rx.qdio_err &&
			    qeth_check_qdio_errors(card, buffer->buffer,
			    card->rx.qdio_err, "qinerr")))
				work_done +=
					card->discipline->process_rx_buffer(
						card, new_budget, &done);
			else
				done = 1;

			if (done) {
				/* buffer fully consumed: hand it back */
				QETH_CARD_STAT_INC(card, rx_bufs);
				qeth_put_buffer_pool_entry(card,
					buffer->pool_entry);
				qeth_queue_input_buffer(card, card->rx.b_index);
				card->rx.b_count--;
				if (card->rx.b_count) {
					card->rx.b_index =
						(card->rx.b_index + 1) %
						QDIO_MAX_BUFFERS_PER_Q;
					card->rx.b_element =
						&card->qdio.in_q
						->bufs[card->rx.b_index]
						.buffer->element[0];
					card->rx.e_offset = 0;
				}
			}

			if (work_done >= budget)
				goto out;
			else
				new_budget = budget - work_done;
		}
	}

	napi_complete_done(napi, work_done);
	if (qdio_start_irq(card->data.ccwdev, 0))
		napi_schedule(&card->napi);
out:
	return work_done;
}
5214 EXPORT_SYMBOL_GPL(qeth_poll);
5215
/* Handle TX completion of a single IQD output buffer.
 *
 * If the hardware flagged the buffer as PENDING (CQ mode only), notify
 * its skbs, resolve the race with a concurrent QAOB notification from
 * qeth_qdio_handle_aob(), and replace the buffer so the queue slot can
 * be reused.  Otherwise the buffer is completed and recycled directly.
 */
static void qeth_iqd_tx_complete(struct qeth_qdio_out_q *queue,
				 unsigned int bidx, bool error, int budget)
{
	struct qeth_qdio_out_buffer *buffer = queue->bufs[bidx];
	u8 sflags = buffer->buffer->element[15].sflags;
	struct qeth_card *card = queue->card;

	if (queue->bufstates && (queue->bufstates[bidx].flags &
				 QDIO_OUTBUF_STATE_FLAG_PENDING)) {
		/* pending buffers only occur with Completion Queueing */
		WARN_ON_ONCE(card->options.cq != QETH_CQ_ENABLED);

		if (atomic_cmpxchg(&buffer->state, QETH_QDIO_BUF_PRIMED,
						   QETH_QDIO_BUF_PENDING) ==
		    QETH_QDIO_BUF_PRIMED) {
			qeth_notify_skbs(queue, buffer, TX_NOTIFY_PENDING);

			/* Handle race with qeth_qdio_handle_aob(): */
			switch (atomic_xchg(&buffer->state,
					    QETH_QDIO_BUF_NEED_QAOB)) {
			case QETH_QDIO_BUF_PENDING:
				/* No concurrent QAOB notification. */
				break;
			case QETH_QDIO_BUF_QAOB_OK:
				/* QAOB already arrived, reporting success */
				qeth_notify_skbs(queue, buffer,
						 TX_NOTIFY_DELAYED_OK);
				atomic_set(&buffer->state,
					   QETH_QDIO_BUF_HANDLED_DELAYED);
				break;
			case QETH_QDIO_BUF_QAOB_ERROR:
				/* QAOB already arrived, reporting an error */
				qeth_notify_skbs(queue, buffer,
						 TX_NOTIFY_DELAYED_GENERALERROR);
				atomic_set(&buffer->state,
					   QETH_QDIO_BUF_HANDLED_DELAYED);
				break;
			default:
				WARN_ON_ONCE(1);
			}
		}

		QETH_CARD_TEXT_(card, 5, "pel%u", bidx);

		/* prepare the queue slot for re-use: */
		qeth_scrub_qdio_buffer(buffer->buffer, queue->max_elements);
		if (qeth_init_qdio_out_buf(queue, bidx)) {
			QETH_CARD_TEXT(card, 2, "outofbuf");
			qeth_schedule_recovery(card);
		}

		return;
	}

	if (card->options.cq == QETH_CQ_ENABLED)
		qeth_notify_skbs(queue, buffer,
				 qeth_compute_cq_notification(sflags, 0));
	qeth_clear_output_buffer(queue, buffer, error, budget);
}
5272
/* NAPI poll handler for an IQD TX queue.
 *
 * Drains completed transmit buffers from the qdio layer, accounts the
 * completed packets/bytes via BQL (netdev_tx_completed_queue) and wakes
 * the txq if it was stopped on a full queue.  Completes NAPI when the
 * queue is empty, after a full ring's worth of work, or when qdio has
 * nothing to report (arming the completion timer in that case).
 */
static int qeth_tx_poll(struct napi_struct *napi, int budget)
{
	struct qeth_qdio_out_q *queue = qeth_napi_to_out_queue(napi);
	unsigned int queue_no = queue->queue_no;
	struct qeth_card *card = queue->card;
	struct net_device *dev = card->dev;
	unsigned int work_done = 0;
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(dev, qeth_iqd_translate_txq(dev, queue_no));

	while (1) {
		unsigned int start, error, i;
		unsigned int packets = 0;
		unsigned int bytes = 0;
		int completed;

		if (qeth_out_queue_is_empty(queue)) {
			napi_complete(napi);
			return 0;
		}

		/* Give the CPU a breather: */
		if (work_done >= QDIO_MAX_BUFFERS_PER_Q) {
			QETH_TXQ_STAT_INC(queue, completion_yield);
			if (napi_complete_done(napi, 0))
				napi_schedule(napi);
			return 0;
		}

		completed = qdio_inspect_queue(CARD_DDEV(card), queue_no, false,
					       &start, &error);
		if (completed <= 0) {
			/* Ensure we see TX completion for pending work: */
			if (napi_complete_done(napi, 0))
				qeth_tx_arm_timer(queue);
			return 0;
		}

		for (i = start; i < start + completed; i++) {
			struct qeth_qdio_out_buffer *buffer;
			unsigned int bidx = QDIO_BUFNR(i);

			buffer = queue->bufs[bidx];
			packets += skb_queue_len(&buffer->skb_list);
			bytes += buffer->bytes;

			qeth_handle_send_error(card, buffer, error);
			qeth_iqd_tx_complete(queue, bidx, error, budget);
			qeth_cleanup_handled_pending(queue, bidx, false);
		}

		netdev_tx_completed_queue(txq, packets, bytes);
		atomic_sub(completed, &queue->used_buffers);
		work_done += completed;

		/* xmit may have observed the full-condition, but not yet
		 * stopped the txq. In which case the code below won't trigger.
		 * So before returning, xmit will re-check the txq's fill level
		 * and wake it up if needed.
		 */
		if (netif_tx_queue_stopped(txq) &&
		    !qeth_out_queue_is_full(queue))
			netif_tx_wake_queue(txq);
	}
}
5339
5340 static int qeth_setassparms_inspect_rc(struct qeth_ipa_cmd *cmd)
5341 {
5342         if (!cmd->hdr.return_code)
5343                 cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5344         return cmd->hdr.return_code;
5345 }
5346
/* IPA reply callback: copy the supported/enabled capability masks from a
 * SETASSPARMS caps reply into the caller-provided qeth_ipa_caps
 * (reply->param).  Returns 0 on success, -EIO on an IPA error.
 */
static int qeth_setassparms_get_caps_cb(struct qeth_card *card,
					struct qeth_reply *reply,
					unsigned long data)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
	struct qeth_ipa_caps *caps = reply->param;

	if (qeth_setassparms_inspect_rc(cmd))
		return -EIO;

	caps->supported = cmd->data.setassparms.data.caps.supported;
	caps->enabled = cmd->data.setassparms.data.caps.enabled;
	return 0;
}
5361
5362 int qeth_setassparms_cb(struct qeth_card *card,
5363                         struct qeth_reply *reply, unsigned long data)
5364 {
5365         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5366
5367         QETH_CARD_TEXT(card, 4, "defadpcb");
5368
5369         if (cmd->hdr.return_code)
5370                 return -EIO;
5371
5372         cmd->hdr.return_code = cmd->data.setassparms.hdr.return_code;
5373         if (cmd->hdr.prot_version == QETH_PROT_IPV4)
5374                 card->options.ipa4.enabled_funcs = cmd->hdr.ipa_enabled;
5375         if (cmd->hdr.prot_version == QETH_PROT_IPV6)
5376                 card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
5377         return 0;
5378 }
5379 EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
5380
/* Allocate and pre-fill a SETASSPARMS command buffer.
 *
 * @ipa_func:	 the assist the command addresses
 * @cmd_code:	 SETASSPARMS sub-command code
 * @data_length: length of the sub-command's payload
 * @prot:	 IP protocol version for the IPA header
 *
 * Returns the prepared command buffer, or NULL on allocation failure.
 */
struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *card,
						 enum qeth_ipa_funcs ipa_func,
						 u16 cmd_code,
						 unsigned int data_length,
						 enum qeth_prot_versions prot)
{
	struct qeth_ipacmd_setassparms *setassparms;
	struct qeth_ipacmd_setassparms_hdr *hdr;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT(card, 4, "getasscm");
	/* command size = payload + everything preceding the data union */
	iob = qeth_ipa_alloc_cmd(card, IPA_CMD_SETASSPARMS, prot,
				 data_length +
				 offsetof(struct qeth_ipacmd_setassparms,
					  data));
	if (!iob)
		return NULL;

	setassparms = &__ipa_cmd(iob)->data.setassparms;
	setassparms->assist_no = ipa_func;

	hdr = &setassparms->hdr;
	hdr->length = sizeof(*hdr) + data_length;
	hdr->command_code = cmd_code;
	return iob;
}
5407 EXPORT_SYMBOL_GPL(qeth_get_setassparms_cmd);
5408
/* Build and send a simple SETASSPARMS command, optionally carrying one
 * 32-bit data word (@data may be NULL for commands without payload).
 * Returns 0 on success or a negative errno.
 */
int qeth_send_simple_setassparms_prot(struct qeth_card *card,
				      enum qeth_ipa_funcs ipa_func,
				      u16 cmd_code, u32 *data,
				      enum qeth_prot_versions prot)
{
	unsigned int length = data ? SETASS_DATA_SIZEOF(flags_32bit) : 0;
	struct qeth_cmd_buffer *iob;

	QETH_CARD_TEXT_(card, 4, "simassp%i", prot);
	iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code, length, prot);
	if (!iob)
		return -ENOMEM;

	if (data)
		__ipa_cmd(iob)->data.setassparms.data.flags_32bit = *data;
	return qeth_send_ipa_cmd(card, iob, qeth_setassparms_cb, NULL);
}
5426 EXPORT_SYMBOL_GPL(qeth_send_simple_setassparms_prot);
5427
5428 static void qeth_unregister_dbf_views(void)
5429 {
5430         int x;
5431         for (x = 0; x < QETH_DBF_INFOS; x++) {
5432                 debug_unregister(qeth_dbf[x].id);
5433                 qeth_dbf[x].id = NULL;
5434         }
5435 }
5436
/* printf-style helper for debug-feature text events; the formatted text
 * is truncated to fit dbf_txt_buf (31 characters + NUL).  Does nothing
 * when @level is not enabled for @id.
 */
void qeth_dbf_longtext(debug_info_t *id, int level, char *fmt, ...)
{
	char dbf_txt_buf[32];
	va_list args;

	if (!debug_level_enabled(id, level))
		return;
	va_start(args, fmt);
	vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args);
	va_end(args);
	debug_text_event(id, level, dbf_txt_buf);
}
5449 EXPORT_SYMBOL_GPL(qeth_dbf_longtext);
5450
5451 static int qeth_register_dbf_views(void)
5452 {
5453         int ret;
5454         int x;
5455
5456         for (x = 0; x < QETH_DBF_INFOS; x++) {
5457                 /* register the areas */
5458                 qeth_dbf[x].id = debug_register(qeth_dbf[x].name,
5459                                                 qeth_dbf[x].pages,
5460                                                 qeth_dbf[x].areas,
5461                                                 qeth_dbf[x].len);
5462                 if (qeth_dbf[x].id == NULL) {
5463                         qeth_unregister_dbf_views();
5464                         return -ENOMEM;
5465                 }
5466
5467                 /* register a view */
5468                 ret = debug_register_view(qeth_dbf[x].id, qeth_dbf[x].view);
5469                 if (ret) {
5470                         qeth_unregister_dbf_views();
5471                         return ret;
5472                 }
5473
5474                 /* set a passing level */
5475                 debug_set_level(qeth_dbf[x].id, qeth_dbf[x].level);
5476         }
5477
5478         return 0;
5479 }
5480
5481 static DEFINE_MUTEX(qeth_mod_mutex);    /* for synchronized module loading */
5482
/* Bind a layer discipline (L2 or L3) to the card: grab a reference on the
 * matching qeth_l2/qeth_l3 module (loading it on demand via request_module)
 * and record the chosen layer in the card options.
 * Returns 0 on success, -EINVAL when no module provides the discipline.
 */
int qeth_core_load_discipline(struct qeth_card *card,
		enum qeth_discipline_id discipline)
{
	/* serialize against concurrent loads triggered by other cards */
	mutex_lock(&qeth_mod_mutex);
	switch (discipline) {
	case QETH_DISCIPLINE_LAYER3:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l3_discipline), "qeth_l3");
		break;
	case QETH_DISCIPLINE_LAYER2:
		card->discipline = try_then_request_module(
			symbol_get(qeth_l2_discipline), "qeth_l2");
		break;
	default:
		/* unknown discipline: card->discipline stays NULL */
		break;
	}
	mutex_unlock(&qeth_mod_mutex);

	if (!card->discipline) {
		dev_err(&card->gdev->dev, "There is no kernel module to "
			"support discipline %d\n", discipline);
		return -EINVAL;
	}

	card->options.layer = discipline;
	return 0;
}
5510
5511 void qeth_core_free_discipline(struct qeth_card *card)
5512 {
5513         if (IS_LAYER2(card))
5514                 symbol_put(qeth_l2_discipline);
5515         else
5516                 symbol_put(qeth_l3_discipline);
5517         card->options.layer = QETH_DISCIPLINE_UNDETERMINED;
5518         card->discipline = NULL;
5519 }
5520
/* Default device type: used while no discipline is enforced. */
const struct device_type qeth_generic_devtype = {
	.name = "qeth_generic",
	.groups = qeth_generic_attr_groups,
};
EXPORT_SYMBOL_GPL(qeth_generic_devtype);

/* Device type for OSN cards, which expose a reduced attribute set. */
static const struct device_type qeth_osn_devtype = {
	.name = "qeth_osn",
	.groups = qeth_osn_attr_groups,
};
5531
#define DBF_NAME_LEN    20

/* Tracks one per-card debug area so it can be re-used when the same
 * device is probed again (see qeth_get_dbf_entry()).
 */
struct qeth_dbf_entry {
	char dbf_name[DBF_NAME_LEN];	/* lookup key */
	debug_info_t *dbf_info;		/* registered debug area handle */
	struct list_head dbf_list;	/* link in qeth_dbf_list */
};

/* Global registry of per-card debug areas, guarded by its mutex. */
static LIST_HEAD(qeth_dbf_list);
static DEFINE_MUTEX(qeth_dbf_list_mutex);
5542
5543 static debug_info_t *qeth_get_dbf_entry(char *name)
5544 {
5545         struct qeth_dbf_entry *entry;
5546         debug_info_t *rc = NULL;
5547
5548         mutex_lock(&qeth_dbf_list_mutex);
5549         list_for_each_entry(entry, &qeth_dbf_list, dbf_list) {
5550                 if (strcmp(entry->dbf_name, name) == 0) {
5551                         rc = entry->dbf_info;
5552                         break;
5553                 }
5554         }
5555         mutex_unlock(&qeth_dbf_list_mutex);
5556         return rc;
5557 }
5558
5559 static int qeth_add_dbf_entry(struct qeth_card *card, char *name)
5560 {
5561         struct qeth_dbf_entry *new_entry;
5562
5563         card->debug = debug_register(name, 2, 1, 8);
5564         if (!card->debug) {
5565                 QETH_DBF_TEXT_(SETUP, 2, "%s", "qcdbf");
5566                 goto err;
5567         }
5568         if (debug_register_view(card->debug, &debug_hex_ascii_view))
5569                 goto err_dbg;
5570         new_entry = kzalloc(sizeof(struct qeth_dbf_entry), GFP_KERNEL);
5571         if (!new_entry)
5572                 goto err_dbg;
5573         strncpy(new_entry->dbf_name, name, DBF_NAME_LEN);
5574         new_entry->dbf_info = card->debug;
5575         mutex_lock(&qeth_dbf_list_mutex);
5576         list_add(&new_entry->dbf_list, &qeth_dbf_list);
5577         mutex_unlock(&qeth_dbf_list_mutex);
5578
5579         return 0;
5580
5581 err_dbg:
5582         debug_unregister(card->debug);
5583 err:
5584         return -ENOMEM;
5585 }
5586
5587 static void qeth_clear_dbf_list(void)
5588 {
5589         struct qeth_dbf_entry *entry, *tmp;
5590
5591         mutex_lock(&qeth_dbf_list_mutex);
5592         list_for_each_entry_safe(entry, tmp, &qeth_dbf_list, dbf_list) {
5593                 list_del(&entry->dbf_list);
5594                 debug_unregister(entry->dbf_info);
5595                 kfree(entry);
5596         }
5597         mutex_unlock(&qeth_dbf_list_mutex);
5598 }
5599
/* Allocate and pre-initialize a net_device matching the card type.
 * IQD cards get a multi-queue "hsi%d" device, OSN cards a bare "osn%d"
 * device, OSM a single-queue ethernet device, and all other types a
 * multi-queue ethernet device.
 * Returns the netdev, or NULL on allocation failure.
 */
static struct net_device *qeth_alloc_netdev(struct qeth_card *card)
{
	struct net_device *dev;

	switch (card->info.type) {
	case QETH_CARD_TYPE_IQD:
		dev = alloc_netdev_mqs(0, "hsi%d", NET_NAME_UNKNOWN,
				       ether_setup, QETH_MAX_QUEUES, 1);
		break;
	case QETH_CARD_TYPE_OSM:
		dev = alloc_etherdev(0);
		break;
	case QETH_CARD_TYPE_OSN:
		dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN, ether_setup);
		break;
	default:
		dev = alloc_etherdev_mqs(0, QETH_MAX_QUEUES, 1);
	}

	if (!dev)
		return NULL;

	dev->ml_priv = card;
	dev->watchdog_timeo = QETH_TX_TIMEOUT;
	dev->min_mtu = IS_OSN(card) ? 64 : 576;
	/* initialized when device first goes online: */
	dev->max_mtu = 0;
	dev->mtu = 0;
	SET_NETDEV_DEV(dev, &card->gdev->dev);
	netif_carrier_off(dev);

	if (IS_OSN(card)) {
		dev->ethtool_ops = &qeth_osn_ethtool_ops;
	} else {
		dev->ethtool_ops = &qeth_ethtool_ops;
		dev->priv_flags &= ~IFF_TX_SKB_SHARING;
		dev->hw_features |= NETIF_F_SG;
		dev->vlan_features |= NETIF_F_SG;
		if (IS_IQD(card)) {
			dev->features |= NETIF_F_SG;
			/* IQD starts out with the minimal unicast TXQ count */
			if (netif_set_real_num_tx_queues(dev,
							 QETH_IQD_MIN_TXQ)) {
				free_netdev(dev);
				return NULL;
			}
		}
	}

	return dev;
}
5650
5651 struct net_device *qeth_clone_netdev(struct net_device *orig)
5652 {
5653         struct net_device *clone = qeth_alloc_netdev(orig->ml_priv);
5654
5655         if (!clone)
5656                 return NULL;
5657
5658         clone->dev_port = orig->dev_port;
5659         return clone;
5660 }
5661
/* Probe callback for a new qeth ccwgroup device: allocate the card and
 * its netdev, set up debug facilities, read channel-path capabilities,
 * and - if a discipline is enforced for this card type - load and set up
 * that discipline. Error handling unwinds in strict reverse order.
 * Returns 0 on success, negative errno otherwise.
 */
static int qeth_core_probe_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card;
	struct device *dev;
	int rc;
	enum qeth_discipline_id enforced_disc;
	char dbf_name[DBF_NAME_LEN];

	QETH_DBF_TEXT(SETUP, 2, "probedev");

	/* hold a device reference for the lifetime of the card */
	dev = &gdev->dev;
	if (!get_device(dev))
		return -ENODEV;

	QETH_DBF_TEXT_(SETUP, 2, "%s", dev_name(&gdev->dev));

	card = qeth_alloc_card(gdev);
	if (!card) {
		QETH_DBF_TEXT_(SETUP, 2, "1err%d", -ENOMEM);
		rc = -ENOMEM;
		goto err_dev;
	}

	/* re-use a debug area from an earlier probe of this device, if any */
	snprintf(dbf_name, sizeof(dbf_name), "qeth_card_%s",
		dev_name(&gdev->dev));
	card->debug = qeth_get_dbf_entry(dbf_name);
	if (!card->debug) {
		rc = qeth_add_dbf_entry(card, dbf_name);
		if (rc)
			goto err_card;
	}

	qeth_setup_card(card);
	card->dev = qeth_alloc_netdev(card);
	if (!card->dev) {
		rc = -ENOMEM;
		goto err_card;
	}

	card->qdio.no_out_queues = card->dev->num_tx_queues;
	rc = qeth_update_from_chp_desc(card);
	if (rc)
		goto err_chp_desc;
	qeth_determine_capabilities(card);
	qeth_set_blkt_defaults(card);

	enforced_disc = qeth_enforce_discipline(card);
	switch (enforced_disc) {
	case QETH_DISCIPLINE_UNDETERMINED:
		/* discipline is chosen later (sysfs or set_online) */
		gdev->dev.type = &qeth_generic_devtype;
		break;
	default:
		card->info.layer_enforced = true;
		rc = qeth_core_load_discipline(card, enforced_disc);
		if (rc)
			goto err_load;

		gdev->dev.type = IS_OSN(card) ? &qeth_osn_devtype :
						card->discipline->devtype;
		rc = card->discipline->setup(card->gdev);
		if (rc)
			goto err_disc;
		break;
	}

	return 0;

err_disc:
	qeth_core_free_discipline(card);
err_load:
err_chp_desc:
	free_netdev(card->dev);
err_card:
	qeth_core_free_card(card);
err_dev:
	put_device(dev);
	return rc;
}
5740
/* Remove callback: tear down the discipline (if bound), free the netdev
 * and the card, and drop the device reference taken at probe time.
 */
static void qeth_core_remove_device(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	QETH_CARD_TEXT(card, 2, "removedv");

	if (card->discipline) {
		card->discipline->remove(gdev);
		qeth_core_free_discipline(card);
	}

	free_netdev(card->dev);
	qeth_core_free_card(card);
	put_device(&gdev->dev);
}
5756
/* set_online callback: if no discipline is bound yet, pick a default
 * (L3 for IQD, L2 otherwise), load and set it up, then delegate to the
 * discipline's own set_online handler.
 * Returns 0 on success or a negative errno.
 */
static int qeth_core_set_online(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	int rc = 0;
	enum qeth_discipline_id def_discipline;

	if (!card->discipline) {
		def_discipline = IS_IQD(card) ? QETH_DISCIPLINE_LAYER3 :
						QETH_DISCIPLINE_LAYER2;
		rc = qeth_core_load_discipline(card, def_discipline);
		if (rc)
			goto err;
		rc = card->discipline->setup(card->gdev);
		if (rc) {
			/* unwind the module reference on setup failure */
			qeth_core_free_discipline(card);
			goto err;
		}
	}
	rc = card->discipline->set_online(gdev);
err:
	return rc;
}
5779
5780 static int qeth_core_set_offline(struct ccwgroup_device *gdev)
5781 {
5782         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5783         return card->discipline->set_offline(gdev);
5784 }
5785
/* shutdown callback: quiesce worker threads, disarm the HW trap if it was
 * armed, clear the QDIO queues and release the QDIO data device.
 */
static void qeth_core_shutdown(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);
	/* forbid any new worker threads from starting */
	qeth_set_allowed_threads(card, 0, 1);
	if ((gdev->state == CCWGROUP_ONLINE) && card->info.hwtrap)
		qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
	qeth_qdio_clear_card(card, 0);
	qeth_drain_output_queues(card);
	qdio_free(CARD_DDEV(card));
}
5796
/* Power-management freeze callback: stop worker threads, wait until all
 * running threads have finished, then take the device offline unless it
 * already is. Always returns 0.
 */
static int qeth_suspend(struct ccwgroup_device *gdev)
{
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);

	qeth_set_allowed_threads(card, 0, 1);
	wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0);
	if (gdev->state == CCWGROUP_OFFLINE)
		return 0;

	card->discipline->set_offline(gdev);
	return 0;
}
5809
5810 static int qeth_resume(struct ccwgroup_device *gdev)
5811 {
5812         struct qeth_card *card = dev_get_drvdata(&gdev->dev);
5813         int rc;
5814
5815         rc = card->discipline->set_online(gdev);
5816
5817         qeth_set_allowed_threads(card, 0xffffffff, 0);
5818         if (rc)
5819                 dev_warn(&card->gdev->dev, "The qeth device driver failed to recover an error on the device\n");
5820         return rc;
5821 }
5822
5823 static ssize_t group_store(struct device_driver *ddrv, const char *buf,
5824                            size_t count)
5825 {
5826         int err;
5827
5828         err = ccwgroup_create_dev(qeth_core_root_dev, to_ccwgroupdrv(ddrv), 3,
5829                                   buf);
5830
5831         return err ? err : count;
5832 }
5833 static DRIVER_ATTR_WO(group);
5834
/* Driver-level sysfs attributes: currently only the "group" file used to
 * create new devices.
 */
static struct attribute *qeth_drv_attrs[] = {
	&driver_attr_group.attr,
	NULL,
};
static struct attribute_group qeth_drv_attr_group = {
	.attrs = qeth_drv_attrs,
};
static const struct attribute_group *qeth_drv_attr_groups[] = {
	&qeth_drv_attr_group,
	NULL,
};
5846
/* ccwgroup driver glue: wires the core probe/remove/online/offline and
 * power-management callbacks into the common ccwgroup framework.
 */
static struct ccwgroup_driver qeth_core_ccwgroup_driver = {
	.driver = {
		.groups = qeth_drv_attr_groups,
		.owner = THIS_MODULE,
		.name = "qeth",
	},
	.ccw_driver = &qeth_ccw_driver,
	.setup = qeth_core_probe_device,
	.remove = qeth_core_remove_device,
	.set_online = qeth_core_set_online,
	.set_offline = qeth_core_set_offline,
	.shutdown = qeth_core_shutdown,
	.prepare = NULL,
	.complete = NULL,
	.freeze = qeth_suspend,
	.thaw = qeth_resume,
	.restore = qeth_resume,
};
5865
5866 struct qeth_card *qeth_get_card_by_busid(char *bus_id)
5867 {
5868         struct ccwgroup_device *gdev;
5869         struct qeth_card *card;
5870
5871         gdev = get_ccwgroupdev_by_busid(&qeth_core_ccwgroup_driver, bus_id);
5872         if (!gdev)
5873                 return NULL;
5874
5875         card = dev_get_drvdata(&gdev->dev);
5876         put_device(&gdev->dev);
5877         return card;
5878 }
5879 EXPORT_SYMBOL_GPL(qeth_get_card_by_busid);
5880
/* Common ioctl handler for qeth netdevs: handles the qeth-private SNMP,
 * card-type and OAT queries plus basic MII ioctls, and forwards anything
 * else to the discipline's do_ioctl (if provided).
 * Note: SIOC_QETH_GET_CARD_TYPE returns 1/0 as its result, not an errno.
 */
int qeth_do_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct qeth_card *card = dev->ml_priv;
	struct mii_ioctl_data *mii_data;
	int rc = 0;

	if (!card)
		return -ENODEV;

	switch (cmd) {
	case SIOC_QETH_ADP_SET_SNMP_CONTROL:
		rc = qeth_snmp_command(card, rq->ifr_ifru.ifru_data);
		break;
	case SIOC_QETH_GET_CARD_TYPE:
		/* "real" HW port (OSD/OSM/OSX, not virtualized) -> 1 */
		if ((IS_OSD(card) || IS_OSM(card) || IS_OSX(card)) &&
		    !IS_VM_NIC(card))
			return 1;
		return 0;
	case SIOCGMIIPHY:
		mii_data = if_mii(rq);
		mii_data->phy_id = 0;
		break;
	case SIOCGMIIREG:
		mii_data = if_mii(rq);
		/* only the emulated PHY 0 is supported */
		if (mii_data->phy_id != 0)
			rc = -EINVAL;
		else
			mii_data->val_out = qeth_mdio_read(dev,
				mii_data->phy_id, mii_data->reg_num);
		break;
	case SIOC_QETH_QUERY_OAT:
		rc = qeth_query_oat_command(card, rq->ifr_ifru.ifru_data);
		break;
	default:
		if (card->discipline->do_ioctl)
			rc = card->discipline->do_ioctl(dev, rq, cmd);
		else
			rc = -EOPNOTSUPP;
	}
	if (rc)
		QETH_CARD_TEXT_(card, 2, "ioce%x", rc);
	return rc;
}
EXPORT_SYMBOL_GPL(qeth_do_ioctl);
5925
5926 static int qeth_start_csum_cb(struct qeth_card *card, struct qeth_reply *reply,
5927                               unsigned long data)
5928 {
5929         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
5930         u32 *features = reply->param;
5931
5932         if (qeth_setassparms_inspect_rc(cmd))
5933                 return -EIO;
5934
5935         *features = cmd->data.setassparms.data.flags_32bit;
5936         return 0;
5937 }
5938
5939 static int qeth_set_csum_off(struct qeth_card *card, enum qeth_ipa_funcs cstype,
5940                              enum qeth_prot_versions prot)
5941 {
5942         return qeth_send_simple_setassparms_prot(card, cstype, IPA_CMD_ASS_STOP,
5943                                                  NULL, prot);
5944 }
5945
/* Enable a checksum assist in two IPA steps: ASS_START to learn what the
 * card supports, then ASS_ENABLE with the required feature bits. Any
 * failure after START rolls the assist back off again.
 * Returns 0 on success, -ENOMEM/-EOPNOTSUPP or an IPA error otherwise.
 */
static int qeth_set_csum_on(struct qeth_card *card, enum qeth_ipa_funcs cstype,
			    enum qeth_prot_versions prot)
{
	u32 required_features = QETH_IPA_CHECKSUM_UDP | QETH_IPA_CHECKSUM_TCP;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	u32 features;
	int rc;

	/* some L3 HW requires combined L3+L4 csum offload: */
	if (IS_LAYER3(card) && prot == QETH_PROT_IPV4 &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		required_features |= QETH_IPA_CHECKSUM_IP_HDR;

	/* step 1: START the assist and query the supported feature bits */
	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_START, 0,
				       prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_csum_cb, &features);
	if (rc)
		return rc;

	if ((required_features & features) != required_features) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	/* step 2: ENABLE the required feature bits */
	iob = qeth_get_setassparms_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(flags_32bit),
				       prot);
	if (!iob) {
		qeth_set_csum_off(card, cstype, prot);
		return -ENOMEM;
	}

	/* request LP2LP csum only when the card offers it */
	if (features & QETH_IPA_CHECKSUM_LP2LP)
		required_features |= QETH_IPA_CHECKSUM_LP2LP;
	__ipa_cmd(iob)->data.setassparms.data.flags_32bit = required_features;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_csum_off(card, cstype, prot);
		return rc;
	}

	/* verify that everything we asked for is supported AND enabled */
	if (!qeth_ipa_caps_supported(&caps, required_features) ||
	    !qeth_ipa_caps_enabled(&caps, required_features)) {
		qeth_set_csum_off(card, cstype, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "HW Checksumming (%sbound IPv%d) enabled\n",
		 cstype == IPA_INBOUND_CHECKSUM ? "in" : "out", prot);
	if (!qeth_ipa_caps_enabled(&caps, QETH_IPA_CHECKSUM_LP2LP) &&
	    cstype == IPA_OUTBOUND_CHECKSUM)
		dev_warn(&card->gdev->dev,
			 "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
			 QETH_CARD_IFNAME(card));
	return 0;
}
6006
6007 static int qeth_set_ipa_csum(struct qeth_card *card, bool on, int cstype,
6008                              enum qeth_prot_versions prot)
6009 {
6010         return on ? qeth_set_csum_on(card, cstype, prot) :
6011                     qeth_set_csum_off(card, cstype, prot);
6012 }
6013
6014 static int qeth_start_tso_cb(struct qeth_card *card, struct qeth_reply *reply,
6015                              unsigned long data)
6016 {
6017         struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
6018         struct qeth_tso_start_data *tso_data = reply->param;
6019
6020         if (qeth_setassparms_inspect_rc(cmd))
6021                 return -EIO;
6022
6023         tso_data->mss = cmd->data.setassparms.data.tso.mss;
6024         tso_data->supported = cmd->data.setassparms.data.tso.supported;
6025         return 0;
6026 }
6027
6028 static int qeth_set_tso_off(struct qeth_card *card,
6029                             enum qeth_prot_versions prot)
6030 {
6031         return qeth_send_simple_setassparms_prot(card, IPA_OUTBOUND_TSO,
6032                                                  IPA_CMD_ASS_STOP, NULL, prot);
6033 }
6034
/* Enable outbound TSO in two IPA steps: ASS_START to learn MSS and
 * capabilities, then ASS_ENABLE requesting TCP large-send. Any failure
 * after START rolls TSO back off again.
 * Returns 0 on success, -ENOMEM/-EOPNOTSUPP or an IPA error otherwise.
 */
static int qeth_set_tso_on(struct qeth_card *card,
			   enum qeth_prot_versions prot)
{
	struct qeth_tso_start_data tso_data;
	struct qeth_cmd_buffer *iob;
	struct qeth_ipa_caps caps;
	int rc;

	/* step 1: START the assist and query MSS + supported modes */
	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_START, 0, prot);
	if (!iob)
		return -ENOMEM;

	rc = qeth_send_ipa_cmd(card, iob, qeth_start_tso_cb, &tso_data);
	if (rc)
		return rc;

	if (!tso_data.mss || !(tso_data.supported & QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	/* step 2: ENABLE TCP large-send */
	iob = qeth_get_setassparms_cmd(card, IPA_OUTBOUND_TSO,
				       IPA_CMD_ASS_ENABLE,
				       SETASS_DATA_SIZEOF(caps), prot);
	if (!iob) {
		qeth_set_tso_off(card, prot);
		return -ENOMEM;
	}

	/* enable TSO capability */
	__ipa_cmd(iob)->data.setassparms.data.caps.enabled =
		QETH_IPA_LARGE_SEND_TCP;
	rc = qeth_send_ipa_cmd(card, iob, qeth_setassparms_get_caps_cb, &caps);
	if (rc) {
		qeth_set_tso_off(card, prot);
		return rc;
	}

	/* verify the card actually supports and enabled TCP large-send */
	if (!qeth_ipa_caps_supported(&caps, QETH_IPA_LARGE_SEND_TCP) ||
	    !qeth_ipa_caps_enabled(&caps, QETH_IPA_LARGE_SEND_TCP)) {
		qeth_set_tso_off(card, prot);
		return -EOPNOTSUPP;
	}

	dev_info(&card->gdev->dev, "TSOv%u enabled (MSS: %u)\n", prot,
		 tso_data.mss);
	return 0;
}
6084
6085 static int qeth_set_ipa_tso(struct qeth_card *card, bool on,
6086                             enum qeth_prot_versions prot)
6087 {
6088         return on ? qeth_set_tso_on(card, prot) : qeth_set_tso_off(card, prot);
6089 }
6090
/* Toggle RX checksum offload for IPv4 and IPv6, combining the two
 * results: enabling succeeds when at least one assist is active;
 * disabling fails when either assist could not be stopped.
 */
static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
{
	/* if IPv4 isn't offered, treat an enable request as unsupported */
	int rc_ipv4 = (on) ? -EOPNOTSUPP : 0;
	int rc_ipv6;

	if (qeth_is_supported(card, IPA_INBOUND_CHECKSUM))
		rc_ipv4 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
					    QETH_PROT_IPV4);
	if (!qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		/* no/one Offload Assist available, so the rc is trivial */
		return rc_ipv4;

	rc_ipv6 = qeth_set_ipa_csum(card, on, IPA_INBOUND_CHECKSUM,
				    QETH_PROT_IPV6);

	if (on)
		/* enable: success if any Assist is active */
		return (rc_ipv6) ? rc_ipv4 : 0;

	/* disable: failure if any Assist is still active */
	return (rc_ipv6) ? rc_ipv6 : rc_ipv4;
}
6113
6114 /**
6115  * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
6116  * @dev:        a net_device
6117  */
6118 void qeth_enable_hw_features(struct net_device *dev)
6119 {
6120         struct qeth_card *card = dev->ml_priv;
6121         netdev_features_t features;
6122
6123         features = dev->features;
6124         /* force-off any feature that might need an IPA sequence.
6125          * netdev_update_features() will restart them.
6126          */
6127         dev->features &= ~dev->hw_features;
6128         /* toggle VLAN filter, so that VIDs are re-programmed: */
6129         if (IS_LAYER2(card) && IS_VM_NIC(card)) {
6130                 dev->features &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
6131                 dev->wanted_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
6132         }
6133         netdev_update_features(dev);
6134         if (features != dev->features)
6135                 dev_warn(&card->gdev->dev,
6136                          "Device recovery failed to restore all offload features\n");
6137 }
6138 EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
6139
/* ndo_set_features handler: negotiate each changed offload with the card.
 * `changed` starts as the full requested delta; every offload that fails
 * to toggle is XORed back out, so at the end `changed` holds only the
 * deltas that actually took effect and is applied to dev->features.
 * Returns 0 when all requested changes succeeded, -EIO otherwise.
 */
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;
	netdev_features_t changed = dev->features ^ features;
	int rc = 0;

	QETH_CARD_TEXT(card, 2, "setfeat");
	QETH_CARD_HEX(card, 2, &features, sizeof(features));

	if ((changed & NETIF_F_IP_CSUM)) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IP_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_IP_CSUM;
	}
	if (changed & NETIF_F_IPV6_CSUM) {
		rc = qeth_set_ipa_csum(card, features & NETIF_F_IPV6_CSUM,
				       IPA_OUTBOUND_CHECKSUM, QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_IPV6_CSUM;
	}
	if (changed & NETIF_F_RXCSUM) {
		rc = qeth_set_ipa_rx_csum(card, features & NETIF_F_RXCSUM);
		if (rc)
			changed ^= NETIF_F_RXCSUM;
	}
	if (changed & NETIF_F_TSO) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO,
				      QETH_PROT_IPV4);
		if (rc)
			changed ^= NETIF_F_TSO;
	}
	if (changed & NETIF_F_TSO6) {
		rc = qeth_set_ipa_tso(card, features & NETIF_F_TSO6,
				      QETH_PROT_IPV6);
		if (rc)
			changed ^= NETIF_F_TSO6;
	}

	/* everything changed successfully? */
	if ((dev->features ^ features) == changed)
		return 0;
	/* something went wrong. save changed features and return error */
	dev->features ^= changed;
	return -EIO;
}
EXPORT_SYMBOL_GPL(qeth_set_features);
6187
/* ndo_fix_features handler: mask out every offload the card's IPA
 * assists do not support, so the stack never requests them.
 */
netdev_features_t qeth_fix_features(struct net_device *dev,
				    netdev_features_t features)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 2, "fixfeat");
	if (!qeth_is_supported(card, IPA_OUTBOUND_CHECKSUM))
		features &= ~NETIF_F_IP_CSUM;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_IPV6_CSUM;
	/* RX csum stays available if either protocol version supports it */
	if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM) &&
	    !qeth_is_supported6(card, IPA_INBOUND_CHECKSUM_V6))
		features &= ~NETIF_F_RXCSUM;
	if (!qeth_is_supported(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO;
	if (!qeth_is_supported6(card, IPA_OUTBOUND_TSO))
		features &= ~NETIF_F_TSO6;

	QETH_CARD_HEX(card, 2, &features, sizeof(features));
	return features;
}
EXPORT_SYMBOL_GPL(qeth_fix_features);
6210
/* ndo_features_check handler: decide per-skb whether to keep SG enabled
 * for GSO traffic (see the rationale in the comment below).
 */
netdev_features_t qeth_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	/* GSO segmentation builds skbs with
	 *	a (small) linear part for the headers, and
	 *	page frags for the data.
	 * Compared to a linear skb, the header-only part consumes an
	 * additional buffer element. This reduces buffer utilization, and
	 * hurts throughput. So compress small segments into one element.
	 */
	if (netif_needs_gso(skb, features)) {
		/* match skb_segment(): */
		unsigned int doffset = skb->data - skb_mac_header(skb);
		unsigned int hsize = skb_shinfo(skb)->gso_size;
		unsigned int hroom = skb_headroom(skb);

		/* linearize only if resulting skb allocations are order-0: */
		if (SKB_DATA_ALIGN(hroom + doffset + hsize) <= SKB_MAX_HEAD(0))
			features &= ~NETIF_F_SG;
	}

	return vlan_features_check(skb, features);
}
EXPORT_SYMBOL_GPL(qeth_features_check);
6236
6237 void qeth_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
6238 {
6239         struct qeth_card *card = dev->ml_priv;
6240         struct qeth_qdio_out_q *queue;
6241         unsigned int i;
6242
6243         QETH_CARD_TEXT(card, 5, "getstat");
6244
6245         stats->rx_packets = card->stats.rx_packets;
6246         stats->rx_bytes = card->stats.rx_bytes;
6247         stats->rx_errors = card->stats.rx_errors;
6248         stats->rx_dropped = card->stats.rx_dropped;
6249         stats->multicast = card->stats.rx_multicast;
6250
6251         for (i = 0; i < card->qdio.no_out_queues; i++) {
6252                 queue = card->qdio.out_qs[i];
6253
6254                 stats->tx_packets += queue->stats.tx_packets;
6255                 stats->tx_bytes += queue->stats.tx_bytes;
6256                 stats->tx_errors += queue->stats.tx_errors;
6257                 stats->tx_dropped += queue->stats.tx_dropped;
6258         }
6259 }
6260 EXPORT_SYMBOL_GPL(qeth_get_stats64);
6261
6262 u16 qeth_iqd_select_queue(struct net_device *dev, struct sk_buff *skb,
6263                           u8 cast_type, struct net_device *sb_dev)
6264 {
6265         if (cast_type != RTN_UNICAST)
6266                 return QETH_IQD_MCAST_TXQ;
6267         return QETH_IQD_MIN_UCAST_TXQ;
6268 }
6269 EXPORT_SYMBOL_GPL(qeth_iqd_select_queue);
6270
/* ndo_open handler: switch QDIO out of polling-IRQ mode, start the TX
 * queues and bring up the RX NAPI instance (plus one TX NAPI per output
 * queue on IQD). Returns 0 on success, -EIO if QDIO refuses.
 */
int qeth_open(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethopen");

	if (qdio_stop_irq(CARD_DDEV(card), 0) < 0)
		return -EIO;

	card->data.state = CH_STATE_UP;
	netif_tx_start_all_queues(dev);

	napi_enable(&card->napi);
	/* keep NAPI from running until all instances are scheduled */
	local_bh_disable();
	napi_schedule(&card->napi);
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		/* IQD uses one TX NAPI instance per output queue */
		qeth_for_each_output_queue(card, queue, i) {
			netif_tx_napi_add(dev, &queue->napi, qeth_tx_poll,
					  QETH_NAPI_WEIGHT);
			napi_enable(&queue->napi);
			napi_schedule(&queue->napi);
		}
	}
	/* kick-start the NAPI softirq: */
	local_bh_enable();
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_open);
6302
/* .ndo_stop callback: quiesce TX and tear down the NAPI instance(s).
 * The ordering below matters: per-queue NAPIs must be disabled before TX
 * is stopped (xmit may still reference queue->napi), and only removed
 * afterwards. Always returns 0.
 */
int qeth_stop(struct net_device *dev)
{
	struct qeth_card *card = dev->ml_priv;

	QETH_CARD_TEXT(card, 4, "qethstop");
	if (IS_IQD(card)) {
		struct qeth_qdio_out_q *queue;
		unsigned int i;

		/* Quiesce the NAPI instances: */
		qeth_for_each_output_queue(card, queue, i) {
			napi_disable(&queue->napi);
			/* Ensure the queue's completion timer is idle too. */
			del_timer_sync(&queue->timer);
		}

		/* Stop .ndo_start_xmit, might still access queue->napi. */
		netif_tx_disable(dev);

		/* Queues may get re-allocated, so remove the NAPIs here. */
		qeth_for_each_output_queue(card, queue, i)
			netif_napi_del(&queue->napi);
	} else {
		/* Non-IQD devices have no per-queue NAPI; just stop TX. */
		netif_tx_disable(dev);
	}

	/* Finally stop the card-wide RX NAPI instance. */
	napi_disable(&card->napi);
	return 0;
}
EXPORT_SYMBOL_GPL(qeth_stop);
6332
/* Module init: set up debug views, the root sysfs device, the two slab
 * caches and register the CCW/ccwgroup drivers. On any failure, unwind
 * everything acquired so far via the reverse-order goto chain and return
 * the first error code.
 */
static int __init qeth_core_init(void)
{
	int rc;

	pr_info("loading core functions\n");

	rc = qeth_register_dbf_views();
	if (rc)
		goto dbf_err;
	/* Parent device for all qeth ccwgroup devices in sysfs. */
	qeth_core_root_dev = root_device_register("qeth");
	rc = PTR_ERR_OR_ZERO(qeth_core_root_dev);
	if (rc)
		goto register_err;
	/* Cache for TX headers; power-of-two alignment keeps each object
	 * from straddling a page boundary.
	 */
	qeth_core_header_cache =
		kmem_cache_create("qeth_hdr", QETH_HDR_CACHE_OBJ_SIZE,
				  roundup_pow_of_two(QETH_HDR_CACHE_OBJ_SIZE),
				  0, NULL);
	if (!qeth_core_header_cache) {
		rc = -ENOMEM;
		goto slab_err;
	}
	/* Cache for outbound QDIO buffer descriptors. */
	qeth_qdio_outbuf_cache = kmem_cache_create("qeth_buf",
			sizeof(struct qeth_qdio_out_buffer), 0, 0, NULL);
	if (!qeth_qdio_outbuf_cache) {
		rc = -ENOMEM;
		goto cqslab_err;
	}
	rc = ccw_driver_register(&qeth_ccw_driver);
	if (rc)
		goto ccw_err;
	rc = ccwgroup_driver_register(&qeth_core_ccwgroup_driver);
	if (rc)
		goto ccwgroup_err;

	return 0;

ccwgroup_err:
	ccw_driver_unregister(&qeth_ccw_driver);
ccw_err:
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
cqslab_err:
	kmem_cache_destroy(qeth_core_header_cache);
slab_err:
	root_device_unregister(qeth_core_root_dev);
register_err:
	qeth_unregister_dbf_views();
dbf_err:
	pr_err("Initializing the qeth device driver failed\n");
	return rc;
}
6383
/* Module exit: release all resources acquired in qeth_core_init(), in
 * reverse order of acquisition.
 */
static void __exit qeth_core_exit(void)
{
	/* Drop any leftover per-device debug entries first. */
	qeth_clear_dbf_list();
	ccwgroup_driver_unregister(&qeth_core_ccwgroup_driver);
	ccw_driver_unregister(&qeth_ccw_driver);
	kmem_cache_destroy(qeth_qdio_outbuf_cache);
	kmem_cache_destroy(qeth_core_header_cache);
	root_device_unregister(qeth_core_root_dev);
	qeth_unregister_dbf_views();
	pr_info("core functions removed\n");
}
6395
/* Module entry/exit hooks and metadata. */
module_init(qeth_core_init);
module_exit(qeth_core_exit);
MODULE_AUTHOR("Frank Blaschka <frank.blaschka@de.ibm.com>");
MODULE_DESCRIPTION("qeth core functions");
MODULE_LICENSE("GPL");