/**********************************************************************
* Author: Cavium, Inc.
*
* Contact: support@cavium.com
*          Please include "LiquidIO" in the subject.
*
* Copyright (c) 2003-2015 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT.  See the GNU General Public License for more
* details.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium, Inc. for more information.
**********************************************************************/
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/ptp_clock_kernel.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
/*(DEBLOBBED)*/

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
                 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");
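
/* With 0644 permissions, ddr_timeout is also writable at runtime (e.g. via
 * /sys/module/liquidio/parameters/ddr_timeout), which is how a value of 0
 * can later be raised to non-zero to kick off the DDR check described above.
 */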

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

#define INCR_INSTRQUEUE_PKT_COUNT(octeon_dev_ptr, iq_no, field, count)  \
        (octeon_dev_ptr->instr_queue[iq_no]->stats.field += count)
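
/* Illustrative use of the macro above, mirroring how the driver bumps
 * per-queue stats later in this file:
 *
 *      INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_no, tx_restart, 1);
 */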

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN];
module_param_string(fw_type, fw_type, sizeof(fw_type), 0000);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\"");

static int conf_type;
module_param(conf_type, int, 0);
MODULE_PARM_DESC(conf_type, "Select Octeon configuration: 0 (default), 1 (OVS)");

static int ptp_enable = 1;

/* Bit mask values for lio->ifstate */
#define   LIO_IFSTATE_DROQ_OPS             0x01
#define   LIO_IFSTATE_REGISTERED           0x02
#define   LIO_IFSTATE_RUNNING              0x04
#define   LIO_IFSTATE_RX_TIMESTAMP_ENABLED 0x08

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000

struct liquidio_if_cfg_context {
        int octeon_id;

        wait_queue_head_t wc;

        int cond;
};

struct liquidio_if_cfg_resp {
        u64 rh;
        struct liquidio_if_cfg_info cfg_info;
        u64 status;
};

struct liquidio_rx_ctl_context {
        int octeon_id;

        wait_queue_head_t wc;

        int cond;
};

struct oct_link_status_resp {
        u64 rh;
        struct oct_link_info link_info;
        u64 status;
};

struct oct_timestamp_resp {
        u64 rh;
        u64 timestamp;
        u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
        u64 u64;
        struct {
#ifdef __BIG_ENDIAN_BITFIELD
                u16 gso_size;
                u16 gso_segs;
                u32 reserved;
#else
                u32 reserved;
                u16 gso_segs;
                u16 gso_size;
#endif
        } s;
};
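
/* The transmit path packs its GSO metadata into this one 64-bit word; the
 * endian-specific field order above keeps the wire layout stable. A sketch
 * of the intended use (the actual fill happens in the transmit routine,
 * outside this excerpt):
 *
 *      union tx_info tx;
 *
 *      tx.u64 = 0;
 *      tx.s.gso_size = skb_shinfo(skb)->gso_size;
 *      tx.s.gso_segs = skb_shinfo(skb)->gso_segs;
 */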

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
        (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
        /** List manipulation. Next and prev pointers. */
        struct list_head list;

        /** Size of the gather component at sg in bytes. */
        int sg_size;

        /** Number of bytes that sg was adjusted to make it 8B-aligned. */
        int adjust;

        /** Gather component that can accommodate max sized fragment list
         *  received from the IP layer.
         */
        struct octeon_sg_entry *sg;

        u64 sg_dma_ptr;
};

struct handshake {
        struct completion init;
        struct completion started;
        struct pci_dev *pci_dev;
        int init_ok;
        int started_ok;
};

struct octeon_device_priv {
        /** Tasklet structures for this device. */
        struct tasklet_struct droq_tasklet;
        unsigned long napi_mask;
};

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
                          const struct pci_device_id *ent);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

static void octeon_droq_bh(unsigned long pdev)
{
        int q_no;
        int reschedule = 0;
        struct octeon_device *oct = (struct octeon_device *)pdev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        /* for (q_no = 0; q_no < oct->num_oqs; q_no++) { */
        for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
                if (!(oct->io_qmask.oq & (1ULL << q_no)))
                        continue;
                reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
                                                          MAX_PACKET_BUDGET);
                lio_enable_irq(oct->droq[q_no], NULL);

                if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
                        /* set time and cnt interrupt thresholds for this DROQ
                         * for NAPI
                         */
                        int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
                            0x5700000040ULL);
                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
                }
        }

        if (reschedule)
                tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        int retry = 100, pkt_cnt = 0, pending_pkts = 0;
        int i;

        do {
                pending_pkts = 0;

                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & (1ULL << i)))
                                continue;
                        pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
                }
                if (pkt_cnt > 0) {
                        pending_pkts += pkt_cnt;
                        tasklet_schedule(&oct_priv->droq_tasklet);
                }
                pkt_cnt = 0;
                schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
        if ((oct->chip_id == OCTEON_CN66XX) ||
            (oct->chip_id == OCTEON_CN68XX)) {
                /* Reset the Enable bits for Input Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

                /* Reset the Enable bits for Output Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
        }
}

/**
 * \brief wait for all pending requests to complete
 * @param oct Pointer to Octeon device
 *
 * Called during shutdown sequence
 */
static int wait_for_pending_requests(struct octeon_device *oct)
{
        int i, pcount = 0;

        for (i = 0; i < 100; i++) {
                pcount =
                        atomic_read(&oct->response_list
                                [OCTEON_ORDERED_SC_LIST].pending_req_count);
                if (pcount)
                        schedule_timeout_uninterruptible(HZ / 10);
                else
                        break;
        }

        if (pcount)
                return 1;

        return 0;
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
        int i;

        /* Disable the input and output queues now. No more packets will
         * arrive from Octeon, but we should wait for all packet processing
         * to finish.
         */
        force_io_queues_off(oct);

        /* Give in-flight requests time to drain; the timeout argument to
         * schedule_timeout_uninterruptible() is in jiffies.
         */
        schedule_timeout_uninterruptible(100);

        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");

        /* Force all requests waiting to be fetched by OCTEON to complete. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                struct octeon_instr_queue *iq;

                if (!(oct->io_qmask.iq & (1ULL << i)))
                        continue;
                iq = oct->instr_queue[i];

                if (atomic_read(&iq->instr_pending)) {
                        spin_lock_bh(&iq->lock);
                        iq->fill_cnt = 0;
                        iq->octeon_read_index = iq->host_write_index;
                        iq->stats.instr_processed +=
                                atomic_read(&iq->instr_pending);
                        lio_process_iq_request_list(oct, iq, 0);
                        spin_unlock_bh(&iq->lock);
                }
        }

        /* Force all pending ordered list requests to time out. */
        lio_process_ordered_list(oct, 1);

        /* We do not need to wait for output queue packets to be processed. */
}
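
/* Quiescing thus proceeds in order: force the queues off, give in-flight
 * requests a grace period, flush anything still queued on the IQs, then
 * force whatever remains on the ordered list to time out.
 */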

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
        int pos = 0x100;
        u32 status, mask;

        pr_info("%s:\n", __func__);

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
        if (dev->error_state == pci_channel_io_normal)
                status &= ~mask;        /* Clear corresponding nonfatal bits */
        else
                status &= mask;         /* Clear corresponding fatal bits */
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
        /* No more instructions will be forwarded. */
        atomic_set(&oct->status, OCT_DEV_IN_RESET);

        pci_disable_device(oct->pci_dev);

        /* Disable interrupts  */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        pcierror_quiesce_device(oct);

        /* Release the interrupt line */
        free_irq(oct->pci_dev->irq, oct);

        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                pci_disable_msi(oct->pci_dev);

        dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                lio_get_state_string(&oct->status));

        /* cn63xx_cleanup_aer_uncorrect_error_status(oct->pci_dev); */
        /* making it a common function for all OCTEON models */
        cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
                                                     pci_channel_state_t state)
{
        struct octeon_device *oct = pci_get_drvdata(pdev);

        /* Non-correctable Non-fatal errors */
        if (state == pci_channel_io_normal) {
                dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
                cleanup_aer_uncorrect_error_status(oct->pci_dev);
                return PCI_ERS_RESULT_CAN_RECOVER;
        }

        /* Non-correctable Fatal errors */
        dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
        stop_pci_io(oct);
        /* Always return a DISCONNECT; there is no support for recovery,
         * only for a clean shutdown.
         */
        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief called when MMIO has been re-enabled after a PCI error
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
                                struct pci_dev *pdev __attribute__((unused)))
{
        /* We should never hit this since we never ask for a reset for a Fatal
         * Error. We always return DISCONNECT in io_error above.
         * But play safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
                                struct pci_dev *pdev __attribute__((unused)))
{
        /* We should never hit this since we never ask for a reset for a Fatal
         * Error. We always return DISCONNECT in io_error above.
         * But play safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
        /* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
                            pm_message_t state __attribute__((unused)))
{
        return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
        return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
        .error_detected = liquidio_pcie_error_detected,
        .mmio_enabled   = liquidio_pcie_mmio_enabled,
        .slot_reset     = liquidio_pcie_slot_reset,
        .resume         = liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
        {       /* 68xx */
                PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 66xx */
                PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 23xx pf */
                PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {
                0, 0, 0, 0, 0, 0, 0
        }
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
        .name           = "LiquidIO",
        .id_table       = liquidio_pci_tbl,
        .probe          = liquidio_probe,
        .remove         = liquidio_remove,
        .err_handler    = &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
        .suspend        = liquidio_suspend,
        .resume         = liquidio_resume,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
        return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
        pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief check interface state
 * @param lio per-network private data
 * @param state_flag flag state to check
 */
static inline int ifstate_check(struct lio *lio, int state_flag)
{
        return atomic_read(&lio->ifstate) & state_flag;
}

/**
 * \brief set interface state
 * @param lio per-network private data
 * @param state_flag flag state to set
 */
static inline void ifstate_set(struct lio *lio, int state_flag)
{
        atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) | state_flag));
}

/**
 * \brief clear interface state
 * @param lio per-network private data
 * @param state_flag flag state to clear
 */
static inline void ifstate_reset(struct lio *lio, int state_flag)
{
        atomic_set(&lio->ifstate, (atomic_read(&lio->ifstate) & ~(state_flag)));
}
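
/* Typical usage of the ifstate helpers with the LIO_IFSTATE_* bits defined
 * near the top of this file, e.g.:
 *
 *      ifstate_set(lio, LIO_IFSTATE_RUNNING);
 *      ...
 *      if (ifstate_check(lio, LIO_IFSTATE_RUNNING))
 *              ...
 */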

/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
        if (netif_is_multiqueue(netdev)) {
                int i;

                for (i = 0; i < netdev->num_tx_queues; i++)
                        netif_stop_subqueue(netdev, i);
        } else {
                netif_stop_queue(netdev);
        }
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
        if (netif_is_multiqueue(netdev)) {
                int i;

                for (i = 0; i < netdev->num_tx_queues; i++)
                        netif_start_subqueue(netdev, i);
        } else {
                netif_start_queue(netdev);
        }
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (netif_is_multiqueue(netdev)) {
                int i;

                for (i = 0; i < netdev->num_tx_queues; i++) {
                        int qno = lio->linfo.txpciq[i %
                                (lio->linfo.num_txpciq)].s.q_no;

                        if (__netif_subqueue_stopped(netdev, i)) {
                                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
                                                          tx_restart, 1);
                                netif_wake_subqueue(netdev, i);
                        }
                }
        } else {
                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
                                          tx_restart, 1);
                netif_wake_queue(netdev);
        }
}

/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
        txqs_stop(netdev);
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->linfo.link.s.link_up) {
                txqs_start(netdev);
                return;
        }
}

/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
        if (netif_is_multiqueue(netdev))
                netif_wake_subqueue(netdev, q);
        else
                netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
        if (netif_is_multiqueue(netdev))
                netif_stop_subqueue(netdev, q);
        else
                netif_stop_queue(netdev);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
        int ret_val = 0;

        if (netif_is_multiqueue(lio->netdev)) {
                int numqs = lio->netdev->num_tx_queues;
                int q, iq = 0;

                /* check each sub-queue state */
                for (q = 0; q < numqs; q++) {
                        iq = lio->linfo.txpciq[q %
                                (lio->linfo.num_txpciq)].s.q_no;
                        if (octnet_iq_is_full(lio->oct_dev, iq))
                                continue;
                        if (__netif_subqueue_stopped(lio->netdev, q)) {
                                wake_q(lio->netdev, q);
                                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
                                                          tx_restart, 1);
                                ret_val++;
                        }
                }
        } else {
                if (octnet_iq_is_full(lio->oct_dev, lio->txq))
                        return 0;
                wake_q(lio->netdev, lio->txq);
                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
                                          tx_restart, 1);
                ret_val = 1;
        }
        return ret_val;
}

/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
        struct list_head *node;

        if ((root->prev == root) && (root->next == root))
                node = NULL;
        else
                node = root->next;

        if (node)
                list_del(node);

        return node;
}
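
/* delete_glists() below drains a list with this helper using the usual
 * pop-until-empty pattern:
 *
 *      while ((node = list_delete_head(&lio->glist[i])) != NULL)
 *              ...     (tear down and free the entry)
 */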

/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
        struct octnic_gather *g;
        int i;

        if (!lio->glist)
                return;

        for (i = 0; i < lio->linfo.num_txpciq; i++) {
                do {
                        g = (struct octnic_gather *)
                                list_delete_head(&lio->glist[i]);
                        if (g) {
                                if (g->sg) {
                                        dma_unmap_single(&lio->oct_dev->
                                                         pci_dev->dev,
                                                         g->sg_dma_ptr,
                                                         g->sg_size,
                                                         DMA_TO_DEVICE);
                                        kfree((void *)((unsigned long)g->sg -
                                                       g->adjust));
                                }
                                kfree(g);
                        }
                } while (g);
        }

        kfree((void *)lio->glist);
}

/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
        int i, j;
        struct octnic_gather *g;

        lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
                                  GFP_KERNEL);
        if (!lio->glist_lock)
                return 1;

        lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
                             GFP_KERNEL);
        if (!lio->glist) {
                kfree((void *)lio->glist_lock);
                return 1;
        }

        for (i = 0; i < num_iqs; i++) {
                int numa_node = cpu_to_node(i % num_online_cpus());

                spin_lock_init(&lio->glist_lock[i]);

                INIT_LIST_HEAD(&lio->glist[i]);

                for (j = 0; j < lio->tx_qsize; j++) {
                        g = kzalloc_node(sizeof(*g), GFP_KERNEL,
                                         numa_node);
                        if (!g)
                                g = kzalloc(sizeof(*g), GFP_KERNEL);
                        if (!g)
                                break;

                        g->sg_size = ((ROUNDUP4(OCTNIC_MAX_SG) >> 2) *
                                      OCT_SG_ENTRY_SIZE);

                        g->sg = kmalloc_node(g->sg_size + 8,
                                             GFP_KERNEL, numa_node);
                        if (!g->sg)
                                g->sg = kmalloc(g->sg_size + 8, GFP_KERNEL);
                        if (!g->sg) {
                                kfree(g);
                                break;
                        }

                        /* The gather component should be aligned on 64-bit
                         * boundary
                         */
                        if (((unsigned long)g->sg) & 7) {
                                g->adjust = 8 - (((unsigned long)g->sg) & 7);
                                g->sg = (struct octeon_sg_entry *)
                                        ((unsigned long)g->sg + g->adjust);
                        }
                        g->sg_dma_ptr = dma_map_single(&oct->pci_dev->dev,
                                                       g->sg, g->sg_size,
                                                       DMA_TO_DEVICE);
                        if (dma_mapping_error(&oct->pci_dev->dev,
                                              g->sg_dma_ptr)) {
                                kfree((void *)((unsigned long)g->sg -
                                               g->adjust));
                                kfree(g);
                                break;
                        }

                        list_add_tail(&g->list, &lio->glist[i]);
                }

                if (j != lio->tx_qsize) {
                        delete_glists(lio);
                        return 1;
                }
        }

        return 0;
}
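
/* Design note: gather buffers are preallocated here, one pool per input
 * queue, so the hot transmit path never allocates. kzalloc_node()/
 * kmalloc_node() are tried first for NUMA locality with plain kzalloc()/
 * kmalloc() as fallbacks, and the 8 extra bytes per sg buffer leave room
 * for the alignment adjustment recorded in g->adjust.
 */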

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED) {
                struct oct_link_info *linfo = &lio->linfo;

                if (linfo->link.s.link_up) {
                        netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
                                   linfo->link.s.speed,
                                   (linfo->link.s.duplex) ? "Full" : "Half");
                } else {
                        netif_info(lio, link, lio->netdev, "Link Down\n");
                }
        }
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;

        rtnl_lock();
        call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
        rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->link_status_wq.wq = alloc_workqueue("link-status",
                                                 WQ_MEM_RECLAIM, 0);
        if (!lio->link_status_wq.wq) {
                dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
                          octnet_link_status_change);
        lio->link_status_wq.wk.ctxptr = lio;

        return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->link_status_wq.wq) {
                cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
                destroy_workqueue(lio->link_status_wq.wq);
        }
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
                                      union oct_link_status *ls)
{
        struct lio *lio = GET_LIO(netdev);
        int changed = (lio->linfo.link.u64 != ls->u64);

        lio->linfo.link.u64 = ls->u64;

        if ((lio->intf_open) && (changed)) {
                print_link_info(netdev);
                lio->link_changes++;

                if (lio->linfo.link.s.link_up) {
                        netif_carrier_on(netdev);
                        /* start_txq(netdev); */
                        txqs_wake(netdev);
                } else {
                        netif_carrier_off(netdev);
                        stop_txq(netdev);
                }
        }
}

/* Runs in interrupt context. */
static void update_txq_status(struct octeon_device *oct, int iq_num)
{
        struct net_device *netdev;
        struct lio *lio;
        struct octeon_instr_queue *iq = oct->instr_queue[iq_num];

        netdev = oct->props[iq->ifidx].netdev;

        /* This is needed because the first IQ does not have
         * a netdev associated with it.
         */
        if (!netdev)
                return;

        lio = GET_LIO(netdev);
        if (netif_is_multiqueue(netdev)) {
                if (__netif_subqueue_stopped(netdev, iq->q_index) &&
                    lio->linfo.link.s.link_up &&
                    (!octnet_iq_is_full(oct, iq_num))) {
                        INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
                                                  tx_restart, 1);
                        netif_wake_subqueue(netdev, iq->q_index);
                } else {
                        if (!octnet_iq_is_full(oct, lio->txq)) {
                                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev,
                                                          lio->txq,
                                                          tx_restart, 1);
                                wake_q(netdev, lio->txq);
                        }
                }
        }
}

static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
        struct octeon_device *oct = droq->oct_dev;
        struct octeon_device_priv *oct_priv =
            (struct octeon_device_priv *)oct->priv;

        if (droq->ops.poll_mode) {
                droq->ops.napi_fn(droq);
        } else {
                if (ret & MSIX_PO_INT) {
                        tasklet_schedule(&oct_priv->droq_tasklet);
                        return 1;
                }
                /* this will be flushed periodically by check iq db */
                if (ret & MSIX_PI_INT)
                        return 0;
        }
        return 0;
}
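
/* In poll mode the DROQ is serviced by NAPI via droq->ops.napi_fn();
 * otherwise a packet-output interrupt (MSIX_PO_INT) defers the work to the
 * droq tasklet, while a packet-input interrupt (MSIX_PI_INT) is left to the
 * periodic IQ doorbell check noted above.
 */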

/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        u64 oq_no;
        struct octeon_droq *droq;

        if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
                for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
                     oq_no++) {
                        if (!(oct->droq_intr & (1ULL << oq_no)))
                                continue;

                        droq = oct->droq[oq_no];

                        if (droq->ops.poll_mode) {
                                droq->ops.napi_fn(droq);
                                oct_priv->napi_mask |= (1 << oq_no);
                        } else {
                                tasklet_schedule(&oct_priv->droq_tasklet);
                        }
                }
        }
}

static irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
        u64 ret;
        struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
        struct octeon_device *oct = ioq_vector->oct_dev;
        struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];

        ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

        if ((ret & MSIX_PO_INT) || (ret & MSIX_PI_INT))
                liquidio_schedule_msix_droq_pkt_handler(droq, ret);

        return IRQ_HANDLED;
}

/**
 * \brief Interrupt handler for octeon
 * @param irq unused
 * @param dev octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
                                         void *dev)
{
        struct octeon_device *oct = (struct octeon_device *)dev;
        irqreturn_t ret;

        /* Disable our interrupts for the duration of ISR */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        ret = oct->fn_list.process_interrupt_regs(oct);

        if (ret == IRQ_HANDLED)
                liquidio_schedule_droq_pkt_handlers(oct);

        /* Re-enable our interrupts  */
        if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
                oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

        return ret;
}

/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
static int octeon_setup_interrupt(struct octeon_device *oct)
{
        int irqret, err;
        struct msix_entry *msix_entries;
        int i;
        int num_ioq_vectors;
        int num_alloc_ioq_vectors;

        if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
                oct->num_msix_irqs = oct->sriov_info.num_pf_rings;
                /* one non-ioq interrupt for handling sli_mac_pf_int_sum */
                oct->num_msix_irqs += 1;

                oct->msix_entries = kcalloc(
                    oct->num_msix_irqs, sizeof(struct msix_entry), GFP_KERNEL);
                if (!oct->msix_entries)
                        return 1;

                msix_entries = (struct msix_entry *)oct->msix_entries;
                /* Assumption: the PF's MSI-X vectors are numbered from the
                 * PF's starting ring number (pf_srn) up through trs, not
                 * from 0. If that is not the case, change this code.
                 */
                for (i = 0; i < oct->num_msix_irqs - 1; i++)
                        msix_entries[i].entry = oct->sriov_info.pf_srn + i;
                msix_entries[oct->num_msix_irqs - 1].entry =
                    oct->sriov_info.trs;
                num_alloc_ioq_vectors = pci_enable_msix_range(
                                                oct->pci_dev, msix_entries,
                                                oct->num_msix_irqs,
                                                oct->num_msix_irqs);
                if (num_alloc_ioq_vectors < 0) {
                        dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                        return 1;
                }
                dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

                num_ioq_vectors = oct->num_msix_irqs;

                /** For PF, there is one non-ioq interrupt handler */
                num_ioq_vectors -= 1;
                irqret = request_irq(msix_entries[num_ioq_vectors].vector,
                                     liquidio_legacy_intr_handler, 0, "octeon",
                                     oct);
                if (irqret) {
                        dev_err(&oct->pci_dev->dev,
                                "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
                                irqret);
                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                        return 1;
                }

                for (i = 0; i < num_ioq_vectors; i++) {
                        irqret = request_irq(msix_entries[i].vector,
                                             liquidio_msix_intr_handler, 0,
                                             "octeon", &oct->ioq_vector[i]);
                        if (irqret) {
                                dev_err(&oct->pci_dev->dev,
                                        "OCTEON: Request_irq failed for MSIX interrupt Error: %d\n",
                                        irqret);
                                /* Free the non-ioq irq vector here. */
                                free_irq(msix_entries[num_ioq_vectors].vector,
                                         oct);

                                while (i) {
                                        i--;
                                        /* clear the affinity mask */
                                        irq_set_affinity_hint(
                                                msix_entries[i].vector, NULL);
                                        free_irq(msix_entries[i].vector,
                                                 &oct->ioq_vector[i]);
                                }
                                pci_disable_msix(oct->pci_dev);
                                kfree(oct->msix_entries);
                                oct->msix_entries = NULL;
                                return 1;
                        }
                        oct->ioq_vector[i].vector = msix_entries[i].vector;
                        /* assign the cpu mask for this msix interrupt vector */
                        irq_set_affinity_hint(
                                        msix_entries[i].vector,
                                        (&oct->ioq_vector[i].affinity_mask));
                }
                dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
                        oct->octeon_id);
        } else {
                err = pci_enable_msi(oct->pci_dev);
                if (err)
                        dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
                                 err);
                else
                        oct->flags |= LIO_FLAG_MSI_ENABLED;

                irqret = request_irq(oct->pci_dev->irq,
                                     liquidio_legacy_intr_handler, IRQF_SHARED,
                                     "octeon", oct);
                if (irqret) {
                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                        dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
                                irqret);
                        return 1;
                }
        }
        return 0;
}
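
/* Interrupt setup thus degrades gracefully: MSI-X (one vector per IO queue
 * plus one non-ioq vector) on a CN23XX PF with msix_on set, otherwise MSI if
 * pci_enable_msi() succeeds, and finally a shared legacy INTx line.
 */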

static int liquidio_watchdog(void *param)
{
        u64 wdog;
        u16 mask_of_stuck_cores = 0;
        u16 mask_of_crashed_cores = 0;
        int core_num;
        u8 core_is_stuck[LIO_MAX_CORES];
        u8 core_crashed[LIO_MAX_CORES];
        struct octeon_device *oct = param;

        memset(core_is_stuck, 0, sizeof(core_is_stuck));
        memset(core_crashed, 0, sizeof(core_crashed));

        while (!kthread_should_stop()) {
                mask_of_crashed_cores =
                    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

                for (core_num = 0; core_num < LIO_MAX_CORES; core_num++) {
                        if (!core_is_stuck[core_num]) {
                                wdog = lio_pci_readq(oct, CIU3_WDOG(core_num));

                                /* look at watchdog state field */
                                wdog &= CIU3_WDOG_MASK;
                                if (wdog) {
                                        /* this watchdog timer has expired */
                                        core_is_stuck[core_num] =
                                                LIO_MONITOR_WDOG_EXPIRE;
                                        mask_of_stuck_cores |= (1 << core_num);
                                }
                        }

                        if (!core_crashed[core_num])
                                core_crashed[core_num] =
                                    (mask_of_crashed_cores >> core_num) & 1;
                }

                if (mask_of_stuck_cores) {
                        for (core_num = 0; core_num < LIO_MAX_CORES;
                             core_num++) {
                                if (core_is_stuck[core_num] == 1) {
                                        dev_err(&oct->pci_dev->dev,
                                                "ERROR: Octeon core %d is stuck!\n",
                                                core_num);
                                        /* 2 means we have printk'd an error,
                                         * so there is no need to repeat it
                                         */
                                        core_is_stuck[core_num] =
                                                LIO_MONITOR_CORE_STUCK_MSGD;
                                }
                        }
                }

                if (mask_of_crashed_cores) {
                        for (core_num = 0; core_num < LIO_MAX_CORES;
                             core_num++) {
                                if (core_crashed[core_num] == 1) {
                                        dev_err(&oct->pci_dev->dev,
                                                "ERROR: Octeon core %d crashed!  See oct-fwdump for details.\n",
                                                core_num);
                                        /* 2 means we have printk'd an error,
                                         * so there is no need to repeat it
                                         */
                                        core_crashed[core_num] =
                                                LIO_MONITOR_CORE_STUCK_MSGD;
                                }
                        }
                }
#ifdef CONFIG_MODULE_UNLOAD
                if (mask_of_stuck_cores || mask_of_crashed_cores) {
                        /* make module refcount=0 so that rmmod will work */
                        long refcount;

                        refcount = module_refcount(THIS_MODULE);

                        while (refcount > 0) {
                                module_put(THIS_MODULE);
                                refcount = module_refcount(THIS_MODULE);
                        }

                        /* compensate for and withstand an unlikely (but still
                         * possible) race condition
                         */
                        while (refcount < 0) {
                                try_module_get(THIS_MODULE);
                                refcount = module_refcount(THIS_MODULE);
                        }
                }
#endif
                /* sleep for two seconds */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(2 * HZ);
        }

        return 0;
}
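
/* The TASK_INTERRUPTIBLE + schedule_timeout(2 * HZ) pair above gives the
 * watchdog a two-second poll period while still letting kthread_stop() wake
 * the thread promptly when the driver is unloaded.
 */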

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
               const struct pci_device_id *ent __attribute__((unused)))
{
        struct octeon_device *oct_dev = NULL;
        struct handshake *hs;

        oct_dev = octeon_allocate_device(pdev->device,
                                         sizeof(struct octeon_device_priv));
        if (!oct_dev) {
                dev_err(&pdev->dev, "Unable to allocate device\n");
                return -ENOMEM;
        }

        if (pdev->device == OCTEON_CN23XX_PF_VID)
                oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

        dev_info(&pdev->dev, "Initializing device %x:%x.\n",
                 (u32)pdev->vendor, (u32)pdev->device);

        /* Assign octeon_device for this device to the private data area. */
        pci_set_drvdata(pdev, oct_dev);

        /* set linux specific device pointer */
        oct_dev->pci_dev = (void *)pdev;

        hs = &handshake[oct_dev->octeon_id];
        init_completion(&hs->init);
        init_completion(&hs->started);
        hs->pci_dev = pdev;

        if (oct_dev->octeon_id == 0)
                /* first LiquidIO NIC is detected */
                complete(&first_stage);

        if (octeon_device_init(oct_dev)) {
                liquidio_remove(pdev);
                return -ENOMEM;
        }

        if (OCTEON_CN23XX_PF(oct_dev)) {
                u64 scratch1;
                u8 bus, device, function;

                scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
                if (!(scratch1 & 4ULL)) {
                        /* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
                         * the lio watchdog kernel thread is running for this
                         * NIC.  Each NIC gets one watchdog kernel thread.
                         */
                        scratch1 |= 4ULL;
                        octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
                                           scratch1);

                        bus = pdev->bus->number;
                        device = PCI_SLOT(pdev->devfn);
                        function = PCI_FUNC(pdev->devfn);
                        oct_dev->watchdog_task = kthread_create(
                            liquidio_watchdog, oct_dev,
                            "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
                        wake_up_process(oct_dev->watchdog_task);
                }
        }

        oct_dev->rx_pause = 1;
        oct_dev->tx_pause = 1;

        dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

        return 0;
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct Pointer to Octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
        int i;
        struct msix_entry *msix_entries;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        struct handshake *hs;

        switch (atomic_read(&oct->status)) {
        case OCT_DEV_RUNNING:
        case OCT_DEV_CORE_OK:

                /* No more instructions will be forwarded. */
                atomic_set(&oct->status, OCT_DEV_IN_RESET);

                oct->app_mode = CVM_DRV_INVALID_APP;
                dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                        lio_get_state_string(&oct->status));

                schedule_timeout_uninterruptible(HZ / 10);

                /* fallthrough */
        case OCT_DEV_HOST_OK:

                /* fallthrough */
        case OCT_DEV_CONSOLE_INIT_DONE:
                /* Remove any consoles */
                octeon_remove_consoles(oct);

                /* fallthrough */
        case OCT_DEV_IO_QUEUES_DONE:
                if (wait_for_pending_requests(oct))
                        dev_err(&oct->pci_dev->dev, "There were pending requests\n");

                if (lio_wait_for_instr_fetch(oct))
                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

                /* Disable the input and output queues now. No more packets will
                 * arrive from Octeon, but we should wait for all packet
                 * processing to finish.
                 */
                oct->fn_list.disable_io_queues(oct);

                if (lio_wait_for_oq_pkts(oct))
                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

                /* Disable interrupts  */
                oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

                if (oct->msix_on) {
                        msix_entries = (struct msix_entry *)oct->msix_entries;
                        for (i = 0; i < oct->num_msix_irqs - 1; i++) {
                                /* clear the affinity_cpumask */
                                irq_set_affinity_hint(msix_entries[i].vector,
                                                      NULL);
                                free_irq(msix_entries[i].vector,
                                         &oct->ioq_vector[i]);
                        }
                        /* non-iov vector's argument is oct struct */
                        free_irq(msix_entries[i].vector, oct);

                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                } else {
                        /* Release the interrupt line */
                        free_irq(oct->pci_dev->irq, oct);

                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                }

                if (OCTEON_CN23XX_PF(oct))
                        octeon_free_ioq_vector(oct);
        /* fallthrough */
        case OCT_DEV_IN_RESET:
        case OCT_DEV_DROQ_INIT_DONE:
                /*atomic_set(&oct->status, OCT_DEV_DROQ_INIT_DONE);*/
                mdelay(100);
                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        octeon_delete_droq(oct, i);
                }

                /* Force any pending handshakes to complete */
                for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
                        hs = &handshake[i];

                        if (hs->pci_dev) {
                                handshake[oct->octeon_id].init_ok = 0;
                                complete(&handshake[oct->octeon_id].init);
                                handshake[oct->octeon_id].started_ok = 0;
                                complete(&handshake[oct->octeon_id].started);
                        }
                }

                /* fallthrough */
        case OCT_DEV_RESP_LIST_INIT_DONE:
                octeon_delete_response_list(oct);

                /* fallthrough */
        case OCT_DEV_INSTR_QUEUE_INIT_DONE:
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        octeon_delete_instr_queue(oct, i);
                }
                /* fallthrough */
        case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
                octeon_free_sc_buffer_pool(oct);

                /* fallthrough */
        case OCT_DEV_DISPATCH_INIT_DONE:
                octeon_delete_dispatch_list(oct);
                cancel_delayed_work_sync(&oct->nic_poll_work.work);

                /* fallthrough */
        case OCT_DEV_PCI_MAP_DONE:
                /* Soft reset the octeon device before exiting */
                if ((!OCTEON_CN23XX_PF(oct)) || !oct->octeon_id)
                        oct->fn_list.soft_reset(oct);

                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);

                /* fallthrough */
        case OCT_DEV_BEGIN_STATE:
                /* Disable the device, releasing the PCI INT */
                pci_disable_device(oct->pci_dev);

                /* Nothing to be done here either */
                break;
        }                       /* end switch (oct->status) */

        tasklet_kill(&oct_priv->droq_tasklet);
}
1502
1503 /**
1504  * \brief Callback for rx ctrl
1505  * @param status status of request
1506  * @param buf pointer to resp structure
1507  */
1508 static void rx_ctl_callback(struct octeon_device *oct,
1509                             u32 status,
1510                             void *buf)
1511 {
1512         struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1513         struct liquidio_rx_ctl_context *ctx;
1514
1515         ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1516
1517         oct = lio_get_device(ctx->octeon_id);
1518         if (status)
1519                 dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
1520                         CVM_CAST64(status));
1521         WRITE_ONCE(ctx->cond, 1);
1522
1523         /* This barrier is required to be sure that the response has been
1524          * written fully before waking up the handler
1525          */
1526         wmb();
1527
1528         wake_up_interruptible(&ctx->wc);
1529 }
1530
1531 /**
1532  * \brief Send Rx control command
1533  * @param lio per-network private data
1534  * @param start_stop whether to start or stop
1535  */
1536 static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
1537 {
1538         struct octeon_soft_command *sc;
1539         struct liquidio_rx_ctl_context *ctx;
1540         union octnet_cmd *ncmd;
1541         int ctx_size = sizeof(struct liquidio_rx_ctl_context);
1542         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1543         int retval;
1544
1545         if (oct->props[lio->ifidx].rx_on == start_stop)
1546                 return;
1547
1548         sc = (struct octeon_soft_command *)
1549                 octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
1550                                           16, ctx_size);
             /* the allocation can fail; bail out quietly if it does */
             if (!sc)
                     return;

1552         ncmd = (union octnet_cmd *)sc->virtdptr;
1553         ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;
1554
1555         WRITE_ONCE(ctx->cond, 0);
1556         ctx->octeon_id = lio_get_device_id(oct);
1557         init_waitqueue_head(&ctx->wc);
1558
1559         ncmd->u64 = 0;
1560         ncmd->s.cmd = OCTNET_CMD_RX_CTL;
1561         ncmd->s.param1 = start_stop;
1562
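             /* The command is built in host byte order; swap each 64-bit
              * word into the byte order the firmware expects (big-endian).
              * OCTNET_CMD_SIZE >> 3 converts the byte count to the number
              * of 8-byte words to swap.
              */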
1563         octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));
1564
1565         sc->iq_no = lio->linfo.txpciq[0].s.q_no;
1566
1567         octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
1568                                     OPCODE_NIC_CMD, 0, 0, 0);
1569
1570         sc->callback = rx_ctl_callback;
1571         sc->callback_arg = sc;
1572         sc->wait_time = 5000;
1573
1574         retval = octeon_send_soft_command(oct, sc);
1575         if (retval == IQ_SEND_FAILED) {
1576                 netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
1577         } else {
1578                 /* Sleep on a wait queue till the cond flag indicates that the
1579                  * response arrived or timed-out.
1580                  */
1581                 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
1582                         return;
1583                 oct->props[lio->ifidx].rx_on = start_stop;
1584         }
1585
1586         octeon_free_soft_command(oct, sc);
1587 }
1588
1589 /**
1590  * \brief Destroy NIC device interface
1591  * @param oct octeon device
1592  * @param ifidx which interface to destroy
1593  *
1594  * Cleanup associated with each interface for an Octeon device when NIC
1595  * module is being unloaded or if initialization fails during load.
1596  */
1597 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1598 {
1599         struct net_device *netdev = oct->props[ifidx].netdev;
1600         struct lio *lio;
1601         struct napi_struct *napi, *n;
1602
1603         if (!netdev) {
1604                 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1605                         __func__, ifidx);
1606                 return;
1607         }
1608
1609         lio = GET_LIO(netdev);
1610
1611         dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1612
1613         if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1614                 liquidio_stop(netdev);
1615
1616         if (oct->props[lio->ifidx].napi_enabled == 1) {
1617                 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1618                         napi_disable(napi);
1619
1620                 oct->props[lio->ifidx].napi_enabled = 0;
1621
1622                 if (OCTEON_CN23XX_PF(oct))
1623                         oct->droq[0]->ops.poll_mode = 0;
1624         }
1625
1626         if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1627                 unregister_netdev(netdev);
1628
1629         cleanup_link_status_change_wq(netdev);
1630
1631         delete_glists(lio);
1632
1633         free_netdev(netdev);
1634
1635         oct->props[ifidx].gmxport = -1;
1636
1637         oct->props[ifidx].netdev = NULL;
1638 }
1639
1640 /**
1641  * \brief Stop complete NIC functionality
1642  * @param oct octeon device
1643  */
1644 static int liquidio_stop_nic_module(struct octeon_device *oct)
1645 {
1646         int i, j;
1647         struct lio *lio;
1648
1649         dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1650         if (!oct->ifcount) {
1651                 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1652                 return 1;
1653         }
1654
1655         spin_lock_bh(&oct->cmd_resp_wqlock);
1656         oct->cmd_resp_state = OCT_DRV_OFFLINE;
1657         spin_unlock_bh(&oct->cmd_resp_wqlock);
1658
1659         for (i = 0; i < oct->ifcount; i++) {
1660                 lio = GET_LIO(oct->props[i].netdev);
1661                 for (j = 0; j < lio->linfo.num_rxpciq; j++)
1662                         octeon_unregister_droq_ops(oct,
1663                                                    lio->linfo.rxpciq[j].s.q_no);
1664         }
1665
1666         for (i = 0; i < oct->ifcount; i++)
1667                 liquidio_destroy_nic_device(oct, i);
1668
1669         dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1670         return 0;
1671 }
1672
1673 /**
1674  * \brief Cleans up resources at unload time
1675  * @param pdev PCI device structure
1676  */
1677 static void liquidio_remove(struct pci_dev *pdev)
1678 {
1679         struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1680
1681         dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1682
1683         if (oct_dev->watchdog_task)
1684                 kthread_stop(oct_dev->watchdog_task);
1685
1686         if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
1687                 liquidio_stop_nic_module(oct_dev);
1688
1689         /* Reset the octeon device and cleanup all memory allocated for
1690          * the octeon device by driver.
1691          */
1692         octeon_destroy_resources(oct_dev);
1693
1694         dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1695
1696         /* This octeon device has been removed. Update the global
1697          * data structure to reflect this. Free the device structure.
1698          */
1699         octeon_free_device_mem(oct_dev);
1700 }
1701
1702 /**
1703  * \brief Identify the Octeon device and map the BAR address space
1704  * @param oct octeon device
1705  */
1706 static int octeon_chip_specific_setup(struct octeon_device *oct)
1707 {
1708         u32 dev_id, rev_id;
1709         int ret = 1;
1710         char *s;
1711
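             /* Config dword 0 holds the vendor/device ID pair; dword 8
              * holds the class code and revision, with the revision in
              * the low byte.
              */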
1712         pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1713         pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1714         oct->rev_id = rev_id & 0xff;
1715
1716         switch (dev_id) {
1717         case OCTEON_CN68XX_PCIID:
1718                 oct->chip_id = OCTEON_CN68XX;
1719                 ret = lio_setup_cn68xx_octeon_device(oct);
1720                 s = "CN68XX";
1721                 break;
1722
1723         case OCTEON_CN66XX_PCIID:
1724                 oct->chip_id = OCTEON_CN66XX;
1725                 ret = lio_setup_cn66xx_octeon_device(oct);
1726                 s = "CN66XX";
1727                 break;
1728
1729         case OCTEON_CN23XX_PCIID_PF:
1730                 oct->chip_id = OCTEON_CN23XX_PF_VID;
1731                 ret = setup_cn23xx_octeon_pf_device(oct);
1732                 s = "CN23XX";
1733                 break;
1734
1735         default:
1736                 s = "?";
1737                 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1738                         dev_id);
1739         }
1740
1741         if (!ret)
1742                 dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
1743                          OCTEON_MAJOR_REV(oct),
1744                          OCTEON_MINOR_REV(oct),
1745                          octeon_get_conf(oct)->card_name,
1746                          LIQUIDIO_VERSION);
1747
1748         return ret;
1749 }
1750
1751 /**
1752  * \brief PCI initialization for each Octeon device.
1753  * @param oct octeon device
1754  */
1755 static int octeon_pci_os_setup(struct octeon_device *oct)
1756 {
1757         /* setup PCI stuff first */
1758         if (pci_enable_device(oct->pci_dev)) {
1759                 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1760                 return 1;
1761         }
1762
1763         if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1764                 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1765                 return 1;
1766         }
1767
1768         /* Enable PCI DMA Master. */
1769         pci_set_master(oct->pci_dev);
1770
1771         return 0;
1772 }
1773
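     /**
      * \brief Map an skb to one of this interface's input queues
      * @param lio per-network private data
      * @param skb network buffer
      *
      * For multiqueue devices, the kernel-selected queue_mapping is folded
      * into the range of available txpciq's; otherwise queue 0 is used.
      */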
1774 static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
1775 {
1776         int q = 0;
1777
1778         if (netif_is_multiqueue(lio->netdev))
1779                 q = skb->queue_mapping % lio->linfo.num_txpciq;
1780
1781         return q;
1782 }
1783
1784 /**
1785  * \brief Check Tx queue state for a given network buffer
1786  * @param lio per-network private data
1787  * @param skb network buffer
1788  */
1789 static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
1790 {
1791         int q = 0, iq = 0;
1792
1793         if (netif_is_multiqueue(lio->netdev)) {
1794                 q = skb->queue_mapping;
1795                 iq = lio->linfo.txpciq[(q % (lio->linfo.num_txpciq))].s.q_no;
1796         } else {
1797                 iq = lio->txq;
1798                 q = iq;
1799         }
1800
1801         if (octnet_iq_is_full(lio->oct_dev, iq))
1802                 return 0;
1803
1804         if (__netif_subqueue_stopped(lio->netdev, q)) {
1805                 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
1806                 wake_q(lio->netdev, q);
1807         }
1808         return 1;
1809 }
1810
1811 /**
1812  * \brief Unmap and free network buffer
1813  * @param buf buffer
1814  */
1815 static void free_netbuf(void *buf)
1816 {
1817         struct sk_buff *skb;
1818         struct octnet_buf_free_info *finfo;
1819         struct lio *lio;
1820
1821         finfo = (struct octnet_buf_free_info *)buf;
1822         skb = finfo->skb;
1823         lio = finfo->lio;
1824
1825         dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1826                          DMA_TO_DEVICE);
1827
1828         check_txq_state(lio, skb);
1829
1830         tx_buffer_free(skb);
1831 }
1832
1833 /**
1834  * \brief Unmap and free gather buffer
1835  * @param buf buffer
1836  */
1837 static void free_netsgbuf(void *buf)
1838 {
1839         struct octnet_buf_free_info *finfo;
1840         struct sk_buff *skb;
1841         struct lio *lio;
1842         struct octnic_gather *g;
1843         int i, frags, iq;
1844
1845         finfo = (struct octnet_buf_free_info *)buf;
1846         skb = finfo->skb;
1847         lio = finfo->lio;
1848         g = finfo->g;
1849         frags = skb_shinfo(skb)->nr_frags;
1850
1851         dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1852                          g->sg[0].ptr[0], (skb->len - skb->data_len),
1853                          DMA_TO_DEVICE);
1854
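             /* Gather entries pack four pointers each: i >> 2 selects the
              * entry and i & 3 the slot within it. Slot 0 of entry 0 held
              * the linear part of the skb (unmapped above), so the frags
              * start at i = 1.
              */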
1855         i = 1;
1856         while (frags--) {
1857                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1858
1859                 pci_unmap_page((lio->oct_dev)->pci_dev,
1860                                g->sg[(i >> 2)].ptr[(i & 3)],
1861                                frag->size, DMA_TO_DEVICE);
1862                 i++;
1863         }
1864
1865         dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
1866                                 g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
1867
1868         iq = skb_iq(lio, skb);
1869         spin_lock(&lio->glist_lock[iq]);
1870         list_add_tail(&g->list, &lio->glist[iq]);
1871         spin_unlock(&lio->glist_lock[iq]);
1872
1873         check_txq_state(lio, skb);     /* mq support: sub-queue state check */
1874
1875         tx_buffer_free(skb);
1876 }
1877
1878 /**
1879  * \brief Unmap and free gather buffer with response
1880  * @param buf buffer
1881  */
1882 static void free_netsgbuf_with_resp(void *buf)
1883 {
1884         struct octeon_soft_command *sc;
1885         struct octnet_buf_free_info *finfo;
1886         struct sk_buff *skb;
1887         struct lio *lio;
1888         struct octnic_gather *g;
1889         int i, frags, iq;
1890
1891         sc = (struct octeon_soft_command *)buf;
1892         skb = (struct sk_buff *)sc->callback_arg;
1893         finfo = (struct octnet_buf_free_info *)&skb->cb;
1894
1895         lio = finfo->lio;
1896         g = finfo->g;
1897         frags = skb_shinfo(skb)->nr_frags;
1898
1899         dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1900                          g->sg[0].ptr[0], (skb->len - skb->data_len),
1901                          DMA_TO_DEVICE);
1902
1903         i = 1;
1904         while (frags--) {
1905                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1906
1907                 pci_unmap_page((lio->oct_dev)->pci_dev,
1908                                g->sg[(i >> 2)].ptr[(i & 3)],
1909                                frag->size, DMA_TO_DEVICE);
1910                 i++;
1911         }
1912
1913         dma_sync_single_for_cpu(&lio->oct_dev->pci_dev->dev,
1914                                 g->sg_dma_ptr, g->sg_size, DMA_TO_DEVICE);
1915
1916         iq = skb_iq(lio, skb);
1917
1918         spin_lock(&lio->glist_lock[iq]);
1919         list_add_tail(&g->list, &lio->glist[iq]);
1920         spin_unlock(&lio->glist_lock[iq]);
1921
1922         /* Don't free the skb yet */
1923
1924         check_txq_state(lio, skb);
1925 }
1926
1927 /**
1928  * \brief Adjust ptp frequency
1929  * @param ptp PTP clock info
1930  * @param ppb how much to adjust by, in parts-per-billion
1931  */
1932 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1933 {
1934         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1935         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1936         u64 comp, delta;
1937         unsigned long flags;
1938         bool neg_adj = false;
1939
1940         if (ppb < 0) {
1941                 neg_adj = true;
1942                 ppb = -ppb;
1943         }
1944
1945         /* The hardware adds the clock compensation value to the
1946          * PTP clock on every coprocessor clock cycle, so we
1947          * compute the delta in terms of coprocessor clocks.
1948          */
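             /* CLOCK_COMP is a 32.32 fixed-point per-cycle increment (see
              * liquidio_ptp_init), so a ppb frequency adjustment scales to
              * delta = ppb * 2^32 / coproc_clock_rate fixed-point units.
              */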
1949         delta = (u64)ppb << 32;
1950         do_div(delta, oct->coproc_clock_rate);
1951
1952         spin_lock_irqsave(&lio->ptp_lock, flags);
1953         comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1954         if (neg_adj)
1955                 comp -= delta;
1956         else
1957                 comp += delta;
1958         lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1959         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1960
1961         return 0;
1962 }
1963
1964 /**
1965  * \brief Adjust ptp time
1966  * @param ptp PTP clock info
1967  * @param delta how much to adjust by, in nanosecs
1968  */
1969 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1970 {
1971         unsigned long flags;
1972         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1973
1974         spin_lock_irqsave(&lio->ptp_lock, flags);
1975         lio->ptp_adjust += delta;
1976         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1977
1978         return 0;
1979 }
1980
1981 /**
1982  * \brief Get hardware clock time, including any adjustment
1983  * @param ptp PTP clock info
1984  * @param ts timespec
1985  */
1986 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1987                                 struct timespec64 *ts)
1988 {
1989         u64 ns;
1990         unsigned long flags;
1991         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1992         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1993
1994         spin_lock_irqsave(&lio->ptp_lock, flags);
1995         ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1996         ns += lio->ptp_adjust;
1997         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1998
1999         *ts = ns_to_timespec64(ns);
2000
2001         return 0;
2002 }
2003
2004 /**
2005  * \brief Set hardware clock time. Reset adjustment
2006  * @param ptp PTP clock info
2007  * @param ts timespec
2008  */
2009 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
2010                                 const struct timespec64 *ts)
2011 {
2012         u64 ns;
2013         unsigned long flags;
2014         struct lio *lio = container_of(ptp, struct lio, ptp_info);
2015         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
2016
2017         ns = timespec64_to_ns(ts);
2018
2019         spin_lock_irqsave(&lio->ptp_lock, flags);
2020         lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
2021         lio->ptp_adjust = 0;
2022         spin_unlock_irqrestore(&lio->ptp_lock, flags);
2023
2024         return 0;
2025 }
2026
2027 /**
2028  * \brief Handle requests to enable PTP ancillary features (unsupported)
2029  * @param ptp PTP clock info
2030  * @param rq request
2031  * @param on whether to enable or disable the feature
2032  */
2033 static int
2034 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
2035                     struct ptp_clock_request *rq __attribute__((unused)),
2036                     int on __attribute__((unused)))
2037 {
2038         return -EOPNOTSUPP;
2039 }
2040
2041 /**
2042  * \brief Open PTP clock source
2043  * @param netdev network device
2044  */
2045 static void oct_ptp_open(struct net_device *netdev)
2046 {
2047         struct lio *lio = GET_LIO(netdev);
2048         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
2049
2050         spin_lock_init(&lio->ptp_lock);
2051
2052         snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
2053         lio->ptp_info.owner = THIS_MODULE;
2054         lio->ptp_info.max_adj = 250000000;
2055         lio->ptp_info.n_alarm = 0;
2056         lio->ptp_info.n_ext_ts = 0;
2057         lio->ptp_info.n_per_out = 0;
2058         lio->ptp_info.pps = 0;
2059         lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
2060         lio->ptp_info.adjtime = liquidio_ptp_adjtime;
2061         lio->ptp_info.gettime64 = liquidio_ptp_gettime;
2062         lio->ptp_info.settime64 = liquidio_ptp_settime;
2063         lio->ptp_info.enable = liquidio_ptp_enable;
2064
2065         lio->ptp_adjust = 0;
2066
2067         lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
2068                                              &oct->pci_dev->dev);
2069
2070         if (IS_ERR(lio->ptp_clock))
2071                 lio->ptp_clock = NULL;
2072 }
2073
2074 /**
2075  * \brief Init PTP clock
2076  * @param oct octeon device
2077  */
2078 static void liquidio_ptp_init(struct octeon_device *oct)
2079 {
2080         u64 clock_comp, cfg;
2081
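             /* Program the per-cycle increment as a 32.32 fixed-point
              * value: NSEC_PER_SEC * 2^32 / coproc_clock_rate nanoseconds
              * are added to CLOCK_HI on every coprocessor cycle, so the
              * counter runs in nanoseconds regardless of the coprocessor
              * clock rate.
              */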
2082         clock_comp = (u64)NSEC_PER_SEC << 32;
2083         do_div(clock_comp, oct->coproc_clock_rate);
2084         lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
2085
2086         /* Enable */
2087         cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
2088         lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
2089 }
2090
2091 /**
2092  * \brief Load firmware to device
2093  * @param oct octeon device
2094  *
2095  * Maps device to firmware filename, requests firmware, and downloads it
2096  */
2097 static int load_firmware(struct octeon_device *oct)
2098 {
2099         int ret = 0;
2100         const struct firmware *fw;
2101         char fw_name[LIO_MAX_FW_FILENAME_LEN];
2102         char *tmp_fw_type;
2103
2104         if (strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
2105                     sizeof(LIO_FW_NAME_TYPE_NONE)) == 0) {
2106                 dev_info(&oct->pci_dev->dev, "Skipping firmware load\n");
2107                 return ret;
2108         }
2109
2110         if (fw_type[0] == '\0')
2111                 tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
2112         else
2113                 tmp_fw_type = fw_type;
2114
2115         sprintf(fw_name, "/*(DEBLOBBED)*/", LIO_FW_DIR, LIO_FW_BASE_NAME,
2116                 octeon_get_conf(oct)->card_name, tmp_fw_type,
2117                 LIO_FW_NAME_SUFFIX);
2118
2119         ret = reject_firmware(&fw, fw_name, &oct->pci_dev->dev);
2120         if (ret) {
2121                 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
2122                         fw_name);
2123                 release_firmware(fw);
2124                 return ret;
2125         }
2126
2127         ret = octeon_download_firmware(oct, fw->data, fw->size);
2128
2129         release_firmware(fw);
2130
2131         return ret;
2132 }
2133
2134 /**
2135  * \brief Setup output queue
2136  * @param oct octeon device
2137  * @param q_no which queue
2138  * @param num_descs how many descriptors
2139  * @param desc_size size of each descriptor
2140  * @param app_ctx application context
2141  */
2142 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
2143                              int desc_size, void *app_ctx)
2144 {
2145         int ret_val = 0;
2146
2147         dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
2148         /* droq creation and local register settings. */
2149         ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
2150         if (ret_val < 0)
2151                 return ret_val;
2152
2153         if (ret_val == 1) {
2154                 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
2155                 return 0;
2156         }
2157         /* tasklet creation for the droq */
2158
2159         /* Enable the droq queues */
2160         octeon_set_droq_pkt_op(oct, q_no, 1);
2161
2162         /* Send Credit for Octeon Output queues. Credits are always
2163          * sent after the output queue is enabled.
2164          */
2165         writel(oct->droq[q_no]->max_count,
2166                oct->droq[q_no]->pkts_credit_reg);
2167
2168         return ret_val;
2169 }
2170
2171 /**
2172  * \brief Callback for getting interface configuration
2173  * @param status status of request
2174  * @param buf pointer to resp structure
2175  */
2176 static void if_cfg_callback(struct octeon_device *oct,
2177                             u32 status __attribute__((unused)),
2178                             void *buf)
2179 {
2180         struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
2181         struct liquidio_if_cfg_resp *resp;
2182         struct liquidio_if_cfg_context *ctx;
2183
2184         resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
2185         ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
2186
2187         oct = lio_get_device(ctx->octeon_id);
2188         if (resp->status)
2189                 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: %llx\n",
2190                         CVM_CAST64(resp->status));
2191         WRITE_ONCE(ctx->cond, 1);
2192
2193         snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
2194                  resp->cfg_info.liquidio_firmware_version);
2195
2196         /* This barrier is required to be sure that the response has been
2197          * written fully before waking up the handler
2198          */
2199         wmb();
2200
2201         wake_up_interruptible(&ctx->wc);
2202 }
2203
2204 /**
2205  * \brief Select queue based on hash
2206  * @param dev Net device
2207  * @param skb sk_buff structure
2208  * @returns selected queue number
2209  */
2210 static u16 select_q(struct net_device *dev, struct sk_buff *skb,
2211                     void *accel_priv __attribute__((unused)),
2212                     select_queue_fallback_t fallback __attribute__((unused)))
2213 {
2214         u32 qindex = 0;
2215         struct lio *lio;
2216
2217         lio = GET_LIO(dev);
2218         qindex = skb_tx_hash(dev, skb);
2219
2220         return (u16)(qindex % (lio->linfo.num_txpciq));
2221 }
2222
2223 /** Routine to push packets arriving on Octeon interface up to the network layer.
2224  * @param oct_id   - octeon device id.
2225  * @param skbuff   - skbuff struct to be passed to network layer.
2226  * @param len      - size of total data received.
2227  * @param rh       - Control header associated with the packet
2228  * @param param    - additional control data with the packet
2229  * @param arg      - farg registered in droq_ops
2230  */
2231 static void
2232 liquidio_push_packet(u32 octeon_id __attribute__((unused)),
2233                      void *skbuff,
2234                      u32 len,
2235                      union octeon_rh *rh,
2236                      void *param,
2237                      void *arg)
2238 {
2239         struct napi_struct *napi = param;
2240         struct sk_buff *skb = (struct sk_buff *)skbuff;
2241         struct skb_shared_hwtstamps *shhwtstamps;
2242         u64 ns;
2243         u16 vtag = 0;
2244         struct net_device *netdev = (struct net_device *)arg;
2245         struct octeon_droq *droq = container_of(param, struct octeon_droq,
2246                                                 napi);
2247         if (netdev) {
2248                 int packet_was_received;
2249                 struct lio *lio = GET_LIO(netdev);
2250                 struct octeon_device *oct = lio->oct_dev;
2251
2252                 /* Do not proceed if the interface is not in RUNNING state. */
2253                 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
2254                         recv_buffer_free(skb);
2255                         droq->stats.rx_dropped++;
2256                         return;
2257                 }
2258
2259                 skb->dev = netdev;
2260
2261                 skb_record_rx_queue(skb, droq->q_no);
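                     /* Copy-break: small frames are copied whole into the
                      * skb's linear area; larger ones copy only the first
                      * MIN_SKB_SIZE bytes and attach the rest of the receive
                      * page as a page fragment, avoiding a full memcpy.
                      */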
2262                 if (likely(len > MIN_SKB_SIZE)) {
2263                         struct octeon_skb_page_info *pg_info;
2264                         unsigned char *va;
2265
2266                         pg_info = ((struct octeon_skb_page_info *)(skb->cb));
2267                         if (pg_info->page) {
2268                                 /* For Paged allocation use the frags */
2269                                 va = page_address(pg_info->page) +
2270                                         pg_info->page_offset;
2271                                 memcpy(skb->data, va, MIN_SKB_SIZE);
2272                                 skb_put(skb, MIN_SKB_SIZE);
2273                                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
2274                                                 pg_info->page,
2275                                                 pg_info->page_offset +
2276                                                 MIN_SKB_SIZE,
2277                                                 len - MIN_SKB_SIZE,
2278                                                 LIO_RXBUFFER_SZ);
2279                         }
2280                 } else {
2281                         struct octeon_skb_page_info *pg_info =
2282                                 ((struct octeon_skb_page_info *)(skb->cb));
2283                         skb_copy_to_linear_data(skb, page_address(pg_info->page)
2284                                                 + pg_info->page_offset, len);
2285                         skb_put(skb, len);
2286                         put_page(pg_info->page);
2287                 }
2288
2289                 if (((oct->chip_id == OCTEON_CN66XX) ||
2290                      (oct->chip_id == OCTEON_CN68XX)) &&
2291                     ptp_enable) {
2292                         if (rh->r_dh.has_hwtstamp) {
2293                                 /* timestamp is included from the hardware at
2294                                 /* timestamp is inserted by the hardware at
2295                                  */
2296                                 if (ifstate_check
2297                                     (lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
2298                                         /* Nanoseconds are in the first 64-bits
2299                                          * of the packet.
2300                                          */
2301                                         memcpy(&ns, (skb->data), sizeof(ns));
2302                                         shhwtstamps = skb_hwtstamps(skb);
2303                                         shhwtstamps->hwtstamp =
2304                                                 ns_to_ktime(ns +
2305                                                             lio->ptp_adjust);
2306                                 }
2307                                 skb_pull(skb, sizeof(ns));
2308                         }
2309                 }
2310
2311                 skb->protocol = eth_type_trans(skb, skb->dev);
2312                 if ((netdev->features & NETIF_F_RXCSUM) &&
2313                     (((rh->r_dh.encap_on) &&
2314                       (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
2315                      (!(rh->r_dh.encap_on) &&
2316                       (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
2317                         /* checksum has already been verified */
2318                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2319                 else
2320                         skb->ip_summed = CHECKSUM_NONE;
2321
2322                 /* Setting Encapsulation field on basis of status received
2323                  * from the firmware
2324                  */
2325                 if (rh->r_dh.encap_on) {
2326                         skb->encapsulation = 1;
2327                         skb->csum_level = 1;
2328                         droq->stats.rx_vxlan++;
2329                 }
2330
2331                 /* inbound VLAN tag */
2332                 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
2333                     (rh->r_dh.vlan != 0)) {
2334                         u16 vid = rh->r_dh.vlan;
2335                         u16 priority = rh->r_dh.priority;
2336
2337                         vtag = priority << 13 | vid;
2338                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
2339                 }
2340
2341                 packet_was_received = napi_gro_receive(napi, skb) != GRO_DROP;
2342
2343                 if (packet_was_received) {
2344                         droq->stats.rx_bytes_received += len;
2345                         droq->stats.rx_pkts_received++;
2346                         netdev->last_rx = jiffies;
2347                 } else {
2348                         droq->stats.rx_dropped++;
2349                         netif_info(lio, rx_err, lio->netdev,
2350                                    "droq:%d  error rx_dropped:%llu\n",
2351                                    droq->q_no, droq->stats.rx_dropped);
2352                 }
2353
2354         } else {
2355                 recv_buffer_free(skb);
2356         }
2357 }
2358
2359 /**
2360  * \brief wrapper for calling napi_schedule
2361  * @param param parameters to pass to napi_schedule
2362  *
2363  * Used when scheduling on different CPUs
2364  */
2365 static void napi_schedule_wrapper(void *param)
2366 {
2367         struct napi_struct *napi = param;
2368
2369         napi_schedule(napi);
2370 }
2371
2372 /**
2373  * \brief callback when receive interrupt occurs and we are in NAPI mode
2374  * @param arg pointer to octeon output queue
2375  */
2376 static void liquidio_napi_drv_callback(void *arg)
2377 {
2378         struct octeon_device *oct;
2379         struct octeon_droq *droq = arg;
2380         int this_cpu = smp_processor_id();
2381
2382         oct = droq->oct_dev;
2383
2384         if (OCTEON_CN23XX_PF(oct) || droq->cpu_id == this_cpu) {
2385                 napi_schedule_irqoff(&droq->napi);
2386         } else {
2387                 struct call_single_data *csd = &droq->csd;
2388
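                     /* This droq is pinned to a different CPU: bounce the
                      * napi_schedule over to droq->cpu_id via an async IPI
                      * so polling runs on the designated core.
                      */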
2389                 csd->func = napi_schedule_wrapper;
2390                 csd->info = &droq->napi;
2391                 csd->flags = 0;
2392
2393                 smp_call_function_single_async(droq->cpu_id, csd);
2394         }
2395 }
2396
2397 /**
2398  * \brief Entry point for NAPI polling
2399  * @param napi NAPI structure
2400  * @param budget maximum number of items to process
2401  */
2402 static int liquidio_napi_poll(struct napi_struct *napi, int budget)
2403 {
2404         struct octeon_droq *droq;
2405         int work_done;
2406         int tx_done = 0, iq_no;
2407         struct octeon_instr_queue *iq;
2408         struct octeon_device *oct;
2409
2410         droq = container_of(napi, struct octeon_droq, napi);
2411         oct = droq->oct_dev;
2412         iq_no = droq->q_no;
2413         /* Handle Droq descriptors */
2414         work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
2415                                                  POLL_EVENT_PROCESS_PKTS,
2416                                                  budget);
2417
2418         /* Flush the instruction queue */
2419         iq = oct->instr_queue[iq_no];
2420         if (iq) {
2421                 /* Process iq buffers within the budget limit */
2422                 tx_done = octeon_flush_iq(oct, iq, 1, budget);
2423                 /* Update iq read-index rather than waiting for next interrupt.
2424                  * Return if tx_done is false.
2425                  */
2426                 update_txq_status(oct, iq_no);
2427                 /*tx_done = (iq->flush_index == iq->octeon_read_index);*/
2428         } else {
2429                 dev_err(&oct->pci_dev->dev, "%s:  iq (%d) num invalid\n",
2430                         __func__, iq_no);
2431         }
2432
2433         if ((work_done < budget) && (tx_done)) {
2434                 napi_complete(napi);
2435                 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
2436                                              POLL_EVENT_ENABLE_INTR, 0);
2437                 return 0;
2438         }
2439
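             /* Tx work is still pending: return the full budget so the
              * NAPI core keeps polling instead of re-enabling interrupts.
              */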
2440         return (!tx_done) ? (budget) : (work_done);
2441 }
2442
2443 /**
2444  * \brief Setup input and output queues
2445  * @param octeon_dev octeon device
2446  * @param ifidx  Interface Index
2447  *
2448  * Note: Queues are with respect to the octeon device. Thus
2449  * an input queue is for egress packets, and output queues
2450  * are for ingress packets.
2451  */
2452 static inline int setup_io_queues(struct octeon_device *octeon_dev,
2453                                   int ifidx)
2454 {
2455         struct octeon_droq_ops droq_ops;
2456         struct net_device *netdev;
2457         static int cpu_id;
2458         static int cpu_id_modulus;
2459         struct octeon_droq *droq;
2460         struct napi_struct *napi;
2461         int q, q_no, retval = 0;
2462         struct lio *lio;
2463         int num_tx_descs;
2464
2465         netdev = octeon_dev->props[ifidx].netdev;
2466
2467         lio = GET_LIO(netdev);
2468
2469         memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
2470
2471         droq_ops.fptr = liquidio_push_packet;
2472         droq_ops.farg = (void *)netdev;
2473
2474         droq_ops.poll_mode = 1;
2475         droq_ops.napi_fn = liquidio_napi_drv_callback;
2476         cpu_id = 0;
2477         cpu_id_modulus = num_present_cpus();
2478
2479         /* set up DROQs. */
2480         for (q = 0; q < lio->linfo.num_rxpciq; q++) {
2481                 q_no = lio->linfo.rxpciq[q].s.q_no;
2482                 dev_dbg(&octeon_dev->pci_dev->dev,
2483                         "setup_io_queues index:%d linfo.rxpciq.s.q_no:%d\n",
2484                         q, q_no);
2485                 retval = octeon_setup_droq(octeon_dev, q_no,
2486                                            CFG_GET_NUM_RX_DESCS_NIC_IF
2487                                                    (octeon_get_conf(octeon_dev),
2488                                                    lio->ifidx),
2489                                            CFG_GET_NUM_RX_BUF_SIZE_NIC_IF
2490                                                    (octeon_get_conf(octeon_dev),
2491                                                    lio->ifidx), NULL);
2492                 if (retval) {
2493                         dev_err(&octeon_dev->pci_dev->dev,
2494                                 "%s : Runtime DROQ(RxQ) creation failed.\n",
2495                                 __func__);
2496                         return 1;
2497                 }
2498
2499                 droq = octeon_dev->droq[q_no];
2500                 napi = &droq->napi;
2501                 dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx pf_num:%d\n",
2502                         (u64)netdev, (u64)octeon_dev, octeon_dev->pf_num);
2503                 netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
2504
2505                 /* designate a CPU for this droq */
2506                 droq->cpu_id = cpu_id;
2507                 cpu_id++;
2508                 if (cpu_id >= cpu_id_modulus)
2509                         cpu_id = 0;
2510
2511                 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
2512         }
2513
2514         if (OCTEON_CN23XX_PF(octeon_dev)) {
2515                 /* 23XX PF can receive control messages (via the first PF-owned
2516                  * droq) from the firmware even if the ethX interface is down,
2517                  * which is why poll_mode must be off for the first droq.
2518                  */
2519                 octeon_dev->droq[0]->ops.poll_mode = 0;
2520         }
2521
2522         /* set up IQs. */
2523         for (q = 0; q < lio->linfo.num_txpciq; q++) {
2524                 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(octeon_get_conf
2525                                                            (octeon_dev),
2526                                                            lio->ifidx);
2527                 retval = octeon_setup_iq(octeon_dev, ifidx, q,
2528                                          lio->linfo.txpciq[q], num_tx_descs,
2529                                          netdev_get_tx_queue(netdev, q));
2530                 if (retval) {
2531                         dev_err(&octeon_dev->pci_dev->dev,
2532                                 " %s : Runtime IQ(TxQ) creation failed.\n",
2533                                 __func__);
2534                         return 1;
2535                 }
2536         }
2537
2538         return 0;
2539 }
2540
2541 /**
2542  * \brief Poll routine for checking transmit queue status
2543  * @param work work_struct data structure
2544  */
2545 static void octnet_poll_check_txq_status(struct work_struct *work)
2546 {
2547         struct cavium_wk *wk = (struct cavium_wk *)work;
2548         struct lio *lio = (struct lio *)wk->ctxptr;
2549
2550         if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
2551                 return;
2552
2553         check_txq_status(lio);
2554         queue_delayed_work(lio->txq_status_wq.wq,
2555                            &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2556 }
2557
2558 /**
2559  * \brief Sets up the txq poll check
2560  * @param netdev network device
2561  */
2562 static inline int setup_tx_poll_fn(struct net_device *netdev)
2563 {
2564         struct lio *lio = GET_LIO(netdev);
2565         struct octeon_device *oct = lio->oct_dev;
2566
2567         lio->txq_status_wq.wq = alloc_workqueue("txq-status",
2568                                                 WQ_MEM_RECLAIM, 0);
2569         if (!lio->txq_status_wq.wq) {
2570                 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
2571                 return -1;
2572         }
2573         INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
2574                           octnet_poll_check_txq_status);
2575         lio->txq_status_wq.wk.ctxptr = lio;
2576         queue_delayed_work(lio->txq_status_wq.wq,
2577                            &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2578         return 0;
2579 }
2580
2581 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
2582 {
2583         struct lio *lio = GET_LIO(netdev);
2584
2585         if (lio->txq_status_wq.wq) {
2586                 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
2587                 destroy_workqueue(lio->txq_status_wq.wq);
2588         }
2589 }
2590
2591 /**
2592  * \brief Net device open for LiquidIO
2593  * @param netdev network device
2594  */
2595 static int liquidio_open(struct net_device *netdev)
2596 {
2597         struct lio *lio = GET_LIO(netdev);
2598         struct octeon_device *oct = lio->oct_dev;
2599         struct napi_struct *napi, *n;
2600
2601         if (oct->props[lio->ifidx].napi_enabled == 0) {
2602                 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2603                         napi_enable(napi);
2604
2605                 oct->props[lio->ifidx].napi_enabled = 1;
2606
2607                 if (OCTEON_CN23XX_PF(oct))
2608                         oct->droq[0]->ops.poll_mode = 1;
2609         }
2610
2611         oct_ptp_open(netdev);
2612
2613         ifstate_set(lio, LIO_IFSTATE_RUNNING);
2614
2615         /* Ready for link status updates */
2616         lio->intf_open = 1;
2617
2618         netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
2619
2620         if (OCTEON_CN23XX_PF(oct)) {
2621                 if (!oct->msix_on)
2622                         if (setup_tx_poll_fn(netdev))
2623                                 return -1;
2624         } else {
2625                 if (setup_tx_poll_fn(netdev))
2626                         return -1;
2627         }
2628
2629         start_txq(netdev);
2630
2631         /* tell Octeon to start forwarding packets to host */
2632         send_rx_ctrl_cmd(lio, 1);
2633
2634         dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
2635                  netdev->name);
2636
2637         return 0;
2638 }
2639
2640 /**
2641  * \brief Net device stop for LiquidIO
2642  * @param netdev network device
2643  */
2644 static int liquidio_stop(struct net_device *netdev)
2645 {
2646         struct lio *lio = GET_LIO(netdev);
2647         struct octeon_device *oct = lio->oct_dev;
2648
2649         ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2650
2651         netif_tx_disable(netdev);
2652
2653         /* Inform that netif carrier is down */
2654         netif_carrier_off(netdev);
2655         lio->intf_open = 0;
2656         lio->linfo.link.s.link_up = 0;
2657         lio->link_changes++;
2658
2659         /* Pause for a moment and wait for Octeon to flush out (to the wire) any
2660          * egress packets that are in-flight.
2661          */
2662         set_current_state(TASK_INTERRUPTIBLE);
2663         schedule_timeout(msecs_to_jiffies(100));
2664
2665         /* Now it should be safe to tell Octeon that nic interface is down. */
2666         send_rx_ctrl_cmd(lio, 0);
2667
2668         if (OCTEON_CN23XX_PF(oct)) {
2669                 if (!oct->msix_on)
2670                         cleanup_tx_poll_fn(netdev);
2671         } else {
2672                 cleanup_tx_poll_fn(netdev);
2673         }
2674
2675         if (lio->ptp_clock) {
2676                 ptp_clock_unregister(lio->ptp_clock);
2677                 lio->ptp_clock = NULL;
2678         }
2679
2680         dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
2681
2682         return 0;
2683 }
2684
2685 /**
2686  * \brief Converts a mask based on net device flags
2687  * @param netdev network device
2688  *
2689  * This routine generates an octnet_ifflags mask from the net device flags
2690  * received from the OS.
2691  */
2692 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
2693 {
2694         enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
2695
2696         if (netdev->flags & IFF_PROMISC)
2697                 f |= OCTNET_IFFLAG_PROMISC;
2698
2699         if (netdev->flags & IFF_ALLMULTI)
2700                 f |= OCTNET_IFFLAG_ALLMULTI;
2701
2702         if (netdev->flags & IFF_MULTICAST) {
2703                 f |= OCTNET_IFFLAG_MULTICAST;
2704
2705                 /* Accept all multicast addresses if there are more than we
2706                  * can handle
2707                  */
2708                 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
2709                         f |= OCTNET_IFFLAG_ALLMULTI;
2710         }
2711
2712         if (netdev->flags & IFF_BROADCAST)
2713                 f |= OCTNET_IFFLAG_BROADCAST;
2714
2715         return f;
2716 }
2717
2718 /**
2719  * \brief Net device set_multicast_list
2720  * @param netdev network device
2721  */
2722 static void liquidio_set_mcast_list(struct net_device *netdev)
2723 {
2724         struct lio *lio = GET_LIO(netdev);
2725         struct octeon_device *oct = lio->oct_dev;
2726         struct octnic_ctrl_pkt nctrl;
2727         struct netdev_hw_addr *ha;
2728         u64 *mc;
2729         int ret;
2730         int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
2731
2732         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2733
2734         /* Create a ctrl pkt command to be sent to core app. */
2735         nctrl.ncmd.u64 = 0;
2736         nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
2737         nctrl.ncmd.s.param1 = get_new_flags(netdev);
2738         nctrl.ncmd.s.param2 = mc_count;
2739         nctrl.ncmd.s.more = mc_count;
2740         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2741         nctrl.netpndev = (u64)netdev;
2742         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2743
2744         /* copy all the addresses into the udd */
2745         mc = &nctrl.udd[0];
2746         netdev_for_each_mc_addr(ha, netdev) {
2747                 *mc = 0;
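                     /* Each multicast address occupies bytes 2..7 of one
                      * 8-byte udd word; the leading two bytes stay zero.
                      */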
2748                 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2749                 /* no need to swap bytes */
2750
2751                 if (++mc >= &nctrl.udd[mc_count])
2752                         break;
2753         }
2754
2755         /* Apparently, any activity in this call from the kernel has to
2756          * be atomic, so we won't wait for a response.
2757          */
2758         nctrl.wait_time = 0;
2759
2760         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2761         if (ret < 0) {
2762                 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2763                         ret);
2764         }
2765 }
2766
2767 /**
2768  * \brief Net device set_mac_address
2769  * @param netdev network device
2770  */
2771 static int liquidio_set_mac(struct net_device *netdev, void *p)
2772 {
2773         int ret = 0;
2774         struct lio *lio = GET_LIO(netdev);
2775         struct octeon_device *oct = lio->oct_dev;
2776         struct sockaddr *addr = (struct sockaddr *)p;
2777         struct octnic_ctrl_pkt nctrl;
2778
2779         if (!is_valid_ether_addr(addr->sa_data))
2780                 return -EADDRNOTAVAIL;
2781
2782         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2783
2784         nctrl.ncmd.u64 = 0;
2785         nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2786         nctrl.ncmd.s.param1 = 0;
2787         nctrl.ncmd.s.more = 1;
2788         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2789         nctrl.netpndev = (u64)netdev;
2790         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2791         nctrl.wait_time = 100;
2792
2793         nctrl.udd[0] = 0;
2794         /* The MAC Address is presented in network byte order. */
2795         memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2796
2797         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2798         if (ret < 0) {
2799                 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2800                 return -ENOMEM;
2801         }
2802         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2803         memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2804
2805         return 0;
2806 }
2807
2808 /**
2809  * \brief Net device get_stats
2810  * @param netdev network device
2811  */
2812 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2813 {
2814         struct lio *lio = GET_LIO(netdev);
2815         struct net_device_stats *stats = &netdev->stats;
2816         struct octeon_device *oct;
2817         u64 pkts = 0, drop = 0, bytes = 0;
2818         struct oct_droq_stats *oq_stats;
2819         struct oct_iq_stats *iq_stats;
2820         int i, iq_no, oq_no;
2821
2822         oct = lio->oct_dev;
2823
2824         for (i = 0; i < lio->linfo.num_txpciq; i++) {
2825                 iq_no = lio->linfo.txpciq[i].s.q_no;
2826                 iq_stats = &oct->instr_queue[iq_no]->stats;
2827                 pkts += iq_stats->tx_done;
2828                 drop += iq_stats->tx_dropped;
2829                 bytes += iq_stats->tx_tot_bytes;
2830         }
2831
2832         stats->tx_packets = pkts;
2833         stats->tx_bytes = bytes;
2834         stats->tx_dropped = drop;
2835
2836         pkts = 0;
2837         drop = 0;
2838         bytes = 0;
2839
2840         for (i = 0; i < lio->linfo.num_rxpciq; i++) {
2841                 oq_no = lio->linfo.rxpciq[i].s.q_no;
2842                 oq_stats = &oct->droq[oq_no]->stats;
2843                 pkts += oq_stats->rx_pkts_received;
2844                 drop += (oq_stats->rx_dropped +
2845                          oq_stats->dropped_nodispatch +
2846                          oq_stats->dropped_toomany +
2847                          oq_stats->dropped_nomem);
2848                 bytes += oq_stats->rx_bytes_received;
2849         }
2850
2851         stats->rx_bytes = bytes;
2852         stats->rx_packets = pkts;
2853         stats->rx_dropped = drop;
2854
2855         return stats;
2856 }
2857
2858 /**
2859  * \brief Net device change_mtu
2860  * @param netdev network device
2861  */
2862 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
2863 {
2864         struct lio *lio = GET_LIO(netdev);
2865         struct octeon_device *oct = lio->oct_dev;
2866         struct octnic_ctrl_pkt nctrl;
2867         int ret = 0;
2868
2869         /* Limit the MTU to make sure the Ethernet packets are between 68 bytes
2870          * and 16000 bytes
2871          */
2872         if ((new_mtu < LIO_MIN_MTU_SIZE) ||
2873             (new_mtu > LIO_MAX_MTU_SIZE)) {
2874                 dev_err(&oct->pci_dev->dev, "Invalid MTU: %d\n", new_mtu);
2875                 dev_err(&oct->pci_dev->dev, "Valid range is %d to %d\n",
2876                         LIO_MIN_MTU_SIZE, LIO_MAX_MTU_SIZE);
2877                 return -EINVAL;
2878         }
2879
2880         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2881
2882         nctrl.ncmd.u64 = 0;
2883         nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
2884         nctrl.ncmd.s.param1 = new_mtu;
2885         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2886         nctrl.wait_time = 100;
2887         nctrl.netpndev = (u64)netdev;
2888         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2889
2890         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2891         if (ret < 0) {
2892                 dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
2893                 return -1;
2894         }
2895
2896         lio->mtu = new_mtu;
2897
2898         return 0;
2899 }
2900
2901 /**
2902  * \brief Handler for SIOCSHWTSTAMP ioctl
2903  * @param netdev network device
2904  * @param ifr interface request
2906  */
2907 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2908 {
2909         struct hwtstamp_config conf;
2910         struct lio *lio = GET_LIO(netdev);
2911
2912         if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2913                 return -EFAULT;
2914
2915         if (conf.flags)
2916                 return -EINVAL;
2917
2918         switch (conf.tx_type) {
2919         case HWTSTAMP_TX_ON:
2920         case HWTSTAMP_TX_OFF:
2921                 break;
2922         default:
2923                 return -ERANGE;
2924         }
2925
2926         switch (conf.rx_filter) {
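             /* Requests for PTP-specific filters are widened to
              * HWTSTAMP_FILTER_ALL below, the usual pattern for hardware
              * that timestamps either every received frame or none.
              */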
2927         case HWTSTAMP_FILTER_NONE:
2928                 break;
2929         case HWTSTAMP_FILTER_ALL:
2930         case HWTSTAMP_FILTER_SOME:
2931         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2932         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2933         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2934         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2935         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2936         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2937         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2938         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2939         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2940         case HWTSTAMP_FILTER_PTP_V2_EVENT:
2941         case HWTSTAMP_FILTER_PTP_V2_SYNC:
2942         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2943                 conf.rx_filter = HWTSTAMP_FILTER_ALL;
2944                 break;
2945         default:
2946                 return -ERANGE;
2947         }
2948
2949         if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2950                 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2952         else
2953                 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2954
2955         return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2956 }
2957
2958 /**
2959  * \brief ioctl handler
2960  * @param netdev network device
2961  * @param ifr interface request
2962  * @param cmd command
2963  */
2964 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2965 {
2966         switch (cmd) {
2967         case SIOCSHWTSTAMP:
2968                 return hwtstamp_ioctl(netdev, ifr);
2969         default:
2970                 return -EOPNOTSUPP;
2971         }
2972 }
2973
2974 /**
2975  * \brief handle a Tx timestamp response
2976  * @param status response status
2977  * @param buf pointer to skb
2978  */
2979 static void handle_timestamp(struct octeon_device *oct,
2980                              u32 status,
2981                              void *buf)
2982 {
2983         struct octnet_buf_free_info *finfo;
2984         struct octeon_soft_command *sc;
2985         struct oct_timestamp_resp *resp;
2986         struct lio *lio;
2987         struct sk_buff *skb = (struct sk_buff *)buf;
2988
2989         finfo = (struct octnet_buf_free_info *)skb->cb;
2990         lio = finfo->lio;
2991         sc = finfo->sc;
2992         oct = lio->oct_dev;
2993         resp = (struct oct_timestamp_resp *)sc->virtrptr;
2994
2995         if (status != OCTEON_REQUEST_DONE) {
2996                 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2997                         CVM_CAST64(status));
2998                 resp->timestamp = 0;
2999         }
3000
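        /* Convert the 64-bit timestamp from Octeon byte order to host
         * order (a swap is needed on little-endian hosts).
         */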
3001         octeon_swap_8B_data(&resp->timestamp, 1);
3002
3003         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
3004                 struct skb_shared_hwtstamps ts;
3005                 u64 ns = resp->timestamp;
3006
3007                 netif_info(lio, tx_done, lio->netdev,
3008                            "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
3009                            skb, (unsigned long long)ns);
3010                 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
3011                 skb_tstamp_tx(skb, &ts);
3012         }
3013
3014         octeon_free_soft_command(oct, sc);
3015         tx_buffer_free(skb);
3016 }
3017
/** \brief Send a data packet that will be timestamped
 * @param oct octeon device
 * @param ndata pointer to network data
 * @param finfo pointer to private network data
 */
3023 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
3024                                          struct octnic_data_pkt *ndata,
3025                                          struct octnet_buf_free_info *finfo)
3026 {
3027         int retval;
3028         struct octeon_soft_command *sc;
3029         struct lio *lio;
3030         int ring_doorbell;
3031         u32 len;
3032
3033         lio = finfo->lio;
3034
3035         sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
3036                                             sizeof(struct oct_timestamp_resp));
3037         finfo->sc = sc;
3038
3039         if (!sc) {
3040                 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
3041                 return IQ_SEND_FAILED;
3042         }
3043
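        /* Switch to the response-carrying request type so the firmware
         * returns a completion that carries the Tx timestamp.
         */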
3044         if (ndata->reqtype == REQTYPE_NORESP_NET)
3045                 ndata->reqtype = REQTYPE_RESP_NET;
3046         else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
3047                 ndata->reqtype = REQTYPE_RESP_NET_SG;
3048
3049         sc->callback = handle_timestamp;
3050         sc->callback_arg = finfo->skb;
3051         sc->iq_no = ndata->q_no;
3052
3053         if (OCTEON_CN23XX_PF(oct))
3054                 len = (u32)((struct octeon_instr_ih3 *)
3055                             (&sc->cmd.cmd3.ih3))->dlengsz;
3056         else
3057                 len = (u32)((struct octeon_instr_ih2 *)
3058                             (&sc->cmd.cmd2.ih2))->dlengsz;
3059
3060         ring_doorbell = 1;
3061
3062         retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
3063                                      sc, len, ndata->reqtype);
3064
3065         if (retval == IQ_SEND_FAILED) {
                dev_err(&oct->pci_dev->dev, "timestamp data packet failed, status: %x\n",
                        retval);
3068                 octeon_free_soft_command(oct, sc);
3069         } else {
3070                 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
3071         }
3072
3073         return retval;
3074 }
3075
/** \brief Transmit network packets to the Octeon interface
 * @param skb      skbuff struct with the packet to transmit
 * @param netdev   pointer to network device
 * @returns whether the packet was transmitted to the device okay or not
 *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
3082 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
3083 {
3084         struct lio *lio;
3085         struct octnet_buf_free_info *finfo;
3086         union octnic_cmd_setup cmdsetup;
3087         struct octnic_data_pkt ndata;
3088         struct octeon_device *oct;
3089         struct oct_iq_stats *stats;
3090         struct octeon_instr_irh *irh;
3091         union tx_info *tx_info;
3092         int status = 0;
3093         int q_idx = 0, iq_no = 0;
3094         int j;
3095         u64 dptr = 0;
3096         u32 tag = 0;
3097
3098         lio = GET_LIO(netdev);
3099         oct = lio->oct_dev;
3100
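        /* Pick the input queue: on a multiqueue netdev, map the skb's
         * queue_mapping onto one of the interface's Tx PCI queues and use
         * it as the packet tag; otherwise fall back to the default txq.
         */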
3101         if (netif_is_multiqueue(netdev)) {
3102                 q_idx = skb->queue_mapping;
3103                 q_idx = (q_idx % (lio->linfo.num_txpciq));
3104                 tag = q_idx;
3105                 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
3106         } else {
3107                 iq_no = lio->txq;
3108         }
3109
3110         stats = &oct->instr_queue[iq_no]->stats;
3111
3112         /* Check for all conditions in which the current packet cannot be
3113          * transmitted.
3114          */
3115         if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
3116             (!lio->linfo.link.s.link_up) ||
3117             (skb->len <= 0)) {
3118                 netif_info(lio, tx_err, lio->netdev,
3119                            "Transmit failed link_status : %d\n",
3120                            lio->linfo.link.s.link_up);
3121                 goto lio_xmit_failed;
3122         }
3123
3124         /* Use space in skb->cb to store info used to unmap and
3125          * free the buffers.
3126          */
3127         finfo = (struct octnet_buf_free_info *)skb->cb;
3128         finfo->lio = lio;
3129         finfo->skb = skb;
3130         finfo->sc = NULL;
3131
3132         /* Prepare the attributes for the data to be passed to OSI. */
3133         memset(&ndata, 0, sizeof(struct octnic_data_pkt));
3134
3135         ndata.buf = (void *)finfo;
3136
3137         ndata.q_no = iq_no;
3138
3139         if (netif_is_multiqueue(netdev)) {
3140                 if (octnet_iq_is_full(oct, ndata.q_no)) {
3141                         /* defer sending if queue is full */
3142                         netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
3143                                    ndata.q_no);
3144                         stats->tx_iq_busy++;
3145                         return NETDEV_TX_BUSY;
3146                 }
3147         } else {
3148                 if (octnet_iq_is_full(oct, lio->txq)) {
3149                         /* defer sending if queue is full */
3150                         stats->tx_iq_busy++;
3151                         netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
3152                                    lio->txq);
3153                         return NETDEV_TX_BUSY;
3154                 }
3155         }
3159
3160         ndata.datasize = skb->len;
3161
3162         cmdsetup.u64 = 0;
3163         cmdsetup.s.iq_no = iq_no;
3164
3165         if (skb->ip_summed == CHECKSUM_PARTIAL) {
3166                 if (skb->encapsulation) {
3167                         cmdsetup.s.tnl_csum = 1;
3168                         stats->tx_vxlan++;
3169                 } else {
3170                         cmdsetup.s.transport_csum = 1;
3171                 }
3172         }
3173         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
3174                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
3175                 cmdsetup.s.timestamp = 1;
3176         }
3177
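        /* Linear skbs are sent with a single DMA mapping; skbs with page
         * fragments are described to the hardware through a gather list.
         */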
3178         if (skb_shinfo(skb)->nr_frags == 0) {
3179                 cmdsetup.s.u.datasize = skb->len;
3180                 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
3181
                /* Map the linear packet data for DMA to the device */
3183                 dptr = dma_map_single(&oct->pci_dev->dev,
3184                                       skb->data,
3185                                       skb->len,
3186                                       DMA_TO_DEVICE);
3187                 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
3188                         dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
3189                                 __func__);
3190                         return NETDEV_TX_BUSY;
3191                 }
3192
3193                 if (OCTEON_CN23XX_PF(oct))
3194                         ndata.cmd.cmd3.dptr = dptr;
3195                 else
3196                         ndata.cmd.cmd2.dptr = dptr;
3197                 finfo->dptr = dptr;
3198                 ndata.reqtype = REQTYPE_NORESP_NET;
3199
3200         } else {
3201                 int i, frags;
3202                 struct skb_frag_struct *frag;
3203                 struct octnic_gather *g;
3204
3205                 spin_lock(&lio->glist_lock[q_idx]);
3206                 g = (struct octnic_gather *)
3207                         list_delete_head(&lio->glist[q_idx]);
3208                 spin_unlock(&lio->glist_lock[q_idx]);
3209
3210                 if (!g) {
3211                         netif_info(lio, tx_err, lio->netdev,
3212                                    "Transmit scatter gather: glist null!\n");
3213                         goto lio_xmit_failed;
3214                 }
3215
3216                 cmdsetup.s.gather = 1;
3217                 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
3218                 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
3219
3220                 memset(g->sg, 0, g->sg_size);
3221
3222                 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
3223                                                  skb->data,
3224                                                  (skb->len - skb->data_len),
3225                                                  DMA_TO_DEVICE);
3226                 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
3227                         dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
3228                                 __func__);
3229                         return NETDEV_TX_BUSY;
3230                 }
3231                 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
3232
3233                 frags = skb_shinfo(skb)->nr_frags;
3234                 i = 1;
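                /* Each gather-list entry holds four pointers: (i >> 2)
                 * selects the entry and (i & 3) the slot within it.  Slot 0
                 * of entry 0 already holds the linear part of the skb.
                 */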
3235                 while (frags--) {
3236                         frag = &skb_shinfo(skb)->frags[i - 1];
3237
3238                         g->sg[(i >> 2)].ptr[(i & 3)] =
3239                                 dma_map_page(&oct->pci_dev->dev,
3240                                              frag->page.p,
3241                                              frag->page_offset,
3242                                              frag->size,
3243                                              DMA_TO_DEVICE);
3244
3245                         if (dma_mapping_error(&oct->pci_dev->dev,
3246                                               g->sg[i >> 2].ptr[i & 3])) {
3247                                 dma_unmap_single(&oct->pci_dev->dev,
3248                                                  g->sg[0].ptr[0],
3249                                                  skb->len - skb->data_len,
3250                                                  DMA_TO_DEVICE);
3251                                 for (j = 1; j < i; j++) {
3252                                         frag = &skb_shinfo(skb)->frags[j - 1];
3253                                         dma_unmap_page(&oct->pci_dev->dev,
3254                                                        g->sg[j >> 2].ptr[j & 3],
3255                                                        frag->size,
3256                                                        DMA_TO_DEVICE);
3257                                 }
3258                                 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
3259                                         __func__);
3260                                 return NETDEV_TX_BUSY;
3261                         }
3262
3263                         add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
3264                         i++;
3265                 }
3266
3267                 dma_sync_single_for_device(&oct->pci_dev->dev, g->sg_dma_ptr,
3268                                            g->sg_size, DMA_TO_DEVICE);
3269                 dptr = g->sg_dma_ptr;
3270
3271                 if (OCTEON_CN23XX_PF(oct))
3272                         ndata.cmd.cmd3.dptr = dptr;
3273                 else
3274                         ndata.cmd.cmd2.dptr = dptr;
3275                 finfo->dptr = dptr;
3276                 finfo->g = g;
3277
3278                 ndata.reqtype = REQTYPE_NORESP_NET_SG;
3279         }
3280
3281         if (OCTEON_CN23XX_PF(oct)) {
3282                 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
3283                 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
3284         } else {
3285                 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
3286                 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
3287         }
3288
3289         if (skb_shinfo(skb)->gso_size) {
3290                 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
3291                 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
3292                 stats->tx_gso++;
3293         }
3294
3295         /* HW insert VLAN tag */
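        /* In the VLAN TCI, bits 15..13 carry the 802.1p priority and bits
         * 11..0 the VLAN ID; hence the shift by 13 and the 0xfff mask.
         */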
3296         if (skb_vlan_tag_present(skb)) {
3297                 irh->priority = skb_vlan_tag_get(skb) >> 13;
3298                 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
3299         }
3300
3301         if (unlikely(cmdsetup.s.timestamp))
3302                 status = send_nic_timestamp_pkt(oct, &ndata, finfo);
3303         else
3304                 status = octnet_send_nic_data_pkt(oct, &ndata);
3305         if (status == IQ_SEND_FAILED)
3306                 goto lio_xmit_failed;
3307
3308         netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
3309
3310         if (status == IQ_SEND_STOP)
3311                 stop_q(lio->netdev, q_idx);
3312
3313         netif_trans_update(netdev);
3314
3315         if (skb_shinfo(skb)->gso_size)
3316                 stats->tx_done += skb_shinfo(skb)->gso_segs;
3317         else
3318                 stats->tx_done++;
3319         stats->tx_tot_bytes += skb->len;
3320
3321         return NETDEV_TX_OK;
3322
3323 lio_xmit_failed:
3324         stats->tx_dropped++;
3325         netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
3326                    iq_no, stats->tx_dropped);
3327         if (dptr)
3328                 dma_unmap_single(&oct->pci_dev->dev, dptr,
3329                                  ndata.datasize, DMA_TO_DEVICE);
3330         tx_buffer_free(skb);
3331         return NETDEV_TX_OK;
3332 }
3333
3334 /** \brief Network device Tx timeout
3335  * @param netdev    pointer to network device
3336  */
3337 static void liquidio_tx_timeout(struct net_device *netdev)
3338 {
3339         struct lio *lio;
3340
3341         lio = GET_LIO(netdev);
3342
3343         netif_info(lio, tx_err, lio->netdev,
3344                    "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
3345                    netdev->stats.tx_dropped);
3346         netif_trans_update(netdev);
3347         txqs_wake(netdev);
3348 }
3349
3350 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
3351                                     __be16 proto __attribute__((unused)),
3352                                     u16 vid)
3353 {
3354         struct lio *lio = GET_LIO(netdev);
3355         struct octeon_device *oct = lio->oct_dev;
3356         struct octnic_ctrl_pkt nctrl;
3357         int ret = 0;
3358
3359         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3360
3361         nctrl.ncmd.u64 = 0;
3362         nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
3363         nctrl.ncmd.s.param1 = vid;
3364         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3365         nctrl.wait_time = 100;
3366         nctrl.netpndev = (u64)netdev;
3367         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3368
3369         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3370         if (ret < 0) {
3371                 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
3372                         ret);
3373         }
3374
3375         return ret;
3376 }
3377
3378 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
3379                                      __be16 proto __attribute__((unused)),
3380                                      u16 vid)
3381 {
3382         struct lio *lio = GET_LIO(netdev);
3383         struct octeon_device *oct = lio->oct_dev;
3384         struct octnic_ctrl_pkt nctrl;
3385         int ret = 0;
3386
3387         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3388
3389         nctrl.ncmd.u64 = 0;
3390         nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
3391         nctrl.ncmd.s.param1 = vid;
3392         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3393         nctrl.wait_time = 100;
3394         nctrl.netpndev = (u64)netdev;
3395         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3396
3397         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3398         if (ret < 0) {
                dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
                        ret);
3401         }
3402         return ret;
3403 }
3404
/** Send a command to enable or disable RX checksum offload
 * @param netdev                pointer to network device
 * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
 * @param rx_cmd                OCTNET_CMD_RXCSUM_ENABLE/
 *                              OCTNET_CMD_RXCSUM_DISABLE
 * @returns                     SUCCESS or FAILURE
 */
3412 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
3413                                        u8 rx_cmd)
3414 {
3415         struct lio *lio = GET_LIO(netdev);
3416         struct octeon_device *oct = lio->oct_dev;
3417         struct octnic_ctrl_pkt nctrl;
3418         int ret = 0;

        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

        nctrl.ncmd.u64 = 0;
3421         nctrl.ncmd.s.cmd = command;
3422         nctrl.ncmd.s.param1 = rx_cmd;
3423         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3424         nctrl.wait_time = 100;
3425         nctrl.netpndev = (u64)netdev;
3426         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3427
3428         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3429         if (ret < 0) {
3430                 dev_err(&oct->pci_dev->dev,
3431                         "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
3432                         ret);
3433         }
3434         return ret;
3435 }
3436
/** Send a command to add or delete a VxLAN UDP port to the firmware
 * @param netdev                pointer to network device
 * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
 * @param vxlan_port            VxLAN port to be added or deleted
 * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
 *                              OCTNET_CMD_VXLAN_PORT_DEL
 * @returns                     SUCCESS or FAILURE
 */
3445 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
3446                                        u16 vxlan_port, u8 vxlan_cmd_bit)
3447 {
3448         struct lio *lio = GET_LIO(netdev);
3449         struct octeon_device *oct = lio->oct_dev;
3450         struct octnic_ctrl_pkt nctrl;
3451         int ret = 0;

        memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

        nctrl.ncmd.u64 = 0;
3454         nctrl.ncmd.s.cmd = command;
3455         nctrl.ncmd.s.more = vxlan_cmd_bit;
3456         nctrl.ncmd.s.param1 = vxlan_port;
3457         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3458         nctrl.wait_time = 100;
3459         nctrl.netpndev = (u64)netdev;
3460         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3461
3462         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3463         if (ret < 0) {
3464                 dev_err(&oct->pci_dev->dev,
3465                         "VxLAN port add/delete failed in core (ret:0x%x)\n",
3466                         ret);
3467         }
3468         return ret;
3469 }
3470
3471 /** \brief Net device fix features
3472  * @param netdev  pointer to network device
3473  * @param request features requested
3474  * @returns updated features list
3475  */
3476 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
3477                                                netdev_features_t request)
3478 {
3479         struct lio *lio = netdev_priv(netdev);
3480
3481         if ((request & NETIF_F_RXCSUM) &&
3482             !(lio->dev_capability & NETIF_F_RXCSUM))
3483                 request &= ~NETIF_F_RXCSUM;
3484
3485         if ((request & NETIF_F_HW_CSUM) &&
3486             !(lio->dev_capability & NETIF_F_HW_CSUM))
3487                 request &= ~NETIF_F_HW_CSUM;
3488
3489         if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
3490                 request &= ~NETIF_F_TSO;
3491
3492         if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
3493                 request &= ~NETIF_F_TSO6;
3494
3495         if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
3496                 request &= ~NETIF_F_LRO;
3497
        /* Disable LRO if RXCSUM is off */
3499         if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
3500             (lio->dev_capability & NETIF_F_LRO))
3501                 request &= ~NETIF_F_LRO;
3502
3503         return request;
3504 }
3505
3506 /** \brief Net device set features
3507  * @param netdev  pointer to network device
3508  * @param features features to enable/disable
3509  */
3510 static int liquidio_set_features(struct net_device *netdev,
3511                                  netdev_features_t features)
3512 {
3513         struct lio *lio = netdev_priv(netdev);
3514
3515         if (!((netdev->features ^ features) & NETIF_F_LRO))
3516                 return 0;
3517
3518         if ((features & NETIF_F_LRO) && (lio->dev_capability & NETIF_F_LRO))
3519                 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3520                                      OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3521         else if (!(features & NETIF_F_LRO) &&
3522                  (lio->dev_capability & NETIF_F_LRO))
3523                 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
3524                                      OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3525
        /* Send a command to the firmware to enable or disable RX checksum
         * offload when the setting is changed via ethtool
         */
3529         if (!(netdev->features & NETIF_F_RXCSUM) &&
3530             (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
3531             (features & NETIF_F_RXCSUM))
3532                 liquidio_set_rxcsum_command(netdev,
3533                                             OCTNET_CMD_TNL_RX_CSUM_CTL,
3534                                             OCTNET_CMD_RXCSUM_ENABLE);
3535         else if ((netdev->features & NETIF_F_RXCSUM) &&
3536                  (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
3537                  !(features & NETIF_F_RXCSUM))
3538                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3539                                             OCTNET_CMD_RXCSUM_DISABLE);
3540
3541         return 0;
3542 }
3543
3544 static void liquidio_add_vxlan_port(struct net_device *netdev,
3545                                     struct udp_tunnel_info *ti)
3546 {
3547         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3548                 return;
3549
3550         liquidio_vxlan_port_command(netdev,
3551                                     OCTNET_CMD_VXLAN_PORT_CONFIG,
3552                                     htons(ti->port),
3553                                     OCTNET_CMD_VXLAN_PORT_ADD);
3554 }
3555
3556 static void liquidio_del_vxlan_port(struct net_device *netdev,
3557                                     struct udp_tunnel_info *ti)
3558 {
3559         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3560                 return;
3561
3562         liquidio_vxlan_port_command(netdev,
3563                                     OCTNET_CMD_VXLAN_PORT_CONFIG,
3564                                     htons(ti->port),
3565                                     OCTNET_CMD_VXLAN_PORT_DEL);
3566 }
3567
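/* Not const: setup_nic_devices() plugs in .ndo_select_queue at probe time
 * when an interface has more than one Tx queue.
 */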
3568 static struct net_device_ops lionetdevops = {
3569         .ndo_open               = liquidio_open,
3570         .ndo_stop               = liquidio_stop,
3571         .ndo_start_xmit         = liquidio_xmit,
3572         .ndo_get_stats          = liquidio_get_stats,
3573         .ndo_set_mac_address    = liquidio_set_mac,
3574         .ndo_set_rx_mode        = liquidio_set_mcast_list,
3575         .ndo_tx_timeout         = liquidio_tx_timeout,
3576
3577         .ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3578         .ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3579         .ndo_change_mtu         = liquidio_change_mtu,
3580         .ndo_do_ioctl           = liquidio_ioctl,
3581         .ndo_fix_features       = liquidio_fix_features,
3582         .ndo_set_features       = liquidio_set_features,
3583         .ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
3584         .ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
3585 };
3586
3587 /** \brief Entry point for the liquidio module
3588  */
3589 static int __init liquidio_init(void)
3590 {
3591         int i;
3592         struct handshake *hs;
3593
3594         init_completion(&first_stage);
3595
3596         octeon_init_device_list(conf_type);
3597
3598         if (liquidio_init_pci())
3599                 return -EINVAL;
3600
3601         wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3602
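        /* Wait for each probed device to report the result of its init
         * handshake before proceeding.
         */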
3603         for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3604                 hs = &handshake[i];
3605                 if (hs->pci_dev) {
3606                         wait_for_completion(&hs->init);
3607                         if (!hs->init_ok) {
3608                                 /* init handshake failed */
3609                                 dev_err(&hs->pci_dev->dev,
3610                                         "Failed to init device\n");
3611                                 liquidio_deinit_pci();
3612                                 return -EIO;
3613                         }
3614                 }
3615         }
3616
3617         for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3618                 hs = &handshake[i];
3619                 if (hs->pci_dev) {
3620                         wait_for_completion_timeout(&hs->started,
3621                                                     msecs_to_jiffies(30000));
3622                         if (!hs->started_ok) {
3623                                 /* starter handshake failed */
3624                                 dev_err(&hs->pci_dev->dev,
3625                                         "Firmware failed to start\n");
3626                                 liquidio_deinit_pci();
3627                                 return -EIO;
3628                         }
3629                 }
3630         }
3631
3632         return 0;
3633 }
3634
3635 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3636 {
3637         struct octeon_device *oct = (struct octeon_device *)buf;
3638         struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3639         int gmxport = 0;
3640         union oct_link_status *ls;
3641         int i;
3642
3643         if (recv_pkt->buffer_size[0] != sizeof(*ls)) {
                dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, gmxport=%d\n",
                        recv_pkt->buffer_size[0],
                        recv_pkt->rh.r_nic_info.gmxport);
3647                 goto nic_info_err;
3648         }
3649
3650         gmxport = recv_pkt->rh.r_nic_info.gmxport;
3651         ls = (union oct_link_status *)get_rbd(recv_pkt->buffer_ptr[0]);
3652
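        /* Byte-swap the status words; the length argument is in 8-byte
         * words, hence the shift by 3.
         */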
3653         octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3654         for (i = 0; i < oct->ifcount; i++) {
3655                 if (oct->props[i].gmxport == gmxport) {
3656                         update_link_status(oct->props[i].netdev, ls);
3657                         break;
3658                 }
3659         }
3660
3661 nic_info_err:
3662         for (i = 0; i < recv_pkt->buffer_count; i++)
3663                 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3664         octeon_free_recv_info(recv_info);
3665         return 0;
3666 }
3667
3668 /**
3669  * \brief Setup network interfaces
3670  * @param octeon_dev  octeon device
3671  *
3672  * Called during init time for each device. It assumes the NIC
3673  * is already up and running.  The link information for each
3674  * interface is passed in link_info.
3675  */
3676 static int setup_nic_devices(struct octeon_device *octeon_dev)
3677 {
3678         struct lio *lio = NULL;
3679         struct net_device *netdev;
3680         u8 mac[6], i, j;
3681         struct octeon_soft_command *sc;
3682         struct liquidio_if_cfg_context *ctx;
3683         struct liquidio_if_cfg_resp *resp;
3684         struct octdev_props *props;
3685         int retval, num_iqueues, num_oqueues;
3686         union oct_nic_if_cfg if_cfg;
3687         unsigned int base_queue;
3688         unsigned int gmx_port_id;
3689         u32 resp_size, ctx_size, data_size;
3690         u32 ifidx_or_pfnum;
3691         struct lio_version *vdata;
3692
3693         /* This is to handle link status changes */
3694         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3695                                     OPCODE_NIC_INFO,
3696                                     lio_nic_info, octeon_dev);
3697
3698         /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3699          * They are handled directly.
3700          */
3701         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3702                                         free_netbuf);
3703
3704         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3705                                         free_netsgbuf);
3706
3707         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3708                                         free_netsgbuf_with_resp);
3709
3710         for (i = 0; i < octeon_dev->ifcount; i++) {
3711                 resp_size = sizeof(struct liquidio_if_cfg_resp);
3712                 ctx_size = sizeof(struct liquidio_if_cfg_context);
3713                 data_size = sizeof(struct lio_version);
                sc = (struct octeon_soft_command *)
                        octeon_alloc_soft_command(octeon_dev, data_size,
                                                  resp_size, ctx_size);
                if (!sc) {
                        dev_err(&octeon_dev->pci_dev->dev,
                                "Soft command allocation failed\n");
                        goto setup_nic_wait_intr;
                }

                resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
                ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
                vdata = (struct lio_version *)sc->virtdptr;
3720
3721                 *((u64 *)vdata) = 0;
3722                 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3723                 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3724                 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3725
3726                 if (OCTEON_CN23XX_PF(octeon_dev)) {
3727                         num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3728                         num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3729                         base_queue = octeon_dev->sriov_info.pf_srn;
3730
3731                         gmx_port_id = octeon_dev->pf_num;
3732                         ifidx_or_pfnum = octeon_dev->pf_num;
3733                 } else {
3734                         num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3735                                                 octeon_get_conf(octeon_dev), i);
3736                         num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3737                                                 octeon_get_conf(octeon_dev), i);
3738                         base_queue = CFG_GET_BASE_QUE_NIC_IF(
3739                                                 octeon_get_conf(octeon_dev), i);
3740                         gmx_port_id = CFG_GET_GMXID_NIC_IF(
3741                                                 octeon_get_conf(octeon_dev), i);
3742                         ifidx_or_pfnum = i;
3743                 }
3744
3745                 dev_dbg(&octeon_dev->pci_dev->dev,
3746                         "requesting config for interface %d, iqs %d, oqs %d\n",
3747                         ifidx_or_pfnum, num_iqueues, num_oqueues);
3748                 WRITE_ONCE(ctx->cond, 0);
3749                 ctx->octeon_id = lio_get_device_id(octeon_dev);
3750                 init_waitqueue_head(&ctx->wc);
3751
3752                 if_cfg.u64 = 0;
3753                 if_cfg.s.num_iqueues = num_iqueues;
3754                 if_cfg.s.num_oqueues = num_oqueues;
3755                 if_cfg.s.base_queue = base_queue;
3756                 if_cfg.s.gmx_port_id = gmx_port_id;
3757
3758                 sc->iq_no = 0;
3759
3760                 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3761                                             OPCODE_NIC_IF_CFG, 0,
3762                                             if_cfg.u64, 0);
3763
3764                 sc->callback = if_cfg_callback;
3765                 sc->callback_arg = sc;
3766                 sc->wait_time = 3000;
3767
3768                 retval = octeon_send_soft_command(octeon_dev, sc);
3769                 if (retval == IQ_SEND_FAILED) {
3770                         dev_err(&octeon_dev->pci_dev->dev,
3771                                 "iq/oq config failed status: %x\n",
3772                                 retval);
3773                         /* Soft instr is freed by driver in case of failure. */
3774                         goto setup_nic_dev_fail;
3775                 }
3776
                /* Sleep on a wait queue until the cond flag indicates that
                 * the response arrived or the request timed out.
                 */
3780                 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
3781                         dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
3782                         goto setup_nic_wait_intr;
3783                 }
3784
3785                 retval = resp->status;
3786                 if (retval) {
3787                         dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3788                         goto setup_nic_dev_fail;
3789                 }
3790
3791                 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3792                                     (sizeof(struct liquidio_if_cfg_info)) >> 3);
3793
3794                 num_iqueues = hweight64(resp->cfg_info.iqmask);
3795                 num_oqueues = hweight64(resp->cfg_info.oqmask);
3796
3797                 if (!(num_iqueues) || !(num_oqueues)) {
3798                         dev_err(&octeon_dev->pci_dev->dev,
3799                                 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3800                                 resp->cfg_info.iqmask,
3801                                 resp->cfg_info.oqmask);
3802                         goto setup_nic_dev_fail;
3803                 }
3804                 dev_dbg(&octeon_dev->pci_dev->dev,
3805                         "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
3806                         i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3807                         num_iqueues, num_oqueues);
3808                 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
3809
3810                 if (!netdev) {
3811                         dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3812                         goto setup_nic_dev_fail;
3813                 }
3814
3815                 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3816
3817                 if (num_iqueues > 1)
3818                         lionetdevops.ndo_select_queue = select_q;
3819
3820                 /* Associate the routines that will handle different
3821                  * netdev tasks.
3822                  */
3823                 netdev->netdev_ops = &lionetdevops;
3824
3825                 lio = GET_LIO(netdev);
3826
3827                 memset(lio, 0, sizeof(struct lio));
3828
3829                 lio->ifidx = ifidx_or_pfnum;
3830
3831                 props = &octeon_dev->props[i];
3832                 props->gmxport = resp->cfg_info.linfo.gmxport;
3833                 props->netdev = netdev;
3834
3835                 lio->linfo.num_rxpciq = num_oqueues;
3836                 lio->linfo.num_txpciq = num_iqueues;
3837                 for (j = 0; j < num_oqueues; j++) {
3838                         lio->linfo.rxpciq[j].u64 =
3839                                 resp->cfg_info.linfo.rxpciq[j].u64;
3840                 }
3841                 for (j = 0; j < num_iqueues; j++) {
3842                         lio->linfo.txpciq[j].u64 =
3843                                 resp->cfg_info.linfo.txpciq[j].u64;
3844                 }
3845                 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3846                 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3847                 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3848
3849                 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3850
3851                 if (OCTEON_CN23XX_PF(octeon_dev) ||
3852                     OCTEON_CN6XXX(octeon_dev)) {
3853                         lio->dev_capability = NETIF_F_HIGHDMA
3854                                               | NETIF_F_IP_CSUM
3855                                               | NETIF_F_IPV6_CSUM
3856                                               | NETIF_F_SG | NETIF_F_RXCSUM
3857                                               | NETIF_F_GRO
3858                                               | NETIF_F_TSO | NETIF_F_TSO6
3859                                               | NETIF_F_LRO;
3860                 }
3861                 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3862
3863                 /*  Copy of transmit encapsulation capabilities:
3864                  *  TSO, TSO6, Checksums for this device
3865                  */
3866                 lio->enc_dev_capability = NETIF_F_IP_CSUM
3867                                           | NETIF_F_IPV6_CSUM
3868                                           | NETIF_F_GSO_UDP_TUNNEL
3869                                           | NETIF_F_HW_CSUM | NETIF_F_SG
3870                                           | NETIF_F_RXCSUM
3871                                           | NETIF_F_TSO | NETIF_F_TSO6
3872                                           | NETIF_F_LRO;
3873
3874                 netdev->hw_enc_features = (lio->enc_dev_capability &
3875                                            ~NETIF_F_LRO);
3876
3877                 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3878
3879                 netdev->vlan_features = lio->dev_capability;
3880                 /* Add any unchangeable hw features */
3881                 lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3882                                         NETIF_F_HW_VLAN_CTAG_RX |
3883                                         NETIF_F_HW_VLAN_CTAG_TX;
3884
3885                 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3886
3887                 netdev->hw_features = lio->dev_capability;
                /* HW_VLAN_RX and HW_VLAN_FILTER are always on */
3889                 netdev->hw_features = netdev->hw_features &
3890                         ~NETIF_F_HW_VLAN_CTAG_RX;
3891
                /* Point to the properties for the octeon device to which this
3893                  * interface belongs.
3894                  */
3895                 lio->oct_dev = octeon_dev;
3896                 lio->octprops = props;
3897                 lio->netdev = netdev;
3898
3899                 dev_dbg(&octeon_dev->pci_dev->dev,
3900                         "if%d gmx: %d hw_addr: 0x%llx\n", i,
3901                         lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3902
3903                 /* 64-bit swap required on LE machines */
3904                 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
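                /* The MAC address occupies the low six bytes of the 64-bit
                 * hw_addr, so skip the two high-order bytes.
                 */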
3905                 for (j = 0; j < 6; j++)
3906                         mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3907
3908                 /* Copy MAC Address to OS network device structure */
3909
3910                 ether_addr_copy(netdev->dev_addr, mac);
3911
                /* By default all interfaces on a single Octeon use the same
                 * tx and rx queues
                 */
3915                 lio->txq = lio->linfo.txpciq[0].s.q_no;
3916                 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3917                 if (setup_io_queues(octeon_dev, i)) {
3918                         dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3919                         goto setup_nic_dev_fail;
3920                 }
3921
3922                 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3923
3924                 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3925                 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3926
3927                 if (setup_glists(octeon_dev, lio, num_iqueues)) {
3928                         dev_err(&octeon_dev->pci_dev->dev,
3929                                 "Gather list allocation failed\n");
3930                         goto setup_nic_dev_fail;
3931                 }
3932
3933                 /* Register ethtool support */
3934                 liquidio_set_ethtool_ops(netdev);
3935                 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3936                         octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3937                 else
3938                         octeon_dev->priv_flags = 0x0;
3939
3940                 if (netdev->features & NETIF_F_LRO)
3941                         liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3942                                              OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3943
3944                 liquidio_set_feature(netdev, OCTNET_CMD_ENABLE_VLAN_FILTER, 0);
3945
3946                 if ((debug != -1) && (debug & NETIF_MSG_HW))
3947                         liquidio_set_feature(netdev,
3948                                              OCTNET_CMD_VERBOSE_ENABLE, 0);
3949
3950                 if (setup_link_status_change_wq(netdev))
3951                         goto setup_nic_dev_fail;
3952
3953                 /* Register the network device with the OS */
3954                 if (register_netdev(netdev)) {
3955                         dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3956                         goto setup_nic_dev_fail;
3957                 }
3958
3959                 dev_dbg(&octeon_dev->pci_dev->dev,
3960                         "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3961                         i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3962                 netif_carrier_off(netdev);
3963                 lio->link_changes++;
3964
3965                 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3966
                /* Send a command to the firmware to enable Rx checksum
                 * offload by default when the driver sets up this device
                 */
3971                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3972                                             OCTNET_CMD_RXCSUM_ENABLE);
3973                 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3974                                      OCTNET_CMD_TXCSUM_ENABLE);
3975
3976                 dev_dbg(&octeon_dev->pci_dev->dev,
3977                         "NIC ifidx:%d Setup successful\n", i);
3978
3979                 octeon_free_soft_command(octeon_dev, sc);
3980         }
3981
3982         return 0;
3983
3984 setup_nic_dev_fail:
3985
3986         octeon_free_soft_command(octeon_dev, sc);
3987
3988 setup_nic_wait_intr:
3989
3990         while (i--) {
3991                 dev_err(&octeon_dev->pci_dev->dev,
3992                         "NIC ifidx:%d Setup failed\n", i);
3993                 liquidio_destroy_nic_device(octeon_dev, i);
3994         }
3995         return -ENODEV;
3996 }
3997
3998 /**
3999  * \brief initialize the NIC
4000  * @param oct octeon device
4001  *
4002  * This initialization routine is called once the Octeon device application is
4003  * up and running
4004  */
4005 static int liquidio_init_nic_module(struct octeon_device *oct)
4006 {
4007         struct oct_intrmod_cfg *intrmod_cfg;
4008         int i, retval = 0;
4009         int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
4010
4011         dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
4012
        /* Only the default iq and oq were initialized during probe;
         * initialize the rest as well, then run the port_config command
         * for each port.
         */
4017         oct->ifcount = num_nic_ports;
4018
4019         memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
4020
4021         for (i = 0; i < MAX_OCTEON_LINKS; i++)
4022                 oct->props[i].gmxport = -1;
4023
4024         retval = setup_nic_devices(oct);
4025         if (retval) {
4026                 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
4027                 goto octnet_init_failure;
4028         }
4029
4030         liquidio_ptp_init(oct);
4031
4032         /* Initialize interrupt moderation params */
4033         intrmod_cfg = &((struct octeon_device *)oct)->intrmod;
4034         intrmod_cfg->rx_enable = 1;
4035         intrmod_cfg->check_intrvl = LIO_INTRMOD_CHECK_INTERVAL;
4036         intrmod_cfg->maxpkt_ratethr = LIO_INTRMOD_MAXPKT_RATETHR;
4037         intrmod_cfg->minpkt_ratethr = LIO_INTRMOD_MINPKT_RATETHR;
4038         intrmod_cfg->rx_maxcnt_trigger = LIO_INTRMOD_RXMAXCNT_TRIGGER;
4039         intrmod_cfg->rx_maxtmr_trigger = LIO_INTRMOD_RXMAXTMR_TRIGGER;
4040         intrmod_cfg->rx_mintmr_trigger = LIO_INTRMOD_RXMINTMR_TRIGGER;
4041         intrmod_cfg->rx_mincnt_trigger = LIO_INTRMOD_RXMINCNT_TRIGGER;
4042         intrmod_cfg->tx_enable = 1;
4043         intrmod_cfg->tx_maxcnt_trigger = LIO_INTRMOD_TXMAXCNT_TRIGGER;
4044         intrmod_cfg->tx_mincnt_trigger = LIO_INTRMOD_TXMINCNT_TRIGGER;
4045         intrmod_cfg->rx_frames = CFG_GET_OQ_INTR_PKT(octeon_get_conf(oct));
4046         intrmod_cfg->rx_usecs = CFG_GET_OQ_INTR_TIME(octeon_get_conf(oct));
4047         intrmod_cfg->tx_frames = CFG_GET_IQ_INTR_PKT(octeon_get_conf(oct));
4048         dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
4049
4050         return retval;
4051
4052 octnet_init_failure:
4053
4054         oct->ifcount = 0;
4055
4056         return retval;
4057 }
4058
4059 /**
4060  * \brief starter callback that invokes the remaining initialization work after
4061  * the NIC is up and running.
 * @param work  pointer to the work_struct embedded in struct cavium_wk
4063  */
4064 static void nic_starter(struct work_struct *work)
4065 {
4066         struct octeon_device *oct;
4067         struct cavium_wk *wk = (struct cavium_wk *)work;
4068
4069         oct = (struct octeon_device *)wk->ctxptr;
4070
4071         if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
4072                 return;
4073
4074         /* If the status of the device is CORE_OK, the core
4075          * application has reported its application type. Call
4076          * any registered handlers now and move to the RUNNING
4077          * state.
4078          */
4079         if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
4080                 schedule_delayed_work(&oct->nic_poll_work.work,
4081                                       LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4082                 return;
4083         }
4084
4085         atomic_set(&oct->status, OCT_DEV_RUNNING);
4086
        if (oct->app_mode == CVM_DRV_NIC_APP) {
4088                 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
4089
4090                 if (liquidio_init_nic_module(oct))
4091                         dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
4092                 else
4093                         handshake[oct->octeon_id].started_ok = 1;
4094         } else {
4095                 dev_err(&oct->pci_dev->dev,
4096                         "Unexpected application running on NIC (%d). Check firmware.\n",
4097                         oct->app_mode);
4098         }
4099
4100         complete(&handshake[oct->octeon_id].started);
4101 }
4102
4103 /**
4104  * \brief Device initialization for each Octeon device that is probed
4105  * @param octeon_dev  octeon device
4106  */
4107 static int octeon_device_init(struct octeon_device *octeon_dev)
4108 {
4109         int j, ret;
4110         int fw_loaded = 0;
4111         char bootcmd[] = "\n";
4112         struct octeon_device_priv *oct_priv =
4113                 (struct octeon_device_priv *)octeon_dev->priv;
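
        /* Bring-up walks the OCT_DEV_* states in order; each stage below
         * advances octeon_dev->status once its resources are ready.
         */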
4114         atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4115
4116         /* Enable access to the octeon device and make its DMA capability
4117          * known to the OS.
4118          */
4119         if (octeon_pci_os_setup(octeon_dev))
4120                 return 1;
4121
4122         /* Identify the Octeon type and map the BAR address space. */
4123         if (octeon_chip_specific_setup(octeon_dev)) {
4124                 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4125                 return 1;
4126         }
4127
4128         atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4129
4130         octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4131
4132         if (OCTEON_CN23XX_PF(octeon_dev)) {
4133                 if (!cn23xx_fw_loaded(octeon_dev)) {
4134                         fw_loaded = 0;
4135                         /* Do a soft reset of the Octeon device. */
4136                         if (octeon_dev->fn_list.soft_reset(octeon_dev))
4137                                 return 1;
4138                         /* things might have changed */
4139                         if (!cn23xx_fw_loaded(octeon_dev))
4140                                 fw_loaded = 0;
4141                         else
4142                                 fw_loaded = 1;
4143                 } else {
4144                         fw_loaded = 1;
4145                 }
4146         } else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
4147                 return 1;
4148         }
4149
4150         /* Initialize the dispatch mechanism used to push packets arriving on
4151          * Octeon Output queues.
4152          */
4153         if (octeon_init_dispatch_list(octeon_dev))
4154                 return 1;
4155
4156         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4157                                     OPCODE_NIC_CORE_DRV_ACTIVE,
4158                                     octeon_core_drv_init,
4159                                     octeon_dev);
4160
4161         INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4162         octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4163         schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4164                               LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4165
4166         atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4167
4168         octeon_set_io_queues_off(octeon_dev);
4169
4170         if (OCTEON_CN23XX_PF(octeon_dev)) {
4171                 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4172                 if (ret) {
4173                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4174                         return ret;
4175                 }
4176         }
4177
        /* Initialize soft command buffer pool */
4180         if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4181                 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4182                 return 1;
4183         }
4184         atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4185
        /* Setup the data structures that manage this Octeon's input queues. */
4187         if (octeon_setup_instr_queues(octeon_dev)) {
4188                 dev_err(&octeon_dev->pci_dev->dev,
4189                         "instruction queue initialization failed\n");
4190                 /* On error, release any previously allocated queues */
4191                 for (j = 0; j < octeon_dev->num_iqs; j++)
4192                         octeon_delete_instr_queue(octeon_dev, j);
4193                 return 1;
4194         }
4195         atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4196
4197         /* Initialize lists to manage the requests of different types that
4198          * arrive from user & kernel applications for this octeon device.
4199          */
4200         if (octeon_setup_response_list(octeon_dev)) {
4201                 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4202                 return 1;
4203         }
4204         atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4205
4206         if (octeon_setup_output_queues(octeon_dev)) {
4207                 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4208                 /* Release any previously allocated queues */
4209                 for (j = 0; j < octeon_dev->num_oqs; j++)
4210                         octeon_delete_droq(octeon_dev, j);
4211                 return 1;
4212         }
4213
4214         atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4215
4216         if (OCTEON_CN23XX_PF(octeon_dev)) {
4217                 if (octeon_allocate_ioq_vector(octeon_dev)) {
4218                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4219                         return 1;
4220                 }
4221
4222         } else {
4223                 /* The input and output queue registers were setup earlier (the
4224                  * queues were not enabled). Any additional registers
4225                  * that need to be programmed should be done now.
4226                  */
4227                 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4228                 if (ret) {
4229                         dev_err(&octeon_dev->pci_dev->dev,
4230                                 "Failed to configure device registers\n");
4231                         return ret;
4232                 }
4233         }
4234
        /* Initialize the tasklet that handles output queue packet processing. */
4236         dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4237         tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4238                      (unsigned long)octeon_dev);
4239
4240         /* Setup the interrupt handler and record the INT SUM register address
4241          */
4242         if (octeon_setup_interrupt(octeon_dev))
4243                 return 1;
4244
4245         /* Enable Octeon device interrupts */
4246         octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4247
4248         /* Enable the input and output queues for this Octeon device */
4249         ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4250         if (ret) {
4251                 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4252                 return ret;
4253         }
4254
4255         atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4256
4257         if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
4258                 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4259                 if (!ddr_timeout) {
4260                         dev_info(&octeon_dev->pci_dev->dev,
4261                                  "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4262                 }
4263
4264                 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4265
                /* If ddr_timeout is 0, wait here until the user sets it to a
                 * non-zero value (the module parameter is writable at
                 * runtime); only then start checking for DDR initialization.
                 */
                while (!ddr_timeout) {
4268                         set_current_state(TASK_INTERRUPTIBLE);
4269                         if (schedule_timeout(HZ / 10)) {
4270                                 /* user probably pressed Control-C */
4271                                 return 1;
4272                         }
4273                 }
4274                 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4275                 if (ret) {
4276                         dev_err(&octeon_dev->pci_dev->dev,
4277                                 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4278                                 ret);
4279                         return 1;
4280                 }
4281
4282                 if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4283                         dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4284                         return 1;
4285                 }
4286
4287                 /* Divert uboot to take commands from host instead. */
4288                 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4289
4290                 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4291                 ret = octeon_init_consoles(octeon_dev);
4292                 if (ret) {
4293                         dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4294                         return 1;
4295                 }
4296                 ret = octeon_add_console(octeon_dev, 0);
4297                 if (ret) {
4298                         dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4299                         return 1;
4300                 }
4301
4302                 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4303
4304                 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4305                 ret = load_firmware(octeon_dev);
4306                 if (ret) {
4307                         dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4308                         return 1;
4309                 }
4310                 /* set bit 1 of SLI_SCRATCH_1 to indicate that firmware is
4311                  * loaded
4312                  */
4313                 if (OCTEON_CN23XX_PF(octeon_dev))
4314                         octeon_write_csr64(octeon_dev, CN23XX_SLI_SCRATCH1,
4315                                            2ULL);
4316         }
4317
4318         handshake[octeon_dev->octeon_id].init_ok = 1;
4319         complete(&handshake[octeon_dev->octeon_id].init);
4320
4321         atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4322
4323         /* Send Credit for Octeon Output queues. Credits are always sent after
4324          * the output queue is enabled.
4325          */
4326         for (j = 0; j < octeon_dev->num_oqs; j++)
4327                 writel(octeon_dev->droq[j]->max_count,
4328                        octeon_dev->droq[j]->pkts_credit_reg);
4329
4330         /* Packets can start arriving on the output queues from this point. */
4331         return 0;
4332 }
4333
4334 /**
4335  * \brief Exits the module
4336  */
4337 static void __exit liquidio_exit(void)
4338 {
4339         liquidio_deinit_pci();
4340
4341         pr_info("LiquidIO network module is now unloaded\n");
4342 }
4343
4344 module_init(liquidio_init);
4345 module_exit(liquidio_exit);