/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
                 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_NIC;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\".  Use \"none\" to load firmware from flash.");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
                 "Bitmask indicating which consoles have debug output redirected to syslog.");

/**
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns  1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
        return (console_bitmask >> (console)) & 0x1;
}

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000

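/** Context shared between the sender of an interface-config request and
 *  its completion callback; the sender sleeps on @wc until @cond is set.
 */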
struct liquidio_if_cfg_context {
        int octeon_id;

        wait_queue_head_t wc;

        int cond;
};

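/** Response format for an interface-config request. */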
struct liquidio_if_cfg_resp {
        u64 rh;
        struct liquidio_if_cfg_info cfg_info;
        u64 status;
};

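/** Context shared between the sender of an Rx control command and its
 *  completion callback; the sender sleeps on @wc until @cond is set.
 */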
struct liquidio_rx_ctl_context {
        int octeon_id;

        wait_queue_head_t wc;

        int cond;
};

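/** Response format for a link status request. */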
struct oct_link_status_resp {
        u64 rh;
        struct oct_link_info link_info;
        u64 status;
};

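/** Response carrying a packet timestamp. */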
struct oct_timestamp_resp {
        u64 rh;
        u64 timestamp;
        u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

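/** GSO parameters (segment size and count) carried with a Tx command. */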
union tx_info {
        u64 u64;
        struct {
#ifdef __BIG_ENDIAN_BITFIELD
                u16 gso_size;
                u16 gso_segs;
                u32 reserved;
#else
                u32 reserved;
                u16 gso_segs;
                u16 gso_size;
#endif
        } s;
};

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
        (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
        /** List manipulation. Next and prev pointers. */
        struct list_head list;

        /** Size of the gather component at sg in bytes. */
        int sg_size;

        /** Number of bytes that sg was adjusted to make it 8B-aligned. */
        int adjust;

        /** Gather component that can accommodate max sized fragment list
         *  received from the IP layer.
         */
        struct octeon_sg_entry *sg;

        dma_addr_t sg_dma_ptr;
};

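/** Tracks the init and start stages of a device's bring-up so module
 *  load can synchronize with the per-device starter threads.
 */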
struct handshake {
        struct completion init;
        struct completion started;
        struct pci_dev *pci_dev;
        int init_ok;
        int started_ok;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
                                    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
                          const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
                                      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

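/**
 * \brief DROQ packet-processing tasklet (bottom half)
 * @param pdev Pointer to Octeon device, passed as an unsigned long
 *
 * Processes up to MAX_PACKET_BUDGET packets on each active output queue,
 * re-enables the queue's interrupt, and reschedules itself if packets
 * remain.
 */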
static void octeon_droq_bh(unsigned long pdev)
{
        int q_no;
        int reschedule = 0;
        struct octeon_device *oct = (struct octeon_device *)pdev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
                if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
                        continue;
                reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
                                                          MAX_PACKET_BUDGET);
                lio_enable_irq(oct->droq[q_no], NULL);

                if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
                        /* set time and cnt interrupt thresholds for this DROQ
                         * for NAPI
                         */
                        int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
                            0x5700000040ULL);
                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
                }
        }

        if (reschedule)
                tasklet_schedule(&oct_priv->droq_tasklet);
}

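/**
 * \brief Wait for pending output queue packets to be processed
 * @param oct Pointer to Octeon device
 *
 * Polls the hardware for pending packets, rescheduling the droq tasklet
 * as needed, until no packets remain or the retry limit is reached.
 */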
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        int retry = 100, pkt_cnt = 0, pending_pkts = 0;
        int i;

        do {
                pending_pkts = 0;

                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
                }
                if (pkt_cnt > 0) {
                        pending_pkts += pkt_cnt;
                        tasklet_schedule(&oct_priv->droq_tasklet);
                }
                pkt_cnt = 0;
                schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
        if ((oct->chip_id == OCTEON_CN66XX) ||
            (oct->chip_id == OCTEON_CN68XX)) {
                /* Reset the Enable bits for Input Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

                /* Reset the Enable bits for Output Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
        }
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
        int i;

        /* Disable the input and output queues now. No more packets will
         * arrive from Octeon, but we should wait for all packet processing
         * to finish.
         */
        force_io_queues_off(oct);

        /* To allow for in-flight requests */
        schedule_timeout_uninterruptible(100);

        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");

        /* Force all requests waiting to be fetched by OCTEON to complete. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                struct octeon_instr_queue *iq;

                if (!(oct->io_qmask.iq & BIT_ULL(i)))
                        continue;
                iq = oct->instr_queue[i];

                if (atomic_read(&iq->instr_pending)) {
                        spin_lock_bh(&iq->lock);
                        iq->fill_cnt = 0;
                        iq->octeon_read_index = iq->host_write_index;
                        iq->stats.instr_processed +=
                                atomic_read(&iq->instr_pending);
                        lio_process_iq_request_list(oct, iq, 0);
                        spin_unlock_bh(&iq->lock);
                }
        }

        /* Force all pending ordered list requests to time out. */
        lio_process_ordered_list(oct, 1);

        /* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
        int pos = 0x100;
        u32 status, mask;

        pr_info("%s :\n", __func__);

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
        if (dev->error_state == pci_channel_io_normal)
                status &= ~mask;        /* Clear corresponding nonfatal bits */
        else
                status &= mask;         /* Clear corresponding fatal bits */
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
        /* No more instructions will be forwarded. */
        atomic_set(&oct->status, OCT_DEV_IN_RESET);

        pci_disable_device(oct->pci_dev);

        /* Disable interrupts  */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        pcierror_quiesce_device(oct);

        /* Release the interrupt line */
        free_irq(oct->pci_dev->irq, oct);

        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                pci_disable_msi(oct->pci_dev);

        dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                lio_get_state_string(&oct->status));

        /* making it a common function for all OCTEON models */
        cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
                                                     pci_channel_state_t state)
{
        struct octeon_device *oct = pci_get_drvdata(pdev);

        /* Non-correctable Non-fatal errors */
        if (state == pci_channel_io_normal) {
                dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
                cleanup_aer_uncorrect_error_status(oct->pci_dev);
                return PCI_ERS_RESULT_CAN_RECOVER;
        }

        /* Non-correctable Fatal errors */
        dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
        stop_pci_io(oct);

        /* Always return a DISCONNECT. There is no support for recovery,
         * only for a clean shutdown.
         */
        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
                                struct pci_dev *pdev __attribute__((unused)))
{
        /* We should never hit this since we never ask for a reset for a Fatal
         * Error. We always return DISCONNECT in io_error above.
         * But play safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
                                struct pci_dev *pdev __attribute__((unused)))
{
        /* We should never hit this since we never ask for a reset for a Fatal
         * Error. We always return DISCONNECT in io_error above.
         * But play safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
        /* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
                            pm_message_t state __attribute__((unused)))
{
        return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
        return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
        .error_detected = liquidio_pcie_error_detected,
        .mmio_enabled   = liquidio_pcie_mmio_enabled,
        .slot_reset     = liquidio_pcie_slot_reset,
        .resume         = liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
        {       /* 68xx */
                PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 66xx */
                PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 23xx pf */
                PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {
                0, 0, 0, 0, 0, 0, 0
        }
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
        .name           = "LiquidIO",
        .id_table       = liquidio_pci_tbl,
        .probe          = liquidio_probe,
        .remove         = liquidio_remove,
        .err_handler    = &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
        .suspend        = liquidio_suspend,
        .resume         = liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
        .sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
        return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
        pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
        if (netif_is_multiqueue(netdev)) {
                int i;

                for (i = 0; i < netdev->num_tx_queues; i++)
                        netif_stop_subqueue(netdev, i);
        } else {
                netif_stop_queue(netdev);
        }
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
        if (netif_is_multiqueue(netdev)) {
                int i;

                for (i = 0; i < netdev->num_tx_queues; i++)
                        netif_start_subqueue(netdev, i);
        } else {
                netif_start_queue(netdev);
        }
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (netif_is_multiqueue(netdev)) {
                int i;

                for (i = 0; i < netdev->num_tx_queues; i++) {
                        int qno = lio->linfo.txpciq[i %
                                lio->oct_dev->num_iqs].s.q_no;

                        if (__netif_subqueue_stopped(netdev, i)) {
                                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
                                                          tx_restart, 1);
                                netif_wake_subqueue(netdev, i);
                        }
                }
        } else {
                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
                                          tx_restart, 1);
                netif_wake_queue(netdev);
        }
}

/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
        txqs_stop(netdev);
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->linfo.link.s.link_up) {
                txqs_start(netdev);
                return;
        }
}

/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
        if (netif_is_multiqueue(netdev))
                netif_wake_subqueue(netdev, q);
        else
                netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
        if (netif_is_multiqueue(netdev))
                netif_stop_subqueue(netdev, q);
        else
                netif_stop_queue(netdev);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
        int ret_val = 0;

        if (netif_is_multiqueue(lio->netdev)) {
                int numqs = lio->netdev->num_tx_queues;
                int q, iq = 0;

                /* check each sub-queue state */
                for (q = 0; q < numqs; q++) {
                        iq = lio->linfo.txpciq[q %
                                lio->oct_dev->num_iqs].s.q_no;
                        if (octnet_iq_is_full(lio->oct_dev, iq))
                                continue;
                        if (__netif_subqueue_stopped(lio->netdev, q)) {
                                wake_q(lio->netdev, q);
                                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
                                                          tx_restart, 1);
                                ret_val++;
                        }
                }
        } else {
                if (octnet_iq_is_full(lio->oct_dev, lio->txq))
                        return 0;
                wake_q(lio->netdev, lio->txq);
                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
                                          tx_restart, 1);
                ret_val = 1;
        }
        return ret_val;
}

/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
        struct list_head *node;

        if ((root->prev == root) && (root->next == root))
                node = NULL;
        else
                node = root->next;

        if (node)
                list_del(node);

        return node;
}

/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
        struct octnic_gather *g;
        int i;

        kfree(lio->glist_lock);
        lio->glist_lock = NULL;

        if (!lio->glist)
                return;

        for (i = 0; i < lio->linfo.num_txpciq; i++) {
                do {
                        g = (struct octnic_gather *)
                                list_delete_head(&lio->glist[i]);
                        if (g)
                                kfree(g);
                } while (g);

                if (lio->glists_virt_base && lio->glists_virt_base[i] &&
                    lio->glists_dma_base && lio->glists_dma_base[i]) {
                        lio_dma_free(lio->oct_dev,
                                     lio->glist_entry_size * lio->tx_qsize,
                                     lio->glists_virt_base[i],
                                     lio->glists_dma_base[i]);
                }
        }

        kfree(lio->glists_virt_base);
        lio->glists_virt_base = NULL;

        kfree(lio->glists_dma_base);
        lio->glists_dma_base = NULL;

        kfree(lio->glist);
        lio->glist = NULL;
}

/**
 * \brief Setup gather lists
 * @param oct octeon device
 * @param lio per-network private data
 * @param num_iqs number of input queues to set up gather lists for
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
        int i, j;
        struct octnic_gather *g;

        lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
                                  GFP_KERNEL);
        if (!lio->glist_lock)
                return -ENOMEM;

        lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
                             GFP_KERNEL);
        if (!lio->glist) {
                kfree(lio->glist_lock);
                lio->glist_lock = NULL;
                return -ENOMEM;
        }

        lio->glist_entry_size =
                ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

        /* allocate memory to store virtual and dma base address of
         * per glist consistent memory
         */
        lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
                                        GFP_KERNEL);
        lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
                                       GFP_KERNEL);

        if (!lio->glists_virt_base || !lio->glists_dma_base) {
                delete_glists(lio);
                return -ENOMEM;
        }

        for (i = 0; i < num_iqs; i++) {
                int numa_node = dev_to_node(&oct->pci_dev->dev);

                spin_lock_init(&lio->glist_lock[i]);

                INIT_LIST_HEAD(&lio->glist[i]);

                lio->glists_virt_base[i] =
                        lio_dma_alloc(oct,
                                      lio->glist_entry_size * lio->tx_qsize,
                                      &lio->glists_dma_base[i]);

                if (!lio->glists_virt_base[i]) {
                        delete_glists(lio);
                        return -ENOMEM;
                }

                for (j = 0; j < lio->tx_qsize; j++) {
                        g = kzalloc_node(sizeof(*g), GFP_KERNEL,
                                         numa_node);
                        if (!g)
                                g = kzalloc(sizeof(*g), GFP_KERNEL);
                        if (!g)
                                break;

                        g->sg = lio->glists_virt_base[i] +
                                (j * lio->glist_entry_size);

                        g->sg_dma_ptr = lio->glists_dma_base[i] +
                                        (j * lio->glist_entry_size);

                        list_add_tail(&g->list, &lio->glist[i]);
                }

                if (j != lio->tx_qsize) {
                        delete_glists(lio);
                        return -ENOMEM;
                }
        }

        return 0;
}

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
            ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
                struct oct_link_info *linfo = &lio->linfo;

                if (linfo->link.s.link_up) {
                        netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
                                   linfo->link.s.speed,
                                   (linfo->link.s.duplex) ? "Full" : "Half");
                } else {
                        netif_info(lio, link, lio->netdev, "Link Down\n");
                }
        }
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;

        rtnl_lock();
        call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
        rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->link_status_wq.wq = alloc_workqueue("link-status",
                                                 WQ_MEM_RECLAIM, 0);
        if (!lio->link_status_wq.wq) {
                dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
                          octnet_link_status_change);
        lio->link_status_wq.wk.ctxptr = lio;

        return 0;
}

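/**
 * \brief Tears down the link status change workqueue
 * @param netdev network device
 */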
static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->link_status_wq.wq) {
                cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
                destroy_workqueue(lio->link_status_wq.wq);
        }
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
                                      union oct_link_status *ls)
{
        struct lio *lio = GET_LIO(netdev);
        int changed = (lio->linfo.link.u64 != ls->u64);

        lio->linfo.link.u64 = ls->u64;

        if ((lio->intf_open) && (changed)) {
                print_link_info(netdev);
                lio->link_changes++;

                if (lio->linfo.link.s.link_up) {
                        netif_carrier_on(netdev);
                        txqs_wake(netdev);
                } else {
                        netif_carrier_off(netdev);
                        stop_txq(netdev);
                }
        }
}

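/**
 * \brief Find the companion Octeon device of a given device
 * @param oct Pointer to Octeon device
 * @returns the device with the next device id, if it shares this
 *          device's PCI bus and slot; NULL otherwise
 */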
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
        struct octeon_device *other_oct;

        other_oct = lio_get_device(oct->octeon_id + 1);

        if (other_oct && other_oct->pci_dev) {
                int oct_busnum, other_oct_busnum;

                oct_busnum = oct->pci_dev->bus->number;
                other_oct_busnum = other_oct->pci_dev->bus->number;

                if (oct_busnum == other_oct_busnum) {
                        int oct_slot, other_oct_slot;

                        oct_slot = PCI_SLOT(oct->pci_dev->devfn);
                        other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

                        if (oct_slot == other_oct_slot)
                                return other_oct;
                }
        }

        return NULL;
}

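/**
 * \brief Disable the links of all VFs on every interface of a device
 * @param oct Pointer to Octeon device
 */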
static void disable_all_vf_links(struct octeon_device *oct)
{
        struct net_device *netdev;
        int max_vfs, vf, i;

        if (!oct)
                return;

        max_vfs = oct->sriov_info.max_vfs;

        for (i = 0; i < oct->ifcount; i++) {
                netdev = oct->props[i].netdev;
                if (!netdev)
                        continue;

                for (vf = 0; vf < max_vfs; vf++)
                        liquidio_set_vf_link_state(netdev, vf,
                                                   IFLA_VF_LINK_STATE_DISABLE);
        }
}

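/**
 * \brief Kernel thread that watches for crashed or stuck Octeon cores
 * @param param Pointer to Octeon device
 *
 * Polls CN23XX_SLI_SCRATCH2 every two seconds. If any core is reported
 * crashed or stuck, marks this device (and its companion, if any) as
 * crashed, disables all VF links, and drops the module references held
 * by VFs so the module can be unloaded for recovery.
 */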
static int liquidio_watchdog(void *param)
{
        bool err_msg_was_printed[LIO_MAX_CORES];
        u16 mask_of_crashed_or_stuck_cores = 0;
        bool all_vf_links_are_disabled = false;
        struct octeon_device *oct = param;
        struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
        long refcount, vfs_referencing_pf;
        u64 vfs_mask1, vfs_mask2;
#endif
        int core;

        memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

        while (!kthread_should_stop()) {
                /* sleep for a couple of seconds so that we don't hog the CPU */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(2000));

                mask_of_crashed_or_stuck_cores =
                    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

                if (!mask_of_crashed_or_stuck_cores)
                        continue;

                WRITE_ONCE(oct->cores_crashed, true);
                other_oct = get_other_octeon_device(oct);
                if (other_oct)
                        WRITE_ONCE(other_oct->cores_crashed, true);

                for (core = 0; core < LIO_MAX_CORES; core++) {
                        bool core_crashed_or_got_stuck;

                        core_crashed_or_got_stuck =
                                                (mask_of_crashed_or_stuck_cores
                                                 >> core) & 1;

                        if (core_crashed_or_got_stuck &&
                            !err_msg_was_printed[core]) {
                                dev_err(&oct->pci_dev->dev,
                                        "ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
                                        core);
                                err_msg_was_printed[core] = true;
                        }
                }

                if (all_vf_links_are_disabled)
                        continue;

                disable_all_vf_links(oct);
                disable_all_vf_links(other_oct);
                all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
                vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
                vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

                vfs_referencing_pf  = hweight64(vfs_mask1);
                vfs_referencing_pf += hweight64(vfs_mask2);

                refcount = module_refcount(THIS_MODULE);
                if (refcount >= vfs_referencing_pf) {
                        while (vfs_referencing_pf) {
                                module_put(THIS_MODULE);
                                vfs_referencing_pf--;
                        }
                }
#endif
        }

        return 0;
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
               const struct pci_device_id *ent __attribute__((unused)))
{
        struct octeon_device *oct_dev = NULL;
        struct handshake *hs;

        oct_dev = octeon_allocate_device(pdev->device,
                                         sizeof(struct octeon_device_priv));
        if (!oct_dev) {
                dev_err(&pdev->dev, "Unable to allocate device\n");
                return -ENOMEM;
        }

        if (pdev->device == OCTEON_CN23XX_PF_VID)
                oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

        /* Enable PTP for 6XXX Device */
        if (((pdev->device == OCTEON_CN66XX) ||
             (pdev->device == OCTEON_CN68XX)))
                oct_dev->ptp_enable = true;
        else
                oct_dev->ptp_enable = false;

        dev_info(&pdev->dev, "Initializing device %x:%x.\n",
                 (u32)pdev->vendor, (u32)pdev->device);

        /* Assign octeon_device for this device to the private data area. */
        pci_set_drvdata(pdev, oct_dev);

        /* set linux specific device pointer */
        oct_dev->pci_dev = (void *)pdev;

        hs = &handshake[oct_dev->octeon_id];
        init_completion(&hs->init);
        init_completion(&hs->started);
        hs->pci_dev = pdev;

        if (oct_dev->octeon_id == 0)
                /* first LiquidIO NIC is detected */
                complete(&first_stage);

        if (octeon_device_init(oct_dev)) {
                complete(&hs->init);
                liquidio_remove(pdev);
                return -ENOMEM;
        }

        if (OCTEON_CN23XX_PF(oct_dev)) {
                u64 scratch1;
                u8 bus, device, function;

                scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
                if (!(scratch1 & 4ULL)) {
                        /* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
                         * the lio watchdog kernel thread is running for this
                         * NIC.  Each NIC gets one watchdog kernel thread.
                         */
                        scratch1 |= 4ULL;
                        octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
                                           scratch1);

                        bus = pdev->bus->number;
                        device = PCI_SLOT(pdev->devfn);
                        function = PCI_FUNC(pdev->devfn);
                        oct_dev->watchdog_task = kthread_create(
                            liquidio_watchdog, oct_dev,
                            "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
                        if (!IS_ERR(oct_dev->watchdog_task)) {
                                wake_up_process(oct_dev->watchdog_task);
                        } else {
                                oct_dev->watchdog_task = NULL;
                                dev_err(&oct_dev->pci_dev->dev,
                                        "failed to create kernel_thread\n");
                                liquidio_remove(pdev);
                                return -1;
                        }
                }
        }

        oct_dev->rx_pause = 1;
        oct_dev->tx_pause = 1;

        dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

        return 0;
}

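/**
 * \brief Check whether the fw_type module parameter is "none"
 *
 * When true, the driver does not load firmware; the card boots from flash.
 */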
static bool fw_type_is_none(void)
{
        return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
                       sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
}

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
        int rc;

        pci_save_state(oct->pci_dev);

        pci_cfg_access_lock(oct->pci_dev);

        /* Quiesce the device completely */
        pci_write_config_word(oct->pci_dev, PCI_COMMAND,
                              PCI_COMMAND_INTX_DISABLE);

        rc = __pci_reset_function_locked(oct->pci_dev);

        if (rc != 0)
                dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
                        rc, oct->pf_num);

        pci_cfg_access_unlock(oct->pci_dev);

        pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with an Octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
        int i, refcount;
        struct msix_entry *msix_entries;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        struct handshake *hs;

        switch (atomic_read(&oct->status)) {
        case OCT_DEV_RUNNING:
        case OCT_DEV_CORE_OK:

                /* No more instructions will be forwarded. */
                atomic_set(&oct->status, OCT_DEV_IN_RESET);

                oct->app_mode = CVM_DRV_INVALID_APP;
                dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                        lio_get_state_string(&oct->status));

                schedule_timeout_uninterruptible(HZ / 10);

                /* fallthrough */
        case OCT_DEV_HOST_OK:

                /* fallthrough */
        case OCT_DEV_CONSOLE_INIT_DONE:
                /* Remove any consoles */
                octeon_remove_consoles(oct);

                /* fallthrough */
        case OCT_DEV_IO_QUEUES_DONE:
                if (wait_for_pending_requests(oct))
                        dev_err(&oct->pci_dev->dev, "There were pending requests\n");

                if (lio_wait_for_instr_fetch(oct))
                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

                /* Disable the input and output queues now. No more packets will
                 * arrive from Octeon, but we should wait for all packet
                 * processing to finish.
                 */
                oct->fn_list.disable_io_queues(oct);

                if (lio_wait_for_oq_pkts(oct))
                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

        /* fallthrough */
        case OCT_DEV_INTR_SET_DONE:
                /* Disable interrupts  */
                oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

                if (oct->msix_on) {
                        msix_entries = (struct msix_entry *)oct->msix_entries;
                        for (i = 0; i < oct->num_msix_irqs - 1; i++) {
                                if (oct->ioq_vector[i].vector) {
                                        /* clear the affinity_cpumask */
                                        irq_set_affinity_hint(
                                                        msix_entries[i].vector,
                                                        NULL);
                                        free_irq(msix_entries[i].vector,
                                                 &oct->ioq_vector[i]);
                                        oct->ioq_vector[i].vector = 0;
                                }
                        }
                        /* non-iov vector's argument is oct struct */
                        free_irq(msix_entries[i].vector, oct);

                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                } else {
                        /* Release the interrupt line */
                        free_irq(oct->pci_dev->irq, oct);

                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                }

                kfree(oct->irq_name_storage);
                oct->irq_name_storage = NULL;

        /* fallthrough */
        case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
                if (OCTEON_CN23XX_PF(oct))
                        octeon_free_ioq_vector(oct);

        /* fallthrough */
        case OCT_DEV_MBOX_SETUP_DONE:
                if (OCTEON_CN23XX_PF(oct))
                        oct->fn_list.free_mbox(oct);

        /* fallthrough */
        case OCT_DEV_IN_RESET:
        case OCT_DEV_DROQ_INIT_DONE:
                /* Wait for any pending operations */
                mdelay(100);
                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        octeon_delete_droq(oct, i);
                }

                /* Force any pending handshakes to complete */
                for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
                        hs = &handshake[i];

                        if (hs->pci_dev) {
                                handshake[oct->octeon_id].init_ok = 0;
                                complete(&handshake[oct->octeon_id].init);
                                handshake[oct->octeon_id].started_ok = 0;
                                complete(&handshake[oct->octeon_id].started);
                        }
                }

                /* fallthrough */
        case OCT_DEV_RESP_LIST_INIT_DONE:
                octeon_delete_response_list(oct);

                /* fallthrough */
        case OCT_DEV_INSTR_QUEUE_INIT_DONE:
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        octeon_delete_instr_queue(oct, i);
                }
#ifdef CONFIG_PCI_IOV
                if (oct->sriov_info.sriov_enabled)
                        pci_disable_sriov(oct->pci_dev);
#endif
                /* fallthrough */
        case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
                octeon_free_sc_buffer_pool(oct);

                /* fallthrough */
        case OCT_DEV_DISPATCH_INIT_DONE:
                octeon_delete_dispatch_list(oct);
                cancel_delayed_work_sync(&oct->nic_poll_work.work);

                /* fallthrough */
        case OCT_DEV_PCI_MAP_DONE:
                refcount = octeon_deregister_device(oct);

                /* Soft reset the octeon device before exiting.
                 * However, if fw was loaded from card (i.e. autoboot),
                 * perform an FLR instead.
                 * Implementation note: only soft-reset the device
                 * if it is a CN6XXX OR the LAST CN23XX device.
                 */
                if (fw_type_is_none())
                        octeon_pci_flr(oct);
                else if (OCTEON_CN6XXX(oct) || !refcount)
                        oct->fn_list.soft_reset(oct);

                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);

                /* fallthrough */
        case OCT_DEV_PCI_ENABLE_DONE:
                pci_clear_master(oct->pci_dev);
                /* Disable the device, releasing the PCI INT */
                pci_disable_device(oct->pci_dev);

                /* fallthrough */
        case OCT_DEV_BEGIN_STATE:
                /* Nothing to be done here either */
                break;
        }                       /* end switch (oct->status) */

        tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * \brief Callback for rx ctrl
 * @param oct octeon device
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
                            u32 status,
                            void *buf)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
        struct liquidio_rx_ctl_context *ctx;

        ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

        oct = lio_get_device(ctx->octeon_id);
        if (status)
                dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
                        CVM_CAST64(status));
        WRITE_ONCE(ctx->cond, 1);

        /* This barrier is required to be sure that the response has been
         * written fully before waking up the handler
         */
        wmb();

        wake_up_interruptible(&ctx->wc);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
        struct octeon_soft_command *sc;
        struct liquidio_rx_ctl_context *ctx;
        union octnet_cmd *ncmd;
        int ctx_size = sizeof(struct liquidio_rx_ctl_context);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
        int retval;

        if (oct->props[lio->ifidx].rx_on == start_stop)
                return;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                          16, ctx_size);

        ncmd = (union octnet_cmd *)sc->virtdptr;
        ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

        WRITE_ONCE(ctx->cond, 0);
        ctx->octeon_id = lio_get_device_id(oct);
        init_waitqueue_head(&ctx->wc);

        ncmd->u64 = 0;
        ncmd->s.cmd = OCTNET_CMD_RX_CTL;
        ncmd->s.param1 = start_stop;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_CMD, 0, 0, 0);

        sc->callback = rx_ctl_callback;
        sc->callback_arg = sc;
        sc->wait_time = 5000;

        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
        } else {
                /* Sleep on a wait queue till the cond flag indicates that the
                 * response arrived or timed-out.
                 */
                if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
                        return;
                oct->props[lio->ifidx].rx_on = start_stop;
        }

        octeon_free_soft_command(oct, sc);
}

/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
        struct net_device *netdev = oct->props[ifidx].netdev;
        struct lio *lio;
        struct napi_struct *napi, *n;

        if (!netdev) {
                dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
                        __func__, ifidx);
                return;
        }

        lio = GET_LIO(netdev);

        dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
                liquidio_stop(netdev);

        if (oct->props[lio->ifidx].napi_enabled == 1) {
                list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                        napi_disable(napi);

                oct->props[lio->ifidx].napi_enabled = 0;

                if (OCTEON_CN23XX_PF(oct))
                        oct->droq[0]->ops.poll_mode = 0;
        }

        /* Delete NAPI */
        list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
                netif_napi_del(napi);

        if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
                unregister_netdev(netdev);

        cleanup_link_status_change_wq(netdev);

        cleanup_rx_oom_poll_fn(netdev);

        delete_glists(lio);

        free_netdev(netdev);

        oct->props[ifidx].gmxport = -1;

        oct->props[ifidx].netdev = NULL;
}

/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
        int i, j;
        struct lio *lio;

        dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
        if (!oct->ifcount) {
                dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
                return 1;
        }

        spin_lock_bh(&oct->cmd_resp_wqlock);
        oct->cmd_resp_state = OCT_DRV_OFFLINE;
        spin_unlock_bh(&oct->cmd_resp_wqlock);

        for (i = 0; i < oct->ifcount; i++) {
                lio = GET_LIO(oct->props[i].netdev);
                for (j = 0; j < oct->num_oqs; j++)
                        octeon_unregister_droq_ops(oct,
                                                   lio->linfo.rxpciq[j].s.q_no);
        }

        for (i = 0; i < oct->ifcount; i++)
                liquidio_destroy_nic_device(oct, i);

        dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
        return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
        struct octeon_device *oct_dev = pci_get_drvdata(pdev);

        dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

        if (oct_dev->watchdog_task)
                kthread_stop(oct_dev->watchdog_task);

        if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
                liquidio_stop_nic_module(oct_dev);

        /* Reset the octeon device and cleanup all memory allocated for
         * the octeon device by driver.
         */
        octeon_destroy_resources(oct_dev);

        dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

        /* This octeon device has been removed. Update the global
         * data structure to reflect this. Free the device structure.
         */
        octeon_free_device_mem(oct_dev);
}

/**
 * \brief Identify the Octeon device and map the BAR address space
1535  * @param oct octeon device
1536  */
1537 static int octeon_chip_specific_setup(struct octeon_device *oct)
1538 {
1539         u32 dev_id, rev_id;
1540         int ret = 1;
1541         char *s;
1542
1543         pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1544         pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1545         oct->rev_id = rev_id & 0xff;
1546
1547         switch (dev_id) {
1548         case OCTEON_CN68XX_PCIID:
1549                 oct->chip_id = OCTEON_CN68XX;
1550                 ret = lio_setup_cn68xx_octeon_device(oct);
1551                 s = "CN68XX";
1552                 break;
1553
1554         case OCTEON_CN66XX_PCIID:
1555                 oct->chip_id = OCTEON_CN66XX;
1556                 ret = lio_setup_cn66xx_octeon_device(oct);
1557                 s = "CN66XX";
1558                 break;
1559
1560         case OCTEON_CN23XX_PCIID_PF:
1561                 oct->chip_id = OCTEON_CN23XX_PF_VID;
1562                 ret = setup_cn23xx_octeon_pf_device(oct);
1563                 if (ret)
1564                         break;
1565 #ifdef CONFIG_PCI_IOV
1566                 pci_sriov_set_totalvfs(oct->pci_dev,
1567                                        oct->sriov_info.max_vfs);
1569 #endif
1570                 s = "CN23XX";
1571                 break;
1572
1573         default:
1574                 s = "?";
1575                 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1576                         dev_id);
1577         }
1578
1579         if (!ret)
1580                 dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
1581                          OCTEON_MAJOR_REV(oct),
1582                          OCTEON_MINOR_REV(oct),
1583                          octeon_get_conf(oct)->card_name,
1584                          LIQUIDIO_VERSION);
1585
1586         return ret;
1587 }
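
/* Illustration (not part of the driver): how the two config-space dwords
 * read above decompose.  Per the standard PCI configuration header, offset
 * 0x00 packs the vendor ID in bits 15:0 and the device ID in bits 31:16,
 * and offset 0x08 carries the revision ID in its low byte -- hence the
 * "rev_id & 0xff".  The values below are made up for the example:
 *
 *	u32 dev_id = 0x9702177d;	// dword at config offset 0x00
 *	u16 vendor = dev_id & 0xffff;	// 0x177d (Cavium)
 *	u16 device = dev_id >> 16;	// 0x9702
 *	u32 class_rev = 0x02000002;	// dword at config offset 0x08
 *	u8  rev = class_rev & 0xff;	// revision byte, here 0x02
 */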
1588
1589 /**
1590  * \brief PCI initialization for each Octeon device.
1591  * @param oct octeon device
1592  */
1593 static int octeon_pci_os_setup(struct octeon_device *oct)
1594 {
1595         /* setup PCI stuff first */
1596         if (pci_enable_device(oct->pci_dev)) {
1597                 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1598                 return 1;
1599         }
1600
1601         if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1602                 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1603                 pci_disable_device(oct->pci_dev);
1604                 return 1;
1605         }
1606
1607         /* Enable PCI DMA Master. */
1608         pci_set_master(oct->pci_dev);
1609
1610         return 0;
1611 }
1612
1613 static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
1614 {
1615         int q = 0;
1616
1617         if (netif_is_multiqueue(lio->netdev))
1618                 q = skb->queue_mapping % lio->linfo.num_txpciq;
1619
1620         return q;
1621 }
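
/* Illustration (not part of the driver): the fold performed by skb_iq().
 * With num_txpciq = 4, for example, the skb's queue_mapping is wrapped
 * onto the available input queues:
 *
 *	queue_mapping:	0 1 2 3 4 5 6 7
 *	selected q:	0 1 2 3 0 1 2 3
 */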
1622
1623 /**
1624  * \brief Check Tx queue state for a given network buffer
1625  * @param lio per-network private data
1626  * @param skb network buffer
1627  */
1628 static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
1629 {
1630         int q = 0, iq = 0;
1631
1632         if (netif_is_multiqueue(lio->netdev)) {
1633                 q = skb->queue_mapping;
1634                 iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
1635         } else {
1636                 iq = lio->txq;
1637                 q = iq;
1638         }
1639
1640         if (octnet_iq_is_full(lio->oct_dev, iq))
1641                 return 0;
1642
1643         if (__netif_subqueue_stopped(lio->netdev, q)) {
1644                 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
1645                 wake_q(lio->netdev, q);
1646         }
1647         return 1;
1648 }
1649
1650 /**
1651  * \brief Unmap and free network buffer
1652  * @param buf buffer
1653  */
1654 static void free_netbuf(void *buf)
1655 {
1656         struct sk_buff *skb;
1657         struct octnet_buf_free_info *finfo;
1658         struct lio *lio;
1659
1660         finfo = (struct octnet_buf_free_info *)buf;
1661         skb = finfo->skb;
1662         lio = finfo->lio;
1663
1664         dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1665                          DMA_TO_DEVICE);
1666
1667         check_txq_state(lio, skb);
1668
1669         tx_buffer_free(skb);
1670 }
1671
1672 /**
1673  * \brief Unmap and free gather buffer
1674  * @param buf buffer
1675  */
1676 static void free_netsgbuf(void *buf)
1677 {
1678         struct octnet_buf_free_info *finfo;
1679         struct sk_buff *skb;
1680         struct lio *lio;
1681         struct octnic_gather *g;
1682         int i, frags, iq;
1683
1684         finfo = (struct octnet_buf_free_info *)buf;
1685         skb = finfo->skb;
1686         lio = finfo->lio;
1687         g = finfo->g;
1688         frags = skb_shinfo(skb)->nr_frags;
1689
1690         dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1691                          g->sg[0].ptr[0], (skb->len - skb->data_len),
1692                          DMA_TO_DEVICE);
1693
1694         i = 1;
1695         while (frags--) {
1696                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1697
1698                 pci_unmap_page((lio->oct_dev)->pci_dev,
1699                                g->sg[(i >> 2)].ptr[(i & 3)],
1700                                frag->size, DMA_TO_DEVICE);
1701                 i++;
1702         }
1703
1704         iq = skb_iq(lio, skb);
1705         spin_lock(&lio->glist_lock[iq]);
1706         list_add_tail(&g->list, &lio->glist[iq]);
1707         spin_unlock(&lio->glist_lock[iq]);
1708
1709         check_txq_state(lio, skb);     /* mq support: sub-queue state check */
1710
1711         tx_buffer_free(skb);
1712 }
1713
1714 /**
1715  * \brief Unmap and free gather buffer with response
1716  * @param buf buffer
1717  */
1718 static void free_netsgbuf_with_resp(void *buf)
1719 {
1720         struct octeon_soft_command *sc;
1721         struct octnet_buf_free_info *finfo;
1722         struct sk_buff *skb;
1723         struct lio *lio;
1724         struct octnic_gather *g;
1725         int i, frags, iq;
1726
1727         sc = (struct octeon_soft_command *)buf;
1728         skb = (struct sk_buff *)sc->callback_arg;
1729         finfo = (struct octnet_buf_free_info *)&skb->cb;
1730
1731         lio = finfo->lio;
1732         g = finfo->g;
1733         frags = skb_shinfo(skb)->nr_frags;
1734
1735         dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1736                          g->sg[0].ptr[0], (skb->len - skb->data_len),
1737                          DMA_TO_DEVICE);
1738
1739         i = 1;
1740         while (frags--) {
1741                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1742
1743                 pci_unmap_page((lio->oct_dev)->pci_dev,
1744                                g->sg[(i >> 2)].ptr[(i & 3)],
1745                                frag->size, DMA_TO_DEVICE);
1746                 i++;
1747         }
1748
1749         iq = skb_iq(lio, skb);
1750
1751         spin_lock(&lio->glist_lock[iq]);
1752         list_add_tail(&g->list, &lio->glist[iq]);
1753         spin_unlock(&lio->glist_lock[iq]);
1754
1755         /* Don't free the skb yet */
1756
1757         check_txq_state(lio, skb);
1758 }
1759
1760 /**
1761  * \brief Adjust ptp frequency
1762  * @param ptp PTP clock info
1763  * @param ppb how much to adjust by, in parts-per-billion
1764  */
1765 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1766 {
1767         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1768         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1769         u64 comp, delta;
1770         unsigned long flags;
1771         bool neg_adj = false;
1772
1773         if (ppb < 0) {
1774                 neg_adj = true;
1775                 ppb = -ppb;
1776         }
1777
1778         /* The hardware adds the clock compensation value to the
1779          * PTP clock on every coprocessor clock cycle, so we
1780          * compute the delta in terms of coprocessor clocks.
1781          */
1782         delta = (u64)ppb << 32;
1783         do_div(delta, oct->coproc_clock_rate);
1784
1785         spin_lock_irqsave(&lio->ptp_lock, flags);
1786         comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1787         if (neg_adj)
1788                 comp -= delta;
1789         else
1790                 comp += delta;
1791         lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1792         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1793
1794         return 0;
1795 }
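
/* Worked example (values illustrative) for the fixed-point math above.
 * CLOCK_COMP is a 32.32 fixed-point "nanoseconds per coprocessor cycle"
 * value, so a parts-per-billion adjustment becomes
 *
 *	delta = ((u64)ppb << 32) / coproc_clock_rate;
 *
 * With coproc_clock_rate = 1000000000 and ppb = 100:
 *	delta = (100 << 32) / 1000000000 = 429,
 * and 429 / 2^32 is about 1e-7, i.e. the compensation value moves by
 * roughly 100 parts per billion, as requested.
 */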
1796
1797 /**
1798  * \brief Adjust ptp time
1799  * @param ptp PTP clock info
1800  * @param delta how much to adjust by, in nanosecs
1801  */
1802 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1803 {
1804         unsigned long flags;
1805         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1806
1807         spin_lock_irqsave(&lio->ptp_lock, flags);
1808         lio->ptp_adjust += delta;
1809         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1810
1811         return 0;
1812 }
1813
1814 /**
1815  * \brief Get hardware clock time, including any adjustment
1816  * @param ptp PTP clock info
1817  * @param ts timespec
1818  */
1819 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1820                                 struct timespec64 *ts)
1821 {
1822         u64 ns;
1823         unsigned long flags;
1824         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1825         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1826
1827         spin_lock_irqsave(&lio->ptp_lock, flags);
1828         ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1829         ns += lio->ptp_adjust;
1830         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1831
1832         *ts = ns_to_timespec64(ns);
1833
1834         return 0;
1835 }
1836
1837 /**
1838  * \brief Set hardware clock time. Reset adjustment
1839  * @param ptp PTP clock info
1840  * @param ts timespec
1841  */
1842 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1843                                 const struct timespec64 *ts)
1844 {
1845         u64 ns;
1846         unsigned long flags;
1847         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1848         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1849
1850         ns = timespec64_to_ns(ts);
1851
1852         spin_lock_irqsave(&lio->ptp_lock, flags);
1853         lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1854         lio->ptp_adjust = 0;
1855         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1856
1857         return 0;
1858 }
1859
1860 /**
1861  * \brief Enable or disable a PTP ancillary feature (not supported)
1862  * @param ptp PTP clock info
1863  * @param rq clock request
1864  * @param on enable/disable flag
1865  */
1866 static int
1867 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1868                     struct ptp_clock_request *rq __attribute__((unused)),
1869                     int on __attribute__((unused)))
1870 {
1871         return -EOPNOTSUPP;
1872 }
1873
1874 /**
1875  * \brief Open PTP clock source
1876  * @param netdev network device
1877  */
1878 static void oct_ptp_open(struct net_device *netdev)
1879 {
1880         struct lio *lio = GET_LIO(netdev);
1881         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1882
1883         spin_lock_init(&lio->ptp_lock);
1884
1885         snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
1886         lio->ptp_info.owner = THIS_MODULE;
1887         lio->ptp_info.max_adj = 250000000;
1888         lio->ptp_info.n_alarm = 0;
1889         lio->ptp_info.n_ext_ts = 0;
1890         lio->ptp_info.n_per_out = 0;
1891         lio->ptp_info.pps = 0;
1892         lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
1893         lio->ptp_info.adjtime = liquidio_ptp_adjtime;
1894         lio->ptp_info.gettime64 = liquidio_ptp_gettime;
1895         lio->ptp_info.settime64 = liquidio_ptp_settime;
1896         lio->ptp_info.enable = liquidio_ptp_enable;
1897
1898         lio->ptp_adjust = 0;
1899
1900         lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
1901                                              &oct->pci_dev->dev);
1902
1903         if (IS_ERR(lio->ptp_clock))
1904                 lio->ptp_clock = NULL;
1905 }
1906
1907 /**
1908  * \brief Init PTP clock
1909  * @param oct octeon device
1910  */
1911 static void liquidio_ptp_init(struct octeon_device *oct)
1912 {
1913         u64 clock_comp, cfg;
1914
1915         clock_comp = (u64)NSEC_PER_SEC << 32;
1916         do_div(clock_comp, oct->coproc_clock_rate);
1917         lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1918
1919         /* Enable */
1920         cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
1921         lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
1922 }
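
/* Worked example (values illustrative) for the nominal compensation
 * programmed above, again in 32.32 fixed point:
 *
 *	clock_comp = ((u64)NSEC_PER_SEC << 32) / coproc_clock_rate;
 *
 * A 500 MHz coprocessor clock gives (1000000000 << 32) / 500000000,
 * i.e. 2 << 32: the PTP counter advances 2.0 ns per coprocessor cycle.
 */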
1923
1924 /**
1925  * \brief Load firmware to device
1926  * @param oct octeon device
1927  *
1928  * Maps device to firmware filename, requests firmware, and downloads it
1929  */
1930 static int load_firmware(struct octeon_device *oct)
1931 {
1932         int ret = 0;
1933         const struct firmware *fw;
1934         char fw_name[LIO_MAX_FW_FILENAME_LEN];
1935         char *tmp_fw_type;
1936
1937         if (fw_type[0] == '\0')
1938                 tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
1939         else
1940                 tmp_fw_type = fw_type;
1941
1942         sprintf(fw_name, "/*(DEBLOBBED)*/", LIO_FW_DIR, LIO_FW_BASE_NAME,
1943                 octeon_get_conf(oct)->card_name, tmp_fw_type,
1944                 LIO_FW_NAME_SUFFIX);
1945
1946         ret = reject_firmware(&fw, fw_name, &oct->pci_dev->dev);
1947         if (ret) {
1948                 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
1949                         fw_name);
1950                 release_firmware(fw);
1951                 return ret;
1952         }
1953
1954         ret = octeon_download_firmware(oct, fw->data, fw->size);
1955
1956         release_firmware(fw);
1957
1958         return ret;
1959 }
1960
1961 /**
1962  * \brief Callback for getting interface configuration
1963  * @param status status of request
1964  * @param buf pointer to resp structure
1965  */
1966 static void if_cfg_callback(struct octeon_device *oct,
1967                             u32 status __attribute__((unused)),
1968                             void *buf)
1969 {
1970         struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
1971         struct liquidio_if_cfg_resp *resp;
1972         struct liquidio_if_cfg_context *ctx;
1973
1974         resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
1975         ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
1976
1977         oct = lio_get_device(ctx->octeon_id);
1978         if (resp->status)
1979                 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
1980                         CVM_CAST64(resp->status), status);
1981         WRITE_ONCE(ctx->cond, 1);
1982
1983         snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
1984                  resp->cfg_info.liquidio_firmware_version);
1985
1986         /* This barrier is required to be sure that the response has been
1987          * written fully before waking up the handler
1988          */
1989         wmb();
1990
1991         wake_up_interruptible(&ctx->wc);
1992 }
1993
1994 /**
1995  * \brief Poll routine for checking transmit queue status
1996  * @param work work_struct data structure
1997  */
1998 static void octnet_poll_check_txq_status(struct work_struct *work)
1999 {
2000         struct cavium_wk *wk = (struct cavium_wk *)work;
2001         struct lio *lio = (struct lio *)wk->ctxptr;
2002
2003         if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
2004                 return;
2005
2006         check_txq_status(lio);
2007         queue_delayed_work(lio->txq_status_wq.wq,
2008                            &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2009 }
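
/* The routine above is the usual self-rearming delayed-work pattern: the
 * handler re-queues itself (here every ~1 ms) until a condition -- the
 * LIO_IFSTATE_RUNNING flag being cleared -- breaks the chain.  A minimal
 * sketch of the shape, with hypothetical names:
 *
 *	static void my_poll(struct work_struct *work)
 *	{
 *		struct my_ctx *ctx = container_of(to_delayed_work(work),
 *						  struct my_ctx, dwork);
 *
 *		if (!ctx->running)
 *			return;		// chain ends here
 *		do_poll_work(ctx);	// hypothetical helper
 *		queue_delayed_work(ctx->wq, &ctx->dwork,
 *				   msecs_to_jiffies(1));
 *	}
 */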
2010
2011 /**
2012  * \brief Sets up the txq poll check
2013  * @param netdev network device
2014  */
2015 static inline int setup_tx_poll_fn(struct net_device *netdev)
2016 {
2017         struct lio *lio = GET_LIO(netdev);
2018         struct octeon_device *oct = lio->oct_dev;
2019
2020         lio->txq_status_wq.wq = alloc_workqueue("txq-status",
2021                                                 WQ_MEM_RECLAIM, 0);
2022         if (!lio->txq_status_wq.wq) {
2023                 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
2024                 return -1;
2025         }
2026         INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
2027                           octnet_poll_check_txq_status);
2028         lio->txq_status_wq.wk.ctxptr = lio;
2029         queue_delayed_work(lio->txq_status_wq.wq,
2030                            &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2031         return 0;
2032 }
2033
2034 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
2035 {
2036         struct lio *lio = GET_LIO(netdev);
2037
2038         if (lio->txq_status_wq.wq) {
2039                 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
2040                 destroy_workqueue(lio->txq_status_wq.wq);
2041         }
2042 }
2043
2044 /**
2045  * \brief Net device open for LiquidIO
2046  * @param netdev network device
2047  */
2048 static int liquidio_open(struct net_device *netdev)
2049 {
2050         struct lio *lio = GET_LIO(netdev);
2051         struct octeon_device *oct = lio->oct_dev;
2052         struct napi_struct *napi, *n;
2053
2054         if (oct->props[lio->ifidx].napi_enabled == 0) {
2055                 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2056                         napi_enable(napi);
2057
2058                 oct->props[lio->ifidx].napi_enabled = 1;
2059
2060                 if (OCTEON_CN23XX_PF(oct))
2061                         oct->droq[0]->ops.poll_mode = 1;
2062         }
2063
2064         if (oct->ptp_enable)
2065                 oct_ptp_open(netdev);
2066
2067         ifstate_set(lio, LIO_IFSTATE_RUNNING);
2068
2069         /* Ready for link status updates */
2070         lio->intf_open = 1;
2071
2072         netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
2073
2074         if (OCTEON_CN23XX_PF(oct)) {
2075                 if (!oct->msix_on)
2076                         if (setup_tx_poll_fn(netdev))
2077                                 return -1;
2078         } else {
2079                 if (setup_tx_poll_fn(netdev))
2080                         return -1;
2081         }
2082
2083         start_txq(netdev);
2084
2085         /* tell Octeon to start forwarding packets to host */
2086         send_rx_ctrl_cmd(lio, 1);
2087
2088         dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
2089                  netdev->name);
2090
2091         return 0;
2092 }
2093
2094 /**
2095  * \brief Net device stop for LiquidIO
2096  * @param netdev network device
2097  */
2098 static int liquidio_stop(struct net_device *netdev)
2099 {
2100         struct lio *lio = GET_LIO(netdev);
2101         struct octeon_device *oct = lio->oct_dev;
2102         struct napi_struct *napi, *n;
2103
2104         if (oct->props[lio->ifidx].napi_enabled) {
2105                 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2106                         napi_disable(napi);
2107
2108                 oct->props[lio->ifidx].napi_enabled = 0;
2109
2110                 if (OCTEON_CN23XX_PF(oct))
2111                         oct->droq[0]->ops.poll_mode = 0;
2112         }
2113
2114         ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2115
2116         netif_tx_disable(netdev);
2117
2118         /* Inform that netif carrier is down */
2119         netif_carrier_off(netdev);
2120         lio->intf_open = 0;
2121         lio->linfo.link.s.link_up = 0;
2122         lio->link_changes++;
2123
2124         /* Tell Octeon that nic interface is down. */
2125         send_rx_ctrl_cmd(lio, 0);
2126
2127         if (OCTEON_CN23XX_PF(oct)) {
2128                 if (!oct->msix_on)
2129                         cleanup_tx_poll_fn(netdev);
2130         } else {
2131                 cleanup_tx_poll_fn(netdev);
2132         }
2133
2134         if (lio->ptp_clock) {
2135                 ptp_clock_unregister(lio->ptp_clock);
2136                 lio->ptp_clock = NULL;
2137         }
2138
2139         dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
2140
2141         return 0;
2142 }
2143
2144 /**
2145  * \brief Converts a mask based on net device flags
2146  * @param netdev network device
2147  *
2148  * This routine generates an octnet_ifflags mask from the net device flags
2149  * received from the OS.
2150  */
2151 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
2152 {
2153         enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
2154
2155         if (netdev->flags & IFF_PROMISC)
2156                 f |= OCTNET_IFFLAG_PROMISC;
2157
2158         if (netdev->flags & IFF_ALLMULTI)
2159                 f |= OCTNET_IFFLAG_ALLMULTI;
2160
2161         if (netdev->flags & IFF_MULTICAST) {
2162                 f |= OCTNET_IFFLAG_MULTICAST;
2163
2164                 /* Accept all multicast addresses if there are more than we
2165                  * can handle
2166                  */
2167                 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
2168                         f |= OCTNET_IFFLAG_ALLMULTI;
2169         }
2170
2171         if (netdev->flags & IFF_BROADCAST)
2172                 f |= OCTNET_IFFLAG_BROADCAST;
2173
2174         return f;
2175 }
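
/* For example, an interface with IFF_BROADCAST|IFF_MULTICAST set and no
 * more than MAX_OCTEON_MULTICAST_ADDR groups subscribed maps to
 * OCTNET_IFFLAG_UNICAST | OCTNET_IFFLAG_MULTICAST | OCTNET_IFFLAG_BROADCAST.
 */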
2176
2177 /**
2178  * \brief Net device set_multicast_list
2179  * @param netdev network device
2180  */
2181 static void liquidio_set_mcast_list(struct net_device *netdev)
2182 {
2183         struct lio *lio = GET_LIO(netdev);
2184         struct octeon_device *oct = lio->oct_dev;
2185         struct octnic_ctrl_pkt nctrl;
2186         struct netdev_hw_addr *ha;
2187         u64 *mc;
2188         int ret;
2189         int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
2190
2191         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2192
2193         /* Create a ctrl pkt command to be sent to core app. */
2194         nctrl.ncmd.u64 = 0;
2195         nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
2196         nctrl.ncmd.s.param1 = get_new_flags(netdev);
2197         nctrl.ncmd.s.param2 = mc_count;
2198         nctrl.ncmd.s.more = mc_count;
2199         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2200         nctrl.netpndev = (u64)netdev;
2201         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2202
2203         /* copy all the addresses into the udd */
2204         mc = &nctrl.udd[0];
2205         netdev_for_each_mc_addr(ha, netdev) {
2206                 *mc = 0;
2207                 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2208                 /* no need to swap bytes */
2209
2210                 if (++mc >= &nctrl.udd[mc_count])
2211                         break;
2212         }
2213
2214         /* This callback can be invoked in atomic context, so we must
2215          * not sleep waiting for a response.
2216          */
2217         nctrl.wait_time = 0;
2218
2219         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2220         if (ret < 0) {
2221                 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2222                         ret);
2223         }
2224 }
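
/* Layout note (illustrative): each multicast address above is copied at
 * byte offset 2 of a zeroed u64, so a MAC m0:m1:m2:m3:m4:m5 occupies the
 * low-order six bytes of the big-endian word:
 *
 *	udd[i] bytes:  00 00 m0 m1 m2 m3 m4 m5
 *
 * The same two-byte offset convention is used for
 * OCTNET_CMD_CHANGE_MACADDR in liquidio_set_mac() below.
 */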
2225
2226 /**
2227  * \brief Net device set_mac_address
2228  * @param netdev network device
2229  */
2230 static int liquidio_set_mac(struct net_device *netdev, void *p)
2231 {
2232         int ret = 0;
2233         struct lio *lio = GET_LIO(netdev);
2234         struct octeon_device *oct = lio->oct_dev;
2235         struct sockaddr *addr = (struct sockaddr *)p;
2236         struct octnic_ctrl_pkt nctrl;
2237
2238         if (!is_valid_ether_addr(addr->sa_data))
2239                 return -EADDRNOTAVAIL;
2240
2241         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2242
2243         nctrl.ncmd.u64 = 0;
2244         nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2245         nctrl.ncmd.s.param1 = 0;
2246         nctrl.ncmd.s.more = 1;
2247         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2248         nctrl.netpndev = (u64)netdev;
2249         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2250         nctrl.wait_time = 100;
2251
2252         nctrl.udd[0] = 0;
2253         /* The MAC Address is presented in network byte order. */
2254         memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2255
2256         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2257         if (ret < 0) {
2258                 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2259                 return -ENOMEM;
2260         }
2261         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2262         memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2263
2264         return 0;
2265 }
2266
2267 /**
2268  * \brief Net device get_stats
2269  * @param netdev network device
2270  */
2271 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2272 {
2273         struct lio *lio = GET_LIO(netdev);
2274         struct net_device_stats *stats = &netdev->stats;
2275         struct octeon_device *oct;
2276         u64 pkts = 0, drop = 0, bytes = 0;
2277         struct oct_droq_stats *oq_stats;
2278         struct oct_iq_stats *iq_stats;
2279         int i, iq_no, oq_no;
2280
2281         oct = lio->oct_dev;
2282
2283         if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2284                 return stats;
2285
2286         for (i = 0; i < oct->num_iqs; i++) {
2287                 iq_no = lio->linfo.txpciq[i].s.q_no;
2288                 iq_stats = &oct->instr_queue[iq_no]->stats;
2289                 pkts += iq_stats->tx_done;
2290                 drop += iq_stats->tx_dropped;
2291                 bytes += iq_stats->tx_tot_bytes;
2292         }
2293
2294         stats->tx_packets = pkts;
2295         stats->tx_bytes = bytes;
2296         stats->tx_dropped = drop;
2297
2298         pkts = 0;
2299         drop = 0;
2300         bytes = 0;
2301
2302         for (i = 0; i < oct->num_oqs; i++) {
2303                 oq_no = lio->linfo.rxpciq[i].s.q_no;
2304                 oq_stats = &oct->droq[oq_no]->stats;
2305                 pkts += oq_stats->rx_pkts_received;
2306                 drop += (oq_stats->rx_dropped +
2307                          oq_stats->dropped_nodispatch +
2308                          oq_stats->dropped_toomany +
2309                          oq_stats->dropped_nomem);
2310                 bytes += oq_stats->rx_bytes_received;
2311         }
2312
2313         stats->rx_bytes = bytes;
2314         stats->rx_packets = pkts;
2315         stats->rx_dropped = drop;
2316
2317         return stats;
2318 }
2319
2320 /**
2321  * \brief Net device change_mtu
2322  * @param netdev network device
2323  */
2324 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
2325 {
2326         struct lio *lio = GET_LIO(netdev);
2327         struct octeon_device *oct = lio->oct_dev;
2328         struct octnic_ctrl_pkt nctrl;
2329         int ret = 0;
2330
2331         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2332
2333         nctrl.ncmd.u64 = 0;
2334         nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
2335         nctrl.ncmd.s.param1 = new_mtu;
2336         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2337         nctrl.wait_time = 100;
2338         nctrl.netpndev = (u64)netdev;
2339         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2340
2341         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2342         if (ret < 0) {
2343                 dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
2344                 return -1;
2345         }
2346
2347         lio->mtu = new_mtu;
2348
2349         return 0;
2350 }
2351
2352 /**
2353  * \brief Handler for SIOCSHWTSTAMP ioctl
2354  * @param netdev network device
2355  * @param ifr interface request
2357  */
2358 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2359 {
2360         struct hwtstamp_config conf;
2361         struct lio *lio = GET_LIO(netdev);
2362
2363         if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2364                 return -EFAULT;
2365
2366         if (conf.flags)
2367                 return -EINVAL;
2368
2369         switch (conf.tx_type) {
2370         case HWTSTAMP_TX_ON:
2371         case HWTSTAMP_TX_OFF:
2372                 break;
2373         default:
2374                 return -ERANGE;
2375         }
2376
2377         switch (conf.rx_filter) {
2378         case HWTSTAMP_FILTER_NONE:
2379                 break;
2380         case HWTSTAMP_FILTER_ALL:
2381         case HWTSTAMP_FILTER_SOME:
2382         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2383         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2384         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2385         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2386         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2387         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2388         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2389         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2390         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2391         case HWTSTAMP_FILTER_PTP_V2_EVENT:
2392         case HWTSTAMP_FILTER_PTP_V2_SYNC:
2393         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2394         case HWTSTAMP_FILTER_NTP_ALL:
2395                 conf.rx_filter = HWTSTAMP_FILTER_ALL;
2396                 break;
2397         default:
2398                 return -ERANGE;
2399         }
2400
2401         if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2402                 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2404         else
2405                 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2406
2407         return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2408 }
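
/* A minimal userspace sketch (standard SIOCSHWTSTAMP usage, not part of
 * this driver) showing how the handler above is reached; "eth0" and
 * sock_fd are placeholders:
 *
 *	#include <linux/net_tstamp.h>
 *	#include <linux/sockios.h>
 *	#include <net/if.h>
 *	#include <string.h>
 *	#include <sys/ioctl.h>
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr;
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *	ifr.ifr_data = (void *)&cfg;
 *	ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 *	// on success cfg.rx_filter reports what was actually enabled;
 *	// this driver coarsens any PTP filter to HWTSTAMP_FILTER_ALL.
 */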
2409
2410 /**
2411  * \brief ioctl handler
2412  * @param netdev network device
2413  * @param ifr interface request
2414  * @param cmd command
2415  */
2416 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2417 {
2418         struct lio *lio = GET_LIO(netdev);
2419
2420         switch (cmd) {
2421         case SIOCSHWTSTAMP:
2422                 if (lio->oct_dev->ptp_enable)
2423                         return hwtstamp_ioctl(netdev, ifr);
2424         default: /* SIOCSHWTSTAMP falls through here when PTP is disabled */
2425                 return -EOPNOTSUPP;
2426         }
2427 }
2428
2429 /**
2430  * \brief handle a Tx timestamp response
2431  * @param status response status
2432  * @param buf pointer to skb
2433  */
2434 static void handle_timestamp(struct octeon_device *oct,
2435                              u32 status,
2436                              void *buf)
2437 {
2438         struct octnet_buf_free_info *finfo;
2439         struct octeon_soft_command *sc;
2440         struct oct_timestamp_resp *resp;
2441         struct lio *lio;
2442         struct sk_buff *skb = (struct sk_buff *)buf;
2443
2444         finfo = (struct octnet_buf_free_info *)skb->cb;
2445         lio = finfo->lio;
2446         sc = finfo->sc;
2447         oct = lio->oct_dev;
2448         resp = (struct oct_timestamp_resp *)sc->virtrptr;
2449
2450         if (status != OCTEON_REQUEST_DONE) {
2451                 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2452                         CVM_CAST64(status));
2453                 resp->timestamp = 0;
2454         }
2455
2456         octeon_swap_8B_data(&resp->timestamp, 1);
2457
2458         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2459                 struct skb_shared_hwtstamps ts;
2460                 u64 ns = resp->timestamp;
2461
2462                 netif_info(lio, tx_done, lio->netdev,
2463                            "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2464                            skb, (unsigned long long)ns);
2465                 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2466                 skb_tstamp_tx(skb, &ts);
2467         }
2468
2469         octeon_free_soft_command(oct, sc);
2470         tx_buffer_free(skb);
2471 }
2472
2473 /** \brief Send a data packet that will be timestamped
2474  * @param oct octeon device
2475  * @param ndata pointer to network data
2476  * @param finfo pointer to private network data
2477  */
2478 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2479                                          struct octnic_data_pkt *ndata,
2480                                          struct octnet_buf_free_info *finfo)
2481 {
2482         int retval;
2483         struct octeon_soft_command *sc;
2484         struct lio *lio;
2485         int ring_doorbell;
2486         u32 len;
2487
2488         lio = finfo->lio;
2489
2490         sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2491                                             sizeof(struct oct_timestamp_resp));
2492         finfo->sc = sc;
2493
2494         if (!sc) {
2495                 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2496                 return IQ_SEND_FAILED;
2497         }
2498
2499         if (ndata->reqtype == REQTYPE_NORESP_NET)
2500                 ndata->reqtype = REQTYPE_RESP_NET;
2501         else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2502                 ndata->reqtype = REQTYPE_RESP_NET_SG;
2503
2504         sc->callback = handle_timestamp;
2505         sc->callback_arg = finfo->skb;
2506         sc->iq_no = ndata->q_no;
2507
2508         if (OCTEON_CN23XX_PF(oct))
2509                 len = (u32)((struct octeon_instr_ih3 *)
2510                             (&sc->cmd.cmd3.ih3))->dlengsz;
2511         else
2512                 len = (u32)((struct octeon_instr_ih2 *)
2513                             (&sc->cmd.cmd2.ih2))->dlengsz;
2514
2515         ring_doorbell = 1;
2516
2517         retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2518                                      sc, len, ndata->reqtype);
2519
2520         if (retval == IQ_SEND_FAILED) {
2521                 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2522                         retval);
2523                 octeon_free_soft_command(oct, sc);
2524         } else {
2525                 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2526         }
2527
2528         return retval;
2529 }
2530
2531 /** \brief Transmit network packets to the Octeon interface
2532  * @param skb      skbuff struct handed down from the network layer
2533  * @param netdev    pointer to network device
2534  * @returns whether the packet was transmitted to the device okay or not
2535  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2536  */
2537 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2538 {
2539         struct lio *lio;
2540         struct octnet_buf_free_info *finfo;
2541         union octnic_cmd_setup cmdsetup;
2542         struct octnic_data_pkt ndata;
2543         struct octeon_device *oct;
2544         struct oct_iq_stats *stats;
2545         struct octeon_instr_irh *irh;
2546         union tx_info *tx_info;
2547         int status = 0;
2548         int q_idx = 0, iq_no = 0;
2549         int j;
2550         u64 dptr = 0;
2551         u32 tag = 0;
2552
2553         lio = GET_LIO(netdev);
2554         oct = lio->oct_dev;
2555
2556         if (netif_is_multiqueue(netdev)) {
2557                 q_idx = skb->queue_mapping;
2558                 q_idx = (q_idx % (lio->linfo.num_txpciq));
2559                 tag = q_idx;
2560                 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2561         } else {
2562                 iq_no = lio->txq;
2563         }
2564
2565         stats = &oct->instr_queue[iq_no]->stats;
2566
2567         /* Check for all conditions in which the current packet cannot be
2568          * transmitted.
2569          */
2570         if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2571             (!lio->linfo.link.s.link_up) ||
2572             (skb->len <= 0)) {
2573                 netif_info(lio, tx_err, lio->netdev,
2574                            "Transmit failed link_status : %d\n",
2575                            lio->linfo.link.s.link_up);
2576                 goto lio_xmit_failed;
2577         }
2578
2579         /* Use space in skb->cb to store info used to unmap and
2580          * free the buffers.
2581          */
2582         finfo = (struct octnet_buf_free_info *)skb->cb;
2583         finfo->lio = lio;
2584         finfo->skb = skb;
2585         finfo->sc = NULL;
2586
2587         /* Prepare the attributes for the data to be passed to OSI. */
2588         memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2589
2590         ndata.buf = (void *)finfo;
2591
2592         ndata.q_no = iq_no;
2593
2594         if (netif_is_multiqueue(netdev)) {
2595                 if (octnet_iq_is_full(oct, ndata.q_no)) {
2596                         /* defer sending if queue is full */
2597                         netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2598                                    ndata.q_no);
2599                         stats->tx_iq_busy++;
2600                         return NETDEV_TX_BUSY;
2601                 }
2602         } else {
2603                 if (octnet_iq_is_full(oct, lio->txq)) {
2604                         /* defer sending if queue is full */
2605                         stats->tx_iq_busy++;
2606                         netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2607                                    lio->txq);
2608                         return NETDEV_TX_BUSY;
2609                 }
2610         }
2614
2615         ndata.datasize = skb->len;
2616
2617         cmdsetup.u64 = 0;
2618         cmdsetup.s.iq_no = iq_no;
2619
2620         if (skb->ip_summed == CHECKSUM_PARTIAL) {
2621                 if (skb->encapsulation) {
2622                         cmdsetup.s.tnl_csum = 1;
2623                         stats->tx_vxlan++;
2624                 } else {
2625                         cmdsetup.s.transport_csum = 1;
2626                 }
2627         }
2628         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2629                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2630                 cmdsetup.s.timestamp = 1;
2631         }
2632
2633         if (skb_shinfo(skb)->nr_frags == 0) {
2634                 cmdsetup.s.u.datasize = skb->len;
2635                 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2636
2637                 /* Map the linear skb data for DMA to the device */
2638                 dptr = dma_map_single(&oct->pci_dev->dev,
2639                                       skb->data,
2640                                       skb->len,
2641                                       DMA_TO_DEVICE);
2642                 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2643                         dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2644                                 __func__);
2645                         return NETDEV_TX_BUSY;
2646                 }
2647
2648                 if (OCTEON_CN23XX_PF(oct))
2649                         ndata.cmd.cmd3.dptr = dptr;
2650                 else
2651                         ndata.cmd.cmd2.dptr = dptr;
2652                 finfo->dptr = dptr;
2653                 ndata.reqtype = REQTYPE_NORESP_NET;
2654
2655         } else {
2656                 int i, frags;
2657                 struct skb_frag_struct *frag;
2658                 struct octnic_gather *g;
2659
2660                 spin_lock(&lio->glist_lock[q_idx]);
2661                 g = (struct octnic_gather *)
2662                         list_delete_head(&lio->glist[q_idx]);
2663                 spin_unlock(&lio->glist_lock[q_idx]);
2664
2665                 if (!g) {
2666                         netif_info(lio, tx_err, lio->netdev,
2667                                    "Transmit scatter gather: glist null!\n");
2668                         goto lio_xmit_failed;
2669                 }
2670
2671                 cmdsetup.s.gather = 1;
2672                 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2673                 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2674
2675                 memset(g->sg, 0, g->sg_size);
2676
2677                 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2678                                                  skb->data,
2679                                                  (skb->len - skb->data_len),
2680                                                  DMA_TO_DEVICE);
2681                 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2682                         dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2683                                 __func__);
2684                         return NETDEV_TX_BUSY;
2685                 }
2686                 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2687
2688                 frags = skb_shinfo(skb)->nr_frags;
2689                 i = 1;
2690                 while (frags--) {
2691                         frag = &skb_shinfo(skb)->frags[i - 1];
2692
2693                         g->sg[(i >> 2)].ptr[(i & 3)] =
2694                                 dma_map_page(&oct->pci_dev->dev,
2695                                              frag->page.p,
2696                                              frag->page_offset,
2697                                              frag->size,
2698                                              DMA_TO_DEVICE);
2699
2700                         if (dma_mapping_error(&oct->pci_dev->dev,
2701                                               g->sg[i >> 2].ptr[i & 3])) {
2702                                 dma_unmap_single(&oct->pci_dev->dev,
2703                                                  g->sg[0].ptr[0],
2704                                                  skb->len - skb->data_len,
2705                                                  DMA_TO_DEVICE);
2706                                 for (j = 1; j < i; j++) {
2707                                         frag = &skb_shinfo(skb)->frags[j - 1];
2708                                         dma_unmap_page(&oct->pci_dev->dev,
2709                                                        g->sg[j >> 2].ptr[j & 3],
2710                                                        frag->size,
2711                                                        DMA_TO_DEVICE);
2712                                 }
2713                                 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2714                                         __func__);
2715                                 return NETDEV_TX_BUSY;
2716                         }
2717
2718                         add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2719                         i++;
2720                 }
2721
2722                 dptr = g->sg_dma_ptr;
2723
2724                 if (OCTEON_CN23XX_PF(oct))
2725                         ndata.cmd.cmd3.dptr = dptr;
2726                 else
2727                         ndata.cmd.cmd2.dptr = dptr;
2728                 finfo->dptr = dptr;
2729                 finfo->g = g;
2730
2731                 ndata.reqtype = REQTYPE_NORESP_NET_SG;
2732         }
2733
2734         if (OCTEON_CN23XX_PF(oct)) {
2735                 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2736                 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2737         } else {
2738                 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2739                 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2740         }
2741
2742         if (skb_shinfo(skb)->gso_size) {
2743                 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2744                 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2745                 stats->tx_gso++;
2746         }
2747
2748         /* HW insert VLAN tag */
2749         if (skb_vlan_tag_present(skb)) {
2750                 irh->priority = skb_vlan_tag_get(skb) >> 13;
2751                 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2752         }
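	/* The two assignments above split the 16-bit VLAN TCI: bits 15:13
	 * carry the PCP (priority), bits 11:0 the VLAN ID.  For example,
	 * TCI 0x6064: priority = 0x6064 >> 13 = 3, vid = 0x6064 & 0xfff = 100.
	 */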
2753
2754         if (unlikely(cmdsetup.s.timestamp))
2755                 status = send_nic_timestamp_pkt(oct, &ndata, finfo);
2756         else
2757                 status = octnet_send_nic_data_pkt(oct, &ndata);
2758         if (status == IQ_SEND_FAILED)
2759                 goto lio_xmit_failed;
2760
2761         netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2762
2763         if (status == IQ_SEND_STOP)
2764                 stop_q(lio->netdev, q_idx);
2765
2766         netif_trans_update(netdev);
2767
2768         if (tx_info->s.gso_segs)
2769                 stats->tx_done += tx_info->s.gso_segs;
2770         else
2771                 stats->tx_done++;
2772         stats->tx_tot_bytes += ndata.datasize;
2773
2774         return NETDEV_TX_OK;
2775
2776 lio_xmit_failed:
2777         stats->tx_dropped++;
2778         netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2779                    iq_no, stats->tx_dropped);
2780         if (dptr)
2781                 dma_unmap_single(&oct->pci_dev->dev, dptr,
2782                                  ndata.datasize, DMA_TO_DEVICE);
2783         tx_buffer_free(skb);
2784         return NETDEV_TX_OK;
2785 }
2786
2787 /** \brief Network device Tx timeout
2788  * @param netdev    pointer to network device
2789  */
2790 static void liquidio_tx_timeout(struct net_device *netdev)
2791 {
2792         struct lio *lio;
2793
2794         lio = GET_LIO(netdev);
2795
2796         netif_info(lio, tx_err, lio->netdev,
2797                    "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2798                    netdev->stats.tx_dropped);
2799         netif_trans_update(netdev);
2800         txqs_wake(netdev);
2801 }
2802
2803 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2804                                     __be16 proto __attribute__((unused)),
2805                                     u16 vid)
2806 {
2807         struct lio *lio = GET_LIO(netdev);
2808         struct octeon_device *oct = lio->oct_dev;
2809         struct octnic_ctrl_pkt nctrl;
2810         int ret = 0;
2811
2812         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2813
2814         nctrl.ncmd.u64 = 0;
2815         nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2816         nctrl.ncmd.s.param1 = vid;
2817         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2818         nctrl.wait_time = 100;
2819         nctrl.netpndev = (u64)netdev;
2820         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2821
2822         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2823         if (ret < 0) {
2824                 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2825                         ret);
2826         }
2827
2828         return ret;
2829 }
2830
2831 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2832                                      __be16 proto __attribute__((unused)),
2833                                      u16 vid)
2834 {
2835         struct lio *lio = GET_LIO(netdev);
2836         struct octeon_device *oct = lio->oct_dev;
2837         struct octnic_ctrl_pkt nctrl;
2838         int ret = 0;
2839
2840         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2841
2842         nctrl.ncmd.u64 = 0;
2843         nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2844         nctrl.ncmd.s.param1 = vid;
2845         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2846         nctrl.wait_time = 100;
2847         nctrl.netpndev = (u64)netdev;
2848         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2849
2850         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2851         if (ret < 0) {
2852                 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2853                         ret);
2854         }
2855         return ret;
2856 }
2857
2858 /** Sending command to enable/disable RX checksum offload
2859  * @param netdev                pointer to network device
2860  * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
2861  * @param rx_cmd                OCTNET_CMD_RXCSUM_ENABLE/
2862  *                              OCTNET_CMD_RXCSUM_DISABLE
2863  * @returns                     SUCCESS or FAILURE
2864  */
2865 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2866                                        u8 rx_cmd)
2867 {
2868         struct lio *lio = GET_LIO(netdev);
2869         struct octeon_device *oct = lio->oct_dev;
2870         struct octnic_ctrl_pkt nctrl;
2871         int ret = 0;
2872
2873         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2874
2875         nctrl.ncmd.u64 = 0;
2876         nctrl.ncmd.s.cmd = command;
2877         nctrl.ncmd.s.param1 = rx_cmd;
2878         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2879         nctrl.wait_time = 100;
2880         nctrl.netpndev = (u64)netdev;
2881         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2882
2883         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2884         if (ret < 0) {
2885                 dev_err(&oct->pci_dev->dev,
2886                         "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2887                         ret);
2888         }
2889         return ret;
2890 }
2891
2892 /** Sending command to add/delete VxLAN UDP port to firmware
2893  * @param netdev                pointer to network device
2894  * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
2895  * @param vxlan_port            VxLAN port to be added or deleted
2896  * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
2897  *                              OCTNET_CMD_VXLAN_PORT_DEL
2898  * @returns                     SUCCESS or FAILURE
2899  */
2900 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2901                                        u16 vxlan_port, u8 vxlan_cmd_bit)
2902 {
2903         struct lio *lio = GET_LIO(netdev);
2904         struct octeon_device *oct = lio->oct_dev;
2905         struct octnic_ctrl_pkt nctrl;
2906         int ret = 0;
2907
2908         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2909
2910         nctrl.ncmd.u64 = 0;
2911         nctrl.ncmd.s.cmd = command;
2912         nctrl.ncmd.s.more = vxlan_cmd_bit;
2913         nctrl.ncmd.s.param1 = vxlan_port;
2914         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2915         nctrl.wait_time = 100;
2916         nctrl.netpndev = (u64)netdev;
2917         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2918
2919         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2920         if (ret < 0) {
2921                 dev_err(&oct->pci_dev->dev,
2922                         "VxLAN port add/delete failed in core (ret:0x%x)\n",
2923                         ret);
2924         }
2925         return ret;
2926 }
2927
2928 /** \brief Net device fix features
2929  * @param netdev  pointer to network device
2930  * @param request features requested
2931  * @returns updated features list
2932  */
2933 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2934                                                netdev_features_t request)
2935 {
2936         struct lio *lio = netdev_priv(netdev);
2937
2938         if ((request & NETIF_F_RXCSUM) &&
2939             !(lio->dev_capability & NETIF_F_RXCSUM))
2940                 request &= ~NETIF_F_RXCSUM;
2941
2942         if ((request & NETIF_F_HW_CSUM) &&
2943             !(lio->dev_capability & NETIF_F_HW_CSUM))
2944                 request &= ~NETIF_F_HW_CSUM;
2945
2946         if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2947                 request &= ~NETIF_F_TSO;
2948
2949         if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2950                 request &= ~NETIF_F_TSO6;
2951
2952         if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2953                 request &= ~NETIF_F_LRO;
2954
2955         /* Disable LRO if RXCSUM is off */
2956         if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2957             (lio->dev_capability & NETIF_F_LRO))
2958                 request &= ~NETIF_F_LRO;
2959
2960         if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2961             !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2962                 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2963
2964         return request;
2965 }
2966
2967 /** \brief Net device set features
2968  * @param netdev  pointer to network device
2969  * @param features features to enable/disable
2970  */
2971 static int liquidio_set_features(struct net_device *netdev,
2972                                  netdev_features_t features)
2973 {
2974         struct lio *lio = netdev_priv(netdev);
2975
2976         if ((features & NETIF_F_LRO) &&
2977             (lio->dev_capability & NETIF_F_LRO) &&
2978             !(netdev->features & NETIF_F_LRO))
2979                 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2980                                      OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2981         else if (!(features & NETIF_F_LRO) &&
2982                  (lio->dev_capability & NETIF_F_LRO) &&
2983                  (netdev->features & NETIF_F_LRO))
2984                 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2985                                      OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2986
2987         /* Sending command to firmware to enable/disable RX checksum
2988          * offload settings using ethtool
2989          */
2990         if (!(netdev->features & NETIF_F_RXCSUM) &&
2991             (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2992             (features & NETIF_F_RXCSUM))
2993                 liquidio_set_rxcsum_command(netdev,
2994                                             OCTNET_CMD_TNL_RX_CSUM_CTL,
2995                                             OCTNET_CMD_RXCSUM_ENABLE);
2996         else if ((netdev->features & NETIF_F_RXCSUM) &&
2997                  (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2998                  !(features & NETIF_F_RXCSUM))
2999                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3000                                             OCTNET_CMD_RXCSUM_DISABLE);
3001
3002         if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3003             (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3004             !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
3005                 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3006                                      OCTNET_CMD_VLAN_FILTER_ENABLE);
3007         else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3008                  (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3009                  (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
3010                 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3011                                      OCTNET_CMD_VLAN_FILTER_DISABLE);
3012
3013         return 0;
3014 }
3015
3016 static void liquidio_add_vxlan_port(struct net_device *netdev,
3017                                     struct udp_tunnel_info *ti)
3018 {
3019         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3020                 return;
3021
3022         liquidio_vxlan_port_command(netdev,
3023                                     OCTNET_CMD_VXLAN_PORT_CONFIG,
3024                                     htons(ti->port),
3025                                     OCTNET_CMD_VXLAN_PORT_ADD);
3026 }
3027
3028 static void liquidio_del_vxlan_port(struct net_device *netdev,
3029                                     struct udp_tunnel_info *ti)
3030 {
3031         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3032                 return;
3033
3034         liquidio_vxlan_port_command(netdev,
3035                                     OCTNET_CMD_VXLAN_PORT_CONFIG,
3036                                     htons(ti->port),
3037                                     OCTNET_CMD_VXLAN_PORT_DEL);
3038 }
3039
3040 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
3041                                  u8 *mac, bool is_admin_assigned)
3042 {
3043         struct lio *lio = GET_LIO(netdev);
3044         struct octeon_device *oct = lio->oct_dev;
3045         struct octnic_ctrl_pkt nctrl;
3046
3047         if (!is_valid_ether_addr(mac))
3048                 return -EINVAL;
3049
3050         if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
3051                 return -EINVAL;
3052
3053         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3054
3055         nctrl.ncmd.u64 = 0;
3056         nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
3057         /* vfidx is 0-based, but vf_num (param1) is 1-based */
3058         nctrl.ncmd.s.param1 = vfidx + 1;
3059         nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
3060         nctrl.ncmd.s.more = 1;
3061         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3062         nctrl.netpndev = (u64)netdev;
3063         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3064         nctrl.wait_time = LIO_CMD_WAIT_TM;
3065
3066         nctrl.udd[0] = 0;
3067         /* The MAC address is presented in network byte order. */
3068         ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
3069
3070         oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
3071
3072         octnet_send_nic_ctrl_pkt(oct, &nctrl);
3073
3074         return 0;
3075 }
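
/* A sketch of the udd[0] layout built above, for illustration only: the
 * six MAC bytes are copied at byte offset 2 of the u64, so the address
 * occupies the low-order bytes of the value as the firmware reads it.
 * The helper name below is hypothetical.
 */
static inline u64 example_pack_vf_macaddr(const u8 *mac)
{
	u64 udd = 0;

	/* bytes 2..7 of udd hold mac[0]..mac[5]; bytes 0..1 stay zero */
	ether_addr_copy((u8 *)&udd + 2, mac);
	return udd;
}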
3076
3077 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
3078 {
3079         struct lio *lio = GET_LIO(netdev);
3080         struct octeon_device *oct = lio->oct_dev;
3081         int retval;
3082
3083         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3084                 return -EINVAL;
3085
3086         retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
3087         if (!retval)
3088                 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
3089
3090         return retval;
3091 }
3092
3093 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
3094                                 u16 vlan, u8 qos, __be16 vlan_proto)
3095 {
3096         struct lio *lio = GET_LIO(netdev);
3097         struct octeon_device *oct = lio->oct_dev;
3098         struct octnic_ctrl_pkt nctrl;
3099         u16 vlantci;
3100
3101         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3102                 return -EINVAL;
3103
3104         if (vlan_proto != htons(ETH_P_8021Q))
3105                 return -EPROTONOSUPPORT;
3106
3107         if (vlan >= VLAN_N_VID || qos > 7)
3108                 return -EINVAL;
3109
3110         if (vlan)
3111                 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
3112         else
3113                 vlantci = 0;
3114
3115         if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
3116                 return 0;
3117
3118         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3119
3120         if (vlan)
3121                 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
3122         else
3123                 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
3124
3125         nctrl.ncmd.s.param1 = vlantci;
3126         nctrl.ncmd.s.param2 =
3127             vfidx + 1; /* vfidx is 0-based, but vf_num (param2) is 1-based */
3128         nctrl.ncmd.s.more = 0;
3129         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3130         nctrl.cb_fn = NULL;
3131         nctrl.wait_time = LIO_CMD_WAIT_TM;
3132
3133         octnet_send_nic_ctrl_pkt(oct, &nctrl);
3134
3135         oct->sriov_info.vf_vlantci[vfidx] = vlantci;
3136
3137         return 0;
3138 }
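
/* Worked example of the vlantci packing above, with VLAN_PRIO_SHIFT = 13
 * from <linux/if_vlan.h>: vlan 100 with qos 5 gives
 * 100 | (5 << 13) = 0x0064 | 0xA000 = 0xA064. A vlan of 0 forces vlantci
 * to 0 (and selects OCTNET_CMD_DEL_VLAN_FILTER) regardless of qos.
 * liquidio_get_vf_config() reverses this: 0xA064 & VLAN_VID_MASK = 100
 * and 0xA064 >> VLAN_PRIO_SHIFT = 5.
 */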
3139
3140 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
3141                                   struct ifla_vf_info *ivi)
3142 {
3143         struct lio *lio = GET_LIO(netdev);
3144         struct octeon_device *oct = lio->oct_dev;
3145         u8 *macaddr;
3146
3147         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3148                 return -EINVAL;
3149
3150         ivi->vf = vfidx;
3151         macaddr = (u8 *)&oct->sriov_info.vf_macaddr[vfidx] + 2;
3152         ether_addr_copy(&ivi->mac[0], macaddr);
3153         ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
3154         ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
3155         ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
3156         return 0;
3157 }
3158
3159 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3160                                       int linkstate)
3161 {
3162         struct lio *lio = GET_LIO(netdev);
3163         struct octeon_device *oct = lio->oct_dev;
3164         struct octnic_ctrl_pkt nctrl;
3165
3166         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3167                 return -EINVAL;
3168
3169         if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3170                 return 0;
3171
3172         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3173         nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3174         nctrl.ncmd.s.param1 =
3175             vfidx + 1; /* vfidx is 0-based, but vf_num (param1) is 1-based */
3176         nctrl.ncmd.s.param2 = linkstate;
3177         nctrl.ncmd.s.more = 0;
3178         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3179         nctrl.cb_fn = NULL;
3180         nctrl.wait_time = LIO_CMD_WAIT_TM;
3181
3182         octnet_send_nic_ctrl_pkt(oct, &nctrl);
3183
3184         oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3185
3186         return 0;
3187 }
3188
3189 static const struct net_device_ops lionetdevops = {
3190         .ndo_open               = liquidio_open,
3191         .ndo_stop               = liquidio_stop,
3192         .ndo_start_xmit         = liquidio_xmit,
3193         .ndo_get_stats          = liquidio_get_stats,
3194         .ndo_set_mac_address    = liquidio_set_mac,
3195         .ndo_set_rx_mode        = liquidio_set_mcast_list,
3196         .ndo_tx_timeout         = liquidio_tx_timeout,
3197
3198         .ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3199         .ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3200         .ndo_change_mtu         = liquidio_change_mtu,
3201         .ndo_do_ioctl           = liquidio_ioctl,
3202         .ndo_fix_features       = liquidio_fix_features,
3203         .ndo_set_features       = liquidio_set_features,
3204         .ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
3205         .ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
3206         .ndo_set_vf_mac         = liquidio_set_vf_mac,
3207         .ndo_set_vf_vlan        = liquidio_set_vf_vlan,
3208         .ndo_get_vf_config      = liquidio_get_vf_config,
3209         .ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3210 };
3211
3212 /** \brief Entry point for the liquidio module
3213  */
3214 static int __init liquidio_init(void)
3215 {
3216         int i;
3217         struct handshake *hs;
3218
3219         init_completion(&first_stage);
3220
3221         octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3222
3223         if (liquidio_init_pci())
3224                 return -EINVAL;
3225
3226         wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3227
3228         for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3229                 hs = &handshake[i];
3230                 if (hs->pci_dev) {
3231                         wait_for_completion(&hs->init);
3232                         if (!hs->init_ok) {
3233                                 /* init handshake failed */
3234                                 dev_err(&hs->pci_dev->dev,
3235                                         "Failed to init device\n");
3236                                 liquidio_deinit_pci();
3237                                 return -EIO;
3238                         }
3239                 }
3240         }
3241
3242         for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3243                 hs = &handshake[i];
3244                 if (hs->pci_dev) {
3245                         wait_for_completion_timeout(&hs->started,
3246                                                     msecs_to_jiffies(30000));
3247                         if (!hs->started_ok) {
3248                                 /* starter handshake failed */
3249                                 dev_err(&hs->pci_dev->dev,
3250                                         "Firmware failed to start\n");
3251                                 liquidio_deinit_pci();
3252                                 return -EIO;
3253                         }
3254                 }
3255         }
3256
3257         return 0;
3258 }
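
/* The two waits above are expected to pair with completions signalled
 * elsewhere in the driver, roughly in this order (a sketch of the
 * handshake, not new code):
 *
 *   probe path:   complete(&first_stage);
 *                 ...octeon_device_init()...
 *                 hs->init_ok = 1;    complete(&hs->init);
 *   nic_starter:  hs->started_ok = 1; complete(&hs->started);
 *
 * so liquidio_init() returns 0 only once every probed device has both
 * initialized and reported started firmware, and -EIO otherwise.
 */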
3259
3260 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3261 {
3262         struct octeon_device *oct = (struct octeon_device *)buf;
3263         struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3264         int gmxport = 0;
3265         union oct_link_status *ls;
3266         int i;
3267
3268         if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3269                 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3270                         recv_pkt->buffer_size[0],
3271                         recv_pkt->rh.r_nic_info.gmxport);
3272                 goto nic_info_err;
3273         }
3274
3275         gmxport = recv_pkt->rh.r_nic_info.gmxport;
3276         ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3277                 OCT_DROQ_INFO_SIZE);
3278
3279         octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3280         for (i = 0; i < oct->ifcount; i++) {
3281                 if (oct->props[i].gmxport == gmxport) {
3282                         update_link_status(oct->props[i].netdev, ls);
3283                         break;
3284                 }
3285         }
3286
3287 nic_info_err:
3288         for (i = 0; i < recv_pkt->buffer_count; i++)
3289                 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3290         octeon_free_recv_info(recv_info);
3291         return 0;
3292 }
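
/* A minimal sketch of the dispatch-handler contract that lio_nic_info()
 * follows, for illustration only: locate the payload past the DROQ info
 * header, byte-swap what the firmware wrote, and release the receive
 * buffers and recv_info on every path, success or error.
 */
static int example_dispatch_fn(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	u64 *payload;
	int i;

	/* payload begins after the per-packet DROQ info header */
	payload = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) +
			  OCT_DROQ_INFO_SIZE);
	octeon_swap_8B_data(payload, 1);	/* first 64-bit word only */

	/* the handler owns the buffers: always free them */
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}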
3293
3294 /**
3295  * \brief Set up network interfaces
3296  * @param octeon_dev  octeon device
3297  *
3298  * Called at init time for each device. It assumes the NIC
3299  * is already up and running. The link information for each
3300  * interface is returned by the firmware in the if_cfg response.
3301  */
3302 static int setup_nic_devices(struct octeon_device *octeon_dev)
3303 {
3304         struct lio *lio = NULL;
3305         struct net_device *netdev;
3306         u8 mac[ETH_ALEN], i, j;
3307         struct octeon_soft_command *sc;
3308         struct liquidio_if_cfg_context *ctx;
3309         struct liquidio_if_cfg_resp *resp;
3310         struct octdev_props *props;
3311         int retval, num_iqueues, num_oqueues;
3312         union oct_nic_if_cfg if_cfg;
3313         unsigned int base_queue;
3314         unsigned int gmx_port_id;
3315         u32 resp_size, ctx_size, data_size;
3316         u32 ifidx_or_pfnum;
3317         struct lio_version *vdata;
3318
3319         /* This is to handle link status changes */
3320         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3321                                     OPCODE_NIC_INFO,
3322                                     lio_nic_info, octeon_dev);
3323
3324         /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3325          * They are handled directly.
3326          */
3327         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3328                                         free_netbuf);
3329
3330         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3331                                         free_netsgbuf);
3332
3333         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3334                                         free_netsgbuf_with_resp);
3335
3336         for (i = 0; i < octeon_dev->ifcount; i++) {
3337                 resp_size = sizeof(struct liquidio_if_cfg_resp);
3338                 ctx_size = sizeof(struct liquidio_if_cfg_context);
3339                 data_size = sizeof(struct lio_version);
3340                 sc = (struct octeon_soft_command *)
3341                         octeon_alloc_soft_command(octeon_dev, data_size,
3342                                                   resp_size, ctx_size);
3343                 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3344                 ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
3345                 vdata = (struct lio_version *)sc->virtdptr;
3346
3347                 *((u64 *)vdata) = 0;
3348                 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3349                 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3350                 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3351
3352                 if (OCTEON_CN23XX_PF(octeon_dev)) {
3353                         num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3354                         num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3355                         base_queue = octeon_dev->sriov_info.pf_srn;
3356
3357                         gmx_port_id = octeon_dev->pf_num;
3358                         ifidx_or_pfnum = octeon_dev->pf_num;
3359                 } else {
3360                         num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3361                                                 octeon_get_conf(octeon_dev), i);
3362                         num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3363                                                 octeon_get_conf(octeon_dev), i);
3364                         base_queue = CFG_GET_BASE_QUE_NIC_IF(
3365                                                 octeon_get_conf(octeon_dev), i);
3366                         gmx_port_id = CFG_GET_GMXID_NIC_IF(
3367                                                 octeon_get_conf(octeon_dev), i);
3368                         ifidx_or_pfnum = i;
3369                 }
3370
3371                 dev_dbg(&octeon_dev->pci_dev->dev,
3372                         "requesting config for interface %d, iqs %d, oqs %d\n",
3373                         ifidx_or_pfnum, num_iqueues, num_oqueues);
3374                 WRITE_ONCE(ctx->cond, 0);
3375                 ctx->octeon_id = lio_get_device_id(octeon_dev);
3376                 init_waitqueue_head(&ctx->wc);
3377
3378                 if_cfg.u64 = 0;
3379                 if_cfg.s.num_iqueues = num_iqueues;
3380                 if_cfg.s.num_oqueues = num_oqueues;
3381                 if_cfg.s.base_queue = base_queue;
3382                 if_cfg.s.gmx_port_id = gmx_port_id;
3383
3384                 sc->iq_no = 0;
3385
3386                 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3387                                             OPCODE_NIC_IF_CFG, 0,
3388                                             if_cfg.u64, 0);
3389
3390                 sc->callback = if_cfg_callback;
3391                 sc->callback_arg = sc;
3392                 sc->wait_time = 3000;
3393
3394                 retval = octeon_send_soft_command(octeon_dev, sc);
3395                 if (retval == IQ_SEND_FAILED) {
3396                         dev_err(&octeon_dev->pci_dev->dev,
3397                                 "iq/oq config failed status: %x\n",
3398                                 retval);
3399                         /* The soft command is freed by the driver on failure. */
3400                         goto setup_nic_dev_fail;
3401                 }
3402
3403                 /* Sleep on a wait queue until the cond flag indicates that
3404                  * the response arrived or the request timed out.
3405                  */
3406                 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
3407                         dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
3408                         goto setup_nic_wait_intr;
3409                 }
3410
3411                 retval = resp->status;
3412                 if (retval) {
3413                         dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3414                         goto setup_nic_dev_fail;
3415                 }
3416
3417                 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3418                                     (sizeof(struct liquidio_if_cfg_info)) >> 3);
3419
3420                 num_iqueues = hweight64(resp->cfg_info.iqmask);
3421                 num_oqueues = hweight64(resp->cfg_info.oqmask);
3422
3423                 if (!num_iqueues || !num_oqueues) {
3424                         dev_err(&octeon_dev->pci_dev->dev,
3425                                 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3426                                 resp->cfg_info.iqmask,
3427                                 resp->cfg_info.oqmask);
3428                         goto setup_nic_dev_fail;
3429                 }
3430                 dev_dbg(&octeon_dev->pci_dev->dev,
3431                         "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
3432                         i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3433                         num_iqueues, num_oqueues);
3434                 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
3435
3436                 if (!netdev) {
3437                         dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3438                         goto setup_nic_dev_fail;
3439                 }
3440
3441                 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3442
3443                 /* Associate the routines that will handle different
3444                  * netdev tasks.
3445                  */
3446                 netdev->netdev_ops = &lionetdevops;
3447
3448                 lio = GET_LIO(netdev);
3449
3450                 memset(lio, 0, sizeof(struct lio));
3451
3452                 lio->ifidx = ifidx_or_pfnum;
3453
3454                 props = &octeon_dev->props[i];
3455                 props->gmxport = resp->cfg_info.linfo.gmxport;
3456                 props->netdev = netdev;
3457
3458                 lio->linfo.num_rxpciq = num_oqueues;
3459                 lio->linfo.num_txpciq = num_iqueues;
3460                 for (j = 0; j < num_oqueues; j++) {
3461                         lio->linfo.rxpciq[j].u64 =
3462                                 resp->cfg_info.linfo.rxpciq[j].u64;
3463                 }
3464                 for (j = 0; j < num_iqueues; j++) {
3465                         lio->linfo.txpciq[j].u64 =
3466                                 resp->cfg_info.linfo.txpciq[j].u64;
3467                 }
3468                 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3469                 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3470                 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3471
3472                 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3473
3474                 if (OCTEON_CN23XX_PF(octeon_dev) ||
3475                     OCTEON_CN6XXX(octeon_dev)) {
3476                         lio->dev_capability = NETIF_F_HIGHDMA
3477                                               | NETIF_F_IP_CSUM
3478                                               | NETIF_F_IPV6_CSUM
3479                                               | NETIF_F_SG | NETIF_F_RXCSUM
3480                                               | NETIF_F_GRO
3481                                               | NETIF_F_TSO | NETIF_F_TSO6
3482                                               | NETIF_F_LRO;
3483                 }
3484                 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3485
3486                 /* Copy of transmit encapsulation capabilities:
3487                  * TSO, TSO6, checksums for this device.
3488                  */
3489                 lio->enc_dev_capability = NETIF_F_IP_CSUM
3490                                           | NETIF_F_IPV6_CSUM
3491                                           | NETIF_F_GSO_UDP_TUNNEL
3492                                           | NETIF_F_HW_CSUM | NETIF_F_SG
3493                                           | NETIF_F_RXCSUM
3494                                           | NETIF_F_TSO | NETIF_F_TSO6
3495                                           | NETIF_F_LRO;
3496
3497                 netdev->hw_enc_features = (lio->enc_dev_capability &
3498                                            ~NETIF_F_LRO);
3499
3500                 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3501
3502                 netdev->vlan_features = lio->dev_capability;
3503                 /* Add any unchangeable hw features */
3504                 lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3505                                         NETIF_F_HW_VLAN_CTAG_RX |
3506                                         NETIF_F_HW_VLAN_CTAG_TX;
3507
3508                 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3509
3510                 netdev->hw_features = lio->dev_capability;
3511                 /* HW VLAN CTAG RX is always on; it is not toggleable */
3512                 netdev->hw_features = netdev->hw_features &
3513                         ~NETIF_F_HW_VLAN_CTAG_RX;
3514
3515                 /* MTU range: 68 - 16000 */
3516                 netdev->min_mtu = LIO_MIN_MTU_SIZE;
3517                 netdev->max_mtu = LIO_MAX_MTU_SIZE;
3518
3519                 /* Point to the properties of the octeon device to which
3520                  * this interface belongs.
3521                  */
3522                 lio->oct_dev = octeon_dev;
3523                 lio->octprops = props;
3524                 lio->netdev = netdev;
3525
3526                 dev_dbg(&octeon_dev->pci_dev->dev,
3527                         "if%d gmx: %d hw_addr: 0x%llx\n", i,
3528                         lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3529
3530                 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3531                         u8 vfmac[ETH_ALEN];
3532
3533                         random_ether_addr(&vfmac[0]);
3534                         if (__liquidio_set_vf_mac(netdev, j,
3535                                                   &vfmac[0], false)) {
3536                                 dev_err(&octeon_dev->pci_dev->dev,
3537                                         "Error setting VF%d MAC address\n",
3538                                         j);
3539                                 goto setup_nic_dev_fail;
3540                         }
3541                 }
3542
3543                 /* 64-bit swap required on LE machines */
3544                 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3545                 for (j = 0; j < ETH_ALEN; j++)
3546                         mac[j] = *((u8 *)&lio->linfo.hw_addr + 2 + j);
3547
3548                 /* Copy the MAC address to the OS network device structure */
3549
3550                 ether_addr_copy(netdev->dev_addr, mac);
3551
3552                 /* By default all interfaces on a single Octeon use the same
3553                  * tx and rx queues.
3554                  */
3555                 lio->txq = lio->linfo.txpciq[0].s.q_no;
3556                 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3557                 if (liquidio_setup_io_queues(octeon_dev, i,
3558                                              lio->linfo.num_txpciq,
3559                                              lio->linfo.num_rxpciq)) {
3560                         dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3561                         goto setup_nic_dev_fail;
3562                 }
3563
3564                 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3565
3566                 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3567                 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3568
3569                 if (setup_glists(octeon_dev, lio, num_iqueues)) {
3570                         dev_err(&octeon_dev->pci_dev->dev,
3571                                 "Gather list allocation failed\n");
3572                         goto setup_nic_dev_fail;
3573                 }
3574
3575                 /* Register ethtool support */
3576                 liquidio_set_ethtool_ops(netdev);
3577                 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3578                         octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3579                 else
3580                         octeon_dev->priv_flags = 0x0;
3581
3582                 if (netdev->features & NETIF_F_LRO)
3583                         liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3584                                              OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3585
3586                 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3587                                      OCTNET_CMD_VLAN_FILTER_ENABLE);
3588
3589                 if ((debug != -1) && (debug & NETIF_MSG_HW))
3590                         liquidio_set_feature(netdev,
3591                                              OCTNET_CMD_VERBOSE_ENABLE, 0);
3592
3593                 if (setup_link_status_change_wq(netdev))
3594                         goto setup_nic_dev_fail;
3595
3596                 if (setup_rx_oom_poll_fn(netdev))
3597                         goto setup_nic_dev_fail;
3598
3599                 /* Register the network device with the OS */
3600                 if (register_netdev(netdev)) {
3601                         dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3602                         goto setup_nic_dev_fail;
3603                 }
3604
3605                 dev_dbg(&octeon_dev->pci_dev->dev,
3606                         "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3607                         i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3608                 netif_carrier_off(netdev);
3609                 lio->link_changes++;
3610
3611                 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3612
3613                 /* Send a command to the firmware to enable RX checksum
3614                  * offload by default when the LiquidIO driver sets up
3615                  * this device.
3616                  */
3617                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3618                                             OCTNET_CMD_RXCSUM_ENABLE);
3619                 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3620                                      OCTNET_CMD_TXCSUM_ENABLE);
3621
3622                 dev_dbg(&octeon_dev->pci_dev->dev,
3623                         "NIC ifidx:%d Setup successful\n", i);
3624
3625                 octeon_free_soft_command(octeon_dev, sc);
3626         }
3627
3628         return 0;
3629
3630 setup_nic_dev_fail:
3631
3632         octeon_free_soft_command(octeon_dev, sc);
3633
3634 setup_nic_wait_intr:
3635
3636         while (i--) {
3637                 dev_err(&octeon_dev->pci_dev->dev,
3638                         "NIC ifidx:%d Setup failed\n", i);
3639                 liquidio_destroy_nic_device(octeon_dev, i);
3640         }
3641         return -ENODEV;
3642 }
3643
3644 #ifdef CONFIG_PCI_IOV
3645 static int octeon_enable_sriov(struct octeon_device *oct)
3646 {
3647         unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3648         struct pci_dev *vfdev;
3649         int err;
3650         u32 u;
3651
3652         if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3653                 err = pci_enable_sriov(oct->pci_dev,
3654                                        oct->sriov_info.num_vfs_alloced);
3655                 if (err) {
3656                         dev_err(&oct->pci_dev->dev,
3657                                 "OCTEON: Failed to enable PCI sriov: %d\n",
3658                                 err);
3659                         oct->sriov_info.num_vfs_alloced = 0;
3660                         return err;
3661                 }
3662                 oct->sriov_info.sriov_enabled = 1;
3663
3664                 /* init lookup table that maps DPI ring number to VF pci_dev
3665                  * struct pointer
3666                  */
3667                 u = 0;
3668                 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3669                                        OCTEON_CN23XX_VF_VID, NULL);
3670                 while (vfdev) {
3671                         if (vfdev->is_virtfn &&
3672                             (vfdev->physfn == oct->pci_dev)) {
3673                                 oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3674                                         vfdev;
3675                                 u += oct->sriov_info.rings_per_vf;
3676                         }
3677                         vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3678                                                OCTEON_CN23XX_VF_VID, vfdev);
3679                 }
3680         }
3681
3682         return num_vfs_alloced;
3683 }
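
/* Worked example of the lookup table filled above, assuming
 * rings_per_vf = 2 and two VFs discovered in order: entries are written
 * at a stride of rings_per_vf, so dpiring_to_vfpcidev_lut[0] points to
 * VF0 and dpiring_to_vfpcidev_lut[2] to VF1; a DPI ring number can then
 * be mapped back to the pci_dev of the VF that owns it.
 */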
3684
3685 static int lio_pci_sriov_disable(struct octeon_device *oct)
3686 {
3687         int u;
3688
3689         if (pci_vfs_assigned(oct->pci_dev)) {
3690                 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3691                 return -EPERM;
3692         }
3693
3694         pci_disable_sriov(oct->pci_dev);
3695
3696         u = 0;
3697         while (u < MAX_POSSIBLE_VFS) {
3698                 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3699                 u += oct->sriov_info.rings_per_vf;
3700         }
3701
3702         oct->sriov_info.num_vfs_alloced = 0;
3703         dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3704                  oct->pf_num);
3705
3706         return 0;
3707 }
3708
3709 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3710 {
3711         struct octeon_device *oct = pci_get_drvdata(dev);
3712         int ret = 0;
3713
3714         if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3715             (oct->sriov_info.sriov_enabled)) {
3716                 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3717                          oct->pf_num, num_vfs);
3718                 return 0;
3719         }
3720
3721         if (!num_vfs) {
3722                 ret = lio_pci_sriov_disable(oct);
3723         } else if (num_vfs > oct->sriov_info.max_vfs) {
3724                 dev_err(&oct->pci_dev->dev,
3725                         "OCTEON: Max allowed VFs:%d user requested:%d",
3726                         oct->sriov_info.max_vfs, num_vfs);
3727                 ret = -EPERM;
3728         } else {
3729                 oct->sriov_info.num_vfs_alloced = num_vfs;
3730                 ret = octeon_enable_sriov(oct);
3731                 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3732                          oct->pf_num, num_vfs);
3733         }
3734
3735         return ret;
3736 }
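
/* This callback is presumably registered as the driver's .sriov_configure
 * hook, so VF provisioning is expected to be driven from userspace via
 * the standard sysfs knob (the BDF below is a placeholder):
 *
 *   echo 4 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 *   echo 0 > /sys/bus/pci/devices/0000:03:00.0/sriov_numvfs
 *
 * Writing 0 lands in lio_pci_sriov_disable(), which refuses with -EPERM
 * while VFs are still assigned to guests.
 */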
3737 #endif
3738
3739 /**
3740  * \brief initialize the NIC
3741  * @param oct octeon device
3742  *
3743  * This initialization routine is called once the Octeon device application is
3744  * up and running
3745  */
3746 static int liquidio_init_nic_module(struct octeon_device *oct)
3747 {
3748         int i, retval = 0;
3749         int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3750
3751         dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3752
3753         /* Only the default iq and oq were initialized;
3754          * initialize the rest as well.
3755          */
3756         /* Run the port_config command for each port. */
3757         oct->ifcount = num_nic_ports;
3758
3759         memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3760
3761         for (i = 0; i < MAX_OCTEON_LINKS; i++)
3762                 oct->props[i].gmxport = -1;
3763
3764         retval = setup_nic_devices(oct);
3765         if (retval) {
3766                 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3767                 goto octnet_init_failure;
3768         }
3769
3770         liquidio_ptp_init(oct);
3771
3772         dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3773
3774         return retval;
3775
3776 octnet_init_failure:
3777
3778         oct->ifcount = 0;
3779
3780         return retval;
3781 }
3782
3783 /**
3784  * \brief starter callback that invokes the remaining initialization work after
3785  * the NIC is up and running.
3786  * @param work  work_struct used to schedule this callback
3787  */
3788 static void nic_starter(struct work_struct *work)
3789 {
3790         struct octeon_device *oct;
3791         struct cavium_wk *wk = (struct cavium_wk *)work;
3792
3793         oct = (struct octeon_device *)wk->ctxptr;
3794
3795         if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3796                 return;
3797
3798         /* If the status of the device is CORE_OK, the core
3799          * application has reported its application type. Call
3800          * any registered handlers now and move to the RUNNING
3801          * state.
3802          */
3803         if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3804                 schedule_delayed_work(&oct->nic_poll_work.work,
3805                                       LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3806                 return;
3807         }
3808
3809         atomic_set(&oct->status, OCT_DEV_RUNNING);
3810
3811         if (oct->app_mode == CVM_DRV_NIC_APP) {
3812                 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3813
3814                 if (liquidio_init_nic_module(oct))
3815                         dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3816                 else
3817                         handshake[oct->octeon_id].started_ok = 1;
3818         } else {
3819                 dev_err(&oct->pci_dev->dev,
3820                         "Unexpected application running on NIC (%d). Check firmware.\n",
3821                         oct->app_mode);
3822         }
3823
3824         complete(&handshake[oct->octeon_id].started);
3825 }
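
/* nic_starter() doubles as a poll loop: it re-arms itself until the core
 * application reports OCT_DEV_CORE_OK. A generic sketch of the same
 * self-rescheduling pattern, assuming (as the cast above implies) that
 * struct cavium_wk embeds its delayed_work first; work_done() is a
 * hypothetical predicate.
 */
static void example_poller(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;

	if (!work_done(wk->ctxptr))
		schedule_delayed_work(&wk->work, msecs_to_jiffies(100));
}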
3826
3827 static int
3828 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
3829 {
3830         struct octeon_device *oct = (struct octeon_device *)buf;
3831         struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3832         int i, notice, vf_idx;
3833         bool cores_crashed;
3834         u64 *data, vf_num;
3835
3836         notice = recv_pkt->rh.r.ossp;
3837         data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
3838
3839         /* the first 64-bit word of data is the vf_num */
3840         vf_num = data[0];
3841         octeon_swap_8B_data(&vf_num, 1);
3842         vf_idx = (int)vf_num - 1;
3843
3844         cores_crashed = READ_ONCE(oct->cores_crashed);
3845
3846         if (notice == VF_DRV_LOADED) {
3847                 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
3848                         oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
3849                         dev_info(&oct->pci_dev->dev,
3850                                  "driver for VF%d was loaded\n", vf_idx);
3851                         if (!cores_crashed)
3852                                 try_module_get(THIS_MODULE);
3853                 }
3854         } else if (notice == VF_DRV_REMOVED) {
3855                 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
3856                         oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
3857                         dev_info(&oct->pci_dev->dev,
3858                                  "driver for VF%d was removed\n", vf_idx);
3859                         if (!cores_crashed)
3860                                 module_put(THIS_MODULE);
3861                 }
3862         } else if (notice == VF_DRV_MACADDR_CHANGED) {
3863                 u8 *b = (u8 *)&data[1];
3864
3865                 oct->sriov_info.vf_macaddr[vf_idx] = data[1];
3866                 dev_info(&oct->pci_dev->dev,
3867                          "VF driver changed VF%d's MAC address to %pM\n",
3868                          vf_idx, b + 2);
3869         }
3870
3871         for (i = 0; i < recv_pkt->buffer_count; i++)
3872                 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3873         octeon_free_recv_info(recv_info);
3874
3875         return 0;
3876 }
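
/* Worked example of the mask bookkeeping above: with VF drivers loaded
 * for vf_idx 0 and 2, vf_drv_loaded_mask = BIT_ULL(0) | BIT_ULL(2) = 0x5;
 * a VF_DRV_REMOVED notice for vf_idx 0 clears bit 0, leaving 0x4. The
 * mask also guards the module refcount, so a repeated notice from the
 * firmware cannot unbalance try_module_get()/module_put().
 */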
3877
3878 /**
3879  * \brief Device initialization for each Octeon device that is probed
3880  * @param octeon_dev  octeon device
3881  */
3882 static int octeon_device_init(struct octeon_device *octeon_dev)
3883 {
3884         int j, ret;
3885         int fw_loaded = 0;
3886         char bootcmd[] = "\n";
3887         char *dbg_enb = NULL;
3888         struct octeon_device_priv *oct_priv =
3889                 (struct octeon_device_priv *)octeon_dev->priv;
3890         atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
3891
3892         /* Enable access to the octeon device and make its DMA capability
3893          * known to the OS.
3894          */
3895         if (octeon_pci_os_setup(octeon_dev))
3896                 return 1;
3897
3898         atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
3899
3900         /* Identify the Octeon type and map the BAR address space. */
3901         if (octeon_chip_specific_setup(octeon_dev)) {
3902                 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
3903                 return 1;
3904         }
3905
3906         atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
3907
3908         /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
3909          * since that is what is required for the reference to be removed
3910          * during de-initialization (see 'octeon_destroy_resources').
3911          */
3912         octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
3913                                PCI_SLOT(octeon_dev->pci_dev->devfn),
3914                                PCI_FUNC(octeon_dev->pci_dev->devfn),
3915                                true);
3916
3917         octeon_dev->app_mode = CVM_DRV_INVALID_APP;
3918
3919         if (OCTEON_CN23XX_PF(octeon_dev)) {
3920                 if (!cn23xx_fw_loaded(octeon_dev) && !fw_type_is_none()) {
3921                         fw_loaded = 0;
3922                         /* Do a soft reset of the Octeon device. */
3923                         if (octeon_dev->fn_list.soft_reset(octeon_dev))
3924                                 return 1;
3925                         /* the soft reset may have changed the firmware state */
3926                         if (!cn23xx_fw_loaded(octeon_dev))
3927                                 fw_loaded = 0;
3928                         else
3929                                 fw_loaded = 1;
3930                 } else {
3931                         fw_loaded = 1;
3932                 }
3933         } else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
3934                 return 1;
3935         }
3936
3937         /* Initialize the dispatch mechanism used to push packets arriving on
3938          * Octeon Output queues.
3939          */
3940         if (octeon_init_dispatch_list(octeon_dev))
3941                 return 1;
3942
3943         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3944                                     OPCODE_NIC_CORE_DRV_ACTIVE,
3945                                     octeon_core_drv_init,
3946                                     octeon_dev);
3947
3948         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3949                                     OPCODE_NIC_VF_DRV_NOTICE,
3950                                     octeon_recv_vf_drv_notice, octeon_dev);
3951         INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
3952         octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
3953         schedule_delayed_work(&octeon_dev->nic_poll_work.work,
3954                               LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3955
3956         atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
3957
3958         if (octeon_set_io_queues_off(octeon_dev)) {
3959                 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
3960                 return 1;
3961         }
3962
3963         if (OCTEON_CN23XX_PF(octeon_dev)) {
3964                 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
3965                 if (ret) {
3966                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
3967                         return ret;
3968                 }
3969         }
3970
3971         /* Initialize soft command buffer pool
3972          */
3973         if (octeon_setup_sc_buffer_pool(octeon_dev)) {
3974                 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
3975                 return 1;
3976         }
3977         atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
3978
3979         /* Set up the data structures that manage this Octeon's input queues. */
3980         if (octeon_setup_instr_queues(octeon_dev)) {
3981                 dev_err(&octeon_dev->pci_dev->dev,
3982                         "instruction queue initialization failed\n");
3983                 return 1;
3984         }
3985         atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
3986
3987         /* Initialize lists to manage the requests of different types that
3988          * arrive from user & kernel applications for this octeon device.
3989          */
3990         if (octeon_setup_response_list(octeon_dev)) {
3991                 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
3992                 return 1;
3993         }
3994         atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
3995
3996         if (octeon_setup_output_queues(octeon_dev)) {
3997                 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
3998                 return 1;
3999         }
4000
4001         atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4002
4003         if (OCTEON_CN23XX_PF(octeon_dev)) {
4004                 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4005                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4006                         return 1;
4007                 }
4008                 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4009
4010                 if (octeon_allocate_ioq_vector(octeon_dev)) {
4011                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4012                         return 1;
4013                 }
4014                 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4015
4016         } else {
4017                 /* The input and output queue registers were set up earlier
4018                  * (the queues were not enabled). Any additional registers
4019                  * that need to be programmed should be done now.
4020                  */
4021                 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4022                 if (ret) {
4023                         dev_err(&octeon_dev->pci_dev->dev,
4024                                 "Failed to configure device registers\n");
4025                         return ret;
4026                 }
4027         }
4028
4029         /* Initialize the tasklet that handles output queue packet processing. */
4030         dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4031         tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4032                      (unsigned long)octeon_dev);
4033
4034         /* Set up the interrupt handler and record the INT SUM register address
4035          */
4036         if (octeon_setup_interrupt(octeon_dev,
4037                                    octeon_dev->sriov_info.num_pf_rings))
4038                 return 1;
4039
4040         /* Enable Octeon device interrupts */
4041         octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4042
4043         atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4044
4045         /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4046          * the output queue is enabled.
4047          * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4048          * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4049          * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4050          * before any credits have been issued, causing the ring to be reset
4051          * (and the f/w appear to never have started).
4052          */
4053         for (j = 0; j < octeon_dev->num_oqs; j++)
4054                 writel(octeon_dev->droq[j]->max_count,
4055                        octeon_dev->droq[j]->pkts_credit_reg);
4056
4057         /* Enable the input and output queues for this Octeon device */
4058         ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4059         if (ret) {
4060                 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4061                 return ret;
4062         }
4063
4064         atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4065
4066         if (!OCTEON_CN23XX_PF(octeon_dev) || !fw_loaded) {
4067                 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4068                 if (!ddr_timeout) {
4069                         dev_info(&octeon_dev->pci_dev->dev,
4070                                  "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4071                 }
4072
4073                 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4074
4075                 /* Wait for the Octeon to initialize DDR after the soft reset. */
4076                 while (!ddr_timeout) {
4077                         set_current_state(TASK_INTERRUPTIBLE);
4078                         if (schedule_timeout(HZ / 10)) {
4079                                 /* user probably pressed Control-C */
4080                                 return 1;
4081                         }
4082                 }
4083                 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4084                 if (ret) {
4085                         dev_err(&octeon_dev->pci_dev->dev,
4086                                 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4087                                 ret);
4088                         return 1;
4089                 }
4090
4091                 if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4092                         dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4093                         return 1;
4094                 }
4095
4096                 /* Divert U-Boot to take commands from the host instead. */
4097                 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4098
4099                 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4100                 ret = octeon_init_consoles(octeon_dev);
4101                 if (ret) {
4102                         dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4103                         return 1;
4104                 }
4105                 /* If console debug is enabled, pass an empty string to use
4106                  * the default enablement; otherwise pass NULL for 'disabled'.
4107                  */
4108                 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4109                 ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4110                 if (ret) {
4111                         dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4112                         return 1;
4113                 } else if (octeon_console_debug_enabled(0)) {
4114                         /* If the console was added and we're logging console
4115                          * output, set our console print function.
4116                          */
4117                         octeon_dev->console[0].print = octeon_dbg_console_print;
4118                 }
4119
4120                 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4121
4122                 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4123                 ret = load_firmware(octeon_dev);
4124                 if (ret) {
4125                         dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4126                         return 1;
4127                 }
4128         }
4129
4130         handshake[octeon_dev->octeon_id].init_ok = 1;
4131         complete(&handshake[octeon_dev->octeon_id].init);
4132
4133         atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4134
4135         return 0;
4136 }
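
/* For reference, the status values set along the successful path above,
 * in order: BEGIN_STATE, PCI_ENABLE_DONE, PCI_MAP_DONE,
 * DISPATCH_INIT_DONE, SC_BUFF_POOL_INIT_DONE, INSTR_QUEUE_INIT_DONE,
 * RESP_LIST_INIT_DONE, DROQ_INIT_DONE, then on CN23XX PF also
 * MBOX_SETUP_DONE and MSIX_ALLOC_VECTOR_DONE, then INTR_SET_DONE,
 * IO_QUEUES_DONE, CONSOLE_INIT_DONE (host-firmware-load path only) and
 * finally HOST_OK.
 */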
4137
4138 /**
4139  * \brief Debug console print function
4140  * @param octeon_dev  octeon device
4141  * @param console_num console number
4142  * @param prefix      first portion of line to display
4143  * @param suffix      second portion of line to display
4144  *
4145  * The OCTEON debug console outputs entire lines (excluding '\n').
4146  * Normally, the line will be passed in the 'prefix' parameter.
4147  * However, due to buffering, it is possible for a line to be split into two
4148  * parts, in which case they will be passed as the 'prefix' parameter and
4149  * 'suffix' parameter.
4150  */
4151 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4152                                     char *prefix, char *suffix)
4153 {
4154         if (prefix && suffix)
4155                 dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4156                          suffix);
4157         else if (prefix)
4158                 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4159         else if (suffix)
4160                 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4161
4162         return 0;
4163 }
4164
4165 /**
4166  * \brief Exits the module
4167  */
4168 static void __exit liquidio_exit(void)
4169 {
4170         liquidio_deinit_pci();
4171
4172         pr_info("LiquidIO network module is now unloaded\n");
4173 }
4174
4175 module_init(liquidio_init);
4176 module_exit(liquidio_exit);