/**********************************************************************
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to a non-zero value before starting to check");
#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");
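/* Illustration: elsewhere in this driver (not shown in this excerpt) the
 * value is assumed to feed netif_msg_init(debug, DEFAULT_MSG_ENABLE), so a
 * negative value (the initial -1) selects the default above, which is
 * NETIF_MSG_DRV (0x0001) | NETIF_MSG_PROBE (0x0002) | NETIF_MSG_LINK
 * (0x0004) = 0x0007; "modprobe liquidio debug=0x7" states that explicitly.
 */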
static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_NIC;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded. Default \"nic\". Use \"none\" to load firmware from flash.");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");
/**
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns 1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}
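/* Worked example: console_bitmask=5 (binary 101) redirects consoles 0 and 2
 * to syslog, since octeon_console_debug_enabled() tests bit <console> of the
 * mask: (5 >> 0) & 1 == 1, (5 >> 1) & 1 == 0, (5 >> 2) & 1 == 1.
 */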
/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS		1000

struct liquidio_if_cfg_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct liquidio_if_cfg_resp {
	u64 rh;
	struct liquidio_if_cfg_info cfg_info;
	u64 status;
};

struct liquidio_rx_ctl_context {
	int octeon_id;

	wait_queue_head_t wc;

	int cond;
};

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)
/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
	/** List manipulation. Next and prev pointers. */
	struct list_head list;

	/** Size of the gather component at sg in bytes. */
	int sg_size;

	/** Number of bytes that sg was adjusted to make it 8B-aligned. */
	int adjust;

	/** Gather component that can accommodate max sized fragment list
	 * received from the IP layer.
	 */
	struct octeon_sg_entry *sg;

	dma_addr_t sg_dma_ptr;
};

struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;
static void octeon_droq_bh(unsigned long pdev)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device *oct = (struct octeon_device *)pdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}
/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(100);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}
/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
/**
 * \brief Stop all PCI IO to a given device
 * @param dev Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}
/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery but only
	 * for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
	struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
	struct pci_dev *pdev __attribute__((unused)))
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
	/* Nothing to be done here. */
}

/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
			    pm_message_t state __attribute__((unused)))
{
	return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
	return 0;
}
/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled	= liquidio_pcie_mmio_enabled,
	.slot_reset	= liquidio_pcie_slot_reset,
	.resume		= liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{	/* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{	/* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{	/* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
	.name		= "LiquidIO",
	.id_table	= liquidio_pci_tbl,
	.probe		= liquidio_probe,
	.remove		= liquidio_remove,
	.err_handler	= &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
	.suspend	= liquidio_suspend,
	.resume		= liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}
/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_stop_subqueue(netdev, i);
	} else {
		netif_stop_queue(netdev);
	}
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++)
			netif_start_subqueue(netdev, i);
	} else {
		netif_start_queue(netdev);
	}
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (netif_is_multiqueue(netdev)) {
		int i;

		for (i = 0; i < netdev->num_tx_queues; i++) {
			int qno = lio->linfo.txpciq[i %
				lio->oct_dev->num_iqs].s.q_no;

			if (__netif_subqueue_stopped(netdev, i)) {
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
							  tx_restart, 1);
				netif_wake_subqueue(netdev, i);
			}
		}
	} else {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		netif_wake_queue(netdev);
	}
}

/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
	txqs_stop(netdev);
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->linfo.link.s.link_up) {
		txqs_start(netdev);
		return;
	}
}

/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_wake_subqueue(netdev, q);
	else
		netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
	if (netif_is_multiqueue(netdev))
		netif_stop_subqueue(netdev, q);
	else
		netif_stop_queue(netdev);
}
/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
	int ret_val = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		int numqs = lio->netdev->num_tx_queues;
		int q, iq = 0;

		/* check each sub-queue state */
		for (q = 0; q < numqs; q++) {
			iq = lio->linfo.txpciq[q %
				lio->oct_dev->num_iqs].s.q_no;
			if (octnet_iq_is_full(lio->oct_dev, iq))
				continue;
			if (__netif_subqueue_stopped(lio->netdev, q)) {
				wake_q(lio->netdev, q);
				INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
							  tx_restart, 1);
				ret_val++;
			}
		}
	} else {
		if (octnet_iq_is_full(lio->oct_dev, lio->txq))
			return 0;
		wake_q(lio->netdev, lio->txq);
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		ret_val = 1;
	}
	return ret_val;
}

/**
 * Remove the node at the head of the list. The list would be empty at
 * the end of this call if there are no more nodes in the list.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
	struct list_head *node;

	if ((root->prev == root) && (root->next == root))
		node = NULL;
	else
		node = root->next;

	if (node)
		list_del(node);

	return node;
}
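/* Note: the (root->prev == root) && (root->next == root) test above is just
 * the list_empty() condition written out; callers below (e.g. delete_glists)
 * rely on the NULL return to detect that the list has been drained.
 */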
/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
	struct octnic_gather *g;
	int i;

	kfree(lio->glist_lock);
	lio->glist_lock = NULL;

	if (!lio->glist)
		return;

	for (i = 0; i < lio->linfo.num_txpciq; i++) {
		do {
			g = (struct octnic_gather *)
				list_delete_head(&lio->glist[i]);
			kfree(g);
		} while (g);

		if (lio->glists_virt_base && lio->glists_virt_base[i] &&
		    lio->glists_dma_base && lio->glists_dma_base[i]) {
			lio_dma_free(lio->oct_dev,
				     lio->glist_entry_size * lio->tx_qsize,
				     lio->glists_virt_base[i],
				     lio->glists_dma_base[i]);
		}
	}

	kfree(lio->glists_virt_base);
	lio->glists_virt_base = NULL;

	kfree(lio->glists_dma_base);
	lio->glists_dma_base = NULL;

	kfree(lio->glist);
	lio->glist = NULL;
}

/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
	int i, j;
	struct octnic_gather *g;

	lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
				  GFP_KERNEL);
	if (!lio->glist_lock)
		return -ENOMEM;

	lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
			     GFP_KERNEL);
	if (!lio->glist) {
		kfree(lio->glist_lock);
		lio->glist_lock = NULL;
		return -ENOMEM;
	}

	lio->glist_entry_size =
		ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

	/* allocate memory to store virtual and dma base address of
	 * per glist consistent memory
	 */
	lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
					GFP_KERNEL);
	lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
				       GFP_KERNEL);

	if (!lio->glists_virt_base || !lio->glists_dma_base) {
		delete_glists(lio);
		return -ENOMEM;
	}

	for (i = 0; i < num_iqs; i++) {
		int numa_node = dev_to_node(&oct->pci_dev->dev);

		spin_lock_init(&lio->glist_lock[i]);

		INIT_LIST_HEAD(&lio->glist[i]);

		lio->glists_virt_base[i] =
			lio_dma_alloc(oct,
				      lio->glist_entry_size * lio->tx_qsize,
				      &lio->glists_dma_base[i]);

		if (!lio->glists_virt_base[i]) {
			delete_glists(lio);
			return -ENOMEM;
		}

		for (j = 0; j < lio->tx_qsize; j++) {
			g = kzalloc_node(sizeof(*g), GFP_KERNEL,
					 numa_node);
			if (!g)
				g = kzalloc(sizeof(*g), GFP_KERNEL);
			if (!g)
				break;

			g->sg = lio->glists_virt_base[i] +
				(j * lio->glist_entry_size);

			g->sg_dma_ptr = lio->glists_dma_base[i] +
					(j * lio->glist_entry_size);

			list_add_tail(&g->list, &lio->glist[i]);
		}

		if (j != lio->tx_qsize) {
			delete_glists(lio);
			return -ENOMEM;
		}
	}

	return 0;
}
/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	rtnl_lock();
	call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
	rtnl_unlock();
}
/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);

	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			netif_carrier_on(netdev);
			txqs_wake(netdev);
		} else {
			netif_carrier_off(netdev);
			stop_txq(netdev);
		}
	}
}
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}
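/* Rationale (assumption, not stated in the code): a dual-PF LiquidIO card
 * enumerates its two PFs consecutively, at the same PCI bus and slot but
 * different function numbers, so matching bus + slot of octeon_id + 1 finds
 * the sibling PF of the same physical card; NULL means there is none.
 */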
static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}

static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
			    (mask_of_crashed_or_stuck_cores
			     >> core) & 1;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

		vfs_referencing_pf  = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}
/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
	       const struct pci_device_id *ent __attribute__((unused)))
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	/* Enable PTP for 6XXX Device */
	if ((pdev->device == OCTEON_CN66XX) ||
	    (pdev->device == OCTEON_CN68XX))
		oct_dev->ptp_enable = true;
	else
		oct_dev->ptp_enable = false;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u64 scratch1;
		u8 bus, device, function;

		scratch1 = octeon_read_csr64(oct_dev, CN23XX_SLI_SCRATCH1);
		if (!(scratch1 & 4ULL)) {
			/* Bit 2 of SLI_SCRATCH_1 is a flag that indicates that
			 * the lio watchdog kernel thread is running for this
			 * NIC. Each NIC gets one watchdog kernel thread.
			 */
			scratch1 |= 4ULL;
			octeon_write_csr64(oct_dev, CN23XX_SLI_SCRATCH1,
					   scratch1);

			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}
static bool fw_type_is_none(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_NONE,
		       sizeof(LIO_FW_NAME_TYPE_NONE)) == 0;
}

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	int rc;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	rc = __pci_reset_function_locked(oct->pci_dev);

	if (rc != 0)
		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
			rc, oct->pf_num);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}
/**
 * \brief Destroy resources associated with octeon device
 * @param pdev PCI device structure
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
	int i, refcount;
	struct msix_entry *msix_entries;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;

	struct handshake *hs;

	switch (atomic_read(&oct->status)) {
	case OCT_DEV_RUNNING:
	case OCT_DEV_CORE_OK:

		/* No more instructions will be forwarded. */
		atomic_set(&oct->status, OCT_DEV_IN_RESET);

		oct->app_mode = CVM_DRV_INVALID_APP;
		dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
			lio_get_state_string(&oct->status));

		schedule_timeout_uninterruptible(HZ / 10);

		/* fall through */
	case OCT_DEV_HOST_OK:

		/* fall through */
	case OCT_DEV_CONSOLE_INIT_DONE:
		/* Remove any consoles */
		octeon_remove_consoles(oct);

		/* fall through */
	case OCT_DEV_IO_QUEUES_DONE:
		if (wait_for_pending_requests(oct))
			dev_err(&oct->pci_dev->dev, "There were pending requests\n");

		if (lio_wait_for_instr_fetch(oct))
			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

		/* Disable the input and output queues now. No more packets will
		 * arrive from Octeon, but we should wait for all packet
		 * processing to finish.
		 */
		oct->fn_list.disable_io_queues(oct);

		if (lio_wait_for_oq_pkts(oct))
			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

		/* fall through */
	case OCT_DEV_INTR_SET_DONE:
		/* Disable interrupts */
		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

		if (oct->msix_on) {
			msix_entries = (struct msix_entry *)oct->msix_entries;
			for (i = 0; i < oct->num_msix_irqs - 1; i++) {
				if (oct->ioq_vector[i].vector) {
					/* clear the affinity_cpumask */
					irq_set_affinity_hint(
						msix_entries[i].vector,
						NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
					oct->ioq_vector[i].vector = 0;
				}
			}
			/* non-iov vector's argument is oct struct */
			free_irq(msix_entries[i].vector, oct);

			pci_disable_msix(oct->pci_dev);
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
		} else {
			/* Release the interrupt line */
			free_irq(oct->pci_dev->irq, oct);

			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
		}

		kfree(oct->irq_name_storage);
		oct->irq_name_storage = NULL;

		/* fall through */
	case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
		if (OCTEON_CN23XX_PF(oct))
			octeon_free_ioq_vector(oct);

		/* fall through */
	case OCT_DEV_MBOX_SETUP_DONE:
		if (OCTEON_CN23XX_PF(oct))
			oct->fn_list.free_mbox(oct);

		/* fall through */
	case OCT_DEV_IN_RESET:
	case OCT_DEV_DROQ_INIT_DONE:
		/* Wait for any pending operations */
		mdelay(100);
		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		/* fall through */
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		/* fall through */
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		/* fall through */
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		/* fall through */
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		/* fall through */
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		/* Soft reset the octeon device before exiting.
		 * However, if fw was loaded from card (i.e. autoboot),
		 * perform an FLR instead.
		 * Implementation note: only soft-reset the device
		 * if it is a CN6XXX OR the LAST CN23XX device.
		 */
		if (fw_type_is_none())
			octeon_pci_flr(oct);
		else if (OCTEON_CN6XXX(oct) || !refcount)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		/* fall through */
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		/* fall through */
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	} /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}
/**
 * \brief Callback for rx ctrl
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void rx_ctl_callback(struct octeon_device *oct,
			    u32 status,
			    void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_rx_ctl_context *ctx;

	ctx = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (status)
		dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
			CVM_CAST64(status));
	WRITE_ONCE(ctx->cond, 1);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	struct liquidio_rx_ctl_context *ctx;
	union octnet_cmd *ncmd;
	int ctx_size = sizeof(struct liquidio_rx_ctl_context);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return;

	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, ctx_size);

	ncmd = (union octnet_cmd *)sc->virtdptr;
	ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

	WRITE_ONCE(ctx->cond, 0);
	ctx->octeon_id = lio_get_device_id(oct);
	init_waitqueue_head(&ctx->wc);

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	sc->callback = rx_ctl_callback;
	sc->callback_arg = sc;
	sc->wait_time = 5000;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
			return;
		oct->props[lio->ifidx].rx_on = start_stop;
	}

	octeon_free_soft_command(oct, sc);
}
/**
 * \brief Destroy NIC device interface
 * @param oct octeon device
 * @param ifidx which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct lio *lio;
	struct napi_struct *napi, *n;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_link_status_change_wq(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}
/**
 * \brief Stop complete NIC functionality
 * @param oct octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}

/**
 * \brief Cleans up resources at unload time
 * @param pdev PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->watchdog_task)
		kthread_stop(oct_dev->watchdog_task);

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}
/**
 * \brief Identify the Octeon device and map the BAR address space
 * @param oct octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;
	char *s;

	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		s = "CN68XX";
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		s = "CN66XX";
		break;

	case OCTEON_CN23XX_PCIID_PF:
		oct->chip_id = OCTEON_CN23XX_PF_VID;
		ret = setup_cn23xx_octeon_pf_device(oct);
#ifdef CONFIG_PCI_IOV
		if (!ret)
			pci_sriov_set_totalvfs(oct->pci_dev,
					       oct->sriov_info.max_vfs);
#endif
		s = "CN23XX";
		break;

	default:
		s = "?";
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	if (!ret)
		dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
			 OCTEON_MAJOR_REV(oct),
			 OCTEON_MINOR_REV(oct),
			 octeon_get_conf(oct)->card_name,
			 LIQUIDIO_VERSION);

	return ret;
}

/**
 * \brief PCI initialization for each Octeon device.
 * @param oct octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}
static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
{
	int q = 0;

	if (netif_is_multiqueue(lio->netdev))
		q = skb->queue_mapping % lio->linfo.num_txpciq;

	return q;
}

/**
 * \brief Check Tx queue state for a given network buffer
 * @param lio per-network private data
 * @param skb network buffer
 */
static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
{
	int q = 0, iq = 0;

	if (netif_is_multiqueue(lio->netdev)) {
		q = skb->queue_mapping;
		iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
	} else {
		iq = lio->txq;
		q = iq;
	}

	if (octnet_iq_is_full(lio->oct_dev, iq))
		return 0;

	if (__netif_subqueue_stopped(lio->netdev, q)) {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
		wake_q(lio->netdev, q);
	}
	return 1;
}

/**
 * \brief Unmap and free network buffer
 * @param buf buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	check_txq_state(lio, skb);

	tx_buffer_free(skb);
}
/**
 * \brief Unmap and free gather buffer
 * @param buf buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	check_txq_state(lio, skb); /* mq support: sub-queue state check */

	tx_buffer_free(skb);
}
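/* Indexing sketch for the unmap loop above: each octeon_sg_entry carries four
 * buffer pointers, so entry i (the linear skb data being entry 0) lives at
 * g->sg[i >> 2].ptr[i & 3].  For example, an skb with 6 fragments uses
 * sg[0].ptr[0..3] and sg[1].ptr[0..2]; the loop index i = 5 maps to
 * g->sg[1].ptr[1], since 5 >> 2 == 1 and 5 & 3 == 1.
 */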
/**
 * \brief Unmap and free gather buffer with response
 * @param buf buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

	i = 1;
	while (frags--) {
		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       frag->size, DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */

	check_txq_state(lio, skb);
}
/**
 * \brief Adjust ptp frequency
 * @param ptp PTP clock info
 * @param ppb how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
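/* Worked example (hypothetical clock rate): with coproc_clock_rate = 600 MHz
 * and ppb = +1000 (1 ppm), delta = (1000 << 32) / 600000000 ~= 7158, so the
 * 32.32 fixed-point compensation word grows by ~7158 units and the PTP clock
 * gains 1 us per second relative to its previous rate.
 */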
/**
 * \brief Adjust ptp time
 * @param ptp PTP clock info
 * @param delta how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * \brief Get hardware clock time, including any adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * \brief Set hardware clock time. Reset adjustment
 * @param ptp PTP clock info
 * @param ts timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}
/**
 * \brief Check if PTP is enabled
 * @param ptp PTP clock info
 * @param rq request
 * @param on is it on
 */
static int
liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
		    struct ptp_clock_request *rq __attribute__((unused)),
		    int on __attribute__((unused)))
{
	return -EOPNOTSUPP;
}

/**
 * \brief Open PTP clock source
 * @param netdev network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					    &oct->pci_dev->dev);

	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}
/**
 * \brief Init PTP clock
 * @param oct octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable PTP clock */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}
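/* Worked example (hypothetical clock rate): clock_comp is the per-cycle
 * increment in 32.32 fixed point, (NSEC_PER_SEC << 32) / coproc_clock_rate.
 * For a 600 MHz coprocessor clock that is (10^9 << 32) / (6 * 10^8)
 * ~= 0x1AAAAAAAB, i.e. 1.666... ns added to the PTP clock every cycle.
 */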
/**
 * \brief Load firmware to device
 * @param oct octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (fw_type[0] == '\0')
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
	else
		tmp_fw_type = fw_type;

	sprintf(fw_name, "/*(DEBLOBBED)*/", LIO_FW_DIR, LIO_FW_BASE_NAME,
		octeon_get_conf(oct)->card_name, tmp_fw_type,
		LIO_FW_NAME_SUFFIX);

	ret = reject_firmware(&fw, fw_name, &oct->pci_dev->dev);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s\n",
			fw_name);
		release_firmware(fw);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}
/**
 * \brief Callback for getting interface configuration
 * @param status status of request
 * @param buf pointer to resp structure
 */
static void if_cfg_callback(struct octeon_device *oct,
			    u32 status __attribute__((unused)),
			    void *buf)
{
	struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
	struct liquidio_if_cfg_resp *resp;
	struct liquidio_if_cfg_context *ctx;

	resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
	ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;

	oct = lio_get_device(ctx->octeon_id);
	if (resp->status)
		dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
			CVM_CAST64(resp->status), status);
	WRITE_ONCE(ctx->cond, 1);

	snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
		 resp->cfg_info.liquidio_firmware_version);

	/* This barrier is required to be sure that the response has been
	 * written fully before waking up the handler
	 */
	wmb();

	wake_up_interruptible(&ctx->wc);
}
/**
 * \brief Poll routine for checking transmit queue status
 * @param work work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}

/**
 * \brief Sets up the txq poll check
 * @param netdev network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));

	return 0;
}

static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->txq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
		destroy_workqueue(lio->txq_status_wq.wq);
	}
}
/**
 * \brief Net device open for LiquidIO
 * @param netdev network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 1;
	}

	if (oct->ptp_enable)
		oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			if (setup_tx_poll_fn(netdev))
				return -1;
	} else {
		if (setup_tx_poll_fn(netdev))
			return -1;

		start_txq(netdev);
	}

	/* tell Octeon to start forwarding packets to host */
	send_rx_ctrl_cmd(lio, 1);

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return 0;
}

/**
 * \brief Net device stop for LiquidIO
 * @param netdev network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct napi_struct *napi, *n;

	if (oct->props[lio->ifidx].napi_enabled) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	netif_tx_disable(netdev);

	/* Inform that netif carrier is down */
	netif_carrier_off(netdev);

	lio->intf_open = 0;
	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	/* Tell Octeon that nic interface is down. */
	send_rx_ctrl_cmd(lio, 0);

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			cleanup_tx_poll_fn(netdev);
	} else {
		cleanup_tx_poll_fn(netdev);
	}

	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return 0;
}
/**
 * \brief Converts a mask based on net device flags
 * @param netdev network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}
/**
 * \brief Net device set_multicast_list
 * @param netdev network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
		/* no need to swap bytes */

		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */
	nctrl.wait_time = 0;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}
}
/**
 * \brief Net device set_mac_address
 * @param netdev network device
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	int ret = 0;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct sockaddr *addr = (struct sockaddr *)p;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	nctrl.wait_time = 100;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);

	return 0;
}
/**
 * \brief Net device get_stats
 * @param netdev network device
 */
static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct net_device_stats *stats = &netdev->stats;
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return stats;

	for (i = 0; i < oct->num_iqs; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	stats->tx_packets = pkts;
	stats->tx_bytes = bytes;
	stats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < oct->num_oqs; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	stats->rx_bytes = bytes;
	stats->rx_packets = pkts;
	stats->rx_dropped = drop;

	return stats;
}
/**
 * \brief Net device change_mtu
 * @param netdev network device
 * @param new_mtu new MTU value
 */
static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
	nctrl.ncmd.s.param1 = new_mtu;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
		return -1;
	}

	lio->mtu = new_mtu;

	return 0;
}
/**
 * \brief Handler for SIOCSHWTSTAMP ioctl
 * @param netdev network device
 * @param ifr interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config conf;
	struct lio *lio = GET_LIO(netdev);

	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
		return -EFAULT;

	if (conf.flags)
		return -EINVAL;

	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}
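/* Userspace sketch (generic SIOCSHWTSTAMP flow, not specific to this driver):
 * note how the granted filter may be coarser than the one requested, which
 * is why the possibly-modified config is copied back to the caller above.
 *
 *	struct hwtstamp_config cfg = {
 *		.tx_type   = HWTSTAMP_TX_ON,
 *		.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *	};
 *	struct ifreq ifr = { .ifr_data = (char *)&cfg };
 *
 *	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ);
 *	ioctl(fd, SIOCSHWTSTAMP, &ifr);
 *	// cfg.rx_filter now reads back HWTSTAMP_FILTER_ALL
 */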
/**
 * \brief ioctl handler
 * @param netdev network device
 * @param ifr interface request
 * @param cmd command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct lio *lio = GET_LIO(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		if (lio->oct_dev->ptp_enable)
			return hwtstamp_ioctl(netdev, ifr);
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * \brief handle a Tx timestamp response
 * @param status response status
 * @param buf pointer to skb
 */
static void handle_timestamp(struct octeon_device *oct,
			     u32 status,
			     void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct oct_timestamp_resp *resp;
	struct lio *lio;
	struct sk_buff *skb = (struct sk_buff *)buf;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}
2473 /** \brief Send a data packet that will be timestamped
2474 * @param oct octeon device
2475 * @param ndata pointer to network data
2476 * @param finfo pointer to private network data
2478 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2479 struct octnic_data_pkt *ndata,
2480 struct octnet_buf_free_info *finfo)
2483 struct octeon_soft_command *sc;
2490 sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2491 sizeof(struct oct_timestamp_resp));
2495 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2496 return IQ_SEND_FAILED;
2499 if (ndata->reqtype == REQTYPE_NORESP_NET)
2500 ndata->reqtype = REQTYPE_RESP_NET;
2501 else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2502 ndata->reqtype = REQTYPE_RESP_NET_SG;
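/* The request type is promoted from NORESP to RESP above because a
 * timestamped packet needs a response buffer: the firmware returns the
 * Tx timestamp in struct oct_timestamp_resp, which handle_timestamp()
 * reads from sc->virtrptr when the soft command completes.
 */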
2504 sc->callback = handle_timestamp;
2505 sc->callback_arg = finfo->skb;
2506 sc->iq_no = ndata->q_no;
2508 if (OCTEON_CN23XX_PF(oct))
2509 len = (u32)((struct octeon_instr_ih3 *)
2510 (&sc->cmd.cmd3.ih3))->dlengsz;
2512 len = (u32)((struct octeon_instr_ih2 *)
2513 (&sc->cmd.cmd2.ih2))->dlengsz;
2517 retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2518 sc, len, ndata->reqtype);
2520 if (retval == IQ_SEND_FAILED) {
2521 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2523 octeon_free_soft_command(oct, sc);
2525 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2531 /** \brief Transmit network packets to the Octeon interface
2532 * @param skb skbuff struct handed down by the network stack
2533 * @param netdev pointer to network device
2534 * @returns whether the packet was transmitted to the device okay or not
2535 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
2537 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2540 struct octnet_buf_free_info *finfo;
2541 union octnic_cmd_setup cmdsetup;
2542 struct octnic_data_pkt ndata;
2543 struct octeon_device *oct;
2544 struct oct_iq_stats *stats;
2545 struct octeon_instr_irh *irh;
2546 union tx_info *tx_info;
2548 int q_idx = 0, iq_no = 0;
2553 lio = GET_LIO(netdev);
2556 if (netif_is_multiqueue(netdev)) {
2557 q_idx = skb->queue_mapping;
2558 q_idx = (q_idx % (lio->linfo.num_txpciq));
2560 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
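/* Fold the stack-chosen queue index into the range of Tx queues this
 * interface owns; e.g. with num_txpciq == 4, skb->queue_mapping values
 * 0..7 map to q_idx 0..3, 0..3.
 */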
2565 stats = &oct->instr_queue[iq_no]->stats;
2567 /* Check for all conditions in which the current packet cannot be
 * transmitted.
 */
2570 if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2571 (!lio->linfo.link.s.link_up) ||
2573 netif_info(lio, tx_err, lio->netdev,
2574 "Transmit failed link_status : %d\n",
2575 lio->linfo.link.s.link_up);
2576 goto lio_xmit_failed;
2579 /* Use space in skb->cb to store info used to unmap and
 * free the buffers.
 */
2582 finfo = (struct octnet_buf_free_info *)skb->cb;
2587 /* Prepare the attributes for the data to be passed to OSI. */
2588 memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2590 ndata.buf = (void *)finfo;
2594 if (netif_is_multiqueue(netdev)) {
2595 if (octnet_iq_is_full(oct, ndata.q_no)) {
2596 /* defer sending if queue is full */
2597 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2599 stats->tx_iq_busy++;
2600 return NETDEV_TX_BUSY;
2603 if (octnet_iq_is_full(oct, lio->txq)) {
2604 /* defer sending if queue is full */
2605 stats->tx_iq_busy++;
2606 netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2608 return NETDEV_TX_BUSY;
2611 /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
2612 * lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2615 ndata.datasize = skb->len;
2618 cmdsetup.s.iq_no = iq_no;
2620 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2621 if (skb->encapsulation) {
2622 cmdsetup.s.tnl_csum = 1;
2625 cmdsetup.s.transport_csum = 1;
2628 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2629 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2630 cmdsetup.s.timestamp = 1;
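/* SKBTX_IN_PROGRESS marks this skb as having a hardware timestamp
 * outstanding; handle_timestamp() later satisfies it by calling
 * skb_tstamp_tx() with the value the firmware returns.
 */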
2633 if (skb_shinfo(skb)->nr_frags == 0) {
2634 cmdsetup.s.u.datasize = skb->len;
2635 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2637 /* Map the linear skb data for DMA to the device */
2638 dptr = dma_map_single(&oct->pci_dev->dev,
2642 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2643 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2645 return NETDEV_TX_BUSY;
2648 if (OCTEON_CN23XX_PF(oct))
2649 ndata.cmd.cmd3.dptr = dptr;
2651 ndata.cmd.cmd2.dptr = dptr;
2653 ndata.reqtype = REQTYPE_NORESP_NET;
2657 struct skb_frag_struct *frag;
2658 struct octnic_gather *g;
2660 spin_lock(&lio->glist_lock[q_idx]);
2661 g = (struct octnic_gather *)
2662 list_delete_head(&lio->glist[q_idx]);
2663 spin_unlock(&lio->glist_lock[q_idx]);
2666 netif_info(lio, tx_err, lio->netdev,
2667 "Transmit scatter gather: glist null!\n");
2668 goto lio_xmit_failed;
2671 cmdsetup.s.gather = 1;
2672 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2673 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2675 memset(g->sg, 0, g->sg_size);
2677 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2679 (skb->len - skb->data_len),
2681 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2682 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2684 return NETDEV_TX_BUSY;
2686 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2688 frags = skb_shinfo(skb)->nr_frags;
2691 frag = &skb_shinfo(skb)->frags[i - 1];
2693 g->sg[(i >> 2)].ptr[(i & 3)] =
2694 dma_map_page(&oct->pci_dev->dev,
2700 if (dma_mapping_error(&oct->pci_dev->dev,
2701 g->sg[i >> 2].ptr[i & 3])) {
2702 dma_unmap_single(&oct->pci_dev->dev,
2704 skb->len - skb->data_len,
2706 for (j = 1; j < i; j++) {
2707 frag = &skb_shinfo(skb)->frags[j - 1];
2708 dma_unmap_page(&oct->pci_dev->dev,
2709 g->sg[j >> 2].ptr[j & 3],
2713 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2715 return NETDEV_TX_BUSY;
2718 add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2722 dptr = g->sg_dma_ptr;
2724 if (OCTEON_CN23XX_PF(oct))
2725 ndata.cmd.cmd3.dptr = dptr;
2727 ndata.cmd.cmd2.dptr = dptr;
2731 ndata.reqtype = REQTYPE_NORESP_NET_SG;
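/* Gather-list indexing sketch: each gather entry packs four pointers
 * (with their sizes), so component i lands at g->sg[i >> 2].ptr[i & 3].
 * Component 0 is the linear skb data; fragment f (0-based) is component
 * i = f + 1, e.g. fragment 6 -> i = 7 -> g->sg[1].ptr[3].
 */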
2734 if (OCTEON_CN23XX_PF(oct)) {
2735 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2736 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2738 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2739 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2742 if (skb_shinfo(skb)->gso_size) {
2743 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2744 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2748 /* HW insert VLAN tag */
2749 if (skb_vlan_tag_present(skb)) {
2750 irh->priority = skb_vlan_tag_get(skb) >> 13;
2751 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
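/* The 16-bit VLAN TCI splits per 802.1Q: the top 3 bits carry the
 * priority (PCP) and the low 12 bits the VLAN ID, so a TCI of 0x6064
 * yields priority 3 and VLAN 100.
 */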
2754 if (unlikely(cmdsetup.s.timestamp))
2755 status = send_nic_timestamp_pkt(oct, &ndata, finfo);
2757 status = octnet_send_nic_data_pkt(oct, &ndata);
2758 if (status == IQ_SEND_FAILED)
2759 goto lio_xmit_failed;
2761 netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2763 if (status == IQ_SEND_STOP)
2764 stop_q(lio->netdev, q_idx);
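/* IQ_SEND_STOP indicates the packet was accepted but the instruction
 * queue is close to full, so the matching netdev Tx queue is stopped
 * here to hold off the stack until the queue drains.
 */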
2766 netif_trans_update(netdev);
2768 if (tx_info->s.gso_segs)
2769 stats->tx_done += tx_info->s.gso_segs;
2772 stats->tx_tot_bytes += ndata.datasize;
2774 return NETDEV_TX_OK;
2777 stats->tx_dropped++;
2778 netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2779 iq_no, stats->tx_dropped);
2781 dma_unmap_single(&oct->pci_dev->dev, dptr,
2782 ndata.datasize, DMA_TO_DEVICE);
2783 tx_buffer_free(skb);
2784 return NETDEV_TX_OK;
2787 /** \brief Network device Tx timeout
2788 * @param netdev pointer to network device
2790 static void liquidio_tx_timeout(struct net_device *netdev)
2794 lio = GET_LIO(netdev);
2796 netif_info(lio, tx_err, lio->netdev,
2797 "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
2798 netdev->stats.tx_dropped);
2799 netif_trans_update(netdev);
2803 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2804 __be16 proto __attribute__((unused)),
2807 struct lio *lio = GET_LIO(netdev);
2808 struct octeon_device *oct = lio->oct_dev;
2809 struct octnic_ctrl_pkt nctrl;
2812 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2815 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2816 nctrl.ncmd.s.param1 = vid;
2817 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2818 nctrl.wait_time = 100;
2819 nctrl.netpndev = (u64)netdev;
2820 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2822 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2824 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2831 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2832 __be16 proto __attribute__((unused)),
2835 struct lio *lio = GET_LIO(netdev);
2836 struct octeon_device *oct = lio->oct_dev;
2837 struct octnic_ctrl_pkt nctrl;
2840 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2843 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2844 nctrl.ncmd.s.param1 = vid;
2845 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2846 nctrl.wait_time = 100;
2847 nctrl.netpndev = (u64)netdev;
2848 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2850 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2852 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2858 /** Send a command to enable or disable Rx checksum offload
2859 * @param netdev pointer to network device
2860 * @param command OCTNET_CMD_TNL_RX_CSUM_CTL
2861 * @param rx_cmd OCTNET_CMD_RXCSUM_ENABLE or
2862 * OCTNET_CMD_RXCSUM_DISABLE
2863 * @returns SUCCESS or FAILURE
2865 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
2868 struct lio *lio = GET_LIO(netdev);
2869 struct octeon_device *oct = lio->oct_dev;
2870 struct octnic_ctrl_pkt nctrl;
2873 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2876 nctrl.ncmd.s.cmd = command;
2877 nctrl.ncmd.s.param1 = rx_cmd;
2878 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2879 nctrl.wait_time = 100;
2880 nctrl.netpndev = (u64)netdev;
2881 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2883 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2885 dev_err(&oct->pci_dev->dev,
2886 "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
2892 /** Send a command to add or delete a VxLAN UDP port to the firmware
2893 * @param netdev pointer to network device
2894 * @param command OCTNET_CMD_VXLAN_PORT_CONFIG
2895 * @param vxlan_port VxLAN port to be added or deleted
2896 * @param vxlan_cmd_bit OCTNET_CMD_VXLAN_PORT_ADD,
2897 * OCTNET_CMD_VXLAN_PORT_DEL
2898 * @returns SUCCESS or FAILURE
2900 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
2901 u16 vxlan_port, u8 vxlan_cmd_bit)
2903 struct lio *lio = GET_LIO(netdev);
2904 struct octeon_device *oct = lio->oct_dev;
2905 struct octnic_ctrl_pkt nctrl;
2908 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2911 nctrl.ncmd.s.cmd = command;
2912 nctrl.ncmd.s.more = vxlan_cmd_bit;
2913 nctrl.ncmd.s.param1 = vxlan_port;
2914 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2915 nctrl.wait_time = 100;
2916 nctrl.netpndev = (u64)netdev;
2917 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2919 ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2921 dev_err(&oct->pci_dev->dev,
2922 "VxLAN port add/delete failed in core (ret:0x%x)\n",
2928 /** \brief Net device fix features
2929 * @param netdev pointer to network device
2930 * @param request features requested
2931 * @returns updated features list
2933 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
2934 netdev_features_t request)
2936 struct lio *lio = netdev_priv(netdev);
2938 if ((request & NETIF_F_RXCSUM) &&
2939 !(lio->dev_capability & NETIF_F_RXCSUM))
2940 request &= ~NETIF_F_RXCSUM;
2942 if ((request & NETIF_F_HW_CSUM) &&
2943 !(lio->dev_capability & NETIF_F_HW_CSUM))
2944 request &= ~NETIF_F_HW_CSUM;
2946 if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
2947 request &= ~NETIF_F_TSO;
2949 if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
2950 request &= ~NETIF_F_TSO6;
2952 if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
2953 request &= ~NETIF_F_LRO;
2955 /* Disable LRO if RXCSUM is off */
2956 if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
2957 (lio->dev_capability & NETIF_F_LRO))
2958 request &= ~NETIF_F_LRO;
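/* Aggregating frames whose checksums were never verified would be
 * unsafe, which is why LRO is forced off above whenever RXCSUM is
 * disabled.
 */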
2960 if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
2961 !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
2962 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
2967 /** \brief Net device set features
2968 * @param netdev pointer to network device
2969 * @param features features to enable/disable
2971 static int liquidio_set_features(struct net_device *netdev,
2972 netdev_features_t features)
2974 struct lio *lio = netdev_priv(netdev);
2976 if ((features & NETIF_F_LRO) &&
2977 (lio->dev_capability & NETIF_F_LRO) &&
2978 !(netdev->features & NETIF_F_LRO))
2979 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
2980 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2981 else if (!(features & NETIF_F_LRO) &&
2982 (lio->dev_capability & NETIF_F_LRO) &&
2983 (netdev->features & NETIF_F_LRO))
2984 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
2985 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
2987 /* Send a command to the firmware to enable or disable Rx checksum
2988 * offload when the setting is changed via ethtool.
 */
2990 if (!(netdev->features & NETIF_F_RXCSUM) &&
2991 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2992 (features & NETIF_F_RXCSUM))
2993 liquidio_set_rxcsum_command(netdev,
2994 OCTNET_CMD_TNL_RX_CSUM_CTL,
2995 OCTNET_CMD_RXCSUM_ENABLE);
2996 else if ((netdev->features & NETIF_F_RXCSUM) &&
2997 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
2998 !(features & NETIF_F_RXCSUM))
2999 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3000 OCTNET_CMD_RXCSUM_DISABLE);
3002 if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3003 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3004 !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
3005 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3006 OCTNET_CMD_VLAN_FILTER_ENABLE);
3007 else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3008 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3009 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
3010 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3011 OCTNET_CMD_VLAN_FILTER_DISABLE);
3016 static void liquidio_add_vxlan_port(struct net_device *netdev,
3017 struct udp_tunnel_info *ti)
3019 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3022 liquidio_vxlan_port_command(netdev,
3023 OCTNET_CMD_VXLAN_PORT_CONFIG,
3025 OCTNET_CMD_VXLAN_PORT_ADD);
3028 static void liquidio_del_vxlan_port(struct net_device *netdev,
3029 struct udp_tunnel_info *ti)
3031 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3034 liquidio_vxlan_port_command(netdev,
3035 OCTNET_CMD_VXLAN_PORT_CONFIG,
3037 OCTNET_CMD_VXLAN_PORT_DEL);
3040 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
3041 u8 *mac, bool is_admin_assigned)
3043 struct lio *lio = GET_LIO(netdev);
3044 struct octeon_device *oct = lio->oct_dev;
3045 struct octnic_ctrl_pkt nctrl;
3047 if (!is_valid_ether_addr(mac))
3050 if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
3053 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3056 nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
3057 /* vfidx is 0 based, but vf_num (param1) is 1 based */
3058 nctrl.ncmd.s.param1 = vfidx + 1;
3059 nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
3060 nctrl.ncmd.s.more = 1;
3061 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3062 nctrl.netpndev = (u64)netdev;
3063 nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3064 nctrl.wait_time = LIO_CMD_WAIT_TM;
3067 /* The MAC Address is presented in network byte order. */
3068 ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
3070 oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
3072 octnet_send_nic_ctrl_pkt(oct, &nctrl);
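/* Layout note: the 6-byte MAC occupies bytes 2..7 of the 64-bit udd
 * word (hence the "+ 2" offset), leaving the first two bytes unused;
 * the same packed form is cached in vf_macaddr above.
 */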
3077 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
3079 struct lio *lio = GET_LIO(netdev);
3080 struct octeon_device *oct = lio->oct_dev;
3083 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3086 retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
3088 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
3093 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
3094 u16 vlan, u8 qos, __be16 vlan_proto)
3096 struct lio *lio = GET_LIO(netdev);
3097 struct octeon_device *oct = lio->oct_dev;
3098 struct octnic_ctrl_pkt nctrl;
3101 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3104 if (vlan_proto != htons(ETH_P_8021Q))
3105 return -EPROTONOSUPPORT;
3107 if (vlan >= VLAN_N_VID || qos > 7)
3111 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
3115 if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
3118 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3121 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
3123 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
3125 nctrl.ncmd.s.param1 = vlantci;
3126 nctrl.ncmd.s.param2 =
3127 vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
3128 nctrl.ncmd.s.more = 0;
3129 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3131 nctrl.wait_time = LIO_CMD_WAIT_TM;
3133 octnet_send_nic_ctrl_pkt(oct, &nctrl);
3135 oct->sriov_info.vf_vlantci[vfidx] = vlantci;
3140 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
3141 struct ifla_vf_info *ivi)
3143 struct lio *lio = GET_LIO(netdev);
3144 struct octeon_device *oct = lio->oct_dev;
3147 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3151 macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
3152 ether_addr_copy(&ivi->mac[0], macaddr);
3153 ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
3154 ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
3155 ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
3159 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3162 struct lio *lio = GET_LIO(netdev);
3163 struct octeon_device *oct = lio->oct_dev;
3164 struct octnic_ctrl_pkt nctrl;
3166 if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3169 if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3172 memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3173 nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3174 nctrl.ncmd.s.param1 =
3175 vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3176 nctrl.ncmd.s.param2 = linkstate;
3177 nctrl.ncmd.s.more = 0;
3178 nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3180 nctrl.wait_time = LIO_CMD_WAIT_TM;
3182 octnet_send_nic_ctrl_pkt(oct, &nctrl);
3184 oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3189 static const struct net_device_ops lionetdevops = {
3190 .ndo_open = liquidio_open,
3191 .ndo_stop = liquidio_stop,
3192 .ndo_start_xmit = liquidio_xmit,
3193 .ndo_get_stats = liquidio_get_stats,
3194 .ndo_set_mac_address = liquidio_set_mac,
3195 .ndo_set_rx_mode = liquidio_set_mcast_list,
3196 .ndo_tx_timeout = liquidio_tx_timeout,
3198 .ndo_vlan_rx_add_vid = liquidio_vlan_rx_add_vid,
3199 .ndo_vlan_rx_kill_vid = liquidio_vlan_rx_kill_vid,
3200 .ndo_change_mtu = liquidio_change_mtu,
3201 .ndo_do_ioctl = liquidio_ioctl,
3202 .ndo_fix_features = liquidio_fix_features,
3203 .ndo_set_features = liquidio_set_features,
3204 .ndo_udp_tunnel_add = liquidio_add_vxlan_port,
3205 .ndo_udp_tunnel_del = liquidio_del_vxlan_port,
3206 .ndo_set_vf_mac = liquidio_set_vf_mac,
3207 .ndo_set_vf_vlan = liquidio_set_vf_vlan,
3208 .ndo_get_vf_config = liquidio_get_vf_config,
3209 .ndo_set_vf_link_state = liquidio_set_vf_link_state,
3212 /** \brief Entry point for the liquidio module
3214 static int __init liquidio_init(void)
3217 struct handshake *hs;
3219 init_completion(&first_stage);
3221 octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3223 if (liquidio_init_pci())
3226 wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3228 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3231 wait_for_completion(&hs->init);
3233 /* init handshake failed */
3234 dev_err(&hs->pci_dev->dev,
3235 "Failed to init device\n");
3236 liquidio_deinit_pci();
3242 for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3245 wait_for_completion_timeout(&hs->started,
3246 msecs_to_jiffies(30000));
3247 if (!hs->started_ok) {
3248 /* starter handshake failed */
3249 dev_err(&hs->pci_dev->dev,
3250 "Firmware failed to start\n");
3251 liquidio_deinit_pci();
3260 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3262 struct octeon_device *oct = (struct octeon_device *)buf;
3263 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3265 union oct_link_status *ls;
3268 if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3269 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, gmxport=%d\n",
3270 recv_pkt->buffer_size[0],
3271 recv_pkt->rh.r_nic_info.gmxport);
3275 gmxport = recv_pkt->rh.r_nic_info.gmxport;
3276 ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3277 OCT_DROQ_INFO_SIZE);
3279 octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3280 for (i = 0; i < oct->ifcount; i++) {
3281 if (oct->props[i].gmxport == gmxport) {
3282 update_link_status(oct->props[i].netdev, ls);
3288 for (i = 0; i < recv_pkt->buffer_count; i++)
3289 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3290 octeon_free_recv_info(recv_info);
3295 * \brief Setup network interfaces
3296 * @param octeon_dev octeon device
3298 * Called during init time for each device. It assumes the NIC
3299 * is already up and running. The link information for each
3300 * interface is passed in link_info.
3302 static int setup_nic_devices(struct octeon_device *octeon_dev)
3304 struct lio *lio = NULL;
3305 struct net_device *netdev;
3307 struct octeon_soft_command *sc;
3308 struct liquidio_if_cfg_context *ctx;
3309 struct liquidio_if_cfg_resp *resp;
3310 struct octdev_props *props;
3311 int retval, num_iqueues, num_oqueues;
3312 union oct_nic_if_cfg if_cfg;
3313 unsigned int base_queue;
3314 unsigned int gmx_port_id;
3315 u32 resp_size, ctx_size, data_size;
3317 struct lio_version *vdata;
3319 /* This is to handle link status changes */
3320 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3322 lio_nic_info, octeon_dev);
3324 /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3325 * They are handled directly.
3327 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3330 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3333 octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3334 free_netsgbuf_with_resp);
3336 for (i = 0; i < octeon_dev->ifcount; i++) {
3337 resp_size = sizeof(struct liquidio_if_cfg_resp);
3338 ctx_size = sizeof(struct liquidio_if_cfg_context);
3339 data_size = sizeof(struct lio_version);
3340 sc = (struct octeon_soft_command *)
3341 octeon_alloc_soft_command(octeon_dev, data_size,
3342 resp_size, ctx_size);
3343 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3344 ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
3345 vdata = (struct lio_version *)sc->virtdptr;
3347 *((u64 *)vdata) = 0;
3348 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3349 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3350 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3352 if (OCTEON_CN23XX_PF(octeon_dev)) {
3353 num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3354 num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3355 base_queue = octeon_dev->sriov_info.pf_srn;
3357 gmx_port_id = octeon_dev->pf_num;
3358 ifidx_or_pfnum = octeon_dev->pf_num;
3360 num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3361 octeon_get_conf(octeon_dev), i);
3362 num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3363 octeon_get_conf(octeon_dev), i);
3364 base_queue = CFG_GET_BASE_QUE_NIC_IF(
3365 octeon_get_conf(octeon_dev), i);
3366 gmx_port_id = CFG_GET_GMXID_NIC_IF(
3367 octeon_get_conf(octeon_dev), i);
3371 dev_dbg(&octeon_dev->pci_dev->dev,
3372 "requesting config for interface %d, iqs %d, oqs %d\n",
3373 ifidx_or_pfnum, num_iqueues, num_oqueues);
3374 WRITE_ONCE(ctx->cond, 0);
3375 ctx->octeon_id = lio_get_device_id(octeon_dev);
3376 init_waitqueue_head(&ctx->wc);
3379 if_cfg.s.num_iqueues = num_iqueues;
3380 if_cfg.s.num_oqueues = num_oqueues;
3381 if_cfg.s.base_queue = base_queue;
3382 if_cfg.s.gmx_port_id = gmx_port_id;
3386 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3387 OPCODE_NIC_IF_CFG, 0,
3390 sc->callback = if_cfg_callback;
3391 sc->callback_arg = sc;
3392 sc->wait_time = 3000;
3394 retval = octeon_send_soft_command(octeon_dev, sc);
3395 if (retval == IQ_SEND_FAILED) {
3396 dev_err(&octeon_dev->pci_dev->dev,
3397 "iq/oq config failed status: %x\n",
3399 /* Soft instr is freed by driver in case of failure. */
3400 goto setup_nic_dev_fail;
3403 /* Sleep on a wait queue till the cond flag indicates that the
3404 * response arrived or timed out.
3406 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
3407 dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
3408 goto setup_nic_wait_intr;
3411 retval = resp->status;
3413 dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3414 goto setup_nic_dev_fail;
3417 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3418 (sizeof(struct liquidio_if_cfg_info)) >> 3);
3420 num_iqueues = hweight64(resp->cfg_info.iqmask);
3421 num_oqueues = hweight64(resp->cfg_info.oqmask);
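/* hweight64() counts set bits, so the queue masks returned by the
 * firmware translate directly into queue counts; e.g. an iqmask of
 * 0x0f yields num_iqueues == 4.
 */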
3423 if (!(num_iqueues) || !(num_oqueues)) {
3424 dev_err(&octeon_dev->pci_dev->dev,
3425 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3426 resp->cfg_info.iqmask,
3427 resp->cfg_info.oqmask);
3428 goto setup_nic_dev_fail;
3430 dev_dbg(&octeon_dev->pci_dev->dev,
3431 "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
3432 i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3433 num_iqueues, num_oqueues);
3434 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
3437 dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3438 goto setup_nic_dev_fail;
3441 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3443 /* Associate the routines that will handle different
3446 netdev->netdev_ops = &lionetdevops;
3448 lio = GET_LIO(netdev);
3450 memset(lio, 0, sizeof(struct lio));
3452 lio->ifidx = ifidx_or_pfnum;
3454 props = &octeon_dev->props[i];
3455 props->gmxport = resp->cfg_info.linfo.gmxport;
3456 props->netdev = netdev;
3458 lio->linfo.num_rxpciq = num_oqueues;
3459 lio->linfo.num_txpciq = num_iqueues;
3460 for (j = 0; j < num_oqueues; j++) {
3461 lio->linfo.rxpciq[j].u64 =
3462 resp->cfg_info.linfo.rxpciq[j].u64;
3464 for (j = 0; j < num_iqueues; j++) {
3465 lio->linfo.txpciq[j].u64 =
3466 resp->cfg_info.linfo.txpciq[j].u64;
3468 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3469 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3470 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3472 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3474 if (OCTEON_CN23XX_PF(octeon_dev) ||
3475 OCTEON_CN6XXX(octeon_dev)) {
3476 lio->dev_capability = NETIF_F_HIGHDMA
3479 | NETIF_F_SG | NETIF_F_RXCSUM
3481 | NETIF_F_TSO | NETIF_F_TSO6
3484 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3486 /* Copy of transmit encapsulation capabilities:
3487 * TSO, TSO6, Checksums for this device
3489 lio->enc_dev_capability = NETIF_F_IP_CSUM
3491 | NETIF_F_GSO_UDP_TUNNEL
3492 | NETIF_F_HW_CSUM | NETIF_F_SG
3494 | NETIF_F_TSO | NETIF_F_TSO6
3497 netdev->hw_enc_features = (lio->enc_dev_capability &
3500 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3502 netdev->vlan_features = lio->dev_capability;
3503 /* Add any unchangeable hw features */
3504 lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
3505 NETIF_F_HW_VLAN_CTAG_RX |
3506 NETIF_F_HW_VLAN_CTAG_TX;
3508 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3510 netdev->hw_features = lio->dev_capability;
3511 /* NETIF_F_HW_VLAN_CTAG_RX is always on; keep it out of hw_features so it cannot be toggled */
3512 netdev->hw_features = netdev->hw_features &
3513 ~NETIF_F_HW_VLAN_CTAG_RX;
3515 /* MTU range: 68 - 16000 */
3516 netdev->min_mtu = LIO_MIN_MTU_SIZE;
3517 netdev->max_mtu = LIO_MAX_MTU_SIZE;
3519 /* Point to the properties for octeon device to which this
3520 * interface belongs.
3522 lio->oct_dev = octeon_dev;
3523 lio->octprops = props;
3524 lio->netdev = netdev;
3526 dev_dbg(&octeon_dev->pci_dev->dev,
3527 "if%d gmx: %d hw_addr: 0x%llx\n", i,
3528 lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3530 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3533 random_ether_addr(&vfmac[0]);
3534 if (__liquidio_set_vf_mac(netdev, j,
3535 &vfmac[0], false)) {
3536 dev_err(&octeon_dev->pci_dev->dev,
3537 "Error setting VF%d MAC address\n",
3539 goto setup_nic_dev_fail;
3543 /* 64-bit swap required on LE machines */
3544 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3545 for (j = 0; j < 6; j++)
3546 mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
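/* After the 8-byte swap the 48-bit MAC sits in bytes 2..7 of hw_addr,
 * which is why the copy above starts at offset 2; the same "+ 2"
 * convention appears in __liquidio_set_vf_mac().
 */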
3548 /* Copy MAC Address to OS network device structure */
3550 ether_addr_copy(netdev->dev_addr, mac);
3552 /* By default all interfaces on a single Octeon use the same
 * tx and rx queues.
 */
3555 lio->txq = lio->linfo.txpciq[0].s.q_no;
3556 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3557 if (liquidio_setup_io_queues(octeon_dev, i,
3558 lio->linfo.num_txpciq,
3559 lio->linfo.num_rxpciq)) {
3560 dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3561 goto setup_nic_dev_fail;
3564 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3566 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3567 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3569 if (setup_glists(octeon_dev, lio, num_iqueues)) {
3570 dev_err(&octeon_dev->pci_dev->dev,
3571 "Gather list allocation failed\n");
3572 goto setup_nic_dev_fail;
3575 /* Register ethtool support */
3576 liquidio_set_ethtool_ops(netdev);
3577 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3578 octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3580 octeon_dev->priv_flags = 0x0;
3582 if (netdev->features & NETIF_F_LRO)
3583 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3584 OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3586 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3587 OCTNET_CMD_VLAN_FILTER_ENABLE);
3589 if ((debug != -1) && (debug & NETIF_MSG_HW))
3590 liquidio_set_feature(netdev,
3591 OCTNET_CMD_VERBOSE_ENABLE, 0);
3593 if (setup_link_status_change_wq(netdev))
3594 goto setup_nic_dev_fail;
3596 if (setup_rx_oom_poll_fn(netdev))
3597 goto setup_nic_dev_fail;
3599 /* Register the network device with the OS */
3600 if (register_netdev(netdev)) {
3601 dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3602 goto setup_nic_dev_fail;
3605 dev_dbg(&octeon_dev->pci_dev->dev,
3606 "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3607 i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3608 netif_carrier_off(netdev);
3609 lio->link_changes++;
3611 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3613 /* Send a command to the firmware to enable Rx checksum offload
3614 * by default at the time of setup of the LiquidIO driver for
 * this device.
 */
3617 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3618 OCTNET_CMD_RXCSUM_ENABLE);
3619 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3620 OCTNET_CMD_TXCSUM_ENABLE);
3622 dev_dbg(&octeon_dev->pci_dev->dev,
3623 "NIC ifidx:%d Setup successful\n", i);
3625 octeon_free_soft_command(octeon_dev, sc);
3632 octeon_free_soft_command(octeon_dev, sc);
3634 setup_nic_wait_intr:
3637 dev_err(&octeon_dev->pci_dev->dev,
3638 "NIC ifidx:%d Setup failed\n", i);
3639 liquidio_destroy_nic_device(octeon_dev, i);
3644 #ifdef CONFIG_PCI_IOV
3645 static int octeon_enable_sriov(struct octeon_device *oct)
3647 unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3648 struct pci_dev *vfdev;
3652 if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3653 err = pci_enable_sriov(oct->pci_dev,
3654 oct->sriov_info.num_vfs_alloced);
3656 dev_err(&oct->pci_dev->dev,
3657 "OCTEON: Failed to enable PCI sriov: %d\n",
3659 oct->sriov_info.num_vfs_alloced = 0;
3662 oct->sriov_info.sriov_enabled = 1;
3664 /* init lookup table that maps DPI ring number to VF pci_dev
3668 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3669 OCTEON_CN23XX_VF_VID, NULL);
3671 if (vfdev->is_virtfn &&
3672 (vfdev->physfn == oct->pci_dev)) {
3673 oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3675 u += oct->sriov_info.rings_per_vf;
3677 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3678 OCTEON_CN23XX_VF_VID, vfdev);
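/* Table sketch, assuming each VF owns rings_per_vf consecutive DPI
 * rings: each matching VF is recorded at the first ring of its stride
 * (u advances by rings_per_vf), so a DPI ring number can later be
 * resolved to the owning VF's pci_dev.
 */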
3682 return num_vfs_alloced;
3685 static int lio_pci_sriov_disable(struct octeon_device *oct)
3689 if (pci_vfs_assigned(oct->pci_dev)) {
3690 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3694 pci_disable_sriov(oct->pci_dev);
3697 while (u < MAX_POSSIBLE_VFS) {
3698 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3699 u += oct->sriov_info.rings_per_vf;
3702 oct->sriov_info.num_vfs_alloced = 0;
3703 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3709 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3711 struct octeon_device *oct = pci_get_drvdata(dev);
3714 if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3715 (oct->sriov_info.sriov_enabled)) {
3716 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3717 oct->pf_num, num_vfs);
3722 ret = lio_pci_sriov_disable(oct);
3723 } else if (num_vfs > oct->sriov_info.max_vfs) {
3724 dev_err(&oct->pci_dev->dev,
3725 "OCTEON: Max allowed VFs:%d user requested:%d",
3726 oct->sriov_info.max_vfs, num_vfs);
3729 oct->sriov_info.num_vfs_alloced = num_vfs;
3730 ret = octeon_enable_sriov(oct);
3731 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3732 oct->pf_num, num_vfs);
3740 * \brief initialize the NIC
3741 * @param oct octeon device
3743 * This initialization routine is called once the Octeon device application is
 * up and running.
3746 static int liquidio_init_nic_module(struct octeon_device *oct)
3749 int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3751 dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3753 /* only default iq and oq were initialized
3754 * initialize the rest as well
3756 /* run port_config command for each port */
3757 oct->ifcount = num_nic_ports;
3759 memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3761 for (i = 0; i < MAX_OCTEON_LINKS; i++)
3762 oct->props[i].gmxport = -1;
3764 retval = setup_nic_devices(oct);
3766 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3767 goto octnet_init_failure;
3770 liquidio_ptp_init(oct);
3772 dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3776 octnet_init_failure:
3784 * \brief starter callback that invokes the remaining initialization work after
3785 * the NIC is up and running.
3786 * @param work work_struct embedded in a cavium_wk whose ctxptr holds the octeon device
3788 static void nic_starter(struct work_struct *work)
3790 struct octeon_device *oct;
3791 struct cavium_wk *wk = (struct cavium_wk *)work;
3793 oct = (struct octeon_device *)wk->ctxptr;
3795 if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3798 /* If the status of the device is CORE_OK, the core
3799 * application has reported its application type. Call
3800 * any registered handlers now and move to the RUNNING
3803 if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3804 schedule_delayed_work(&oct->nic_poll_work.work,
3805 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3809 atomic_set(&oct->status, OCT_DEV_RUNNING);
3811 if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3812 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3814 if (liquidio_init_nic_module(oct))
3815 dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3817 handshake[oct->octeon_id].started_ok = 1;
3819 dev_err(&oct->pci_dev->dev,
3820 "Unexpected application running on NIC (%d). Check firmware.\n",
3824 complete(&handshake[oct->octeon_id].started);
3828 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
3830 struct octeon_device *oct = (struct octeon_device *)buf;
3831 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3832 int i, notice, vf_idx;
3836 notice = recv_pkt->rh.r.ossp;
3837 data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
3839 /* the first 64-bit word of data is the vf_num */
3841 octeon_swap_8B_data(&vf_num, 1);
3842 vf_idx = (int)vf_num - 1;
3844 cores_crashed = READ_ONCE(oct->cores_crashed);
3846 if (notice == VF_DRV_LOADED) {
3847 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
3848 oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
3849 dev_info(&oct->pci_dev->dev,
3850 "driver for VF%d was loaded\n", vf_idx);
3852 try_module_get(THIS_MODULE);
3854 } else if (notice == VF_DRV_REMOVED) {
3855 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
3856 oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
3857 dev_info(&oct->pci_dev->dev,
3858 "driver for VF%d was removed\n", vf_idx);
3860 module_put(THIS_MODULE);
3862 } else if (notice == VF_DRV_MACADDR_CHANGED) {
3863 u8 *b = (u8 *)&data[1];
3865 oct->sriov_info.vf_macaddr[vf_idx] = data[1];
3866 dev_info(&oct->pci_dev->dev,
3867 "VF driver changed VF%d's MAC address to %pM\n",
3871 for (i = 0; i < recv_pkt->buffer_count; i++)
3872 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3873 octeon_free_recv_info(recv_info);
3879 * \brief Device initialization for each Octeon device that is probed
3880 * @param octeon_dev octeon device
3882 static int octeon_device_init(struct octeon_device *octeon_dev)
3886 char bootcmd[] = "\n";
3887 char *dbg_enb = NULL;
3888 struct octeon_device_priv *oct_priv =
3889 (struct octeon_device_priv *)octeon_dev->priv;
3890 atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
3892 /* Enable access to the octeon device and make its DMA capability
3895 if (octeon_pci_os_setup(octeon_dev))
3898 atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
3900 /* Identify the Octeon type and map the BAR address space. */
3901 if (octeon_chip_specific_setup(octeon_dev)) {
3902 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
3906 atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
3908 /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
3909 * since that is what is required for the reference to be removed
3910 * during de-initialization (see 'octeon_destroy_resources').
3912 octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
3913 PCI_SLOT(octeon_dev->pci_dev->devfn),
3914 PCI_FUNC(octeon_dev->pci_dev->devfn),
3917 octeon_dev->app_mode = CVM_DRV_INVALID_APP;
3919 if (OCTEON_CN23XX_PF(octeon_dev)) {
3920 if (!cn23xx_fw_loaded(octeon_dev) && !fw_type_is_none()) {
3922 /* Do a soft reset of the Octeon device. */
3923 if (octeon_dev->fn_list.soft_reset(octeon_dev))
3925 /* things might have changed */
3926 if (!cn23xx_fw_loaded(octeon_dev))
3933 } else if (octeon_dev->fn_list.soft_reset(octeon_dev)) {
3937 /* Initialize the dispatch mechanism used to push packets arriving on
3938 * Octeon Output queues.
3940 if (octeon_init_dispatch_list(octeon_dev))
3943 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3944 OPCODE_NIC_CORE_DRV_ACTIVE,
3945 octeon_core_drv_init,
3948 octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3949 OPCODE_NIC_VF_DRV_NOTICE,
3950 octeon_recv_vf_drv_notice, octeon_dev);
3951 INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
3952 octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
3953 schedule_delayed_work(&octeon_dev->nic_poll_work.work,
3954 LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3956 atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
3958 if (octeon_set_io_queues_off(octeon_dev)) {
3959 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
3963 if (OCTEON_CN23XX_PF(octeon_dev)) {
3964 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
3966 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
3971 /* Initialize soft command buffer pool
3973 if (octeon_setup_sc_buffer_pool(octeon_dev)) {
3974 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
3977 atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
3979 /* Setup the data structures that manage this Octeon's Input queues. */
3980 if (octeon_setup_instr_queues(octeon_dev)) {
3981 dev_err(&octeon_dev->pci_dev->dev,
3982 "instruction queue initialization failed\n");
3985 atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
3987 /* Initialize lists to manage the requests of different types that
3988 * arrive from user & kernel applications for this octeon device.
3990 if (octeon_setup_response_list(octeon_dev)) {
3991 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
3994 atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
3996 if (octeon_setup_output_queues(octeon_dev)) {
3997 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4001 atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4003 if (OCTEON_CN23XX_PF(octeon_dev)) {
4004 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4005 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4008 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4010 if (octeon_allocate_ioq_vector(octeon_dev)) {
4011 dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4014 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4017 /* The input and output queue registers were setup earlier (the
4018 * queues were not enabled). Any additional registers
4019 * that need to be programmed should be done now.
4021 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4023 dev_err(&octeon_dev->pci_dev->dev,
4024 "Failed to configure device registers\n");
4029 /* Initialize the tasklet that handles output queue packet processing. */
4030 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4031 tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4032 (unsigned long)octeon_dev);
4034 /* Setup the interrupt handler and record the INT SUM register address
4036 if (octeon_setup_interrupt(octeon_dev,
4037 octeon_dev->sriov_info.num_pf_rings))
4040 /* Enable Octeon device interrupts */
4041 octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4043 atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4045 /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4046 * the output queue is enabled.
4047 * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4048 * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4049 * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4050 * before any credits have been issued, causing the ring to be reset
4051 * (and the f/w appear to never have started).
4053 for (j = 0; j < octeon_dev->num_oqs; j++)
4054 writel(octeon_dev->droq[j]->max_count,
4055 octeon_dev->droq[j]->pkts_credit_reg);
4057 /* Enable the input and output queues for this Octeon device */
4058 ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4060 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues");
4064 atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4066 if ((!OCTEON_CN23XX_PF(octeon_dev)) || !fw_loaded) {
4067 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4069 dev_info(&octeon_dev->pci_dev->dev,
4070 "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4073 schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4075 /* Wait for the octeon to initialize DDR after the soft-reset.*/
4076 while (!ddr_timeout) {
4077 set_current_state(TASK_INTERRUPTIBLE);
4078 if (schedule_timeout(HZ / 10)) {
4079 /* user probably pressed Control-C */
4083 ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4085 dev_err(&octeon_dev->pci_dev->dev,
4086 "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4091 if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4092 dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4096 /* Divert uboot to take commands from host instead. */
4097 ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4099 dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4100 ret = octeon_init_consoles(octeon_dev);
4102 dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4105 /* If console debug is enabled, pass an empty string to select the
4106 * default enablement; otherwise pass NULL for 'disabled'.
 */
4108 dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4109 ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4111 dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4113 } else if (octeon_console_debug_enabled(0)) {
4114 /* If console was added AND we're logging console output
4115 * then set our console print function.
4117 octeon_dev->console[0].print = octeon_dbg_console_print;
4120 atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4122 dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4123 ret = load_firmware(octeon_dev);
4125 dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4130 handshake[octeon_dev->octeon_id].init_ok = 1;
4131 complete(&handshake[octeon_dev->octeon_id].init);
4133 atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4139 * \brief Debug console print function
4140 * @param oct octeon device
4141 * @param console_num console number
4142 * @param prefix first portion of line to display
4143 * @param suffix second portion of line to display
4145 * The OCTEON debug console outputs entire lines (excluding '\n').
4146 * Normally, the line will be passed in the 'prefix' parameter.
4147 * However, due to buffering, it is possible for a line to be split into two
4148 * parts, in which case they will be passed as the 'prefix' parameter and
4149 * 'suffix' parameter.
4151 static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4152 char *prefix, char *suffix)
4154 if (prefix && suffix)
4155 dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4158 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4160 dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4166 * \brief Exits the module
4168 static void __exit liquidio_exit(void)
4170 liquidio_deinit_pci();
4172 pr_info("LiquidIO network module is now unloaded\n");
4175 module_init(liquidio_init);
4176 module_exit(liquidio_exit);