/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/if_vlan.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
/* OOM task polling interval */
#define LIO_OOM_POLL_INTERVAL_MS 250
int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = cmd;
	nctrl.ncmd.s.param1 = param1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.wait_time = 100;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
			ret);
	}
	return ret;
}
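/* Example (for illustration; this mirrors the PF driver's ndo_set_features
 * path in lio_main.c): enabling hardware LRO for IPv4/IPv6 flows:
 *
 *	liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
 *			     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
 */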
void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
					unsigned int bytes_compl)
{
	struct netdev_queue *netdev_queue = txq;

	netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
}
void octeon_update_tx_completion_counters(void *buf, int reqtype,
					  unsigned int *pkts_compl,
					  unsigned int *bytes_compl)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb = NULL;
	struct octeon_soft_command *sc;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;
	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;
	default:
		return;
	}

	(*pkts_compl)++;
	*bytes_compl += skb->len;
}
void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct octeon_soft_command *sc;
	struct netdev_queue *txq;

	switch (reqtype) {
	case REQTYPE_NORESP_NET:
	case REQTYPE_NORESP_NET_SG:
		finfo = buf;
		skb = finfo->skb;
		break;
	case REQTYPE_RESP_NET_SG:
	case REQTYPE_RESP_NET:
		sc = buf;
		skb = sc->callback_arg;
		break;
	default:
		return;
	}

	txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
	netdev_tx_sent_queue(txq, skb->len);
}
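/* Note: BQL accounting must stay balanced. Every byte reported here via
 * netdev_tx_sent_queue() must eventually be reported back through
 * octeon_report_tx_completion_to_bql() (netdev_tx_completed_queue()),
 * otherwise the stack will stop the queue once the BQL limit is reached.
 */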
void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
{
	struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
	struct net_device *netdev = (struct net_device *)nctrl->netpndev;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *mac;

	if (nctrl->completion && nctrl->response_code) {
		/* Signal whoever is interested that the response code from the
		 * firmware has arrived.
		 */
		WRITE_ONCE(*nctrl->response_code, nctrl->status);
		complete(nctrl->completion);
	}

	if (nctrl->status)
		return;

	switch (nctrl->ncmd.s.cmd) {
	case OCTNET_CMD_CHANGE_DEVFLAGS:
	case OCTNET_CMD_SET_MULTI_LIST:
		break;

	case OCTNET_CMD_CHANGE_MACADDR:
		mac = ((u8 *)&nctrl->udd[0]) + 2;
		if (nctrl->ncmd.s.param1) {
			/* vfidx is 0 based, but vf_num (param1) is 1 based */
			int vfidx = nctrl->ncmd.s.param1 - 1;
			bool mac_is_admin_assigned = nctrl->ncmd.s.param2;

			if (mac_is_admin_assigned)
				netif_info(lio, probe, lio->netdev,
					   "MAC Address %pM is configured for VF %d\n",
					   mac, vfidx);
		} else {
			netif_info(lio, probe, lio->netdev,
				   " MACAddr changed to %pM\n", mac);
		}
		break;

	case OCTNET_CMD_CHANGE_MTU:
		/* If command is successful, change the MTU. */
		netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n",
			   netdev->mtu, nctrl->ncmd.s.param1);
		netdev->mtu = nctrl->ncmd.s.param1;
		queue_delayed_work(lio->link_status_wq.wq,
				   &lio->link_status_wq.wk.work, 0);
		break;

	case OCTNET_CMD_GPIO_ACCESS:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
		break;

	case OCTNET_CMD_ID_ACTIVE:
		netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
		break;

	case OCTNET_CMD_LRO_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
		break;

	case OCTNET_CMD_LRO_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_ENABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VERBOSE_DISABLE:
		dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
			 netdev->name);
		break;

	case OCTNET_CMD_VLAN_FILTER_CTL:
		if (nctrl->ncmd.s.param1)
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter enabled\n", netdev->name);
		else
			dev_info(&oct->pci_dev->dev,
				 "%s VLAN filter disabled\n", netdev->name);
		break;

	case OCTNET_CMD_ADD_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_DEL_VLAN_FILTER:
		dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
			 netdev->name, nctrl->ncmd.s.param1);
		break;

	case OCTNET_CMD_SET_SETTINGS:
		dev_info(&oct->pci_dev->dev, "%s settings changed\n",
			 netdev->name);
		break;

	/* Case to handle "OCTNET_CMD_TNL_RX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_RX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_RXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "RX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_TNL_TX_CSUM_CTL"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_TNL_TX_CSUM_CTL:
		if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Enabled\n");
		} else if (nctrl->ncmd.s.param1 ==
			   OCTNET_CMD_TXCSUM_DISABLE) {
			netif_info(lio, probe, lio->netdev,
				   "TX Checksum Offload Disabled\n");
		}
		break;

	/* Case to handle "OCTNET_CMD_VXLAN_PORT_CONFIG"
	 * Command passed by NIC driver
	 */
	case OCTNET_CMD_VXLAN_PORT_CONFIG:
		if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d ADDED\n",
				   nctrl->ncmd.s.param1);
		} else if (nctrl->ncmd.s.more ==
			   OCTNET_CMD_VXLAN_PORT_DEL) {
			netif_info(lio, probe, lio->netdev,
				   "VxLAN Destination UDP PORT:%d DELETED\n",
				   nctrl->ncmd.s.param1);
		}
		break;

	case OCTNET_CMD_SET_FLOW_CTL:
		netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
		break;

	case OCTNET_CMD_QUEUE_COUNT_CTL:
		netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
			   nctrl->ncmd.s.param1);
		break;

	default:
		dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
			nctrl->ncmd.s.cmd);
	}
}
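/* liquidio_set_feature() above registers this function as nctrl.cb_fn, so
 * every control command it issues funnels back through this dispatcher once
 * the firmware's response arrives.
 */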
void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
{
	bool macaddr_changed = false;
	struct net_device *netdev;
	struct lio *lio;

	rtnl_lock();

	netdev = oct->props[0].netdev;
	lio = GET_LIO(netdev);

	lio->linfo.macaddr_is_admin_asgnd = true;

	if (!ether_addr_equal(netdev->dev_addr, mac)) {
		macaddr_changed = true;
		ether_addr_copy(netdev->dev_addr, mac);
		ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
		call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
	}

	rtnl_unlock();

	if (macaddr_changed)
		dev_info(&oct->pci_dev->dev,
			 "PF changed VF's MAC address to %pM\n", mac);

	/* no need to notify the firmware of the macaddr change because
	 * the PF did that already
	 */
}
static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_droq *droq;
	int q, q_no = 0;

	if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
		for (q = 0; q < lio->linfo.num_rxpciq; q++) {
			q_no = lio->linfo.rxpciq[q].s.q_no;
			droq = oct->droq[q_no];
			if (!droq)
				continue;
			octeon_droq_check_oom(droq);
		}
	}
	queue_delayed_work(lio->rxq_status_wq.wq,
			   &lio->rxq_status_wq.wk.work,
			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
}
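/* Rationale: RX buffer refills can fail under memory pressure, leaving a
 * DROQ short of descriptors with no event to trigger a retry. The poller
 * above re-runs octeon_droq_check_oom() every LIO_OOM_POLL_INTERVAL_MS so
 * such queues are eventually replenished.
 */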
int setup_rx_oom_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->rxq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
		return -ENOMEM;
	}
	INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
			  octnet_poll_check_rxq_oom_status);
	lio->rxq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->rxq_status_wq.wq,
			   &lio->rxq_status_wq.wk.work,
			   msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
	return 0;
}
void cleanup_rx_oom_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->rxq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
		flush_workqueue(lio->rxq_status_wq.wq);
		destroy_workqueue(lio->rxq_status_wq.wq);
	}
}
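/* Teardown mirrors setup_rx_oom_poll_fn(): cancel the pending work first so
 * it cannot re-queue itself, then flush and destroy the workqueue.
 */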
/* Runs in interrupt context. */
static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
	struct net_device *netdev;
	struct lio *lio;

	netdev = oct->props[iq->ifidx].netdev;

	/* This is needed because the first IQ does not have
	 * a netdev associated with it.
	 */
	if (!netdev)
		return;

	lio = GET_LIO(netdev);
	if (netif_is_multiqueue(netdev)) {
		if (__netif_subqueue_stopped(netdev, iq->q_index) &&
		    lio->linfo.link.s.link_up &&
		    (!octnet_iq_is_full(oct, iq_num))) {
			netif_wake_subqueue(netdev, iq->q_index);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
						  tx_restart, 1);
		}
	} else if (netif_queue_stopped(netdev) &&
		   lio->linfo.link.s.link_up &&
		   (!octnet_iq_is_full(oct, lio->txq))) {
		INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
					  tx_restart, 1);
		netif_wake_queue(netdev);
	}
}
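/* The wakes above pair with the netif_stop_subqueue()/netif_stop_queue()
 * calls made on the transmit path when an instruction queue fills up; once
 * the hardware drains the IQ, transmission is re-enabled and the tx_restart
 * counter records the event.
 */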
/**
 * \brief Setup output queue
 * @param oct octeon device
 * @param q_no which queue
 * @param num_descs how many descriptors
 * @param desc_size size of each descriptor
 * @param app_ctx application context
 */
static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
			     int desc_size, void *app_ctx)
{
	int ret_val;

	dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
	/* droq creation and local register settings. */
	ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
	if (ret_val < 0)
		return ret_val;

	if (ret_val == 1) {
		dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
		return 0;
	}

	/* Enable the droq queues */
	octeon_set_droq_pkt_op(oct, q_no, 1);

	/* Send Credit for Octeon Output queues. Credits are always
	 * sent after the output queue is enabled.
	 */
	writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);

	return ret_val;
}
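/* Note on octeon_setup_droq()'s use of octeon_create_droq(): a return of 1
 * means the queue already existed (the "default" droq), so the enable and
 * credit steps are skipped and success is returned.
 */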
/** Routine to push packets arriving on Octeon interface up to network layer.
 * @param oct_id   - octeon device id.
 * @param skbuff   - skbuff struct to be passed to network layer.
 * @param len      - size of total data received.
 * @param rh       - Control header associated with the packet
 * @param param    - additional control data with the packet
 * @param arg      - farg registered in droq_ops
 */
static void
liquidio_push_packet(u32 octeon_id __attribute__((unused)),
		     void *skbuff,
		     u32 len,
		     union octeon_rh *rh,
		     void *param,
		     void *arg)
{
	struct net_device *netdev = (struct net_device *)arg;
	struct octeon_droq *droq =
	    container_of(param, struct octeon_droq, napi);
	struct sk_buff *skb = (struct sk_buff *)skbuff;
	struct skb_shared_hwtstamps *shhwtstamps;
	struct napi_struct *napi = param;
	u16 vtag = 0;
	u32 r_dh_off;
	u64 ns;

	if (netdev) {
		struct lio *lio = GET_LIO(netdev);
		struct octeon_device *oct = lio->oct_dev;
		int packet_was_received;

		/* Do not proceed if the interface is not in RUNNING state. */
		if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
			recv_buffer_free(skb);
			droq->stats.rx_dropped++;
			return;
		}

		skb->dev = netdev;

		skb_record_rx_queue(skb, droq->q_no);
		if (likely(len > MIN_SKB_SIZE)) {
			struct octeon_skb_page_info *pg_info;
			unsigned char *va;

			pg_info = ((struct octeon_skb_page_info *)(skb->cb));
			if (pg_info->page) {
				/* For Paged allocation use the frags */
				va = page_address(pg_info->page) +
					pg_info->page_offset;
				memcpy(skb->data, va, MIN_SKB_SIZE);
				skb_put(skb, MIN_SKB_SIZE);
				skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
						pg_info->page,
						pg_info->page_offset +
						MIN_SKB_SIZE,
						len - MIN_SKB_SIZE,
						LIO_RXBUFFER_SZ);
			}
		} else {
			struct octeon_skb_page_info *pg_info =
				((struct octeon_skb_page_info *)(skb->cb));
			skb_copy_to_linear_data(skb, page_address(pg_info->page)
						+ pg_info->page_offset, len);
			skb_put(skb, len);
			put_page(pg_info->page);
		}

		r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;

		if (oct->ptp_enable) {
			if (rh->r_dh.has_hwtstamp) {
				/* timestamp is included from the hardware at
				 * the beginning of the packet.
				 */
				if (ifstate_check
					(lio,
					 LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
					/* Nanoseconds are in the first 64-bits
					 * of the packet.
					 */
					memcpy(&ns, (skb->data + r_dh_off),
					       sizeof(ns));
					r_dh_off -= BYTES_PER_DHLEN_UNIT;
					shhwtstamps = skb_hwtstamps(skb);
					shhwtstamps->hwtstamp =
						ns_to_ktime(ns +
							    lio->ptp_adjust);
				}
			}
		}

		if (rh->r_dh.has_hash) {
			__be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
			u32 hash = be32_to_cpu(*hash_be);

			skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
			r_dh_off -= BYTES_PER_DHLEN_UNIT;
		}

		skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
		skb->protocol = eth_type_trans(skb, skb->dev);

		if ((netdev->features & NETIF_F_RXCSUM) &&
		    (((rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
		     (!(rh->r_dh.encap_on) &&
		      (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
			/* checksum has already been verified */
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		else
			skb->ip_summed = CHECKSUM_NONE;

		/* Setting Encapsulation field on basis of status received
		 * from the firmware
		 */
		if (rh->r_dh.encap_on) {
			skb->encapsulation = 1;
			skb->csum_level = 1;
			droq->stats.rx_vxlan++;
		}

		/* inbound VLAN tag */
		if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
		    rh->r_dh.vlan) {
			u16 priority = rh->r_dh.priority;
			u16 vid = rh->r_dh.vlan;

			vtag = (priority << VLAN_PRIO_SHIFT) | vid;
			__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
		}

		packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP);

		if (packet_was_received) {
			droq->stats.rx_bytes_received += len;
			droq->stats.rx_pkts_received++;
		} else {
			droq->stats.rx_dropped++;
			netif_info(lio, rx_err, lio->netdev,
				   "droq:%d error rx_dropped:%llu\n",
				   droq->q_no, droq->stats.rx_dropped);
		}
	} else {
		recv_buffer_free(skb);
	}
}
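/* Receive-path design note: frames larger than MIN_SKB_SIZE keep their data
 * in the page the hardware DMA'd into (attached as an skb frag, with only
 * the first MIN_SKB_SIZE bytes copied into the linear area for header
 * parsing), while small frames are copied in full so the page can be
 * released immediately.
 */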
/**
 * \brief wrapper for calling napi_schedule
 * @param param parameters to pass to napi_schedule
 *
 * Used when scheduling on different CPUs
 */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}
/**
 * \brief callback when receive interrupt occurs and we are in NAPI mode
 * @param arg pointer to octeon output queue
 */
static void liquidio_napi_drv_callback(void *arg)
{
	struct octeon_device *oct;
	struct octeon_droq *droq = arg;
	int this_cpu = smp_processor_id();

	oct = droq->oct_dev;

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
	    droq->cpu_id == this_cpu) {
		napi_schedule_irqoff(&droq->napi);
	} else {
		call_single_data_t *csd = &droq->csd;

		csd->func = napi_schedule_wrapper;
		csd->info = &droq->napi;
		csd->flags = 0;

		smp_call_function_single_async(droq->cpu_id, csd);
	}
}
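/* When the DROQ is pinned to a CPU other than the one the interrupt landed
 * on, the IPI above re-schedules NAPI on droq->cpu_id via
 * napi_schedule_wrapper() rather than polling on the wrong core. 23xx
 * devices always schedule locally, presumably because their per-queue MSI-X
 * vectors are already affinitized (an inference from the affinity hints set
 * in octeon_setup_interrupt() below).
 */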
/**
 * \brief Entry point for NAPI polling
 * @param napi NAPI structure
 * @param budget maximum number of items to process
 */
static int liquidio_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_instr_queue *iq;
	struct octeon_device *oct;
	struct octeon_droq *droq;
	int tx_done = 0, iq_no;
	int work_done;

	droq = container_of(napi, struct octeon_droq, napi);
	oct = droq->oct_dev;
	iq_no = droq->q_no;

	/* Handle Droq descriptors */
	work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
						 POLL_EVENT_PROCESS_PKTS,
						 budget);

	/* Flush the instruction queue */
	iq = oct->instr_queue[iq_no];
	if (iq) {
		/* TODO: move this check to inside octeon_flush_iq,
		 * once check_db_timeout is removed
		 */
		if (atomic_read(&iq->instr_pending))
			/* Process iq buffers within the budget limits */
			tx_done = octeon_flush_iq(oct, iq, budget);
		else
			tx_done = 1;

		/* Update iq read-index rather than waiting for next interrupt.
		 * Return back if tx_done is false.
		 */
		/* sub-queue status update */
		lio_update_txq_status(oct, iq_no);
	} else {
		dev_err(&oct->pci_dev->dev, "%s: iq (%d) num invalid\n",
			__func__, iq_no);
	}

#define MAX_REG_CNT 2000000U
	/* force enable interrupt if reg cnts are high to avoid wraparound */
	if ((work_done < budget && tx_done) ||
	    (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
	    (droq->pkt_count >= MAX_REG_CNT)) {
		tx_done = 1;
		napi_complete_done(napi, work_done);

		octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
					     POLL_EVENT_ENABLE_INTR, 0);
		return 0;
	}

	return (!tx_done) ? (budget) : (work_done);
}
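/* NAPI contract reminder: returning a value less than budget tells the core
 * that the queue is drained and, after napi_complete_done(), interrupts may
 * be re-enabled; returning the full budget keeps the poller scheduled. That
 * is why the function returns budget while tx_done is false: polling stays
 * alive until the instruction queue is flushed.
 */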
/**
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx Interface index
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int cpu_id_modulus;
	int num_tx_descs;
	struct lio *lio;
	int retval = 0;
	int q, q_no;
	int cpu_id;

	netdev = octeon_dev->props[ifidx].netdev;

	lio = GET_LIO(netdev);

	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

	droq_ops.fptr = liquidio_push_packet;
	droq_ops.farg = netdev;

	droq_ops.poll_mode = 1;
	droq_ops.napi_fn = liquidio_napi_drv_callback;
	cpu_id = 0;
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
	for (q = 0; q < num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"%s index:%d linfo.rxpciq.s.q_no:%d\n",
			__func__, q, q_no);
		retval = octeon_setup_droq(
		    octeon_dev, q_no,
		    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
						lio->ifidx),
		    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
						   lio->ifidx), NULL);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"%s : Runtime DROQ(RxQ) creation failed.\n",
				__func__);
			return 1;
		}

		droq = octeon_dev->droq[q_no];
		napi = &droq->napi;
		dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
			(u64)netdev, (u64)octeon_dev);
		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
		/* 23XX PF/VF can send/recv control messages (via the first
		 * PF/VF-owned droq) from the firmware even if the ethX
		 * interface is down, so that's why poll_mode must be off
		 * for the first droq.
		 */
		octeon_dev->droq[0]->ops.poll_mode = 0;
	}

	/* set up IQs. */
	for (q = 0; q < num_iqs; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
		    octeon_get_conf(octeon_dev), lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}

		/* XPS */
		if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
		    octeon_dev->ioq_vector) {
			struct octeon_ioq_vector *ioq_vector;

			ioq_vector = &octeon_dev->ioq_vector[q];
			netif_set_xps_queue(netdev,
					    &ioq_vector->affinity_mask,
					    ioq_vector->iq_index);
		}
	}

	return 0;
}
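/* netif_set_xps_queue() above ties each TX queue to the CPU mask of its
 * MSI-X vector, so the stack prefers submitting on the CPU that will also
 * service that queue's completion interrupt, keeping TX processing
 * cache-local.
 */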
static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			if (OCTEON_CN23XX_VF(oct))
				dev_err(&oct->pci_dev->dev,
					"should not come here should not get rx when poll mode = 0 for vf\n");
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* this will be flushed periodically by check iq db */
		if (ret & MSIX_PI_INT)
			return 0;
	}
	return 0;
}
irqreturn_t
liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
{
	struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
	struct octeon_device *oct = ioq_vector->oct_dev;
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
	u64 ret;

	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}
/**
 * \brief Droq packet processor scheduler
 * @param oct octeon device
 */
static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct octeon_droq *droq;
	u64 oq_no;

	if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
		for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
		     oq_no++) {
			if (!(oct->droq_intr & BIT_ULL(oq_no)))
				continue;

			droq = oct->droq[oq_no];

			if (droq->ops.poll_mode) {
				droq->ops.napi_fn(droq);
				oct_priv->napi_mask |= BIT_ULL(oq_no);
			} else {
				tasklet_schedule(&oct_priv->droq_tasklet);
			}
		}
	}
}
/**
 * \brief Interrupt handler for octeon
 * @param irq unused
 * @param dev octeon device
 */
static
irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
					 void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	irqreturn_t ret;

	/* Disable our interrupts for the duration of ISR */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	ret = oct->fn_list.process_interrupt_regs(oct);

	if (ret == IRQ_HANDLED)
		liquidio_schedule_droq_pkt_handlers(oct);

	/* Re-enable our interrupts */
	if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
		oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);

	return ret;
}
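/* Interrupt setup strategy (octeon_setup_interrupt() below): use MSI-X when
 * enabled (one vector per IOQ, plus one auxiliary non-IOQ vector on the PF),
 * otherwise try MSI, and fall back to legacy INTx if MSI cannot be enabled.
 */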
/**
 * \brief Setup interrupt for octeon device
 * @param oct octeon device
 *
 * Enable interrupt in Octeon device as given in the PCI interrupt mask.
 */
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
	struct msix_entry *msix_entries;
	char *queue_irq_names = NULL;
	int i, num_interrupts = 0;
	int num_alloc_ioq_vectors;
	char *aux_irq_name = NULL;
	int num_ioq_vectors;
	int irqret, err;

	if (oct->msix_on) {
		oct->num_msix_irqs = num_ioqs;
		if (OCTEON_CN23XX_PF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;

			/* one non ioq interrupt for handling
			 * sli_mac_pf_int_sum
			 */
			oct->num_msix_irqs += 1;
		} else if (OCTEON_CN23XX_VF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
		}

		/* allocate storage for the names assigned to each irq */
		oct->irq_name_storage =
			kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage) {
			dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
			return -ENOMEM;
		}

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			aux_irq_name = &queue_irq_names
				[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

		oct->msix_entries = kcalloc(oct->num_msix_irqs,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!oct->msix_entries) {
			dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return -ENOMEM;
		}

		msix_entries = (struct msix_entry *)oct->msix_entries;

		/* Assumption: PF MSI-X vectors start at pf_srn and run
		 * through trs, not from 0. If that changes, change this code.
		 */
		if (OCTEON_CN23XX_PF(oct)) {
			for (i = 0; i < oct->num_msix_irqs - 1; i++)
				msix_entries[i].entry =
					oct->sriov_info.pf_srn + i;

			msix_entries[oct->num_msix_irqs - 1].entry =
				oct->sriov_info.trs;
		} else if (OCTEON_CN23XX_VF(oct)) {
			for (i = 0; i < oct->num_msix_irqs; i++)
				msix_entries[i].entry = i;
		}

		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return num_alloc_ioq_vectors;
		}

		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;
		/* For PF, there is one non-ioq interrupt handler */
		if (OCTEON_CN23XX_PF(oct)) {
			num_ioq_vectors -= 1;

			snprintf(aux_irq_name, INTRNAMSIZ,
				 "LiquidIO%u-pf%u-aux", oct->octeon_id,
				 oct->pf_num);
			irqret = request_irq(
					msix_entries[num_ioq_vectors].vector,
					liquidio_legacy_intr_handler, 0,
					aux_irq_name, oct);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
		}

		for (i = 0 ; i < num_ioq_vectors ; i++) {
			if (OCTEON_CN23XX_PF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
					 oct->octeon_id, oct->pf_num, i);

			if (OCTEON_CN23XX_VF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
					 oct->octeon_id, oct->vf_num, i);

			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     &queue_irq_names[IRQ_NAME_OFF(i)],
					     &oct->ioq_vector[i]);

			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/* Freeing the non-ioq irq vector here. */
				free_irq(msix_entries[num_ioq_vectors].vector,
					 oct);

				while (i) {
					i--;
					/* clearing affinity mask */
					irq_set_affinity_hint(
						      msix_entries[i].vector,
						      NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(msix_entries[i].vector,
					      &oct->ioq_vector[i].affinity_mask
					      );
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		/* allocate storage for the names assigned to the irq */
		oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage)
			return -ENOMEM;

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-pf%u-rxtx-%u",
				 oct->octeon_id, oct->pf_num, 0);

		if (OCTEON_CN23XX_VF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-vf%u-rxtx-%u",
				 oct->octeon_id, oct->vf_num, 0);

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler,
				     IRQF_SHARED,
				     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return irqret;
		}
	}
	return 0;
}