GNU Linux-libre 4.14.290-gnu1
drivers/net/ethernet/cavium/liquidio/lio_core.c
1 /**********************************************************************
2  * Author: Cavium, Inc.
3  *
4  * Contact: support@cavium.com
5  *          Please include "LiquidIO" in the subject.
6  *
7  * Copyright (c) 2003-2016 Cavium, Inc.
8  *
9  * This file is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License, Version 2, as
11  * published by the Free Software Foundation.
12  *
13  * This file is distributed in the hope that it will be useful, but
14  * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16  * NONINFRINGEMENT.  See the GNU General Public License for more details.
17  ***********************************************************************/
18 #include <linux/pci.h>
19 #include <linux/if_vlan.h>
20 #include "liquidio_common.h"
21 #include "octeon_droq.h"
22 #include "octeon_iq.h"
23 #include "response_manager.h"
24 #include "octeon_device.h"
25 #include "octeon_nic.h"
26 #include "octeon_main.h"
27 #include "octeon_network.h"
28
29 /* OOM task polling interval */
30 #define LIO_OOM_POLL_INTERVAL_MS 250
31
32 int liquidio_set_feature(struct net_device *netdev, int cmd, u16 param1)
33 {
34         struct lio *lio = GET_LIO(netdev);
35         struct octeon_device *oct = lio->oct_dev;
36         struct octnic_ctrl_pkt nctrl;
37         int ret = 0;
38
39         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
40
41         nctrl.ncmd.u64 = 0;
42         nctrl.ncmd.s.cmd = cmd;
43         nctrl.ncmd.s.param1 = param1;
44         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
45         nctrl.wait_time = 100;
46         nctrl.netpndev = (u64)netdev;
47         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
48
49         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
50         if (ret < 0) {
51                 dev_err(&oct->pci_dev->dev, "Feature change failed in core (ret: 0x%x)\n",
52                         ret);
53         }
54         return ret;
55 }
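
/* Illustrative sketch, not part of the driver: how an ndo_set_features
 * handler might use liquidio_set_feature() to toggle a firmware feature.
 * The handler name is a placeholder, and the OCTNIC_LROIPV4/OCTNIC_LROIPV6
 * parameter bits are assumed from liquidio_common.h; the real hooks live
 * in lio_main.c and lio_vf_main.c.
 */
static int example_set_features(struct net_device *netdev,
                                netdev_features_t features)
{
        int ret = 0;

        /* Only react to feature bits that actually changed. */
        if ((features ^ netdev->features) & NETIF_F_LRO) {
                if (features & NETIF_F_LRO)
                        ret = liquidio_set_feature(netdev,
                                                   OCTNET_CMD_LRO_ENABLE,
                                                   OCTNIC_LROIPV4 |
                                                   OCTNIC_LROIPV6);
                else
                        ret = liquidio_set_feature(netdev,
                                                   OCTNET_CMD_LRO_DISABLE,
                                                   OCTNIC_LROIPV4 |
                                                   OCTNIC_LROIPV6);
        }

        return ret;
}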
56
57 void octeon_report_tx_completion_to_bql(void *txq, unsigned int pkts_compl,
58                                         unsigned int bytes_compl)
59 {
60         struct netdev_queue *netdev_queue = txq;
61
62         netdev_tx_completed_queue(netdev_queue, pkts_compl, bytes_compl);
63 }
64
65 void octeon_update_tx_completion_counters(void *buf, int reqtype,
66                                           unsigned int *pkts_compl,
67                                           unsigned int *bytes_compl)
68 {
69         struct octnet_buf_free_info *finfo;
70         struct sk_buff *skb = NULL;
71         struct octeon_soft_command *sc;
72
73         switch (reqtype) {
74         case REQTYPE_NORESP_NET:
75         case REQTYPE_NORESP_NET_SG:
76                 finfo = buf;
77                 skb = finfo->skb;
78                 break;
79
80         case REQTYPE_RESP_NET_SG:
81         case REQTYPE_RESP_NET:
82                 sc = buf;
83                 skb = sc->callback_arg;
84                 break;
85
86         default:
87                 return;
88         }
89
90         (*pkts_compl)++;
91         *bytes_compl += skb->len;
92 }
93
94 void octeon_report_sent_bytes_to_bql(void *buf, int reqtype)
95 {
96         struct octnet_buf_free_info *finfo;
97         struct sk_buff *skb;
98         struct octeon_soft_command *sc;
99         struct netdev_queue *txq;
100
101         switch (reqtype) {
102         case REQTYPE_NORESP_NET:
103         case REQTYPE_NORESP_NET_SG:
104                 finfo = buf;
105                 skb = finfo->skb;
106                 break;
107
108         case REQTYPE_RESP_NET_SG:
109         case REQTYPE_RESP_NET:
110                 sc = buf;
111                 skb = sc->callback_arg;
112                 break;
113
114         default:
115                 return;
116         }
117
118         txq = netdev_get_tx_queue(skb->dev, skb_get_queue_mapping(skb));
119         netdev_tx_sent_queue(txq, skb->len);
120 }
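
/* Illustrative sketch, not part of the driver: the BQL contract the two
 * helpers above participate in.  Bytes reported with netdev_tx_sent_queue()
 * at transmit time (as octeon_report_sent_bytes_to_bql() does) must later be
 * balanced by netdev_tx_completed_queue() on the completion path (as
 * octeon_report_tx_completion_to_bql() does).  The xmit handler below is a
 * placeholder that shows only the accounting call.
 */
static netdev_tx_t example_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct netdev_queue *txq =
                netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));

        /* ... queue the skb to the hardware ring here ... */

        /* Tell BQL how many bytes are now in flight on this tx queue. */
        netdev_tx_sent_queue(txq, skb->len);

        return NETDEV_TX_OK;
}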
121
122 void liquidio_link_ctrl_cmd_completion(void *nctrl_ptr)
123 {
124         struct octnic_ctrl_pkt *nctrl = (struct octnic_ctrl_pkt *)nctrl_ptr;
125         struct net_device *netdev = (struct net_device *)nctrl->netpndev;
126         struct lio *lio = GET_LIO(netdev);
127         struct octeon_device *oct = lio->oct_dev;
128         u8 *mac;
129
130         if (nctrl->completion && nctrl->response_code) {
131                 /* Signal whoever is interested that the response code from the
132                  * firmware has arrived.
133                  */
134                 WRITE_ONCE(*nctrl->response_code, nctrl->status);
135                 complete(nctrl->completion);
136         }
137
138         if (nctrl->status)
139                 return;
140
141         switch (nctrl->ncmd.s.cmd) {
142         case OCTNET_CMD_CHANGE_DEVFLAGS:
143         case OCTNET_CMD_SET_MULTI_LIST:
144                 break;
145
146         case OCTNET_CMD_CHANGE_MACADDR:
147                 mac = ((u8 *)&nctrl->udd[0]) + 2;
148                 if (nctrl->ncmd.s.param1) {
149                         /* vfidx is 0 based, but vf_num (param1) is 1 based */
150                         int vfidx = nctrl->ncmd.s.param1 - 1;
151                         bool mac_is_admin_assigned = nctrl->ncmd.s.param2;
152
153                         if (mac_is_admin_assigned)
154                                 netif_info(lio, probe, lio->netdev,
155                                            "MAC Address %pM is configured for VF %d\n",
156                                            mac, vfidx);
157                 } else {
158                         netif_info(lio, probe, lio->netdev,
159                                    " MACAddr changed to %pM\n",
160                                    mac);
161                 }
162                 break;
163
164         case OCTNET_CMD_CHANGE_MTU:
165                 /* If command is successful, change the MTU. */
166                 netif_info(lio, probe, lio->netdev, "MTU Changed from %d to %d\n",
167                            netdev->mtu, nctrl->ncmd.s.param1);
168                 netdev->mtu = nctrl->ncmd.s.param1;
169                 queue_delayed_work(lio->link_status_wq.wq,
170                                    &lio->link_status_wq.wk.work, 0);
171                 break;
172
173         case OCTNET_CMD_GPIO_ACCESS:
174                 netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
175
176                 break;
177
178         case OCTNET_CMD_ID_ACTIVE:
179                 netif_info(lio, probe, lio->netdev, "LED Flashing visual identification\n");
180
181                 break;
182
183         case OCTNET_CMD_LRO_ENABLE:
184                 dev_info(&oct->pci_dev->dev, "%s LRO Enabled\n", netdev->name);
185                 break;
186
187         case OCTNET_CMD_LRO_DISABLE:
188                 dev_info(&oct->pci_dev->dev, "%s LRO Disabled\n",
189                          netdev->name);
190                 break;
191
192         case OCTNET_CMD_VERBOSE_ENABLE:
193                 dev_info(&oct->pci_dev->dev, "%s Firmware debug enabled\n",
194                          netdev->name);
195                 break;
196
197         case OCTNET_CMD_VERBOSE_DISABLE:
198                 dev_info(&oct->pci_dev->dev, "%s Firmware debug disabled\n",
199                          netdev->name);
200                 break;
201
202         case OCTNET_CMD_VLAN_FILTER_CTL:
203                 if (nctrl->ncmd.s.param1)
204                         dev_info(&oct->pci_dev->dev,
205                                  "%s VLAN filter enabled\n", netdev->name);
206                 else
207                         dev_info(&oct->pci_dev->dev,
208                                  "%s VLAN filter disabled\n", netdev->name);
209                 break;
210
211         case OCTNET_CMD_ADD_VLAN_FILTER:
212                 dev_info(&oct->pci_dev->dev, "%s VLAN filter %d added\n",
213                          netdev->name, nctrl->ncmd.s.param1);
214                 break;
215
216         case OCTNET_CMD_DEL_VLAN_FILTER:
217                 dev_info(&oct->pci_dev->dev, "%s VLAN filter %d removed\n",
218                          netdev->name, nctrl->ncmd.s.param1);
219                 break;
220
221         case OCTNET_CMD_SET_SETTINGS:
222                 dev_info(&oct->pci_dev->dev, "%s settings changed\n",
223                          netdev->name);
224
225                 break;
226
227         /* Handle the OCTNET_CMD_TNL_RX_CSUM_CTL command
228          * passed down by the NIC driver.
229          */
230         case OCTNET_CMD_TNL_RX_CSUM_CTL:
231                 if (nctrl->ncmd.s.param1 == OCTNET_CMD_RXCSUM_ENABLE) {
232                         netif_info(lio, probe, lio->netdev,
233                                    "RX Checksum Offload Enabled\n");
234                 } else if (nctrl->ncmd.s.param1 ==
235                            OCTNET_CMD_RXCSUM_DISABLE) {
236                         netif_info(lio, probe, lio->netdev,
237                                    "RX Checksum Offload Disabled\n");
238                 }
239                 break;
240
241                 /* Handle the OCTNET_CMD_TNL_TX_CSUM_CTL command
242                  * passed down by the NIC driver.
243                  */
244         case OCTNET_CMD_TNL_TX_CSUM_CTL:
245                 if (nctrl->ncmd.s.param1 == OCTNET_CMD_TXCSUM_ENABLE) {
246                         netif_info(lio, probe, lio->netdev,
247                                    "TX Checksum Offload Enabled\n");
248                 } else if (nctrl->ncmd.s.param1 ==
249                            OCTNET_CMD_TXCSUM_DISABLE) {
250                         netif_info(lio, probe, lio->netdev,
251                                    "TX Checksum Offload Disabled\n");
252                 }
253                 break;
254
255                 /* Handle the OCTNET_CMD_VXLAN_PORT_CONFIG command
256                  * passed down by the NIC driver.
257                  */
258         case OCTNET_CMD_VXLAN_PORT_CONFIG:
259                 if (nctrl->ncmd.s.more == OCTNET_CMD_VXLAN_PORT_ADD) {
260                         netif_info(lio, probe, lio->netdev,
261                                    "VxLAN Destination UDP PORT:%d ADDED\n",
262                                    nctrl->ncmd.s.param1);
263                 } else if (nctrl->ncmd.s.more ==
264                            OCTNET_CMD_VXLAN_PORT_DEL) {
265                         netif_info(lio, probe, lio->netdev,
266                                    "VxLAN Destination UDP PORT:%d DELETED\n",
267                                    nctrl->ncmd.s.param1);
268                 }
269                 break;
270
271         case OCTNET_CMD_SET_FLOW_CTL:
272                 netif_info(lio, probe, lio->netdev, "Set RX/TX flow control parameters\n");
273                 break;
274
275         case OCTNET_CMD_QUEUE_COUNT_CTL:
276                 netif_info(lio, probe, lio->netdev, "Queue count updated to %d\n",
277                            nctrl->ncmd.s.param1);
278                 break;
279
280         default:
281                 dev_err(&oct->pci_dev->dev, "%s Unknown cmd %d\n", __func__,
282                         nctrl->ncmd.s.cmd);
283         }
284 }
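
/* Illustrative sketch, not part of the driver: how a MAC-address change
 * request packs the new address so that the completion handler above can
 * read it back out of udd[0] at a two-byte offset.  The function name is a
 * placeholder; the real ndo_set_mac_address paths live in lio_main.c and
 * lio_vf_main.c.
 */
static int example_change_macaddr(struct net_device *netdev, const u8 *addr)
{
        struct lio *lio = GET_LIO(netdev);
        struct octnic_ctrl_pkt nctrl;
        int ret;

        memset(&nctrl, 0, sizeof(nctrl));

        nctrl.ncmd.u64 = 0;
        nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
        nctrl.ncmd.s.param1 = 0;        /* 0: the PF/VF's own address */
        nctrl.ncmd.s.more = 1;          /* one 64-bit word of data follows */
        nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
        nctrl.wait_time = 100;
        nctrl.netpndev = (u64)netdev;
        nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

        /* Carry the MAC in the upper six bytes of udd[0], i.e. at the same
         * "+ 2" offset the completion handler uses to print it.
         */
        nctrl.udd[0] = 0;
        memcpy((u8 *)&nctrl.udd[0] + 2, addr, ETH_ALEN);

        ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
        return ret < 0 ? ret : 0;
}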
285
286 void octeon_pf_changed_vf_macaddr(struct octeon_device *oct, u8 *mac)
287 {
288         bool macaddr_changed = false;
289         struct net_device *netdev;
290         struct lio *lio;
291
292         rtnl_lock();
293
294         netdev = oct->props[0].netdev;
295         lio = GET_LIO(netdev);
296
297         lio->linfo.macaddr_is_admin_asgnd = true;
298
299         if (!ether_addr_equal(netdev->dev_addr, mac)) {
300                 macaddr_changed = true;
301                 ether_addr_copy(netdev->dev_addr, mac);
302                 ether_addr_copy(((u8 *)&lio->linfo.hw_addr) + 2, mac);
303                 call_netdevice_notifiers(NETDEV_CHANGEADDR, netdev);
304         }
305
306         rtnl_unlock();
307
308         if (macaddr_changed)
309                 dev_info(&oct->pci_dev->dev,
310                          "PF changed VF's MAC address to %pM\n", mac);
311
312         /* no need to notify the firmware of the macaddr change because
313          * the PF did that already
314          */
315 }
316
317 static void octnet_poll_check_rxq_oom_status(struct work_struct *work)
318 {
319         struct cavium_wk *wk = (struct cavium_wk *)work;
320         struct lio *lio = (struct lio *)wk->ctxptr;
321         struct octeon_device *oct = lio->oct_dev;
322         struct octeon_droq *droq;
323         int q, q_no = 0;
324
325         if (ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
326                 for (q = 0; q < lio->linfo.num_rxpciq; q++) {
327                         q_no = lio->linfo.rxpciq[q].s.q_no;
328                         droq = oct->droq[q_no];
329                         if (!droq)
330                                 continue;
331                         octeon_droq_check_oom(droq);
332                 }
333         }
334         queue_delayed_work(lio->rxq_status_wq.wq,
335                            &lio->rxq_status_wq.wk.work,
336                            msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
337 }
338
339 int setup_rx_oom_poll_fn(struct net_device *netdev)
340 {
341         struct lio *lio = GET_LIO(netdev);
342         struct octeon_device *oct = lio->oct_dev;
343
344         lio->rxq_status_wq.wq = alloc_workqueue("rxq-oom-status",
345                                                 WQ_MEM_RECLAIM, 0);
346         if (!lio->rxq_status_wq.wq) {
347                 dev_err(&oct->pci_dev->dev, "unable to create cavium rxq oom status wq\n");
348                 return -ENOMEM;
349         }
350         INIT_DELAYED_WORK(&lio->rxq_status_wq.wk.work,
351                           octnet_poll_check_rxq_oom_status);
352         lio->rxq_status_wq.wk.ctxptr = lio;
353         queue_delayed_work(lio->rxq_status_wq.wq,
354                            &lio->rxq_status_wq.wk.work,
355                            msecs_to_jiffies(LIO_OOM_POLL_INTERVAL_MS));
356         return 0;
357 }
358
359 void cleanup_rx_oom_poll_fn(struct net_device *netdev)
360 {
361         struct lio *lio = GET_LIO(netdev);
362
363         if (lio->rxq_status_wq.wq) {
364                 cancel_delayed_work_sync(&lio->rxq_status_wq.wk.work);
365                 flush_workqueue(lio->rxq_status_wq.wq);
366                 destroy_workqueue(lio->rxq_status_wq.wq);
367         }
368 }
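
/* Illustrative sketch, not part of the driver: the OOM poller is normally
 * started when the interface is brought up and torn down when it is
 * stopped.  The handler names are placeholders; the real ndo_open/ndo_stop
 * implementations live in lio_main.c and lio_vf_main.c.
 */
static int example_open(struct net_device *netdev)
{
        /* ... enable queues, link status, etc. ... */
        return setup_rx_oom_poll_fn(netdev);
}

static int example_stop(struct net_device *netdev)
{
        cleanup_rx_oom_poll_fn(netdev);
        /* ... disable queues, link status, etc. ... */
        return 0;
}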
369
370 /* Runs in interrupt context. */
371 static void lio_update_txq_status(struct octeon_device *oct, int iq_num)
372 {
373         struct octeon_instr_queue *iq = oct->instr_queue[iq_num];
374         struct net_device *netdev;
375         struct lio *lio;
376
377         netdev = oct->props[iq->ifidx].netdev;
378
379         /* This is needed because the first IQ does not have
380          * a netdev associated with it.
381          */
382         if (!netdev)
383                 return;
384
385         lio = GET_LIO(netdev);
386         if (netif_is_multiqueue(netdev)) {
387                 if (__netif_subqueue_stopped(netdev, iq->q_index) &&
388                     lio->linfo.link.s.link_up &&
389                     (!octnet_iq_is_full(oct, iq_num))) {
390                         netif_wake_subqueue(netdev, iq->q_index);
391                         INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq_num,
392                                                   tx_restart, 1);
393                 }
394         } else if (netif_queue_stopped(netdev) &&
395                    lio->linfo.link.s.link_up &&
396                    (!octnet_iq_is_full(oct, lio->txq))) {
397                 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
398                                           tx_restart, 1);
399                 netif_wake_queue(netdev);
400         }
401 }
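
/* Illustrative sketch, not part of the driver: the transmit-side half of
 * the flow control that lio_update_txq_status() completes.  When the
 * instruction queue behind a subqueue fills up, the xmit path stops that
 * subqueue; the completion path above wakes it again once there is room.
 * The function name and parameters are placeholders.
 */
static void example_stop_subqueue_if_full(struct octeon_device *oct,
                                          struct net_device *netdev,
                                          int iq_num, int q_index)
{
        if (octnet_iq_is_full(oct, iq_num))
                netif_stop_subqueue(netdev, q_index);
}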
402
403 /**
404  * \brief Setup output queue
405  * @param oct octeon device
406  * @param q_no which queue
407  * @param num_descs how many descriptors
408  * @param desc_size size of each descriptor
409  * @param app_ctx application context
410  */
411 static int octeon_setup_droq(struct octeon_device *oct, int q_no, int num_descs,
412                              int desc_size, void *app_ctx)
413 {
414         int ret_val;
415
416         dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
417         /* droq creation and local register settings. */
418         ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
419         if (ret_val < 0)
420                 return ret_val;
421
422         if (ret_val == 1) {
423                 dev_dbg(&oct->pci_dev->dev, "Using default droq %d\n", q_no);
424                 return 0;
425         }
426
427         /* Enable the droq queues */
428         octeon_set_droq_pkt_op(oct, q_no, 1);
429
430         /* Send Credit for Octeon Output queues. Credits are always
431          * sent after the output queue is enabled.
432          */
433         writel(oct->droq[q_no]->max_count, oct->droq[q_no]->pkts_credit_reg);
434
435         return ret_val;
436 }
437
438 /** Routine to push packets arriving on the Octeon interface up to the network layer.
439  * @param oct_id   - octeon device id.
440  * @param skbuff   - skbuff struct to be passed to network layer.
441  * @param len      - size of total data received.
442  * @param rh       - Control header associated with the packet
443  * @param param    - additional control data with the packet
444  * @param arg      - farg registered in droq_ops
445  */
446 static void
447 liquidio_push_packet(u32 octeon_id __attribute__((unused)),
448                      void *skbuff,
449                      u32 len,
450                      union octeon_rh *rh,
451                      void *param,
452                      void *arg)
453 {
454         struct net_device *netdev = (struct net_device *)arg;
455         struct octeon_droq *droq =
456             container_of(param, struct octeon_droq, napi);
457         struct sk_buff *skb = (struct sk_buff *)skbuff;
458         struct skb_shared_hwtstamps *shhwtstamps;
459         struct napi_struct *napi = param;
460         u16 vtag = 0;
461         u32 r_dh_off;
462         u64 ns;
463
464         if (netdev) {
465                 struct lio *lio = GET_LIO(netdev);
466                 struct octeon_device *oct = lio->oct_dev;
467                 int packet_was_received;
468
469                 /* Do not proceed if the interface is not in RUNNING state. */
470                 if (!ifstate_check(lio, LIO_IFSTATE_RUNNING)) {
471                         recv_buffer_free(skb);
472                         droq->stats.rx_dropped++;
473                         return;
474                 }
475
476                 skb->dev = netdev;
477
478                 skb_record_rx_queue(skb, droq->q_no);
479                 if (likely(len > MIN_SKB_SIZE)) {
480                         struct octeon_skb_page_info *pg_info;
481                         unsigned char *va;
482
483                         pg_info = ((struct octeon_skb_page_info *)(skb->cb));
484                         if (pg_info->page) {
485                                 /* For Paged allocation use the frags */
486                                 va = page_address(pg_info->page) +
487                                         pg_info->page_offset;
488                                 memcpy(skb->data, va, MIN_SKB_SIZE);
489                                 skb_put(skb, MIN_SKB_SIZE);
490                                 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
491                                                 pg_info->page,
492                                                 pg_info->page_offset +
493                                                 MIN_SKB_SIZE,
494                                                 len - MIN_SKB_SIZE,
495                                                 LIO_RXBUFFER_SZ);
496                         }
497                 } else {
498                         struct octeon_skb_page_info *pg_info =
499                                 ((struct octeon_skb_page_info *)(skb->cb));
500                         skb_copy_to_linear_data(skb, page_address(pg_info->page)
501                                                 + pg_info->page_offset, len);
502                         skb_put(skb, len);
503                         put_page(pg_info->page);
504                 }
505
506                 r_dh_off = (rh->r_dh.len - 1) * BYTES_PER_DHLEN_UNIT;
507
508                 if (oct->ptp_enable) {
509                         if (rh->r_dh.has_hwtstamp) {
510                                 /* The hardware includes the timestamp at
511                                  * the beginning of the packet.
512                                  */
513                                 if (ifstate_check
514                                         (lio,
515                                          LIO_IFSTATE_RX_TIMESTAMP_ENABLED)) {
516                                         /* Nanoseconds are in the first 64-bits
517                                          * of the packet.
518                                          */
519                                         memcpy(&ns, (skb->data + r_dh_off),
520                                                sizeof(ns));
521                                         r_dh_off -= BYTES_PER_DHLEN_UNIT;
522                                         shhwtstamps = skb_hwtstamps(skb);
523                                         shhwtstamps->hwtstamp =
524                                                 ns_to_ktime(ns +
525                                                             lio->ptp_adjust);
526                                 }
527                         }
528                 }
529
530                 if (rh->r_dh.has_hash) {
531                         __be32 *hash_be = (__be32 *)(skb->data + r_dh_off);
532                         u32 hash = be32_to_cpu(*hash_be);
533
534                         skb_set_hash(skb, hash, PKT_HASH_TYPE_L4);
535                         r_dh_off -= BYTES_PER_DHLEN_UNIT;
536                 }
537
538                 skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
539                 skb->protocol = eth_type_trans(skb, skb->dev);
540
541                 if ((netdev->features & NETIF_F_RXCSUM) &&
542                     (((rh->r_dh.encap_on) &&
543                       (rh->r_dh.csum_verified & CNNIC_TUN_CSUM_VERIFIED)) ||
544                      (!(rh->r_dh.encap_on) &&
545                       (rh->r_dh.csum_verified & CNNIC_CSUM_VERIFIED))))
546                         /* checksum has already been verified */
547                         skb->ip_summed = CHECKSUM_UNNECESSARY;
548                 else
549                         skb->ip_summed = CHECKSUM_NONE;
550
551                 /* Set the encapsulation fields based on the status
552                  * received from the firmware.
553                  */
554                 if (rh->r_dh.encap_on) {
555                         skb->encapsulation = 1;
556                         skb->csum_level = 1;
557                         droq->stats.rx_vxlan++;
558                 }
559
560                 /* inbound VLAN tag */
561                 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
562                     rh->r_dh.vlan) {
563                         u16 priority = rh->r_dh.priority;
564                         u16 vid = rh->r_dh.vlan;
565
566                         vtag = (priority << VLAN_PRIO_SHIFT) | vid;
567                         __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
568                 }
569
570                 packet_was_received = (napi_gro_receive(napi, skb) != GRO_DROP);
571
572                 if (packet_was_received) {
573                         droq->stats.rx_bytes_received += len;
574                         droq->stats.rx_pkts_received++;
575                 } else {
576                         droq->stats.rx_dropped++;
577                         netif_info(lio, rx_err, lio->netdev,
578                                    "droq:%d  error rx_dropped:%llu\n",
579                                    droq->q_no, droq->stats.rx_dropped);
580                 }
581
582         } else {
583                 recv_buffer_free(skb);
584         }
585 }
586
587 /**
588  * \brief wrapper for calling napi_schedule
589  * @param param parameters to pass to napi_schedule
590  *
591  * Used when scheduling on different CPUs
592  */
593 static void napi_schedule_wrapper(void *param)
594 {
595         struct napi_struct *napi = param;
596
597         napi_schedule(napi);
598 }
599
600 /**
601  * \brief callback when receive interrupt occurs and we are in NAPI mode
602  * @param arg pointer to octeon output queue
603  */
604 static void liquidio_napi_drv_callback(void *arg)
605 {
606         struct octeon_device *oct;
607         struct octeon_droq *droq = arg;
608         int this_cpu = smp_processor_id();
609
610         oct = droq->oct_dev;
611
612         if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
613             droq->cpu_id == this_cpu) {
614                 napi_schedule_irqoff(&droq->napi);
615         } else {
616                 call_single_data_t *csd = &droq->csd;
617
618                 csd->func = napi_schedule_wrapper;
619                 csd->info = &droq->napi;
620                 csd->flags = 0;
621
622                 smp_call_function_single_async(droq->cpu_id, csd);
623         }
624 }
625
626 /**
627  * \brief Entry point for NAPI polling
628  * @param napi NAPI structure
629  * @param budget maximum number of items to process
630  */
631 static int liquidio_napi_poll(struct napi_struct *napi, int budget)
632 {
633         struct octeon_instr_queue *iq;
634         struct octeon_device *oct;
635         struct octeon_droq *droq;
636         int tx_done = 0, iq_no;
637         int work_done;
638
639         droq = container_of(napi, struct octeon_droq, napi);
640         oct = droq->oct_dev;
641         iq_no = droq->q_no;
642
643         /* Handle Droq descriptors */
644         work_done = octeon_process_droq_poll_cmd(oct, droq->q_no,
645                                                  POLL_EVENT_PROCESS_PKTS,
646                                                  budget);
647
648         /* Flush the instruction queue */
649         iq = oct->instr_queue[iq_no];
650         if (iq) {
651                 /* TODO: move this check to inside octeon_flush_iq,
652                  * once check_db_timeout is removed
653                  */
654                 if (atomic_read(&iq->instr_pending))
655                         /* Process iq buffers within the budget limits */
656                         tx_done = octeon_flush_iq(oct, iq, budget);
657                 else
658                         tx_done = 1;
659                 /* Update the iq read index rather than waiting for the
660                  * next interrupt; return if tx_done is false.
661                  */
662                 /* Sub-queue status update. */
663                 lio_update_txq_status(oct, iq_no);
664         } else {
665                 dev_err(&oct->pci_dev->dev, "%s:  iq (%d) num invalid\n",
666                         __func__, iq_no);
667         }
668
669 #define MAX_REG_CNT  2000000U
670         /* Force-enable interrupts if the register counts are high, to avoid wraparound. */
671         if ((work_done < budget && tx_done) ||
672             (iq && iq->pkt_in_done >= MAX_REG_CNT) ||
673             (droq->pkt_count >= MAX_REG_CNT)) {
674                 tx_done = 1;
675                 napi_complete_done(napi, work_done);
676
677                 octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
678                                              POLL_EVENT_ENABLE_INTR, 0);
679                 return 0;
680         }
681
682         return (!tx_done) ? (budget) : (work_done);
683 }
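
/* Illustrative sketch, not part of the driver: the general NAPI contract
 * that liquidio_napi_poll() follows.  A poller that consumes less than its
 * budget calls napi_complete_done() and re-enables the device interrupt;
 * returning the full budget tells the core to keep polling.  The interrupt
 * re-enable step below is only a comment placeholder.
 */
static int example_napi_poll(struct napi_struct *napi, int budget)
{
        int work_done = 0;

        /* ... process up to 'budget' receive descriptors here ... */

        if (work_done < budget) {
                napi_complete_done(napi, work_done);
                /* ... re-enable the queue interrupt here ... */
        }

        return work_done;
}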
684
685 /**
686  * \brief Setup input and output queues
687  * @param octeon_dev octeon device
688  * @param ifidx Interface index
689  *
690  * Note: Queues are with respect to the octeon device. Thus
691  * an input queue is for egress packets, and output queues
692  * are for ingress packets.
693  */
694 int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
695                              u32 num_iqs, u32 num_oqs)
696 {
697         struct octeon_droq_ops droq_ops;
698         struct net_device *netdev;
699         struct octeon_droq *droq;
700         struct napi_struct *napi;
701         int cpu_id_modulus;
702         int num_tx_descs;
703         struct lio *lio;
704         int retval = 0;
705         int q, q_no;
706         int cpu_id;
707
708         netdev = octeon_dev->props[ifidx].netdev;
709
710         lio = GET_LIO(netdev);
711
712         memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));
713
714         droq_ops.fptr = liquidio_push_packet;
715         droq_ops.farg = netdev;
716
717         droq_ops.poll_mode = 1;
718         droq_ops.napi_fn = liquidio_napi_drv_callback;
719         cpu_id = 0;
720         cpu_id_modulus = num_present_cpus();
721
722         /* set up DROQs. */
723         for (q = 0; q < num_oqs; q++) {
724                 q_no = lio->linfo.rxpciq[q].s.q_no;
725                 dev_dbg(&octeon_dev->pci_dev->dev,
726                         "%s index:%d linfo.rxpciq.s.q_no:%d\n",
727                         __func__, q, q_no);
728                 retval = octeon_setup_droq(
729                     octeon_dev, q_no,
730                     CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
731                                                 lio->ifidx),
732                     CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
733                                                    lio->ifidx),
734                     NULL);
735                 if (retval) {
736                         dev_err(&octeon_dev->pci_dev->dev,
737                                 "%s : Runtime DROQ(RxQ) creation failed.\n",
738                                 __func__);
739                         return 1;
740                 }
741
742                 droq = octeon_dev->droq[q_no];
743                 napi = &droq->napi;
744                 dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
745                         (u64)netdev, (u64)octeon_dev);
746                 netif_napi_add(netdev, napi, liquidio_napi_poll, 64);
747
748                 /* designate a CPU for this droq */
749                 droq->cpu_id = cpu_id;
750                 cpu_id++;
751                 if (cpu_id >= cpu_id_modulus)
752                         cpu_id = 0;
753
754                 octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
755         }
756
757         if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
758                 /* 23XX PF/VF can send/recv control messages (via the first
759                  * PF/VF-owned droq) from the firmware even if the ethX
760                  * interface is down, so that's why poll_mode must be off
761                  * for the first droq.
762                  */
763                 octeon_dev->droq[0]->ops.poll_mode = 0;
764         }
765
766         /* set up IQs. */
767         for (q = 0; q < num_iqs; q++) {
768                 num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
769                     octeon_get_conf(octeon_dev), lio->ifidx);
770                 retval = octeon_setup_iq(octeon_dev, ifidx, q,
771                                          lio->linfo.txpciq[q], num_tx_descs,
772                                          netdev_get_tx_queue(netdev, q));
773                 if (retval) {
774                         dev_err(&octeon_dev->pci_dev->dev,
775                                 " %s : Runtime IQ(TxQ) creation failed.\n",
776                                 __func__);
777                         return 1;
778                 }
779
780                 /* XPS */
781                 if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
782                     octeon_dev->ioq_vector) {
783                         struct octeon_ioq_vector    *ioq_vector;
784
785                         ioq_vector = &octeon_dev->ioq_vector[q];
786                         netif_set_xps_queue(netdev,
787                                             &ioq_vector->affinity_mask,
788                                             ioq_vector->iq_index);
789                 }
790         }
791
792         return 0;
793 }
794
795 static
796 int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
797 {
798         struct octeon_device *oct = droq->oct_dev;
799         struct octeon_device_priv *oct_priv =
800             (struct octeon_device_priv *)oct->priv;
801
802         if (droq->ops.poll_mode) {
803                 droq->ops.napi_fn(droq);
804         } else {
805                 if (ret & MSIX_PO_INT) {
806                         if (OCTEON_CN23XX_VF(oct))
807                                 dev_err(&oct->pci_dev->dev,
808                                         "should not get Rx here when poll_mode is 0 for VF\n");
809                         tasklet_schedule(&oct_priv->droq_tasklet);
810                         return 1;
811                 }
812                 /* This will be flushed periodically by the iq doorbell check */
813                 if (ret & MSIX_PI_INT)
814                         return 0;
815         }
816
817         return 0;
818 }
819
820 irqreturn_t
821 liquidio_msix_intr_handler(int irq __attribute__((unused)), void *dev)
822 {
823         struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
824         struct octeon_device *oct = ioq_vector->oct_dev;
825         struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];
826         u64 ret;
827
828         ret = oct->fn_list.msix_interrupt_handler(ioq_vector);
829
830         if (ret & MSIX_PO_INT || ret & MSIX_PI_INT)
831                 liquidio_schedule_msix_droq_pkt_handler(droq, ret);
832
833         return IRQ_HANDLED;
834 }
835
836 /**
837  * \brief Droq packet processor scheduler
838  * @param oct octeon device
839  */
840 static void liquidio_schedule_droq_pkt_handlers(struct octeon_device *oct)
841 {
842         struct octeon_device_priv *oct_priv =
843                 (struct octeon_device_priv *)oct->priv;
844         struct octeon_droq *droq;
845         u64 oq_no;
846
847         if (oct->int_status & OCT_DEV_INTR_PKT_DATA) {
848                 for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct);
849                      oq_no++) {
850                         if (!(oct->droq_intr & BIT_ULL(oq_no)))
851                                 continue;
852
853                         droq = oct->droq[oq_no];
854
855                         if (droq->ops.poll_mode) {
856                                 droq->ops.napi_fn(droq);
857                                 oct_priv->napi_mask |= BIT_ULL(oq_no);
858                         } else {
859                                 tasklet_schedule(&oct_priv->droq_tasklet);
860                         }
861                 }
862         }
863 }
864
865 /**
866  * \brief Interrupt handler for octeon
867  * @param irq unused
868  * @param dev octeon device
869  */
870 static
871 irqreturn_t liquidio_legacy_intr_handler(int irq __attribute__((unused)),
872                                          void *dev)
873 {
874         struct octeon_device *oct = (struct octeon_device *)dev;
875         irqreturn_t ret;
876
877         /* Disable our interrupts for the duration of ISR */
878         oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
879
880         ret = oct->fn_list.process_interrupt_regs(oct);
881
882         if (ret == IRQ_HANDLED)
883                 liquidio_schedule_droq_pkt_handlers(oct);
884
885         /* Re-enable our interrupts  */
886         if (!(atomic_read(&oct->status) == OCT_DEV_IN_RESET))
887                 oct->fn_list.enable_interrupt(oct, OCTEON_ALL_INTR);
888
889         return ret;
890 }
891
892 /**
893  * \brief Setup interrupt for octeon device
894  * @param oct octeon device
895  *
896  *  Enable interrupt in Octeon device as given in the PCI interrupt mask.
897  */
898 int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
899 {
900         struct msix_entry *msix_entries;
901         char *queue_irq_names = NULL;
902         int i, num_interrupts = 0;
903         int num_alloc_ioq_vectors;
904         char *aux_irq_name = NULL;
905         int num_ioq_vectors;
906         int irqret, err;
907
908         oct->num_msix_irqs = num_ioqs;
909         if (oct->msix_on) {
910                 if (OCTEON_CN23XX_PF(oct)) {
911                         num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;
912
913                         /* one non-ioq interrupt for handling
914                          * sli_mac_pf_int_sum
915                          */
916                         oct->num_msix_irqs += 1;
917                 } else if (OCTEON_CN23XX_VF(oct)) {
918                         num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
919                 }
920
921                 /* allocate storage for the names assigned to each irq */
922                 oct->irq_name_storage =
923                         kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
924                 if (!oct->irq_name_storage) {
925                         dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
926                         return -ENOMEM;
927                 }
928
929                 queue_irq_names = oct->irq_name_storage;
930
931                 if (OCTEON_CN23XX_PF(oct))
932                         aux_irq_name = &queue_irq_names
933                                 [IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];
934
935                 oct->msix_entries = kcalloc(oct->num_msix_irqs,
936                                             sizeof(struct msix_entry),
937                                             GFP_KERNEL);
938                 if (!oct->msix_entries) {
939                         dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
940                         kfree(oct->irq_name_storage);
941                         oct->irq_name_storage = NULL;
942                         return -ENOMEM;
943                 }
944
945                 msix_entries = (struct msix_entry *)oct->msix_entries;
946
947                 /* Assumption: the PF MSI-X vectors run from pf_srn through
948                  * trs, not from 0.  If that is not the case, change this code.
949                  */
950                 if (OCTEON_CN23XX_PF(oct)) {
951                         for (i = 0; i < oct->num_msix_irqs - 1; i++)
952                                 msix_entries[i].entry =
953                                         oct->sriov_info.pf_srn + i;
954
955                         msix_entries[oct->num_msix_irqs - 1].entry =
956                                 oct->sriov_info.trs;
957                 } else if (OCTEON_CN23XX_VF(oct)) {
958                         for (i = 0; i < oct->num_msix_irqs; i++)
959                                 msix_entries[i].entry = i;
960                 }
961                 num_alloc_ioq_vectors = pci_enable_msix_range(
962                                                 oct->pci_dev, msix_entries,
963                                                 oct->num_msix_irqs,
964                                                 oct->num_msix_irqs);
965                 if (num_alloc_ioq_vectors < 0) {
966                         dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
967                         kfree(oct->msix_entries);
968                         oct->msix_entries = NULL;
969                         kfree(oct->irq_name_storage);
970                         oct->irq_name_storage = NULL;
971                         return num_alloc_ioq_vectors;
972                 }
973
974                 dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");
975
976                 num_ioq_vectors = oct->num_msix_irqs;
977                 /* For PF, there is one non-ioq interrupt handler. */
978                 if (OCTEON_CN23XX_PF(oct)) {
979                         num_ioq_vectors -= 1;
980
981                         snprintf(aux_irq_name, INTRNAMSIZ,
982                                  "LiquidIO%u-pf%u-aux", oct->octeon_id,
983                                  oct->pf_num);
984                         irqret = request_irq(
985                                         msix_entries[num_ioq_vectors].vector,
986                                         liquidio_legacy_intr_handler, 0,
987                                         aux_irq_name, oct);
988                         if (irqret) {
989                                 dev_err(&oct->pci_dev->dev,
990                                         "Request_irq failed for MSIX interrupt Error: %d\n",
991                                         irqret);
992                                 pci_disable_msix(oct->pci_dev);
993                                 kfree(oct->msix_entries);
994                                 kfree(oct->irq_name_storage);
995                                 oct->irq_name_storage = NULL;
996                                 oct->msix_entries = NULL;
997                                 return irqret;
998                         }
999                 }
1000                 for (i = 0 ; i < num_ioq_vectors ; i++) {
1001                         if (OCTEON_CN23XX_PF(oct))
1002                                 snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
1003                                          INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
1004                                          oct->octeon_id, oct->pf_num, i);
1005
1006                         if (OCTEON_CN23XX_VF(oct))
1007                                 snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
1008                                          INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
1009                                          oct->octeon_id, oct->vf_num, i);
1010
1011                         irqret = request_irq(msix_entries[i].vector,
1012                                              liquidio_msix_intr_handler, 0,
1013                                              &queue_irq_names[IRQ_NAME_OFF(i)],
1014                                              &oct->ioq_vector[i]);
1015
1016                         if (irqret) {
1017                                 dev_err(&oct->pci_dev->dev,
1018                                         "Request_irq failed for MSIX interrupt Error: %d\n",
1019                                         irqret);
1020                                 /* Free the non-ioq irq vector here. */
1021                                 free_irq(msix_entries[num_ioq_vectors].vector,
1022                                          oct);
1023
1024                                 while (i) {
1025                                         i--;
1026                                         /* Clear the affinity mask. */
1027                                         irq_set_affinity_hint(
1028                                                       msix_entries[i].vector,
1029                                                       NULL);
1030                                         free_irq(msix_entries[i].vector,
1031                                                  &oct->ioq_vector[i]);
1032                                 }
1033                                 pci_disable_msix(oct->pci_dev);
1034                                 kfree(oct->msix_entries);
1035                                 kfree(oct->irq_name_storage);
1036                                 oct->irq_name_storage = NULL;
1037                                 oct->msix_entries = NULL;
1038                                 return irqret;
1039                         }
1040                         oct->ioq_vector[i].vector = msix_entries[i].vector;
1041                         /* assign the cpu mask for this msix interrupt vector */
1042                         irq_set_affinity_hint(msix_entries[i].vector,
1043                                               &oct->ioq_vector[i].affinity_mask
1044                                               );
1045                 }
1046                 dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
1047                         oct->octeon_id);
1048         } else {
1049                 err = pci_enable_msi(oct->pci_dev);
1050                 if (err)
1051                         dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
1052                                  err);
1053                 else
1054                         oct->flags |= LIO_FLAG_MSI_ENABLED;
1055
1056                 /* allocate storage for the names assigned to the irq */
1057                 oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
1058                 if (!oct->irq_name_storage)
1059                         return -ENOMEM;
1060
1061                 queue_irq_names = oct->irq_name_storage;
1062
1063                 if (OCTEON_CN23XX_PF(oct))
1064                         snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1065                                  "LiquidIO%u-pf%u-rxtx-%u",
1066                                  oct->octeon_id, oct->pf_num, 0);
1067
1068                 if (OCTEON_CN23XX_VF(oct))
1069                         snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
1070                                  "LiquidIO%u-vf%u-rxtx-%u",
1071                                  oct->octeon_id, oct->vf_num, 0);
1072
1073                 irqret = request_irq(oct->pci_dev->irq,
1074                                      liquidio_legacy_intr_handler,
1075                                      IRQF_SHARED,
1076                                      &queue_irq_names[IRQ_NAME_OFF(0)], oct);
1077                 if (irqret) {
1078                         if (oct->flags & LIO_FLAG_MSI_ENABLED)
1079                                 pci_disable_msi(oct->pci_dev);
1080                         dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
1081                                 irqret);
1082                         kfree(oct->irq_name_storage);
1083                         oct->irq_name_storage = NULL;
1084                         return irqret;
1085                 }
1086         }
1087         return 0;
1088 }
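
/* Illustrative sketch, not part of the driver: interrupt teardown that
 * mirrors octeon_setup_interrupt() above.  The real drivers do this in
 * their resource-destroy paths; the function name is a placeholder, and
 * the ordering shown (free irqs, drop affinity hints, disable MSI-X/MSI,
 * free the name storage) is the inverse of the setup sequence.
 */
static void example_teardown_interrupt(struct octeon_device *oct)
{
        struct msix_entry *msix_entries;
        int num_ioq_vectors = oct->num_msix_irqs;
        int i;

        if (oct->msix_on) {
                msix_entries = (struct msix_entry *)oct->msix_entries;

                if (OCTEON_CN23XX_PF(oct)) {
                        /* The last vector is the non-ioq (aux) interrupt. */
                        num_ioq_vectors -= 1;
                        free_irq(msix_entries[num_ioq_vectors].vector, oct);
                }

                for (i = 0; i < num_ioq_vectors; i++) {
                        /* Drop the affinity hint set at request time. */
                        irq_set_affinity_hint(msix_entries[i].vector, NULL);
                        free_irq(msix_entries[i].vector, &oct->ioq_vector[i]);
                }

                pci_disable_msix(oct->pci_dev);
                kfree(oct->msix_entries);
                oct->msix_entries = NULL;
        } else {
                free_irq(oct->pci_dev->irq, oct);
                if (oct->flags & LIO_FLAG_MSI_ENABLED)
                        pci_disable_msi(oct->pci_dev);
        }

        kfree(oct->irq_name_storage);
        oct->irq_name_storage = NULL;
}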