1 /**********************************************************************
4 * Contact: support@cavium.com
5 * Please include "LiquidIO" in the subject.
7 * Copyright (c) 2003-2017 Cavium, Inc.
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more details.
17 ***********************************************************************/
18 #include <linux/pci.h>
19 #include <linux/if_vlan.h>
20 #include "liquidio_common.h"
21 #include "octeon_droq.h"
22 #include "octeon_iq.h"
23 #include "response_manager.h"
24 #include "octeon_device.h"
25 #include "octeon_nic.h"
26 #include "octeon_main.h"
27 #include "octeon_network.h"
28 #include <net/switchdev.h>
29 #include "lio_vf_rep.h"
30 #include "octeon_network.h"
/* Forward declarations for the VF-representor net_device_ops handlers,
 * referenced by the lio_vf_rep_ndev_ops table below and defined later
 * in this file.
 */
static int lio_vf_rep_open(struct net_device *ndev);
static int lio_vf_rep_stop(struct net_device *ndev);
static netdev_tx_t lio_vf_rep_pkt_xmit(struct sk_buff *skb,
				       struct net_device *ndev);
static void lio_vf_rep_tx_timeout(struct net_device *netdev);
static int lio_vf_rep_phys_port_name(struct net_device *dev,
				     char *buf, size_t len);
static void lio_vf_rep_get_stats64(struct net_device *dev,
				   struct rtnl_link_stats64 *stats64);
static int lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu);
43 static const struct net_device_ops lio_vf_rep_ndev_ops = {
44 .ndo_open = lio_vf_rep_open,
45 .ndo_stop = lio_vf_rep_stop,
46 .ndo_start_xmit = lio_vf_rep_pkt_xmit,
47 .ndo_tx_timeout = lio_vf_rep_tx_timeout,
48 .ndo_get_phys_port_name = lio_vf_rep_phys_port_name,
49 .ndo_get_stats64 = lio_vf_rep_get_stats64,
50 .ndo_change_mtu = lio_vf_rep_change_mtu,
54 lio_vf_rep_send_sc_complete(struct octeon_device *oct,
55 u32 status, void *ptr)
57 struct octeon_soft_command *sc = (struct octeon_soft_command *)ptr;
58 struct lio_vf_rep_sc_ctx *ctx =
59 (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
60 struct lio_vf_rep_resp *resp =
61 (struct lio_vf_rep_resp *)sc->virtrptr;
63 if (status != OCTEON_REQUEST_TIMEOUT && READ_ONCE(resp->status))
64 WRITE_ONCE(resp->status, 0);
66 complete(&ctx->complete);
70 lio_vf_rep_send_soft_command(struct octeon_device *oct,
71 void *req, int req_size,
72 void *resp, int resp_size)
74 int tot_resp_size = sizeof(struct lio_vf_rep_resp) + resp_size;
75 int ctx_size = sizeof(struct lio_vf_rep_sc_ctx);
76 struct octeon_soft_command *sc = NULL;
77 struct lio_vf_rep_resp *rep_resp;
78 struct lio_vf_rep_sc_ctx *ctx;
82 sc = (struct octeon_soft_command *)
83 octeon_alloc_soft_command(oct, req_size,
84 tot_resp_size, ctx_size);
88 ctx = (struct lio_vf_rep_sc_ctx *)sc->ctxptr;
89 memset(ctx, 0, ctx_size);
90 init_completion(&ctx->complete);
92 sc_req = (struct lio_vf_rep_req *)sc->virtdptr;
93 memcpy(sc_req, req, req_size);
95 rep_resp = (struct lio_vf_rep_resp *)sc->virtrptr;
96 memset(rep_resp, 0, tot_resp_size);
97 WRITE_ONCE(rep_resp->status, 1);
100 octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
101 OPCODE_NIC_VF_REP_CMD, 0, 0, 0);
102 sc->callback = lio_vf_rep_send_sc_complete;
103 sc->callback_arg = sc;
104 sc->wait_time = LIO_VF_REP_REQ_TMO_MS;
106 err = octeon_send_soft_command(oct, sc);
107 if (err == IQ_SEND_FAILED)
110 wait_for_completion_timeout(&ctx->complete,
112 (2 * LIO_VF_REP_REQ_TMO_MS));
113 err = READ_ONCE(rep_resp->status) ? -EBUSY : 0;
115 dev_err(&oct->pci_dev->dev, "VF rep send config failed\n");
118 memcpy(resp, (rep_resp + 1), resp_size);
120 octeon_free_soft_command(oct, sc);
126 lio_vf_rep_open(struct net_device *ndev)
128 struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
129 struct lio_vf_rep_req rep_cfg;
130 struct octeon_device *oct;
135 memset(&rep_cfg, 0, sizeof(rep_cfg));
136 rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
137 rep_cfg.ifidx = vf_rep->ifidx;
138 rep_cfg.rep_state.state = LIO_VF_REP_STATE_UP;
140 ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
141 sizeof(rep_cfg), NULL, 0);
144 dev_err(&oct->pci_dev->dev,
145 "VF_REP open failed with err %d\n", ret);
149 atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) |
150 LIO_IFSTATE_RUNNING));
152 netif_carrier_on(ndev);
153 netif_start_queue(ndev);
159 lio_vf_rep_stop(struct net_device *ndev)
161 struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
162 struct lio_vf_rep_req rep_cfg;
163 struct octeon_device *oct;
168 memset(&rep_cfg, 0, sizeof(rep_cfg));
169 rep_cfg.req_type = LIO_VF_REP_REQ_STATE;
170 rep_cfg.ifidx = vf_rep->ifidx;
171 rep_cfg.rep_state.state = LIO_VF_REP_STATE_DOWN;
173 ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
174 sizeof(rep_cfg), NULL, 0);
177 dev_err(&oct->pci_dev->dev,
178 "VF_REP dev stop failed with err %d\n", ret);
182 atomic_set(&vf_rep->ifstate, (atomic_read(&vf_rep->ifstate) &
183 ~LIO_IFSTATE_RUNNING));
185 netif_tx_disable(ndev);
186 netif_carrier_off(ndev);
/* ndo_tx_timeout: reset the TX watchdog timestamp and restart the
 * queue so transmission can be retried.
 */
static void
lio_vf_rep_tx_timeout(struct net_device *ndev)
{
	netif_trans_update(ndev);

	netif_wake_queue(ndev);
}
200 lio_vf_rep_get_stats64(struct net_device *dev,
201 struct rtnl_link_stats64 *stats64)
203 struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
205 /* Swap tx and rx stats as VF rep is a switch port */
206 stats64->tx_packets = vf_rep->stats.rx_packets;
207 stats64->tx_bytes = vf_rep->stats.rx_bytes;
208 stats64->tx_dropped = vf_rep->stats.rx_dropped;
210 stats64->rx_packets = vf_rep->stats.tx_packets;
211 stats64->rx_bytes = vf_rep->stats.tx_bytes;
212 stats64->rx_dropped = vf_rep->stats.tx_dropped;
216 lio_vf_rep_change_mtu(struct net_device *ndev, int new_mtu)
218 struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
219 struct lio_vf_rep_req rep_cfg;
220 struct octeon_device *oct;
225 memset(&rep_cfg, 0, sizeof(rep_cfg));
226 rep_cfg.req_type = LIO_VF_REP_REQ_MTU;
227 rep_cfg.ifidx = vf_rep->ifidx;
228 rep_cfg.rep_mtu.mtu = cpu_to_be32(new_mtu);
230 ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
231 sizeof(rep_cfg), NULL, 0);
233 dev_err(&oct->pci_dev->dev,
234 "Change MTU failed with err %d\n", ret);
244 lio_vf_rep_phys_port_name(struct net_device *dev,
245 char *buf, size_t len)
247 struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
248 struct octeon_device *oct = vf_rep->oct;
251 ret = snprintf(buf, len, "pf%dvf%d", oct->pf_num,
252 vf_rep->ifidx - oct->pf_num * 64 - 1);
259 static struct net_device *
260 lio_vf_rep_get_ndev(struct octeon_device *oct, int ifidx)
262 int vf_id, max_vfs = CN23XX_MAX_VFS_PER_PF + 1;
263 int vfid_mask = max_vfs - 1;
265 if (ifidx <= oct->pf_num * max_vfs ||
266 ifidx >= oct->pf_num * max_vfs + max_vfs)
269 /* ifidx 1-63 for PF0 VFs
270 * ifidx 65-127 for PF1 VFs
272 vf_id = (ifidx & vfid_mask) - 1;
274 return oct->vf_rep_list.ndev[vf_id];
278 lio_vf_rep_copy_packet(struct octeon_device *oct,
282 if (likely(len > MIN_SKB_SIZE)) {
283 struct octeon_skb_page_info *pg_info;
286 pg_info = ((struct octeon_skb_page_info *)(skb->cb));
288 va = page_address(pg_info->page) +
289 pg_info->page_offset;
290 memcpy(skb->data, va, MIN_SKB_SIZE);
291 skb_put(skb, MIN_SKB_SIZE);
294 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
296 pg_info->page_offset + MIN_SKB_SIZE,
300 struct octeon_skb_page_info *pg_info =
301 ((struct octeon_skb_page_info *)(skb->cb));
303 skb_copy_to_linear_data(skb, page_address(pg_info->page) +
304 pg_info->page_offset, len);
306 put_page(pg_info->page);
311 lio_vf_rep_pkt_recv(struct octeon_recv_info *recv_info, void *buf)
313 struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
314 struct lio_vf_rep_desc *vf_rep;
315 struct net_device *vf_ndev;
316 struct octeon_device *oct;
321 oct = lio_get_device(recv_pkt->octeon_id);
325 skb = recv_pkt->buffer_ptr[0];
329 vf_ndev = lio_vf_rep_get_ndev(oct, ifidx);
333 vf_rep = netdev_priv(vf_ndev);
334 if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
335 recv_pkt->buffer_count > 1)
340 /* Multiple buffers are not used for vf_rep packets.
341 * So just buffer_size[0] is valid.
343 lio_vf_rep_copy_packet(oct, skb, recv_pkt->buffer_size[0]);
345 skb_pull(skb, rh->r_dh.len * BYTES_PER_DHLEN_UNIT);
346 skb->protocol = eth_type_trans(skb, skb->dev);
347 skb->ip_summed = CHECKSUM_NONE;
351 octeon_free_recv_info(recv_info);
356 for (i = 0; i < recv_pkt->buffer_count; i++)
357 recv_buffer_free(recv_pkt->buffer_ptr[i]);
359 octeon_free_recv_info(recv_info);
365 lio_vf_rep_packet_sent_callback(struct octeon_device *oct,
366 u32 status, void *buf)
368 struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
369 struct sk_buff *skb = sc->ctxptr;
370 struct net_device *ndev = skb->dev;
373 dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
374 sc->datasize, DMA_TO_DEVICE);
375 dev_kfree_skb_any(skb);
377 octeon_free_soft_command(oct, sc);
379 if (octnet_iq_is_full(oct, iq_no))
382 if (netif_queue_stopped(ndev))
383 netif_wake_queue(ndev);
387 lio_vf_rep_pkt_xmit(struct sk_buff *skb, struct net_device *ndev)
389 struct lio_vf_rep_desc *vf_rep = netdev_priv(ndev);
390 struct net_device *parent_ndev = vf_rep->parent_ndev;
391 struct octeon_device *oct = vf_rep->oct;
392 struct octeon_instr_pki_ih3 *pki_ih3;
393 struct octeon_soft_command *sc;
394 struct lio *parent_lio;
397 parent_lio = GET_LIO(parent_ndev);
399 if (!(atomic_read(&vf_rep->ifstate) & LIO_IFSTATE_RUNNING) ||
403 if (octnet_iq_is_full(vf_rep->oct, parent_lio->txq)) {
404 dev_err(&oct->pci_dev->dev, "VF rep: Device IQ full\n");
405 netif_stop_queue(ndev);
406 return NETDEV_TX_BUSY;
409 sc = (struct octeon_soft_command *)
410 octeon_alloc_soft_command(oct, 0, 0, 0);
412 dev_err(&oct->pci_dev->dev, "VF rep: Soft command alloc failed\n");
416 /* Multiple buffers are not used for vf_rep packets. */
417 if (skb_shinfo(skb)->nr_frags != 0) {
418 dev_err(&oct->pci_dev->dev, "VF rep: nr_frags != 0. Dropping packet\n");
422 sc->dmadptr = dma_map_single(&oct->pci_dev->dev,
423 skb->data, skb->len, DMA_TO_DEVICE);
424 if (dma_mapping_error(&oct->pci_dev->dev, sc->dmadptr)) {
425 dev_err(&oct->pci_dev->dev, "VF rep: DMA mapping failed\n");
429 sc->virtdptr = skb->data;
430 sc->datasize = skb->len;
432 sc->iq_no = parent_lio->txq;
434 octeon_prepare_soft_command(oct, sc, OPCODE_NIC, OPCODE_NIC_VF_REP_PKT,
435 vf_rep->ifidx, 0, 0);
436 pki_ih3 = (struct octeon_instr_pki_ih3 *)&sc->cmd.cmd3.pki_ih3;
437 pki_ih3->tagtype = ORDERED_TAG;
439 sc->callback = lio_vf_rep_packet_sent_callback;
440 sc->callback_arg = sc;
442 status = octeon_send_soft_command(oct, sc);
443 if (status == IQ_SEND_FAILED) {
444 dma_unmap_single(&oct->pci_dev->dev, sc->dmadptr,
445 sc->datasize, DMA_TO_DEVICE);
449 if (status == IQ_SEND_STOP)
450 netif_stop_queue(ndev);
452 netif_trans_update(ndev);
457 dev_kfree_skb_any(skb);
463 lio_vf_rep_attr_get(struct net_device *dev, struct switchdev_attr *attr)
465 struct lio_vf_rep_desc *vf_rep = netdev_priv(dev);
466 struct net_device *parent_ndev = vf_rep->parent_ndev;
467 struct lio *lio = GET_LIO(parent_ndev);
470 case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
471 attr->u.ppid.id_len = ETH_ALEN;
472 ether_addr_copy(attr->u.ppid.id,
473 (void *)&lio->linfo.hw_addr + 2);
483 static const struct switchdev_ops lio_vf_rep_switchdev_ops = {
484 .switchdev_port_attr_get = lio_vf_rep_attr_get,
488 lio_vf_rep_fetch_stats(struct work_struct *work)
490 struct cavium_wk *wk = (struct cavium_wk *)work;
491 struct lio_vf_rep_desc *vf_rep = wk->ctxptr;
492 struct lio_vf_rep_stats stats;
493 struct lio_vf_rep_req rep_cfg;
494 struct octeon_device *oct;
499 memset(&rep_cfg, 0, sizeof(rep_cfg));
500 rep_cfg.req_type = LIO_VF_REP_REQ_STATS;
501 rep_cfg.ifidx = vf_rep->ifidx;
503 ret = lio_vf_rep_send_soft_command(oct, &rep_cfg, sizeof(rep_cfg),
504 &stats, sizeof(stats));
507 octeon_swap_8B_data((u64 *)&stats, (sizeof(stats) >> 3));
508 memcpy(&vf_rep->stats, &stats, sizeof(stats));
511 schedule_delayed_work(&vf_rep->stats_wk.work,
512 msecs_to_jiffies(LIO_VF_REP_STATS_POLL_TIME_MS));
516 lio_vf_rep_create(struct octeon_device *oct)
518 struct lio_vf_rep_desc *vf_rep;
519 struct net_device *ndev;
522 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
525 if (!oct->sriov_info.sriov_enabled)
528 num_vfs = oct->sriov_info.num_vfs_alloced;
530 oct->vf_rep_list.num_vfs = 0;
531 for (i = 0; i < num_vfs; i++) {
532 ndev = alloc_etherdev(sizeof(struct lio_vf_rep_desc));
535 dev_err(&oct->pci_dev->dev,
536 "VF rep device %d creation failed\n", i);
540 ndev->min_mtu = LIO_MIN_MTU_SIZE;
541 ndev->max_mtu = LIO_MAX_MTU_SIZE;
542 ndev->netdev_ops = &lio_vf_rep_ndev_ops;
543 SWITCHDEV_SET_OPS(ndev, &lio_vf_rep_switchdev_ops);
545 vf_rep = netdev_priv(ndev);
546 memset(vf_rep, 0, sizeof(*vf_rep));
550 vf_rep->parent_ndev = oct->props[0].netdev;
551 vf_rep->ifidx = (oct->pf_num * 64) + i + 1;
553 eth_hw_addr_random(ndev);
555 if (register_netdev(ndev)) {
556 dev_err(&oct->pci_dev->dev, "VF rep nerdev registration failed\n");
562 netif_carrier_off(ndev);
564 INIT_DELAYED_WORK(&vf_rep->stats_wk.work,
565 lio_vf_rep_fetch_stats);
566 vf_rep->stats_wk.ctxptr = (void *)vf_rep;
567 schedule_delayed_work(&vf_rep->stats_wk.work,
569 (LIO_VF_REP_STATS_POLL_TIME_MS));
570 oct->vf_rep_list.num_vfs++;
571 oct->vf_rep_list.ndev[i] = ndev;
574 if (octeon_register_dispatch_fn(oct, OPCODE_NIC,
575 OPCODE_NIC_VF_REP_PKT,
576 lio_vf_rep_pkt_recv, oct)) {
577 dev_err(&oct->pci_dev->dev, "VF rep Dispatch func registration failed\n");
585 for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
586 ndev = oct->vf_rep_list.ndev[i];
587 oct->vf_rep_list.ndev[i] = NULL;
589 vf_rep = netdev_priv(ndev);
590 cancel_delayed_work_sync
591 (&vf_rep->stats_wk.work);
592 unregister_netdev(ndev);
597 oct->vf_rep_list.num_vfs = 0;
603 lio_vf_rep_destroy(struct octeon_device *oct)
605 struct lio_vf_rep_desc *vf_rep;
606 struct net_device *ndev;
609 if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
612 if (!oct->sriov_info.sriov_enabled)
615 for (i = 0; i < oct->vf_rep_list.num_vfs; i++) {
616 ndev = oct->vf_rep_list.ndev[i];
617 oct->vf_rep_list.ndev[i] = NULL;
619 vf_rep = netdev_priv(ndev);
620 cancel_delayed_work_sync
621 (&vf_rep->stats_wk.work);
622 netif_tx_disable(ndev);
623 netif_carrier_off(ndev);
625 unregister_netdev(ndev);
630 oct->vf_rep_list.num_vfs = 0;
634 lio_vf_rep_netdev_event(struct notifier_block *nb,
635 unsigned long event, void *ptr)
637 struct net_device *ndev = netdev_notifier_info_to_dev(ptr);
638 struct lio_vf_rep_desc *vf_rep;
639 struct lio_vf_rep_req rep_cfg;
640 struct octeon_device *oct;
644 case NETDEV_REGISTER:
645 case NETDEV_CHANGENAME:
652 if (ndev->netdev_ops != &lio_vf_rep_ndev_ops)
655 vf_rep = netdev_priv(ndev);
658 if (strlen(ndev->name) > LIO_IF_NAME_SIZE) {
659 dev_err(&oct->pci_dev->dev,
660 "Device name change sync failed as the size is > %d\n",
665 memset(&rep_cfg, 0, sizeof(rep_cfg));
666 rep_cfg.req_type = LIO_VF_REP_REQ_DEVNAME;
667 rep_cfg.ifidx = vf_rep->ifidx;
668 strncpy(rep_cfg.rep_name.name, ndev->name, LIO_IF_NAME_SIZE);
670 ret = lio_vf_rep_send_soft_command(oct, &rep_cfg,
671 sizeof(rep_cfg), NULL, 0);
673 dev_err(&oct->pci_dev->dev,
674 "vf_rep netdev name change failed with err %d\n", ret);
679 static struct notifier_block lio_vf_rep_netdev_notifier = {
680 .notifier_call = lio_vf_rep_netdev_event,
684 lio_vf_rep_modinit(void)
686 if (register_netdevice_notifier(&lio_vf_rep_netdev_notifier)) {
687 pr_err("netdev notifier registration failed\n");
695 lio_vf_rep_modexit(void)
697 if (unregister_netdevice_notifier(&lio_vf_rep_netdev_notifier))
698 pr_err("netdev notifier unregister failed\n");