2 * Copyright (c) 2016~2017 Hisilicon Limited.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
10 #include <linux/dma-mapping.h>
11 #include <linux/etherdevice.h>
12 #include <linux/interrupt.h>
13 #include <linux/if_vlan.h>
15 #include <linux/ipv6.h>
16 #include <linux/module.h>
17 #include <linux/pci.h>
18 #include <linux/skbuff.h>
19 #include <linux/sctp.h>
20 #include <linux/vermagic.h>
22 #include <net/vxlan.h>
25 #include "hns3_enet.h"
27 const char hns3_driver_name[] = "hns3";
28 const char hns3_driver_version[] = VERMAGIC_STRING;
29 static const char hns3_driver_string[] =
30 "Hisilicon Ethernet Network Driver for Hip08 Family";
31 static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
32 static struct hnae3_client client;
34 /* hns3_pci_tbl - PCI Device ID Table
36 * Last entry must be all 0s
38 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
39 * Class, Class Mask, private data (not used) }
41 static const struct pci_device_id hns3_pci_tbl[] = {
42 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
45 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
46 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
47 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
48 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
49 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
50 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
51 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
52 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
53 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
54 /* required last entry */
57 MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
59 static irqreturn_t hns3_irq_handle(int irq, void *dev)
61 struct hns3_enet_tqp_vector *tqp_vector = dev;
63 napi_schedule(&tqp_vector->napi);
68 static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
70 struct hns3_enet_tqp_vector *tqp_vectors;
73 for (i = 0; i < priv->vector_num; i++) {
74 tqp_vectors = &priv->tqp_vector[i];
76 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
79 /* release the irq resource */
80 free_irq(tqp_vectors->vector_irq, tqp_vectors);
81 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
85 static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
87 struct hns3_enet_tqp_vector *tqp_vectors;
94 for (i = 0; i < priv->vector_num; i++) {
95 tqp_vectors = &priv->tqp_vector[i];
97 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
100 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
101 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
102 "%s-%s-%d", priv->netdev->name, "TxRx",
105 } else if (tqp_vectors->rx_group.ring) {
106 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
107 "%s-%s-%d", priv->netdev->name, "Rx",
109 } else if (tqp_vectors->tx_group.ring) {
110 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
111 "%s-%s-%d", priv->netdev->name, "Tx",
114 /* Skip this unused q_vector */
118 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
120 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
124 netdev_err(priv->netdev, "request irq(%d) fail\n",
125 tqp_vectors->vector_irq);
129 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
135 static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
138 writel(mask_en, tqp_vector->mask_addr);
141 static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
143 napi_enable(&tqp_vector->napi);
146 hns3_mask_vector_irq(tqp_vector, 1);
149 static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
152 hns3_mask_vector_irq(tqp_vector, 0);
154 disable_irq(tqp_vector->vector_irq);
155 napi_disable(&tqp_vector->napi);
158 static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
161 /* this defines the configuration for GL (Interrupt Gap Limiter)
162 * GL defines the inter-interrupt gap.
163 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
165 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
166 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
167 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
170 static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
173 /* this defines the configuration for RL (Interrupt Rate Limiter).
174 * RL defines the rate of interrupts, i.e. the number of interrupts per second
175 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing
177 writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
180 static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
182 /* initialize the configuration for interrupt coalescing.
183 * 1. GL (Interrupt Gap Limiter)
184 * 2. RL (Interrupt Rate Limiter)
187 /* Default: enable interrupt coalescing */
188 tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
189 tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
190 hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
191 /* for now we are disabling interrupt RL; we
192 * will re-enable it later
194 hns3_set_vector_coalesc_rl(tqp_vector, 0);
195 tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
196 tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
199 static int hns3_nic_net_up(struct net_device *netdev)
201 struct hns3_nic_priv *priv = netdev_priv(netdev);
202 struct hnae3_handle *h = priv->ae_handle;
206 /* get irq resource for all vectors */
207 ret = hns3_nic_init_irq(priv);
209 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
213 /* enable the vectors */
214 for (i = 0; i < priv->vector_num; i++)
215 hns3_vector_enable(&priv->tqp_vector[i]);
217 /* start the ae_dev */
218 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
225 for (j = i - 1; j >= 0; j--)
226 hns3_vector_disable(&priv->tqp_vector[j]);
228 hns3_nic_uninit_irq(priv);
233 static int hns3_nic_net_open(struct net_device *netdev)
235 struct hns3_nic_priv *priv = netdev_priv(netdev);
236 struct hnae3_handle *h = priv->ae_handle;
239 netif_carrier_off(netdev);
241 ret = netif_set_real_num_tx_queues(netdev, h->kinfo.num_tqps);
244 "netif_set_real_num_tx_queues fail, ret=%d!\n",
249 ret = netif_set_real_num_rx_queues(netdev, h->kinfo.num_tqps);
252 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
256 ret = hns3_nic_net_up(netdev);
259 "hns net up fail, ret=%d!\n", ret);
266 static void hns3_nic_net_down(struct net_device *netdev)
268 struct hns3_nic_priv *priv = netdev_priv(netdev);
269 const struct hnae3_ae_ops *ops;
273 ops = priv->ae_handle->ae_algo->ops;
275 ops->stop(priv->ae_handle);
277 /* disable vectors */
278 for (i = 0; i < priv->vector_num; i++)
279 hns3_vector_disable(&priv->tqp_vector[i]);
281 /* free irq resources */
282 hns3_nic_uninit_irq(priv);
285 static int hns3_nic_net_stop(struct net_device *netdev)
287 netif_tx_stop_all_queues(netdev);
288 netif_carrier_off(netdev);
290 hns3_nic_net_down(netdev);
295 void hns3_set_multicast_list(struct net_device *netdev)
297 struct hns3_nic_priv *priv = netdev_priv(netdev);
298 struct hnae3_handle *h = priv->ae_handle;
299 struct netdev_hw_addr *ha = NULL;
301 if (h->ae_algo->ops->set_mc_addr) {
302 netdev_for_each_mc_addr(ha, netdev)
303 if (h->ae_algo->ops->set_mc_addr(h, ha->addr))
304 netdev_err(netdev, "set multicast fail\n");
308 static int hns3_nic_uc_sync(struct net_device *netdev,
309 const unsigned char *addr)
311 struct hns3_nic_priv *priv = netdev_priv(netdev);
312 struct hnae3_handle *h = priv->ae_handle;
314 if (h->ae_algo->ops->add_uc_addr)
315 return h->ae_algo->ops->add_uc_addr(h, addr);
320 static int hns3_nic_uc_unsync(struct net_device *netdev,
321 const unsigned char *addr)
323 struct hns3_nic_priv *priv = netdev_priv(netdev);
324 struct hnae3_handle *h = priv->ae_handle;
326 if (h->ae_algo->ops->rm_uc_addr)
327 return h->ae_algo->ops->rm_uc_addr(h, addr);
332 static int hns3_nic_mc_sync(struct net_device *netdev,
333 const unsigned char *addr)
335 struct hns3_nic_priv *priv = netdev_priv(netdev);
336 struct hnae3_handle *h = priv->ae_handle;
338 if (h->ae_algo->ops->add_mc_addr)
339 return h->ae_algo->ops->add_mc_addr(h, addr);
344 static int hns3_nic_mc_unsync(struct net_device *netdev,
345 const unsigned char *addr)
347 struct hns3_nic_priv *priv = netdev_priv(netdev);
348 struct hnae3_handle *h = priv->ae_handle;
350 if (h->ae_algo->ops->rm_mc_addr)
351 return h->ae_algo->ops->rm_mc_addr(h, addr);
356 void hns3_nic_set_rx_mode(struct net_device *netdev)
358 struct hns3_nic_priv *priv = netdev_priv(netdev);
359 struct hnae3_handle *h = priv->ae_handle;
361 if (h->ae_algo->ops->set_promisc_mode) {
362 if (netdev->flags & IFF_PROMISC)
363 h->ae_algo->ops->set_promisc_mode(h, 1);
365 h->ae_algo->ops->set_promisc_mode(h, 0);
367 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
368 netdev_err(netdev, "sync uc address fail\n");
369 if (netdev->flags & IFF_MULTICAST)
370 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
371 netdev_err(netdev, "sync mc address fail\n");
374 static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
375 u16 *mss, u32 *type_cs_vlan_tso)
377 u32 l4_offset, hdr_len;
378 union l3_hdr_info l3;
379 union l4_hdr_info l4;
383 if (!skb_is_gso(skb))
386 ret = skb_cow_head(skb, 0);
390 l3.hdr = skb_network_header(skb);
391 l4.hdr = skb_transport_header(skb);
393 /* Software should clear the IPv4 checksum field when TSO is enabled */
396 if (l3.v4->version == 4)
400 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
403 SKB_GSO_UDP_TUNNEL_CSUM)) {
404 if ((!(skb_shinfo(skb)->gso_type &
406 (skb_shinfo(skb)->gso_type &
407 SKB_GSO_UDP_TUNNEL_CSUM)) {
408 /* Software should clear the UDP checksum
409 * field when TSO is needed.
413 /* reset l3&l4 pointers from outer to inner headers */
414 l3.hdr = skb_inner_network_header(skb);
415 l4.hdr = skb_inner_transport_header(skb);
417 /* Software should clear the IPv4 checksum field when TSO is needed */
420 if (l3.v4->version == 4)
424 /* normal or tunnel packet */
425 l4_offset = l4.hdr - skb->data;
426 hdr_len = (l4.tcp->doff * 4) + l4_offset;
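/* Illustrative example: for a plain TCP/IPv4 frame with a 14-byte
 * Ethernet header, a 20-byte IP header and a 20-byte TCP header
 * (doff = 5), l4_offset = 34 and hdr_len = 5 * 4 + 34 = 54 bytes.
 */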
428 /* remove the payload length from the inner pseudo checksum when TSO */
429 l4_paylen = skb->len - l4_offset;
430 csum_replace_by_diff(&l4.tcp->check,
431 (__force __wsum)htonl(l4_paylen));
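/* The pseudo-header checksum must not cover the whole payload length
 * for TSO: hardware recomputes the checksum per segment using each
 * segment's own length, so the full-skb payload length is backed out
 * of the TCP checksum here.
 */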
433 /* find the txbd field values */
434 *paylen = skb->len - hdr_len;
435 hnae_set_bit(*type_cs_vlan_tso,
438 /* get MSS for TSO */
439 *mss = skb_shinfo(skb)->gso_size;
444 static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
452 unsigned char *l4_hdr;
453 unsigned char *exthdr;
457 /* find the outer header pointer */
458 l3.hdr = skb_network_header(skb);
459 l4_hdr = skb_inner_transport_header(skb);
461 if (skb->protocol == htons(ETH_P_IPV6)) {
462 exthdr = l3.hdr + sizeof(*l3.v6);
463 l4_proto_tmp = l3.v6->nexthdr;
464 if (l4_hdr != exthdr)
465 ipv6_skip_exthdr(skb, exthdr - skb->data,
466 &l4_proto_tmp, &frag_off);
467 } else if (skb->protocol == htons(ETH_P_IP)) {
468 l4_proto_tmp = l3.v4->protocol;
473 *ol4_proto = l4_proto_tmp;
476 if (!skb->encapsulation) {
481 /* find the inner header pointer */
482 l3.hdr = skb_inner_network_header(skb);
483 l4_hdr = skb_inner_transport_header(skb);
485 if (l3.v6->version == 6) {
486 exthdr = l3.hdr + sizeof(*l3.v6);
487 l4_proto_tmp = l3.v6->nexthdr;
488 if (l4_hdr != exthdr)
489 ipv6_skip_exthdr(skb, exthdr - skb->data,
490 &l4_proto_tmp, &frag_off);
491 } else if (l3.v4->version == 4) {
492 l4_proto_tmp = l3.v4->protocol;
495 *il4_proto = l4_proto_tmp;
500 static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
501 u8 il4_proto, u32 *type_cs_vlan_tso,
502 u32 *ol_type_vlan_len_msec)
512 struct gre_base_hdr *gre;
515 unsigned char *l2_hdr;
516 u8 l4_proto = ol4_proto;
523 l3.hdr = skb_network_header(skb);
524 l4.hdr = skb_transport_header(skb);
526 /* compute L2 header size for a normal packet, in units of 2 bytes */
527 l2_len = l3.hdr - skb->data;
528 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
529 HNS3_TXD_L2LEN_S, l2_len >> 1);
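/* e.g. an untagged Ethernet header gives l2_len = 14, which is
 * written as 7 in the descriptor's 2-byte-unit L2LEN field.
 */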
532 if (skb->encapsulation) {
533 /* compute OL2 header size, in units of 2 bytes */
535 hnae_set_field(*ol_type_vlan_len_msec,
537 HNS3_TXD_L2LEN_S, ol2_len >> 1);
539 /* compute OL3 header size, in units of 4 bytes */
540 ol3_len = l4.hdr - l3.hdr;
541 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
542 HNS3_TXD_L3LEN_S, ol3_len >> 2);
544 /* MAC in UDP, MAC in GRE (0x6558) */
545 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
546 /* switch MAC header ptr from outer to inner header */
547 l2_hdr = skb_inner_mac_header(skb);
549 /* compute OL4 header size, in units of 4 bytes */
550 ol4_len = l2_hdr - l4.hdr;
551 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
552 HNS3_TXD_L4LEN_S, ol4_len >> 2);
554 /* switch IP header ptr from outer to inner header */
555 l3.hdr = skb_inner_network_header(skb);
557 /* compute inner l2 header size, in units of 2 bytes */
558 l2_len = l3.hdr - l2_hdr;
559 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
560 HNS3_TXD_L2LEN_S, l2_len >> 1);
562 /* skb packet types not supported by hardware:
563 * the txbd len field is not filled.
568 /* switch L4 header pointer from outer to inner */
569 l4.hdr = skb_inner_transport_header(skb);
571 l4_proto = il4_proto;
574 /* compute inner(/normal) L3 header size, in units of 4 bytes */
575 l3_len = l4.hdr - l3.hdr;
576 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
577 HNS3_TXD_L3LEN_S, l3_len >> 2);
579 /* compute inner(/normal) L4 header size, in units of 4 bytes */
582 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
583 HNS3_TXD_L4LEN_S, l4.tcp->doff);
586 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
587 HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
590 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
591 HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
594 /* skb packet types not supported by hardware:
595 * the txbd len field is not filled.
601 static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
602 u8 il4_proto, u32 *type_cs_vlan_tso,
603 u32 *ol_type_vlan_len_msec)
610 u32 l4_proto = ol4_proto;
612 l3.hdr = skb_network_header(skb);
614 /* define OL3 type and tunnel type (OL4) */
615 if (skb->encapsulation) {
616 /* define outer network header type */
617 if (skb->protocol == htons(ETH_P_IP)) {
619 hnae_set_field(*ol_type_vlan_len_msec,
620 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
621 HNS3_OL3T_IPV4_CSUM);
623 hnae_set_field(*ol_type_vlan_len_msec,
624 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
625 HNS3_OL3T_IPV4_NO_CSUM);
627 } else if (skb->protocol == htons(ETH_P_IPV6)) {
628 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
629 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
632 /* define tunnel type (OL4) */
635 hnae_set_field(*ol_type_vlan_len_msec,
638 HNS3_TUN_MAC_IN_UDP);
641 hnae_set_field(*ol_type_vlan_len_msec,
647 /* drop the skb tunnel packet if hardware doesn't support it,
648 * because hardware can't calculate the csum when doing TSO.
653 /* the stack computes the IP header already;
654 * the driver calculates the l4 checksum when not doing TSO.
656 skb_checksum_help(skb);
660 l3.hdr = skb_inner_network_header(skb);
661 l4_proto = il4_proto;
664 if (l3.v4->version == 4) {
665 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
666 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
668 /* the stack computes the IP header already, the only time we
669 * need the hardware to recompute it is in the case of TSO.
672 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
674 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
675 } else if (l3.v6->version == 6) {
676 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
677 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
678 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
683 hnae_set_field(*type_cs_vlan_tso,
689 hnae_set_field(*type_cs_vlan_tso,
695 hnae_set_field(*type_cs_vlan_tso,
701 /* drop the skb tunnel packet if hardware doesn't support it,
702 * because hardware can't calculate the csum when doing TSO.
707 /* the stack computes the IP header already;
708 * the driver calculates the l4 checksum when not doing TSO.
710 skb_checksum_help(skb);
717 static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
719 /* Config bd buffer end */
720 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
721 HNS3_TXD_BDTYPE_S, 0);
722 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
723 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
724 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
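/* The resulting baseinfo marks the BD valid (VLD), sets FE only on a
 * packet's final fragment, and leaves the BD type and SC fields at zero.
 */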
727 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
728 int size, dma_addr_t dma, int frag_end,
729 enum hns_desc_type type)
731 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
732 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
733 u32 ol_type_vlan_len_msec = 0;
734 u16 bdtp_fe_sc_vld_ra_ri = 0;
735 u32 type_cs_vlan_tso = 0;
744 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
745 desc_cb->priv = priv;
746 desc_cb->length = size;
748 desc_cb->type = type;
750 /* now, fill the descriptor */
751 desc->addr = cpu_to_le64(dma);
752 desc->tx.send_size = cpu_to_le16((u16)size);
753 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
754 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
756 if (type == DESC_TYPE_SKB) {
757 skb = (struct sk_buff *)priv;
758 paylen = cpu_to_le16(skb->len);
760 if (skb->ip_summed == CHECKSUM_PARTIAL) {
761 skb_reset_mac_len(skb);
762 protocol = skb->protocol;
765 if (protocol == htons(ETH_P_8021Q)) {
766 protocol = vlan_get_protocol(skb);
767 skb->protocol = protocol;
769 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
772 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
774 &ol_type_vlan_len_msec);
775 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
777 &ol_type_vlan_len_msec);
781 ret = hns3_set_tso(skb, &paylen, &mss,
788 desc->tx.ol_type_vlan_len_msec =
789 cpu_to_le32(ol_type_vlan_len_msec);
790 desc->tx.type_cs_vlan_tso_len =
791 cpu_to_le32(type_cs_vlan_tso);
792 desc->tx.paylen = cpu_to_le16(paylen);
793 desc->tx.mss = cpu_to_le16(mss);
796 /* move ring pointer to next */
797 ring_ptr_move_fw(ring, next_to_use);
802 static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
803 int size, dma_addr_t dma, int frag_end,
804 enum hns_desc_type type)
806 unsigned int frag_buf_num;
811 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
812 sizeoflast = size % HNS3_MAX_BD_SIZE;
813 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
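/* Worked example (symbolic): if size is 2.5 * HNS3_MAX_BD_SIZE, then
 * frag_buf_num = 3 and sizeoflast = HNS3_MAX_BD_SIZE / 2, i.e. two
 * full-size BDs followed by one half-size BD.
 */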
815 /* When the frag size is bigger than the hardware BD limit, split this frag */
816 for (k = 0; k < frag_buf_num; k++) {
817 ret = hns3_fill_desc(ring, priv,
818 (k == frag_buf_num - 1) ?
819 sizeoflast : HNS3_MAX_BD_SIZE,
820 dma + HNS3_MAX_BD_SIZE * k,
821 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
822 (type == DESC_TYPE_SKB && !k) ?
823 DESC_TYPE_SKB : DESC_TYPE_PAGE);
831 static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
832 struct hns3_enet_ring *ring)
834 struct sk_buff *skb = *out_skb;
835 struct skb_frag_struct *frag;
842 size = skb_headlen(skb);
843 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
845 frag_num = skb_shinfo(skb)->nr_frags;
846 for (i = 0; i < frag_num; i++) {
847 frag = &skb_shinfo(skb)->frags[i];
848 size = skb_frag_size(frag);
850 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
851 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
854 buf_num += bdnum_for_frag;
857 if (buf_num > ring_space(ring))
864 static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
865 struct hns3_enet_ring *ring)
867 struct sk_buff *skb = *out_skb;
870 /* No. of fragments plus the linear part of the skb */
871 buf_num = skb_shinfo(skb)->nr_frags + 1;
873 if (buf_num > ring_space(ring))
881 static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
883 struct device *dev = ring_to_dev(ring);
886 for (i = 0; i < ring->desc_num; i++) {
887 /* check if this is where we started */
888 if (ring->next_to_use == next_to_use_orig)
891 /* unmap the descriptor dma address */
892 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
893 dma_unmap_single(dev,
894 ring->desc_cb[ring->next_to_use].dma,
895 ring->desc_cb[ring->next_to_use].length,
899 ring->desc_cb[ring->next_to_use].dma,
900 ring->desc_cb[ring->next_to_use].length,
904 ring_ptr_move_bw(ring, next_to_use);
908 static netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb,
909 struct net_device *netdev)
911 struct hns3_nic_priv *priv = netdev_priv(netdev);
912 struct hns3_nic_ring_data *ring_data =
913 &tx_ring_data(priv, skb->queue_mapping);
914 struct hns3_enet_ring *ring = ring_data->ring;
915 struct device *dev = priv->dev;
916 struct netdev_queue *dev_queue;
917 struct skb_frag_struct *frag;
918 int next_to_use_head;
919 int next_to_use_frag;
927 /* Prefetch the data used later */
930 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
932 u64_stats_update_begin(&ring->syncp);
933 ring->stats.tx_busy++;
934 u64_stats_update_end(&ring->syncp);
936 goto out_net_tx_busy;
938 u64_stats_update_begin(&ring->syncp);
939 ring->stats.sw_err_cnt++;
940 u64_stats_update_end(&ring->syncp);
941 netdev_err(netdev, "no memory to xmit!\n");
948 /* No. of fragments plus the linear part of the skb */
949 seg_num = skb_shinfo(skb)->nr_frags + 1;
950 /* Fill the first part */
951 size = skb_headlen(skb);
953 next_to_use_head = ring->next_to_use;
955 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
956 if (dma_mapping_error(dev, dma)) {
957 netdev_err(netdev, "TX head DMA map failed\n");
958 ring->stats.sw_err_cnt++;
962 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
965 goto head_dma_map_err;
967 next_to_use_frag = ring->next_to_use;
968 /* Fill the fragments */
969 for (i = 1; i < seg_num; i++) {
970 frag = &skb_shinfo(skb)->frags[i - 1];
971 size = skb_frag_size(frag);
972 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
973 if (dma_mapping_error(dev, dma)) {
974 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
975 ring->stats.sw_err_cnt++;
976 goto frag_dma_map_err;
978 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
979 seg_num - 1 == i ? 1 : 0,
983 goto frag_dma_map_err;
986 /* finished filling all descriptors for this packet */
987 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
988 netdev_tx_sent_queue(dev_queue, skb->len);
990 wmb(); /* Commit all data before submit */
992 hnae_queue_xmit(ring->tqp, buf_num);
997 hns_nic_dma_unmap(ring, next_to_use_frag);
1000 hns_nic_dma_unmap(ring, next_to_use_head);
1003 dev_kfree_skb_any(skb);
1004 return NETDEV_TX_OK;
1007 netif_stop_subqueue(netdev, ring_data->queue_index);
1008 smp_mb(); /* Commit all data before submit */
1010 return NETDEV_TX_BUSY;
1013 static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1015 struct hns3_nic_priv *priv = netdev_priv(netdev);
1016 struct hnae3_handle *h = priv->ae_handle;
1017 struct sockaddr *mac_addr = p;
1020 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1021 return -EADDRNOTAVAIL;
1023 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1025 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1029 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1034 static int hns3_nic_set_features(struct net_device *netdev,
1035 netdev_features_t features)
1037 struct hns3_nic_priv *priv = netdev_priv(netdev);
1039 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1040 priv->ops.fill_desc = hns3_fill_desc_tso;
1041 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1043 priv->ops.fill_desc = hns3_fill_desc;
1044 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1047 netdev->features = features;
1052 hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1054 struct hns3_nic_priv *priv = netdev_priv(netdev);
1055 int queue_num = priv->ae_handle->kinfo.num_tqps;
1056 struct hns3_enet_ring *ring;
1066 for (idx = 0; idx < queue_num; idx++) {
1067 /* fetch the tx stats */
1068 ring = priv->ring_data[idx].ring;
1070 start = u64_stats_fetch_begin_irq(&ring->syncp);
1071 tx_bytes += ring->stats.tx_bytes;
1072 tx_pkts += ring->stats.tx_pkts;
1073 tx_drop += ring->stats.tx_busy;
1074 tx_drop += ring->stats.sw_err_cnt;
1075 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1077 /* fetch the rx stats */
1078 ring = priv->ring_data[idx + queue_num].ring;
1080 start = u64_stats_fetch_begin_irq(&ring->syncp);
1081 rx_bytes += ring->stats.rx_bytes;
1082 rx_pkts += ring->stats.rx_pkts;
1083 rx_drop += ring->stats.non_vld_descs;
1084 rx_drop += ring->stats.err_pkt_len;
1085 rx_drop += ring->stats.l2_err;
1086 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1089 stats->tx_bytes = tx_bytes;
1090 stats->tx_packets = tx_pkts;
1091 stats->rx_bytes = rx_bytes;
1092 stats->rx_packets = rx_pkts;
1094 stats->rx_errors = netdev->stats.rx_errors;
1095 stats->multicast = netdev->stats.multicast;
1096 stats->rx_length_errors = netdev->stats.rx_length_errors;
1097 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1098 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1100 stats->tx_errors = netdev->stats.tx_errors;
1101 stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
1102 stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
1103 stats->collisions = netdev->stats.collisions;
1104 stats->rx_over_errors = netdev->stats.rx_over_errors;
1105 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1106 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1107 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1108 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1109 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1110 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1111 stats->tx_window_errors = netdev->stats.tx_window_errors;
1112 stats->rx_compressed = netdev->stats.rx_compressed;
1113 stats->tx_compressed = netdev->stats.tx_compressed;
1116 static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1117 enum hns3_udp_tnl_type type)
1119 struct hns3_nic_priv *priv = netdev_priv(netdev);
1120 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1121 struct hnae3_handle *h = priv->ae_handle;
1123 if (udp_tnl->used && udp_tnl->dst_port == port) {
1128 if (udp_tnl->used) {
1130 "UDP tunnel [%d], port [%d] offload\n", type, port);
1134 udp_tnl->dst_port = port;
1136 /* TBD send command to hardware to add port */
1137 if (h->ae_algo->ops->add_tunnel_udp)
1138 h->ae_algo->ops->add_tunnel_udp(h, port);
1141 static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1142 enum hns3_udp_tnl_type type)
1144 struct hns3_nic_priv *priv = netdev_priv(netdev);
1145 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1146 struct hnae3_handle *h = priv->ae_handle;
1148 if (!udp_tnl->used || udp_tnl->dst_port != port) {
1150 "Invalid UDP tunnel port %d\n", port);
1158 udp_tnl->dst_port = 0;
1159 /* TBD send command to hardware to del port */
1160 if (h->ae_algo->ops->del_tunnel_udp)
1161 h->ae_algo->ops->del_tunnel_udp(h, port);
1164 /* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
1165 * @netdev: This physical port's netdev
1166 * @ti: Tunnel information
1168 static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1169 struct udp_tunnel_info *ti)
1171 u16 port_n = ntohs(ti->port);
1174 case UDP_TUNNEL_TYPE_VXLAN:
1175 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1177 case UDP_TUNNEL_TYPE_GENEVE:
1178 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1181 netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1186 static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1187 struct udp_tunnel_info *ti)
1189 u16 port_n = ntohs(ti->port);
1192 case UDP_TUNNEL_TYPE_VXLAN:
1193 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1195 case UDP_TUNNEL_TYPE_GENEVE:
1196 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1203 static int hns3_setup_tc(struct net_device *netdev, u8 tc)
1205 struct hns3_nic_priv *priv = netdev_priv(netdev);
1206 struct hnae3_handle *h = priv->ae_handle;
1207 struct hnae3_knic_private_info *kinfo = &h->kinfo;
1211 if (tc > HNAE3_MAX_TC)
1214 if (kinfo->num_tc == tc)
1221 netdev_reset_tc(netdev);
1225 /* Set num_tc for netdev */
1226 ret = netdev_set_num_tc(netdev, tc);
1230 /* Set per TC queues for the VSI */
1231 for (i = 0; i < HNAE3_MAX_TC; i++) {
1232 if (kinfo->tc_info[i].enable)
1233 netdev_set_tc_queue(netdev,
1234 kinfo->tc_info[i].tc,
1235 kinfo->tc_info[i].tqp_count,
1236 kinfo->tc_info[i].tqp_offset);
1242 static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
1245 struct tc_mqprio_qopt *mqprio = type_data;
1247 if (type != TC_SETUP_MQPRIO)
1250 return hns3_setup_tc(dev, mqprio->num_tc);
1253 static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1254 __be16 proto, u16 vid)
1256 struct hns3_nic_priv *priv = netdev_priv(netdev);
1257 struct hnae3_handle *h = priv->ae_handle;
1260 if (h->ae_algo->ops->set_vlan_filter)
1261 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1266 static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1267 __be16 proto, u16 vid)
1269 struct hns3_nic_priv *priv = netdev_priv(netdev);
1270 struct hnae3_handle *h = priv->ae_handle;
1273 if (h->ae_algo->ops->set_vlan_filter)
1274 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1279 static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1280 u8 qos, __be16 vlan_proto)
1282 struct hns3_nic_priv *priv = netdev_priv(netdev);
1283 struct hnae3_handle *h = priv->ae_handle;
1286 if (h->ae_algo->ops->set_vf_vlan_filter)
1287 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1293 static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1295 struct hns3_nic_priv *priv = netdev_priv(netdev);
1296 struct hnae3_handle *h = priv->ae_handle;
1297 bool if_running = netif_running(netdev);
1300 if (!h->ae_algo->ops->set_mtu)
1303 /* if this was called with netdev up then bring netdevice down */
1305 (void)hns3_nic_net_stop(netdev);
1309 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1311 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1314 netdev->mtu = new_mtu;
1316 /* if the netdev was running earlier, bring it up again */
1317 if (if_running && hns3_nic_net_open(netdev))
1323 static const struct net_device_ops hns3_nic_netdev_ops = {
1324 .ndo_open = hns3_nic_net_open,
1325 .ndo_stop = hns3_nic_net_stop,
1326 .ndo_start_xmit = hns3_nic_net_xmit,
1327 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
1328 .ndo_change_mtu = hns3_nic_change_mtu,
1329 .ndo_set_features = hns3_nic_set_features,
1330 .ndo_get_stats64 = hns3_nic_get_stats64,
1331 .ndo_setup_tc = hns3_nic_setup_tc,
1332 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1333 .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
1334 .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
1335 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1336 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1337 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1340 /* hns3_probe - Device initialization routine
1341 * @pdev: PCI device information struct
1342 * @ent: entry in hns3_pci_tbl
1344 * hns3_probe initializes a PF identified by a pci_dev structure.
1345 * The OS initialization, configuring of the PF private structure,
1346 * and a hardware reset occur.
1348 * Returns 0 on success, negative on failure
1350 static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1352 struct hnae3_ae_dev *ae_dev;
1355 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1362 ae_dev->pdev = pdev;
1363 ae_dev->flag = ent->driver_data;
1364 ae_dev->dev_type = HNAE3_DEV_KNIC;
1365 pci_set_drvdata(pdev, ae_dev);
1367 return hnae3_register_ae_dev(ae_dev);
1370 /* hns3_remove - Device removal routine
1371 * @pdev: PCI device information struct
1373 static void hns3_remove(struct pci_dev *pdev)
1375 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1377 hnae3_unregister_ae_dev(ae_dev);
1379 devm_kfree(&pdev->dev, ae_dev);
1381 pci_set_drvdata(pdev, NULL);
1384 static struct pci_driver hns3_driver = {
1385 .name = hns3_driver_name,
1386 .id_table = hns3_pci_tbl,
1387 .probe = hns3_probe,
1388 .remove = hns3_remove,
1391 /* set the default features for the hns3 netdev */
1392 static void hns3_set_default_feature(struct net_device *netdev)
1394 netdev->priv_flags |= IFF_UNICAST_FLT;
1396 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1397 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1398 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1399 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1400 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1402 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1404 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1406 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1407 NETIF_F_HW_VLAN_CTAG_FILTER |
1408 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1409 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1410 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1411 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1413 netdev->vlan_features |=
1414 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1415 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1416 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1417 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1418 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1420 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1421 NETIF_F_HW_VLAN_CTAG_FILTER |
1422 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1423 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1424 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1425 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1428 static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1429 struct hns3_desc_cb *cb)
1431 unsigned int order = hnae_page_order(ring);
1434 p = dev_alloc_pages(order);
1439 cb->page_offset = 0;
1441 cb->buf = page_address(p);
1442 cb->length = hnae_page_size(ring);
1443 cb->type = DESC_TYPE_PAGE;
1445 memset(cb->buf, 0, cb->length);
1450 static void hns3_free_buffer(struct hns3_enet_ring *ring,
1451 struct hns3_desc_cb *cb)
1453 if (cb->type == DESC_TYPE_SKB)
1454 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1455 else if (!HNAE3_IS_TX_RING(ring))
1456 put_page((struct page *)cb->priv);
1457 memset(cb, 0, sizeof(*cb));
1460 static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1462 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1463 cb->length, ring_to_dma_dir(ring));
1465 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1471 static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1472 struct hns3_desc_cb *cb)
1474 if (cb->type == DESC_TYPE_SKB)
1475 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1476 ring_to_dma_dir(ring));
1478 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1479 ring_to_dma_dir(ring));
1482 static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1484 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1485 ring->desc[i].addr = 0;
1488 static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1490 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1492 if (!ring->desc_cb[i].dma)
1495 hns3_buffer_detach(ring, i);
1496 hns3_free_buffer(ring, cb);
1499 static void hns3_free_buffers(struct hns3_enet_ring *ring)
1503 for (i = 0; i < ring->desc_num; i++)
1504 hns3_free_buffer_detach(ring, i);
1507 /* free desc along with its attached buffer */
1508 static void hns3_free_desc(struct hns3_enet_ring *ring)
1510 hns3_free_buffers(ring);
1512 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1513 ring->desc_num * sizeof(ring->desc[0]),
1515 ring->desc_dma_addr = 0;
1520 static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1522 int size = ring->desc_num * sizeof(ring->desc[0]);
1524 ring->desc = kzalloc(size, GFP_KERNEL);
1528 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1529 size, DMA_BIDIRECTIONAL);
1530 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1531 ring->desc_dma_addr = 0;
1540 static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1541 struct hns3_desc_cb *cb)
1545 ret = hns3_alloc_buffer(ring, cb);
1549 ret = hns3_map_buffer(ring, cb);
1556 hns3_free_buffer(ring, cb);
1561 static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1563 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1568 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1573 /* Allocate memory for raw packet buffers and map them for DMA */
1574 static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1578 for (i = 0; i < ring->desc_num; i++) {
1579 ret = hns3_alloc_buffer_attach(ring, i);
1581 goto out_buffer_fail;
1587 for (j = i - 1; j >= 0; j--)
1588 hns3_free_buffer_detach(ring, j);
1592 /* detach an in-use buffer and replace it with a reserved one */
1593 static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1594 struct hns3_desc_cb *res_cb)
1596 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1597 ring->desc_cb[i] = *res_cb;
1598 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1599 ring->desc[i].rx.bd_base_info = 0;
1602 static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1604 ring->desc_cb[i].reuse_flag = 0;
1605 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1606 + ring->desc_cb[i].page_offset);
1607 ring->desc[i].rx.bd_base_info = 0;
1610 static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1613 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1615 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1616 (*bytes) += desc_cb->length;
1617 /* desc_cb will be cleaned by hns3_free_buffer_detach */
1618 hns3_free_buffer_detach(ring, ring->next_to_clean);
1620 ring_ptr_move_fw(ring, next_to_clean);
1623 static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1625 int u = ring->next_to_use;
1626 int c = ring->next_to_clean;
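/* A head reported by hardware is valid only if it lies in the
 * half-open interval (next_to_clean, next_to_use], taking ring
 * wrap-around into account. E.g. with desc_num = 8, c = 6 and
 * u = 2, the valid heads are 7, 0, 1 and 2.
 */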
1628 if (unlikely(h > ring->desc_num))
1631 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1634 int hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
1636 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1637 struct netdev_queue *dev_queue;
1641 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1642 rmb(); /* Make sure head is ready before touch any data */
1644 if (is_ring_empty(ring) || head == ring->next_to_clean)
1645 return 0; /* no data to poll */
1647 if (!is_valid_clean_head(ring, head)) {
1648 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1649 ring->next_to_use, ring->next_to_clean);
1651 u64_stats_update_begin(&ring->syncp);
1652 ring->stats.io_err_cnt++;
1653 u64_stats_update_end(&ring->syncp);
1659 while (head != ring->next_to_clean && budget) {
1660 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1661 /* Issue prefetch for next Tx descriptor */
1662 prefetch(&ring->desc_cb[ring->next_to_clean]);
1666 ring->tqp_vector->tx_group.total_bytes += bytes;
1667 ring->tqp_vector->tx_group.total_packets += pkts;
1669 u64_stats_update_begin(&ring->syncp);
1670 ring->stats.tx_bytes += bytes;
1671 ring->stats.tx_pkts += pkts;
1672 u64_stats_update_end(&ring->syncp);
1674 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1675 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1677 if (unlikely(pkts && netif_carrier_ok(netdev) &&
1678 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1679 /* Make sure that anybody stopping the queue after this
1680 * sees the new next_to_clean.
1683 if (netif_tx_queue_stopped(dev_queue)) {
1684 netif_tx_wake_queue(dev_queue);
1685 ring->stats.restart_queue++;
1692 static int hns3_desc_unused(struct hns3_enet_ring *ring)
1694 int ntc = ring->next_to_clean;
1695 int ntu = ring->next_to_use;
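/* Unused BDs are those between next_to_use and next_to_clean.
 * E.g. with desc_num = 1024, ntc = 10 and ntu = 20 this yields
 * 1024 + 10 - 20 = 1014 unused descriptors.
 */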
1697 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1701 hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleand_count)
1703 struct hns3_desc_cb *desc_cb;
1704 struct hns3_desc_cb res_cbs;
1707 for (i = 0; i < cleand_count; i++) {
1708 desc_cb = &ring->desc_cb[ring->next_to_use];
1709 if (desc_cb->reuse_flag) {
1710 u64_stats_update_begin(&ring->syncp);
1711 ring->stats.reuse_pg_cnt++;
1712 u64_stats_update_end(&ring->syncp);
1714 hns3_reuse_buffer(ring, ring->next_to_use);
1716 ret = hns3_reserve_buffer_map(ring, &res_cbs);
1718 u64_stats_update_begin(&ring->syncp);
1719 ring->stats.sw_err_cnt++;
1720 u64_stats_update_end(&ring->syncp);
1722 netdev_err(ring->tqp->handle->kinfo.netdev,
1723 "hnae reserve buffer map failed.\n");
1726 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1729 ring_ptr_move_fw(ring, next_to_use);
1732 wmb(); /* Make sure all data has been written before submitting */
1733 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1736 /* hns3_nic_get_headlen - determine size of header for LRO/GRO
1737 * @data: pointer to the start of the headers
1738 * @max_size: total length of section to find headers in
1740 * This function is meant to determine the length of headers that will
1741 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1742 * motivation of doing this is to only perform one pull for IPv4 TCP
1743 * packets so that we can do basic things like calculating the gso_size
1744 * based on the average data per packet.
1746 static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1747 unsigned int max_size)
1749 unsigned char *network;
1752 /* This should never happen, but better safe than sorry */
1753 if (max_size < ETH_HLEN)
1756 /* Initialize network frame pointer */
1759 /* Set first protocol and move network header forward */
1760 network += ETH_HLEN;
1762 /* Handle any vlan tag if present */
1763 if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1764 == HNS3_RX_FLAG_VLAN_PRESENT) {
1765 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1768 network += VLAN_HLEN;
1771 /* Handle L3 protocols */
1772 if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1773 == HNS3_RX_FLAG_L3ID_IPV4) {
1774 if ((typeof(max_size))(network - data) >
1775 (max_size - sizeof(struct iphdr)))
1778 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1779 hlen = (network[0] & 0x0F) << 2;
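/* e.g. the common first byte 0x45 has IHL = 5, giving a
 * 5 << 2 = 20 byte IPv4 header.
 */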
1781 /* Verify hlen meets minimum size requirements */
1782 if (hlen < sizeof(struct iphdr))
1783 return network - data;
1785 /* Record next protocol if header is present */
1786 } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1787 == HNS3_RX_FLAG_L3ID_IPV6) {
1788 if ((typeof(max_size))(network - data) >
1789 (max_size - sizeof(struct ipv6hdr)))
1792 /* Record next protocol */
1793 hlen = sizeof(struct ipv6hdr);
1795 return network - data;
1798 /* Relocate pointer to start of L4 header */
1801 /* Finally sort out TCP/UDP */
1802 if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1803 == HNS3_RX_FLAG_L4ID_TCP) {
1804 if ((typeof(max_size))(network - data) >
1805 (max_size - sizeof(struct tcphdr)))
1808 /* Access doff as a u8 to avoid unaligned access on ia64 */
1809 hlen = (network[12] & 0xF0) >> 2;
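/* e.g. byte 12 of a TCP header with no options is 0x50,
 * giving (0x50 & 0xF0) >> 2 = 20 bytes.
 */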
1811 /* Verify hlen meets minimum size requirements */
1812 if (hlen < sizeof(struct tcphdr))
1813 return network - data;
1816 } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1817 == HNS3_RX_FLAG_L4ID_UDP) {
1818 if ((typeof(max_size))(network - data) >
1819 (max_size - sizeof(struct udphdr)))
1822 network += sizeof(struct udphdr);
1825 /* If everything has gone correctly, network should point to the
1826 * data section of the packet, i.e. the end of the headers.
1827 * If not, it probably represents the end of the last recognized header.
1830 if ((typeof(max_size))(network - data) < max_size)
1831 return network - data;
1836 static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
1837 struct hns3_enet_ring *ring, int pull_len,
1838 struct hns3_desc_cb *desc_cb)
1840 struct hns3_desc *desc;
1845 twobufs = ((PAGE_SIZE < 8192) &&
1846 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
1848 desc = &ring->desc[ring->next_to_clean];
1849 size = le16_to_cpu(desc->rx.size);
1852 truesize = hnae_buf_size(ring);
1854 truesize = ALIGN(size, L1_CACHE_BYTES);
1855 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
1858 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
1859 size - pull_len, truesize - pull_len);
1861 /* Avoid re-using pages from a remote NUMA node; default to no reuse */
1862 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
1866 /* If we are the only owner of the page, we can reuse it */
1867 if (likely(page_count(desc_cb->priv) == 1)) {
1868 /* Flip the page offset to the other buffer */
1869 desc_cb->page_offset ^= truesize;
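/* With a 4K page split into two 2K buffers, XOR-ing the offset with
 * truesize (2048) toggles between the first and second half of the
 * page, so the two halves are used alternately.
 */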
1871 desc_cb->reuse_flag = 1;
1872 /* bump ref count on page before it is given */
1873 get_page(desc_cb->priv);
1878 /* Advance offset to the next cache-line-aligned position */
1879 desc_cb->page_offset += truesize;
1881 if (desc_cb->page_offset <= last_offset) {
1882 desc_cb->reuse_flag = 1;
1883 /* Bump ref count on page before it is given */
1884 get_page(desc_cb->priv);
1888 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
1889 struct hns3_desc *desc)
1891 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1892 int l3_type, l4_type;
1897 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1898 l234info = le32_to_cpu(desc->rx.l234_info);
1900 skb->ip_summed = CHECKSUM_NONE;
1902 skb_checksum_none_assert(skb);
1904 if (!(netdev->features & NETIF_F_RXCSUM))
1907 /* check if hardware has done checksum */
1908 if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
1911 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
1912 hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
1913 hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
1914 hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
1915 netdev_err(netdev, "L3/L4 error pkt\n");
1916 u64_stats_update_begin(&ring->syncp);
1917 ring->stats.l3l4_csum_err++;
1918 u64_stats_update_end(&ring->syncp);
1923 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
1925 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
1928 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
1930 case HNS3_OL4_TYPE_MAC_IN_UDP:
1931 case HNS3_OL4_TYPE_NVGRE:
1932 skb->csum_level = 1;
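/* fall through: the inner checksum is then validated like a
 * normal, non-tunneled packet below.
 */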
1933 case HNS3_OL4_TYPE_NO_TUN:
1934 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
1935 if (l3_type == HNS3_L3_TYPE_IPV4 ||
1936 (l3_type == HNS3_L3_TYPE_IPV6 &&
1937 (l4_type == HNS3_L4_TYPE_UDP ||
1938 l4_type == HNS3_L4_TYPE_TCP ||
1939 l4_type == HNS3_L4_TYPE_SCTP)))
1940 skb->ip_summed = CHECKSUM_UNNECESSARY;
1945 static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
1946 struct sk_buff **out_skb, int *out_bnum)
1948 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1949 struct hns3_desc_cb *desc_cb;
1950 struct hns3_desc *desc;
1951 struct sk_buff *skb;
1959 desc = &ring->desc[ring->next_to_clean];
1960 desc_cb = &ring->desc_cb[ring->next_to_clean];
1964 length = le16_to_cpu(desc->rx.pkt_len);
1965 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
1966 l234info = le32_to_cpu(desc->rx.l234_info);
1968 /* Check valid BD */
1969 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
1972 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
1974 /* Prefetch the first cache line of the first page.
1975 * The idea is to cache a few bytes of the packet header. Our L1 cache
1976 * line size is 64B, so we need to prefetch twice to cover 128B. Some
1977 * CPUs, however, have larger L1 caches with 128B cache
1978 * lines; in such a case, a single prefetch suffices to cache the
1979 * relevant part of the header.
1982 #if L1_CACHE_BYTES < 128
1983 prefetch(va + L1_CACHE_BYTES);
1986 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
1988 if (unlikely(!skb)) {
1989 netdev_err(netdev, "alloc rx skb fail\n");
1991 u64_stats_update_begin(&ring->syncp);
1992 ring->stats.sw_err_cnt++;
1993 u64_stats_update_end(&ring->syncp);
1998 prefetchw(skb->data);
2001 if (length <= HNS3_RX_HEAD_SIZE) {
2002 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2004 /* We can reuse buffer as-is, just make sure it is local */
2005 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2006 desc_cb->reuse_flag = 1;
2007 else /* This page cannot be reused so discard it */
2008 put_page(desc_cb->priv);
2010 ring_ptr_move_fw(ring, next_to_clean);
2012 u64_stats_update_begin(&ring->syncp);
2013 ring->stats.seg_pkt_cnt++;
2014 u64_stats_update_end(&ring->syncp);
2016 pull_len = hns3_nic_get_headlen(va, l234info,
2018 memcpy(__skb_put(skb, pull_len), va,
2019 ALIGN(pull_len, sizeof(long)));
2021 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2022 ring_ptr_move_fw(ring, next_to_clean);
2024 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2025 desc = &ring->desc[ring->next_to_clean];
2026 desc_cb = &ring->desc_cb[ring->next_to_clean];
2027 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2028 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2029 ring_ptr_move_fw(ring, next_to_clean);
2036 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2037 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2038 ((u64 *)desc)[0], ((u64 *)desc)[1]);
2039 u64_stats_update_begin(&ring->syncp);
2040 ring->stats.non_vld_descs++;
2041 u64_stats_update_end(&ring->syncp);
2043 dev_kfree_skb_any(skb);
2047 if (unlikely((!desc->rx.pkt_len) ||
2048 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2049 netdev_err(netdev, "truncated pkt\n");
2050 u64_stats_update_begin(&ring->syncp);
2051 ring->stats.err_pkt_len++;
2052 u64_stats_update_end(&ring->syncp);
2054 dev_kfree_skb_any(skb);
2058 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2059 netdev_err(netdev, "L2 error pkt\n");
2060 u64_stats_update_begin(&ring->syncp);
2061 ring->stats.l2_err++;
2062 u64_stats_update_end(&ring->syncp);
2064 dev_kfree_skb_any(skb);
2068 u64_stats_update_begin(&ring->syncp);
2069 ring->stats.rx_pkts++;
2070 ring->stats.rx_bytes += skb->len;
2071 u64_stats_update_end(&ring->syncp);
2073 ring->tqp_vector->rx_group.total_bytes += skb->len;
2075 hns3_rx_checksum(ring, skb, desc);
2079 static int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget)
2081 #define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2082 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2083 int recv_pkts, recv_bds, clean_count, err;
2084 int unused_count = hns3_desc_unused(ring);
2085 struct sk_buff *skb = NULL;
2088 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2089 rmb(); /* Make sure num is read before any descriptor data is touched */
2091 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2092 num -= unused_count;
2094 while (recv_pkts < budget && recv_bds < num) {
2095 /* Reuse or realloc buffers */
2096 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2097 hns3_nic_alloc_rx_buffers(ring,
2098 clean_count + unused_count);
2100 unused_count = hns3_desc_unused(ring);
2104 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2105 if (unlikely(!skb)) /* This fault cannot be repaired */
2109 clean_count += bnum;
2110 if (unlikely(err)) { /* skip over this erroneous packet */
2115 /* hand the packet up to the network stack */
2116 skb->protocol = eth_type_trans(skb, netdev);
2117 (void)napi_gro_receive(&ring->tqp_vector->napi, skb);
2123 /* Make sure all data has been written before submitting */
2124 if (clean_count + unused_count > 0)
2125 hns3_nic_alloc_rx_buffers(ring,
2126 clean_count + unused_count);
2131 static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2133 #define HNS3_RX_ULTRA_PACKET_RATE 40000
2134 enum hns3_flow_level_range new_flow_level;
2135 struct hns3_enet_tqp_vector *tqp_vector;
2136 int packets_per_secs;
2137 int bytes_per_usecs;
2141 if (!ring_group->int_gl)
2144 if (ring_group->total_packets == 0) {
2145 ring_group->int_gl = HNS3_INT_GL_50K;
2146 ring_group->flow_level = HNS3_FLOW_LOW;
2150 /* Simple throttle rate management
2151 * 0-10MB/s lower (50000 ints/s)
2152 * 10-20MB/s middle (20000 ints/s)
2153 * 20-1249MB/s high (18000 ints/s)
2154 * > 40000pps ultra (8000 ints/s)
2156 new_flow_level = ring_group->flow_level;
2157 new_int_gl = ring_group->int_gl;
2158 tqp_vector = ring_group->ring->tqp_vector;
2159 usecs = (ring_group->int_gl << 1);
2160 bytes_per_usecs = ring_group->total_bytes / usecs;
2161 /* 1000000 microseconds per second */
2162 packets_per_secs = ring_group->total_packets * 1000000 / usecs;
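/* Illustrative example (assuming the GL value counts in 2 us units,
 * as the shift by one suggests): int_gl = 10 gives a 20 us window;
 * 100 packets observed in that window would then scale to
 * 100 * 1000000 / 20 = 5,000,000 packets per second.
 */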
2164 switch (new_flow_level) {
2166 if (bytes_per_usecs > 10)
2167 new_flow_level = HNS3_FLOW_MID;
2170 if (bytes_per_usecs > 20)
2171 new_flow_level = HNS3_FLOW_HIGH;
2172 else if (bytes_per_usecs <= 10)
2173 new_flow_level = HNS3_FLOW_LOW;
2175 case HNS3_FLOW_HIGH:
2176 case HNS3_FLOW_ULTRA:
2178 if (bytes_per_usecs <= 20)
2179 new_flow_level = HNS3_FLOW_MID;
2183 if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2184 (&tqp_vector->rx_group == ring_group))
2185 new_flow_level = HNS3_FLOW_ULTRA;
2187 switch (new_flow_level) {
2189 new_int_gl = HNS3_INT_GL_50K;
2192 new_int_gl = HNS3_INT_GL_20K;
2194 case HNS3_FLOW_HIGH:
2195 new_int_gl = HNS3_INT_GL_18K;
2197 case HNS3_FLOW_ULTRA:
2198 new_int_gl = HNS3_INT_GL_8K;
2204 ring_group->total_bytes = 0;
2205 ring_group->total_packets = 0;
2206 ring_group->flow_level = new_flow_level;
2207 if (new_int_gl != ring_group->int_gl) {
2208 ring_group->int_gl = new_int_gl;
2214 static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2216 u16 rx_int_gl, tx_int_gl;
2219 rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2220 tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2221 rx_int_gl = tqp_vector->rx_group.int_gl;
2222 tx_int_gl = tqp_vector->tx_group.int_gl;
2224 if (rx_int_gl > tx_int_gl) {
2225 tqp_vector->tx_group.int_gl = rx_int_gl;
2226 tqp_vector->tx_group.flow_level =
2227 tqp_vector->rx_group.flow_level;
2228 hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2230 tqp_vector->rx_group.int_gl = tx_int_gl;
2231 tqp_vector->rx_group.flow_level =
2232 tqp_vector->tx_group.flow_level;
2233 hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2238 static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2240 struct hns3_enet_ring *ring;
2241 int rx_pkt_total = 0;
2243 struct hns3_enet_tqp_vector *tqp_vector =
2244 container_of(napi, struct hns3_enet_tqp_vector, napi);
2245 bool clean_complete = true;
2248 /* Since the actual Tx work is minimal, we can give the Tx a larger
2249 * budget and be more aggressive about cleaning up the Tx descriptors.
2251 hns3_for_each_ring(ring, tqp_vector->tx_group) {
2252 if (!hns3_clean_tx_ring(ring, budget))
2253 clean_complete = false;
2256 /* make sure the rx ring budget is not smaller than 1 */
2257 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2259 hns3_for_each_ring(ring, tqp_vector->rx_group) {
2260 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget);
2262 if (rx_cleaned >= rx_budget)
2263 clean_complete = false;
2265 rx_pkt_total += rx_cleaned;
2268 tqp_vector->rx_group.total_packets += rx_pkt_total;
2270 if (!clean_complete)
2273 napi_complete(napi);
2274 hns3_update_new_int_gl(tqp_vector);
2275 hns3_mask_vector_irq(tqp_vector, 1);
2277 return rx_pkt_total;
2280 static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2281 struct hnae3_ring_chain_node *head)
2283 struct pci_dev *pdev = tqp_vector->handle->pdev;
2284 struct hnae3_ring_chain_node *cur_chain = head;
2285 struct hnae3_ring_chain_node *chain;
2286 struct hns3_enet_ring *tx_ring;
2287 struct hns3_enet_ring *rx_ring;
2289 tx_ring = tqp_vector->tx_group.ring;
2291 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2292 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2293 HNAE3_RING_TYPE_TX);
2295 cur_chain->next = NULL;
2297 while (tx_ring->next) {
2298 tx_ring = tx_ring->next;
2300 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2303 goto err_free_chain;
2305 cur_chain->next = chain;
2306 chain->tqp_index = tx_ring->tqp->tqp_index;
2307 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2308 HNAE3_RING_TYPE_TX);
2314 rx_ring = tqp_vector->rx_group.ring;
2315 if (!tx_ring && rx_ring) {
2316 cur_chain->next = NULL;
2317 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2318 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2319 HNAE3_RING_TYPE_RX);
2321 rx_ring = rx_ring->next;
2325 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2327 goto err_free_chain;
2329 cur_chain->next = chain;
2330 chain->tqp_index = rx_ring->tqp->tqp_index;
2331 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2332 HNAE3_RING_TYPE_RX);
2335 rx_ring = rx_ring->next;
2341 cur_chain = head->next;
2343 chain = cur_chain->next;
2344 devm_kfree(&pdev->dev, chain);
2351 static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2352 struct hnae3_ring_chain_node *head)
2354 struct pci_dev *pdev = tqp_vector->handle->pdev;
2355 struct hnae3_ring_chain_node *chain_tmp, *chain;
2360 chain_tmp = chain->next;
2361 devm_kfree(&pdev->dev, chain);
2366 static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2367 struct hns3_enet_ring *ring)
2369 ring->next = group->ring;
static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* RSS size, cpu online and vector_num should be the same */
	/* Should consider 2p/4p later */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);
	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector)
		return -ENOMEM;

	for (i = 0; i < tqp_num; i++) {
		u16 vector_i = i % vector_num;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       priv->ring_data[i].ring);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       priv->ring_data[i + tqp_num].ring);

		tqp_vector->idx = vector_i;
		tqp_vector->mask_addr = vector[vector_i].io_addr;
		tqp_vector->vector_irq = vector[vector_i].vector;
		tqp_vector->num_tqps++;

		priv->ring_data[i].ring->tqp_vector = tqp_vector;
		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
	}

	for (i = 0; i < vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		hns3_vector_gl_rl_init(tqp_vector);
		tqp_vector->handle = h;

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			goto out;

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			goto out;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}
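
/* hns3_nic_uninit_vector_data - undo hns3_nic_init_vector_data: unmap the
 * rings from every vector, release its irq and delete its NAPI context.
 */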
static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			return ret;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
			(void)irq_set_affinity_hint(
				priv->tqp_vector[i].vector_irq,
				NULL);
			free_irq(priv->tqp_vector[i].vector_irq,
				 &priv->tqp_vector[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;

		netif_napi_del(&priv->tqp_vector[i].napi);
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);

	return 0;
}
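
/* hns3_ring_get_cfg - allocate and initialize a single Tx or Rx ring.
 * ring_data[] keeps the Tx rings in slots [0, num_tqps) and the Rx rings
 * in slots [num_tqps, 2 * num_tqps), both indexed by tqp_index.
 */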
static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			     int ring_type)
{
	struct hns3_nic_ring_data *ring_data = priv->ring_data;
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_ring *ring;

	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring_data[q->tqp_index].ring = ring;
		ring_data[q->tqp_index].queue_index = q->tqp_index;
		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
	} else {
		ring_data[q->tqp_index + queue_num].ring = ring;
		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
		ring->io_base = q->io_base;
	}

	hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = q->desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	return 0;
}
static int hns3_queue_to_ring(struct hnae3_queue *tqp,
			      struct hns3_nic_priv *priv)
{
	int ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	if (ret)
		return ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
	if (ret) {
		devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
		return ret;
	}

	return 0;
}
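
/* hns3_get_ring_config - allocate the ring_data table and one Tx/Rx ring
 * pair per TQP, unwinding any rings already created on failure.
 */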
static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
				       sizeof(*priv->ring_data) * 2,
				       GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	while (i--) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}

	devm_kfree(&pdev->dev, priv->ring_data);
	return ret;
}
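
/* hns3_alloc_ring_memory - allocate the descriptor control blocks and the
 * descriptors of a ring; Rx rings additionally get their data buffers.
 */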
static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}
static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}
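
/* Translate a buffer size in bytes into the BD size type expected by the
 * ring registers; unsupported sizes fall back to the 2048 byte type.
 */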
static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}
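
/* hns3_init_ring_hw - program the descriptor base address, BD size type
 * and BD number of a ring into its Tx or Rx ring registers.
 */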
static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}
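
/* hns3_init_all_ring - allocate memory for every Tx and Rx ring and
 * program the hardware, tearing down already initialized rings on error.
 */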
static int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		hns3_init_ring_hw(priv->ring_data[i].ring);

		u64_stats_init(&priv->ring_data[i].ring->syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(priv->ring_data[j].ring);

	return -ENOMEM;
}
static int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		if (h->ae_algo->ops->reset_queue)
			h->ae_algo->ops->reset_queue(h, i);

		hns3_fini_ring(priv->ring_data[i].ring);
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	devm_kfree(priv->dev, priv->ring_data);

	return 0;
}
/* Set the mac addr if it is configured, or else leave it to the AE driver */
static void hns3_init_mac_addr(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];

	if (h->ae_algo->ops->get_mac_addr) {
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
	}

	/* Check if the MAC address is valid, if not get a random one */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	}

	if (h->ae_algo->ops->set_mac_addr)
		h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
}
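
/* Pick the TSO-aware descriptor fill and queue-stop helpers when the
 * netdev advertises TSO/TSO6, and the plain variants otherwise.
 */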
static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if ((netdev->features & NETIF_F_TSO) ||
	    (netdev->features & NETIF_F_TSO6)) {
		priv->ops.fill_desc = hns3_fill_desc_tso;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	} else {
		priv->ops.fill_desc = hns3_fill_desc;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}
}
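
/* hns3_client_init - init_instance callback of the KNIC client.
 * Allocates the netdev, initializes the MAC address, features, rings and
 * vectors, and finally registers the netdev with the networking core.
 */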
static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
				   handle->kinfo.num_tqps);
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);
	hns3_nic_set_priv_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	return ret;

out_reg_netdev_fail:
out_init_ring_data:
	(void)hns3_nic_uninit_vector_data(priv);
	priv->ring_data = NULL;
out_init_vector_data:
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}
static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret)
		netdev_err(netdev, "uninit vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	priv->ring_data = NULL;

	free_netdev(netdev);
}
static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
		netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev_info(netdev, "link down\n");
	}
}
static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
};
/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	INIT_LIST_HEAD(&client.node);

	ret = hnae3_register_client(&client);
	if (ret)
		return ret;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		hnae3_unregister_client(&client);

	return ret;
}
module_init(hns3_init_module);
/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
}
module_exit(hns3_exit_module);
MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");