/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"

#define NIC_MAX_Q_PER_VF	16
#define HNS_NIC_TX_TIMEOUT	(5 * HZ)

#define SERVICE_TIMER_HZ	(1 * HZ)

#define RCB_IRQ_NOT_INITED	0
#define RCB_IRQ_INITED		1
#define HNS_BUFFER_SIZE_2048	2048

#define BD_MAX_SEND_SIZE	8191
#define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))

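/* Fill one V2 hardware TX buffer descriptor (BD). "size" is what gets
 * recorded in the software control block for later unmapping, while
 * "send_sz" is what the hardware actually transmits from this BD; the
 * TSO path below relies on the two being different when it splits one
 * mapped buffer across several BDs.
 */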
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
			    int send_sz, dma_addr_t dma, int frag_end,
			    int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)send_sz);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management packets */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP &&
				    skb_is_gso(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}

static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
			buf_num, type, mtu);
}

static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 }, /* enet v1 */
	{ "HISI00C2", 0 }, /* enet v2 */
	{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);

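/* V1 counterpart of fill_v2_desc_hw(): the V1 chip has no TSO engine, so
 * this variant only sets the valid/buffer-number fields and the L3/L4
 * checksum-offload flags in the descriptor.
 */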
static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset = 0;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has no l3 cs, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}

static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manually split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

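/* Split one DMA-mapped buffer over as many BDs as BD_MAX_SEND_SIZE
 * requires. Only the first BD carries the control-block "size" (so the
 * cleanup path unmaps the buffer exactly once); "send_sz" limits what the
 * hardware sends from each BD. For example, a 20000-byte fragment becomes
 * three BDs sending 8191, 8191 and 3618 bytes.
 */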
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than hardware limit, split this frag */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
				(k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
				dma + BD_MAX_SEND_SIZE * k,
				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
				buf_num,
				(type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
				mtu);
}

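/* Main TX path: reserve ring space (possibly linearizing the skb), DMA-map
 * the linear head and each page fragment, fill one or more BDs per mapping
 * via the version-specific fill_desc op, then ring the doorbell. On a
 * mapping failure every BD filled so far is unwound and unmapped.
 */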
netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
				struct sk_buff *skb,
				struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_ring *ring = ring_data->ring;
	struct device *dev = ring_to_dev(ring);
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* complete translating all packets */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	netif_trans_update(ndev);
	ndev->stats.tx_bytes += skb->len;
	ndev->stats.tx_packets++;

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);

	return NETDEV_TX_OK;

out_map_frag_fail:
	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:
	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}

/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @flag: RX descriptor flags describing VLAN/L3/L4 recognition
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads.  The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 */
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
					unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* initialize network frame pointer */
	network = data;

	/* set first protocol and move network header forward */
	network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
		== HNS_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* relocate pointer to start of L4 header */
	network += hlen;

	/* finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}

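/* RX buffer recycling. With 2048-byte buffers and small pages the page is
 * halved and the offset flipped between the two halves; otherwise the
 * offset just advances by the aligned packet size until the page is
 * exhausted. Either way the page is only reused when it stays on the
 * local NUMA node.
 */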
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize);

	/* avoid re-using remote pages; default is not to reuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}

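/* V2 hardware reports the buffer count biased by one, hence the "+ 1"
 * below; V1 reports it directly.
 */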
static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}

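/* Assemble one packet from up to "bnum" RX BDs: short packets are copied
 * whole into a fresh skb so their buffer can be recycled immediately,
 * longer ones get only their headers pulled and the rest attached as page
 * fragments. Returns 0 on success or a negative error after freeing the
 * skb and advancing past the bad BDs.
 */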
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(ndev, "alloc rx skb fail\n");
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* exception path: free the skb and jump over the bad descriptors */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L3E_B) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_L4E_B))) {
		ring->stats.l3l4_csum_err++;
		return 0;
	}

	skb->ip_summed = CHECKSUM_UNNECESSARY;

	return 0;
}

static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all data is written before the doorbell */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

/* hand a completed skb to the IP stack through GRO */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
	ndev->last_rx = jiffies;
}

static int hns_desc_unused(struct hnae_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}

static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns_desc_unused(ring);

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data,
						 clean_count + unused_count);
			clean_count = 0;
			unused_count = hns_desc_unused(ring);
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* skip the erroneous packet */
			recv_pkts++;
			continue;
		}

		/* hand the packet to the IP stack */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
							ring_data, skb);
		recv_pkts++;
	}

out:
	/* refill the consumed buffers before returning */
	if (clean_count + unused_count > 0)
		hns_nic_alloc_rx_buffers(ring_data,
					 clean_count + unused_count);

	return recv_pkts;
}

static void hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	/* workaround for a hardware bug: re-check FBDNUM after enabling irq */
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num > 0) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static void hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num == 0)
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring, 0);
	else
		napi_schedule(&ring_data->napi);
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

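/* Sanity-check a hardware head pointer against the software ring state.
 * The ring is circular: with next_to_clean = 1000 and next_to_use = 10 on
 * a 1024-entry ring, a head above 1000 or at/below 10 is valid, anything
 * in between is not.
 */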
static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before calling this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

/* netif_tx_lock will turn down the performance, set only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
#else
#define NETIF_TX_LOCK(ndev)
#define NETIF_TX_UNLOCK(ndev)
#endif

/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touching any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean) {
		NETIF_TX_UNLOCK(ndev);
		return 0; /* no data to poll */
	}

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		NETIF_TX_UNLOCK(ndev);
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}

	/* update tx ring statistics. */
	ring->stats.tx_pkts += pkts;
	ring->stats.tx_bytes += bytes;

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}

static void hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		napi_schedule(&ring_data->napi);
	}
}

static void hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head == ring->next_to_clean)
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring, 0);
	else
		napi_schedule(&ring_data->napi);
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = ring->next_to_use; /* ntu: software-set ring position */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}

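/* Shared NAPI handler: TX and RX rings plug their own poll_one and
 * fini_process callbacks into hns_nic_ring_data, so one poll routine
 * serves both directions.
 */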
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	int clean_complete = ring_data->poll_one(
		ring_data, budget, ring_data->ex_process);

	if (clean_complete >= 0 && clean_complete < budget) {
		napi_complete(napi);
		ring_data->fini_process(ring_data);
		return 0;
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}

/**
 * hns_nic_adjust_link - adjust the network mode by the phy state or new param
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	if (ndev->phydev) {
		h->dev->ops->adjust_link(h, ndev->phydev->speed,
					 ndev->phydev->duplex);
		state = ndev->phydev->link;
	}
	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}

/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (unlikely(ret))
		return ret;

	phy_dev->supported &= h->if_support;
	phy_dev->advertising = phy_dev->supported;

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	if (h->phy_if == PHY_INTERFACE_MODE_SGMII)
		phy_stop(phy_dev);

	return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}

void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}

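/* Pin each ring's IRQ to a CPU. When there is one queue per possible CPU,
 * queue i (TX and RX alike) goes to CPU i; otherwise TX queue i goes to
 * CPU 2*i and RX queue i to CPU 2*i + 1.
 */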
static void hns_set_irq_affinity(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int cpu;
	cpumask_t mask;

	/* different irq balance for 16-core and 32-core systems */
	if (h->q_num == num_possible_cpus()) {
		for (i = 0; i < h->q_num * 2; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}
	} else {
		for (i = 0; i < h->q_num; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index * 2)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index * 2;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}
		for (i = h->q_num; i < h->q_num * 2; i++) {
			rd = &priv->ring_data[i];
			if (cpu_online(rd->queue_index * 2 + 1)) {
				cpumask_clear(&mask);
				cpu = rd->queue_index * 2 + 1;
				cpumask_set_cpu(cpu, &mask);
				(void)irq_set_affinity_hint(rd->ring->irq,
							    &mask);
			}
		}
	}
}

static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
{
	int i;

	for (i = 0; i < q_num * 2; i++) {
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			irq_set_affinity_hint(priv->ring_data[i].ring->irq,
					      NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
			priv->ring_data[i].ring->irq_init_flag =
				RCB_IRQ_NOT_INITED;
		}
	}
}

static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (i < h->q_num ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			goto out_free_irq;
		}
		disable_irq(rd->ring->irq);
		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	/* set cpu affinity */
	hns_set_irq_affinity(priv);

	return 0;

out_free_irq:
	hns_nic_free_irq(h->q_num, priv);
	return ret;
}

static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	if (!test_bit(NIC_STATE_DOWN, &priv->state))
		return 0;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	hns_nic_free_irq(h->q_num, priv);
	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}

static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}

void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	netif_trans_update(priv->netdev);
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);
	hns_nic_net_reset(netdev);
	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev, "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);

#define HNS_TX_TIMEO_LIMIT (40 * HZ)
static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
		ndev->watchdog_timeo *= 2;
		netdev_info(ndev, "watchdog_timo changed to %d.\n",
			    ndev->watchdog_timeo);
	} else {
		ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
		hns_tx_timeout_reset(priv);
	}
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct phy_device *phy_dev = netdev->phydev;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}

/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
		napi_schedule(&priv->ring_data[i].napi);
	local_irq_restore(flags);
}
#endif

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	assert(skb->queue_mapping < priv->ae_handle->q_num);

	return hns_nic_net_xmit_hw(ndev, skb,
				   &tx_ring_data(priv, skb->queue_mapping));
}

static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	/* MTU < 68 is an error and causes problems on some kernels */
	if (new_mtu < 68)
		return -EINVAL;

	if (!h->dev->ops->set_mtu)
		return -ENOTSUPP;

	if (netif_running(ndev)) {
		(void)hns_nic_net_stop(ndev);
		msleep(100);

		ret = h->dev->ops->set_mtu(h, new_mtu);
		if (ret)
			netdev_err(ndev, "set mtu fail, return value %d\n",
				   ret);

		if (hns_nic_net_open(ndev))
			netdev_err(ndev, "hns net open fail\n");
	} else {
		ret = h->dev->ops->set_mtu(h, new_mtu);
	}

	if (!ret)
		ndev->mtu = new_mtu;

	return ret;
}

static int hns_nic_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
			netdev_info(netdev, "enet v1 does not support tso!\n");
		break;
	default:
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* The chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		break;
	}
	netdev->features = features;
	return 0;
}

static netdev_features_t hns_nic_fix_features(
		struct net_device *netdev, netdev_features_t features)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	switch (priv->enet_ver) {
	case AE_VERSION_1:
		features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
			      NETIF_F_HW_VLAN_CTAG_FILTER);
		break;
	default:
		break;
	}
	return features;
}

/**
 * hns_set_multicast_list - set multicast mac address list
 * @ndev: net device
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct netdev_hw_addr *ha = NULL;

	if (!h) {
		netdev_err(ndev, "hnae handle is null\n");
		return;
	}

	if (h->dev->ops->set_mc_addr) {
		netdev_for_each_mc_addr(ha, ndev)
			if (h->dev->ops->set_mc_addr(h, ha->addr))
				netdev_err(ndev, "set multicast fail\n");
	}
}

void hns_nic_set_rx_mode(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->dev->ops->set_promisc_mode) {
		if (ndev->flags & IFF_PROMISC)
			h->dev->ops->set_promisc_mode(h, 1);
		else
			h->dev->ops->set_promisc_mode(h, 0);
	}

	hns_set_multicast_list(ndev);
}

struct rtnl_link_stats64 *hns_nic_get_stats64(struct net_device *ndev,
					      struct rtnl_link_stats64 *stats)
{
	int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;

	for (idx = 0; idx < h->q_num; idx++) {
		tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
		tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
		rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
		rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = ndev->stats.rx_errors;
	stats->multicast = ndev->stats.multicast;
	stats->rx_length_errors = ndev->stats.rx_length_errors;
	stats->rx_crc_errors = ndev->stats.rx_crc_errors;
	stats->rx_missed_errors = ndev->stats.rx_missed_errors;

	stats->tx_errors = ndev->stats.tx_errors;
	stats->rx_dropped = ndev->stats.rx_dropped;
	stats->tx_dropped = ndev->stats.tx_dropped;
	stats->collisions = ndev->stats.collisions;
	stats->rx_over_errors = ndev->stats.rx_over_errors;
	stats->rx_frame_errors = ndev->stats.rx_frame_errors;
	stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = ndev->stats.tx_window_errors;
	stats->rx_compressed = ndev->stats.rx_compressed;
	stats->tx_compressed = ndev->stats.tx_compressed;

	return stats;
}

static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
		     void *accel_priv, select_queue_fallback_t fallback)
{
	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	/* fix hardware broadcast/multicast packets queue loopback */
	if (!AE_IS_VER1(priv->enet_ver) &&
	    is_multicast_ether_addr(eth_hdr->h_dest))
		return 0;
	else
		return fallback(ndev, skb);
}

static const struct net_device_ops hns_nic_netdev_ops = {
	.ndo_open = hns_nic_net_open,
	.ndo_stop = hns_nic_net_stop,
	.ndo_start_xmit = hns_nic_net_xmit,
	.ndo_tx_timeout = hns_nic_net_timeout,
	.ndo_set_mac_address = hns_nic_net_set_mac_address,
	.ndo_change_mtu = hns_nic_change_mtu,
	.ndo_do_ioctl = hns_nic_do_ioctl,
	.ndo_set_features = hns_nic_set_features,
	.ndo_fix_features = hns_nic_fix_features,
	.ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = hns_nic_poll_controller,
#endif
	.ndo_set_rx_mode = hns_nic_set_rx_mode,
	.ndo_select_queue = hns_nic_select_queue,
};

static void hns_nic_update_link_status(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (h->phy_dev) {
		if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
			return;

		(void)genphy_read_status(h->phy_dev);
	}
	hns_nic_adjust_link(netdev);
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	u32 *data, reg_num, i;

	if (ops->get_regs_len && ops->get_regs) {
		reg_num = ops->get_regs_len(priv->ae_handle);
		reg_num = (reg_num + 3ul) & ~3ul;
		data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
		if (data) {
			ops->get_regs(priv->ae_handle, data);
			for (i = 0; i < reg_num; i += 4)
				pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
					i, data[i], data[i + 1],
					data[i + 2], data[i + 3]);
			kfree(data);
		}
	}

	for (i = 0; i < h->q_num; i++) {
		pr_info("tx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->tx_ring.next_to_clean);
		pr_info("tx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->tx_ring.next_to_use);
		pr_info("rx_queue%d_next_to_clean:%d\n",
			i, h->qs[i]->rx_ring.next_to_clean);
		pr_info("rx_queue%d_next_to_use:%d\n",
			i, h->qs[i]->rx_ring.next_to_use);
	}
}

/* for resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
	enum hnae_port_type type = priv->ae_handle->port_type;

	if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
		return;
	clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

	/* If we're already down, removing or resetting, just bail */
	if (test_bit(NIC_STATE_DOWN, &priv->state) ||
	    test_bit(NIC_STATE_REMOVING, &priv->state) ||
	    test_bit(NIC_STATE_RESETTING, &priv->state))
		return;

	hns_nic_dump(priv);
	netdev_info(priv->netdev, "try to reset %s port!\n",
		    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

	rtnl_lock();
	/* put off any impending NetWatchDogTimeout */
	netif_trans_update(priv->netdev);

	if (type == HNAE_PORT_DEBUG) {
		hns_nic_net_reinit(priv->netdev);
	} else {
		netif_carrier_off(priv->netdev);
		netif_tx_disable(priv->netdev);
	}
	rtnl_unlock();
}

/* for doing service complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
	WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
	/* make sure to commit the things */
	smp_mb__before_atomic();
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}

static void hns_nic_service_task(struct work_struct *work)
{
	struct hns_nic_priv *priv
		= container_of(work, struct hns_nic_priv, service_task);
	struct hnae_handle *h = priv->ae_handle;

	hns_nic_reset_subtask(priv);
	hns_nic_update_link_status(priv->netdev);
	h->dev->ops->update_led_status(h);
	hns_nic_update_stats(priv->netdev);

	hns_nic_service_event_complete(priv);
}

static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
	if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
	    !test_bit(NIC_STATE_REMOVING, &priv->state) &&
	    !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
		(void)schedule_work(&priv->service_task);
}

static void hns_nic_service_timer(unsigned long data)
{
	struct hns_nic_priv *priv = (struct hns_nic_priv *)data;

	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	hns_nic_task_schedule(priv);
}

/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 */
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
	/* Do the reset outside of interrupt context */
	if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
		set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
		netdev_warn(priv->netdev,
			    "initiating reset due to tx timeout(%llu,0x%lx)\n",
			    priv->tx_timeout_count, priv->state);
		priv->tx_timeout_count++;
		hns_nic_task_schedule(priv);
	}
}

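/* ring_data is a single array of 2 * q_num entries: slots [0, q_num) are
 * the TX rings and slots [q_num, 2 * q_num) the RX rings, each with its
 * own NAPI context and version-specific completion callbacks.
 */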
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	bool is_ver1 = AE_IS_VER1(priv->enet_ver);
	int i;

	if (h->q_num > NIC_MAX_Q_PER_VF) {
		netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
		return -EINVAL;
	}

	priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
				  GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->q_num; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i;
		rd->ring = &h->qs[i]->tx_ring;
		rd->poll_one = hns_nic_tx_poll_one;
		rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
			hns_nic_tx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	for (i = h->q_num; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];
		rd->queue_index = i - h->q_num;
		rd->ring = &h->qs[i - h->q_num]->rx_ring;
		rd->poll_one = hns_nic_rx_poll_one;
		rd->ex_process = hns_nic_rx_up_pro;
		rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
			hns_nic_rx_fini_pro_v2;

		netif_napi_add(priv->netdev, &rd->napi,
			       hns_nic_common_poll, NAPI_POLL_WEIGHT);
		rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}

	return 0;
}

static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->q_num * 2; i++) {
		netif_napi_del(&priv->ring_data[i].napi);
		if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
			(void)irq_set_affinity_hint(
				priv->ring_data[i].ring->irq,
				NULL);
			free_irq(priv->ring_data[i].ring->irq,
				 &priv->ring_data[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
	}
	kfree(priv->ring_data);
}

static void hns_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	if (AE_IS_VER1(priv->enet_ver)) {
		priv->ops.fill_desc = fill_desc;
		priv->ops.get_rxd_bnum = get_rx_desc_bnum;
		priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
	} else {
		priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
		if ((netdev->features & NETIF_F_TSO) ||
		    (netdev->features & NETIF_F_TSO6)) {
			priv->ops.fill_desc = fill_tso_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
			/* This chip only supports 7*4096 */
			netif_set_gso_max_size(netdev, 7 * 4096);
		} else {
			priv->ops.fill_desc = fill_v2_desc;
			priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
		}
		/* enable tso at init time;
		 * tso on/off is controlled through the TSE bit in the bd
		 */
		h->dev->ops->set_tso_stats(h, 1);
	}
}

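/* Try to bind to our AE (acceleration engine) handle. If the AE driver has
 * not probed yet this fails and the caller falls back to a notifier that
 * re-runs the binding once the AE registers.
 */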
static int hns_nic_try_get_ae(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h;
	int ret;

	h = hnae_get_handle(&priv->netdev->dev,
			    priv->fwnode, priv->port_id, NULL);
	if (IS_ERR_OR_NULL(h)) {
		ret = -ENODEV;
		dev_dbg(priv->dev, "has not handle, register notifier!\n");
		goto out;
	}
	priv->ae_handle = h;

	ret = hns_nic_init_phy(ndev, h);
	if (ret) {
		dev_err(priv->dev, "probe phy device fail!\n");
		goto out_init_phy;
	}

	ret = hns_nic_init_ring_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	hns_nic_set_priv_ops(ndev);

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_ndev_fail;
	}
	return 0;

out_reg_ndev_fail:
	hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
	hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
out:
	return ret;
}

static int hns_nic_notifier_action(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct hns_nic_priv *priv =
		container_of(nb, struct hns_nic_priv, notifier_block);

	assert(action == HNAE_AE_REGISTER);

	if (!hns_nic_try_get_ae(priv->netdev)) {
		hnae_unregister_notifier(&priv->notifier_block);
		priv->notifier_block.notifier_call = NULL;
	}
	return 0;
}

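/* Probe: the "ae-handle" reference and enet version come either from the
 * device tree or from ACPI; everything after that point is common.
 */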
static int hns_nic_dev_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct net_device *ndev;
	struct hns_nic_priv *priv;
	u32 port_id;
	int ret;

	ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
	if (!ndev)
		return -ENOMEM;

	platform_set_drvdata(pdev, ndev);

	priv = netdev_priv(ndev);
	priv->dev = dev;
	priv->netdev = ndev;

	if (dev_of_node(dev)) {
		struct device_node *ae_node;

		if (of_device_is_compatible(dev->of_node,
					    "hisilicon,hns-nic-v1"))
			priv->enet_ver = AE_VERSION_1;
		else
			priv->enet_ver = AE_VERSION_2;

		ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
		if (IS_ERR_OR_NULL(ae_node)) {
			ret = PTR_ERR(ae_node);
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = &ae_node->fwnode;
	} else if (is_acpi_node(dev->fwnode)) {
		struct acpi_reference_args args;

		if (acpi_dev_found(hns_enet_acpi_match[0].id)) {
			priv->enet_ver = AE_VERSION_1;
		} else if (acpi_dev_found(hns_enet_acpi_match[1].id)) {
			priv->enet_ver = AE_VERSION_2;
		} else {
			ret = -ENXIO;
			goto out_read_prop_fail;
		}

		/* try to find port-idx-in-ae first */
		ret = acpi_node_get_property_reference(dev->fwnode,
						       "ae-handle", 0, &args);
		if (ret) {
			dev_err(dev, "not find ae-handle\n");
			goto out_read_prop_fail;
		}
		priv->fwnode = acpi_fwnode_handle(args.adev);
	} else {
		dev_err(dev, "cannot read cfg data from OF or acpi\n");
		ret = -ENXIO;
		goto out_read_prop_fail;
	}

	ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
	if (ret) {
		/* only for old code compatibility */
		ret = device_property_read_u32(dev, "port-id", &port_id);
		if (ret)
			goto out_read_prop_fail;
		/* for old dts, we need to calculate the port offset */
		port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
			: port_id - HNS_SRV_OFFSET;
	}
	priv->port_id = port_id;

	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);

	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	switch (priv->enet_ver) {
	case AE_VERSION_2:
		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
		break;
	default:
		break;
	}

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "set mask to 64bit fail!\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

	setup_timer(&priv->service_timer, hns_nic_service_timer,
		    (unsigned long)priv);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "register notifier fail!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "has not handle, register notifier!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
	/* safe for ACPI FW */
	of_node_put(to_of_node(priv->fwnode));
	free_netdev(ndev);
	return ret;
}

static int hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	/* safe for ACPI FW */
	of_node_put(to_of_node(priv->fwnode));

	free_netdev(ndev);
	return 0;
}

static const struct of_device_id hns_enet_of_match[] = {
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.of_match_table = hns_enet_of_match,
		.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");