/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/u64_stats_sync.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/prefetch.h>
#include <asm/barrier.h>
#include "hinic_common.h"
#include "hinic_hw_if.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_dev.h"
#include "hinic_rx.h"
#include "hinic_dev.h"
#define RX_IRQ_NO_PENDING       0
#define RX_IRQ_NO_COALESC       0
#define RX_IRQ_NO_LLI_TIMER     0
#define RX_IRQ_NO_CREDIT        0
#define RX_IRQ_NO_RESEND_TIMER  0
/**
 * hinic_rxq_clean_stats - Clean the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
void hinic_rxq_clean_stats(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_update_begin(&rxq_stats->syncp);
	rxq_stats->pkts = 0;
	rxq_stats->bytes = 0;
	u64_stats_update_end(&rxq_stats->syncp);
}
/**
 * hinic_rxq_get_stats - get statistics of Rx Queue
 * @rxq: Logical Rx Queue
 * @stats: return updated stats here
 **/
void hinic_rxq_get_stats(struct hinic_rxq *rxq, struct hinic_rxq_stats *stats)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;
	unsigned int start;

	u64_stats_update_begin(&stats->syncp);
	do {
		start = u64_stats_fetch_begin(&rxq_stats->syncp);
		stats->pkts = rxq_stats->pkts;
		stats->bytes = rxq_stats->bytes;
	} while (u64_stats_fetch_retry(&rxq_stats->syncp, start));
	u64_stats_update_end(&stats->syncp);
}
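/*
 * Example (illustrative sketch, not part of the upstream driver): the
 * natural consumer of hinic_rxq_get_stats() is the device's
 * ndo_get_stats64 callback, which snapshots every Rx queue and
 * accumulates the totals. The rxqs array and num_qps count below are
 * assumptions for illustration; the real struct hinic_dev may name
 * these fields differently.
 */
static void __maybe_unused example_get_stats64(struct net_device *netdev,
					       struct rtnl_link_stats64 *net_stats)
{
	struct hinic_dev *nic_dev = netdev_priv(netdev);
	struct hinic_rxq_stats rx_stats;
	int i;

	u64_stats_init(&rx_stats.syncp);

	for (i = 0; i < nic_dev->num_qps; i++) {
		/* each snapshot is internally consistent thanks to syncp */
		hinic_rxq_get_stats(&nic_dev->rxqs[i], &rx_stats);
		net_stats->rx_packets += rx_stats.pkts;
		net_stats->rx_bytes += rx_stats.bytes;
	}
}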
/**
 * rxq_stats_init - Initialize the statistics of a specific queue
 * @rxq: Logical Rx Queue
 **/
static void rxq_stats_init(struct hinic_rxq *rxq)
{
	struct hinic_rxq_stats *rxq_stats = &rxq->rxq_stats;

	u64_stats_init(&rxq_stats->syncp);
	hinic_rxq_clean_stats(rxq);
}
/**
 * rx_alloc_skb - allocate skb and map it to dma address
 * @rxq: rx queue
 * @dma_addr: returned dma address for the skb
 *
 * Return skb on success, NULL on failure
 **/
static struct sk_buff *rx_alloc_skb(struct hinic_rxq *rxq,
				    dma_addr_t *dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;
	struct sk_buff *skb;
	dma_addr_t addr;
	int err;

	skb = netdev_alloc_skb_ip_align(rxq->netdev, rxq->rq->buf_sz);
	if (!skb) {
		netdev_err(rxq->netdev, "Failed to allocate Rx SKB\n");
		return NULL;
	}

	addr = dma_map_single(&pdev->dev, skb->data, rxq->rq->buf_sz,
			      DMA_FROM_DEVICE);
	err = dma_mapping_error(&pdev->dev, addr);
	if (err) {
		dev_err(&pdev->dev, "Failed to map Rx DMA, err = %d\n", err);
		goto err_rx_map;
	}

	*dma_addr = addr;
	return skb;

err_rx_map:
	dev_kfree_skb_any(skb);
	return NULL;
}
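/*
 * Note: netdev_alloc_skb_ip_align() reserves NET_IP_ALIGN bytes of
 * headroom so that, after the 14-byte Ethernet header, the IP header
 * starts on a 4-byte boundary on architectures that care about
 * alignment. The buffer is then handed to the device via a streaming
 * DMA mapping and stays device-owned until rx_unmap_skb() is called.
 */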
/**
 * rx_unmap_skb - unmap the dma address of the skb
 * @rxq: rx queue
 * @dma_addr: dma address of the skb
 **/
static void rx_unmap_skb(struct hinic_rxq *rxq, dma_addr_t dma_addr)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_hwif *hwif = hwdev->hwif;
	struct pci_dev *pdev = hwif->pdev;

	dma_unmap_single(&pdev->dev, dma_addr, rxq->rq->buf_sz,
			 DMA_FROM_DEVICE);
}
/**
 * rx_free_skb - unmap and free skb
 * @rxq: rx queue
 * @skb: skb to free
 * @dma_addr: dma address of the skb
 **/
static void rx_free_skb(struct hinic_rxq *rxq, struct sk_buff *skb,
			dma_addr_t dma_addr)
{
	rx_unmap_skb(rxq, dma_addr);
	dev_kfree_skb_any(skb);
}
/**
 * rx_alloc_pkts - allocate pkts in rx queue
 * @rxq: rx queue
 *
 * Return number of skbs allocated
 **/
static int rx_alloc_pkts(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_rq_wqe *rq_wqe;
	unsigned int free_wqebbs;
	struct hinic_sge sge;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	int i, alloc_more;
	u16 prod_idx;

	free_wqebbs = hinic_get_rq_free_wqebbs(rxq->rq);
	alloc_more = 0;

	/* Limit the allocation chunks */
	if (free_wqebbs > nic_dev->rx_weight)
		free_wqebbs = nic_dev->rx_weight;

	for (i = 0; i < free_wqebbs; i++) {
		skb = rx_alloc_skb(rxq, &dma_addr);
		if (!skb) {
			netdev_err(rxq->netdev, "Failed to alloc Rx skb\n");
			alloc_more = 1;
			goto skb_out;
		}

		hinic_set_sge(&sge, dma_addr, skb->len);

		rq_wqe = hinic_rq_get_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
					  &prod_idx);
		if (!rq_wqe) {
			rx_free_skb(rxq, skb, dma_addr);
			alloc_more = 1;
			goto skb_out;
		}

		hinic_rq_prepare_wqe(rxq->rq, prod_idx, rq_wqe, &sge);
		hinic_rq_write_wqe(rxq->rq, prod_idx, rq_wqe, skb);
	}

skb_out:
	if (i) {
		wmb();	/* write all the wqes before updating PI */

		hinic_rq_update(rxq->rq, prod_idx);
	}

	if (alloc_more)
		tasklet_schedule(&rxq->rx_task);

	return i;
}
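/*
 * Design note: the wmb() in rx_alloc_pkts() is the classic producer-side
 * ordering barrier for a descriptor ring. All WQE writes must be visible
 * in memory before the producer index (PI) update tells the hardware that
 * new buffers exist; otherwise the device could DMA into a half-written
 * descriptor. The matching consumer-side ordering is expected to be
 * handled inside the hinic_rq_read_wqe()/hinic_rq_put_wqe() helpers.
 */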
/**
 * free_all_rx_skbs - free all skbs in rx queue
 * @rxq: rx queue
 **/
static void free_all_rx_skbs(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;
	struct hinic_hw_wqe *hw_wqe;
	struct hinic_sge sge;
	u16 ci;

	while ((hw_wqe = hinic_read_wqe(rq->wq, HINIC_RQ_WQE_SIZE, &ci))) {
		if (IS_ERR(hw_wqe))
			break;

		hinic_rq_get_sge(rq, &hw_wqe->rq_wqe, ci, &sge);

		hinic_put_wqe(rq->wq, HINIC_RQ_WQE_SIZE);

		rx_free_skb(rxq, rq->saved_skb[ci], hinic_sge_to_dma(&sge));
	}
}
/**
 * rx_alloc_task - tasklet for queue allocation
 * @data: rx queue
 **/
static void rx_alloc_task(unsigned long data)
{
	struct hinic_rxq *rxq = (struct hinic_rxq *)data;

	(void)rx_alloc_pkts(rxq);
}
/**
 * rx_recv_jumbo_pkt - Rx handler for jumbo pkt
 * @rxq: rx queue
 * @head_skb: the first skb in the list
 * @left_pkt_len: remaining size of the pkt, excluding the head skb
 * @ci: consumer index
 *
 * Return number of wqes used for the remainder of the pkt
 **/
static int rx_recv_jumbo_pkt(struct hinic_rxq *rxq, struct sk_buff *head_skb,
			     unsigned int left_pkt_len, u16 ci)
{
	struct sk_buff *skb, *curr_skb = head_skb;
	struct hinic_rq_wqe *rq_wqe;
	unsigned int curr_len;
	struct hinic_sge sge;
	int num_wqes = 0;

	while (left_pkt_len > 0) {
		rq_wqe = hinic_rq_read_next_wqe(rxq->rq, HINIC_RQ_WQE_SIZE,
						&skb, &ci);

		num_wqes++;

		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		prefetch(skb->data);

		curr_len = (left_pkt_len > HINIC_RX_BUF_SZ) ? HINIC_RX_BUF_SZ :
			    left_pkt_len;

		left_pkt_len -= curr_len;

		__skb_put(skb, curr_len);

		if (curr_skb == head_skb)
			skb_shinfo(head_skb)->frag_list = skb;
		else
			curr_skb->next = skb;

		head_skb->len += skb->len;
		head_skb->data_len += skb->len;
		head_skb->truesize += skb->truesize;

		curr_skb = skb;
	}

	return num_wqes;
}
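/*
 * Design note: rx_recv_jumbo_pkt() stitches a multi-buffer packet together
 * with the skb frag_list mechanism rather than paged fragments: the first
 * continuation buffer hangs off skb_shinfo(head_skb)->frag_list and later
 * ones are chained through skb->next. Whenever an skb is appended this
 * way, head_skb->len, data_len and truesize must all be updated by hand,
 * as done above, or the stack's length and memory accounting breaks.
 */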
/**
 * rxq_recv - Rx handler
 * @rxq: rx queue
 * @budget: maximum pkts to process
 *
 * Return number of pkts received
 **/
static int rxq_recv(struct hinic_rxq *rxq, int budget)
{
	struct hinic_qp *qp = container_of(rxq->rq, struct hinic_qp, rq);
	u64 pkt_len = 0, rx_bytes = 0;
	struct hinic_rq_wqe *rq_wqe;
	int num_wqes, pkts = 0;
	struct hinic_sge sge;
	struct sk_buff *skb;
	u16 ci;

	while (pkts < budget) {
		num_wqes = 0;

		rq_wqe = hinic_rq_read_wqe(rxq->rq, HINIC_RQ_WQE_SIZE, &skb,
					   &ci);
		if (!rq_wqe)
			break;

		hinic_rq_get_sge(rxq->rq, rq_wqe, ci, &sge);

		rx_unmap_skb(rxq, hinic_sge_to_dma(&sge));

		prefetch(skb->data);

		pkt_len = sge.len;

		if (pkt_len <= HINIC_RX_BUF_SZ) {
			__skb_put(skb, pkt_len);
		} else {
			__skb_put(skb, HINIC_RX_BUF_SZ);
			num_wqes = rx_recv_jumbo_pkt(rxq, skb, pkt_len -
						     HINIC_RX_BUF_SZ, ci);
		}

		hinic_rq_put_wqe(rxq->rq, ci,
				 (num_wqes + 1) * HINIC_RQ_WQE_SIZE);

		skb_record_rx_queue(skb, qp->q_id);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		napi_gro_receive(&rxq->napi, skb);

		pkts++;
		rx_bytes += pkt_len;
	}

	if (pkts)
		tasklet_schedule(&rxq->rx_task); /* rx_alloc_task refills the queue */

	u64_stats_update_begin(&rxq->rxq_stats.syncp);
	rxq->rxq_stats.pkts += pkts;
	rxq->rxq_stats.bytes += rx_bytes;
	u64_stats_update_end(&rxq->rxq_stats.syncp);

	return pkts;
}
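/*
 * Note: napi_gro_receive() hands each completed skb to the GRO layer,
 * which may coalesce consecutive TCP segments of one flow into a larger
 * skb before it reaches the stack, cutting per-packet overhead. A driver
 * that could not use GRO would call netif_receive_skb() at that point
 * instead.
 */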
static int rx_poll(struct napi_struct *napi, int budget)
{
	struct hinic_rxq *rxq = container_of(napi, struct hinic_rxq, napi);
	struct hinic_rq *rq = rxq->rq;
	int pkts;

	pkts = rxq_recv(rxq, budget);
	if (pkts >= budget)
		return budget;

	napi_complete(napi);
	enable_irq(rq->irq);
	return pkts;
}
static void rx_add_napi(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);

	netif_napi_add(rxq->netdev, &rxq->napi, rx_poll, nic_dev->rx_weight);
	napi_enable(&rxq->napi);
}
static void rx_del_napi(struct hinic_rxq *rxq)
{
	napi_disable(&rxq->napi);
	netif_napi_del(&rxq->napi);
}
static irqreturn_t rx_irq(int irq, void *data)
{
	struct hinic_rxq *rxq = (struct hinic_rxq *)data;
	struct hinic_rq *rq = rxq->rq;
	struct hinic_dev *nic_dev;

	/* Disable the interrupt until NAPI processing is completed */
	disable_irq_nosync(rq->irq);

	nic_dev = netdev_priv(rxq->netdev);
	hinic_hwdev_msix_cnt_set(nic_dev->hwdev, rq->msix_entry);

	napi_schedule(&rxq->napi);
	return IRQ_HANDLED;
}
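/*
 * Design note: rx_irq() follows the common NAPI interrupt-mitigation
 * pattern: the hard irq does the minimum work, masks further Rx
 * interrupts with disable_irq_nosync(), and defers all packet processing
 * to rx_poll() in softirq context. The interrupt is re-enabled only once
 * rx_poll() drains less than its budget and calls napi_complete().
 */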
static int rx_request_irq(struct hinic_rxq *rxq)
{
	struct hinic_dev *nic_dev = netdev_priv(rxq->netdev);
	struct hinic_hwdev *hwdev = nic_dev->hwdev;
	struct hinic_rq *rq = rxq->rq;
	int err;

	rx_add_napi(rxq);

	hinic_hwdev_msix_set(hwdev, rq->msix_entry,
			     RX_IRQ_NO_PENDING, RX_IRQ_NO_COALESC,
			     RX_IRQ_NO_LLI_TIMER, RX_IRQ_NO_CREDIT,
			     RX_IRQ_NO_RESEND_TIMER);

	err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq);
	if (err) {
		rx_del_napi(rxq);
		return err;
	}

	return 0;
}
static void rx_free_irq(struct hinic_rxq *rxq)
{
	struct hinic_rq *rq = rxq->rq;

	free_irq(rq->irq, rxq);
	rx_del_napi(rxq);
}
/**
 * hinic_init_rxq - Initialize the Rx Queue
 * @rxq: Logical Rx Queue
 * @rq: Hardware Rx Queue to connect the Logical queue with
 * @netdev: network device to connect the Logical queue with
 *
 * Return 0 - Success, negative - Failure
 **/
int hinic_init_rxq(struct hinic_rxq *rxq, struct hinic_rq *rq,
		   struct net_device *netdev)
{
	struct hinic_qp *qp = container_of(rq, struct hinic_qp, rq);
	int err, pkts, irqname_len;

	rxq->netdev = netdev;
	rxq->rq = rq;

	rxq_stats_init(rxq);

	irqname_len = snprintf(NULL, 0, "hinic_rxq%d", qp->q_id) + 1;
	rxq->irq_name = devm_kzalloc(&netdev->dev, irqname_len, GFP_KERNEL);
	if (!rxq->irq_name)
		return -ENOMEM;

	sprintf(rxq->irq_name, "hinic_rxq%d", qp->q_id);

	tasklet_init(&rxq->rx_task, rx_alloc_task, (unsigned long)rxq);

	pkts = rx_alloc_pkts(rxq);
	if (!pkts) {
		err = -ENOMEM;
		goto err_rx_pkts;
	}

	err = rx_request_irq(rxq);
	if (err) {
		netdev_err(netdev, "Failed to request Rx irq\n");
		goto err_req_rx_irq;
	}

	return 0;

err_req_rx_irq:
err_rx_pkts:
	tasklet_kill(&rxq->rx_task);
	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
	return err;
}
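/*
 * Example (illustrative sketch, not part of the upstream driver): a caller
 * bringing up the data path would typically create one logical Rx queue
 * per hardware RQ and unwind on failure. The rxqs array, the num_rqs
 * count and the hinic_hwdev_get_rq() helper are assumptions here; check
 * the real driver for the exact names.
 */
static int __maybe_unused example_create_rxqs(struct hinic_dev *nic_dev,
					      struct net_device *netdev,
					      int num_rqs)
{
	int i, err;

	for (i = 0; i < num_rqs; i++) {
		err = hinic_init_rxq(&nic_dev->rxqs[i],
				     hinic_hwdev_get_rq(nic_dev->hwdev, i),
				     netdev);
		if (err)
			goto err_init_rxq;
	}

	return 0;

err_init_rxq:
	/* tear down the queues that were initialized successfully */
	while (--i >= 0)
		hinic_clean_rxq(&nic_dev->rxqs[i]);
	return err;
}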
/**
 * hinic_clean_rxq - Clean the Rx Queue
 * @rxq: Logical Rx Queue
 **/
void hinic_clean_rxq(struct hinic_rxq *rxq)
{
	struct net_device *netdev = rxq->netdev;

	rx_free_irq(rxq);

	tasklet_kill(&rxq->rx_task);
	free_all_rx_skbs(rxq);
	devm_kfree(&netdev->dev, rxq->irq_name);
}