// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2019-2020 Marvell International Ltd. All rights reserved */

#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/platform_device.h>

#include "prestera_dsa.h"
#include "prestera.h"
#include "prestera_hw.h"
#include "prestera_rxtx.h"
#include "prestera_devlink.h"

#define PRESTERA_SDMA_WAIT_MUL 10

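/* Layout of one SDMA descriptor, shared with the SDMA engine: bit 31 of
 * word1 is the ownership flag (see the *_DESC_OWNER() helpers below), word2
 * carries the buffer size and packet length fields, and buff/next hold
 * device-mapped addresses of the data buffer and of the next descriptor.
 */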
struct prestera_sdma_desc {
	__le32 word1;
	__le32 word2;
	__le32 buff;
	__le32 next;
} __packed __aligned(16);

#define PRESTERA_SDMA_BUFF_SIZE_MAX 1544

#define PRESTERA_SDMA_RX_DESC_PKT_LEN(desc) \
	((le32_to_cpu((desc)->word2) >> 16) & GENMASK(13, 0))

#define PRESTERA_SDMA_RX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_RX_DESC_IS_RCVD(desc) \
	(PRESTERA_SDMA_RX_DESC_OWNER(desc) == PRESTERA_SDMA_RX_DESC_CPU_OWN)

#define PRESTERA_SDMA_RX_DESC_CPU_OWN 0
#define PRESTERA_SDMA_RX_DESC_DMA_OWN 1

#define PRESTERA_SDMA_RX_QUEUE_NUM 8

#define PRESTERA_SDMA_RX_DESC_PER_Q 1000

#define PRESTERA_SDMA_TX_DESC_PER_Q 1000
#define PRESTERA_SDMA_TX_MAX_BURST 64

#define PRESTERA_SDMA_TX_DESC_OWNER(desc) \
	((le32_to_cpu((desc)->word1) & BIT(31)) >> 31)

#define PRESTERA_SDMA_TX_DESC_CPU_OWN 0
#define PRESTERA_SDMA_TX_DESC_DMA_OWN 1U

#define PRESTERA_SDMA_TX_DESC_IS_SENT(desc) \
	(PRESTERA_SDMA_TX_DESC_OWNER(desc) == PRESTERA_SDMA_TX_DESC_CPU_OWN)

#define PRESTERA_SDMA_TX_DESC_LAST	BIT(20)
#define PRESTERA_SDMA_TX_DESC_FIRST	BIT(21)
#define PRESTERA_SDMA_TX_DESC_CALC_CRC	BIT(12)

#define PRESTERA_SDMA_TX_DESC_SINGLE	\
	(PRESTERA_SDMA_TX_DESC_FIRST | PRESTERA_SDMA_TX_DESC_LAST)

#define PRESTERA_SDMA_TX_DESC_INIT	\
	(PRESTERA_SDMA_TX_DESC_SINGLE | PRESTERA_SDMA_TX_DESC_CALC_CRC)

#define PRESTERA_SDMA_RX_INTR_MASK_REG		0x2814
#define PRESTERA_SDMA_RX_QUEUE_STATUS_REG	0x2680
#define PRESTERA_SDMA_RX_QUEUE_DESC_REG(n)	(0x260C + (n) * 16)

#define PRESTERA_SDMA_TX_QUEUE_DESC_REG		0x26C0
#define PRESTERA_SDMA_TX_QUEUE_START_REG	0x2868

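/* Host-side state for one ring slot: the descriptor allocated from the DMA
 * pool plus the skb currently attached to it. buf_dma stays set to
 * DMA_MAPPING_ERROR while no data buffer is mapped.
 */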
struct prestera_sdma_buf {
	struct prestera_sdma_desc *desc;
	dma_addr_t desc_dma;
	struct sk_buff *skb;
	dma_addr_t buf_dma;
	bool is_used;
};

struct prestera_rx_ring {
	struct prestera_sdma_buf *bufs;
	int next_rx;
};

struct prestera_tx_ring {
	struct prestera_sdma_buf *bufs;
	int next_tx;
	int max_burst;
	int burst;
};

struct prestera_sdma {
	struct prestera_rx_ring rx_ring[PRESTERA_SDMA_RX_QUEUE_NUM];
	struct prestera_tx_ring tx_ring;
	struct prestera_switch *sw;
	struct dma_pool *desc_pool;
	struct work_struct tx_work;
	struct napi_struct rx_napi;
	struct net_device napi_dev;
	u32 map_addr;
	u64 dma_mask;
	/* protect SDMA with concurrent access from multiple CPUs */
	spinlock_t tx_lock;
};

struct prestera_rxtx {
	struct prestera_sdma sdma;
};

static int prestera_sdma_buf_init(struct prestera_sdma *sdma,
				  struct prestera_sdma_buf *buf)
{
	struct prestera_sdma_desc *desc;
	dma_addr_t dma;

	desc = dma_pool_alloc(sdma->desc_pool, GFP_DMA | GFP_KERNEL, &dma);
	if (!desc)
		return -ENOMEM;

	buf->buf_dma = DMA_MAPPING_ERROR;
	buf->desc_dma = dma;
	buf->desc = desc;
	buf->skb = NULL;

	return 0;
}

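/* Translate a host DMA address into the SDMA engine's address space; the
 * window offset (map_addr) is reported by the firmware via
 * prestera_hw_rxtx_init().
 */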
static u32 prestera_sdma_map(struct prestera_sdma *sdma, dma_addr_t pa)
{
	return sdma->map_addr + pa;
}

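/* (Re)arm an RX descriptor: program the maximum buffer size, attach the data
 * buffer and hand the descriptor back to the hardware. The 0xA0000000 write
 * sets the DMA ownership bit (31); the remaining set bits appear to be
 * engine-defined control/interrupt flags.
 */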
static void prestera_sdma_rx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc,
				       dma_addr_t buf)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, PRESTERA_SDMA_BUFF_SIZE_MAX, GENMASK(15, 0));
	desc->word2 = cpu_to_le32(word);

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));

	/* make sure the buffer is set before resetting the descriptor */
	wmb();

	desc->word1 = cpu_to_le32(0xA0000000);
}

static void prestera_sdma_rx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}

static int prestera_sdma_rx_skb_alloc(struct prestera_sdma *sdma,
				      struct prestera_sdma_buf *buf)
{
	struct device *dev = sdma->sw->dev->dev;
	struct sk_buff *skb;
	dma_addr_t dma;

	skb = alloc_skb(PRESTERA_SDMA_BUFF_SIZE_MAX, GFP_DMA | GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	dma = dma_map_single(dev, skb->data, skb->len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, dma))
		goto err_dma_map;

	if (buf->skb)
		dma_unmap_single(dev, buf->buf_dma, buf->skb->len,
				 DMA_FROM_DEVICE);

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;

err_dma_map:
	kfree_skb(skb);

	return -ENOMEM;
}

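/* Detach the filled skb from a ring slot and refill the slot right away. If
 * no replacement buffer can be mapped, the old buffer is kept on the ring
 * and the packet is delivered as a copy instead (or dropped when even the
 * copy allocation fails).
 */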
static struct sk_buff *prestera_sdma_rx_skb_get(struct prestera_sdma *sdma,
						struct prestera_sdma_buf *buf)
{
	dma_addr_t buf_dma = buf->buf_dma;
	struct sk_buff *skb = buf->skb;
	u32 len = skb->len;
	int err;

	err = prestera_sdma_rx_skb_alloc(sdma, buf);
	if (err) {
		buf->buf_dma = buf_dma;
		buf->skb = skb;

		skb = alloc_skb(skb->len, GFP_ATOMIC);
		if (skb) {
			skb_put(skb, len);
			skb_copy_from_linear_data(buf->skb, skb->data, len);
		}
	}

	prestera_sdma_rx_desc_init(sdma, buf->desc, buf->buf_dma);

	return skb;
}

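/* Parse and strip the DSA tag that the switch inserts after the MAC
 * addresses (overlaying the ethertype), resolve the ingress port, restore a
 * plain Ethernet header and propagate the VLAN/trap metadata carried in the
 * tag.
 */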
static int prestera_rxtx_process_skb(struct prestera_sdma *sdma,
				     struct sk_buff *skb)
{
	struct prestera_port *port;
	struct prestera_dsa dsa;
	u32 hw_port, dev_id;
	u8 cpu_code;
	int err;

	skb_pull(skb, ETH_HLEN);

	/* ethertype field is part of the dsa header */
	err = prestera_dsa_parse(&dsa, skb->data - ETH_TLEN);
	if (err)
		return err;

	dev_id = dsa.hw_dev_num;
	hw_port = dsa.port_num;

	port = prestera_port_find_by_hwid(sdma->sw, dev_id, hw_port);
	if (unlikely(!port)) {
		dev_warn_ratelimited(prestera_dev(sdma->sw), "received pkt for non-existent port(%u, %u)\n",
				     dev_id, hw_port);
		return -ENOENT;
	}

	if (unlikely(!pskb_may_pull(skb, PRESTERA_DSA_HLEN)))
		return -EINVAL;

	/* remove DSA tag and update checksum */
	skb_pull_rcsum(skb, PRESTERA_DSA_HLEN);

	memmove(skb->data - ETH_HLEN, skb->data - ETH_HLEN - PRESTERA_DSA_HLEN,
		ETH_ALEN * 2);

	skb_push(skb, ETH_HLEN);

	skb->protocol = eth_type_trans(skb, port->dev);

	if (dsa.vlan.is_tagged) {
		u16 tci = dsa.vlan.vid & VLAN_VID_MASK;

		tci |= dsa.vlan.vpt << VLAN_PRIO_SHIFT;
		if (dsa.vlan.cfi_bit)
			tci |= VLAN_CFI_MASK;

		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), tci);
	}

	cpu_code = dsa.cpu_code;
	prestera_devlink_trap_report(port, skb, cpu_code);

	return 0;
}

static int prestera_sdma_next_rx_buf_idx(int buf_idx)
{
	return (buf_idx + 1) % PRESTERA_SDMA_RX_DESC_PER_Q;
}

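/* NAPI poll: service the RX queues round-robin until either the budget is
 * spent or every queue reports no ready descriptor (rxq_done_map covers all
 * of qmask). RX interrupts are unmasked again only when polling completes
 * under budget.
 */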
static int prestera_sdma_rx_poll(struct napi_struct *napi, int budget)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	unsigned int rxq_done_map = 0;
	struct prestera_sdma *sdma;
	struct list_head rx_list;
	unsigned int qmask;
	int pkts_done = 0;
	int q;

	qmask = GENMASK(qnum - 1, 0);

	INIT_LIST_HEAD(&rx_list);

	sdma = container_of(napi, struct prestera_sdma, rx_napi);

	while (pkts_done < budget && rxq_done_map != qmask) {
		for (q = 0; q < qnum && pkts_done < budget; q++) {
			struct prestera_rx_ring *ring = &sdma->rx_ring[q];
			struct prestera_sdma_desc *desc;
			struct prestera_sdma_buf *buf;
			int buf_idx = ring->next_rx;
			struct sk_buff *skb;

			buf = &ring->bufs[buf_idx];
			desc = buf->desc;

			if (PRESTERA_SDMA_RX_DESC_IS_RCVD(desc)) {
				rxq_done_map &= ~BIT(q);
			} else {
				rxq_done_map |= BIT(q);
				continue;
			}

			pkts_done++;

			__skb_trim(buf->skb, PRESTERA_SDMA_RX_DESC_PKT_LEN(desc));

			skb = prestera_sdma_rx_skb_get(sdma, buf);
			if (!skb)
				goto rx_next_buf;

			if (unlikely(prestera_rxtx_process_skb(sdma, skb)))
				goto rx_next_buf;

			list_add_tail(&skb->list, &rx_list);
rx_next_buf:
			ring->next_rx = prestera_sdma_next_rx_buf_idx(buf_idx);
		}
	}

	if (pkts_done < budget && napi_complete_done(napi, pkts_done))
		prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG,
			       GENMASK(32, 2));

	netif_receive_skb_list(&rx_list);

	return pkts_done;
}

static void prestera_sdma_rx_fini(struct prestera_sdma *sdma)
{
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int q, b;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		if (!ring->bufs)
			break;

		for (b = 0; b < PRESTERA_SDMA_RX_DESC_PER_Q; b++) {
			struct prestera_sdma_buf *buf = &ring->bufs[b];

			if (buf->desc)
				dma_pool_free(sdma->desc_pool, buf->desc,
					      buf->desc_dma);

			if (!buf->skb)
				continue;

			if (buf->buf_dma != DMA_MAPPING_ERROR)
				dma_unmap_single(sdma->sw->dev->dev,
						 buf->buf_dma, buf->skb->len,
						 DMA_FROM_DEVICE);
			kfree_skb(buf->skb);
		}
	}
}

static int prestera_sdma_rx_init(struct prestera_sdma *sdma)
{
	int bnum = PRESTERA_SDMA_RX_DESC_PER_Q;
	int qnum = PRESTERA_SDMA_RX_QUEUE_NUM;
	int err;
	int q;

	/* disable all rx queues */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(15, 8));

	for (q = 0; q < qnum; q++) {
		struct prestera_sdma_buf *head, *tail, *next, *prev;
		struct prestera_rx_ring *ring = &sdma->rx_ring[q];

		ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
		if (!ring->bufs)
			return -ENOMEM;

		ring->next_rx = 0;

		tail = &ring->bufs[bnum - 1];
		head = &ring->bufs[0];
		next = head;
		prev = next;

		do {
			err = prestera_sdma_buf_init(sdma, next);
			if (err)
				return err;

			err = prestera_sdma_rx_skb_alloc(sdma, next);
			if (err)
				return err;

			prestera_sdma_rx_desc_init(sdma, next->desc,
						   next->buf_dma);

			prestera_sdma_rx_desc_set_next(sdma, prev->desc,
						       next->desc_dma);

			prev = next;
			next++;
		} while (prev != tail);

		/* join tail with head to make a circular list */
		prestera_sdma_rx_desc_set_next(sdma, tail->desc, head->desc_dma);

		prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_DESC_REG(q),
			       prestera_sdma_map(sdma, head->desc_dma));
	}

	/* make sure all rx descs are filled before enabling all rx queues */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_RX_QUEUE_STATUS_REG,
		       GENMASK(7, 0));

	return 0;
}

static void prestera_sdma_tx_desc_init(struct prestera_sdma *sdma,
				       struct prestera_sdma_desc *desc)
{
	desc->word1 = cpu_to_le32(PRESTERA_SDMA_TX_DESC_INIT);
	desc->word2 = 0;
}

static void prestera_sdma_tx_desc_set_next(struct prestera_sdma *sdma,
					   struct prestera_sdma_desc *desc,
					   dma_addr_t next)
{
	desc->next = cpu_to_le32(prestera_sdma_map(sdma, next));
}

static void prestera_sdma_tx_desc_set_buf(struct prestera_sdma *sdma,
					  struct prestera_sdma_desc *desc,
					  dma_addr_t buf, size_t len)
{
	u32 word = le32_to_cpu(desc->word2);

	u32p_replace_bits(&word, len + ETH_FCS_LEN, GENMASK(30, 16));

	desc->buff = cpu_to_le32(prestera_sdma_map(sdma, buf));
	desc->word2 = cpu_to_le32(word);
}

static void prestera_sdma_tx_desc_xmit(struct prestera_sdma_desc *desc)
{
	u32 word = le32_to_cpu(desc->word1);

	word |= PRESTERA_SDMA_TX_DESC_DMA_OWN << 31;

	/* make sure everything is written before enabling xmit */
	wmb();

	desc->word1 = cpu_to_le32(word);
}

static int prestera_sdma_tx_buf_map(struct prestera_sdma *sdma,
				    struct prestera_sdma_buf *buf,
				    struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	dma_addr_t dma;

	dma = dma_map_single(dma_dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, dma))
		return -ENOMEM;

	buf->buf_dma = dma;
	buf->skb = skb;

	return 0;
}

static void prestera_sdma_tx_buf_unmap(struct prestera_sdma *sdma,
				       struct prestera_sdma_buf *buf)
{
	struct device *dma_dev = sdma->sw->dev->dev;

	dma_unmap_single(dma_dev, buf->buf_dma, buf->skb->len, DMA_TO_DEVICE);
}

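/* Deferred TX reclaim: walk the ring and release the buffers whose
 * descriptors the hardware has handed back to the CPU, then mark those slots
 * free for reuse by prestera_sdma_xmit().
 */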
static void prestera_sdma_tx_recycle_work_fn(struct work_struct *work)
{
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma *sdma;
	int b;

	sdma = container_of(work, struct prestera_sdma, tx_work);

	tx_ring = &sdma->tx_ring;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &tx_ring->bufs[b];

		if (!buf->is_used)
			continue;

		if (!PRESTERA_SDMA_TX_DESC_IS_SENT(buf->desc))
			continue;

		prestera_sdma_tx_buf_unmap(sdma, buf);
		dev_consume_skb_any(buf->skb);
		buf->skb = NULL;

		/* make sure everything is cleaned up */
		wmb();

		buf->is_used = false;
	}
}

static int prestera_sdma_tx_init(struct prestera_sdma *sdma)
{
	struct prestera_sdma_buf *head, *tail, *next, *prev;
	struct prestera_tx_ring *tx_ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int err;

	INIT_WORK(&sdma->tx_work, prestera_sdma_tx_recycle_work_fn);
	spin_lock_init(&sdma->tx_lock);

	tx_ring->bufs = kmalloc_array(bnum, sizeof(*head), GFP_KERNEL);
	if (!tx_ring->bufs)
		return -ENOMEM;

	tail = &tx_ring->bufs[bnum - 1];
	head = &tx_ring->bufs[0];
	next = head;
	prev = next;

	tx_ring->max_burst = PRESTERA_SDMA_TX_MAX_BURST;
	tx_ring->burst = tx_ring->max_burst;
	tx_ring->next_tx = 0;

	do {
		err = prestera_sdma_buf_init(sdma, next);
		if (err)
			return err;

		next->is_used = false;

		prestera_sdma_tx_desc_init(sdma, next->desc);

		prestera_sdma_tx_desc_set_next(sdma, prev->desc,
					       next->desc_dma);

		prev = next;
		next++;
	} while (prev != tail);

	/* join tail with head to make a circular list */
	prestera_sdma_tx_desc_set_next(sdma, tail->desc, head->desc_dma);

	/* make sure descriptors are written */
	wmb();

	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_DESC_REG,
		       prestera_sdma_map(sdma, head->desc_dma));

	return 0;
}

static void prestera_sdma_tx_fini(struct prestera_sdma *sdma)
{
	struct prestera_tx_ring *ring = &sdma->tx_ring;
	int bnum = PRESTERA_SDMA_TX_DESC_PER_Q;
	int b;

	cancel_work_sync(&sdma->tx_work);

	if (!ring->bufs)
		return;

	for (b = 0; b < bnum; b++) {
		struct prestera_sdma_buf *buf = &ring->bufs[b];

		if (buf->desc)
			dma_pool_free(sdma->desc_pool, buf->desc,
				      buf->desc_dma);

		if (!buf->skb)
			continue;

		dma_unmap_single(sdma->sw->dev->dev, buf->buf_dma,
				 buf->skb->len, DMA_TO_DEVICE);

		dev_consume_skb_any(buf->skb);
	}
}

static void prestera_rxtx_handle_event(struct prestera_switch *sw,
				       struct prestera_event *evt,
				       void *arg)
{
	struct prestera_sdma *sdma = arg;

	if (evt->id != PRESTERA_RXTX_EVENT_RCV_PKT)
		return;

	/* mask RX interrupts until NAPI polling completes */
	prestera_write(sdma->sw, PRESTERA_SDMA_RX_INTR_MASK_REG, 0);
	napi_schedule(&sdma->rx_napi);
}

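/* Bring up the SDMA path: query the DMA window from the firmware, create the
 * descriptor pool, build the RX/TX rings, then register the RX event handler
 * and the NAPI context. The error path unwinds in reverse order.
 */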
static int prestera_sdma_switch_init(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;
	struct device *dev = sw->dev->dev;
	struct prestera_rxtx_params p;
	int err;

	p.use_sdma = true;

	err = prestera_hw_rxtx_init(sw, &p);
	if (err) {
		dev_err(dev, "failed to init rxtx by hw\n");
		return err;
	}

	sdma->dma_mask = dma_get_mask(dev);
	sdma->map_addr = p.map_addr;
	sdma->sw = sw;

	sdma->desc_pool = dma_pool_create("desc_pool", dev,
					  sizeof(struct prestera_sdma_desc),
					  16, 0);
	if (!sdma->desc_pool)
		return -ENOMEM;

	err = prestera_sdma_rx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init rx ring\n");
		goto err_rx_init;
	}

	err = prestera_sdma_tx_init(sdma);
	if (err) {
		dev_err(dev, "failed to init tx ring\n");
		goto err_tx_init;
	}

	err = prestera_hw_event_handler_register(sw, PRESTERA_EVENT_TYPE_RXTX,
						 prestera_rxtx_handle_event,
						 sdma);
	if (err)
		goto err_evt_register;

	init_dummy_netdev(&sdma->napi_dev);

	netif_napi_add(&sdma->napi_dev, &sdma->rx_napi, prestera_sdma_rx_poll);
	napi_enable(&sdma->rx_napi);

	return 0;

err_evt_register:
err_tx_init:
	prestera_sdma_tx_fini(sdma);
err_rx_init:
	prestera_sdma_rx_fini(sdma);

	dma_pool_destroy(sdma->desc_pool);

	return err;
}

static void prestera_sdma_switch_fini(struct prestera_switch *sw)
{
	struct prestera_sdma *sdma = &sw->rxtx->sdma;

	napi_disable(&sdma->rx_napi);
	netif_napi_del(&sdma->rx_napi);
	prestera_hw_event_handler_unregister(sw, PRESTERA_EVENT_TYPE_RXTX,
					     prestera_rxtx_handle_event);
	prestera_sdma_tx_fini(sdma);
	prestera_sdma_rx_fini(sdma);
	dma_pool_destroy(sdma->desc_pool);
}

static bool prestera_sdma_is_ready(struct prestera_sdma *sdma)
{
	return !(prestera_read(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG) & 1);
}

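/* Poll (bounded) until the SDMA engine finishes the current TX burst; bit 0
 * of the TX queue start register reads back as non-zero while the engine is
 * still busy with the ring.
 */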
static int prestera_sdma_tx_wait(struct prestera_sdma *sdma,
				 struct prestera_tx_ring *tx_ring)
{
	int tx_wait_num = PRESTERA_SDMA_WAIT_MUL * tx_ring->max_burst;

	do {
		if (prestera_sdma_is_ready(sdma))
			return 0;

		udelay(1);
	} while (--tx_wait_num);

	return -EBUSY;
}

static void prestera_sdma_tx_start(struct prestera_sdma *sdma)
{
	prestera_write(sdma->sw, PRESTERA_SDMA_TX_QUEUE_START_REG, 1);
	schedule_work(&sdma->tx_work);
}

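/* Transmit one skb on the single TX ring shared by all ports (hence
 * tx_lock). Up to max_burst descriptors are queued back to back before the
 * engine is polled for completion via prestera_sdma_tx_wait().
 */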
static netdev_tx_t prestera_sdma_xmit(struct prestera_sdma *sdma,
				      struct sk_buff *skb)
{
	struct device *dma_dev = sdma->sw->dev->dev;
	struct net_device *dev = skb->dev;
	struct prestera_tx_ring *tx_ring;
	struct prestera_sdma_buf *buf;
	int err;

	spin_lock(&sdma->tx_lock);

	tx_ring = &sdma->tx_ring;

	buf = &tx_ring->bufs[tx_ring->next_tx];
	if (buf->is_used) {
		schedule_work(&sdma->tx_work);
		goto drop_skb;
	}

	if (unlikely(eth_skb_pad(skb)))
		goto drop_skb_nofree;

	err = prestera_sdma_tx_buf_map(sdma, buf, skb);
	if (err)
		goto drop_skb;

	prestera_sdma_tx_desc_set_buf(sdma, buf->desc, buf->buf_dma, skb->len);

	dma_sync_single_for_device(dma_dev, buf->buf_dma, skb->len,
				   DMA_TO_DEVICE);

	if (tx_ring->burst) {
		tx_ring->burst--;
	} else {
		tx_ring->burst = tx_ring->max_burst;

		err = prestera_sdma_tx_wait(sdma, tx_ring);
		if (err)
			goto drop_skb_unmap;
	}

	tx_ring->next_tx = (tx_ring->next_tx + 1) % PRESTERA_SDMA_TX_DESC_PER_Q;
	prestera_sdma_tx_desc_xmit(buf->desc);
	buf->is_used = true;

	prestera_sdma_tx_start(sdma);

	goto tx_done;

drop_skb_unmap:
	prestera_sdma_tx_buf_unmap(sdma, buf);
drop_skb:
	dev_consume_skb_any(skb);
drop_skb_nofree:
	dev->stats.tx_dropped++;
tx_done:
	spin_unlock(&sdma->tx_lock);

	return NETDEV_TX_OK;
}

int prestera_rxtx_switch_init(struct prestera_switch *sw)
{
	struct prestera_rxtx *rxtx;
	int err;

	rxtx = kzalloc(sizeof(*rxtx), GFP_KERNEL);
	if (!rxtx)
		return -ENOMEM;

	sw->rxtx = rxtx;

	err = prestera_sdma_switch_init(sw);
	if (err)
		kfree(rxtx);

	return err;
}

void prestera_rxtx_switch_fini(struct prestera_switch *sw)
{
	prestera_sdma_switch_fini(sw);
	kfree(sw->rxtx);
}

int prestera_rxtx_port_init(struct prestera_port *port)
{
	port->dev->needed_headroom = PRESTERA_DSA_HLEN;
	return 0;
}

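/* Build the outgoing DSA tag in place: push PRESTERA_DSA_HLEN bytes, move
 * the MAC addresses back to the front and write the tag where the ethertype
 * used to sit. The needed_headroom set in prestera_rxtx_port_init() makes
 * the skb_cow_head() call cheap in the common case.
 */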
netdev_tx_t prestera_rxtx_xmit(struct prestera_port *port, struct sk_buff *skb)
{
	struct prestera_dsa dsa;

	dsa.hw_dev_num = port->dev_id;
	dsa.port_num = port->hw_id;

	if (skb_cow_head(skb, PRESTERA_DSA_HLEN) < 0)
		return NET_XMIT_DROP;

	skb_push(skb, PRESTERA_DSA_HLEN);
	memmove(skb->data, skb->data + PRESTERA_DSA_HLEN, 2 * ETH_ALEN);

	if (prestera_dsa_build(&dsa, skb->data + 2 * ETH_ALEN) != 0)
		return NET_XMIT_DROP;

	return prestera_sdma_xmit(&port->sw->rxtx->sdma, skb);
}