// SPDX-License-Identifier: GPL-2.0
/*
 * Texas Instruments Ethernet Switch Driver
 *
 * Copyright (C) 2019 Texas Instruments
 */

#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/kmemleak.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/net_tstamp.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
#include <net/pkt_cls.h>
#include <net/pkt_sched.h>

#include "cpsw.h"
#include "cpts.h"
#include "cpsw_ale.h"
#include "cpsw_priv.h"
#include "cpsw_sl.h"
#include "davinci_cpdma.h"

#define CPTS_N_ETX_TS 4

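/* Resolved at probe time by the platform driver (legacy cpsw.c or the
 * switchdev-based cpsw_new.c), which each use their own slave numbering;
 * the common code below goes through this hook to find the slave that
 * backs a given netdev.
 */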
int (*cpsw_slave_index)(struct cpsw_common *cpsw, struct cpsw_priv *priv);

void cpsw_intr_enable(struct cpsw_common *cpsw)
{
	writel_relaxed(0xFF, &cpsw->wr_regs->tx_en);
	writel_relaxed(0xFF, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, true);
}

void cpsw_intr_disable(struct cpsw_common *cpsw)
{
	writel_relaxed(0, &cpsw->wr_regs->tx_en);
	writel_relaxed(0, &cpsw->wr_regs->rx_en);

	cpdma_ctlr_int_ctrl(cpsw->dma, false);
}

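/* TX completion callback, invoked by CPDMA for every completed descriptor.
 * The token either wraps an xdp_frame (XDP_TX / ndo_xdp_xmit path) or is
 * the skb itself; cpsw_is_xdpf_handle() tells the two apart by a tag
 * encoded into the handle.
 */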
void cpsw_tx_handler(void *token, int len, int status)
{
	struct cpsw_meta_xdp	*xmeta;
	struct xdp_frame	*xdpf;
	struct net_device	*ndev;
	struct netdev_queue	*txq;
	struct sk_buff		*skb;
	int			ch;

	if (cpsw_is_xdpf_handle(token)) {
		xdpf = cpsw_handle_to_xdpf(token);
		xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
		ndev = xmeta->ndev;
		ch = xmeta->ch;
		xdp_return_frame(xdpf);
	} else {
		skb = token;
		ndev = skb->dev;
		ch = skb_get_queue_mapping(skb);
		cpts_tx_timestamp(ndev_to_cpsw(ndev)->cpts, skb);
		dev_kfree_skb_any(skb);
	}

	/* Check whether the queue is stopped due to stalled tx dma, if the
	 * queue is stopped then start the queue as we have free desc for tx
	 */
	txq = netdev_get_tx_queue(ndev, ch);
	if (unlikely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += len;
}

irqreturn_t cpsw_tx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->tx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_TX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[1]);
		cpsw->tx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_tx);
	return IRQ_HANDLED;
}

irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->rx_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_RX);

	if (cpsw->quirk_irq) {
		disable_irq_nosync(cpsw->irqs_table[0]);
		cpsw->rx_irq_disabled = true;
	}

	napi_schedule(&cpsw->napi_rx);
	return IRQ_HANDLED;
}

irqreturn_t cpsw_misc_interrupt(int irq, void *dev_id)
{
	struct cpsw_common *cpsw = dev_id;

	writel(0, &cpsw->wr_regs->misc_en);
	cpdma_ctlr_eoi(cpsw->dma, CPDMA_EOI_MISC);
	cpts_misc_interrupt(cpsw->cpts);
	writel(0x10, &cpsw->wr_regs->misc_en);

	return IRQ_HANDLED;
}

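/* Interrupt/NAPI handshake used by the handlers above and the poll
 * routines below: the hard IRQ masks further events (tx_en/rx_en := 0),
 * issues the CPDMA end-of-interrupt and schedules NAPI; the poll routine
 * re-arms the interrupt (0xff -> tx_en/rx_en) only once it completes
 * under budget. On quirk_irq hardware the IRQ line itself is additionally
 * disabled here and re-enabled from poll.
 */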
int cpsw_tx_mq_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_tx);
	int			num_tx, cur_budget, ch;
	u32			ch_map;
	struct cpsw_vector	*txv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_txchs_state(cpsw->dma);
	for (ch = 0, num_tx = 0; ch_map & 0xff; ch_map <<= 1, ch++) {
		if (!(ch_map & 0x80))
			continue;

		txv = &cpsw->txv[ch];
		if (unlikely(txv->budget > budget - num_tx))
			cur_budget = budget - num_tx;
		else
			cur_budget = txv->budget;

		num_tx += cpdma_chan_process(txv->ch, cur_budget);
		if (num_tx >= budget)
			break;
	}

	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
	}

	return num_tx;
}

int cpsw_tx_poll(struct napi_struct *napi_tx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_tx);
	int num_tx;

	num_tx = cpdma_chan_process(cpsw->txv[0].ch, budget);
	if (num_tx < budget) {
		napi_complete(napi_tx);
		writel(0xff, &cpsw->wr_regs->tx_en);
		if (cpsw->tx_irq_disabled) {
			cpsw->tx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[1]);
		}
	}

	return num_tx;
}

int cpsw_rx_mq_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common	*cpsw = napi_to_cpsw(napi_rx);
	int			num_rx, cur_budget, ch;
	u32			ch_map;
	struct cpsw_vector	*rxv;

	/* process every unprocessed channel */
	ch_map = cpdma_ctrl_rxchs_state(cpsw->dma);
	for (ch = 0, num_rx = 0; ch_map; ch_map >>= 1, ch++) {
		if (!(ch_map & 0x01))
			continue;

		rxv = &cpsw->rxv[ch];
		if (unlikely(rxv->budget > budget - num_rx))
			cur_budget = budget - num_rx;
		else
			cur_budget = rxv->budget;

		num_rx += cpdma_chan_process(rxv->ch, cur_budget);
		if (num_rx >= budget)
			break;
	}

	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
	}

	return num_rx;
}

int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
{
	struct cpsw_common *cpsw = napi_to_cpsw(napi_rx);
	int num_rx;

	num_rx = cpdma_chan_process(cpsw->rxv[0].ch, budget);
	if (num_rx < budget) {
		napi_complete_done(napi_rx, num_rx);
		writel(0xff, &cpsw->wr_regs->rx_en);
		if (cpsw->rx_irq_disabled) {
			cpsw->rx_irq_disabled = false;
			enable_irq(cpsw->irqs_table[0]);
		}
	}

	return num_rx;
}

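/* The switch can prepend a 4-byte VLAN encapsulation word to every packet
 * delivered to host port 0 (RX_VLAN_ENCAP). cpsw_rx_vlan_encap() below
 * parses that word: packet type, VID and priority are extracted with the
 * CPSW_RX_VLAN_ENCAP_HDR_* shifts/masks, the word is pulled off the skb,
 * and the 802.1Q tag is either handed to the stack via
 * __vlan_hwaccel_put_tag() or stripped, depending on the ALE untag rules.
 */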
void cpsw_rx_vlan_encap(struct sk_buff *skb)
{
	struct cpsw_priv *priv = netdev_priv(skb->dev);
	u32 rx_vlan_encap_hdr = *((u32 *)skb->data);
	struct cpsw_common *cpsw = priv->cpsw;
	u16 vtag, vid, prio, pkt_type;

	/* Remove VLAN header encapsulation word */
	skb_pull(skb, CPSW_RX_VLAN_ENCAP_HDR_SIZE);

	pkt_type = (rx_vlan_encap_hdr >>
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_SHIFT) &
		    CPSW_RX_VLAN_ENCAP_HDR_PKT_TYPE_MSK;
	/* Ignore unknown & Priority-tagged packets */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_RESERV ||
	    pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_PRIO_TAG)
		return;

	vid = (rx_vlan_encap_hdr >>
	       CPSW_RX_VLAN_ENCAP_HDR_VID_SHIFT) &
	       VLAN_VID_MASK;
	/* Ignore vid 0 and pass packet as is */
	if (!vid)
		return;

	/* Untag P0 packets if set for vlan */
	if (!cpsw_ale_get_vlan_p0_untag(cpsw->ale, vid)) {
		prio = (rx_vlan_encap_hdr >>
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_SHIFT) &
			CPSW_RX_VLAN_ENCAP_HDR_PRIO_MSK;

		vtag = (prio << VLAN_PRIO_SHIFT) | vid;
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vtag);
	}

	/* strip vlan tag for VLAN-tagged packet */
	if (pkt_type == CPSW_RX_VLAN_ENCAP_HDR_PKT_VLAN_TAG) {
		memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN);
		skb_pull(skb, VLAN_HLEN);
	}
}

void cpsw_set_slave_mac(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	slave_write(slave, mac_hi(priv->mac_addr), SA_HI);
	slave_write(slave, mac_lo(priv->mac_addr), SA_LO);
}

void soft_reset(const char *module, void __iomem *reg)
{
	unsigned long timeout = jiffies + HZ;

	writel_relaxed(1, reg);
	do {
		cpu_relax();
	} while ((readl_relaxed(reg) & 1) && time_after(timeout, jiffies));

	WARN(readl_relaxed(reg) & 1, "failed to soft-reset %s\n", module);
}

void cpsw_ndo_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int ch;

	cpsw_err(priv, tx_err, "transmit timeout, restarting dma\n");
	ndev->stats.tx_errors++;
	cpsw_intr_disable(cpsw);
	for (ch = 0; ch < cpsw->tx_ch_num; ch++) {
		cpdma_chan_stop(cpsw->txv[ch].ch);
		cpdma_chan_start(cpsw->txv[ch].ch);
	}

	cpsw_intr_enable(cpsw);
	netif_trans_update(ndev);
	netif_tx_wake_all_queues(ndev);
}

static int cpsw_get_common_speed(struct cpsw_common *cpsw)
{
	int i, speed;

	for (i = 0, speed = 0; i < cpsw->data.slaves; i++)
		if (cpsw->slaves[i].phy && cpsw->slaves[i].phy->link)
			speed += cpsw->slaves[i].phy->speed;

	return speed;
}

int cpsw_need_resplit(struct cpsw_common *cpsw)
{
	int i, rlim_ch_num;
	int speed, ch_rate;

	/* re-split resources only in case speed was changed */
	speed = cpsw_get_common_speed(cpsw);
	if (speed == cpsw->speed || !speed)
		return 0;

	cpsw->speed = speed;

	for (i = 0, rlim_ch_num = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(cpsw->txv[i].ch);
		if (!ch_rate)
			break;

		rlim_ch_num++;
	}

	/* cases not dependent on speed */
	if (!rlim_ch_num || rlim_ch_num == cpsw->tx_ch_num)
		return 0;

	return 1;
}

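/* Budget split done by cpsw_split_res(): the total NAPI weight is divided
 * between rate-limited TX channels proportionally to their configured rate
 * against max_rate, the remaining weight is spread evenly over the
 * non-limited channels, and any rounding remainder goes to the channel
 * with the biggest rate. A sketch of the proportional step:
 *
 *	txv[i].budget = ch_rate * NAPI_POLL_WEIGHT / max_rate;
 *
 * with the result bumped to at least 1 so every channel keeps some budget.
 */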
void cpsw_split_res(struct cpsw_common *cpsw)
{
	u32 consumed_rate = 0, bigest_rate = 0;
	struct cpsw_vector *txv = cpsw->txv;
	int i, ch_weight, rlim_ch_num = 0;
	int budget, bigest_rate_ch = 0;
	u32 ch_rate, max_rate;
	int ch_budget = 0;

	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (!ch_rate)
			continue;

		rlim_ch_num++;
		consumed_rate += ch_rate;
	}

	if (cpsw->tx_ch_num == rlim_ch_num) {
		max_rate = consumed_rate;
	} else if (!rlim_ch_num) {
		ch_budget = NAPI_POLL_WEIGHT / cpsw->tx_ch_num;
		bigest_rate = 0;
		max_rate = consumed_rate;
	} else {
		max_rate = cpsw->speed * 1000;

		/* if max_rate is less than expected due to reduced link speed,
		 * split proportionally according to the next potential max speed
		 */
		if (max_rate < consumed_rate)
			max_rate *= 10;

		if (max_rate < consumed_rate)
			max_rate *= 10;

		ch_budget = (consumed_rate * NAPI_POLL_WEIGHT) / max_rate;
		ch_budget = (NAPI_POLL_WEIGHT - ch_budget) /
			    (cpsw->tx_ch_num - rlim_ch_num);
		bigest_rate = (max_rate - consumed_rate) /
			      (cpsw->tx_ch_num - rlim_ch_num);
	}

	/* split tx weight/budget */
	budget = NAPI_POLL_WEIGHT;
	for (i = 0; i < cpsw->tx_ch_num; i++) {
		ch_rate = cpdma_chan_get_rate(txv[i].ch);
		if (ch_rate) {
			txv[i].budget = (ch_rate * NAPI_POLL_WEIGHT) / max_rate;
			if (!txv[i].budget)
				txv[i].budget++;
			if (ch_rate > bigest_rate) {
				bigest_rate_ch = i;
				bigest_rate = ch_rate;
			}

			ch_weight = (ch_rate * 100) / max_rate;
			if (!ch_weight)
				ch_weight++;
			cpdma_chan_set_weight(cpsw->txv[i].ch, ch_weight);
		} else {
			txv[i].budget = ch_budget;
			if (!txv[i].budget)
				txv[i].budget++;

			cpdma_chan_set_weight(cpsw->txv[i].ch, 0);
		}

		budget -= txv[i].budget;
	}

	if (budget)
		txv[bigest_rate_ch].budget += budget;

	/* split rx budget */
	budget = NAPI_POLL_WEIGHT;
	ch_budget = budget / cpsw->rx_ch_num;
	for (i = 0; i < cpsw->rx_ch_num; i++) {
		cpsw->rxv[i].budget = ch_budget;
		budget -= ch_budget;
	}

	if (budget)
		cpsw->rxv[0].budget += budget;
}

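/* One-time init of everything shared between the slave ports: probe the
 * IP version to pick the CPSW1/CPSW2 register layout, set up per-slave
 * register windows and MAC sliver handles, then create the ALE, the CPDMA
 * controller and the CPTS instance. Called by both platform drivers from
 * their probe routines.
 */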
int cpsw_init_common(struct cpsw_common *cpsw, void __iomem *ss_regs,
		     int ale_ageout, phys_addr_t desc_mem_phys,
		     int descs_pool_size)
{
	u32 slave_offset, sliver_offset, slave_size;
	struct cpsw_ale_params ale_params;
	struct cpsw_platform_data *data;
	struct cpdma_params dma_params;
	struct device *dev = cpsw->dev;
	struct device_node *cpts_node;
	void __iomem *cpts_regs;
	int ret = 0, i;

	data = &cpsw->data;
	cpsw->rx_ch_num = 1;
	cpsw->tx_ch_num = 1;

	cpsw->version = readl(&cpsw->regs->id_ver);

	memset(&dma_params, 0, sizeof(dma_params));
	memset(&ale_params, 0, sizeof(ale_params));

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw->host_port_regs = ss_regs + CPSW1_HOST_PORT_OFFSET;
		cpts_regs	     = ss_regs + CPSW1_CPTS_OFFSET;
		cpsw->hw_stats	     = ss_regs + CPSW1_HW_STATS;
		dma_params.dmaregs   = ss_regs + CPSW1_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW1_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW1_ALE_OFFSET;
		slave_offset         = CPSW1_SLAVE_OFFSET;
		slave_size           = CPSW1_SLAVE_SIZE;
		sliver_offset        = CPSW1_SLIVER_OFFSET;
		dma_params.desc_mem_phys = 0;
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
	case CPSW_VERSION_4:
		cpsw->host_port_regs = ss_regs + CPSW2_HOST_PORT_OFFSET;
		cpts_regs	     = ss_regs + CPSW2_CPTS_OFFSET;
		cpsw->hw_stats	     = ss_regs + CPSW2_HW_STATS;
		dma_params.dmaregs   = ss_regs + CPSW2_CPDMA_OFFSET;
		dma_params.txhdp     = ss_regs + CPSW2_STATERAM_OFFSET;
		ale_params.ale_regs  = ss_regs + CPSW2_ALE_OFFSET;
		slave_offset         = CPSW2_SLAVE_OFFSET;
		slave_size           = CPSW2_SLAVE_SIZE;
		sliver_offset        = CPSW2_SLIVER_OFFSET;
		dma_params.desc_mem_phys = desc_mem_phys;
		break;
	default:
		dev_err(dev, "unknown version 0x%08x\n", cpsw->version);
		return -ENODEV;
	}

	for (i = 0; i < cpsw->data.slaves; i++) {
		struct cpsw_slave *slave = &cpsw->slaves[i];
		void __iomem	  *regs = cpsw->regs;

		slave->slave_num = i;
		slave->data	= &cpsw->data.slave_data[i];
		slave->regs	= regs + slave_offset;
		slave->port_vlan = slave->data->dual_emac_res_vlan;
		slave->mac_sl = cpsw_sl_get("cpsw", dev, regs + sliver_offset);
		if (IS_ERR(slave->mac_sl))
			return PTR_ERR(slave->mac_sl);

		slave_offset  += slave_size;
		sliver_offset += SLIVER_SIZE;
	}

	ale_params.dev		= dev;
	ale_params.ale_ageout	= ale_ageout;
	ale_params.ale_ports	= CPSW_ALE_PORTS_NUM;
	ale_params.dev_id	= "cpsw";
	ale_params.bus_freq	= cpsw->bus_freq_mhz * 1000000;

	cpsw->ale = cpsw_ale_create(&ale_params);
	if (IS_ERR(cpsw->ale)) {
		dev_err(dev, "error initializing ale engine\n");
		return PTR_ERR(cpsw->ale);
	}

	dma_params.dev		= dev;
	dma_params.rxthresh	= dma_params.dmaregs + CPDMA_RXTHRESH;
	dma_params.rxfree	= dma_params.dmaregs + CPDMA_RXFREE;
	dma_params.rxhdp	= dma_params.txhdp + CPDMA_RXHDP;
	dma_params.txcp		= dma_params.txhdp + CPDMA_TXCP;
	dma_params.rxcp		= dma_params.txhdp + CPDMA_RXCP;

	dma_params.num_chan		= data->channels;
	dma_params.has_soft_reset	= true;
	dma_params.min_packet_size	= CPSW_MIN_PACKET_SIZE;
	dma_params.desc_mem_size	= data->bd_ram_size;
	dma_params.desc_align		= 16;
	dma_params.has_ext_regs		= true;
	dma_params.desc_hw_addr		= dma_params.desc_mem_phys;
	dma_params.bus_freq_mhz		= cpsw->bus_freq_mhz;
	dma_params.descs_pool_size	= descs_pool_size;

	cpsw->dma = cpdma_ctlr_create(&dma_params);
	if (!cpsw->dma) {
		dev_err(dev, "error initializing dma\n");
		return -ENOMEM;
	}

	cpts_node = of_get_child_by_name(cpsw->dev->of_node, "cpts");
	if (!cpts_node)
		cpts_node = cpsw->dev->of_node;

	cpsw->cpts = cpts_create(cpsw->dev, cpts_regs, cpts_node,
				 CPTS_N_ETX_TS);
	if (IS_ERR(cpsw->cpts)) {
		ret = PTR_ERR(cpsw->cpts);
		cpdma_ctlr_destroy(cpsw->dma);
	}
	of_node_put(cpts_node);

	return ret;
}

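/* Hardware timestamping (CPTS). V1 keeps its own TS_CTL/TS_SEQ_LTYPE
 * registers per slave; V2/V3 fold the same controls into CPSW2_CONTROL
 * plus a shared TS_SEQ_MTYPE and the ts_ltype/vlan_ltype registers.
 * Both variants timestamp PTP event messages only (EVENT_MSG_BITS), with
 * the sequence-id offset fixed at byte 30 of the PTP header.
 */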
#if IS_ENABLED(CONFIG_TI_CPTS)

static void cpsw_hwtstamp_v1(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	u32 ts_en, seq_id;

	if (!priv->tx_ts_enabled && !priv->rx_ts_enabled) {
		slave_write(slave, 0, CPSW1_TS_CTL);
		return;
	}

	seq_id = (30 << CPSW_V1_SEQ_ID_OFS_SHIFT) | ETH_P_1588;
	ts_en = EVENT_MSG_BITS << CPSW_V1_MSG_TYPE_OFS;

	if (priv->tx_ts_enabled)
		ts_en |= CPSW_V1_TS_TX_EN;

	if (priv->rx_ts_enabled)
		ts_en |= CPSW_V1_TS_RX_EN;

	slave_write(slave, ts_en, CPSW1_TS_CTL);
	slave_write(slave, seq_id, CPSW1_TS_SEQ_LTYPE);
}

static void cpsw_hwtstamp_v2(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 ctrl, mtype;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];

	ctrl = slave_read(slave, CPSW2_CONTROL);
	switch (cpsw->version) {
	case CPSW_VERSION_2:
		ctrl &= ~CTRL_V2_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V2_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V2_RX_TS_BITS;
		break;
	case CPSW_VERSION_3:
	default:
		ctrl &= ~CTRL_V3_ALL_TS_MASK;

		if (priv->tx_ts_enabled)
			ctrl |= CTRL_V3_TX_TS_BITS;

		if (priv->rx_ts_enabled)
			ctrl |= CTRL_V3_RX_TS_BITS;
		break;
	}

	mtype = (30 << TS_SEQ_ID_OFFSET_SHIFT) | EVENT_MSG_BITS;

	slave_write(slave, mtype, CPSW2_TS_SEQ_MTYPE);
	slave_write(slave, ctrl, CPSW2_CONTROL);
	writel_relaxed(ETH_P_1588, &cpsw->regs->ts_ltype);
	writel_relaxed(ETH_P_8021Q, &cpsw->regs->vlan_ltype);
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	if (copy_from_user(&cfg, ifr->ifr_data, sizeof(cfg)))
		return -EFAULT;

	if (cfg.tx_type != HWTSTAMP_TX_OFF && cfg.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	switch (cfg.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		priv->rx_ts_enabled = 0;
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_NTP_ALL:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
		return -ERANGE;
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
		priv->rx_ts_enabled = HWTSTAMP_FILTER_PTP_V2_EVENT;
		cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
		break;
	default:
		return -ERANGE;
	}

	priv->tx_ts_enabled = cfg.tx_type == HWTSTAMP_TX_ON;

	switch (cpsw->version) {
	case CPSW_VERSION_1:
		cpsw_hwtstamp_v1(priv);
		break;
	case CPSW_VERSION_2:
	case CPSW_VERSION_3:
		cpsw_hwtstamp_v2(priv);
		break;
	default:
		WARN_ON(1);
	}

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}

static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	struct cpsw_common *cpsw = ndev_to_cpsw(dev);
	struct cpsw_priv *priv = netdev_priv(dev);
	struct hwtstamp_config cfg;

	if (cpsw->version != CPSW_VERSION_1 &&
	    cpsw->version != CPSW_VERSION_2 &&
	    cpsw->version != CPSW_VERSION_3)
		return -EOPNOTSUPP;

	cfg.flags = 0;
	cfg.tx_type = priv->tx_ts_enabled ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
	cfg.rx_filter = priv->rx_ts_enabled;

	return copy_to_user(ifr->ifr_data, &cfg, sizeof(cfg)) ? -EFAULT : 0;
}
#else
static int cpsw_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}

static int cpsw_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
{
	return -EOPNOTSUPP;
}
#endif /*CONFIG_TI_CPTS*/

int cpsw_ndo_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
	struct cpsw_priv *priv = netdev_priv(dev);
	struct cpsw_common *cpsw = priv->cpsw;
	int slave_no = cpsw_slave_index(cpsw, priv);
	struct phy_device *phy;

	if (!netif_running(dev))
		return -EINVAL;

	phy = cpsw->slaves[slave_no].phy;

	if (!phy_has_hwtstamp(phy)) {
		switch (cmd) {
		case SIOCSHWTSTAMP:
			return cpsw_hwtstamp_set(dev, req);
		case SIOCGHWTSTAMP:
			return cpsw_hwtstamp_get(dev, req);
		}
	}

	if (phy)
		return phy_mii_ioctl(phy, req, cmd);

	return -EOPNOTSUPP;
}

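/* Per-queue TX rate limiting via the ndo_set_tx_maxrate hook. The rate
 * arrives in Mbps and is handed to CPDMA in kbit/s (rate * 1000). As a
 * usage sketch (the standard sysfs knob for this ndo, nothing
 * cpsw-specific):
 *
 *	# echo 100 > /sys/class/net/eth0/queues/tx-1/tx_maxrate
 *
 * would cap TX queue 1 at 100 Mbps and trigger a re-split of the NAPI
 * budgets via cpsw_split_res().
 */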
int cpsw_ndo_set_tx_maxrate(struct net_device *ndev, int queue, u32 rate)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 min_rate;
	u32 ch_rate;
	int i, ret;

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate)
		return 0;

	ch_rate = rate * 1000;
	min_rate = cpdma_chan_get_min_rate(cpsw->dma);
	if ((ch_rate < min_rate && ch_rate)) {
		dev_err(priv->dev, "The channel rate cannot be less than %dMbps",
			min_rate);
		return -EINVAL;
	}

	if (rate > cpsw->speed) {
		dev_err(priv->dev, "The channel rate cannot be more than 2Gbps");
		return -EINVAL;
	}

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	ret = cpdma_chan_set_rate(cpsw->txv[queue].ch, ch_rate);
	pm_runtime_put(cpsw->dev);

	if (ret)
		return ret;

	/* update rates for slaves tx queues */
	for (i = 0; i < cpsw->data.slaves; i++) {
		slave = &cpsw->slaves[i];
		if (!slave->ndev)
			continue;

		netdev_get_tx_queue(slave->ndev, queue)->tx_maxrate = rate;
	}

	cpsw_split_res(cpsw);
	return ret;
}

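/* CBS (IEEE 802.1Qav) offload: traffic classes are mapped to the port
 * FIFO shapers, highest class first, with the last tc always left on the
 * unshaped FIFO 0. A hedged user-space example (standard tc mqprio/cbs
 * syntax, not verified against this exact driver):
 *
 *	# tc qdisc replace dev eth0 handle 100: parent root mqprio num_tc 3 \
 *		map 2 2 1 0 2 2 2 2 queues 1@0 1@1 2@2 hw 1
 *	# tc qdisc replace dev eth0 parent 100:1 cbs locredit -1440 \
 *		hicredit 60 sendslope -960000 idleslope 40000 offload 1
 */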
static int cpsw_tc_to_fifo(int tc, int num_tc)
{
	if (tc == num_tc - 1)
		return 0;

	return CPSW_FIFO_SHAPERS_NUM - tc;
}

bool cpsw_shp_is_off(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = 7 << shift;
	val = val & mask;

	return !val;
}

static void cpsw_fifo_shp_on(struct cpsw_priv *priv, int fifo, int on)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 shift, mask, val;

	val = readl_relaxed(&cpsw->regs->ptype);

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	shift = CPSW_FIFO_SHAPE_EN_SHIFT + 3 * slave->slave_num;
	mask = (1 << --fifo) << shift;
	val = on ? val | mask : val & ~mask;

	writel_relaxed(val, &cpsw->regs->ptype);
}

static int cpsw_set_fifo_bw(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 val = 0, send_pct, shift;
	struct cpsw_slave *slave;
	int pct = 0, i;

	if (bw > priv->shp_cfg_speed * 1000)
		goto err;

	/* shaping has to stay enabled for highest fifos linearly
	 * and fifo bw no more than interface can allow
	 */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	send_pct = slave_read(slave, SEND_PERCENT);
	for (i = CPSW_FIFO_SHAPERS_NUM; i > 0; i--) {
		if (!bw) {
			if (i >= fifo || !priv->fifo_bw[i])
				continue;

			dev_warn(priv->dev, "Prev FIFO%d is shaped", i);
			continue;
		}

		if (!priv->fifo_bw[i] && i > fifo) {
			dev_err(priv->dev, "Upper FIFO%d is not shaped", i);
			return -EINVAL;
		}

		shift = (i - 1) * CPSW_PCT_OFFSET;
		if (i == fifo) {
			send_pct &= ~(CPSW_PCT_MASK << shift);
			val = DIV_ROUND_UP(bw, priv->shp_cfg_speed * 10);
			if (!val)
				val = 1;

			send_pct |= val << shift;
			pct += val;
			continue;
		}

		if (priv->fifo_bw[i])
			pct += (send_pct >> shift) & CPSW_PCT_MASK;
	}

	if (pct >= 100)
		goto err;

	slave_write(slave, send_pct, SEND_PERCENT);
	priv->fifo_bw[fifo] = bw;

	dev_warn(priv->dev, "set FIFO%d bw = %d\n", fifo,
		 DIV_ROUND_CLOSEST(val * priv->shp_cfg_speed, 100));

	return 0;
err:
	dev_err(priv->dev, "Bandwidth doesn't fit in tc configuration");
	return -EINVAL;
}

static int cpsw_set_fifo_rlimit(struct cpsw_priv *priv, int fifo, int bw)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	u32 tx_in_ctl_rg, val;
	int ret;

	ret = cpsw_set_fifo_bw(priv, fifo, bw);
	if (ret)
		return ret;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	tx_in_ctl_rg = cpsw->version == CPSW_VERSION_1 ?
		       CPSW1_TX_IN_CTL : CPSW2_TX_IN_CTL;

	if (!bw)
		cpsw_fifo_shp_on(priv, fifo, bw);

	val = slave_read(slave, tx_in_ctl_rg);
	if (cpsw_shp_is_off(priv)) {
		/* disable FIFOs rate limited queues */
		val &= ~(0xf << CPSW_FIFO_RATE_EN_SHIFT);

		/* set type of FIFO queues to normal priority mode */
		val &= ~(3 << CPSW_FIFO_QUEUE_TYPE_SHIFT);

		/* set type of FIFO queues to be rate limited */
		if (bw)
			val |= 2 << CPSW_FIFO_QUEUE_TYPE_SHIFT;
		else
			priv->shp_cfg_speed = 0;
	}

	/* toggle a FIFO rate limited queue */
	if (bw)
		val |= BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	else
		val &= ~BIT(fifo + CPSW_FIFO_RATE_EN_SHIFT);
	slave_write(slave, val, tx_in_ctl_rg);

	/* FIFO transmit shape enable */
	cpsw_fifo_shp_on(priv, fifo, bw);
	return 0;
}

/* shaping for class A should be set first */
static int cpsw_set_cbs(struct net_device *ndev,
			struct tc_cbs_qopt_offload *qopt)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_slave *slave;
	int prev_speed = 0;
	int tc, ret, fifo;
	u32 bw = 0;

	tc = netdev_txq_to_tc(priv->ndev, qopt->queue);

	/* enable channels in backward order, as highest FIFOs must be rate
	 * limited first and for compliance with CPDMA rate limited channels
	 * that are also used in backward order. FIFO0 cannot be rate limited.
	 */
	fifo = cpsw_tc_to_fifo(tc, ndev->num_tc);
	if (!fifo) {
		dev_err(priv->dev, "Last tc%d can't be rate limited", tc);
		return -EINVAL;
	}

	/* do nothing, it's disabled anyway */
	if (!qopt->enable && !priv->fifo_bw[fifo])
		return 0;

	/* shapers can be set if link speed is known */
	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	if (slave->phy && slave->phy->link) {
		if (priv->shp_cfg_speed &&
		    priv->shp_cfg_speed != slave->phy->speed)
			prev_speed = priv->shp_cfg_speed;

		priv->shp_cfg_speed = slave->phy->speed;
	}

	if (!priv->shp_cfg_speed) {
		dev_err(priv->dev, "Link speed is not known");
		return -1;
	}

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	bw = qopt->enable ? qopt->idleslope : 0;
	ret = cpsw_set_fifo_rlimit(priv, fifo, bw);
	if (ret) {
		priv->shp_cfg_speed = prev_speed;
		prev_speed = 0;
	}

	if (bw && prev_speed)
		dev_warn(priv->dev,
			 "Speed was changed, CBS shaper speeds are changed!");

	pm_runtime_put_sync(cpsw->dev);
	return ret;
}

static int cpsw_set_mqprio(struct net_device *ndev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct cpsw_priv *priv = netdev_priv(ndev);
	struct cpsw_common *cpsw = priv->cpsw;
	int fifo, num_tc, count, offset;
	struct cpsw_slave *slave;
	u32 tx_prio_map = 0;
	int i, tc, ret;

	num_tc = mqprio->qopt.num_tc;
	if (num_tc > CPSW_TC_NUM)
		return -EINVAL;

	if (mqprio->mode != TC_MQPRIO_MODE_DCB)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(cpsw->dev);
	if (ret < 0)
		return ret;

	if (num_tc) {
		for (i = 0; i < 8; i++) {
			tc = mqprio->qopt.prio_tc_map[i];
			fifo = cpsw_tc_to_fifo(tc, num_tc);
			tx_prio_map |= fifo << (4 * i);
		}

		netdev_set_num_tc(ndev, num_tc);
		for (i = 0; i < num_tc; i++) {
			count = mqprio->qopt.count[i];
			offset = mqprio->qopt.offset[i];
			netdev_set_tc_queue(ndev, i, count, offset);
		}
	}

	if (!mqprio->qopt.hw) {
		/* restore default configuration */
		netdev_reset_tc(ndev);
		tx_prio_map = TX_PRIORITY_MAPPING;
	}

	priv->mqprio_hw = mqprio->qopt.hw;

	offset = cpsw->version == CPSW_VERSION_1 ?
		 CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave = &cpsw->slaves[cpsw_slave_index(cpsw, priv)];
	slave_write(slave, tx_prio_map, offset);

	pm_runtime_put_sync(cpsw->dev);

	return 0;
}

static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f);

int cpsw_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
		      void *type_data)
{
	switch (type) {
	case TC_SETUP_QDISC_CBS:
		return cpsw_set_cbs(ndev, type_data);

	case TC_SETUP_QDISC_MQPRIO:
		return cpsw_set_mqprio(ndev, type_data);

	case TC_SETUP_BLOCK:
		return cpsw_qos_setup_tc_block(ndev, type_data);

	default:
		return -EOPNOTSUPP;
	}
}

void cpsw_cbs_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	int fifo, bw;

	for (fifo = CPSW_FIFO_SHAPERS_NUM; fifo > 0; fifo--) {
		bw = priv->fifo_bw[fifo];
		if (!bw)
			continue;

		cpsw_set_fifo_rlimit(priv, fifo, bw);
	}
}

void cpsw_mqprio_resume(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	u32 tx_prio_map = 0;
	int i, tc, fifo;
	u32 tx_prio_rg;

	if (!priv->mqprio_hw)
		return;

	for (i = 0; i < 8; i++) {
		tc = netdev_get_prio_tc_map(priv->ndev, i);
		fifo = CPSW_FIFO_SHAPERS_NUM - tc;
		tx_prio_map |= fifo << (4 * i);
	}

	tx_prio_rg = cpsw->version == CPSW_VERSION_1 ?
		     CPSW1_TX_PRI_MAP : CPSW2_TX_PRI_MAP;

	slave_write(slave, tx_prio_map, tx_prio_rg);
}

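/* RX buffers come from per-channel page_pool instances (created below in
 * cpsw_create_xdp_rxqs()): pages are DMA-mapped by the pool
 * (PP_FLAG_DMA_MAP), a small cpsw_meta_xdp header at CPSW_XMETA_OFFSET
 * inside each page records the owning ndev and channel for the completion
 * handler, and the packet area starts past CPSW_HEADROOM_NA to leave XDP
 * headroom.
 */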
int cpsw_fill_rx_channels(struct cpsw_priv *priv)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct page_pool *pool;
	struct page *page;
	int ch_buf_num;
	int ch, i, ret;
	dma_addr_t dma;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		pool = cpsw->page_pool[ch];
		ch_buf_num = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
		for (i = 0; i < ch_buf_num; i++) {
			page = page_pool_dev_alloc_pages(pool);
			if (!page) {
				cpsw_err(priv, ifup, "allocate rx page err\n");
				return -ENOMEM;
			}

			xmeta = page_address(page) + CPSW_XMETA_OFFSET;
			xmeta->ndev = priv->ndev;
			xmeta->ch = ch;

			dma = page_pool_get_dma_addr(page) + CPSW_HEADROOM_NA;
			ret = cpdma_chan_idle_submit_mapped(cpsw->rxv[ch].ch,
							    page, dma,
							    cpsw->rx_packet_max,
							    0);
			if (ret < 0) {
				cpsw_err(priv, ifup,
					 "cannot submit page to channel %d rx, error %d\n",
					 ch, ret);
				page_pool_recycle_direct(pool, page);
				return ret;
			}
		}

		cpsw_info(priv, ifup, "ch %d rx, submitted %d descriptors\n",
			  ch, ch_buf_num);
	}

	return 0;
}

static struct page_pool *cpsw_create_page_pool(struct cpsw_common *cpsw,
					       int size)
{
	struct page_pool_params pp_params = {};
	struct page_pool *pool;

	pp_params.order = 0;
	pp_params.flags = PP_FLAG_DMA_MAP;
	pp_params.pool_size = size;
	pp_params.nid = NUMA_NO_NODE;
	pp_params.dma_dir = DMA_BIDIRECTIONAL;
	pp_params.dev = cpsw->dev;

	pool = page_pool_create(&pp_params);
	if (IS_ERR(pool))
		dev_err(cpsw->dev, "cannot create rx page pool\n");

	return pool;
}

static int cpsw_create_rx_pool(struct cpsw_common *cpsw, int ch)
{
	struct page_pool *pool;
	int ret = 0, pool_size;

	pool_size = cpdma_chan_get_rx_buf_num(cpsw->rxv[ch].ch);
	pool = cpsw_create_page_pool(cpsw, pool_size);
	if (IS_ERR(pool))
		ret = PTR_ERR(pool);
	else
		cpsw->page_pool[ch] = pool;

	return ret;
}

static int cpsw_ndev_create_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct xdp_rxq_info *rxq;
	struct page_pool *pool;
	int ret;

	pool = cpsw->page_pool[ch];
	rxq = &priv->xdp_rxq[ch];

	ret = xdp_rxq_info_reg(rxq, priv->ndev, ch, 0);
	if (ret)
		return ret;

	ret = xdp_rxq_info_reg_mem_model(rxq, MEM_TYPE_PAGE_POOL, pool);
	if (ret)
		xdp_rxq_info_unreg(rxq);

	return ret;
}

static void cpsw_ndev_destroy_xdp_rxq(struct cpsw_priv *priv, int ch)
{
	struct xdp_rxq_info *rxq = &priv->xdp_rxq[ch];

	if (!xdp_rxq_info_is_reg(rxq))
		return;

	xdp_rxq_info_unreg(rxq);
}

void cpsw_destroy_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			cpsw_ndev_destroy_xdp_rxq(netdev_priv(ndev), ch);
		}

		page_pool_destroy(cpsw->page_pool[ch]);
		cpsw->page_pool[ch] = NULL;
	}
}

int cpsw_create_xdp_rxqs(struct cpsw_common *cpsw)
{
	struct net_device *ndev;
	int i, ch, ret;

	for (ch = 0; ch < cpsw->rx_ch_num; ch++) {
		ret = cpsw_create_rx_pool(cpsw, ch);
		if (ret)
			goto err_cleanup;

		/* using same page pool is allowed as no running rx handlers
		 * simultaneously for both ndevs
		 */
		for (i = 0; i < cpsw->data.slaves; i++) {
			ndev = cpsw->slaves[i].ndev;
			if (!ndev)
				continue;

			ret = cpsw_ndev_create_xdp_rxq(netdev_priv(ndev), ch);
			if (ret)
				goto err_cleanup;
		}
	}

	return 0;

err_cleanup:
	cpsw_destroy_xdp_rxqs(cpsw);

	return ret;
}

static int cpsw_xdp_prog_setup(struct cpsw_priv *priv, struct netdev_bpf *bpf)
{
	struct bpf_prog *prog = bpf->prog;

	if (!priv->xdpi.prog && !prog)
		return 0;

	WRITE_ONCE(priv->xdp_prog, prog);

	xdp_attachment_setup(&priv->xdpi, bpf);

	return 0;
}

int cpsw_ndo_bpf(struct net_device *ndev, struct netdev_bpf *bpf)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		return cpsw_xdp_prog_setup(priv, bpf);

	default:
		return -EINVAL;
	}
}

int cpsw_xdp_tx_frame(struct cpsw_priv *priv, struct xdp_frame *xdpf,
		      struct page *page, int port)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct cpsw_meta_xdp *xmeta;
	struct cpdma_chan *txch;
	dma_addr_t dma;
	int ret;

	xmeta = (void *)xdpf + CPSW_XMETA_OFFSET;
	xmeta->ndev = priv->ndev;
	xmeta->ch = 0;
	txch = cpsw->txv[0].ch;

	if (page) {
		dma = page_pool_get_dma_addr(page);
		dma += xdpf->headroom + sizeof(struct xdp_frame);
		ret = cpdma_chan_submit_mapped(txch, cpsw_xdpf_to_handle(xdpf),
					       dma, xdpf->len, port);
	} else {
		if (sizeof(*xmeta) > xdpf->headroom)
			return -EINVAL;

		ret = cpdma_chan_submit(txch, cpsw_xdpf_to_handle(xdpf),
					xdpf->data, xdpf->len, port);
	}

	if (ret)
		priv->ndev->stats.tx_dropped++;

	return ret;
}

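/* Runs the attached XDP program on one received buffer. Returns
 * CPSW_XDP_PASS when the stack should still see the packet and
 * CPSW_XDP_CONSUMED when the buffer was claimed (TX, redirect or drop);
 * in the drop paths the page goes straight back to the page_pool.
 */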
int cpsw_run_xdp(struct cpsw_priv *priv, int ch, struct xdp_buff *xdp,
		 struct page *page, int port, int *len)
{
	struct cpsw_common *cpsw = priv->cpsw;
	struct net_device *ndev = priv->ndev;
	int ret = CPSW_XDP_CONSUMED;
	struct xdp_frame *xdpf;
	struct bpf_prog *prog;
	u32 act;

	prog = READ_ONCE(priv->xdp_prog);
	if (!prog)
		return CPSW_XDP_PASS;

	act = bpf_prog_run_xdp(prog, xdp);
	/* XDP prog might have changed packet data and boundaries */
	*len = xdp->data_end - xdp->data;

	switch (act) {
	case XDP_PASS:
		ret = CPSW_XDP_PASS;
		goto out;
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf))
			goto drop;

		if (cpsw_xdp_tx_frame(priv, xdpf, page, port))
			xdp_return_frame_rx_napi(xdpf);
		break;
	case XDP_REDIRECT:
		if (xdp_do_redirect(ndev, xdp, prog))
			goto drop;

		/* Have to flush here, per packet, instead of doing it in bulk
		 * at the end of the napi handler. The RX devices on this
		 * particular hardware is sharing a common queue, so the
		 * incoming device might change per packet.
		 */
		xdp_do_flush();
		break;
	default:
		bpf_warn_invalid_xdp_action(ndev, prog, act);
		fallthrough;
	case XDP_ABORTED:
		trace_xdp_exception(ndev, prog, act);
		fallthrough;	/* handle aborts by dropping packet */
	case XDP_DROP:
		ndev->stats.rx_bytes += *len;
		ndev->stats.rx_packets++;
		goto drop;
	}

	ndev->stats.rx_bytes += *len;
	ndev->stats.rx_packets++;
out:
	return ret;
drop:
	page_pool_recycle_direct(cpsw->page_pool[ch], page);
	return ret;
}

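/* tc-flower policer offload: the ALE can rate-limit broadcast and
 * multicast ingress per port, in packets per second. Only dst_mac
 * ff:ff:ff:ff:ff:ff (broadcast) or the 01:00:00:00:00:00 multicast
 * prefix is accepted. A hedged user-space example (iproute2 packet-rate
 * policing, not verified against this exact driver):
 *
 *	# tc qdisc add dev eth0 clsact
 *	# tc filter add dev eth0 ingress flower skip_sw \
 *		dst_mac ff:ff:ff:ff:ff:ff \
 *		action police pkts_rate 1000 pkts_burst 1 drop
 */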
static int cpsw_qos_clsflower_add_policer(struct cpsw_priv *priv,
					  struct netlink_ext_ack *extack,
					  struct flow_cls_offload *cls,
					  u64 rate_pkt_ps)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct flow_match_eth_addrs match;
	u32 port_id;
	int ret;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
		return -EOPNOTSUPP;
	}

	flow_rule_match_eth_addrs(rule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on source MAC not supported");
		return -EOPNOTSUPP;
	}

	port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (is_broadcast_ether_addr(match.key->dst) &&
	    is_broadcast_ether_addr(match.mask->dst)) {
		ret = cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, rate_pkt_ps);
		if (ret)
			return ret;

		priv->ale_bc_ratelimit.cookie = cls->cookie;
		priv->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
		   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
		ret = cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, rate_pkt_ps);
		if (ret)
			return ret;

		priv->ale_mc_ratelimit.cookie = cls->cookie;
		priv->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
					       const struct flow_action_entry *act,
					       struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int cpsw_qos_configure_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	const struct flow_action_entry *act;
	int i, ret;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			ret = cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
			if (ret)
				return ret;

			return cpsw_qos_clsflower_add_policer(priv, extack, cls,
							      act->police.rate_pkt_ps);
		default:
			NL_SET_ERR_MSG_MOD(extack, "Action not supported");
			return -EOPNOTSUPP;
		}
	}
	return -EOPNOTSUPP;
}

static int cpsw_qos_delete_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls)
{
	u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (cls->cookie == priv->ale_bc_ratelimit.cookie) {
		priv->ale_bc_ratelimit.cookie = 0;
		priv->ale_bc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id, 0);
	}

	if (cls->cookie == priv->ale_mc_ratelimit.cookie) {
		priv->ale_mc_ratelimit.cookie = 0;
		priv->ale_mc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id, 0);
	}

	return 0;
}

static int cpsw_qos_setup_tc_clsflower(struct cpsw_priv *priv, struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return cpsw_qos_configure_clsflower(priv, cls_flower);
	case FLOW_CLS_DESTROY:
		return cpsw_qos_delete_clsflower(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct cpsw_priv *priv = cb_priv;
	int ret;

	if (!tc_cls_can_offload_and_chain0(priv->ndev, type_data))
		return -EOPNOTSUPP;

	ret = pm_runtime_get_sync(priv->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(priv->dev);
		return ret;
	}

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		ret = cpsw_qos_setup_tc_clsflower(priv, type_data);
		break;
	default:
		ret = -EOPNOTSUPP;
	}

	pm_runtime_put(priv->dev);
	return ret;
}

static LIST_HEAD(cpsw_qos_block_cb_list);

static int cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
{
	struct cpsw_priv *priv = netdev_priv(ndev);

	return flow_block_cb_setup_simple(f, &cpsw_qos_block_cb_list,
					  cpsw_qos_setup_tc_block_cb,
					  priv, priv, true);
}

void cpsw_qos_clsflower_resume(struct cpsw_priv *priv)
{
	u32 port_id = cpsw_slave_index(priv->cpsw, priv) + 1;

	if (priv->ale_bc_ratelimit.cookie)
		cpsw_ale_rx_ratelimit_bc(priv->cpsw->ale, port_id,
					 priv->ale_bc_ratelimit.rate_packet_ps);

	if (priv->ale_mc_ratelimit.cookie)
		cpsw_ale_rx_ratelimit_mc(priv->cpsw->ale, port_id,
					 priv->ale_mc_ratelimit.rate_packet_ps);
}