// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/*
 * Copyright (c) 2018 Synopsys, Inc. and/or its affiliates.
 * stmmac XGMAC support.
 */
7 #include <linux/bitrev.h>
8 #include <linux/crc32.h>
9 #include <linux/iopoll.h>
11 #include "stmmac_ptp.h"
12 #include "dwxlgmac2.h"
15 static void dwxgmac2_core_init(struct mac_device_info *hw,
16 struct net_device *dev)
18 void __iomem *ioaddr = hw->pcsr;
21 tx = readl(ioaddr + XGMAC_TX_CONFIG);
22 rx = readl(ioaddr + XGMAC_RX_CONFIG);
24 tx |= XGMAC_CORE_INIT_TX;
25 rx |= XGMAC_CORE_INIT_RX;
28 tx |= XGMAC_CONFIG_TE;
29 tx &= ~hw->link.speed_mask;
33 tx |= hw->link.xgmii.speed10000;
36 tx |= hw->link.speed2500;
40 tx |= hw->link.speed1000;
45 writel(tx, ioaddr + XGMAC_TX_CONFIG);
46 writel(rx, ioaddr + XGMAC_RX_CONFIG);
47 writel(XGMAC_INT_DEFAULT_EN, ioaddr + XGMAC_INT_EN);
50 static void dwxgmac2_set_mac(void __iomem *ioaddr, bool enable)
52 u32 tx = readl(ioaddr + XGMAC_TX_CONFIG);
53 u32 rx = readl(ioaddr + XGMAC_RX_CONFIG);
56 tx |= XGMAC_CONFIG_TE;
57 rx |= XGMAC_CONFIG_RE;
59 tx &= ~XGMAC_CONFIG_TE;
60 rx &= ~XGMAC_CONFIG_RE;
63 writel(tx, ioaddr + XGMAC_TX_CONFIG);
64 writel(rx, ioaddr + XGMAC_RX_CONFIG);
67 static int dwxgmac2_rx_ipc(struct mac_device_info *hw)
69 void __iomem *ioaddr = hw->pcsr;
72 value = readl(ioaddr + XGMAC_RX_CONFIG);
74 value |= XGMAC_CONFIG_IPC;
76 value &= ~XGMAC_CONFIG_IPC;
77 writel(value, ioaddr + XGMAC_RX_CONFIG);
79 return !!(readl(ioaddr + XGMAC_RX_CONFIG) & XGMAC_CONFIG_IPC);
82 static void dwxgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
85 void __iomem *ioaddr = hw->pcsr;
88 value = readl(ioaddr + XGMAC_RXQ_CTRL0) & ~XGMAC_RXQEN(queue);
89 if (mode == MTL_QUEUE_AVB)
90 value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
91 else if (mode == MTL_QUEUE_DCB)
92 value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
93 writel(value, ioaddr + XGMAC_RXQ_CTRL0);
96 static void dwxgmac2_rx_queue_prio(struct mac_device_info *hw, u32 prio,
99 void __iomem *ioaddr = hw->pcsr;
104 ctrl2 = readl(ioaddr + XGMAC_RXQ_CTRL2);
105 ctrl3 = readl(ioaddr + XGMAC_RXQ_CTRL3);
107 /* The software must ensure that the same priority
108 * is not mapped to multiple Rx queues
110 for (i = 0; i < 4; i++)
111 clear_mask |= ((prio << XGMAC_PSRQ_SHIFT(i)) &
114 ctrl2 &= ~clear_mask;
115 ctrl3 &= ~clear_mask;
117 /* First assign new priorities to a queue, then
118 * clear them from others queues
121 ctrl2 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
124 writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
125 writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
129 ctrl3 |= (prio << XGMAC_PSRQ_SHIFT(queue)) &
132 writel(ctrl3, ioaddr + XGMAC_RXQ_CTRL3);
133 writel(ctrl2, ioaddr + XGMAC_RXQ_CTRL2);
137 static void dwxgmac2_tx_queue_prio(struct mac_device_info *hw, u32 prio,
140 void __iomem *ioaddr = hw->pcsr;
143 reg = (queue < 4) ? XGMAC_TC_PRTY_MAP0 : XGMAC_TC_PRTY_MAP1;
147 value = readl(ioaddr + reg);
148 value &= ~XGMAC_PSTC(queue);
149 value |= (prio << XGMAC_PSTC_SHIFT(queue)) & XGMAC_PSTC(queue);
151 writel(value, ioaddr + reg);
154 static void dwxgmac2_prog_mtl_rx_algorithms(struct mac_device_info *hw,
157 void __iomem *ioaddr = hw->pcsr;
160 value = readl(ioaddr + XGMAC_MTL_OPMODE);
164 case MTL_RX_ALGORITHM_SP:
166 case MTL_RX_ALGORITHM_WSP:
173 writel(value, ioaddr + XGMAC_MTL_OPMODE);
176 static void dwxgmac2_prog_mtl_tx_algorithms(struct mac_device_info *hw,
179 void __iomem *ioaddr = hw->pcsr;
184 value = readl(ioaddr + XGMAC_MTL_OPMODE);
185 value &= ~XGMAC_ETSALG;
188 case MTL_TX_ALGORITHM_WRR:
191 case MTL_TX_ALGORITHM_WFQ:
194 case MTL_TX_ALGORITHM_DWRR:
202 writel(value, ioaddr + XGMAC_MTL_OPMODE);
204 /* Set ETS if desired */
205 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
206 value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
210 writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(i));
214 static void dwxgmac2_set_mtl_tx_queue_weight(struct mac_device_info *hw,
215 u32 weight, u32 queue)
217 void __iomem *ioaddr = hw->pcsr;
219 writel(weight, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
222 static void dwxgmac2_map_mtl_to_dma(struct mac_device_info *hw, u32 queue,
225 void __iomem *ioaddr = hw->pcsr;
228 reg = (queue < 4) ? XGMAC_MTL_RXQ_DMA_MAP0 : XGMAC_MTL_RXQ_DMA_MAP1;
232 value = readl(ioaddr + reg);
233 value &= ~XGMAC_QxMDMACH(queue);
234 value |= (chan << XGMAC_QxMDMACH_SHIFT(queue)) & XGMAC_QxMDMACH(queue);
236 writel(value, ioaddr + reg);
239 static void dwxgmac2_config_cbs(struct mac_device_info *hw,
240 u32 send_slope, u32 idle_slope,
241 u32 high_credit, u32 low_credit, u32 queue)
243 void __iomem *ioaddr = hw->pcsr;
246 writel(send_slope, ioaddr + XGMAC_MTL_TCx_SENDSLOPE(queue));
247 writel(idle_slope, ioaddr + XGMAC_MTL_TCx_QUANTUM_WEIGHT(queue));
248 writel(high_credit, ioaddr + XGMAC_MTL_TCx_HICREDIT(queue));
249 writel(low_credit, ioaddr + XGMAC_MTL_TCx_LOCREDIT(queue));
251 value = readl(ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
253 value |= XGMAC_CC | XGMAC_CBS;
254 writel(value, ioaddr + XGMAC_MTL_TCx_ETS_CONTROL(queue));
257 static void dwxgmac2_dump_regs(struct mac_device_info *hw, u32 *reg_space)
259 void __iomem *ioaddr = hw->pcsr;
262 for (i = 0; i < XGMAC_MAC_REGSIZE; i++)
263 reg_space[i] = readl(ioaddr + i * 4);
266 static int dwxgmac2_host_irq_status(struct mac_device_info *hw,
267 struct stmmac_extra_stats *x)
269 void __iomem *ioaddr = hw->pcsr;
273 en = readl(ioaddr + XGMAC_INT_EN);
274 stat = readl(ioaddr + XGMAC_INT_STATUS);
278 if (stat & XGMAC_PMTIS) {
279 x->irq_receive_pmt_irq_n++;
280 readl(ioaddr + XGMAC_PMT);
283 if (stat & XGMAC_LPIIS) {
284 u32 lpi = readl(ioaddr + XGMAC_LPI_CTRL);
286 if (lpi & XGMAC_TLPIEN) {
287 ret |= CORE_IRQ_TX_PATH_IN_LPI_MODE;
288 x->irq_tx_path_in_lpi_mode_n++;
290 if (lpi & XGMAC_TLPIEX) {
291 ret |= CORE_IRQ_TX_PATH_EXIT_LPI_MODE;
292 x->irq_tx_path_exit_lpi_mode_n++;
294 if (lpi & XGMAC_RLPIEN)
295 x->irq_rx_path_in_lpi_mode_n++;
296 if (lpi & XGMAC_RLPIEX)
297 x->irq_rx_path_exit_lpi_mode_n++;
303 static int dwxgmac2_host_mtl_irq_status(struct mac_device_info *hw, u32 chan)
305 void __iomem *ioaddr = hw->pcsr;
309 status = readl(ioaddr + XGMAC_MTL_INT_STATUS);
310 if (status & BIT(chan)) {
311 u32 chan_status = readl(ioaddr + XGMAC_MTL_QINT_STATUS(chan));
313 if (chan_status & XGMAC_RXOVFIS)
314 ret |= CORE_IRQ_MTL_RX_OVERFLOW;
316 writel(~0x0, ioaddr + XGMAC_MTL_QINT_STATUS(chan));
322 static void dwxgmac2_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
323 unsigned int fc, unsigned int pause_time,
326 void __iomem *ioaddr = hw->pcsr;
330 writel(XGMAC_RFE, ioaddr + XGMAC_RX_FLOW_CTRL);
332 for (i = 0; i < tx_cnt; i++) {
333 u32 value = XGMAC_TFE;
336 value |= pause_time << XGMAC_PT_SHIFT;
338 writel(value, ioaddr + XGMAC_Qx_TX_FLOW_CTRL(i));
343 static void dwxgmac2_pmt(struct mac_device_info *hw, unsigned long mode)
345 void __iomem *ioaddr = hw->pcsr;
348 if (mode & WAKE_MAGIC)
349 val |= XGMAC_PWRDWN | XGMAC_MGKPKTEN;
350 if (mode & WAKE_UCAST)
351 val |= XGMAC_PWRDWN | XGMAC_GLBLUCAST | XGMAC_RWKPKTEN;
353 u32 cfg = readl(ioaddr + XGMAC_RX_CONFIG);
354 cfg |= XGMAC_CONFIG_RE;
355 writel(cfg, ioaddr + XGMAC_RX_CONFIG);
358 writel(val, ioaddr + XGMAC_PMT);
361 static void dwxgmac2_set_umac_addr(struct mac_device_info *hw,
362 unsigned char *addr, unsigned int reg_n)
364 void __iomem *ioaddr = hw->pcsr;
367 value = (addr[5] << 8) | addr[4];
368 writel(value | XGMAC_AE, ioaddr + XGMAC_ADDRx_HIGH(reg_n));
370 value = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
371 writel(value, ioaddr + XGMAC_ADDRx_LOW(reg_n));
374 static void dwxgmac2_get_umac_addr(struct mac_device_info *hw,
375 unsigned char *addr, unsigned int reg_n)
377 void __iomem *ioaddr = hw->pcsr;
378 u32 hi_addr, lo_addr;
380 /* Read the MAC address from the hardware */
381 hi_addr = readl(ioaddr + XGMAC_ADDRx_HIGH(reg_n));
382 lo_addr = readl(ioaddr + XGMAC_ADDRx_LOW(reg_n));
384 /* Extract the MAC address from the high and low words */
385 addr[0] = lo_addr & 0xff;
386 addr[1] = (lo_addr >> 8) & 0xff;
387 addr[2] = (lo_addr >> 16) & 0xff;
388 addr[3] = (lo_addr >> 24) & 0xff;
389 addr[4] = hi_addr & 0xff;
390 addr[5] = (hi_addr >> 8) & 0xff;
393 static void dwxgmac2_set_eee_mode(struct mac_device_info *hw,
394 bool en_tx_lpi_clockgating)
396 void __iomem *ioaddr = hw->pcsr;
399 value = readl(ioaddr + XGMAC_LPI_CTRL);
401 value |= XGMAC_LPITXEN | XGMAC_LPITXA;
402 if (en_tx_lpi_clockgating)
403 value |= XGMAC_TXCGE;
405 writel(value, ioaddr + XGMAC_LPI_CTRL);
408 static void dwxgmac2_reset_eee_mode(struct mac_device_info *hw)
410 void __iomem *ioaddr = hw->pcsr;
413 value = readl(ioaddr + XGMAC_LPI_CTRL);
414 value &= ~(XGMAC_LPITXEN | XGMAC_LPITXA | XGMAC_TXCGE);
415 writel(value, ioaddr + XGMAC_LPI_CTRL);
418 static void dwxgmac2_set_eee_pls(struct mac_device_info *hw, int link)
420 void __iomem *ioaddr = hw->pcsr;
423 value = readl(ioaddr + XGMAC_LPI_CTRL);
428 writel(value, ioaddr + XGMAC_LPI_CTRL);
431 static void dwxgmac2_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
433 void __iomem *ioaddr = hw->pcsr;
436 value = (tw & 0xffff) | ((ls & 0x3ff) << 16);
437 writel(value, ioaddr + XGMAC_LPI_TIMER_CTRL);
440 static void dwxgmac2_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
443 int numhashregs, regs;
445 switch (mcbitslog2) {
459 for (regs = 0; regs < numhashregs; regs++)
460 writel(mcfilterbits[regs], ioaddr + XGMAC_HASH_TABLE(regs));
463 static void dwxgmac2_set_filter(struct mac_device_info *hw,
464 struct net_device *dev)
466 void __iomem *ioaddr = (void __iomem *)dev->base_addr;
467 u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
468 int mcbitslog2 = hw->mcast_bits_log2;
472 value &= ~(XGMAC_FILTER_PR | XGMAC_FILTER_HMC | XGMAC_FILTER_PM);
473 value |= XGMAC_FILTER_HPF;
475 memset(mc_filter, 0, sizeof(mc_filter));
477 if (dev->flags & IFF_PROMISC) {
478 value |= XGMAC_FILTER_PR;
479 value |= XGMAC_FILTER_PCF;
480 } else if ((dev->flags & IFF_ALLMULTI) ||
481 (netdev_mc_count(dev) > hw->multicast_filter_bins)) {
482 value |= XGMAC_FILTER_PM;
484 for (i = 0; i < XGMAC_MAX_HASH_TABLE; i++)
485 writel(~0x0, ioaddr + XGMAC_HASH_TABLE(i));
486 } else if (!netdev_mc_empty(dev) && (dev->flags & IFF_MULTICAST)) {
487 struct netdev_hw_addr *ha;
489 value |= XGMAC_FILTER_HMC;
491 netdev_for_each_mc_addr(ha, dev) {
492 u32 nr = (bitrev32(~crc32_le(~0, ha->addr, 6)) >>
494 mc_filter[nr >> 5] |= (1 << (nr & 0x1F));
498 dwxgmac2_set_mchash(ioaddr, mc_filter, mcbitslog2);
500 /* Handle multiple unicast addresses */
501 if (netdev_uc_count(dev) > hw->unicast_filter_entries) {
502 value |= XGMAC_FILTER_PR;
504 struct netdev_hw_addr *ha;
507 netdev_for_each_uc_addr(ha, dev) {
508 dwxgmac2_set_umac_addr(hw, ha->addr, reg);
512 for ( ; reg < XGMAC_ADDR_MAX; reg++) {
513 writel(0, ioaddr + XGMAC_ADDRx_HIGH(reg));
514 writel(0, ioaddr + XGMAC_ADDRx_LOW(reg));
518 writel(value, ioaddr + XGMAC_PACKET_FILTER);
521 static void dwxgmac2_set_mac_loopback(void __iomem *ioaddr, bool enable)
523 u32 value = readl(ioaddr + XGMAC_RX_CONFIG);
526 value |= XGMAC_CONFIG_LM;
528 value &= ~XGMAC_CONFIG_LM;
530 writel(value, ioaddr + XGMAC_RX_CONFIG);
533 static int dwxgmac2_rss_write_reg(void __iomem *ioaddr, bool is_key, int idx,
538 writel(val, ioaddr + XGMAC_RSS_DATA);
539 ctrl |= idx << XGMAC_RSSIA_SHIFT;
540 ctrl |= is_key ? XGMAC_ADDRT : 0x0;
542 writel(ctrl, ioaddr + XGMAC_RSS_ADDR);
544 return readl_poll_timeout(ioaddr + XGMAC_RSS_ADDR, ctrl,
545 !(ctrl & XGMAC_OB), 100, 10000);
548 static int dwxgmac2_rss_configure(struct mac_device_info *hw,
549 struct stmmac_rss *cfg, u32 num_rxq)
551 void __iomem *ioaddr = hw->pcsr;
555 value = readl(ioaddr + XGMAC_RSS_CTRL);
556 if (!cfg || !cfg->enable) {
557 value &= ~XGMAC_RSSE;
558 writel(value, ioaddr + XGMAC_RSS_CTRL);
562 key = (u32 *)cfg->key;
563 for (i = 0; i < (ARRAY_SIZE(cfg->key) / sizeof(u32)); i++) {
564 ret = dwxgmac2_rss_write_reg(ioaddr, true, i, key[i]);
569 for (i = 0; i < ARRAY_SIZE(cfg->table); i++) {
570 ret = dwxgmac2_rss_write_reg(ioaddr, false, i, cfg->table[i]);
575 for (i = 0; i < num_rxq; i++)
576 dwxgmac2_map_mtl_to_dma(hw, i, XGMAC_QDDMACH);
578 value |= XGMAC_UDP4TE | XGMAC_TCP4TE | XGMAC_IP2TE | XGMAC_RSSE;
579 writel(value, ioaddr + XGMAC_RSS_CTRL);
583 static void dwxgmac2_update_vlan_hash(struct mac_device_info *hw, u32 hash,
584 __le16 perfect_match, bool is_double)
586 void __iomem *ioaddr = hw->pcsr;
588 writel(hash, ioaddr + XGMAC_VLAN_HASH_TABLE);
591 u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
593 value |= XGMAC_FILTER_VTFE;
595 writel(value, ioaddr + XGMAC_PACKET_FILTER);
597 value = readl(ioaddr + XGMAC_VLAN_TAG);
599 value |= XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV;
601 value |= XGMAC_VLAN_EDVLP;
602 value |= XGMAC_VLAN_ESVL;
603 value |= XGMAC_VLAN_DOVLTC;
605 value &= ~XGMAC_VLAN_EDVLP;
606 value &= ~XGMAC_VLAN_ESVL;
607 value &= ~XGMAC_VLAN_DOVLTC;
610 value &= ~XGMAC_VLAN_VID;
611 writel(value, ioaddr + XGMAC_VLAN_TAG);
612 } else if (perfect_match) {
613 u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
615 value |= XGMAC_FILTER_VTFE;
617 writel(value, ioaddr + XGMAC_PACKET_FILTER);
619 value = readl(ioaddr + XGMAC_VLAN_TAG);
621 value &= ~XGMAC_VLAN_VTHM;
622 value |= XGMAC_VLAN_ETV;
624 value |= XGMAC_VLAN_EDVLP;
625 value |= XGMAC_VLAN_ESVL;
626 value |= XGMAC_VLAN_DOVLTC;
628 value &= ~XGMAC_VLAN_EDVLP;
629 value &= ~XGMAC_VLAN_ESVL;
630 value &= ~XGMAC_VLAN_DOVLTC;
633 value &= ~XGMAC_VLAN_VID;
634 writel(value | perfect_match, ioaddr + XGMAC_VLAN_TAG);
636 u32 value = readl(ioaddr + XGMAC_PACKET_FILTER);
638 value &= ~XGMAC_FILTER_VTFE;
640 writel(value, ioaddr + XGMAC_PACKET_FILTER);
642 value = readl(ioaddr + XGMAC_VLAN_TAG);
644 value &= ~(XGMAC_VLAN_VTHM | XGMAC_VLAN_ETV);
645 value &= ~(XGMAC_VLAN_EDVLP | XGMAC_VLAN_ESVL);
646 value &= ~XGMAC_VLAN_DOVLTC;
647 value &= ~XGMAC_VLAN_VID;
649 writel(value, ioaddr + XGMAC_VLAN_TAG);
653 struct dwxgmac3_error_desc {
656 const char *detailed_desc;
659 #define STAT_OFF(field) offsetof(struct stmmac_safety_stats, field)
661 static void dwxgmac3_log_error(struct net_device *ndev, u32 value, bool corr,
662 const char *module_name,
663 const struct dwxgmac3_error_desc *desc,
664 unsigned long field_offset,
665 struct stmmac_safety_stats *stats)
667 unsigned long loc, mask;
668 u8 *bptr = (u8 *)stats;
671 ptr = (unsigned long *)(bptr + field_offset);
674 for_each_set_bit(loc, &mask, 32) {
675 netdev_err(ndev, "Found %s error in %s: '%s: %s'\n", corr ?
676 "correctable" : "uncorrectable", module_name,
677 desc[loc].desc, desc[loc].detailed_desc);
679 /* Update counters */
684 static const struct dwxgmac3_error_desc dwxgmac3_mac_errors[32]= {
685 { true, "ATPES", "Application Transmit Interface Parity Check Error" },
686 { true, "DPES", "Descriptor Cache Data Path Parity Check Error" },
687 { true, "TPES", "TSO Data Path Parity Check Error" },
688 { true, "TSOPES", "TSO Header Data Path Parity Check Error" },
689 { true, "MTPES", "MTL Data Path Parity Check Error" },
690 { true, "MTSPES", "MTL TX Status Data Path Parity Check Error" },
691 { true, "MTBUPES", "MAC TBU Data Path Parity Check Error" },
692 { true, "MTFCPES", "MAC TFC Data Path Parity Check Error" },
693 { true, "ARPES", "Application Receive Interface Data Path Parity Check Error" },
694 { true, "MRWCPES", "MTL RWC Data Path Parity Check Error" },
695 { true, "MRRCPES", "MTL RCC Data Path Parity Check Error" },
696 { true, "CWPES", "CSR Write Data Path Parity Check Error" },
697 { true, "ASRPES", "AXI Slave Read Data Path Parity Check Error" },
698 { true, "TTES", "TX FSM Timeout Error" },
699 { true, "RTES", "RX FSM Timeout Error" },
700 { true, "CTES", "CSR FSM Timeout Error" },
701 { true, "ATES", "APP FSM Timeout Error" },
702 { true, "PTES", "PTP FSM Timeout Error" },
703 { false, "UNKNOWN", "Unknown Error" }, /* 18 */
704 { false, "UNKNOWN", "Unknown Error" }, /* 19 */
705 { false, "UNKNOWN", "Unknown Error" }, /* 20 */
706 { true, "MSTTES", "Master Read/Write Timeout Error" },
707 { true, "SLVTES", "Slave Read/Write Timeout Error" },
708 { true, "ATITES", "Application Timeout on ATI Interface Error" },
709 { true, "ARITES", "Application Timeout on ARI Interface Error" },
710 { true, "FSMPES", "FSM State Parity Error" },
711 { false, "UNKNOWN", "Unknown Error" }, /* 26 */
712 { false, "UNKNOWN", "Unknown Error" }, /* 27 */
713 { false, "UNKNOWN", "Unknown Error" }, /* 28 */
714 { false, "UNKNOWN", "Unknown Error" }, /* 29 */
715 { false, "UNKNOWN", "Unknown Error" }, /* 30 */
716 { true, "CPI", "Control Register Parity Check Error" },
719 static void dwxgmac3_handle_mac_err(struct net_device *ndev,
720 void __iomem *ioaddr, bool correctable,
721 struct stmmac_safety_stats *stats)
725 value = readl(ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
726 writel(value, ioaddr + XGMAC_MAC_DPP_FSM_INT_STATUS);
728 dwxgmac3_log_error(ndev, value, correctable, "MAC",
729 dwxgmac3_mac_errors, STAT_OFF(mac_errors), stats);
732 static const struct dwxgmac3_error_desc dwxgmac3_mtl_errors[32]= {
733 { true, "TXCES", "MTL TX Memory Error" },
734 { true, "TXAMS", "MTL TX Memory Address Mismatch Error" },
735 { true, "TXUES", "MTL TX Memory Error" },
736 { false, "UNKNOWN", "Unknown Error" }, /* 3 */
737 { true, "RXCES", "MTL RX Memory Error" },
738 { true, "RXAMS", "MTL RX Memory Address Mismatch Error" },
739 { true, "RXUES", "MTL RX Memory Error" },
740 { false, "UNKNOWN", "Unknown Error" }, /* 7 */
741 { true, "ECES", "MTL EST Memory Error" },
742 { true, "EAMS", "MTL EST Memory Address Mismatch Error" },
743 { true, "EUES", "MTL EST Memory Error" },
744 { false, "UNKNOWN", "Unknown Error" }, /* 11 */
745 { true, "RPCES", "MTL RX Parser Memory Error" },
746 { true, "RPAMS", "MTL RX Parser Memory Address Mismatch Error" },
747 { true, "RPUES", "MTL RX Parser Memory Error" },
748 { false, "UNKNOWN", "Unknown Error" }, /* 15 */
749 { false, "UNKNOWN", "Unknown Error" }, /* 16 */
750 { false, "UNKNOWN", "Unknown Error" }, /* 17 */
751 { false, "UNKNOWN", "Unknown Error" }, /* 18 */
752 { false, "UNKNOWN", "Unknown Error" }, /* 19 */
753 { false, "UNKNOWN", "Unknown Error" }, /* 20 */
754 { false, "UNKNOWN", "Unknown Error" }, /* 21 */
755 { false, "UNKNOWN", "Unknown Error" }, /* 22 */
756 { false, "UNKNOWN", "Unknown Error" }, /* 23 */
757 { false, "UNKNOWN", "Unknown Error" }, /* 24 */
758 { false, "UNKNOWN", "Unknown Error" }, /* 25 */
759 { false, "UNKNOWN", "Unknown Error" }, /* 26 */
760 { false, "UNKNOWN", "Unknown Error" }, /* 27 */
761 { false, "UNKNOWN", "Unknown Error" }, /* 28 */
762 { false, "UNKNOWN", "Unknown Error" }, /* 29 */
763 { false, "UNKNOWN", "Unknown Error" }, /* 30 */
764 { false, "UNKNOWN", "Unknown Error" }, /* 31 */
767 static void dwxgmac3_handle_mtl_err(struct net_device *ndev,
768 void __iomem *ioaddr, bool correctable,
769 struct stmmac_safety_stats *stats)
773 value = readl(ioaddr + XGMAC_MTL_ECC_INT_STATUS);
774 writel(value, ioaddr + XGMAC_MTL_ECC_INT_STATUS);
776 dwxgmac3_log_error(ndev, value, correctable, "MTL",
777 dwxgmac3_mtl_errors, STAT_OFF(mtl_errors), stats);
780 static const struct dwxgmac3_error_desc dwxgmac3_dma_errors[32]= {
781 { true, "TCES", "DMA TSO Memory Error" },
782 { true, "TAMS", "DMA TSO Memory Address Mismatch Error" },
783 { true, "TUES", "DMA TSO Memory Error" },
784 { false, "UNKNOWN", "Unknown Error" }, /* 3 */
785 { true, "DCES", "DMA DCACHE Memory Error" },
786 { true, "DAMS", "DMA DCACHE Address Mismatch Error" },
787 { true, "DUES", "DMA DCACHE Memory Error" },
788 { false, "UNKNOWN", "Unknown Error" }, /* 7 */
789 { false, "UNKNOWN", "Unknown Error" }, /* 8 */
790 { false, "UNKNOWN", "Unknown Error" }, /* 9 */
791 { false, "UNKNOWN", "Unknown Error" }, /* 10 */
792 { false, "UNKNOWN", "Unknown Error" }, /* 11 */
793 { false, "UNKNOWN", "Unknown Error" }, /* 12 */
794 { false, "UNKNOWN", "Unknown Error" }, /* 13 */
795 { false, "UNKNOWN", "Unknown Error" }, /* 14 */
796 { false, "UNKNOWN", "Unknown Error" }, /* 15 */
797 { false, "UNKNOWN", "Unknown Error" }, /* 16 */
798 { false, "UNKNOWN", "Unknown Error" }, /* 17 */
799 { false, "UNKNOWN", "Unknown Error" }, /* 18 */
800 { false, "UNKNOWN", "Unknown Error" }, /* 19 */
801 { false, "UNKNOWN", "Unknown Error" }, /* 20 */
802 { false, "UNKNOWN", "Unknown Error" }, /* 21 */
803 { false, "UNKNOWN", "Unknown Error" }, /* 22 */
804 { false, "UNKNOWN", "Unknown Error" }, /* 23 */
805 { false, "UNKNOWN", "Unknown Error" }, /* 24 */
806 { false, "UNKNOWN", "Unknown Error" }, /* 25 */
807 { false, "UNKNOWN", "Unknown Error" }, /* 26 */
808 { false, "UNKNOWN", "Unknown Error" }, /* 27 */
809 { false, "UNKNOWN", "Unknown Error" }, /* 28 */
810 { false, "UNKNOWN", "Unknown Error" }, /* 29 */
811 { false, "UNKNOWN", "Unknown Error" }, /* 30 */
812 { false, "UNKNOWN", "Unknown Error" }, /* 31 */
815 #define DPP_RX_ERR "Read Rx Descriptor Parity checker Error"
816 #define DPP_TX_ERR "Read Tx Descriptor Parity checker Error"
818 static const struct dwxgmac3_error_desc dwxgmac3_dma_dpp_errors[32] = {
819 { true, "TDPES0", DPP_TX_ERR },
820 { true, "TDPES1", DPP_TX_ERR },
821 { true, "TDPES2", DPP_TX_ERR },
822 { true, "TDPES3", DPP_TX_ERR },
823 { true, "TDPES4", DPP_TX_ERR },
824 { true, "TDPES5", DPP_TX_ERR },
825 { true, "TDPES6", DPP_TX_ERR },
826 { true, "TDPES7", DPP_TX_ERR },
827 { true, "TDPES8", DPP_TX_ERR },
828 { true, "TDPES9", DPP_TX_ERR },
829 { true, "TDPES10", DPP_TX_ERR },
830 { true, "TDPES11", DPP_TX_ERR },
831 { true, "TDPES12", DPP_TX_ERR },
832 { true, "TDPES13", DPP_TX_ERR },
833 { true, "TDPES14", DPP_TX_ERR },
834 { true, "TDPES15", DPP_TX_ERR },
835 { true, "RDPES0", DPP_RX_ERR },
836 { true, "RDPES1", DPP_RX_ERR },
837 { true, "RDPES2", DPP_RX_ERR },
838 { true, "RDPES3", DPP_RX_ERR },
839 { true, "RDPES4", DPP_RX_ERR },
840 { true, "RDPES5", DPP_RX_ERR },
841 { true, "RDPES6", DPP_RX_ERR },
842 { true, "RDPES7", DPP_RX_ERR },
843 { true, "RDPES8", DPP_RX_ERR },
844 { true, "RDPES9", DPP_RX_ERR },
845 { true, "RDPES10", DPP_RX_ERR },
846 { true, "RDPES11", DPP_RX_ERR },
847 { true, "RDPES12", DPP_RX_ERR },
848 { true, "RDPES13", DPP_RX_ERR },
849 { true, "RDPES14", DPP_RX_ERR },
850 { true, "RDPES15", DPP_RX_ERR },
853 static void dwxgmac3_handle_dma_err(struct net_device *ndev,
854 void __iomem *ioaddr, bool correctable,
855 struct stmmac_safety_stats *stats)
859 value = readl(ioaddr + XGMAC_DMA_ECC_INT_STATUS);
860 writel(value, ioaddr + XGMAC_DMA_ECC_INT_STATUS);
862 dwxgmac3_log_error(ndev, value, correctable, "DMA",
863 dwxgmac3_dma_errors, STAT_OFF(dma_errors), stats);
865 value = readl(ioaddr + XGMAC_DMA_DPP_INT_STATUS);
866 writel(value, ioaddr + XGMAC_DMA_DPP_INT_STATUS);
868 dwxgmac3_log_error(ndev, value, false, "DMA_DPP",
869 dwxgmac3_dma_dpp_errors,
870 STAT_OFF(dma_dpp_errors), stats);
873 static int dwxgmac3_safety_feat_config(void __iomem *ioaddr, unsigned int asp)
880 /* 1. Enable Safety Features */
881 writel(0x0, ioaddr + XGMAC_MTL_ECC_CONTROL);
883 /* 2. Enable MTL Safety Interrupts */
884 value = readl(ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
885 value |= XGMAC_RPCEIE; /* RX Parser Memory Correctable Error */
886 value |= XGMAC_ECEIE; /* EST Memory Correctable Error */
887 value |= XGMAC_RXCEIE; /* RX Memory Correctable Error */
888 value |= XGMAC_TXCEIE; /* TX Memory Correctable Error */
889 writel(value, ioaddr + XGMAC_MTL_ECC_INT_ENABLE);
891 /* 3. Enable DMA Safety Interrupts */
892 value = readl(ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
893 value |= XGMAC_DCEIE; /* Descriptor Cache Memory Correctable Error */
894 value |= XGMAC_TCEIE; /* TSO Memory Correctable Error */
895 writel(value, ioaddr + XGMAC_DMA_ECC_INT_ENABLE);
897 /* Only ECC Protection for External Memory feature is selected */
901 /* 4. Enable Parity and Timeout for FSM */
902 value = readl(ioaddr + XGMAC_MAC_FSM_CONTROL);
903 value |= XGMAC_PRTYEN; /* FSM Parity Feature */
904 value |= XGMAC_TMOUTEN; /* FSM Timeout Feature */
905 writel(value, ioaddr + XGMAC_MAC_FSM_CONTROL);
907 /* 5. Enable Data Path Parity Protection */
908 value = readl(ioaddr + XGMAC_MTL_DPP_CONTROL);
909 /* already enabled by default, explicit enable it again */
910 value &= ~XGMAC_DPP_DISABLE;
911 writel(value, ioaddr + XGMAC_MTL_DPP_CONTROL);
916 static int dwxgmac3_safety_feat_irq_status(struct net_device *ndev,
917 void __iomem *ioaddr,
919 struct stmmac_safety_stats *stats)
928 mtl = readl(ioaddr + XGMAC_MTL_SAFETY_INT_STATUS);
929 dma = readl(ioaddr + XGMAC_DMA_SAFETY_INT_STATUS);
931 err = (mtl & XGMAC_MCSIS) || (dma & XGMAC_MCSIS);
934 dwxgmac3_handle_mac_err(ndev, ioaddr, corr, stats);
938 err = (mtl & (XGMAC_MEUIS | XGMAC_MECIS)) ||
939 (dma & (XGMAC_MSUIS | XGMAC_MSCIS));
940 corr = (mtl & XGMAC_MECIS) || (dma & XGMAC_MSCIS);
942 dwxgmac3_handle_mtl_err(ndev, ioaddr, corr, stats);
946 /* DMA_DPP_Interrupt_Status is indicated by MCSIS bit in
947 * DMA_Safety_Interrupt_Status, so we handle DMA Data Path
950 err = dma & (XGMAC_DEUIS | XGMAC_DECIS | XGMAC_MCSIS);
951 corr = dma & XGMAC_DECIS;
953 dwxgmac3_handle_dma_err(ndev, ioaddr, corr, stats);
960 static const struct dwxgmac3_error {
961 const struct dwxgmac3_error_desc *desc;
962 } dwxgmac3_all_errors[] = {
963 { dwxgmac3_mac_errors },
964 { dwxgmac3_mtl_errors },
965 { dwxgmac3_dma_errors },
966 { dwxgmac3_dma_dpp_errors },
969 static int dwxgmac3_safety_feat_dump(struct stmmac_safety_stats *stats,
970 int index, unsigned long *count,
973 int module = index / 32, offset = index % 32;
974 unsigned long *ptr = (unsigned long *)stats;
976 if (module >= ARRAY_SIZE(dwxgmac3_all_errors))
978 if (!dwxgmac3_all_errors[module].desc[offset].valid)
981 *count = *(ptr + index);
983 *desc = dwxgmac3_all_errors[module].desc[offset].desc;
987 static int dwxgmac3_rxp_disable(void __iomem *ioaddr)
989 u32 val = readl(ioaddr + XGMAC_MTL_OPMODE);
992 writel(val, ioaddr + XGMAC_MTL_OPMODE);
997 static void dwxgmac3_rxp_enable(void __iomem *ioaddr)
1001 val = readl(ioaddr + XGMAC_MTL_OPMODE);
1003 writel(val, ioaddr + XGMAC_MTL_OPMODE);
1006 static int dwxgmac3_rxp_update_single_entry(void __iomem *ioaddr,
1007 struct stmmac_tc_entry *entry,
1012 for (i = 0; i < (sizeof(entry->val) / sizeof(u32)); i++) {
1013 int real_pos = pos * (sizeof(entry->val) / sizeof(u32)) + i;
1016 /* Wait for ready */
1017 ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
1018 val, !(val & XGMAC_STARTBUSY), 1, 10000);
1023 val = *((u32 *)&entry->val + i);
1024 writel(val, ioaddr + XGMAC_MTL_RXP_IACC_DATA);
1027 val = real_pos & XGMAC_ADDR;
1028 writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
1032 writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
1035 val |= XGMAC_STARTBUSY;
1036 writel(val, ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST);
1039 ret = readl_poll_timeout(ioaddr + XGMAC_MTL_RXP_IACC_CTRL_ST,
1040 val, !(val & XGMAC_STARTBUSY), 1, 10000);
1048 static struct stmmac_tc_entry *
1049 dwxgmac3_rxp_get_next_entry(struct stmmac_tc_entry *entries,
1050 unsigned int count, u32 curr_prio)
1052 struct stmmac_tc_entry *entry;
1053 u32 min_prio = ~0x0;
1054 int i, min_prio_idx;
1057 for (i = count - 1; i >= 0; i--) {
1058 entry = &entries[i];
1060 /* Do not update unused entries */
1063 /* Do not update already updated entries (i.e. fragments) */
1066 /* Let last entry be updated last */
1069 /* Do not return fragments */
1072 /* Check if we already checked this prio */
1073 if (entry->prio < curr_prio)
1075 /* Check if this is the minimum prio */
1076 if (entry->prio < min_prio) {
1077 min_prio = entry->prio;
1084 return &entries[min_prio_idx];
1088 static int dwxgmac3_rxp_config(void __iomem *ioaddr,
1089 struct stmmac_tc_entry *entries,
1092 struct stmmac_tc_entry *entry, *frag;
1093 int i, ret, nve = 0;
1097 /* Force disable RX */
1098 old_val = readl(ioaddr + XGMAC_RX_CONFIG);
1099 val = old_val & ~XGMAC_CONFIG_RE;
1100 writel(val, ioaddr + XGMAC_RX_CONFIG);
1102 /* Disable RX Parser */
1103 ret = dwxgmac3_rxp_disable(ioaddr);
1107 /* Set all entries as NOT in HW */
1108 for (i = 0; i < count; i++) {
1109 entry = &entries[i];
1110 entry->in_hw = false;
1113 /* Update entries by reverse order */
1115 entry = dwxgmac3_rxp_get_next_entry(entries, count, curr_prio);
1119 curr_prio = entry->prio;
1120 frag = entry->frag_ptr;
1122 /* Set special fragment requirements */
1127 entry->val.ok_index = nve + 2;
1130 ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
1134 entry->table_pos = nve++;
1135 entry->in_hw = true;
1137 if (frag && !frag->in_hw) {
1138 ret = dwxgmac3_rxp_update_single_entry(ioaddr, frag, nve);
1141 frag->table_pos = nve++;
1149 /* Update all pass entry */
1150 for (i = 0; i < count; i++) {
1151 entry = &entries[i];
1152 if (!entry->is_last)
1155 ret = dwxgmac3_rxp_update_single_entry(ioaddr, entry, nve);
1159 entry->table_pos = nve++;
1162 /* Assume n. of parsable entries == n. of valid entries */
1163 val = (nve << 16) & XGMAC_NPE;
1164 val |= nve & XGMAC_NVE;
1165 writel(val, ioaddr + XGMAC_MTL_RXP_CONTROL_STATUS);
1167 /* Enable RX Parser */
1168 dwxgmac3_rxp_enable(ioaddr);
1172 writel(old_val, ioaddr + XGMAC_RX_CONFIG);
1176 static int dwxgmac2_get_mac_tx_timestamp(struct mac_device_info *hw, u64 *ts)
1178 void __iomem *ioaddr = hw->pcsr;
1181 if (readl_poll_timeout_atomic(ioaddr + XGMAC_TIMESTAMP_STATUS,
1182 value, value & XGMAC_TXTSC, 100, 10000))
1185 *ts = readl(ioaddr + XGMAC_TXTIMESTAMP_NSEC) & XGMAC_TXTSSTSLO;
1186 *ts += readl(ioaddr + XGMAC_TXTIMESTAMP_SEC) * 1000000000ULL;
1190 static int dwxgmac2_flex_pps_config(void __iomem *ioaddr, int index,
1191 struct stmmac_pps_cfg *cfg, bool enable,
1192 u32 sub_second_inc, u32 systime_flags)
1194 u32 tnsec = readl(ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
1195 u32 val = readl(ioaddr + XGMAC_PPS_CONTROL);
1198 if (!cfg->available)
1200 if (tnsec & XGMAC_TRGTBUSY0)
1202 if (!sub_second_inc || !systime_flags)
1205 val &= ~XGMAC_PPSx_MASK(index);
1208 val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_STOP);
1209 writel(val, ioaddr + XGMAC_PPS_CONTROL);
1213 val |= XGMAC_PPSCMDx(index, XGMAC_PPSCMD_START);
1214 val |= XGMAC_TRGTMODSELx(index, XGMAC_PPSCMD_START);
1216 /* XGMAC Core has 4 PPS outputs at most.
1218 * Prior XGMAC Core 3.20, Fixed mode or Flexible mode are selectable for
1219 * PPS0 only via PPSEN0. PPS{1,2,3} are in Flexible mode by default,
1220 * and can not be switched to Fixed mode, since PPSEN{1,2,3} are
1221 * read-only reserved to 0.
1222 * But we always set PPSEN{1,2,3} do not make things worse ;-)
1224 * From XGMAC Core 3.20 and later, PPSEN{0,1,2,3} are writable and must
1225 * be set, or the PPS outputs stay in Fixed PPS mode by default.
1227 val |= XGMAC_PPSENx(index);
1229 writel(cfg->start.tv_sec, ioaddr + XGMAC_PPSx_TARGET_TIME_SEC(index));
1231 if (!(systime_flags & PTP_TCR_TSCTRLSSR))
1232 cfg->start.tv_nsec = (cfg->start.tv_nsec * 1000) / 465;
1233 writel(cfg->start.tv_nsec, ioaddr + XGMAC_PPSx_TARGET_TIME_NSEC(index));
1235 period = cfg->period.tv_sec * 1000000000;
1236 period += cfg->period.tv_nsec;
1238 do_div(period, sub_second_inc);
1243 writel(period - 1, ioaddr + XGMAC_PPSx_INTERVAL(index));
1249 writel(period - 1, ioaddr + XGMAC_PPSx_WIDTH(index));
1251 /* Finally, activate it */
1252 writel(val, ioaddr + XGMAC_PPS_CONTROL);
1256 static void dwxgmac2_sarc_configure(void __iomem *ioaddr, int val)
1258 u32 value = readl(ioaddr + XGMAC_TX_CONFIG);
1260 value &= ~XGMAC_CONFIG_SARC;
1261 value |= val << XGMAC_CONFIG_SARC_SHIFT;
1263 writel(value, ioaddr + XGMAC_TX_CONFIG);
1266 static void dwxgmac2_enable_vlan(struct mac_device_info *hw, u32 type)
1268 void __iomem *ioaddr = hw->pcsr;
1271 value = readl(ioaddr + XGMAC_VLAN_INCL);
1272 value |= XGMAC_VLAN_VLTI;
1273 value |= XGMAC_VLAN_CSVL; /* Only use SVLAN */
1274 value &= ~XGMAC_VLAN_VLC;
1275 value |= (type << XGMAC_VLAN_VLC_SHIFT) & XGMAC_VLAN_VLC;
1276 writel(value, ioaddr + XGMAC_VLAN_INCL);
1279 static int dwxgmac2_filter_wait(struct mac_device_info *hw)
1281 void __iomem *ioaddr = hw->pcsr;
1284 if (readl_poll_timeout(ioaddr + XGMAC_L3L4_ADDR_CTRL, value,
1285 !(value & XGMAC_XB), 100, 10000))
1290 static int dwxgmac2_filter_read(struct mac_device_info *hw, u32 filter_no,
1293 void __iomem *ioaddr = hw->pcsr;
1297 ret = dwxgmac2_filter_wait(hw);
1301 value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
1302 value |= XGMAC_TT | XGMAC_XB;
1303 writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
1305 ret = dwxgmac2_filter_wait(hw);
1309 *data = readl(ioaddr + XGMAC_L3L4_DATA);
1313 static int dwxgmac2_filter_write(struct mac_device_info *hw, u32 filter_no,
1316 void __iomem *ioaddr = hw->pcsr;
1320 ret = dwxgmac2_filter_wait(hw);
1324 writel(data, ioaddr + XGMAC_L3L4_DATA);
1326 value = ((filter_no << XGMAC_IDDR_FNUM) | reg) << XGMAC_IDDR_SHIFT;
1328 writel(value, ioaddr + XGMAC_L3L4_ADDR_CTRL);
1330 return dwxgmac2_filter_wait(hw);
1333 static int dwxgmac2_config_l3_filter(struct mac_device_info *hw, u32 filter_no,
1334 bool en, bool ipv6, bool sa, bool inv,
1337 void __iomem *ioaddr = hw->pcsr;
1341 value = readl(ioaddr + XGMAC_PACKET_FILTER);
1342 value |= XGMAC_FILTER_IPFE;
1343 writel(value, ioaddr + XGMAC_PACKET_FILTER);
1345 ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1349 /* For IPv6 not both SA/DA filters can be active */
1351 value |= XGMAC_L3PEN0;
1352 value &= ~(XGMAC_L3SAM0 | XGMAC_L3SAIM0);
1353 value &= ~(XGMAC_L3DAM0 | XGMAC_L3DAIM0);
1355 value |= XGMAC_L3SAM0;
1357 value |= XGMAC_L3SAIM0;
1359 value |= XGMAC_L3DAM0;
1361 value |= XGMAC_L3DAIM0;
1364 value &= ~XGMAC_L3PEN0;
1366 value |= XGMAC_L3SAM0;
1368 value |= XGMAC_L3SAIM0;
1370 value |= XGMAC_L3DAM0;
1372 value |= XGMAC_L3DAIM0;
1376 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1381 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR0, match);
1385 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3_ADDR1, match);
1391 return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1396 static int dwxgmac2_config_l4_filter(struct mac_device_info *hw, u32 filter_no,
1397 bool en, bool udp, bool sa, bool inv,
1400 void __iomem *ioaddr = hw->pcsr;
1404 value = readl(ioaddr + XGMAC_PACKET_FILTER);
1405 value |= XGMAC_FILTER_IPFE;
1406 writel(value, ioaddr + XGMAC_PACKET_FILTER);
1408 ret = dwxgmac2_filter_read(hw, filter_no, XGMAC_L3L4_CTRL, &value);
1413 value |= XGMAC_L4PEN0;
1415 value &= ~XGMAC_L4PEN0;
1418 value &= ~(XGMAC_L4SPM0 | XGMAC_L4SPIM0);
1419 value &= ~(XGMAC_L4DPM0 | XGMAC_L4DPIM0);
1421 value |= XGMAC_L4SPM0;
1423 value |= XGMAC_L4SPIM0;
1425 value |= XGMAC_L4DPM0;
1427 value |= XGMAC_L4DPIM0;
1430 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, value);
1435 value = match & XGMAC_L4SP0;
1437 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1441 value = (match << XGMAC_L4DP0_SHIFT) & XGMAC_L4DP0;
1443 ret = dwxgmac2_filter_write(hw, filter_no, XGMAC_L4_ADDR, value);
1449 return dwxgmac2_filter_write(hw, filter_no, XGMAC_L3L4_CTRL, 0);
1454 static void dwxgmac2_set_arp_offload(struct mac_device_info *hw, bool en,
1457 void __iomem *ioaddr = hw->pcsr;
1460 writel(addr, ioaddr + XGMAC_ARP_ADDR);
1462 value = readl(ioaddr + XGMAC_RX_CONFIG);
1464 value |= XGMAC_CONFIG_ARPEN;
1466 value &= ~XGMAC_CONFIG_ARPEN;
1467 writel(value, ioaddr + XGMAC_RX_CONFIG);
1470 static int dwxgmac3_est_write(void __iomem *ioaddr, u32 reg, u32 val, bool gcl)
1474 writel(val, ioaddr + XGMAC_MTL_EST_GCL_DATA);
1476 ctrl = (reg << XGMAC_ADDR_SHIFT);
1477 ctrl |= gcl ? 0 : XGMAC_GCRR;
1479 writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
1482 writel(ctrl, ioaddr + XGMAC_MTL_EST_GCL_CONTROL);
1484 return readl_poll_timeout_atomic(ioaddr + XGMAC_MTL_EST_GCL_CONTROL,
1485 ctrl, !(ctrl & XGMAC_SRWO), 100, 5000);
1488 static int dwxgmac3_est_configure(void __iomem *ioaddr, struct stmmac_est *cfg,
1489 unsigned int ptp_rate)
1494 ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_LOW, cfg->btr[0], false);
1495 ret |= dwxgmac3_est_write(ioaddr, XGMAC_BTR_HIGH, cfg->btr[1], false);
1496 ret |= dwxgmac3_est_write(ioaddr, XGMAC_TER, cfg->ter, false);
1497 ret |= dwxgmac3_est_write(ioaddr, XGMAC_LLR, cfg->gcl_size, false);
1498 ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_LOW, cfg->ctr[0], false);
1499 ret |= dwxgmac3_est_write(ioaddr, XGMAC_CTR_HIGH, cfg->ctr[1], false);
1503 for (i = 0; i < cfg->gcl_size; i++) {
1504 ret = dwxgmac3_est_write(ioaddr, i, cfg->gcl[i], true);
1509 ctrl = readl(ioaddr + XGMAC_MTL_EST_CONTROL);
1510 ctrl &= ~XGMAC_PTOV;
1511 ctrl |= ((1000000000 / ptp_rate) * 9) << XGMAC_PTOV_SHIFT;
1513 ctrl |= XGMAC_EEST | XGMAC_SSWL;
1515 ctrl &= ~XGMAC_EEST;
1517 writel(ctrl, ioaddr + XGMAC_MTL_EST_CONTROL);
1521 static void dwxgmac3_fpe_configure(void __iomem *ioaddr, u32 num_txq,
1522 u32 num_rxq, bool enable)
1527 value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
1529 value &= ~XGMAC_EFPE;
1531 writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
1535 value = readl(ioaddr + XGMAC_RXQ_CTRL1);
1537 value |= (num_rxq - 1) << XGMAC_RQ_SHIFT;
1538 writel(value, ioaddr + XGMAC_RXQ_CTRL1);
1540 value = readl(ioaddr + XGMAC_FPE_CTRL_STS);
1541 value |= XGMAC_EFPE;
1542 writel(value, ioaddr + XGMAC_FPE_CTRL_STS);
1545 const struct stmmac_ops dwxgmac210_ops = {
1546 .core_init = dwxgmac2_core_init,
1547 .set_mac = dwxgmac2_set_mac,
1548 .rx_ipc = dwxgmac2_rx_ipc,
1549 .rx_queue_enable = dwxgmac2_rx_queue_enable,
1550 .rx_queue_prio = dwxgmac2_rx_queue_prio,
1551 .tx_queue_prio = dwxgmac2_tx_queue_prio,
1552 .rx_queue_routing = NULL,
1553 .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
1554 .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
1555 .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
1556 .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
1557 .config_cbs = dwxgmac2_config_cbs,
1558 .dump_regs = dwxgmac2_dump_regs,
1559 .host_irq_status = dwxgmac2_host_irq_status,
1560 .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
1561 .flow_ctrl = dwxgmac2_flow_ctrl,
1562 .pmt = dwxgmac2_pmt,
1563 .set_umac_addr = dwxgmac2_set_umac_addr,
1564 .get_umac_addr = dwxgmac2_get_umac_addr,
1565 .set_eee_mode = dwxgmac2_set_eee_mode,
1566 .reset_eee_mode = dwxgmac2_reset_eee_mode,
1567 .set_eee_timer = dwxgmac2_set_eee_timer,
1568 .set_eee_pls = dwxgmac2_set_eee_pls,
1569 .pcs_ctrl_ane = NULL,
1571 .pcs_get_adv_lp = NULL,
1573 .set_filter = dwxgmac2_set_filter,
1574 .safety_feat_config = dwxgmac3_safety_feat_config,
1575 .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
1576 .safety_feat_dump = dwxgmac3_safety_feat_dump,
1577 .set_mac_loopback = dwxgmac2_set_mac_loopback,
1578 .rss_configure = dwxgmac2_rss_configure,
1579 .update_vlan_hash = dwxgmac2_update_vlan_hash,
1580 .rxp_config = dwxgmac3_rxp_config,
1581 .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
1582 .flex_pps_config = dwxgmac2_flex_pps_config,
1583 .sarc_configure = dwxgmac2_sarc_configure,
1584 .enable_vlan = dwxgmac2_enable_vlan,
1585 .config_l3_filter = dwxgmac2_config_l3_filter,
1586 .config_l4_filter = dwxgmac2_config_l4_filter,
1587 .set_arp_offload = dwxgmac2_set_arp_offload,
1588 .est_configure = dwxgmac3_est_configure,
1589 .fpe_configure = dwxgmac3_fpe_configure,
1592 static void dwxlgmac2_rx_queue_enable(struct mac_device_info *hw, u8 mode,
1595 void __iomem *ioaddr = hw->pcsr;
1598 value = readl(ioaddr + XLGMAC_RXQ_ENABLE_CTRL0) & ~XGMAC_RXQEN(queue);
1599 if (mode == MTL_QUEUE_AVB)
1600 value |= 0x1 << XGMAC_RXQEN_SHIFT(queue);
1601 else if (mode == MTL_QUEUE_DCB)
1602 value |= 0x2 << XGMAC_RXQEN_SHIFT(queue);
1603 writel(value, ioaddr + XLGMAC_RXQ_ENABLE_CTRL0);
1606 const struct stmmac_ops dwxlgmac2_ops = {
1607 .core_init = dwxgmac2_core_init,
1608 .set_mac = dwxgmac2_set_mac,
1609 .rx_ipc = dwxgmac2_rx_ipc,
1610 .rx_queue_enable = dwxlgmac2_rx_queue_enable,
1611 .rx_queue_prio = dwxgmac2_rx_queue_prio,
1612 .tx_queue_prio = dwxgmac2_tx_queue_prio,
1613 .rx_queue_routing = NULL,
1614 .prog_mtl_rx_algorithms = dwxgmac2_prog_mtl_rx_algorithms,
1615 .prog_mtl_tx_algorithms = dwxgmac2_prog_mtl_tx_algorithms,
1616 .set_mtl_tx_queue_weight = dwxgmac2_set_mtl_tx_queue_weight,
1617 .map_mtl_to_dma = dwxgmac2_map_mtl_to_dma,
1618 .config_cbs = dwxgmac2_config_cbs,
1619 .dump_regs = dwxgmac2_dump_regs,
1620 .host_irq_status = dwxgmac2_host_irq_status,
1621 .host_mtl_irq_status = dwxgmac2_host_mtl_irq_status,
1622 .flow_ctrl = dwxgmac2_flow_ctrl,
1623 .pmt = dwxgmac2_pmt,
1624 .set_umac_addr = dwxgmac2_set_umac_addr,
1625 .get_umac_addr = dwxgmac2_get_umac_addr,
1626 .set_eee_mode = dwxgmac2_set_eee_mode,
1627 .reset_eee_mode = dwxgmac2_reset_eee_mode,
1628 .set_eee_timer = dwxgmac2_set_eee_timer,
1629 .set_eee_pls = dwxgmac2_set_eee_pls,
1630 .pcs_ctrl_ane = NULL,
1632 .pcs_get_adv_lp = NULL,
1634 .set_filter = dwxgmac2_set_filter,
1635 .safety_feat_config = dwxgmac3_safety_feat_config,
1636 .safety_feat_irq_status = dwxgmac3_safety_feat_irq_status,
1637 .safety_feat_dump = dwxgmac3_safety_feat_dump,
1638 .set_mac_loopback = dwxgmac2_set_mac_loopback,
1639 .rss_configure = dwxgmac2_rss_configure,
1640 .update_vlan_hash = dwxgmac2_update_vlan_hash,
1641 .rxp_config = dwxgmac3_rxp_config,
1642 .get_mac_tx_timestamp = dwxgmac2_get_mac_tx_timestamp,
1643 .flex_pps_config = dwxgmac2_flex_pps_config,
1644 .sarc_configure = dwxgmac2_sarc_configure,
1645 .enable_vlan = dwxgmac2_enable_vlan,
1646 .config_l3_filter = dwxgmac2_config_l3_filter,
1647 .config_l4_filter = dwxgmac2_config_l4_filter,
1648 .set_arp_offload = dwxgmac2_set_arp_offload,
1649 .est_configure = dwxgmac3_est_configure,
1650 .fpe_configure = dwxgmac3_fpe_configure,
1653 int dwxgmac2_setup(struct stmmac_priv *priv)
1655 struct mac_device_info *mac = priv->hw;
1657 dev_info(priv->device, "\tXGMAC2\n");
1659 priv->dev->priv_flags |= IFF_UNICAST_FLT;
1660 mac->pcsr = priv->ioaddr;
1661 mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1662 mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1663 mac->mcast_bits_log2 = 0;
1665 if (mac->multicast_filter_bins)
1666 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1668 mac->link.duplex = 0;
1669 mac->link.speed10 = XGMAC_CONFIG_SS_10_MII;
1670 mac->link.speed100 = XGMAC_CONFIG_SS_100_MII;
1671 mac->link.speed1000 = XGMAC_CONFIG_SS_1000_GMII;
1672 mac->link.speed2500 = XGMAC_CONFIG_SS_2500_GMII;
1673 mac->link.xgmii.speed2500 = XGMAC_CONFIG_SS_2500;
1674 mac->link.xgmii.speed5000 = XGMAC_CONFIG_SS_5000;
1675 mac->link.xgmii.speed10000 = XGMAC_CONFIG_SS_10000;
1676 mac->link.speed_mask = XGMAC_CONFIG_SS_MASK;
1678 mac->mii.addr = XGMAC_MDIO_ADDR;
1679 mac->mii.data = XGMAC_MDIO_DATA;
1680 mac->mii.addr_shift = 16;
1681 mac->mii.addr_mask = GENMASK(20, 16);
1682 mac->mii.reg_shift = 0;
1683 mac->mii.reg_mask = GENMASK(15, 0);
1684 mac->mii.clk_csr_shift = 19;
1685 mac->mii.clk_csr_mask = GENMASK(21, 19);
1690 int dwxlgmac2_setup(struct stmmac_priv *priv)
1692 struct mac_device_info *mac = priv->hw;
1694 dev_info(priv->device, "\tXLGMAC\n");
1696 priv->dev->priv_flags |= IFF_UNICAST_FLT;
1697 mac->pcsr = priv->ioaddr;
1698 mac->multicast_filter_bins = priv->plat->multicast_filter_bins;
1699 mac->unicast_filter_entries = priv->plat->unicast_filter_entries;
1700 mac->mcast_bits_log2 = 0;
1702 if (mac->multicast_filter_bins)
1703 mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
1705 mac->link.duplex = 0;
1706 mac->link.speed1000 = XLGMAC_CONFIG_SS_1000;
1707 mac->link.speed2500 = XLGMAC_CONFIG_SS_2500;
1708 mac->link.xgmii.speed10000 = XLGMAC_CONFIG_SS_10G;
1709 mac->link.xlgmii.speed25000 = XLGMAC_CONFIG_SS_25G;
1710 mac->link.xlgmii.speed40000 = XLGMAC_CONFIG_SS_40G;
1711 mac->link.xlgmii.speed50000 = XLGMAC_CONFIG_SS_50G;
1712 mac->link.xlgmii.speed100000 = XLGMAC_CONFIG_SS_100G;
1713 mac->link.speed_mask = XLGMAC_CONFIG_SS;
1715 mac->mii.addr = XGMAC_MDIO_ADDR;
1716 mac->mii.data = XGMAC_MDIO_DATA;
1717 mac->mii.addr_shift = 16;
1718 mac->mii.addr_mask = GENMASK(20, 16);
1719 mac->mii.reg_shift = 0;
1720 mac->mii.reg_mask = GENMASK(15, 0);
1721 mac->mii.clk_csr_shift = 19;
1722 mac->mii.clk_csr_mask = GENMASK(21, 19);