// SPDX-License-Identifier: GPL-2.0
/* Texas Instruments K3 AM65 Ethernet QoS submodule
 * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com/
 *
 * quality of service module includes:
 * Enhanced Scheduler Traffic (EST - P802.1Qbv/D2.2)
 * Interspersed Express Traffic (IET - P802.3br/D2.0)
 */

#include <linux/pm_runtime.h>
#include <linux/math.h>
#include <linux/time.h>
#include <linux/units.h>
#include <net/pkt_cls.h>

#include "am65-cpsw-nuss.h"
#include "am65-cpsw-qos.h"
#include "am65-cpts.h"
#include "cpsw_ale.h"

#define TO_MBPS(x)	DIV_ROUND_UP((x), BYTES_PER_MBIT)
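
/* Quick check of the conversion: BYTES_PER_MBIT is 125000 (1 Mbit is
 * 125000 bytes), so e.g. a tc rate of 250000 bytes/s rounds up to
 * TO_MBPS(250000) = 2 Mbps.
 */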

enum timer_act {
	TACT_PROG,		/* need program timer */
	TACT_NEED_STOP,		/* need stop first */
	TACT_SKIP_PROG,		/* just buffer can be updated */
};

static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs);

static u32
am65_cpsw_qos_tx_rate_calc(u32 rate_mbps, unsigned long bus_freq)
{
	u32 ir;

	bus_freq /= AM65_CPSW_PN_TX_RATE_CLK_DIV;
	ir = DIV_ROUND_UP(((u64)rate_mbps * 32768), bus_freq);

	return ir;
}
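
/* From the formula above, one LSB of the programmed value corresponds to
 * (divided) bus_freq / 32768 Mbps; e.g. halving bus_freq doubles the
 * register value needed to describe the same rate.
 */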

static void am65_cpsw_tx_pn_shaper_reset(struct am65_cpsw_port *port)
{
	int prio;

	for (prio = 0; prio < AM65_CPSW_PN_FIFO_PRIO_NUM; prio++) {
		writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));
		writel(0, port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
	}
}

static void am65_cpsw_tx_pn_shaper_apply(struct am65_cpsw_port *port)
{
	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
	struct am65_cpsw_common *common = port->common;
	struct tc_mqprio_qopt_offload *mqprio;
	bool enable, shaper_susp = false;
	u32 rate_mbps;
	int tc, prio;

	mqprio = &p_mqprio->mqprio_hw;
	/* takes care of no link case as well */
	if (p_mqprio->max_rate_total > port->qos.link_speed)
		shaper_susp = true;

	am65_cpsw_tx_pn_shaper_reset(port);

	enable = p_mqprio->shaper_en && !shaper_susp;
	if (!enable)
		return;

	/* Rate limit is specified per Traffic Class but
	 * for CPSW, rate limit can be applied per priority
	 * at port FIFO.
	 *
	 * We have assigned the same priority (TCn) to all queues
	 * of a Traffic Class so they share the same shaper
	 * bandwidth.
	 */
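
	/* Illustration of the CIR/EIR split below: with min_rate = 62500000
	 * B/s (500 Mbps) and max_rate = 125000000 B/s (1000 Mbps) on a TC,
	 * CIR is programmed for 500 Mbps and EIR for the remaining 500 Mbps
	 * of excess bandwidth.
	 */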

	for (tc = 0; tc < mqprio->qopt.num_tc; tc++) {
		prio = tc;

		rate_mbps = TO_MBPS(mqprio->min_rate[tc]);
		rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
						       common->bus_freq);
		writel(rate_mbps,
		       port->port_base + AM65_CPSW_PN_REG_PRI_CIR(prio));

		/* EIR is the excess over the committed rate (max - min) */
		if (mqprio->max_rate[tc]) {
			rate_mbps = mqprio->max_rate[tc] - mqprio->min_rate[tc];
			rate_mbps = TO_MBPS(rate_mbps);
			rate_mbps = am65_cpsw_qos_tx_rate_calc(rate_mbps,
							       common->bus_freq);
			writel(rate_mbps,
			       port->port_base + AM65_CPSW_PN_REG_PRI_EIR(prio));
		}
	}
}

static int am65_cpsw_mqprio_verify_shaper(struct am65_cpsw_port *port,
					  struct tc_mqprio_qopt_offload *mqprio)
{
	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
	struct netlink_ext_ack *extack = mqprio->extack;
	u64 min_rate_total = 0, max_rate_total = 0;
	u32 min_rate_msk = 0, max_rate_msk = 0;
	bool has_min_rate, has_max_rate;
	int num_tc, i;

	if (!(mqprio->flags & TC_MQPRIO_F_SHAPER))
		return 0;

	if (mqprio->shaper != TC_MQPRIO_SHAPER_BW_RATE)
		return 0;

	has_min_rate = !!(mqprio->flags & TC_MQPRIO_F_MIN_RATE);
	has_max_rate = !!(mqprio->flags & TC_MQPRIO_F_MAX_RATE);

	if (!has_min_rate && has_max_rate) {
		NL_SET_ERR_MSG_MOD(extack, "min_rate is required with max_rate");
		return -EOPNOTSUPP;
	}

	if (!has_min_rate)
		return 0;

	num_tc = mqprio->qopt.num_tc;

	for (i = num_tc - 1; i >= 0; i--) {
		u32 ch_msk;

		if (mqprio->min_rate[i])
			min_rate_msk |= BIT(i);
		min_rate_total += mqprio->min_rate[i];

		if (has_max_rate) {
			if (mqprio->max_rate[i])
				max_rate_msk |= BIT(i);
			max_rate_total += mqprio->max_rate[i];

			if (!mqprio->min_rate[i] && mqprio->max_rate[i]) {
				NL_SET_ERR_MSG_FMT_MOD(extack,
						       "TX tc%d rate max>0 but min=0",
						       i);
				return -EINVAL;
			}

			if (mqprio->max_rate[i] &&
			    mqprio->max_rate[i] < mqprio->min_rate[i]) {
				NL_SET_ERR_MSG_FMT_MOD(extack,
						       "TX tc%d rate min(%llu)>max(%llu)",
						       i, mqprio->min_rate[i],
						       mqprio->max_rate[i]);
				return -EINVAL;
			}
		}

		/* Rates must be enabled starting from the highest TC and
		 * continue toward the lowest without gaps.
		 */
		ch_msk = GENMASK(num_tc - 1, i);
		if ((min_rate_msk & BIT(i)) && (min_rate_msk ^ ch_msk)) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "Min rate must be set sequentially hi->lo tx_rate_msk%x",
					       min_rate_msk);
			return -EINVAL;
		}

		if ((max_rate_msk & BIT(i)) && (max_rate_msk ^ ch_msk)) {
			NL_SET_ERR_MSG_FMT_MOD(extack,
					       "Max rate must be set sequentially hi->lo tx_rate_msk%x",
					       max_rate_msk);
			return -EINVAL;
		}
	}

	min_rate_total = TO_MBPS(min_rate_total);
	max_rate_total = TO_MBPS(max_rate_total);

	p_mqprio->shaper_en = true;
	p_mqprio->max_rate_total = max_t(u64, min_rate_total, max_rate_total);

	return 0;
}

static void am65_cpsw_reset_tc_mqprio(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;

	p_mqprio->shaper_en = false;
	p_mqprio->max_rate_total = 0;

	am65_cpsw_tx_pn_shaper_reset(port);
	netdev_reset_tc(ndev);

	/* Reset all Queue priorities to 0 */
	writel(0, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);

	am65_cpsw_iet_change_preemptible_tcs(port, 0);
}

static int am65_cpsw_setup_mqprio(struct net_device *ndev, void *type_data)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_mqprio *p_mqprio = &port->qos.mqprio;
	struct tc_mqprio_qopt_offload *mqprio = type_data;
	struct am65_cpsw_common *common = port->common;
	struct tc_mqprio_qopt *qopt = &mqprio->qopt;
	int i, tc, offset, count, prio, ret;
	u8 num_tc = qopt->num_tc;
	u32 tx_prio_map = 0;

	memcpy(&p_mqprio->mqprio_hw, mqprio, sizeof(*mqprio));

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}

	if (!num_tc) {
		am65_cpsw_reset_tc_mqprio(ndev);
		ret = 0;
		goto exit_put;
	}

	ret = am65_cpsw_mqprio_verify_shaper(port, mqprio);
	if (ret)
		goto exit_put;

	netdev_set_num_tc(ndev, num_tc);

	/* Multiple Linux priorities can map to a Traffic Class
	 * A Traffic Class can have multiple contiguous Queues,
	 * Queues get mapped to Channels (thread_id),
	 *	if not VLAN tagged, thread_id is used as packet_priority
	 *	if VLAN tagged, VLAN priority is used as packet_priority
	 * packet_priority gets mapped to header_priority in p0_rx_pri_map,
	 * header_priority gets mapped to switch_priority in pn_tx_pri_map.
	 * As p0_rx_pri_map is left at defaults (0x76543210), we can
	 * assume that Queue_n gets mapped to header_priority_n. We can then
	 * set the switch priority in pn_tx_pri_map.
	 */
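
	/* Example: num_tc = 2 with qopt->count = {2, 2} and
	 * qopt->offset = {0, 2} yields tx_prio_map = 0x1100; queues 0-1
	 * carry switch priority 0 (TC0), queues 2-3 carry priority 1 (TC1).
	 */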

	for (tc = 0; tc < num_tc; tc++) {
		prio = tc;

		/* For simplicity we assign the same priority (TCn) to
		 * all queues of a Traffic Class.
		 */
		for (i = qopt->offset[tc]; i < qopt->offset[tc] + qopt->count[tc]; i++)
			tx_prio_map |= prio << (4 * i);

		count = qopt->count[tc];
		offset = qopt->offset[tc];
		netdev_set_tc_queue(ndev, tc, count, offset);
	}

	writel(tx_prio_map, port->port_base + AM65_CPSW_PN_REG_TX_PRI_MAP);

	am65_cpsw_tx_pn_shaper_apply(port);
	am65_cpsw_iet_change_preemptible_tcs(port, mqprio->preemptible_tcs);

exit_put:
	pm_runtime_put(common->dev);

	return ret;
}

static int am65_cpsw_iet_set_verify_timeout_count(struct am65_cpsw_port *port)
{
	int verify_time_ms = port->qos.iet.verify_time_ms;
	u32 val;

	/* The number of wireside clocks contained in the verify
	 * timeout counter. The default is 0x1312d0
	 * (10ms at 125Mhz in 1G mode).
	 */
	val = 125 * HZ_PER_MHZ;	/* assuming 125MHz wireside clock */

	val /= MILLIHZ_PER_HZ;	/* count per ms timeout */
	val *= verify_time_ms;	/* count for timeout ms */

	if (val > AM65_CPSW_PN_MAC_VERIFY_CNT_MASK)
		return -EINVAL;

	writel(val, port->port_base + AM65_CPSW_PN_REG_IET_VERIFY);

	return 0;
}
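
/* Sanity check of the arithmetic above: 125 MHz is 125,000,000 clocks/s,
 * i.e. 125,000 clocks per ms; a verify_time_ms of 10 then gives
 * 1,250,000 = 0x1312d0, matching the documented register default.
 */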

static int am65_cpsw_iet_verify_wait(struct am65_cpsw_port *port)
{
	u32 ctrl, status;
	int try;

	try = 20;
	do {
		/* Reset the verify state machine by writing 1
		 * to LINKFAIL
		 */
		ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
		ctrl |= AM65_CPSW_PN_IET_MAC_LINKFAIL;
		writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);

		/* Clear MAC_LINKFAIL bit to start Verify. */
		ctrl = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
		ctrl &= ~AM65_CPSW_PN_IET_MAC_LINKFAIL;
		writel(ctrl, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);

		msleep(port->qos.iet.verify_time_ms);

		status = readl(port->port_base + AM65_CPSW_PN_REG_IET_STATUS);
		if (status & AM65_CPSW_PN_MAC_VERIFIED)
			return 0;

		if (status & AM65_CPSW_PN_MAC_VERIFY_FAIL) {
			netdev_dbg(port->ndev,
				   "MAC Merge verify failed, trying again\n");
			continue;
		}

		if (status & AM65_CPSW_PN_MAC_RESPOND_ERR) {
			netdev_dbg(port->ndev, "MAC Merge respond error\n");
			return -ENODEV;
		}

		if (status & AM65_CPSW_PN_MAC_VERIFY_ERR) {
			netdev_dbg(port->ndev, "MAC Merge verify error\n");
			return -ENODEV;
		}
	} while (try-- > 0);

	netdev_dbg(port->ndev, "MAC Merge verify timeout\n");
	return -ETIMEDOUT;
}

static void am65_cpsw_iet_set_preempt_mask(struct am65_cpsw_port *port, u8 preemptible_tcs)
{
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
	val &= ~AM65_CPSW_PN_IET_MAC_PREMPT_MASK;
	val |= AM65_CPSW_PN_IET_MAC_SET_PREEMPT(preemptible_tcs);
	writel(val, port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
}

/* enable common IET_ENABLE only if at least 1 port has rx IET enabled.
 * UAPI doesn't allow tx enable without rx enable.
 */
void am65_cpsw_iet_common_enable(struct am65_cpsw_common *common)
{
	struct am65_cpsw_port *port;
	bool rx_enable = false;
	u32 val;
	int i;

	for (i = 0; i < common->port_num; i++) {
		port = &common->ports[i];
		val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
		rx_enable = !!(val & AM65_CPSW_PN_CTL_IET_PORT_EN);
		if (rx_enable)
			break;
	}

	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);
	if (rx_enable)
		val |= AM65_CPSW_CTL_IET_EN;
	else
		val &= ~AM65_CPSW_CTL_IET_EN;

	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
	common->iet_enabled = rx_enable;
}

/* CPSW does not have an IRQ to notify changes to the MAC Merge TX status
 * (active/inactive), but the preemptible traffic classes should only be
 * committed to hardware once TX is active. Resort to polling.
 */
void am65_cpsw_iet_commit_preemptible_tcs(struct am65_cpsw_port *port)
{
	u8 preemptible_tcs;
	int err;
	u32 val;

	if (port->qos.link_speed == SPEED_UNKNOWN)
		return;

	val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
	if (!(val & AM65_CPSW_PN_CTL_IET_PORT_EN))
		return;

	/* update common IET enable */
	am65_cpsw_iet_common_enable(port->common);

	/* update verify count */
	err = am65_cpsw_iet_set_verify_timeout_count(port);
	if (err) {
		netdev_err(port->ndev, "couldn't set verify count: %d\n", err);
		return;
	}

	val = readl(port->port_base + AM65_CPSW_PN_REG_IET_CTRL);
	if (!(val & AM65_CPSW_PN_IET_MAC_DISABLEVERIFY)) {
		err = am65_cpsw_iet_verify_wait(port);
		if (err)
			return;
	}

	preemptible_tcs = port->qos.iet.preemptible_tcs;
	am65_cpsw_iet_set_preempt_mask(port, preemptible_tcs);
}

static void am65_cpsw_iet_change_preemptible_tcs(struct am65_cpsw_port *port, u8 preemptible_tcs)
{
	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(port->ndev);

	port->qos.iet.preemptible_tcs = preemptible_tcs;
	mutex_lock(&priv->mm_lock);
	am65_cpsw_iet_commit_preemptible_tcs(port);
	mutex_unlock(&priv->mm_lock);
}

static void am65_cpsw_iet_link_state_update(struct net_device *ndev)
{
	struct am65_cpsw_ndev_priv *priv = am65_ndev_to_priv(ndev);
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	mutex_lock(&priv->mm_lock);
	am65_cpsw_iet_commit_preemptible_tcs(port);
	mutex_unlock(&priv->mm_lock);
}

static int am65_cpsw_port_est_enabled(struct am65_cpsw_port *port)
{
	return port->qos.est_oper || port->qos.est_admin;
}

static void am65_cpsw_est_enable(struct am65_cpsw_common *common, int enable)
{
	u32 val;

	val = readl(common->cpsw_base + AM65_CPSW_REG_CTL);

	if (enable)
		val |= AM65_CPSW_CTL_EST_EN;
	else
		val &= ~AM65_CPSW_CTL_EST_EN;

	writel(val, common->cpsw_base + AM65_CPSW_REG_CTL);
	common->est_enabled = enable;
}

static void am65_cpsw_port_est_enable(struct am65_cpsw_port *port, int enable)
{
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_CTL);
	if (enable)
		val |= AM65_CPSW_PN_CTL_EST_PORT_EN;
	else
		val &= ~AM65_CPSW_PN_CTL_EST_PORT_EN;

	writel(val, port->port_base + AM65_CPSW_PN_REG_CTL);
}

/* target new EST RAM buffer, actual toggle happens after cycle completion */
static void am65_cpsw_port_est_assign_buf_num(struct net_device *ndev,
					      int buf_num)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	if (buf_num)
		val |= AM65_CPSW_PN_EST_BUFSEL;
	else
		val &= ~AM65_CPSW_PN_EST_BUFSEL;

	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);
}

/* am65_cpsw_port_est_is_swapped() - Indicate if h/w is transitioned
 * admin -> oper or not
 *
 * Return true if already transitioned, i.e. oper is equal to admin and buf
 * numbers match (est_oper->buf matches est_admin->buf).
 * Return false if before transition, i.e. oper is not equal to admin (a
 * previous admin command is waiting to be transitioned to oper state
 * and est_oper->buf does not match est_admin->buf).
 */
static int am65_cpsw_port_est_is_swapped(struct net_device *ndev, int *oper,
					 int *admin)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_FIFO_STATUS);
	*oper = !!(val & AM65_CPSW_PN_FST_EST_BUFACT);

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	*admin = !!(val & AM65_CPSW_PN_EST_BUFSEL);

	return *admin == *oper;
}

/* am65_cpsw_port_est_get_free_buf_num() - Get free buffer number for
 * Admin to program the new schedule.
 *
 * If oper is same as admin, return the other buffer (!oper) as the admin
 * buffer. If oper is not the same, the driver lets the current oper continue,
 * as it is in the process of transitioning from admin -> oper. So keep the
 * oper by selecting the same oper buffer by writing to EST_BUFSEL bit in
 * EST CTL register. In the second iteration they will match and code returns.
 * The actual buffer to write command is selected later before it is ready
 * to update the schedule.
 */
static int am65_cpsw_port_est_get_free_buf_num(struct net_device *ndev)
{
	int oper, admin;
	int roll = 2;

	while (roll--) {
		if (am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
			return !oper;

		/* admin is not set, so hinder transition as it's not allowed
		 * to touch memory in-flight, by targeting same oper buf.
		 */
		am65_cpsw_port_est_assign_buf_num(ndev, oper);

		dev_info(&ndev->dev,
			 "Prev. EST admin cycle is in transit %d -> %d\n",
			 oper, admin);
	}

	return admin;
}

static void am65_cpsw_admin_to_oper(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	devm_kfree(&ndev->dev, port->qos.est_oper);

	port->qos.est_oper = port->qos.est_admin;
	port->qos.est_admin = NULL;
}

static void am65_cpsw_port_est_get_buf_num(struct net_device *ndev,
					   struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 val;

	val = readl(port->port_base + AM65_CPSW_PN_REG_EST_CTL);
	val &= ~AM65_CPSW_PN_EST_ONEBUF;
	writel(val, port->port_base + AM65_CPSW_PN_REG_EST_CTL);

	est_new->buf = am65_cpsw_port_est_get_free_buf_num(ndev);

	/* rolled buf num means changed buf while configuring */
	if (port->qos.est_oper && port->qos.est_admin &&
	    est_new->buf == port->qos.est_oper->buf)
		am65_cpsw_admin_to_oper(ndev);
}

static void am65_cpsw_est_set(struct net_device *ndev, int enable)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	int common_enable = 0;
	int i;

	am65_cpsw_port_est_enable(port, enable);

	for (i = 0; i < common->port_num; i++)
		common_enable |= am65_cpsw_port_est_enabled(&common->ports[i]);

	common_enable |= enable;
	am65_cpsw_est_enable(common, common_enable);
}

/* This update is supposed to be used in any routine before getting real state
 * of admin -> oper transition, particularly it's supposed to be used in some
 * generic routine for providing real state to Taprio Qdisc.
 */
static void am65_cpsw_est_update_state(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int oper, admin;

	if (!port->qos.est_admin)
		return;

	if (!am65_cpsw_port_est_is_swapped(ndev, &oper, &admin))
		return;

	am65_cpsw_admin_to_oper(ndev);
}

/* The fetch command count is a number of bytes in Gigabit mode, or nibbles in
 * 10/100Mb mode. So, having speed and time in ns, recalculate ns to the number
 * of bytes/nibbles that can be sent while transmitting at the given speed.
 */
static int am65_est_cmd_ns_to_cnt(u64 ns, int link_speed)
{
	u64 temp;

	temp = ns * link_speed;
	if (link_speed < SPEED_1000)
		temp <<= 1;

	return DIV_ROUND_UP(temp, 8 * 1000);
}
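
/* Example: a 12000 ns gate interval at 1000 Mbps converts to
 * 12000 * 1000 / 8000 = 1500 byte times; at 100 Mbps the doubled
 * (nibble) count gives 12000 * 100 * 2 / 8000 = 300 nibbles.
 */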

static void __iomem *am65_cpsw_est_set_sched_cmds(void __iomem *addr,
						  int fetch_cnt,
						  int fetch_allow)
{
	u32 prio_mask, cmd_fetch_cnt, cmd;

	do {
		if (fetch_cnt > AM65_CPSW_FETCH_CNT_MAX) {
			fetch_cnt -= AM65_CPSW_FETCH_CNT_MAX;
			cmd_fetch_cnt = AM65_CPSW_FETCH_CNT_MAX;
		} else {
			cmd_fetch_cnt = fetch_cnt;
			/* fetch count can't be less than 16? */
			if (cmd_fetch_cnt && cmd_fetch_cnt < 16)
				cmd_fetch_cnt = 16;

			fetch_cnt = 0;
		}

		prio_mask = fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK;
		cmd = (cmd_fetch_cnt << AM65_CPSW_FETCH_CNT_OFFSET) | prio_mask;
		writel(cmd, addr);
		addr += 4;
	} while (fetch_cnt);

	return addr;
}
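
/* Sketch of the command word built above: the fetch count occupies the bits
 * above the 8-bit priority-allow mask, so cmd_fetch_cnt = 0x20 with
 * fetch_allow = 0x03 encodes as (0x20 << AM65_CPSW_FETCH_CNT_OFFSET) | 0x3,
 * opening the gates of priorities 0 and 1 for 0x20 byte times.
 */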

static int am65_cpsw_est_calc_cmd_num(struct net_device *ndev,
				      struct tc_taprio_qopt_offload *taprio,
				      int link_speed)
{
	int i, cmd_cnt, cmd_sum = 0;
	u32 fetch_cnt;

	for (i = 0; i < taprio->num_entries; i++) {
		if (taprio->entries[i].command != TC_TAPRIO_CMD_SET_GATES) {
			dev_err(&ndev->dev, "Only SET command is supported");
			return -EINVAL;
		}

		fetch_cnt = am65_est_cmd_ns_to_cnt(taprio->entries[i].interval,
						   link_speed);

		cmd_cnt = DIV_ROUND_UP(fetch_cnt, AM65_CPSW_FETCH_CNT_MAX);
		if (!fetch_cnt)
			cmd_cnt++;

		cmd_sum += cmd_cnt;

		if (!fetch_cnt)
			break;
	}

	return cmd_sum;
}

static int am65_cpsw_est_check_scheds(struct net_device *ndev,
				      struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	int cmd_num;

	cmd_num = am65_cpsw_est_calc_cmd_num(ndev, &est_new->taprio,
					     port->qos.link_speed);
	if (cmd_num < 0)
		return cmd_num;

	if (cmd_num > AM65_CPSW_FETCH_RAM_CMD_NUM / 2) {
		dev_err(&ndev->dev, "No fetch RAM");
		return -ENOMEM;
	}

	return 0;
}

static void am65_cpsw_est_set_sched_list(struct net_device *ndev,
					 struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	u32 fetch_cnt, fetch_allow, all_fetch_allow = 0;
	void __iomem *ram_addr, *max_ram_addr;
	struct tc_taprio_sched_entry *entry;
	int i, ram_size;

	ram_addr = port->fetch_ram_base;
	ram_size = AM65_CPSW_FETCH_RAM_CMD_NUM * 2;
	ram_addr += est_new->buf * ram_size;

	max_ram_addr = ram_size + ram_addr;
	for (i = 0; i < est_new->taprio.num_entries; i++) {
		entry = &est_new->taprio.entries[i];

		fetch_cnt = am65_est_cmd_ns_to_cnt(entry->interval,
						   port->qos.link_speed);
		fetch_allow = entry->gate_mask;
		if (fetch_allow > AM65_CPSW_FETCH_ALLOW_MAX)
			dev_dbg(&ndev->dev, "fetch_allow > 8 bits: %d\n",
				fetch_allow);

		ram_addr = am65_cpsw_est_set_sched_cmds(ram_addr, fetch_cnt,
							fetch_allow);

		if (!fetch_cnt && i < est_new->taprio.num_entries - 1) {
			dev_info(&ndev->dev,
				 "next scheds after %d have no impact", i + 1);
			break;
		}

		all_fetch_allow |= fetch_allow;
	}

	/* end cmd, enabling non-timed queues for potential over cycle time */
	if (ram_addr < max_ram_addr)
		writel(~all_fetch_allow & AM65_CPSW_FETCH_ALLOW_MSK, ram_addr);
}

/*
 * Enable ESTf periodic output, set cycle start time and interval.
 */
static int am65_cpsw_timer_set(struct net_device *ndev,
			       struct am65_cpsw_est *est_new)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	struct am65_cpts *cpts = common->cpts;
	struct am65_cpts_estf_cfg cfg;

	cfg.ns_period = est_new->taprio.cycle_time;
	cfg.ns_start = est_new->taprio.base_time;

	return am65_cpts_estf_enable(cpts, port->port_id - 1, &cfg);
}

static void am65_cpsw_timer_stop(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = port->common->cpts;

	am65_cpts_estf_disable(cpts, port->port_id - 1);
}

static enum timer_act am65_cpsw_timer_act(struct net_device *ndev,
					  struct am65_cpsw_est *est_new)
{
	struct tc_taprio_qopt_offload *taprio_oper, *taprio_new;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = port->common->cpts;
	u64 cur_time;
	s64 diff;

	if (!port->qos.est_oper)
		return TACT_PROG;

	taprio_new = &est_new->taprio;
	taprio_oper = &port->qos.est_oper->taprio;

	if (taprio_new->cycle_time != taprio_oper->cycle_time)
		return TACT_NEED_STOP;

	/* in order to avoid timer reset get base_time from oper taprio */
	if (!taprio_new->base_time && taprio_oper)
		taprio_new->base_time = taprio_oper->base_time;

	if (taprio_new->base_time == taprio_oper->base_time)
		return TACT_SKIP_PROG;

	/* base times are cycle synchronized */
	diff = taprio_new->base_time - taprio_oper->base_time;
	diff = diff < 0 ? -diff : diff;
	if (diff % taprio_new->cycle_time)
		return TACT_NEED_STOP;
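
	/* The check above keeps the running timer when e.g. cycle_time is
	 * 1 ms and the new base_time differs from the old one by exactly
	 * 3 ms (an integer number of cycles); a 0.5 ms offset would force
	 * a stop and reprogram.
	 */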

	cur_time = am65_cpts_ns_gettime(cpts);
	if (taprio_new->base_time <= cur_time + taprio_new->cycle_time)
		return TACT_SKIP_PROG;

	/* TODO: Admin schedule at future time is not currently supported */
	return TACT_NEED_STOP;
}

static void am65_cpsw_stop_est(struct net_device *ndev)
{
	am65_cpsw_est_set(ndev, 0);
	am65_cpsw_timer_stop(ndev);
}

static void am65_cpsw_taprio_destroy(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	am65_cpsw_stop_est(ndev);

	devm_kfree(&ndev->dev, port->qos.est_admin);
	devm_kfree(&ndev->dev, port->qos.est_oper);

	port->qos.est_oper = NULL;
	port->qos.est_admin = NULL;

	am65_cpsw_reset_tc_mqprio(ndev);
}

static void am65_cpsw_cp_taprio(struct tc_taprio_qopt_offload *from,
				struct tc_taprio_qopt_offload *to)
{
	int i;

	*to = *from;
	for (i = 0; i < from->num_entries; i++)
		to->entries[i] = from->entries[i];
}

static int am65_cpsw_taprio_replace(struct net_device *ndev,
				    struct tc_taprio_qopt_offload *taprio)
{
	struct am65_cpsw_common *common = am65_ndev_to_common(ndev);
	struct netlink_ext_ack *extack = taprio->mqprio.extack;
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpts *cpts = common->cpts;
	struct am65_cpsw_est *est_new;
	int ret, tact;

	if (!netif_running(ndev)) {
		NL_SET_ERR_MSG_MOD(extack, "interface is down, link speed unknown");
		return -ENETDOWN;
	}

	if (common->pf_p0_rx_ptype_rrobin) {
		NL_SET_ERR_MSG_MOD(extack,
				   "p0-rx-ptype-rrobin flag conflicts with taprio qdisc");
		return -EINVAL;
	}

	if (port->qos.link_speed == SPEED_UNKNOWN)
		return -ENOLINK;

	if (taprio->cycle_time_extension) {
		NL_SET_ERR_MSG_MOD(extack,
				   "cycle time extension not supported");
		return -EOPNOTSUPP;
	}

	est_new = devm_kzalloc(&ndev->dev,
			       struct_size(est_new, taprio.entries, taprio->num_entries),
			       GFP_KERNEL);
	if (!est_new)
		return -ENOMEM;

	ret = am65_cpsw_setup_mqprio(ndev, &taprio->mqprio);
	if (ret)
		return ret;

	am65_cpsw_cp_taprio(taprio, &est_new->taprio);

	am65_cpsw_est_update_state(ndev);

	ret = am65_cpsw_est_check_scheds(ndev, est_new);
	if (ret < 0)
		goto fail;

	tact = am65_cpsw_timer_act(ndev, est_new);
	if (tact == TACT_NEED_STOP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Can't toggle estf timer, stop taprio first");
		ret = -EINVAL;
		goto fail;
	}

	if (tact == TACT_PROG)
		am65_cpsw_timer_stop(ndev);

	if (!est_new->taprio.base_time)
		est_new->taprio.base_time = am65_cpts_ns_gettime(cpts);

	am65_cpsw_port_est_get_buf_num(ndev, est_new);
	am65_cpsw_est_set_sched_list(ndev, est_new);
	am65_cpsw_port_est_assign_buf_num(ndev, est_new->buf);

	am65_cpsw_est_set(ndev, 1);

	if (tact == TACT_PROG) {
		ret = am65_cpsw_timer_set(ndev, est_new);
		if (ret) {
			NL_SET_ERR_MSG_MOD(extack,
					   "Failed to set cycle time");
			goto fail;
		}
	}

	devm_kfree(&ndev->dev, port->qos.est_admin);
	port->qos.est_admin = est_new;
	am65_cpsw_iet_change_preemptible_tcs(port, taprio->mqprio.preemptible_tcs);

	return 0;

fail:
	am65_cpsw_reset_tc_mqprio(ndev);
	devm_kfree(&ndev->dev, est_new);
	return ret;
}

static void am65_cpsw_est_link_up(struct net_device *ndev, int link_speed)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	ktime_t cur_time;
	s64 delta;

	if (!am65_cpsw_port_est_enabled(port))
		return;

	if (port->qos.link_down_time) {
		cur_time = ktime_get();
		delta = ktime_us_delta(cur_time, port->qos.link_down_time);
		if (delta > USEC_PER_SEC) {
			dev_err(&ndev->dev,
				"Link has been lost too long, stopping TAS");
			goto purge_est;
		}
	}

	return;

purge_est:
	am65_cpsw_taprio_destroy(ndev);
}

static int am65_cpsw_setup_taprio(struct net_device *ndev, void *type_data)
{
	struct tc_taprio_qopt_offload *taprio = type_data;
	int err = 0;

	switch (taprio->cmd) {
	case TAPRIO_CMD_REPLACE:
		err = am65_cpsw_taprio_replace(ndev, taprio);
		break;
	case TAPRIO_CMD_DESTROY:
		am65_cpsw_taprio_destroy(ndev);
		break;
	default:
		err = -EOPNOTSUPP;
	}

	return err;
}

static int am65_cpsw_tc_query_caps(struct net_device *ndev, void *type_data)
{
	struct tc_query_caps_base *base = type_data;

	switch (base->type) {
	case TC_SETUP_QDISC_MQPRIO: {
		struct tc_mqprio_caps *caps = base->caps;

		caps->validate_queue_counts = true;

		return 0;
	}
	case TC_SETUP_QDISC_TAPRIO: {
		struct tc_taprio_caps *caps = base->caps;

		caps->gate_mask_per_txq = true;

		return 0;
	}
	default:
		return -EOPNOTSUPP;
	}
}

static int am65_cpsw_qos_clsflower_add_policer(struct am65_cpsw_port *port,
					       struct netlink_ext_ack *extack,
					       struct flow_cls_offload *cls,
					       u64 rate_pkt_ps)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_dissector *dissector = rule->match.dissector;
	static const u8 mc_mac[] = {0x01, 0x00, 0x00, 0x00, 0x00, 0x00};
	struct am65_cpsw_qos *qos = &port->qos;
	struct flow_match_eth_addrs match;
	int ret;

	if (dissector->used_keys &
	    ~(BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
	      BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS))) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Unsupported keys used");
		return -EOPNOTSUPP;
	}

	if (!flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		NL_SET_ERR_MSG_MOD(extack, "Not matching on eth address");
		return -EOPNOTSUPP;
	}

	flow_rule_match_eth_addrs(rule, &match);

	if (!is_zero_ether_addr(match.mask->src)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Matching on source MAC not supported");
		return -EOPNOTSUPP;
	}

	if (is_broadcast_ether_addr(match.key->dst) &&
	    is_broadcast_ether_addr(match.mask->dst)) {
		ret = cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, rate_pkt_ps);
		if (ret)
			return ret;

		qos->ale_bc_ratelimit.cookie = cls->cookie;
		qos->ale_bc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else if (ether_addr_equal_unaligned(match.key->dst, mc_mac) &&
		   ether_addr_equal_unaligned(match.mask->dst, mc_mac)) {
		ret = cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, rate_pkt_ps);
		if (ret)
			return ret;

		qos->ale_mc_ratelimit.cookie = cls->cookie;
		qos->ale_mc_ratelimit.rate_packet_ps = rate_pkt_ps;
	} else {
		NL_SET_ERR_MSG_MOD(extack, "Not supported matching key");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int am65_cpsw_qos_clsflower_policer_validate(const struct flow_action *action,
						    const struct flow_action_entry *act,
						    struct netlink_ext_ack *extack)
{
	if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when exceed action is not drop");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
	    act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is not pipe or ok");
		return -EOPNOTSUPP;
	}

	if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
	    !flow_action_is_last_entry(action, act)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when conform action is ok, but action is not last");
		return -EOPNOTSUPP;
	}

	if (act->police.rate_bytes_ps || act->police.peakrate_bytes_ps ||
	    act->police.avrate || act->police.overhead) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Offload not supported when bytes per second/peakrate/avrate/overhead is configured");
		return -EOPNOTSUPP;
	}

	return 0;
}

static int am65_cpsw_qos_configure_clsflower(struct am65_cpsw_port *port,
					     struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct netlink_ext_ack *extack = cls->common.extack;
	const struct flow_action_entry *act;
	int i, ret;

	flow_action_for_each(i, act, &rule->action) {
		switch (act->id) {
		case FLOW_ACTION_POLICE:
			ret = am65_cpsw_qos_clsflower_policer_validate(&rule->action, act, extack);
			if (ret)
				return ret;

			return am65_cpsw_qos_clsflower_add_policer(port, extack, cls,
								   act->police.rate_pkt_ps);
		default:
			NL_SET_ERR_MSG_MOD(extack,
					   "Action not supported");
			return -EOPNOTSUPP;
		}
	}
	return -EOPNOTSUPP;
}

static int am65_cpsw_qos_delete_clsflower(struct am65_cpsw_port *port, struct flow_cls_offload *cls)
{
	struct am65_cpsw_qos *qos = &port->qos;

	if (cls->cookie == qos->ale_bc_ratelimit.cookie) {
		qos->ale_bc_ratelimit.cookie = 0;
		qos->ale_bc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_bc(port->common->ale, port->port_id, 0);
	}

	if (cls->cookie == qos->ale_mc_ratelimit.cookie) {
		qos->ale_mc_ratelimit.cookie = 0;
		qos->ale_mc_ratelimit.rate_packet_ps = 0;
		cpsw_ale_rx_ratelimit_mc(port->common->ale, port->port_id, 0);
	}

	return 0;
}

static int am65_cpsw_qos_setup_tc_clsflower(struct am65_cpsw_port *port,
					    struct flow_cls_offload *cls_flower)
{
	switch (cls_flower->command) {
	case FLOW_CLS_REPLACE:
		return am65_cpsw_qos_configure_clsflower(port, cls_flower);
	case FLOW_CLS_DESTROY:
		return am65_cpsw_qos_delete_clsflower(port, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}

static int am65_cpsw_qos_setup_tc_block_cb(enum tc_setup_type type, void *type_data, void *cb_priv)
{
	struct am65_cpsw_port *port = cb_priv;

	if (!tc_cls_can_offload_and_chain0(port->ndev, type_data))
		return -EOPNOTSUPP;

	switch (type) {
	case TC_SETUP_CLSFLOWER:
		return am65_cpsw_qos_setup_tc_clsflower(port, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

static LIST_HEAD(am65_cpsw_qos_block_cb_list);

static int am65_cpsw_qos_setup_tc_block(struct net_device *ndev, struct flow_block_offload *f)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	return flow_block_cb_setup_simple(f, &am65_cpsw_qos_block_cb_list,
					  am65_cpsw_qos_setup_tc_block_cb,
					  port, port, true);
}

static void
am65_cpsw_qos_tx_p0_rate_apply(struct am65_cpsw_common *common,
			       int tx_ch, u32 rate_mbps)
{
	struct am65_cpsw_host *host = am65_common_get_host(common);
	u32 ch_cir;
	int i;

	ch_cir = am65_cpsw_qos_tx_rate_calc(rate_mbps, common->bus_freq);
	writel(ch_cir, host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));

	/* update rates for every port tx queues */
	for (i = 0; i < common->port_num; i++) {
		struct net_device *ndev = common->ports[i].ndev;

		if (!ndev)
			continue;
		netdev_get_tx_queue(ndev, tx_ch)->tx_maxrate = rate_mbps;
	}
}

int am65_cpsw_qos_ndo_tx_p0_set_maxrate(struct net_device *ndev,
					int queue, u32 rate_mbps)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);
	struct am65_cpsw_common *common = port->common;
	struct am65_cpsw_tx_chn *tx_chn;
	u32 ch_rate, tx_ch_rate_msk_new;
	u32 ch_msk = 0;
	int ret;

	dev_dbg(common->dev, "apply TX%d rate limiting %uMbps tx_rate_msk%x\n",
		queue, rate_mbps, common->tx_ch_rate_msk);

	if (common->pf_p0_rx_ptype_rrobin) {
		dev_err(common->dev, "TX Rate Limiting failed - rrobin mode\n");
		return -EINVAL;
	}

	ch_rate = netdev_get_tx_queue(ndev, queue)->tx_maxrate;
	if (ch_rate == rate_mbps)
		return 0;

	ret = pm_runtime_get_sync(common->dev);
	if (ret < 0) {
		pm_runtime_put_noidle(common->dev);
		return ret;
	}
	ret = 0;

	/* Rate limits must stay contiguous from the top queue downward */
	tx_ch_rate_msk_new = common->tx_ch_rate_msk;
	if (rate_mbps && !(tx_ch_rate_msk_new & BIT(queue))) {
		tx_ch_rate_msk_new |= BIT(queue);
		ch_msk = GENMASK(common->tx_ch_num - 1, queue);
		ch_msk = tx_ch_rate_msk_new ^ ch_msk;
	} else if (!rate_mbps) {
		tx_ch_rate_msk_new &= ~BIT(queue);
		ch_msk = queue ? GENMASK(queue - 1, 0) : 0;
		ch_msk = tx_ch_rate_msk_new & ch_msk;
	}
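
	/* Example with tx_ch_num = 4: enabling a rate on queue 1 while
	 * queues 2-3 are still unlimited leaves tx_ch_rate_msk_new = 0x2
	 * vs GENMASK(3, 1) = 0xe, so ch_msk = 0xc != 0 and the request is
	 * rejected below; limits must be applied from queue 3 downward.
	 */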

	if (ch_msk) {
		dev_err(common->dev, "TX rate limiting has to be enabled sequentially hi->lo tx_rate_msk:%x tx_rate_msk_new:%x\n",
			common->tx_ch_rate_msk, tx_ch_rate_msk_new);
		ret = -EINVAL;
		goto exit_put;
	}

	tx_chn = &common->tx_chns[queue];
	tx_chn->rate_mbps = rate_mbps;
	common->tx_ch_rate_msk = tx_ch_rate_msk_new;

	if (!common->usage_count)
		/* will be applied on next netif up */
		goto exit_put;

	am65_cpsw_qos_tx_p0_rate_apply(common, queue, rate_mbps);

exit_put:
	pm_runtime_put(common->dev);
	return ret;
}

void am65_cpsw_qos_tx_p0_rate_init(struct am65_cpsw_common *common)
{
	struct am65_cpsw_host *host = am65_common_get_host(common);
	int tx_ch;

	for (tx_ch = 0; tx_ch < common->tx_ch_num; tx_ch++) {
		struct am65_cpsw_tx_chn *tx_chn = &common->tx_chns[tx_ch];
		u32 ch_cir;

		if (!tx_chn->rate_mbps)
			continue;

		ch_cir = am65_cpsw_qos_tx_rate_calc(tx_chn->rate_mbps,
						    common->bus_freq);
		writel(ch_cir,
		       host->port_base + AM65_CPSW_PN_REG_PRI_CIR(tx_ch));
	}
}

int am65_cpsw_qos_ndo_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			       void *type_data)
{
	switch (type) {
	case TC_SETUP_QUERY_CAPS:
		return am65_cpsw_tc_query_caps(ndev, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return am65_cpsw_setup_taprio(ndev, type_data);
	case TC_SETUP_QDISC_MQPRIO:
		return am65_cpsw_setup_mqprio(ndev, type_data);
	case TC_SETUP_BLOCK:
		return am65_cpsw_qos_setup_tc_block(ndev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}

void am65_cpsw_qos_link_up(struct net_device *ndev, int link_speed)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	port->qos.link_speed = link_speed;
	am65_cpsw_tx_pn_shaper_apply(port);
	am65_cpsw_iet_link_state_update(ndev);

	am65_cpsw_est_link_up(ndev, link_speed);
	port->qos.link_down_time = 0;
}

void am65_cpsw_qos_link_down(struct net_device *ndev)
{
	struct am65_cpsw_port *port = am65_ndev_to_port(ndev);

	port->qos.link_speed = SPEED_UNKNOWN;
	am65_cpsw_tx_pn_shaper_apply(port);
	am65_cpsw_iet_link_state_update(ndev);

	if (!port->qos.link_down_time)
		port->qos.link_down_time = ktime_get();
}