/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
32 #include <linux/device.h>
33 #include <linux/netdevice.h>
#define MLX5E_MAX_PRIORITY 8

/* Rate-limit unit sizes used by the firmware ETS rate-limit interface,
 * expressed in Kbps (maxrate values from dcbnl arrive in bytes of link
 * speed granularity handled below).
 */
#define MLX5E_100MB (100000)
#define MLX5E_1GB   (1000000)

/* CEE operational state reported to dcbnl getstate/getpfcstate */
#define MLX5E_CEE_STATE_UP    1
#define MLX5E_CEE_STATE_DOWN  0

/* TC group numbering used when programming the port scheduler:
 * group 7 is reserved for vendor (unlimited) TCs, group 0 holds the
 * lowest-priority (ETS) TCs.
 */
enum {
	MLX5E_VENDOR_TC_GROUP_NUM = 7,
	MLX5E_LOWEST_PRIO_GROUP   = 0,
};
49 /* If dcbx mode is non-host set the dcbx mode to host.
51 static int mlx5e_dcbnl_set_dcbx_mode(struct mlx5e_priv *priv,
52 enum mlx5_dcbx_oper_mode mode)
54 struct mlx5_core_dev *mdev = priv->mdev;
55 u32 param[MLX5_ST_SZ_DW(dcbx_param)];
58 err = mlx5_query_port_dcbx_param(mdev, param);
62 MLX5_SET(dcbx_param, param, version_admin, mode);
63 if (mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
64 MLX5_SET(dcbx_param, param, willing_admin, 1);
66 return mlx5_set_port_dcbx_param(mdev, param);
69 static int mlx5e_dcbnl_switch_to_host_mode(struct mlx5e_priv *priv)
71 struct mlx5e_dcbx *dcbx = &priv->dcbx;
74 if (!MLX5_CAP_GEN(priv->mdev, dcbx))
77 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
80 err = mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_HOST);
84 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
88 static int mlx5e_dcbnl_ieee_getets(struct net_device *netdev,
91 struct mlx5e_priv *priv = netdev_priv(netdev);
92 struct mlx5_core_dev *mdev = priv->mdev;
93 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
94 bool is_tc_group_6_exist = false;
95 bool is_zero_bw_ets_tc = false;
99 if (!MLX5_CAP_GEN(priv->mdev, ets))
102 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
103 err = mlx5_query_port_prio_tc(mdev, i, &ets->prio_tc[i]);
108 ets->ets_cap = mlx5_max_tc(priv->mdev) + 1;
109 for (i = 0; i < ets->ets_cap; i++) {
110 err = mlx5_query_port_tc_group(mdev, i, &tc_group[i]);
114 err = mlx5_query_port_tc_bw_alloc(mdev, i, &ets->tc_tx_bw[i]);
118 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC &&
119 tc_group[i] == (MLX5E_LOWEST_PRIO_GROUP + 1))
120 is_zero_bw_ets_tc = true;
122 if (tc_group[i] == (MLX5E_VENDOR_TC_GROUP_NUM - 1))
123 is_tc_group_6_exist = true;
126 /* Report 0% ets tc if exits*/
127 if (is_zero_bw_ets_tc) {
128 for (i = 0; i < ets->ets_cap; i++)
129 if (tc_group[i] == MLX5E_LOWEST_PRIO_GROUP)
130 ets->tc_tx_bw[i] = 0;
133 /* Update tc_tsa based on fw setting*/
134 for (i = 0; i < ets->ets_cap; i++) {
135 if (ets->tc_tx_bw[i] < MLX5E_MAX_BW_ALLOC)
136 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
137 else if (tc_group[i] == MLX5E_VENDOR_TC_GROUP_NUM &&
138 !is_tc_group_6_exist)
139 priv->dcbx.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
141 memcpy(ets->tc_tsa, priv->dcbx.tc_tsa, sizeof(ets->tc_tsa));
146 static void mlx5e_build_tc_group(struct ieee_ets *ets, u8 *tc_group, int max_tc)
148 bool any_tc_mapped_to_ets = false;
149 bool ets_zero_bw = false;
153 for (i = 0; i <= max_tc; i++) {
154 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
155 any_tc_mapped_to_ets = true;
156 if (!ets->tc_tx_bw[i])
161 /* strict group has higher priority than ets group */
162 strict_group = MLX5E_LOWEST_PRIO_GROUP;
163 if (any_tc_mapped_to_ets)
168 for (i = 0; i <= max_tc; i++) {
169 switch (ets->tc_tsa[i]) {
170 case IEEE_8021QAZ_TSA_VENDOR:
171 tc_group[i] = MLX5E_VENDOR_TC_GROUP_NUM;
173 case IEEE_8021QAZ_TSA_STRICT:
174 tc_group[i] = strict_group++;
176 case IEEE_8021QAZ_TSA_ETS:
177 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP;
178 if (ets->tc_tx_bw[i] && ets_zero_bw)
179 tc_group[i] = MLX5E_LOWEST_PRIO_GROUP + 1;
185 static void mlx5e_build_tc_tx_bw(struct ieee_ets *ets, u8 *tc_tx_bw,
186 u8 *tc_group, int max_tc)
188 int bw_for_ets_zero_bw_tc = 0;
189 int last_ets_zero_bw_tc = -1;
190 int num_ets_zero_bw = 0;
193 for (i = 0; i <= max_tc; i++) {
194 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS &&
197 last_ets_zero_bw_tc = i;
202 bw_for_ets_zero_bw_tc = MLX5E_MAX_BW_ALLOC / num_ets_zero_bw;
204 for (i = 0; i <= max_tc; i++) {
205 switch (ets->tc_tsa[i]) {
206 case IEEE_8021QAZ_TSA_VENDOR:
207 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
209 case IEEE_8021QAZ_TSA_STRICT:
210 tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
212 case IEEE_8021QAZ_TSA_ETS:
213 tc_tx_bw[i] = ets->tc_tx_bw[i] ?
215 bw_for_ets_zero_bw_tc;
220 /* Make sure the total bw for ets zero bw group is 100% */
221 if (last_ets_zero_bw_tc != -1)
222 tc_tx_bw[last_ets_zero_bw_tc] +=
223 MLX5E_MAX_BW_ALLOC % num_ets_zero_bw;
226 /* If there are ETS BW 0,
227 * Set ETS group # to 1 for all ETS non zero BW tcs. Their sum must be 100%.
228 * Set group #0 to all the ETS BW 0 tcs and
229 * equally splits the 100% BW between them
230 * Report both group #0 and #1 as ETS type.
231 * All the tcs in group #0 will be reported with 0% BW.
233 int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
235 struct mlx5_core_dev *mdev = priv->mdev;
236 u8 tc_tx_bw[IEEE_8021QAZ_MAX_TCS];
237 u8 tc_group[IEEE_8021QAZ_MAX_TCS];
238 int max_tc = mlx5_max_tc(mdev);
241 mlx5e_build_tc_group(ets, tc_group, max_tc);
242 mlx5e_build_tc_tx_bw(ets, tc_tx_bw, tc_group, max_tc);
244 err = mlx5_set_port_prio_tc(mdev, ets->prio_tc);
248 err = mlx5_set_port_tc_group(mdev, tc_group);
252 err = mlx5_set_port_tc_bw_alloc(mdev, tc_tx_bw);
257 memcpy(priv->dcbx.tc_tsa, ets->tc_tsa, sizeof(ets->tc_tsa));
261 static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
262 struct ieee_ets *ets,
263 bool zero_sum_allowed)
265 bool have_ets_tc = false;
269 /* Validate Priority */
270 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
271 if (ets->prio_tc[i] >= MLX5E_MAX_PRIORITY) {
273 "Failed to validate ETS: priority value greater than max(%d)\n",
279 /* Validate Bandwidth Sum */
280 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
281 if (ets->tc_tsa[i] == IEEE_8021QAZ_TSA_ETS) {
283 bw_sum += ets->tc_tx_bw[i];
287 if (have_ets_tc && bw_sum != 100) {
288 if (bw_sum || (!bw_sum && !zero_sum_allowed))
290 "Failed to validate ETS: BW sum is illegal\n");
296 static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
297 struct ieee_ets *ets)
299 struct mlx5e_priv *priv = netdev_priv(netdev);
302 if (!MLX5_CAP_GEN(priv->mdev, ets))
305 err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
309 err = mlx5e_dcbnl_ieee_setets_core(priv, ets);
316 static int mlx5e_dcbnl_ieee_getpfc(struct net_device *dev,
317 struct ieee_pfc *pfc)
319 struct mlx5e_priv *priv = netdev_priv(dev);
320 struct mlx5_core_dev *mdev = priv->mdev;
321 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
324 pfc->pfc_cap = mlx5_max_tc(mdev) + 1;
325 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
326 pfc->requests[i] = PPORT_PER_PRIO_GET(pstats, i, tx_pause);
327 pfc->indications[i] = PPORT_PER_PRIO_GET(pstats, i, rx_pause);
330 return mlx5_query_port_pfc(mdev, &pfc->pfc_en, NULL);
333 static int mlx5e_dcbnl_ieee_setpfc(struct net_device *dev,
334 struct ieee_pfc *pfc)
336 struct mlx5e_priv *priv = netdev_priv(dev);
337 struct mlx5_core_dev *mdev = priv->mdev;
341 mlx5_query_port_pfc(mdev, &curr_pfc_en, NULL);
343 if (pfc->pfc_en == curr_pfc_en)
346 ret = mlx5_set_port_pfc(mdev, pfc->pfc_en, pfc->pfc_en);
347 mlx5_toggle_port_link(mdev);
352 static u8 mlx5e_dcbnl_getdcbx(struct net_device *dev)
354 struct mlx5e_priv *priv = netdev_priv(dev);
356 return priv->dcbx.cap;
359 static u8 mlx5e_dcbnl_setdcbx(struct net_device *dev, u8 mode)
361 struct mlx5e_priv *priv = netdev_priv(dev);
362 struct mlx5e_dcbx *dcbx = &priv->dcbx;
364 if (mode & DCB_CAP_DCBX_LLD_MANAGED)
367 if ((!mode) && MLX5_CAP_GEN(priv->mdev, dcbx)) {
368 if (dcbx->mode == MLX5E_DCBX_PARAM_VER_OPER_AUTO)
371 /* set dcbx to fw controlled */
372 if (!mlx5e_dcbnl_set_dcbx_mode(priv, MLX5E_DCBX_PARAM_VER_OPER_AUTO)) {
373 dcbx->mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
374 dcbx->cap &= ~DCB_CAP_DCBX_HOST;
381 if (!(mode & DCB_CAP_DCBX_HOST))
384 if (mlx5e_dcbnl_switch_to_host_mode(netdev_priv(dev)))
392 static int mlx5e_dcbnl_ieee_getmaxrate(struct net_device *netdev,
393 struct ieee_maxrate *maxrate)
395 struct mlx5e_priv *priv = netdev_priv(netdev);
396 struct mlx5_core_dev *mdev = priv->mdev;
397 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
398 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
402 err = mlx5_query_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
406 memset(maxrate->tc_maxrate, 0, sizeof(maxrate->tc_maxrate));
408 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
409 switch (max_bw_unit[i]) {
410 case MLX5_100_MBPS_UNIT:
411 maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_100MB;
414 maxrate->tc_maxrate[i] = max_bw_value[i] * MLX5E_1GB;
416 case MLX5_BW_NO_LIMIT:
419 WARN(true, "non-supported BW unit");
427 static int mlx5e_dcbnl_ieee_setmaxrate(struct net_device *netdev,
428 struct ieee_maxrate *maxrate)
430 struct mlx5e_priv *priv = netdev_priv(netdev);
431 struct mlx5_core_dev *mdev = priv->mdev;
432 u8 max_bw_value[IEEE_8021QAZ_MAX_TCS];
433 u8 max_bw_unit[IEEE_8021QAZ_MAX_TCS];
434 __u64 upper_limit_mbps = roundup(255 * MLX5E_100MB, MLX5E_1GB);
437 memset(max_bw_value, 0, sizeof(max_bw_value));
438 memset(max_bw_unit, 0, sizeof(max_bw_unit));
440 for (i = 0; i <= mlx5_max_tc(mdev); i++) {
441 if (!maxrate->tc_maxrate[i]) {
442 max_bw_unit[i] = MLX5_BW_NO_LIMIT;
445 if (maxrate->tc_maxrate[i] < upper_limit_mbps) {
446 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
448 max_bw_value[i] = max_bw_value[i] ? max_bw_value[i] : 1;
449 max_bw_unit[i] = MLX5_100_MBPS_UNIT;
451 max_bw_value[i] = div_u64(maxrate->tc_maxrate[i],
453 max_bw_unit[i] = MLX5_GBPS_UNIT;
457 return mlx5_modify_port_ets_rate_limit(mdev, max_bw_value, max_bw_unit);
460 static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
462 struct mlx5e_priv *priv = netdev_priv(netdev);
463 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
464 struct mlx5_core_dev *mdev = priv->mdev;
467 int err = -EOPNOTSUPP;
470 if (!MLX5_CAP_GEN(mdev, ets))
473 memset(&ets, 0, sizeof(ets));
474 memset(&pfc, 0, sizeof(pfc));
476 ets.ets_cap = IEEE_8021QAZ_MAX_TCS;
477 for (i = 0; i < CEE_DCBX_MAX_PGS; i++) {
478 ets.tc_tx_bw[i] = cee_cfg->pg_bw_pct[i];
479 ets.tc_rx_bw[i] = cee_cfg->pg_bw_pct[i];
480 ets.tc_tsa[i] = IEEE_8021QAZ_TSA_ETS;
481 ets.prio_tc[i] = cee_cfg->prio_to_pg_map[i];
484 err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
488 err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
491 "%s, Failed to set ETS: %d\n", __func__, err);
496 pfc.pfc_cap = mlx5_max_tc(mdev) + 1;
497 if (!cee_cfg->pfc_enable)
500 for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
501 pfc.pfc_en |= cee_cfg->pfc_setting[i] << i;
503 err = mlx5e_dcbnl_ieee_setpfc(netdev, &pfc);
506 "%s, Failed to set PFC: %d\n", __func__, err);
510 return err ? MLX5_DCB_NO_CHG : MLX5_DCB_CHG_RESET;
513 static u8 mlx5e_dcbnl_getstate(struct net_device *netdev)
515 return MLX5E_CEE_STATE_UP;
518 static void mlx5e_dcbnl_getpermhwaddr(struct net_device *netdev,
521 struct mlx5e_priv *priv = netdev_priv(netdev);
526 memset(perm_addr, 0xff, MAX_ADDR_LEN);
528 mlx5_query_nic_vport_mac_address(priv->mdev, 0, perm_addr);
531 static void mlx5e_dcbnl_setpgtccfgtx(struct net_device *netdev,
532 int priority, u8 prio_type,
533 u8 pgid, u8 bw_pct, u8 up_map)
535 struct mlx5e_priv *priv = netdev_priv(netdev);
536 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
538 if (priority >= CEE_DCBX_MAX_PRIO) {
540 "%s, priority is out of range\n", __func__);
544 if (pgid >= CEE_DCBX_MAX_PGS) {
546 "%s, priority group is out of range\n", __func__);
550 cee_cfg->prio_to_pg_map[priority] = pgid;
553 static void mlx5e_dcbnl_setpgbwgcfgtx(struct net_device *netdev,
556 struct mlx5e_priv *priv = netdev_priv(netdev);
557 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
559 if (pgid >= CEE_DCBX_MAX_PGS) {
561 "%s, priority group is out of range\n", __func__);
565 cee_cfg->pg_bw_pct[pgid] = bw_pct;
568 static void mlx5e_dcbnl_getpgtccfgtx(struct net_device *netdev,
569 int priority, u8 *prio_type,
570 u8 *pgid, u8 *bw_pct, u8 *up_map)
572 struct mlx5e_priv *priv = netdev_priv(netdev);
573 struct mlx5_core_dev *mdev = priv->mdev;
575 if (!MLX5_CAP_GEN(priv->mdev, ets)) {
576 netdev_err(netdev, "%s, ets is not supported\n", __func__);
580 if (priority >= CEE_DCBX_MAX_PRIO) {
582 "%s, priority is out of range\n", __func__);
590 if (mlx5_query_port_prio_tc(mdev, priority, pgid))
594 static void mlx5e_dcbnl_getpgbwgcfgtx(struct net_device *netdev,
595 int pgid, u8 *bw_pct)
599 if (pgid >= CEE_DCBX_MAX_PGS) {
601 "%s, priority group is out of range\n", __func__);
605 mlx5e_dcbnl_ieee_getets(netdev, &ets);
606 *bw_pct = ets.tc_tx_bw[pgid];
609 static void mlx5e_dcbnl_setpfccfg(struct net_device *netdev,
610 int priority, u8 setting)
612 struct mlx5e_priv *priv = netdev_priv(netdev);
613 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
615 if (priority >= CEE_DCBX_MAX_PRIO) {
617 "%s, priority is out of range\n", __func__);
624 cee_cfg->pfc_setting[priority] = setting;
628 mlx5e_dcbnl_get_priority_pfc(struct net_device *netdev,
629 int priority, u8 *setting)
634 err = mlx5e_dcbnl_ieee_getpfc(netdev, &pfc);
639 *setting = (pfc.pfc_en >> priority) & 0x01;
644 static void mlx5e_dcbnl_getpfccfg(struct net_device *netdev,
645 int priority, u8 *setting)
647 if (priority >= CEE_DCBX_MAX_PRIO) {
649 "%s, priority is out of range\n", __func__);
656 mlx5e_dcbnl_get_priority_pfc(netdev, priority, setting);
659 static u8 mlx5e_dcbnl_getcap(struct net_device *netdev,
662 struct mlx5e_priv *priv = netdev_priv(netdev);
663 struct mlx5_core_dev *mdev = priv->mdev;
667 case DCB_CAP_ATTR_PG:
670 case DCB_CAP_ATTR_PFC:
673 case DCB_CAP_ATTR_UP2TC:
676 case DCB_CAP_ATTR_PG_TCS:
677 *cap = 1 << mlx5_max_tc(mdev);
679 case DCB_CAP_ATTR_PFC_TCS:
680 *cap = 1 << mlx5_max_tc(mdev);
682 case DCB_CAP_ATTR_GSP:
685 case DCB_CAP_ATTR_BCN:
688 case DCB_CAP_ATTR_DCBX:
689 *cap = priv->dcbx.cap |
690 DCB_CAP_DCBX_VER_CEE |
691 DCB_CAP_DCBX_VER_IEEE;
702 static int mlx5e_dcbnl_getnumtcs(struct net_device *netdev,
705 struct mlx5e_priv *priv = netdev_priv(netdev);
706 struct mlx5_core_dev *mdev = priv->mdev;
709 case DCB_NUMTCS_ATTR_PG:
710 case DCB_NUMTCS_ATTR_PFC:
711 *num = mlx5_max_tc(mdev) + 1;
720 static u8 mlx5e_dcbnl_getpfcstate(struct net_device *netdev)
724 if (mlx5e_dcbnl_ieee_getpfc(netdev, &pfc))
725 return MLX5E_CEE_STATE_DOWN;
727 return pfc.pfc_en ? MLX5E_CEE_STATE_UP : MLX5E_CEE_STATE_DOWN;
730 static void mlx5e_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
732 struct mlx5e_priv *priv = netdev_priv(netdev);
733 struct mlx5e_cee_config *cee_cfg = &priv->dcbx.cee_cfg;
735 if ((state != MLX5E_CEE_STATE_UP) && (state != MLX5E_CEE_STATE_DOWN))
738 cee_cfg->pfc_enable = state;
741 const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops = {
742 .ieee_getets = mlx5e_dcbnl_ieee_getets,
743 .ieee_setets = mlx5e_dcbnl_ieee_setets,
744 .ieee_getmaxrate = mlx5e_dcbnl_ieee_getmaxrate,
745 .ieee_setmaxrate = mlx5e_dcbnl_ieee_setmaxrate,
746 .ieee_getpfc = mlx5e_dcbnl_ieee_getpfc,
747 .ieee_setpfc = mlx5e_dcbnl_ieee_setpfc,
748 .getdcbx = mlx5e_dcbnl_getdcbx,
749 .setdcbx = mlx5e_dcbnl_setdcbx,
752 .setall = mlx5e_dcbnl_setall,
753 .getstate = mlx5e_dcbnl_getstate,
754 .getpermhwaddr = mlx5e_dcbnl_getpermhwaddr,
756 .setpgtccfgtx = mlx5e_dcbnl_setpgtccfgtx,
757 .setpgbwgcfgtx = mlx5e_dcbnl_setpgbwgcfgtx,
758 .getpgtccfgtx = mlx5e_dcbnl_getpgtccfgtx,
759 .getpgbwgcfgtx = mlx5e_dcbnl_getpgbwgcfgtx,
761 .setpfccfg = mlx5e_dcbnl_setpfccfg,
762 .getpfccfg = mlx5e_dcbnl_getpfccfg,
763 .getcap = mlx5e_dcbnl_getcap,
764 .getnumtcs = mlx5e_dcbnl_getnumtcs,
765 .getpfcstate = mlx5e_dcbnl_getpfcstate,
766 .setpfcstate = mlx5e_dcbnl_setpfcstate,
769 static void mlx5e_dcbnl_query_dcbx_mode(struct mlx5e_priv *priv,
770 enum mlx5_dcbx_oper_mode *mode)
772 u32 out[MLX5_ST_SZ_DW(dcbx_param)];
774 *mode = MLX5E_DCBX_PARAM_VER_OPER_HOST;
776 if (!mlx5_query_port_dcbx_param(priv->mdev, out))
777 *mode = MLX5_GET(dcbx_param, out, version_oper);
779 /* From driver's point of view, we only care if the mode
780 * is host (HOST) or non-host (AUTO)
782 if (*mode != MLX5E_DCBX_PARAM_VER_OPER_HOST)
783 *mode = MLX5E_DCBX_PARAM_VER_OPER_AUTO;
786 static void mlx5e_ets_init(struct mlx5e_priv *priv)
791 if (!MLX5_CAP_GEN(priv->mdev, ets))
794 memset(&ets, 0, sizeof(ets));
795 ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
796 for (i = 0; i < ets.ets_cap; i++) {
797 ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
798 ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
802 /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
806 mlx5e_dcbnl_ieee_setets_core(priv, &ets);
809 void mlx5e_dcbnl_initialize(struct mlx5e_priv *priv)
811 struct mlx5e_dcbx *dcbx = &priv->dcbx;
813 if (!MLX5_CAP_GEN(priv->mdev, qos))
816 if (MLX5_CAP_GEN(priv->mdev, dcbx))
817 mlx5e_dcbnl_query_dcbx_mode(priv, &dcbx->mode);
819 priv->dcbx.cap = DCB_CAP_DCBX_VER_CEE |
820 DCB_CAP_DCBX_VER_IEEE;
821 if (priv->dcbx.mode == MLX5E_DCBX_PARAM_VER_OPER_HOST)
822 priv->dcbx.cap |= DCB_CAP_DCBX_HOST;
824 mlx5e_ets_init(priv);