/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include <linux/bpf.h>
#include "eswitch.h"
#include "en.h"
#include "en_tc.h"
#include "en_rep.h"
#include "en_accel/ipsec.h"
#include "en_accel/ipsec_rxtx.h"
#include "accel/ipsec.h"
#include "vxlan.h"
struct mlx5e_rq_param {
	u32			rqc[MLX5_ST_SZ_DW(rqc)];
	struct mlx5_wq_param	wq;
};

struct mlx5e_sq_param {
	u32			sqc[MLX5_ST_SZ_DW(sqc)];
	struct mlx5_wq_param	wq;
};

struct mlx5e_cq_param {
	u32			cqc[MLX5_ST_SZ_DW(cqc)];
	struct mlx5_wq_param	wq;
	u16			eq_ix;
	u8			cq_period_mode;
};

struct mlx5e_channel_param {
	struct mlx5e_rq_param	rq;
	struct mlx5e_sq_param	sq;
	struct mlx5e_sq_param	xdp_sq;
	struct mlx5e_sq_param	icosq;
	struct mlx5e_cq_param	rx_cq;
	struct mlx5e_cq_param	tx_cq;
	struct mlx5e_cq_param	icosq_cq;
};
static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
	return MLX5_CAP_GEN(mdev, striding_rq) &&
		MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
		MLX5_CAP_ETH(mdev, reg_umr_sq);
}
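
/* Set the RQ work-queue type and the sizing parameters derived from it:
 * WQE log-size, MPWQE stride layout, and the headroom reserved in front
 * of each RX buffer (XDP vs. regular).
 */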
void mlx5e_set_rq_type_params(struct mlx5_core_dev *mdev,
			      struct mlx5e_params *params, u8 rq_type)
{
	params->rq_wq_type = rq_type;
	params->lro_wqe_sz = MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;
	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		params->log_rq_size = is_kdump_kernel() ?
			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		params->mpwqe_log_stride_sz =
			MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
			MLX5_MPWRQ_CQE_CMPRS_LOG_STRIDE_SZ(mdev) :
			MLX5_MPWRQ_DEF_LOG_STRIDE_SZ(mdev);
		params->mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
			params->mpwqe_log_stride_sz;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		params->log_rq_size = is_kdump_kernel() ?
			MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
			MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
		params->rq_headroom = params->xdp_prog ?
			XDP_PACKET_HEADROOM : MLX5_RX_HEADROOM;
		params->rq_headroom += NET_IP_ALIGN;

		/* Extra room needed for build_skb */
		params->lro_wqe_sz -= params->rq_headroom +
			SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	}

	mlx5_core_info(mdev, "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
		       params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
		       BIT(params->log_rq_size),
		       BIT(params->mpwqe_log_stride_sz),
		       MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS));
}
static void mlx5e_set_rq_params(struct mlx5_core_dev *mdev, struct mlx5e_params *params)
{
	u8 rq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) &&
		    !params->xdp_prog && !MLX5_IPSEC_DEV(mdev) ?
		    MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		    MLX5_WQ_TYPE_LINKED_LIST;
	mlx5e_set_rq_type_params(mdev, params, rq_type);
}
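
/* Mirror the vport operational state into the netdev carrier state. */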
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u8 port_state;

	port_state = mlx5_query_vport_state(mdev,
					    MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT,
					    0);

	if (port_state == VPORT_STATE_UP) {
		netdev_info(priv->netdev, "Link up\n");
		netif_carrier_on(priv->netdev);
	} else {
		netdev_info(priv->netdev, "Link down\n");
		netif_carrier_off(priv->netdev);
	}
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       update_carrier_work);

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		if (priv->profile->update_carrier)
			priv->profile->update_carrier(priv);
	mutex_unlock(&priv->state_lock);
}
static void mlx5e_tx_timeout_work(struct work_struct *work)
{
	struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
					       tx_timeout_work);
	int err;

	rtnl_lock();
	mutex_lock(&priv->state_lock);
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;
	mlx5e_close_locked(priv->netdev);
	err = mlx5e_open_locked(priv->netdev);
	if (err)
		netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
			   err);
unlock:
	mutex_unlock(&priv->state_lock);
	rtnl_unlock();
}
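
/* Fold the per-ring RQ/SQ software counters of all channels into the
 * aggregate priv->stats.sw snapshot, accumulating into a stack copy so
 * readers never observe a half-updated structure.
 */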
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_sw_stats temp, *s = &temp;
	struct mlx5e_rq_stats *rq_stats;
	struct mlx5e_sq_stats *sq_stats;
	int i, j;

	memset(s, 0, sizeof(*s));
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		rq_stats = &c->rq.stats;

		s->rx_packets	+= rq_stats->packets;
		s->rx_bytes	+= rq_stats->bytes;
		s->rx_lro_packets += rq_stats->lro_packets;
		s->rx_lro_bytes	+= rq_stats->lro_bytes;
		s->rx_csum_none	+= rq_stats->csum_none;
		s->rx_csum_complete += rq_stats->csum_complete;
		s->rx_csum_unnecessary += rq_stats->csum_unnecessary;
		s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
		s->rx_xdp_drop += rq_stats->xdp_drop;
		s->rx_xdp_tx += rq_stats->xdp_tx;
		s->rx_xdp_tx_full += rq_stats->xdp_tx_full;
		s->rx_wqe_err   += rq_stats->wqe_err;
		s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
		s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
		s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
		s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;
		s->rx_page_reuse  += rq_stats->page_reuse;
		s->rx_cache_reuse += rq_stats->cache_reuse;
		s->rx_cache_full  += rq_stats->cache_full;
		s->rx_cache_empty += rq_stats->cache_empty;
		s->rx_cache_busy  += rq_stats->cache_busy;
		s->rx_cache_waive += rq_stats->cache_waive;

		for (j = 0; j < priv->channels.params.num_tc; j++) {
			sq_stats = &c->sq[j].stats;

			s->tx_packets		+= sq_stats->packets;
			s->tx_bytes		+= sq_stats->bytes;
			s->tx_tso_packets	+= sq_stats->tso_packets;
			s->tx_tso_bytes		+= sq_stats->tso_bytes;
			s->tx_tso_inner_packets	+= sq_stats->tso_inner_packets;
			s->tx_tso_inner_bytes	+= sq_stats->tso_inner_bytes;
			s->tx_queue_stopped	+= sq_stats->stopped;
			s->tx_queue_wake	+= sq_stats->wake;
			s->tx_queue_dropped	+= sq_stats->dropped;
			s->tx_xmit_more		+= sq_stats->xmit_more;
			s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
			s->tx_csum_none		+= sq_stats->csum_none;
			s->tx_csum_partial	+= sq_stats->csum_partial;
		}
	}

	s->link_down_events_phy = MLX5_GET(ppcnt_reg,
				priv->stats.pport.phy_counters,
				counter_set.phys_layer_cntrs.link_down_events);
	memcpy(&priv->stats.sw, s, sizeof(*s));
}
static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
	int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
	u32 *out = (u32 *)priv->stats.vport.query_vport_out;
	u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)] = {0};
	struct mlx5_core_dev *mdev = priv->mdev;

	MLX5_SET(query_vport_counter_in, in, opcode,
		 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
	MLX5_SET(query_vport_counter_in, in, op_mod, 0);
	MLX5_SET(query_vport_counter_in, in, other_vport, 0);

	mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}
static void mlx5e_update_pport_counters(struct mlx5e_priv *priv, bool full)
{
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(ppcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
	int prio;
	void *out;

	MLX5_SET(ppcnt_reg, in, local_port, 1);

	out = pstats->IEEE_802_3_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (!full)
		return;

	out = pstats->RFC_2863_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->RFC_2819_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	out = pstats->phy_counters;
	MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

	if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
		out = pstats->phy_statistical_counters;
		MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}

	if (MLX5_CAP_PCAM_FEATURE(mdev, rx_buffer_fullness_counters)) {
		out = pstats->eth_ext_counters;
		MLX5_SET(ppcnt_reg, in, grp, MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP);
		mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
	}

	MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
	for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
		out = pstats->per_prio_counters[prio];
		MLX5_SET(ppcnt_reg, in, prio_tc, prio);
		mlx5_core_access_reg(mdev, in, sz, out, sz,
				     MLX5_REG_PPCNT, 0, 0);
	}
}
static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
	u32 out[MLX5_ST_SZ_DW(query_q_counter_out)];
	int err;

	if (!priv->q_counter)
		return;

	err = mlx5_core_query_q_counter(priv->mdev, priv->q_counter, 0, out, sizeof(out));
	if (err)
		return;

	qcnt->rx_out_of_buffer = MLX5_GET(query_q_counter_out, out, out_of_buffer);
}

static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
{
	struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 in[MLX5_ST_SZ_DW(mpcnt_reg)] = {0};
	int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
	void *out;

	if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
		return;

	out = pcie_stats->pcie_perf_counters;
	MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
	mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
}

void mlx5e_update_stats(struct mlx5e_priv *priv, bool full)
{
	if (full) {
		mlx5e_update_pcie_counters(priv);
		mlx5e_ipsec_update_stats(priv);
	}
	mlx5e_update_pport_counters(priv, full);
	mlx5e_update_vport_counters(priv);
	mlx5e_update_q_counter(priv);
	mlx5e_update_sw_counters(priv);
}
static void mlx5e_update_ndo_stats(struct mlx5e_priv *priv)
{
	mlx5e_update_stats(priv, false);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
	struct delayed_work *dwork = to_delayed_work(work);
	struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
					       update_stats_work);
	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->profile->update_stats(priv);
		queue_delayed_work(priv->wq, dwork,
				   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
	}
	mutex_unlock(&priv->state_lock);
}
static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
			      enum mlx5_dev_event event, unsigned long param)
{
	struct mlx5e_priv *priv = vpriv;
	struct ptp_clock_event ptp_event;
	struct mlx5_eqe *eqe = NULL;

	if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
		return;

	switch (event) {
	case MLX5_DEV_EVENT_PORT_UP:
	case MLX5_DEV_EVENT_PORT_DOWN:
		queue_work(priv->wq, &priv->update_carrier_work);
		break;
	case MLX5_DEV_EVENT_PPS:
		eqe = (struct mlx5_eqe *)param;
		ptp_event.index = eqe->data.pps.pin;
		ptp_event.timestamp =
			timecounter_cyc2time(&priv->tstamp.clock,
					     be64_to_cpu(eqe->data.pps.time_stamp));
		mlx5e_pps_event_handler(vpriv, &ptp_event);
		break;
	default:
		break;
	}
}
static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
	set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
	clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
	synchronize_irq(pci_irq_vector(priv->mdev->pdev, MLX5_EQ_VEC_ASYNC));
}
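
/* MPWQE (multi-packet WQE) RQs post UMR WQEs on the ICO SQ to map the
 * per-WQE MTT arrays; the helpers below size, build and DMA-map those
 * MTTs.
 */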
static inline int mlx5e_get_wqe_mtt_sz(void)
{
	/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
	 * To avoid copying garbage after the mtt array, we allocate
	 * a little more.
	 */
	return ALIGN(MLX5_MPWRQ_PAGES_PER_WQE * sizeof(__be64),
		     MLX5_UMR_MTT_ALIGNMENT);
}

static inline void mlx5e_build_umr_wqe(struct mlx5e_rq *rq,
				       struct mlx5e_icosq *sq,
				       struct mlx5e_umr_wqe *wqe,
				       u16 ix)
{
	struct mlx5_wqe_ctrl_seg      *cseg = &wqe->ctrl;
	struct mlx5_wqe_umr_ctrl_seg *ucseg = &wqe->uctrl;
	struct mlx5_wqe_data_seg      *dseg = &wqe->data;
	struct mlx5e_mpw_info *wi = &rq->mpwqe.info[ix];
	u8 ds_cnt = DIV_ROUND_UP(sizeof(*wqe), MLX5_SEND_WQE_DS);
	u32 umr_wqe_mtt_offset = mlx5e_get_wqe_mtt_offset(rq, ix);

	cseg->qpn_ds    = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT) |
				      ds_cnt);
	cseg->fm_ce_se  = MLX5_WQE_CTRL_CQ_UPDATE;
	cseg->imm       = rq->mkey_be;

	ucseg->flags = MLX5_UMR_TRANSLATION_OFFSET_EN;
	ucseg->xlt_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(MLX5_MPWRQ_PAGES_PER_WQE));
	ucseg->bsf_octowords =
		cpu_to_be16(MLX5_MTT_OCTW(umr_wqe_mtt_offset));
	ucseg->mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);

	dseg->lkey = sq->mkey_be;
	dseg->addr = cpu_to_be64(wi->umr.mtt_addr);
}
static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq,
				     struct mlx5e_channel *c)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int mtt_alloc = mtt_sz + MLX5_UMR_ALIGN - 1;
	int i;

	rq->mpwqe.info = kzalloc_node(wq_sz * sizeof(*rq->mpwqe.info),
				      GFP_KERNEL, cpu_to_node(c->cpu));
	if (!rq->mpwqe.info)
		goto err_out;

	/* We allocate more than mtt_sz as we will align the pointer */
	rq->mpwqe.mtt_no_align = kzalloc_node(mtt_alloc * wq_sz, GFP_KERNEL,
					cpu_to_node(c->cpu));
	if (unlikely(!rq->mpwqe.mtt_no_align))
		goto err_free_wqe_info;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		wi->umr.mtt = PTR_ALIGN(rq->mpwqe.mtt_no_align + i * mtt_alloc,
					MLX5_UMR_ALIGN);
		wi->umr.mtt_addr = dma_map_single(c->pdev, wi->umr.mtt, mtt_sz,
						  PCI_DMA_TODEVICE);
		if (unlikely(dma_mapping_error(c->pdev, wi->umr.mtt_addr)))
			goto err_unmap_mtts;

		mlx5e_build_umr_wqe(rq, &c->icosq, &wi->umr.wqe, i);
	}

	return 0;

err_unmap_mtts:
	while (--i >= 0) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(c->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
err_free_wqe_info:
	kfree(rq->mpwqe.info);

err_out:
	return -ENOMEM;
}
static void mlx5e_rq_free_mpwqe_info(struct mlx5e_rq *rq)
{
	int wq_sz = mlx5_wq_ll_get_size(&rq->wq);
	int mtt_sz = mlx5e_get_wqe_mtt_sz();
	int i;

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_mpw_info *wi = &rq->mpwqe.info[i];

		dma_unmap_single(rq->pdev, wi->umr.mtt_addr, mtt_sz,
				 PCI_DMA_TODEVICE);
	}
	kfree(rq->mpwqe.mtt_no_align);
	kfree(rq->mpwqe.info);
}
static int mlx5e_create_umr_mkey(struct mlx5_core_dev *mdev,
				 u64 npages, u8 page_shift,
				 struct mlx5_core_mkey *umr_mkey)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	if (!MLX5E_VALID_NUM_MTTS(npages))
		return -EINVAL;

	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, free, 1);
	MLX5_SET(mkc, mkc, umr_en, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, access_mode, MLX5_MKC_ACCESS_MODE_MTT);

	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mdev->mlx5e_res.pdn);
	MLX5_SET64(mkc, mkc, len, npages << page_shift);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 MLX5_MTT_OCTW(npages));
	MLX5_SET(mkc, mkc, log_page_size, page_shift);

	err = mlx5_core_create_mkey(mdev, umr_mkey, in, inlen);

	kvfree(in);
	return err;
}
static int mlx5e_create_rq_umr_mkey(struct mlx5_core_dev *mdev, struct mlx5e_rq *rq)
{
	u64 num_mtts = MLX5E_REQUIRED_MTTS(mlx5_wq_ll_get_size(&rq->wq));

	return mlx5e_create_umr_mkey(mdev, num_mtts, PAGE_SHIFT, &rq->umr_mkey);
}
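
/* Allocate an RQ and its software context, and wire up the per-WQ-type
 * datapath handlers (MPWQE striding RQ vs. linked-list RQ), byte_count
 * and the RX memory key.
 */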
static int mlx5e_alloc_rq(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_rq_param *rqp,
			  struct mlx5e_rq *rq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	void *rqc = rqp->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	u32 byte_count;
	int npages;
	int wq_sz;
	int err;
	int i;

	rqp->wq.db_numa_node = cpu_to_node(c->cpu);

	err = mlx5_wq_ll_create(mdev, &rqp->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

	wq_sz = mlx5_wq_ll_get_size(&rq->wq);

	rq->wq_type = params->rq_wq_type;
	rq->pdev    = c->pdev;
	rq->netdev  = c->netdev;
	rq->tstamp  = c->tstamp;
	rq->channel = c;
	rq->ix      = c->ix;
	rq->mdev    = mdev;

	rq->xdp_prog = params->xdp_prog ? bpf_prog_inc(params->xdp_prog) : NULL;
	if (IS_ERR(rq->xdp_prog)) {
		err = PTR_ERR(rq->xdp_prog);
		rq->xdp_prog = NULL;
		goto err_rq_wq_destroy;
	}

	rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	rq->buff.headroom = params->rq_headroom;

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:

		rq->post_wqes = mlx5e_post_rx_mpwqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

		rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe_mpwqe;
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev)) {
			err = -EINVAL;
			netdev_err(c->netdev, "MPWQE RQ with IPSec offload not supported\n");
			goto err_rq_wq_destroy;
		}
#endif
		if (!rq->handle_rx_cqe) {
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of MPWQE RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		rq->mpwqe.log_stride_sz = params->mpwqe_log_stride_sz;
		rq->mpwqe.num_strides = BIT(params->mpwqe_log_num_strides);

		byte_count = rq->mpwqe.num_strides << rq->mpwqe.log_stride_sz;

		err = mlx5e_create_rq_umr_mkey(mdev, rq);
		if (err)
			goto err_rq_wq_destroy;
		rq->mkey_be = cpu_to_be32(rq->umr_mkey.key);

		err = mlx5e_rq_alloc_mpwqe_info(rq, c);
		if (err)
			goto err_destroy_umr_mkey;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		rq->wqe.frag_info =
			kzalloc_node(wq_sz * sizeof(*rq->wqe.frag_info),
				     GFP_KERNEL, cpu_to_node(c->cpu));
		if (!rq->wqe.frag_info) {
			err = -ENOMEM;
			goto err_rq_wq_destroy;
		}
		rq->post_wqes = mlx5e_post_rx_wqes;
		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

#ifdef CONFIG_MLX5_EN_IPSEC
		if (c->priv->ipsec)
			rq->handle_rx_cqe = mlx5e_ipsec_handle_rx_cqe;
		else
#endif
			rq->handle_rx_cqe = c->priv->profile->rx_handlers.handle_rx_cqe;
		if (!rq->handle_rx_cqe) {
			kfree(rq->wqe.frag_info);
			err = -EINVAL;
			netdev_err(c->netdev, "RX handler of RQ is not set, err %d\n", err);
			goto err_rq_wq_destroy;
		}

		byte_count = params->lro_en  ?
				params->lro_wqe_sz :
				MLX5E_SW2HW_MTU(c->priv, c->netdev->mtu);
#ifdef CONFIG_MLX5_EN_IPSEC
		if (MLX5_IPSEC_DEV(mdev))
			byte_count += MLX5E_METADATA_ETHER_LEN;
#endif
		rq->wqe.page_reuse = !params->xdp_prog && !params->lro_en;

		/* calc the required page order */
		rq->wqe.frag_sz = MLX5_SKB_FRAG_SZ(rq->buff.headroom + byte_count);
		npages = DIV_ROUND_UP(rq->wqe.frag_sz, PAGE_SIZE);
		rq->buff.page_order = order_base_2(npages);

		byte_count |= MLX5_HW_START_PADDING;
		rq->mkey_be = c->mkey_be;
	}

	for (i = 0; i < wq_sz; i++) {
		struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

		if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ) {
			u64 dma_offset = (u64)mlx5e_get_wqe_mtt_offset(rq, i) << PAGE_SHIFT;

			wqe->data.addr = cpu_to_be64(dma_offset);
		}

		wqe->data.byte_count = cpu_to_be32(byte_count);
		wqe->data.lkey = rq->mkey_be;
	}

	INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
	rq->am.mode = params->rx_cq_period_mode;
	rq->page_cache.head = 0;
	rq->page_cache.tail = 0;

	return 0;

err_destroy_umr_mkey:
	mlx5_core_destroy_mkey(mdev, &rq->umr_mkey);

err_rq_wq_destroy:
	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);
	mlx5_wq_destroy(&rq->wq_ctrl);

	return err;
}
static void mlx5e_free_rq(struct mlx5e_rq *rq)
{
	int i;

	if (rq->xdp_prog)
		bpf_prog_put(rq->xdp_prog);

	switch (rq->wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		mlx5e_rq_free_mpwqe_info(rq);
		mlx5_core_destroy_mkey(rq->mdev, &rq->umr_mkey);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		kfree(rq->wqe.frag_info);
	}

	for (i = rq->page_cache.head; i != rq->page_cache.tail;
	     i = (i + 1) & (MLX5E_CACHE_SIZE - 1)) {
		struct mlx5e_dma_info *dma_info = &rq->page_cache.page_cache[i];

		mlx5e_page_release(rq, dma_info, false);
	}
	mlx5_wq_destroy(&rq->wq_ctrl);
}
static int mlx5e_create_rq(struct mlx5e_rq *rq,
			   struct mlx5e_rq_param *param)
{
	struct mlx5_core_dev *mdev = rq->mdev;

	void *in;
	void *rqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
		sizeof(u64) * rq->wq_ctrl.buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
	wq  = MLX5_ADDR_OF(rqc, rqc, wq);

	memcpy(rqc, param->rqc, sizeof(param->rqc));

	MLX5_SET(rqc,  rqc, cqn,	   rq->cq.mcq.cqn);
	MLX5_SET(rqc,  rqc, state,	   MLX5_RQC_STATE_RST);
	MLX5_SET(wq,   wq,  log_wq_pg_sz,  rq->wq_ctrl.buf.page_shift -
					   MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq,  dbr_addr,	   rq->wq_ctrl.db.dma);

	mlx5_fill_page_array(&rq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
				 int next_state)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, curr_state);
	MLX5_SET(rqc, rqc, state, next_state);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static int mlx5e_modify_rq_scatter_fcs(struct mlx5e_rq *rq, bool enable)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS);
	MLX5_SET(rqc, rqc, scatter_fcs, enable);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
	struct mlx5e_channel *c = rq->channel;
	struct mlx5_core_dev *mdev = c->mdev;

	void *in;
	void *rqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

	MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
	MLX5_SET64(modify_rq_in, in, modify_bitmask,
		   MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD);
	MLX5_SET(rqc, rqc, vsd, vsd);
	MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

	err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

	kvfree(in);

	return err;
}
static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
	mlx5_core_destroy_rq(rq->mdev, rq->rqn);
}
static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
	unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
	struct mlx5e_channel *c = rq->channel;

	struct mlx5_wq_ll *wq = &rq->wq;
	u16 min_wqes = mlx5_min_rx_wqes(rq->wq_type, mlx5_wq_ll_get_size(wq));

	while (time_before(jiffies, exp_time)) {
		if (wq->cur_sz >= min_wqes)
			return 0;

		msleep(20);
	}

	netdev_warn(c->netdev, "Failed to get min RX wqes on RQN[0x%x] wq cur_sz(%d) min_rx_wqes(%d)\n",
		    rq->rqn, wq->cur_sz, min_wqes);
	return -ETIMEDOUT;
}
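
/* Release all outstanding RX descriptors, popping the linked-list WQ
 * until it is empty; with page-reuse, pages may also be held by WQEs
 * that were handled but not yet re-posted.
 */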
static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
	struct mlx5_wq_ll *wq = &rq->wq;
	struct mlx5e_rx_wqe *wqe;
	__be16 wqe_ix_be;
	u16 wqe_ix;

	/* UMR WQE (if in progress) is always at wq->head */
	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ &&
	    rq->mpwqe.umr_in_progress)
		mlx5e_free_rx_mpwqe(rq, &rq->mpwqe.info[wq->head]);

	while (!mlx5_wq_ll_is_empty(wq)) {
		wqe_ix_be = *wq->tail_next;
		wqe_ix    = be16_to_cpu(wqe_ix_be);
		wqe       = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
		rq->dealloc_wqe(rq, wqe_ix);
		mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
			       &wqe->next.next_wqe_index);
	}

	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST && rq->wqe.page_reuse) {
		/* Clean outstanding pages on handled WQEs that decided to do page-reuse,
		 * but yet to be re-posted.
		 */
		int wq_sz = mlx5_wq_ll_get_size(&rq->wq);

		for (wqe_ix = 0; wqe_ix < wq_sz; wqe_ix++)
			rq->dealloc_wqe(rq, wqe_ix);
	}
}
static int mlx5e_open_rq(struct mlx5e_channel *c,
			 struct mlx5e_params *params,
			 struct mlx5e_rq_param *param,
			 struct mlx5e_rq *rq)
{
	int err;

	err = mlx5e_alloc_rq(c, params, param, rq);
	if (err)
		return err;

	err = mlx5e_create_rq(rq, param);
	if (err)
		goto err_free_rq;

	err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
	if (err)
		goto err_destroy_rq;

	if (params->rx_am_enabled)
		c->rq.state |= BIT(MLX5E_RQ_STATE_AM);

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(rq);
err_free_rq:
	mlx5e_free_rq(rq);

	return err;
}
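
/* Enable the RQ and post a NOP on the channel's ICO SQ so the doorbell
 * kicks off RX buffer posting without waiting for real traffic.
 */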
static void mlx5e_activate_rq(struct mlx5e_rq *rq)
{
	struct mlx5e_icosq *sq = &rq->channel->icosq;
	u16 pi = sq->pc & sq->wq.sz_m1;
	struct mlx5e_tx_wqe *nopwqe;

	set_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	sq->db.ico_wqe[pi].opcode = MLX5_OPCODE_NOP;
	nopwqe = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
	mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nopwqe->ctrl);
}

static void mlx5e_deactivate_rq(struct mlx5e_rq *rq)
{
	clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state);
	napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
	cancel_work_sync(&rq->am.work);
	mlx5e_destroy_rq(rq);
	mlx5e_free_rx_descs(rq);
	mlx5e_free_rq(rq);
}
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
	kfree(sq->db.di);
}

static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.di = kzalloc_node(sizeof(*sq->db.di) * wq_sz,
				 GFP_KERNEL, numa);
	if (!sq->db.di) {
		mlx5e_free_xdpsq_db(sq);
		return -ENOMEM;
	}

	return 0;
}
static int mlx5e_alloc_xdpsq(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_xdpsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev      = c->pdev;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->min_inline_mode = params->tx_min_inline_mode;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_xdpsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}
static void mlx5e_free_xdpsq(struct mlx5e_xdpsq *sq)
{
	mlx5e_free_xdpsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_icosq_db(struct mlx5e_icosq *sq)
{
	kfree(sq->db.ico_wqe);
}

static int mlx5e_alloc_icosq_db(struct mlx5e_icosq *sq, int numa)
{
	u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

	sq->db.ico_wqe = kzalloc_node(sizeof(*sq->db.ico_wqe) * wq_sz,
				      GFP_KERNEL, numa);
	if (!sq->db.ico_wqe)
		return -ENOMEM;

	return 0;
}
static int mlx5e_alloc_icosq(struct mlx5e_channel *c,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_icosq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_icosq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	sq->edge = (sq->wq.sz_m1 + 1) - MLX5E_ICOSQ_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}
static void mlx5e_free_icosq(struct mlx5e_icosq *sq)
{
	mlx5e_free_icosq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}

static void mlx5e_free_txqsq_db(struct mlx5e_txqsq *sq)
{
	kfree(sq->db.wqe_info);
	kfree(sq->db.dma_fifo);
}

static int mlx5e_alloc_txqsq_db(struct mlx5e_txqsq *sq, int numa)
{
	int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
	int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

	sq->db.dma_fifo = kzalloc_node(df_sz * sizeof(*sq->db.dma_fifo),
				       GFP_KERNEL, numa);
	sq->db.wqe_info = kzalloc_node(wq_sz * sizeof(*sq->db.wqe_info),
				       GFP_KERNEL, numa);
	if (!sq->db.dma_fifo || !sq->db.wqe_info) {
		mlx5e_free_txqsq_db(sq);
		return -ENOMEM;
	}

	sq->dma_fifo_mask = df_sz - 1;

	return 0;
}
static int mlx5e_alloc_txqsq(struct mlx5e_channel *c,
			     int txq_ix,
			     struct mlx5e_params *params,
			     struct mlx5e_sq_param *param,
			     struct mlx5e_txqsq *sq)
{
	void *sqc_wq = MLX5_ADDR_OF(sqc, param->sqc, wq);
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	sq->pdev      = c->pdev;
	sq->tstamp    = c->tstamp;
	sq->mkey_be   = c->mkey_be;
	sq->channel   = c;
	sq->txq_ix    = txq_ix;
	sq->uar_map   = mdev->mlx5e_res.bfreg.map;
	sq->max_inline      = params->tx_max_inline;
	sq->min_inline_mode = params->tx_min_inline_mode;
	if (MLX5_IPSEC_DEV(c->priv->mdev))
		set_bit(MLX5E_SQ_STATE_IPSEC, &sq->state);

	param->wq.db_numa_node = cpu_to_node(c->cpu);
	err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq, &sq->wq_ctrl);
	if (err)
		return err;
	sq->wq.db    = &sq->wq.db[MLX5_SND_DBR];

	err = mlx5e_alloc_txqsq_db(sq, cpu_to_node(c->cpu));
	if (err)
		goto err_sq_wq_destroy;

	sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;

	return 0;

err_sq_wq_destroy:
	mlx5_wq_destroy(&sq->wq_ctrl);

	return err;
}

static void mlx5e_free_txqsq(struct mlx5e_txqsq *sq)
{
	mlx5e_free_txqsq_db(sq);
	mlx5_wq_destroy(&sq->wq_ctrl);
}
struct mlx5e_create_sq_param {
	struct mlx5_wq_ctrl	*wq_ctrl;
	u32			cqn;
	u32			tisn;
	u8			tis_lst_sz;
	u8			min_inline_mode;
};
static int mlx5e_create_sq(struct mlx5_core_dev *mdev,
			   struct mlx5e_sq_param *param,
			   struct mlx5e_create_sq_param *csp,
			   u32 *sqn)
{
	void *in;
	void *sqc;
	void *wq;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
		sizeof(u64) * csp->wq_ctrl->buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
	wq = MLX5_ADDR_OF(sqc, sqc, wq);

	memcpy(sqc, param->sqc, sizeof(param->sqc));
	MLX5_SET(sqc,  sqc, tis_lst_sz, csp->tis_lst_sz);
	MLX5_SET(sqc,  sqc, tis_num_0, csp->tisn);
	MLX5_SET(sqc,  sqc, cqn, csp->cqn);

	if (MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5_CAP_INLINE_MODE_VPORT_CONTEXT)
		MLX5_SET(sqc, sqc, min_wqe_inline_mode, csp->min_inline_mode);

	MLX5_SET(sqc,  sqc, state, MLX5_SQC_STATE_RST);

	MLX5_SET(wq,   wq, wq_type,       MLX5_WQ_TYPE_CYCLIC);
	MLX5_SET(wq,   wq, uar_page,      mdev->mlx5e_res.bfreg.index);
	MLX5_SET(wq,   wq, log_wq_pg_sz,  csp->wq_ctrl->buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(wq, wq, dbr_addr,      csp->wq_ctrl->db.dma);

	mlx5_fill_page_array(&csp->wq_ctrl->buf, (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

	err = mlx5_core_create_sq(mdev, in, inlen, sqn);

	kvfree(in);

	return err;
}
struct mlx5e_modify_sq_param {
	int curr_state;
	int next_state;
	bool rl_update;
	int rl_index;
};
static int mlx5e_modify_sq(struct mlx5_core_dev *mdev, u32 sqn,
			   struct mlx5e_modify_sq_param *p)
{
	void *in;
	void *sqc;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

	MLX5_SET(modify_sq_in, in, sq_state, p->curr_state);
	MLX5_SET(sqc, sqc, state, p->next_state);
	if (p->rl_update && p->next_state == MLX5_SQC_STATE_RDY) {
		MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
		MLX5_SET(sqc,  sqc, packet_pacing_rate_limit_index, p->rl_index);
	}

	err = mlx5_core_modify_sq(mdev, sqn, in, inlen);

	kvfree(in);

	return err;
}

static void mlx5e_destroy_sq(struct mlx5_core_dev *mdev, u32 sqn)
{
	mlx5_core_destroy_sq(mdev, sqn);
}
static int mlx5e_create_sq_rdy(struct mlx5_core_dev *mdev,
			       struct mlx5e_sq_param *param,
			       struct mlx5e_create_sq_param *csp,
			       u32 *sqn)
{
	struct mlx5e_modify_sq_param msp = {0};
	int err;

	err = mlx5e_create_sq(mdev, param, csp, sqn);
	if (err)
		return err;

	msp.curr_state = MLX5_SQC_STATE_RST;
	msp.next_state = MLX5_SQC_STATE_RDY;
	err = mlx5e_modify_sq(mdev, *sqn, &msp);
	if (err)
		mlx5e_destroy_sq(mdev, *sqn);

	return err;
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate);
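
/* Create a TXQ SQ in RDY state and restore any previously configured
 * per-queue rate limit.
 */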
static int mlx5e_open_txqsq(struct mlx5e_channel *c,
			    u32 tisn,
			    int txq_ix,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_txqsq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	u32 tx_rate;
	int err;

	err = mlx5e_alloc_txqsq(c, txq_ix, params, param, sq);
	if (err)
		return err;

	csp.tisn            = tisn;
	csp.tis_lst_sz      = 1;
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_txqsq;

	tx_rate = c->priv->tx_rates[sq->txq_ix];
	if (tx_rate)
		mlx5e_set_sq_maxrate(c->netdev, sq, tx_rate);

	return 0;

err_free_txqsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_txqsq(sq);

	return err;
}
static void mlx5e_activate_txqsq(struct mlx5e_txqsq *sq)
{
	sq->txq = netdev_get_tx_queue(sq->channel->netdev, sq->txq_ix);
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	netdev_tx_reset_queue(sq->txq);
	netif_tx_start_queue(sq->txq);
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}
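
/* Stop the TXQ and flush the datapath: once ENABLED is cleared and NAPI
 * has synchronized, nothing wakes the queue; a final NOP is posted when
 * there is room so pending doorbell state reaches hardware.
 */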
static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	/* prevent netif_tx_wake_queue */
	napi_synchronize(&c->napi);

	netif_tx_disable_queue(sq->txq);

	/* last doorbell out, godspeed .. */
	if (mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1)) {
		struct mlx5e_tx_wqe *nop;

		sq->db.wqe_info[(sq->pc & sq->wq.sz_m1)].skb = NULL;
		nop = mlx5e_post_nop(&sq->wq, sq->sqn, &sq->pc);
		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, &nop->ctrl);
	}
}
static void mlx5e_close_txqsq(struct mlx5e_txqsq *sq)
{
	struct mlx5e_channel *c = sq->channel;
	struct mlx5_core_dev *mdev = c->mdev;

	mlx5e_destroy_sq(mdev, sq->sqn);
	if (sq->rate_limit)
		mlx5_rl_remove_rate(mdev, sq->rate_limit);
	mlx5e_free_txqsq_descs(sq);
	mlx5e_free_txqsq(sq);
}
static int mlx5e_open_icosq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_icosq *sq)
{
	struct mlx5e_create_sq_param csp = {};
	int err;

	err = mlx5e_alloc_icosq(c, param, sq);
	if (err)
		return err;

	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = params->tx_min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_icosq;

	return 0;

err_free_icosq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_icosq(sq);

	return err;
}
static void mlx5e_close_icosq(struct mlx5e_icosq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_icosq(sq);
}
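
/* XDP_TX SQ WQEs have a fixed layout (ctrl + eth + one data segment),
 * so the constant WQE fields are pre-initialized once at SQ creation.
 */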
static int mlx5e_open_xdpsq(struct mlx5e_channel *c,
			    struct mlx5e_params *params,
			    struct mlx5e_sq_param *param,
			    struct mlx5e_xdpsq *sq)
{
	unsigned int ds_cnt = MLX5E_XDP_TX_DS_COUNT;
	struct mlx5e_create_sq_param csp = {};
	unsigned int inline_hdr_sz = 0;
	int err;
	int i;

	err = mlx5e_alloc_xdpsq(c, params, param, sq);
	if (err)
		return err;

	csp.tis_lst_sz      = 1;
	csp.tisn            = c->priv->tisn[0]; /* tc = 0 */
	csp.cqn             = sq->cq.mcq.cqn;
	csp.wq_ctrl         = &sq->wq_ctrl;
	csp.min_inline_mode = sq->min_inline_mode;
	set_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	err = mlx5e_create_sq_rdy(c->mdev, param, &csp, &sq->sqn);
	if (err)
		goto err_free_xdpsq;

	if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) {
		inline_hdr_sz = MLX5E_XDP_MIN_INLINE;
		ds_cnt++;
	}

	/* Pre initialize fixed WQE fields */
	for (i = 0; i < mlx5_wq_cyc_get_size(&sq->wq); i++) {
		struct mlx5e_tx_wqe      *wqe  = mlx5_wq_cyc_get_wqe(&sq->wq, i);
		struct mlx5_wqe_ctrl_seg *cseg = &wqe->ctrl;
		struct mlx5_wqe_eth_seg  *eseg = &wqe->eth;
		struct mlx5_wqe_data_seg *dseg;

		cseg->qpn_ds = cpu_to_be32((sq->sqn << 8) | ds_cnt);
		eseg->inline_hdr.sz = cpu_to_be16(inline_hdr_sz);

		dseg = (struct mlx5_wqe_data_seg *)cseg + (ds_cnt - 1);
		dseg->lkey = sq->mkey_be;
	}

	return 0;

err_free_xdpsq:
	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	mlx5e_free_xdpsq(sq);

	return err;
}
static void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq)
{
	struct mlx5e_channel *c = sq->channel;

	clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
	napi_synchronize(&c->napi);

	mlx5e_destroy_sq(c->mdev, sq->sqn);
	mlx5e_free_xdpsq_descs(sq);
	mlx5e_free_xdpsq(sq);
}
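
/* Common CQ allocation: create the CQ work queue, hook up the doorbell
 * records and completion/error callbacks, and initialize all CQEs'
 * ownership bits so none appears valid before hardware writes it.
 */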
static int mlx5e_alloc_cq_common(struct mlx5_core_dev *mdev,
				 struct mlx5e_cq_param *param,
				 struct mlx5e_cq *cq)
{
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;
	u32 i;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz     = 64;
	mcq->set_ci_db  = cq->wq_ctrl.db.db;
	mcq->arm_db     = cq->wq_ctrl.db.db + 1;
	*mcq->set_ci_db = 0;
	*mcq->arm_db    = 0;
	mcq->vector     = param->eq_ix;
	mcq->comp       = mlx5e_completion_event;
	mcq->event      = mlx5e_cq_error_event;
	mcq->irqn       = irqn;

	for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

		cqe->op_own = 0xf1;
	}

	cq->mdev = mdev;

	return 0;
}
static int mlx5e_alloc_cq(struct mlx5e_channel *c,
			  struct mlx5e_cq_param *param,
			  struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->priv->mdev;
	int err;

	param->wq.buf_numa_node = cpu_to_node(c->cpu);
	param->wq.db_numa_node  = cpu_to_node(c->cpu);
	param->eq_ix = c->ix;

	err = mlx5e_alloc_cq_common(mdev, param, cq);

	cq->napi    = &c->napi;
	cq->channel = c;

	return err;
}

static void mlx5e_free_cq(struct mlx5e_cq *cq)
{
	mlx5_cqwq_destroy(&cq->wq_ctrl);
}
static int mlx5e_create_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = cq->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.frag_buf.npages;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_frag_array(&cq->wq_ctrl.frag_buf,
				  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc,   cqc, cq_period_mode, param->cq_period_mode);
	MLX5_SET(cqc,   cqc, c_eqn,         eqn);
	MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
	MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.frag_buf.page_shift -
					    MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr,      cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	mlx5e_cq_arm(cq);

	return 0;
}
static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
	mlx5_core_destroy_cq(cq->mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_moder moder,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq)
{
	struct mlx5_core_dev *mdev = c->mdev;
	int err;

	err = mlx5e_alloc_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, param);
	if (err)
		goto err_free_cq;

	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq, moder.usec, moder.pkts);
	return 0;

err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_destroy_cq(cq);
	mlx5e_free_cq(cq);
}
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix + MLX5_EQ_VEC_COMP_BASE].mask);
}
static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
			     struct mlx5e_params *params,
			     struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < c->num_tc; tc++) {
		err = mlx5e_open_cq(c, params->tx_cq_moderation,
				    &cparam->tx_cq, &c->sq[tc].cq);
		if (err)
			goto err_close_tx_cqs;
	}

	return 0;

err_close_tx_cqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_cq(&c->sq[tc].cq);

	return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_cq(&c->sq[tc].cq);
}
static int mlx5e_open_sqs(struct mlx5e_channel *c,
			  struct mlx5e_params *params,
			  struct mlx5e_channel_param *cparam)
{
	int err;
	int tc;

	for (tc = 0; tc < params->num_tc; tc++) {
		int txq_ix = c->ix + tc * params->num_channels;

		err = mlx5e_open_txqsq(c, c->priv->tisn[tc], txq_ix,
				       params, &cparam->sq, &c->sq[tc]);
		if (err)
			goto err_close_sqs;
	}

	return 0;

err_close_sqs:
	for (tc--; tc >= 0; tc--)
		mlx5e_close_txqsq(&c->sq[tc]);

	return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_close_txqsq(&c->sq[tc]);
}
static int mlx5e_set_sq_maxrate(struct net_device *dev,
				struct mlx5e_txqsq *sq, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_modify_sq_param msp = {0};
	u16 rl_index = 0;
	int err;

	if (rate == sq->rate_limit)
		/* nothing to do */
		return 0;

	if (sq->rate_limit)
		/* remove current rl index to free space to next ones */
		mlx5_rl_remove_rate(mdev, sq->rate_limit);

	sq->rate_limit = 0;

	if (rate) {
		err = mlx5_rl_add_rate(mdev, rate, &rl_index);
		if (err) {
			netdev_err(dev, "Failed configuring rate %u: %d\n",
				   rate, err);
			return err;
		}
	}

	msp.curr_state = MLX5_SQC_STATE_RDY;
	msp.next_state = MLX5_SQC_STATE_RDY;
	msp.rl_index   = rl_index;
	msp.rl_update  = true;
	err = mlx5e_modify_sq(mdev, sq->sqn, &msp);
	if (err) {
		netdev_err(dev, "Failed configuring rate %u: %d\n",
			   rate, err);
		/* remove the rate from the table */
		if (rate)
			mlx5_rl_remove_rate(mdev, rate);
		return err;
	}

	sq->rate_limit = rate;
	return 0;
}
static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5e_txqsq *sq = priv->txq2sq[index];
	int err = 0;

	if (!mlx5_rl_is_supported(mdev)) {
		netdev_err(dev, "Rate limiting is not supported on this device\n");
		return -EINVAL;
	}

	/* rate is given in Mb/sec, HW config is in Kb/sec */
	rate = rate << 10;

	/* Check whether rate in valid range, 0 is always valid */
	if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
		netdev_err(dev, "TX rate %u, is not in range\n", rate);
		return -ERANGE;
	}

	mutex_lock(&priv->state_lock);
	if (test_bit(MLX5E_STATE_OPENED, &priv->state))
		err = mlx5e_set_sq_maxrate(dev, sq, rate);
	if (!err)
		priv->tx_rates[index] = rate;
	mutex_unlock(&priv->state_lock);

	return err;
}
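
/* Open one channel: its NAPI context, the ICO/TX/RX/XDP CQs and SQs and
 * the RQ, torn down in reverse order on any failure.
 */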
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_params *params,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct mlx5e_cq_moder icocq_moder = {0, 0};
	struct net_device *netdev = priv->netdev;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	unsigned int irq;
	int err;
	int eqn;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->mdev     = priv->mdev;
	c->tstamp   = &priv->tstamp;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = &priv->mdev->pdev->dev;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
	c->num_tc   = params->num_tc;
	c->xdp      = !!params->xdp_prog;

	mlx5_vector2eqn(priv->mdev, ix, &eqn, &irq);
	c->irq_desc = irq_to_desc(irq);

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_cq(c, icocq_moder, &cparam->icosq_cq, &c->icosq.cq);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_tx_cqs(c, params, cparam);
	if (err)
		goto err_close_icosq_cq;

	err = mlx5e_open_cq(c, params->rx_cq_moderation, &cparam->rx_cq, &c->rq.cq);
	if (err)
		goto err_close_tx_cqs;

	/* XDP SQ CQ params are same as normal TXQ sq CQ params */
	err = c->xdp ? mlx5e_open_cq(c, params->tx_cq_moderation,
				     &cparam->tx_cq, &c->rq.xdpsq.cq) : 0;
	if (err)
		goto err_close_rx_cq;

	napi_enable(&c->napi);

	err = mlx5e_open_icosq(c, params, &cparam->icosq, &c->icosq);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_sqs(c, params, cparam);
	if (err)
		goto err_close_icosq;

	err = c->xdp ? mlx5e_open_xdpsq(c, params, &cparam->xdp_sq, &c->rq.xdpsq) : 0;
	if (err)
		goto err_close_sqs;

	err = mlx5e_open_rq(c, params, &cparam->rq, &c->rq);
	if (err)
		goto err_close_xdp_sq;

	*cp = c;

	return 0;
err_close_xdp_sq:
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);

err_close_sqs:
	mlx5e_close_sqs(c);

err_close_icosq:
	mlx5e_close_icosq(&c->icosq);

err_disable_napi:
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);

err_close_rx_cq:
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
	mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
	netif_napi_del(&c->napi);
	kfree(c);

	return err;
}
static void mlx5e_activate_channel(struct mlx5e_channel *c)
{
	int tc;

	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_activate_txqsq(&c->sq[tc]);
	mlx5e_activate_rq(&c->rq);
	netif_set_xps_queue(c->netdev, get_cpu_mask(c->cpu), c->ix);
}

static void mlx5e_deactivate_channel(struct mlx5e_channel *c)
{
	int tc;

	mlx5e_deactivate_rq(&c->rq);
	for (tc = 0; tc < c->num_tc; tc++)
		mlx5e_deactivate_txqsq(&c->sq[tc]);
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	if (c->xdp)
		mlx5e_close_xdpsq(&c->rq.xdpsq);
	mlx5e_close_sqs(c);
	mlx5e_close_icosq(&c->icosq);
	napi_disable(&c->napi);
	if (c->xdp)
		mlx5e_close_cq(&c->rq.xdpsq.cq);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	kfree(c);
}
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		MLX5_SET(wq, wq, log_wqe_num_of_strides, params->mpwqe_log_num_strides - 9);
		MLX5_SET(wq, wq, log_wqe_stride_size, params->mpwqe_log_stride_sz - 6);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz, params->log_rq_size);
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
	MLX5_SET(rqc, rqc, vsd, params->vlan_strip_disable);
	MLX5_SET(rqc, rqc, scatter_fcs, params->scatter_fcs_en);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;
}
static void mlx5e_build_drop_rq_param(struct mlx5_core_dev *mdev,
				      struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));

	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
}

static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
					struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
	MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
}
static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
				 struct mlx5e_params *params,
				 struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
	MLX5_SET(sqc, sqc, allow_swp, !!MLX5_IPSEC_DEV(priv->mdev));
}

static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->mdev->priv.uar->index);
}
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (params->rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = params->log_rq_size + params->mpwqe_log_num_strides;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = params->log_rq_size;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
	if (MLX5E_GET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS)) {
		MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
		MLX5_SET(cqc, cqc, cqe_comp_en, 1);
	}

	mlx5e_build_common_cq_param(priv, param);
	param->cq_period_mode = params->rx_cq_period_mode;
}
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, params->log_sq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}

static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
				     u8 log_wq_size,
				     struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);

	mlx5e_build_common_cq_param(priv, param);

	param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
}
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    u8 log_wq_size,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
}

static void mlx5e_build_xdpsq_param(struct mlx5e_priv *priv,
				    struct mlx5e_params *params,
				    struct mlx5e_sq_param *param)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);
	MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
}
static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
				      struct mlx5e_params *params,
				      struct mlx5e_channel_param *cparam)
{
	u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;

	mlx5e_build_rq_param(priv, params, &cparam->rq);
	mlx5e_build_sq_param(priv, params, &cparam->sq);
	mlx5e_build_xdpsq_param(priv, params, &cparam->xdp_sq);
	mlx5e_build_icosq_param(priv, icosq_log_wq_sz, &cparam->icosq);
	mlx5e_build_rx_cq_param(priv, params, &cparam->rx_cq);
	mlx5e_build_tx_cq_param(priv, params, &cparam->tx_cq);
	mlx5e_build_ico_cq_param(priv, icosq_log_wq_sz, &cparam->icosq_cq);
}
int mlx5e_open_channels(struct mlx5e_priv *priv,
			struct mlx5e_channels *chs)
{
	struct mlx5e_channel_param *cparam;
	int err = -ENOMEM;
	int i;

	chs->num = chs->params.num_channels;

	chs->c = kcalloc(chs->num, sizeof(struct mlx5e_channel *), GFP_KERNEL);
	cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
	if (!chs->c || !cparam)
		goto err_free;

	mlx5e_build_channel_param(priv, &chs->params, cparam);
	for (i = 0; i < chs->num; i++) {
		err = mlx5e_open_channel(priv, i, &chs->params, cparam, &chs->c[i]);
		if (err)
			goto err_close_channels;
	}

	kfree(cparam);
	return 0;

err_close_channels:
	for (i--; i >= 0; i--)
		mlx5e_close_channel(chs->c[i]);

err_free:
	kfree(chs->c);
	kfree(cparam);
	chs->num = 0;
	return err;
}
static void mlx5e_activate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_activate_channel(chs->c[i]);
}

static int mlx5e_wait_channels_min_rx_wqes(struct mlx5e_channels *chs)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_wait_for_min_rx_wqes(&chs->c[i]->rq);
		if (err)
			break;
	}

	return err;
}

static void mlx5e_deactivate_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_deactivate_channel(chs->c[i]);
}

void mlx5e_close_channels(struct mlx5e_channels *chs)
{
	int i;

	for (i = 0; i < chs->num; i++)
		mlx5e_close_channel(chs->c[i]);

	kfree(chs->c);
	chs->num = 0;
}
static int
mlx5e_create_rqt(struct mlx5e_priv *priv, int sz, struct mlx5e_rqt *rqt)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	int err;
	u32 *in;
	int i;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	for (i = 0; i < sz; i++)
		MLX5_SET(rqtc, rqtc, rq_num[i], priv->drop_rq.rqn);

	err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
	if (!err)
		rqt->enabled = true;

	kvfree(in);
	return err;
}

void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
{
	rqt->enabled = false;
	mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
}
int mlx5e_create_indirect_rqt(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt = &priv->indir_rqt;
	int err;

	err = mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, rqt);
	if (err)
		mlx5_core_warn(priv->mdev, "create indirect rqts failed, %d\n", err);
	return err;
}

int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
{
	struct mlx5e_rqt *rqt;
	int err;
	int ix;

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		rqt = &priv->direct_tir[ix].rqt;
		err = mlx5e_create_rqt(priv, 1 /*size */, rqt);
		if (err)
			goto err_destroy_rqts;
	}

	return 0;

err_destroy_rqts:
	mlx5_core_warn(priv->mdev, "create direct rqts failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);

	return err;
}

void mlx5e_destroy_direct_rqts(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
		mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
}
static int mlx5e_rx_hash_fn(int hfunc)
{
	return (hfunc == ETH_RSS_HASH_TOP) ?
	       MLX5_RX_HASH_FN_TOEPLITZ :
	       MLX5_RX_HASH_FN_INVERTED_XOR8;
}
static int mlx5e_bits_invert(unsigned long a, int size)
{
	int inv = 0;
	int i;

	for (i = 0; i < size; i++)
		inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;

	return inv;
}
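
/* Fill an RQT with RQ numbers: either a single direct RQN, or the RSS
 * indirection table (bit-inverting the index for the XOR hash function).
 */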
static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, int sz,
				struct mlx5e_redirect_rqt_param rrp, void *rqtc)
{
	int i;

	for (i = 0; i < sz; i++) {
		u32 rqn;

		if (rrp.is_rss) {
			int ix = i;

			if (rrp.rss.hfunc == ETH_RSS_HASH_XOR)
				ix = mlx5e_bits_invert(i, ilog2(sz));

			ix = priv->channels.params.indirection_rqt[ix];
			rqn = rrp.rss.channels->c[ix]->rq.rqn;
		} else {
			rqn = rrp.rqn;
		}

		MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
	}
}
int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz,
		       struct mlx5e_redirect_rqt_param rrp)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *rqtc;
	int inlen;
	u32 *in;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
	mlx5e_fill_rqt_rqns(priv, sz, rrp, rqtc);
	err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);

	kvfree(in);
	return err;
}

static u32 mlx5e_get_direct_rqn(struct mlx5e_priv *priv, int ix,
				struct mlx5e_redirect_rqt_param rrp)
{
	if (!rrp.is_rss)
		return rrp.rqn;

	if (ix >= rrp.rss.channels->num)
		return priv->drop_rq.rqn;

	return rrp.rss.channels->c[ix]->rq.rqn;
}
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv,
				struct mlx5e_redirect_rqt_param rrp)
{
	u32 rqtn;
	int ix;

	if (priv->indir_rqt.enabled) {
		/* RSS RQ table */
		rqtn = priv->indir_rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, rrp);
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		struct mlx5e_redirect_rqt_param direct_rrp = {
			.is_rss = false,
			{
				.rqn = mlx5e_get_direct_rqn(priv, ix, rrp)
			},
		};

		/* Direct RQ Tables */
		if (!priv->direct_tir[ix].rqt.enabled)
			continue;

		rqtn = priv->direct_tir[ix].rqt.rqtn;
		mlx5e_redirect_rqt(priv, rqtn, 1, direct_rrp);
	}
}
static void mlx5e_redirect_rqts_to_channels(struct mlx5e_priv *priv,
					    struct mlx5e_channels *chs)
{
	struct mlx5e_redirect_rqt_param rrp = {
		.is_rss        = true,
		{
			.rss = {
				.channels  = chs,
				.hfunc     = chs->params.rss_hfunc,
			}
		},
	};

	mlx5e_redirect_rqts(priv, rrp);
}

static void mlx5e_redirect_rqts_to_drop(struct mlx5e_priv *priv)
{
	struct mlx5e_redirect_rqt_param drop_rrp = {
		.is_rss = false,
		{
			.rqn = priv->drop_rq.rqn,
		},
	};

	mlx5e_redirect_rqts(priv, drop_rrp);
}
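
/* The LRO session size programmed into the TIR counts IP payload only,
 * so a rough upper bound on the L2/L3 header size is subtracted from
 * the LRO WQE size; the field is in 256-byte units (hence the >> 8).
 */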
static void mlx5e_build_tir_ctx_lro(struct mlx5e_params *params, void *tirc)
{
	if (!params->lro_en)
		return;

#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (params->lro_wqe_sz - ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs, params->lro_timeout);
}
2360 void mlx5e_build_indir_tir_ctx_hash(struct mlx5e_params *params,
2361 enum mlx5e_traffic_types tt,
2362 void *tirc, bool inner)
2364 void *hfso = inner ? MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_inner) :
2365 MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2367 #define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
2368 MLX5_HASH_FIELD_SEL_DST_IP)
2370 #define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
2371 MLX5_HASH_FIELD_SEL_DST_IP |\
2372 MLX5_HASH_FIELD_SEL_L4_SPORT |\
2373 MLX5_HASH_FIELD_SEL_L4_DPORT)
2375 #define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
2376 MLX5_HASH_FIELD_SEL_DST_IP |\
2377 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2379 MLX5_SET(tirc, tirc, rx_hash_fn, mlx5e_rx_hash_fn(params->rss_hfunc));
2380 if (params->rss_hfunc == ETH_RSS_HASH_TOP) {
2381 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
2382 rx_hash_toeplitz_key);
2383 size_t len = MLX5_FLD_SZ_BYTES(tirc,
2384 rx_hash_toeplitz_key);
2386 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
2387 memcpy(rss_key, params->toeplitz_hash_key, len);
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	default:
		WARN_ONCE(true, "%s: bad traffic type!\n", __func__);
	}
}
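/* Summary of the switch above: TCP/UDP traffic types hash on the IP 4-tuple
 * (MLX5_HASH_IP_L4PORTS), the IPsec AH/ESP types hash on the addresses plus
 * SPI (MLX5_HASH_IP_IPSEC_SPI), and plain IPv4/IPv6 fall back to source and
 * destination addresses only (MLX5_HASH_IP).
 */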
static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	void *in, *tirc;
	int inlen, err = 0;
	int tt, ix;

	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
	tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
					   inlen);
		if (err)
			goto free_in;
	}

	for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
		err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
					   in, inlen);
		if (err)
			goto free_in;
	}

free_in:
	kvfree(in);

	return err;
}
static void mlx5e_build_inner_indir_tir_ctx(struct mlx5e_priv *priv,
					    enum mlx5e_traffic_types tt,
					    u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	MLX5_SET(tirc, tirc, tunneled_offload_en, 0x1);

	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, true);
}
static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = MLX5E_SW2HW_MTU(priv, mtu);
	int err;

	err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
	if (err)
		return err;

	/* Update vport context MTU */
	mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
	return 0;
}

static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 hw_mtu = 0;
	int err;

	err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
	if (err || !hw_mtu) /* fallback to port oper mtu */
		mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);

	*mtu = MLX5E_HW2SW_MTU(priv, hw_mtu);
}

static int mlx5e_set_dev_port_mtu(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	u16 mtu;
	int err;

	err = mlx5e_set_mtu(priv, netdev->mtu);
	if (err)
		return err;

	mlx5e_query_mtu(priv, &mtu);
	if (mtu != netdev->mtu)
		netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
			    __func__, mtu, netdev->mtu);

	netdev->mtu = mtu;
	return 0;
}
static void mlx5e_netdev_set_tcs(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int nch = priv->channels.params.num_channels;
	int ntc = priv->channels.params.num_tc;
	int tc;

	netdev_reset_tc(netdev);

	if (ntc == 1)
		return;

	netdev_set_num_tc(netdev, ntc);

	/* Map netdev TCs to offset 0
	 * We have our own UP to TXQ mapping for QoS
	 */
	for (tc = 0; tc < ntc; tc++)
		netdev_set_tc_queue(netdev, tc, nch, 0);
}
static void mlx5e_build_channels_tx_maps(struct mlx5e_priv *priv)
{
	struct mlx5e_channel *c;
	struct mlx5e_txqsq *sq;
	int i, tc;

	for (i = 0; i < priv->channels.num; i++)
		for (tc = 0; tc < priv->profile->max_tc; tc++)
			priv->channel_tc2txq[i][tc] = i + tc * priv->channels.num;

	for (i = 0; i < priv->channels.num; i++) {
		c = priv->channels.c[i];
		for (tc = 0; tc < c->num_tc; tc++) {
			sq = &c->sq[tc];
			priv->txq2sq[sq->txq_ix] = sq;
		}
	}
}
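/* Worked example (illustrative): with 4 channels and 2 TCs,
 * channel_tc2txq[ch][tc] = ch + tc * 4, so TXQs 0..3 carry TC0 and
 * TXQs 4..7 carry TC1; txq2sq[] then lets the xmit path resolve a TXQ
 * index straight to its SQ without walking the channels.
 */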
void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
{
	int num_txqs = priv->channels.num * priv->channels.params.num_tc;
	struct net_device *netdev = priv->netdev;

	mlx5e_netdev_set_tcs(netdev);
	netif_set_real_num_tx_queues(netdev, num_txqs);
	netif_set_real_num_rx_queues(netdev, priv->channels.num);

	mlx5e_build_channels_tx_maps(priv);
	mlx5e_activate_channels(&priv->channels);
	netif_tx_start_all_queues(priv->netdev);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_add_sqs_fwd_rules(priv);

	mlx5e_wait_channels_min_rx_wqes(&priv->channels);
	mlx5e_redirect_rqts_to_channels(priv, &priv->channels);
}
void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
	mlx5e_redirect_rqts_to_drop(priv);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_remove_sqs_fwd_rules(priv);

	/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
	 * polling for inactive tx queues.
	 */
	netif_tx_stop_all_queues(priv->netdev);
	netif_tx_disable(priv->netdev);
	mlx5e_deactivate_channels(&priv->channels);
}
void mlx5e_switch_priv_channels(struct mlx5e_priv *priv,
				struct mlx5e_channels *new_chs,
				mlx5e_fp_hw_modify hw_modify)
{
	struct net_device *netdev = priv->netdev;
	int new_num_txqs;
	int carrier_ok;

	new_num_txqs = new_chs->num * new_chs->params.num_tc;

	carrier_ok = netif_carrier_ok(netdev);
	netif_carrier_off(netdev);

	if (new_num_txqs < netdev->real_num_tx_queues)
		netif_set_real_num_tx_queues(netdev, new_num_txqs);

	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	priv->channels = *new_chs;

	/* New channels are ready to roll, modify HW settings if needed */
	if (hw_modify)
		hw_modify(priv);

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);

	/* return carrier back if needed */
	if (carrier_ok)
		netif_carrier_on(netdev);
}
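/* The sequence above is the driver's "safe reconfigure" pattern: carrier
 * off -> deactivate and close the old channels -> swap in the new set ->
 * apply the optional HW modify callback -> refresh TIRs and reactivate ->
 * restore carrier. Callers prepare the new channels with
 * mlx5e_open_channels() first, so a failed allocation never disturbs the
 * running configuration.
 */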
int mlx5e_open_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	set_bit(MLX5E_STATE_OPENED, &priv->state);

	err = mlx5e_open_channels(priv, &priv->channels);
	if (err)
		goto err_clear_state_opened_flag;

	mlx5e_refresh_tirs(priv, false);
	mlx5e_activate_priv_channels(priv);
	if (priv->profile->update_carrier)
		priv->profile->update_carrier(priv);
	mlx5e_timestamp_init(priv);

	if (priv->profile->update_stats)
		queue_delayed_work(priv->wq, &priv->update_stats_work, 0);

	return 0;

err_clear_state_opened_flag:
	clear_bit(MLX5E_STATE_OPENED, &priv->state);
	return err;
}
int mlx5e_open(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);
	err = mlx5e_open_locked(netdev);
	if (!err)
		mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_UP);
	mutex_unlock(&priv->state_lock);

	if (mlx5e_vxlan_allowed(priv->mdev))
		udp_tunnel_get_rx_info(netdev);

	return err;
}
int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_timestamp_cleanup(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_deactivate_priv_channels(priv);
	mlx5e_close_channels(&priv->channels);

	return 0;
}
int mlx5e_close(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (!netif_device_present(netdev))
		return -ENODEV;

	mutex_lock(&priv->state_lock);
	mlx5_set_port_admin_status(priv->mdev, MLX5_PORT_DOWN);
	err = mlx5e_close_locked(netdev);
	mutex_unlock(&priv->state_lock);

	return err;
}
static int mlx5e_alloc_drop_rq(struct mlx5_core_dev *mdev,
			       struct mlx5e_rq *rq,
			       struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
	int err;

	param->wq.db_numa_node = param->wq.buf_numa_node;

	err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
				&rq->wq_ctrl);
	if (err)
		return err;

	rq->mdev = mdev;

	return 0;
}

static int mlx5e_alloc_drop_cq(struct mlx5_core_dev *mdev,
			       struct mlx5e_cq *cq,
			       struct mlx5e_cq_param *param)
{
	param->wq.buf_numa_node = dev_to_node(&mdev->pdev->dev);
	param->wq.db_numa_node = dev_to_node(&mdev->pdev->dev);

	return mlx5e_alloc_cq_common(mdev, param, cq);
}
static int mlx5e_open_drop_rq(struct mlx5_core_dev *mdev,
			      struct mlx5e_rq *drop_rq)
{
	struct mlx5e_cq_param cq_param = {};
	struct mlx5e_rq_param rq_param = {};
	struct mlx5e_cq *cq = &drop_rq->cq;
	int err;

	mlx5e_build_drop_rq_param(mdev, &rq_param);

	err = mlx5e_alloc_drop_cq(mdev, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_create_cq(cq, &cq_param);
	if (err)
		goto err_free_cq;

	err = mlx5e_alloc_drop_rq(mdev, drop_rq, &rq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_rq(drop_rq, &rq_param);
	if (err)
		goto err_free_rq;

	return 0;

err_free_rq:
	mlx5e_free_rq(drop_rq);
err_destroy_cq:
	mlx5e_destroy_cq(cq);
err_free_cq:
	mlx5e_free_cq(cq);

	return err;
}
static void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq)
{
	mlx5e_destroy_rq(drop_rq);
	mlx5e_free_rq(drop_rq);
	mlx5e_destroy_cq(&drop_rq->cq);
	mlx5e_free_cq(&drop_rq->cq);
}
int mlx5e_create_tis(struct mlx5_core_dev *mdev, int tc,
		     u32 underlay_qpn, u32 *tisn)
{
	u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {0};
	void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);

	MLX5_SET(tisc, tisc, prio, tc << 1);
	MLX5_SET(tisc, tisc, underlay_qpn, underlay_qpn);
	MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);

	if (mlx5_lag_is_lacp_owner(mdev))
		MLX5_SET(tisc, tisc, strict_lag_tx_port_affinity, 1);

	return mlx5_core_create_tis(mdev, in, sizeof(in), tisn);
}

void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn)
{
	mlx5_core_destroy_tis(mdev, tisn);
}
int mlx5e_create_tises(struct mlx5e_priv *priv)
{
	int err;
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++) {
		err = mlx5e_create_tis(priv->mdev, tc, 0, &priv->tisn[tc]);
		if (err)
			goto err_close_tises;
	}

	return 0;

err_close_tises:
	for (tc--; tc >= 0; tc--)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);

	return err;
}

void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
{
	int tc;

	for (tc = 0; tc < priv->profile->max_tc; tc++)
		mlx5e_destroy_tis(priv->mdev, priv->tisn[tc]);
}
static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv,
				      enum mlx5e_traffic_types tt,
				      u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
	mlx5e_build_indir_tir_ctx_hash(&priv->channels.params, tt, tirc, false);
}

static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 rqtn, u32 *tirc)
{
	MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);

	mlx5e_build_tir_ctx_lro(&priv->channels.params, tirc);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
	MLX5_SET(tirc, tirc, indirect_table, rqtn);
	MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
}
int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
{
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int i = 0;
	int err;
	u32 *in;
	int tt;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
		memset(in, 0, inlen);
		tir = &priv->indir_tir[tt];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_indir_tir_ctx(priv, tt, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		goto out;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++) {
		memset(in, 0, inlen);
		tir = &priv->inner_indir_tir[i];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_inner_indir_tir_ctx(priv, i, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err) {
			mlx5_core_warn(priv->mdev, "create inner indirect tirs failed, %d\n", err);
			goto err_destroy_inner_tirs;
		}
	}

out:
	kvfree(in);

	return 0;

err_destroy_inner_tirs:
	for (i--; i >= 0; i--)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);

	for (tt--; tt >= 0; tt--)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);

	kvfree(in);

	return err;
}
int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	struct mlx5e_tir *tir;
	void *tirc;
	int inlen;
	int err;
	u32 *in;
	int ix;

	inlen = MLX5_ST_SZ_BYTES(create_tir_in);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	for (ix = 0; ix < nch; ix++) {
		memset(in, 0, inlen);
		tir = &priv->direct_tir[ix];
		tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
		mlx5e_build_direct_tir_ctx(priv, priv->direct_tir[ix].rqt.rqtn, tirc);
		err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
		if (err)
			goto err_destroy_ch_tirs;
	}

	kvfree(in);

	return 0;

err_destroy_ch_tirs:
	mlx5_core_warn(priv->mdev, "create direct tirs failed, %d\n", err);
	for (ix--; ix >= 0; ix--)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);

	kvfree(in);

	return err;
}
void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
{
	int i;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);

	if (!mlx5e_tunnel_inner_ft_supported(priv->mdev))
		return;

	for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->inner_indir_tir[i]);
}

void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
{
	int nch = priv->profile->max_nch(priv->mdev);
	int i;

	for (i = 0; i < nch; i++)
		mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
}
static int mlx5e_modify_channels_scatter_fcs(struct mlx5e_channels *chs, bool enable)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_scatter_fcs(&chs->c[i]->rq, enable);
		if (err)
			return err;
	}

	return 0;
}

static int mlx5e_modify_channels_vsd(struct mlx5e_channels *chs, bool vsd)
{
	int err = 0;
	int i;

	for (i = 0; i < chs->num; i++) {
		err = mlx5e_modify_rq_vsd(&chs->c[i]->rq, vsd);
		if (err)
			return err;
	}

	return 0;
}
static int mlx5e_setup_tc_mqprio(struct net_device *netdev,
				 struct tc_mqprio_qopt *mqprio)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	u8 tc = mqprio->num_tc;
	int err = 0;

	mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS;

	if (tc && tc != MLX5E_MAX_NUM_TC)
		return -EINVAL;

	mutex_lock(&priv->state_lock);

	new_channels.params = priv->channels.params;
	new_channels.params.num_tc = tc ? tc : 1;

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state)) {
		priv->channels.params = new_channels.params;
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, NULL);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}
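/* Example (illustrative): `tc qdisc add dev <ifname> root mqprio num_tc 8`
 * lands here with mqprio->num_tc = 8; any value other than 0 or
 * MLX5E_MAX_NUM_TC is rejected with -EINVAL before channels are touched.
 */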
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_setup_tc_cls_flower(struct net_device *dev,
				     struct tc_cls_flower_offload *cls_flower)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
	    cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		return mlx5e_configure_flower(priv, cls_flower);
	case TC_CLSFLOWER_DESTROY:
		return mlx5e_delete_flower(priv, cls_flower);
	case TC_CLSFLOWER_STATS:
		return mlx5e_stats_flower(priv, cls_flower);
	default:
		return -EOPNOTSUPP;
	}
}
#endif

static int mlx5e_setup_tc(struct net_device *dev, enum tc_setup_type type,
			  void *type_data)
{
	switch (type) {
#ifdef CONFIG_MLX5_ESWITCH
	case TC_SETUP_CLSFLOWER:
		return mlx5e_setup_tc_cls_flower(dev, type_data);
#endif
	case TC_SETUP_MQPRIO:
		return mlx5e_setup_tc_mqprio(dev, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
static void
mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_sw_stats *sstats = &priv->stats.sw;
	struct mlx5e_vport_stats *vstats = &priv->stats.vport;
	struct mlx5e_pport_stats *pstats = &priv->stats.pport;

	if (mlx5e_is_uplink_rep(priv)) {
		stats->rx_packets = PPORT_802_3_GET(pstats, a_frames_received_ok);
		stats->rx_bytes   = PPORT_802_3_GET(pstats, a_octets_received_ok);
		stats->tx_packets = PPORT_802_3_GET(pstats, a_frames_transmitted_ok);
		stats->tx_bytes   = PPORT_802_3_GET(pstats, a_octets_transmitted_ok);
	} else {
		stats->rx_packets = sstats->rx_packets;
		stats->rx_bytes   = sstats->rx_bytes;
		stats->tx_packets = sstats->tx_packets;
		stats->tx_bytes   = sstats->tx_bytes;
		stats->tx_dropped = sstats->tx_queue_dropped;
	}

	stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;

	stats->rx_length_errors =
		PPORT_802_3_GET(pstats, a_in_range_length_errors) +
		PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
		PPORT_802_3_GET(pstats, a_frame_too_long_errors);
	stats->rx_crc_errors =
		PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
	stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
	stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
	stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
			   stats->rx_frame_errors;
	stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;

	/* vport multicast also counts packets that are dropped due to steering
	 * or rx out of buffer
	 */
	stats->multicast =
		VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
}
static void mlx5e_set_rx_mode(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	queue_work(priv->wq, &priv->set_rx_mode_work);
}

static int mlx5e_set_mac(struct net_device *netdev, void *addr)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct sockaddr *saddr = addr;

	if (!is_valid_ether_addr(saddr->sa_data))
		return -EADDRNOTAVAIL;

	netif_addr_lock_bh(netdev);
	ether_addr_copy(netdev->dev_addr, saddr->sa_data);
	netif_addr_unlock_bh(netdev);

	queue_work(priv->wq, &priv->set_rx_mode_work);

	return 0;
}
#define MLX5E_SET_FEATURE(netdev, feature, enable)	\
	do {						\
		if (enable)				\
			netdev->features |= feature;	\
		else					\
			netdev->features &= ~feature;	\
	} while (0)

typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
static int set_feature_lro(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	reset = (priv->channels.params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST);
	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	new_channels.params = priv->channels.params;
	new_channels.params.lro_en = enable;

	if (!reset) {
		priv->channels.params = new_channels.params;
		err = mlx5e_modify_tirs_lro(priv);
		goto out;
	}

	err = mlx5e_open_channels(priv, &new_channels);
	if (err)
		goto out;

	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_modify_tirs_lro);
out:
	mutex_unlock(&priv->state_lock);
	return err;
}
static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (enable)
		mlx5e_enable_vlan_filter(priv);
	else
		mlx5e_disable_vlan_filter(priv);

	return 0;
}

static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (!enable && mlx5e_tc_num_filters(priv)) {
		netdev_err(netdev,
			   "Active offloaded tc filters, can't turn hw_tc_offload off\n");
		return -EINVAL;
	}

	return 0;
}

static int set_feature_rx_all(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_set_port_fcs(mdev, !enable);
}
static int set_feature_rx_fcs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mutex_lock(&priv->state_lock);

	priv->channels.params.scatter_fcs_en = enable;
	err = mlx5e_modify_channels_scatter_fcs(&priv->channels, enable);
	if (err)
		priv->channels.params.scatter_fcs_en = !enable;

	mutex_unlock(&priv->state_lock);

	return err;
}

static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err = 0;

	mutex_lock(&priv->state_lock);

	priv->channels.params.vlan_strip_disable = !enable;
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		goto unlock;

	err = mlx5e_modify_channels_vsd(&priv->channels, !enable);
	if (err)
		priv->channels.params.vlan_strip_disable = enable;

unlock:
	mutex_unlock(&priv->state_lock);

	return err;
}
#ifdef CONFIG_RFS_ACCEL
static int set_feature_arfs(struct net_device *netdev, bool enable)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	if (enable)
		err = mlx5e_arfs_enable(priv);
	else
		err = mlx5e_arfs_disable(priv);

	return err;
}
#endif
static int mlx5e_handle_feature(struct net_device *netdev,
				netdev_features_t wanted_features,
				netdev_features_t feature,
				mlx5e_feature_handler feature_handler)
{
	netdev_features_t changes = wanted_features ^ netdev->features;
	bool enable = !!(wanted_features & feature);
	int err;

	if (!(changes & feature))
		return 0;

	err = feature_handler(netdev, enable);
	if (err) {
		netdev_err(netdev, "%s feature %pNF failed, err %d\n",
			   enable ? "Enable" : "Disable", &feature, err);
		return err;
	}

	MLX5E_SET_FEATURE(netdev, feature, enable);
	return 0;
}
static int mlx5e_set_features(struct net_device *netdev,
			      netdev_features_t features)
{
	int err;

	err  = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
				    set_feature_lro);
	err |= mlx5e_handle_feature(netdev, features,
				    NETIF_F_HW_VLAN_CTAG_FILTER,
				    set_feature_vlan_filter);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
				    set_feature_tc_num_filters);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
				    set_feature_rx_all);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXFCS,
				    set_feature_rx_fcs);
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
				    set_feature_rx_vlan);
#ifdef CONFIG_RFS_ACCEL
	err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
				    set_feature_arfs);
#endif

	return err ? -EINVAL : 0;
}
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5e_channels new_channels = {};
	int curr_mtu;
	int err = 0;
	bool reset;

	mutex_lock(&priv->state_lock);

	reset = !priv->channels.params.lro_en &&
		(priv->channels.params.rq_wq_type !=
		 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);

	reset = reset && test_bit(MLX5E_STATE_OPENED, &priv->state);

	curr_mtu    = netdev->mtu;
	netdev->mtu = new_mtu;

	if (!reset) {
		mlx5e_set_dev_port_mtu(priv);
		goto out;
	}

	new_channels.params = priv->channels.params;
	err = mlx5e_open_channels(priv, &new_channels);
	if (err) {
		netdev->mtu = curr_mtu;
		goto out;
	}

	mlx5e_switch_priv_channels(priv, &new_channels, mlx5e_set_dev_port_mtu);

out:
	mutex_unlock(&priv->state_lock);
	return err;
}
static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		return mlx5e_hwstamp_set(priv, ifr);
	case SIOCGHWTSTAMP:
		return mlx5e_hwstamp_get(priv, ifr);
	default:
		return -EOPNOTSUPP;
	}
}
#ifdef CONFIG_MLX5_ESWITCH
static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
}

static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos,
			     __be16 vlan_proto)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
					   vlan, qos);
}

static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
}

static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
			     int max_tx_rate)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
					   max_tx_rate, min_tx_rate);
}
static int mlx5_vport_link2ifla(u8 esw_link)
{
	switch (esw_link) {
	case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
		return IFLA_VF_LINK_STATE_DISABLE;
	case MLX5_ESW_VPORT_ADMIN_STATE_UP:
		return IFLA_VF_LINK_STATE_ENABLE;
	}
	return IFLA_VF_LINK_STATE_AUTO;
}

static int mlx5_ifla_link2vport(u8 ifla_link)
{
	switch (ifla_link) {
	case IFLA_VF_LINK_STATE_DISABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
	case IFLA_VF_LINK_STATE_ENABLE:
		return MLX5_ESW_VPORT_ADMIN_STATE_UP;
	}
	return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
}

static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
				   int link_state)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
					    mlx5_ifla_link2vport(link_state));
}
static int mlx5e_get_vf_config(struct net_device *dev,
			       int vf, struct ifla_vf_info *ivi)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
	if (err)
		return err;
	ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
	return 0;
}

static int mlx5e_get_vf_stats(struct net_device *dev,
			      int vf, struct ifla_vf_stats *vf_stats)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5_core_dev *mdev = priv->mdev;

	return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
					    vf_stats);
}
#endif
static void mlx5e_add_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
}

static void mlx5e_del_vxlan_port(struct net_device *netdev,
				 struct udp_tunnel_info *ti)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
		return;

	if (!mlx5e_vxlan_allowed(priv->mdev))
		return;

	mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
}
static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
						     struct sk_buff *skb,
						     netdev_features_t features)
{
	unsigned int offset = 0;
	struct udphdr *udph;
	u8 proto;
	u16 port;

	switch (vlan_get_protocol(skb)) {
	case htons(ETH_P_IP):
		proto = ip_hdr(skb)->protocol;
		break;
	case htons(ETH_P_IPV6):
		proto = ipv6_find_hdr(skb, &offset, -1, NULL, NULL);
		break;
	default:
		goto out;
	}

	if (proto != IPPROTO_UDP)
		goto out;

	udph = udp_hdr(skb);
	port = be16_to_cpu(udph->dest);

	/* Verify if UDP port is being offloaded by HW */
	if (mlx5e_vxlan_lookup_port(priv, port))
		return features;

out:
	/* Disable CSUM and GSO if the udp dport is not offloaded by HW */
	return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
					      struct net_device *netdev,
					      netdev_features_t features)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

#ifdef CONFIG_MLX5_EN_IPSEC
	if (mlx5e_ipsec_feature_check(skb, netdev, features))
		return features;
#endif

	/* Validate if the tunneled packet is being offloaded by HW */
	if (skb->encapsulation &&
	    (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
		return mlx5e_tunnel_features_check(priv, skb, features);

	return features;
}
static void mlx5e_tx_timeout(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	bool sched_work = false;
	int i;

	netdev_err(dev, "TX timeout detected\n");

	for (i = 0; i < priv->channels.num * priv->channels.params.num_tc; i++) {
		struct mlx5e_txqsq *sq = priv->txq2sq[i];

		if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
			continue;
		sched_work = true;
		clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state);
		netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
			   i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
	}

	if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
		schedule_work(&priv->tx_timeout_work);
}
static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct bpf_prog *old_prog;
	int err = 0;
	bool reset, was_opened;
	int i;

	mutex_lock(&priv->state_lock);

	if ((netdev->features & NETIF_F_LRO) && prog) {
		netdev_warn(netdev, "can't set XDP while LRO is on, disable LRO first\n");
		err = -EINVAL;
		goto unlock;
	}

	if ((netdev->features & NETIF_F_HW_ESP) && prog) {
		netdev_warn(netdev, "can't set XDP with IPSec offload\n");
		err = -EINVAL;
		goto unlock;
	}

	was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
	/* no need for full reset when exchanging programs */
	reset = (!priv->channels.params.xdp_prog || !prog);

	if (was_opened && reset)
		mlx5e_close_locked(netdev);
	if (was_opened && !reset) {
		/* num_channels is invariant here, so we can take the
		 * batched reference right upfront.
		 */
		prog = bpf_prog_add(prog, priv->channels.num);
		if (IS_ERR(prog)) {
			err = PTR_ERR(prog);
			goto unlock;
		}
	}

	/* exchange programs, extra prog reference we got from caller
	 * as long as we don't fail from this point onwards.
	 */
	old_prog = xchg(&priv->channels.params.xdp_prog, prog);
	if (old_prog)
		bpf_prog_put(old_prog);

	if (reset) /* change RQ type according to priv->xdp_prog */
		mlx5e_set_rq_params(priv->mdev, &priv->channels.params);

	if (was_opened && reset)
		mlx5e_open_locked(netdev);

	if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset)
		goto unlock;

	/* exchanging programs w/o reset, we update ref counts on behalf
	 * of the channels RQs here.
	 */
	for (i = 0; i < priv->channels.num; i++) {
		struct mlx5e_channel *c = priv->channels.c[i];

		clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		napi_synchronize(&c->napi);
		/* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */

		old_prog = xchg(&c->rq.xdp_prog, prog);

		set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state);
		/* napi_schedule in case we have missed anything */
		napi_schedule(&c->napi);

		if (old_prog)
			bpf_prog_put(old_prog);
	}

unlock:
	mutex_unlock(&priv->state_lock);
	return err;
}
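/* Two update paths above: a NULL<->prog transition changes the RQ type and
 * needs a full close/open cycle, while a prog->prog swap only xchg()s the
 * per-RQ pointer with each RQ disabled and napi_synchronize()d, using the
 * batched bpf_prog_add() reference taken up front for all channels.
 */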
static u32 mlx5e_xdp_query(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	const struct bpf_prog *xdp_prog;
	u32 prog_id = 0;

	mutex_lock(&priv->state_lock);
	xdp_prog = priv->channels.params.xdp_prog;
	if (xdp_prog)
		prog_id = xdp_prog->aux->id;
	mutex_unlock(&priv->state_lock);

	return prog_id;
}

static int mlx5e_xdp(struct net_device *dev, struct netdev_xdp *xdp)
{
	switch (xdp->command) {
	case XDP_SETUP_PROG:
		return mlx5e_xdp_set(dev, xdp->prog);
	case XDP_QUERY_PROG:
		xdp->prog_id = mlx5e_xdp_query(dev);
		xdp->prog_attached = !!xdp->prog_id;
		return 0;
	default:
		return -EINVAL;
	}
}
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Fake "interrupt" called by netpoll (eg netconsole) to send skbs without
 * reenabling interrupts.
 */
static void mlx5e_netpoll(struct net_device *dev)
{
	struct mlx5e_priv *priv = netdev_priv(dev);
	struct mlx5e_channels *chs = &priv->channels;

	int i;

	for (i = 0; i < chs->num; i++)
		napi_schedule(&chs->c[i]->napi);
}
#endif
static const struct net_device_ops mlx5e_netdev_ops = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_set_tx_maxrate      = mlx5e_set_tx_maxrate,
	.ndo_udp_tunnel_add      = mlx5e_add_vxlan_port,
	.ndo_udp_tunnel_del      = mlx5e_del_vxlan_port,
	.ndo_features_check      = mlx5e_features_check,
#ifdef CONFIG_RFS_ACCEL
	.ndo_rx_flow_steer       = mlx5e_rx_flow_steer,
#endif
	.ndo_tx_timeout          = mlx5e_tx_timeout,
	.ndo_xdp                 = mlx5e_xdp,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller     = mlx5e_netpoll,
#endif
#ifdef CONFIG_MLX5_ESWITCH
	/* SRIOV E-Switch NDOs */
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_set_vf_spoofchk     = mlx5e_set_vf_spoofchk,
	.ndo_set_vf_trust        = mlx5e_set_vf_trust,
	.ndo_set_vf_rate         = mlx5e_set_vf_rate,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
	.ndo_has_offload_stats   = mlx5e_has_offload_stats,
	.ndo_get_offload_stats   = mlx5e_get_offload_stats,
#endif
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
{
	if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
		return -EOPNOTSUPP;
	if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
	    !MLX5_CAP_GEN(mdev, nic_flow_table) ||
	    !MLX5_CAP_ETH(mdev, csum_cap) ||
	    !MLX5_CAP_ETH(mdev, max_lso_cap) ||
	    !MLX5_CAP_ETH(mdev, vlan_cap) ||
	    !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
	    MLX5_CAP_FLOWTABLE(mdev,
			       flow_table_properties_nic_receive.max_ft_level)
			       < 3) {
		mlx5_core_warn(mdev,
			       "Not creating net device, some required device capabilities are missing\n");
		return -EOPNOTSUPP;
	}
	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
	if (!MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_warn(mdev, "CQ moderation is not supported\n");

	return 0;
}
u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
{
	int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;

	return bf_buf_size -
	       sizeof(struct mlx5e_tx_wqe) +
	       2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
}

void mlx5e_build_default_indir_rqt(u32 *indirection_rqt, int len,
				   int num_channels)
{
	int i;

	for (i = 0; i < len; i++)
		indirection_rqt[i] = i % num_channels;
}
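/* Example (illustrative, assuming MLX5E_INDIR_RQT_SIZE is 128):
 *
 *	u32 rqt[MLX5E_INDIR_RQT_SIZE];
 *
 *	mlx5e_build_default_indir_rqt(rqt, MLX5E_INDIR_RQT_SIZE, 6);
 *	// rqt = { 0, 1, 2, 3, 4, 5, 0, 1, ... } - round-robin over channels
 */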
static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
	enum pcie_link_width width;
	enum pci_bus_speed speed;
	int err = 0;

	err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
	if (err)
		return err;

	if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
		return -EINVAL;

	switch (speed) {
	case PCIE_SPEED_2_5GT:
		*pci_bw = 2500 * width;
		break;
	case PCIE_SPEED_5_0GT:
		*pci_bw = 5000 * width;
		break;
	case PCIE_SPEED_8_0GT:
		*pci_bw = 8000 * width;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
{
	return (link_speed && pci_bw &&
		(pci_bw < 40000) && (pci_bw < link_speed));
}

static bool hw_lro_heuristic(u32 link_speed, u32 pci_bw)
{
	return !(link_speed && pci_bw &&
		 (pci_bw <= 16000) && (pci_bw < link_speed));
}
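/* Worked example (illustrative, assuming both arguments are in Mb/s): a
 * 100Gb/s port behind a Gen3 x4 slot gives pci_bw = 8000 * 4 = 32000, so
 * cqe_compress_heuristic() is true (32000 < 40000 and 32000 < 100000);
 * behind Gen3 x16 (128000) it is false. I.e. these defaults only kick in
 * when the PCI link, not the wire, is the likely bottleneck.
 */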
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
	params->rx_cq_period_mode = cq_period_mode;

	params->rx_cq_moderation.pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	params->rx_cq_moderation.usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;

	if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
		params->rx_cq_moderation.usec =
			MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;

	if (params->rx_am_enabled)
		params->rx_cq_moderation =
			mlx5e_am_get_def_profile(params->rx_cq_period_mode);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_BASED_MODER,
			params->rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);
}
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
	int i;

	/* The supported periods are organized in ascending order */
	for (i = 0; i < MLX5E_LRO_TIMEOUT_ARR_SIZE - 1; i++)
		if (MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]) >= wanted_timeout)
			break;

	return MLX5_CAP_ETH(mdev, lro_timer_supported_periods[i]);
}
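/* Example (illustrative, hypothetical capability values): if the device
 * reports supported periods {8, 16, 32, 1024} and the wanted timeout is 32,
 * the loop stops at 32 and returns it; a request larger than every entry
 * falls through to the last (largest) supported period.
 */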
void mlx5e_build_nic_params(struct mlx5_core_dev *mdev,
			    struct mlx5e_params *params,
			    u16 max_channels)
{
	u8 cq_period_mode = 0;
	u32 link_speed = 0;
	u32 pci_bw = 0;

	params->num_channels = max_channels;
	params->num_tc       = 1;

	mlx5e_get_max_linkspeed(mdev, &link_speed);
	mlx5e_get_pci_bw(mdev, &pci_bw);
	mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
		      link_speed, pci_bw);

	/* SQ */
	params->log_sq_size = is_kdump_kernel() ?
		MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;

	/* set CQE compression */
	params->rx_cqe_compress_def = false;
	if (MLX5_CAP_GEN(mdev, cqe_compression) &&
	    MLX5_CAP_GEN(mdev, vport_group_manager))
		params->rx_cqe_compress_def = cqe_compress_heuristic(link_speed, pci_bw);

	MLX5E_SET_PFLAG(params, MLX5E_PFLAG_RX_CQE_COMPRESS, params->rx_cqe_compress_def);

	/* RQ */
	mlx5e_set_rq_params(mdev, params);

	/* HW LRO */

	/* TODO: && MLX5_CAP_ETH(mdev, lro_cap) */
	if (params->rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
		params->lro_en = hw_lro_heuristic(link_speed, pci_bw);
	params->lro_timeout = mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);

	/* CQ moderation params */
	cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
			MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
			MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
	params->rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
	mlx5e_set_rx_cq_mode_params(params, cq_period_mode);

	params->tx_cq_moderation.usec = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	params->tx_cq_moderation.pkts = MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;

	/* TX inline */
	params->tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	mlx5_query_min_inline(mdev, &params->tx_min_inline_mode);
	if (params->tx_min_inline_mode == MLX5_INLINE_MODE_NONE &&
	    !MLX5_CAP_ETH(mdev, wqe_vlan_insert))
		params->tx_min_inline_mode = MLX5_INLINE_MODE_L2;

	/* RSS */
	params->rss_hfunc = ETH_RSS_HASH_XOR;
	netdev_rss_key_fill(params->toeplitz_hash_key, sizeof(params->toeplitz_hash_key));
	mlx5e_build_default_indir_rqt(params->indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, max_channels);
}
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
					struct net_device *netdev,
					const struct mlx5e_profile *profile,
					void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->mdev     = mdev;
	priv->netdev   = netdev;
	priv->profile  = profile;
	priv->ppriv    = ppriv;
	priv->hard_mtu = MLX5E_ETH_HARD_MTU;

	mlx5e_build_nic_params(mdev, &priv->channels.params, profile->max_nch(mdev));

	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
	if (is_zero_ether_addr(netdev->dev_addr) &&
	    !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
		eth_hw_addr_random(netdev);
		mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
	}
}

#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
static const struct switchdev_ops mlx5e_switchdev_ops = {
	.switchdev_port_attr_get	= mlx5e_attr_get,
};
#endif
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;
	bool fcs_supported;
	bool fcs_enabled;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	netdev->netdev_ops = &mlx5e_netdev_ops;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	if (MLX5_CAP_GEN(mdev, vport_group_manager) && MLX5_CAP_GEN(mdev, qos))
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif

	netdev->watchdog_timeo = 15 * HZ;

	netdev->ethtool_ops = &mlx5e_ethtool_ops;

	netdev->vlan_features |= NETIF_F_SG;
	netdev->vlan_features |= NETIF_F_IP_CSUM;
	netdev->vlan_features |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features |= NETIF_F_GRO;
	netdev->vlan_features |= NETIF_F_TSO;
	netdev->vlan_features |= NETIF_F_TSO6;
	netdev->vlan_features |= NETIF_F_RXCSUM;
	netdev->vlan_features |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features |= NETIF_F_LRO;

	netdev->hw_features  = netdev->vlan_features;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mlx5e_vxlan_allowed(mdev) || MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->hw_features     |= NETIF_F_GSO_PARTIAL;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_GSO_PARTIAL;
	}

	if (mlx5e_vxlan_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL |
					   NETIF_F_GSO_UDP_TUNNEL_CSUM;
		netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
	}

	if (MLX5_CAP_ETH(mdev, tunnel_stateless_gre)) {
		netdev->hw_features     |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->hw_enc_features |= NETIF_F_GSO_GRE |
					   NETIF_F_GSO_GRE_CSUM;
		netdev->gso_partial_features |= NETIF_F_GSO_GRE |
						NETIF_F_GSO_GRE_CSUM;
	}

	mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

	if (fcs_supported)
		netdev->hw_features |= NETIF_F_RXALL;

	if (MLX5_CAP_ETH(mdev, scatter_fcs))
		netdev->hw_features |= NETIF_F_RXFCS;

	netdev->features = netdev->hw_features;
	if (!priv->channels.params.lro_en)
		netdev->features &= ~NETIF_F_LRO;

	if (fcs_enabled)
		netdev->features &= ~NETIF_F_RXALL;

	if (!priv->channels.params.scatter_fcs_en)
		netdev->features &= ~NETIF_F_RXFCS;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify)) {
		netdev->hw_features |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
		netdev->hw_features |= NETIF_F_NTUPLE;
#endif
	}

	netdev->features |= NETIF_F_HIGHDMA;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);

#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
	if (MLX5_ESWITCH_MANAGER(mdev))
		netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif

	mlx5e_ipsec_build_netdev(priv);
}
static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
	if (err) {
		mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
		priv->q_counter = 0;
	}
}

static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
{
	if (!priv->q_counter)
		return;

	mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}
static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
			   struct net_device *netdev,
			   const struct mlx5e_profile *profile,
			   void *ppriv)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	int err;

	mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
	err = mlx5e_ipsec_init(priv);
	if (err)
		mlx5_core_err(mdev, "IPSec initialization failed, %d\n", err);
	mlx5e_build_nic_netdev(netdev);
	mlx5e_vxlan_init(priv);
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
	mlx5e_ipsec_cleanup(priv);
	mlx5e_vxlan_cleanup(priv);

	if (priv->channels.params.xdp_prog)
		bpf_prog_put(priv->channels.params.xdp_prog);
}
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	int err;

	err = mlx5e_create_indirect_rqt(priv);
	if (err)
		return err;

	err = mlx5e_create_direct_rqts(priv);
	if (err)
		goto err_destroy_indirect_rqts;

	err = mlx5e_create_indirect_tirs(priv);
	if (err)
		goto err_destroy_direct_rqts;

	err = mlx5e_create_direct_tirs(priv);
	if (err)
		goto err_destroy_indirect_tirs;

	err = mlx5e_create_flow_steering(priv);
	if (err) {
		mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
		goto err_destroy_direct_tirs;
	}

	err = mlx5e_tc_init(priv);
	if (err)
		goto err_destroy_flow_steering;

	return 0;

err_destroy_flow_steering:
	mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
	mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
	mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
	mlx5e_destroy_direct_rqts(priv);
err_destroy_indirect_rqts:
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
	return err;
}
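/* The unwind ladder above follows the usual kernel idiom: each err_* label
 * tears down exactly the objects created before the failing step, in
 * reverse order of creation, so a failure at any stage leaves no leaked
 * RQTs, TIRs or steering tables.
 */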
static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
	mlx5e_tc_cleanup(priv);
	mlx5e_destroy_flow_steering(priv);
	mlx5e_destroy_direct_tirs(priv);
	mlx5e_destroy_indirect_tirs(priv);
	mlx5e_destroy_direct_rqts(priv);
	mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}
static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
	int err;

	err = mlx5e_create_tises(priv);
	if (err) {
		mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
		return err;
	}

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_dcbnl_initialize(priv);
#endif
	return 0;
}
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	struct mlx5_core_dev *mdev = priv->mdev;
	u16 max_mtu;

	mlx5e_init_l2_addr(priv);

	/* Marking the link as currently not needed by the Driver */
	if (!netif_running(netdev))
		mlx5_set_port_admin_status(mdev, MLX5_PORT_DOWN);

	/* MTU range: 68 - hw-specific max */
	netdev->min_mtu = ETH_MIN_MTU;
	mlx5_query_port_max_mtu(priv->mdev, &max_mtu, 1);
	netdev->max_mtu = MLX5E_HW2SW_MTU(priv, max_mtu);
	mlx5e_set_dev_port_mtu(priv);

	mlx5_lag_add(mdev, netdev);

	mlx5e_enable_async_events(priv);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_register_vport_reps(priv);

	if (netdev->reg_state != NETREG_REGISTERED)
		return;

	queue_work(priv->wq, &priv->set_rx_mode_work);

	rtnl_lock();
	if (netif_running(netdev))
		mlx5e_open(netdev);
	netif_device_attach(netdev);
	rtnl_unlock();
}
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;

	rtnl_lock();
	if (netif_running(priv->netdev))
		mlx5e_close(priv->netdev);
	netif_device_detach(priv->netdev);
	rtnl_unlock();

	queue_work(priv->wq, &priv->set_rx_mode_work);

	if (MLX5_ESWITCH_MANAGER(priv->mdev))
		mlx5e_unregister_vport_reps(priv);

	mlx5e_disable_async_events(priv);
	mlx5_lag_remove(mdev);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
	.init		   = mlx5e_nic_init,
	.cleanup	   = mlx5e_nic_cleanup,
	.init_rx	   = mlx5e_init_nic_rx,
	.cleanup_rx	   = mlx5e_cleanup_nic_rx,
	.init_tx	   = mlx5e_init_nic_tx,
	.cleanup_tx	   = mlx5e_cleanup_nic_tx,
	.enable		   = mlx5e_nic_enable,
	.disable	   = mlx5e_nic_disable,
	.update_stats	   = mlx5e_update_ndo_stats,
	.max_nch	   = mlx5e_get_max_num_channels,
	.update_carrier	   = mlx5e_update_carrier,
	.rx_handlers.handle_rx_cqe       = mlx5e_handle_rx_cqe,
	.rx_handlers.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
	.max_tc		   = MLX5E_MAX_NUM_TC,
};
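/* The profile is the indirection that lets the attach/detach scaffolding
 * below drive different netdev flavors (the native NIC here, representors
 * elsewhere): init/cleanup build the netdev, init_rx/init_tx create the HW
 * objects, and enable/disable toggle runtime state.
 */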
/* mlx5e generic netdev management API (move to en_common.c) */
struct net_device *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
				       const struct mlx5e_profile *profile,
				       void *ppriv)
{
	int nch = profile->max_nch(mdev);
	struct net_device *netdev;
	struct mlx5e_priv *priv;

	netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
				    nch * profile->max_tc,
				    nch);
	if (!netdev) {
		mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
		return NULL;
	}

#ifdef CONFIG_RFS_ACCEL
	netdev->rx_cpu_rmap = mdev->rmap;
#endif

	profile->init(mdev, netdev, profile, ppriv);

	netif_carrier_off(netdev);

	priv = netdev_priv(netdev);

	priv->wq = create_singlethread_workqueue("mlx5e");
	if (!priv->wq)
		goto err_cleanup_nic;

	return netdev;

err_cleanup_nic:
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);

	return NULL;
}
int mlx5e_attach_netdev(struct mlx5e_priv *priv)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	const struct mlx5e_profile *profile;
	int err;

	profile = priv->profile;
	clear_bit(MLX5E_STATE_DESTROYING, &priv->state);

	err = profile->init_tx(priv);
	if (err)
		goto out;

	err = mlx5e_open_drop_rq(mdev, &priv->drop_rq);
	if (err) {
		mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
		goto err_cleanup_tx;
	}

	err = profile->init_rx(priv);
	if (err)
		goto err_close_drop_rq;

	mlx5e_create_q_counter(priv);

	if (profile->enable)
		profile->enable(priv);

	return 0;

err_close_drop_rq:
	mlx5e_close_drop_rq(&priv->drop_rq);

err_cleanup_tx:
	profile->cleanup_tx(priv);

out:
	return err;
}
void mlx5e_detach_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;

	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	if (profile->disable)
		profile->disable(priv);
	flush_workqueue(priv->wq);

	mlx5e_destroy_q_counter(priv);
	profile->cleanup_rx(priv);
	mlx5e_close_drop_rq(&priv->drop_rq);
	profile->cleanup_tx(priv);
	cancel_delayed_work_sync(&priv->update_stats_work);
}
void mlx5e_destroy_netdev(struct mlx5e_priv *priv)
{
	const struct mlx5e_profile *profile = priv->profile;
	struct net_device *netdev = priv->netdev;

	destroy_workqueue(priv->wq);
	if (profile->cleanup)
		profile->cleanup(priv);
	free_netdev(netdev);
}
/* mlx5e_attach and mlx5e_detach scope should be only creating/destroying
 * hardware contexts and to connect it to the current netdev.
 */
static int mlx5e_attach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;
	int err;

	if (netif_device_present(netdev))
		return 0;

	err = mlx5e_create_mdev_resources(mdev);
	if (err)
		return err;

	err = mlx5e_attach_netdev(priv);
	if (err) {
		mlx5e_destroy_mdev_resources(mdev);
		return err;
	}

	return 0;
}

static void mlx5e_detach(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	mlx5e_detach_netdev(priv);
	mlx5e_destroy_mdev_resources(mdev);
}
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
	struct net_device *netdev;
	void *rpriv = NULL;
	void *priv;
	int err;

	err = mlx5e_check_required_hca_cap(mdev);
	if (err)
		return NULL;

#ifdef CONFIG_MLX5_ESWITCH
	if (MLX5_ESWITCH_MANAGER(mdev)) {
		rpriv = mlx5e_alloc_nic_rep_priv(mdev);
		if (!rpriv) {
			mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
			return NULL;
		}
	}
#endif

	netdev = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, rpriv);
	if (!netdev) {
		mlx5_core_err(mdev, "mlx5e_create_netdev failed\n");
		goto err_free_rpriv;
	}

	priv = netdev_priv(netdev);

	err = mlx5e_attach(mdev, priv);
	if (err) {
		mlx5_core_err(mdev, "mlx5e_attach failed, %d\n", err);
		goto err_destroy_netdev;
	}

	err = register_netdev(netdev);
	if (err) {
		mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
		goto err_detach;
	}

	return priv;

err_detach:
	mlx5e_detach(mdev, priv);
err_destroy_netdev:
	mlx5e_destroy_netdev(priv);
err_free_rpriv:
	kfree(rpriv);
	return NULL;
}
static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	void *ppriv = priv->ppriv;

	unregister_netdev(priv->netdev);
	mlx5e_detach(mdev, vpriv);
	mlx5e_destroy_netdev(priv);
	kfree(ppriv);
}

static void *mlx5e_get_netdev(void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;

	return priv->netdev;
}
static struct mlx5_interface mlx5e_interface = {
	.add       = mlx5e_add,
	.remove    = mlx5e_remove,
	.attach    = mlx5e_attach,
	.detach    = mlx5e_detach,
	.event     = mlx5e_async_event,
	.protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev   = mlx5e_get_netdev,
};
void mlx5e_init(void)
{
	mlx5e_ipsec_build_inverse_table();
	mlx5e_build_ptys2ethtool_map();
	mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}