// SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
/*
 * Copyright(c) 2020 Intel Corporation.
 */

/*
 * This file contains HFI1 support for netdev RX functionality
 */

#include "sdma.h"
#include "verbs.h"
#include "netdev.h"
#include "hfi.h"

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <rdma/ib_verbs.h>
static int hfi1_netdev_setup_ctxt(struct hfi1_netdev_priv *priv,
				  struct hfi1_ctxtdata *uctxt)
{
	unsigned int rcvctrl_ops;
	struct hfi1_devdata *dd = priv->dd;
	int ret;

	uctxt->rhf_rcv_function_map = netdev_rhf_rcv_functions;
	uctxt->do_interrupt = &handle_receive_interrupt_napi_sp;

	/* Now allocate the RcvHdr queue and eager buffers. */
	ret = hfi1_create_rcvhdrq(dd, uctxt);
	if (ret)
		goto done;

	ret = hfi1_setup_eagerbufs(uctxt);
	if (ret)
		goto done;

	clear_rcvhdrtail(uctxt);

	/* Keep the context disabled until the netdev queues are enabled. */
	rcvctrl_ops = HFI1_RCVCTRL_CTXT_DIS;
	rcvctrl_ops |= HFI1_RCVCTRL_INTRAVAIL_DIS;

	/* Pick receive behaviors based on the kernel capability mask. */
	if (!HFI1_CAP_KGET_MASK(uctxt->flags, MULTI_PKT_EGR))
		rcvctrl_ops |= HFI1_RCVCTRL_ONE_PKT_EGR_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_EGR_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_EGR_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, NODROP_RHQ_FULL))
		rcvctrl_ops |= HFI1_RCVCTRL_NO_RHQ_DROP_ENB;
	if (HFI1_CAP_KGET_MASK(uctxt->flags, DMA_RTAIL))
		rcvctrl_ops |= HFI1_RCVCTRL_TAILUPD_ENB;

	hfi1_rcvctrl(uctxt->dd, rcvctrl_ops, uctxt);
done:
	return ret;
}
static int hfi1_netdev_allocate_ctxt(struct hfi1_devdata *dd,
				     struct hfi1_ctxtdata **ctxt)
{
	struct hfi1_ctxtdata *uctxt;
	int ret;

	if (dd->flags & HFI1_FROZEN)
		return -EIO;

	ret = hfi1_create_ctxtdata(dd->pport, dd->node, &uctxt);
	if (ret < 0) {
		dd_dev_err(dd, "Unable to create ctxtdata, failing open\n");
		return -ENOMEM;
	}

	uctxt->flags = HFI1_CAP_KGET(MULTI_PKT_EGR) |
		HFI1_CAP_KGET(NODROP_RHQ_FULL) |
		HFI1_CAP_KGET(NODROP_EGR_FULL) |
		HFI1_CAP_KGET(DMA_RTAIL);
	/* Netdev contexts are always NO_RDMA_RTAIL */
	uctxt->fast_handler = handle_receive_interrupt_napi_fp;
	uctxt->slow_handler = handle_receive_interrupt_napi_sp;
	hfi1_set_seq_cnt(uctxt, 1);
	uctxt->is_vnic = true;

	hfi1_stats.sps_ctxts++;

	dd_dev_info(dd, "created netdev context %d\n", uctxt->ctxt);
	*ctxt = uctxt;

	return 0;
}
static void hfi1_netdev_deallocate_ctxt(struct hfi1_devdata *dd,
					struct hfi1_ctxtdata *uctxt)
{
	flush_wc();

	/*
	 * Disable receive context and interrupt available, reset all
	 * RcvCtxtCtrl bits to default values.
	 */
	hfi1_rcvctrl(dd, HFI1_RCVCTRL_CTXT_DIS |
		     HFI1_RCVCTRL_TIDFLOW_DIS |
		     HFI1_RCVCTRL_INTRAVAIL_DIS |
		     HFI1_RCVCTRL_ONE_PKT_EGR_DIS |
		     HFI1_RCVCTRL_NO_RHQ_DROP_DIS |
		     HFI1_RCVCTRL_NO_EGR_DROP_DIS, uctxt);

	/* Free the MSI-X vector only if one was assigned to this context. */
	if (uctxt->msix_intr != CCE_NUM_MSIX_VECTORS)
		msix_free_irq(dd, uctxt->msix_intr);

	uctxt->msix_intr = CCE_NUM_MSIX_VECTORS;
	uctxt->event_flags = 0;

	hfi1_clear_tids(uctxt);
	hfi1_clear_ctxt_pkey(dd, uctxt);

	hfi1_stats.sps_ctxts--;

	hfi1_free_ctxt(uctxt);
}
static int hfi1_netdev_allot_ctxt(struct hfi1_netdev_priv *priv,
				  struct hfi1_ctxtdata **ctxt)
{
	struct hfi1_devdata *dd = priv->dd;
	int rc;

	rc = hfi1_netdev_allocate_ctxt(dd, ctxt);
	if (rc) {
		dd_dev_err(dd, "netdev ctxt alloc failed %d\n", rc);
		return rc;
	}

	rc = hfi1_netdev_setup_ctxt(priv, *ctxt);
	if (rc) {
		dd_dev_err(dd, "netdev ctxt setup failed %d\n", rc);
		hfi1_netdev_deallocate_ctxt(dd, *ctxt);
		*ctxt = NULL;
	}
	return rc;
}
/**
 * hfi1_num_netdev_contexts - Count of netdev recv contexts to use.
 * @dd: device on which to allocate netdev contexts
 * @available_contexts: count of available receive contexts
 * @cpu_mask: mask of possible cpus to include for contexts
 *
 * Return: the minimum of the CPU count on the device's NUMA node, the
 * remaining available receive contexts, and HFI1_MAX_NETDEV_CTXTS.
 * A value of 0 is returned when acceleration is explicitly turned off,
 * a memory allocation error occurs, or no receive contexts are available.
 */
u32 hfi1_num_netdev_contexts(struct hfi1_devdata *dd, u32 available_contexts,
			     struct cpumask *cpu_mask)
{
	cpumask_var_t node_cpu_mask;
	unsigned int available_cpus;

	if (!HFI1_CAP_IS_KSET(AIP))
		return 0;

	/* Always give user contexts priority over netdev contexts */
	if (available_contexts == 0) {
		dd_dev_info(dd, "No receive contexts available for netdevs.\n");
		return 0;
	}

	if (!zalloc_cpumask_var(&node_cpu_mask, GFP_KERNEL)) {
		dd_dev_err(dd, "Unable to allocate cpu_mask for netdevs.\n");
		return 0;
	}

	/* Use only the CPUs on the device's NUMA node. */
	cpumask_and(node_cpu_mask, cpu_mask, cpumask_of_node(dd->node));

	available_cpus = cpumask_weight(node_cpu_mask);

	free_cpumask_var(node_cpu_mask);

	return min3(available_cpus, available_contexts,
		    (u32)HFI1_MAX_NETDEV_CTXTS);
}
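/*
 * Worked example (numbers are illustrative, not from the source): with
 * 24 usable CPUs on the device's NUMA node and 10 available receive
 * contexts, and assuming HFI1_MAX_NETDEV_CTXTS were 8, the function
 * above would return min3(24, 10, 8) = 8 netdev contexts.
 */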
static int hfi1_netdev_rxq_init(struct net_device *dev)
{
	int i, rc;
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
	struct hfi1_devdata *dd = priv->dd;

	priv->num_rx_q = dd->num_netdev_contexts;
	priv->rxq = kcalloc_node(priv->num_rx_q, sizeof(struct hfi1_netdev_rxq),
				 GFP_KERNEL, dd->node);

	if (!priv->rxq) {
		dd_dev_err(dd, "Unable to allocate netdev queue data\n");
		return -ENOMEM;
	}

	for (i = 0; i < priv->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];

		rc = hfi1_netdev_allot_ctxt(priv, &rxq->rcd);
		if (rc)
			goto bail_context_irq_failure;

		hfi1_rcd_get(rxq->rcd);
		rxq->priv = priv;
		rxq->rcd->napi = &rxq->napi;
		dd_dev_info(dd, "Setting rcv queue %d napi to context %d\n",
			    i, rxq->rcd->ctxt);
		/*
		 * Disable BUSY_POLL on this NAPI as this is not supported
		 * right now.
		 */
		set_bit(NAPI_STATE_NO_BUSY_POLL, &rxq->napi.state);
		netif_napi_add(dev, &rxq->napi, hfi1_netdev_rx_napi, 64);
		rc = msix_netdev_request_rcd_irq(rxq->rcd);
		if (rc)
			goto bail_context_irq_failure;
	}

	return 0;

bail_context_irq_failure:
	dd_dev_err(dd, "Unable to allot receive context\n");
	/* Unwind, skipping the entry whose context allocation failed. */
	for (; i >= 0; i--) {
		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];

		if (rxq->rcd) {
			hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
			hfi1_rcd_put(rxq->rcd);
			rxq->rcd = NULL;
		}
	}
	kfree(priv->rxq);
	priv->rxq = NULL;
	return rc;
}
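/*
 * NAPI wiring note: hfi1_netdev_rx_napi is the poll callback registered
 * with netif_napi_add() above, and 64 is the NAPI weight, i.e. the
 * per-poll packet budget. Each receive context also gets its own MSI-X
 * interrupt via msix_netdev_request_rcd_irq().
 */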
static void hfi1_netdev_rxq_deinit(struct net_device *dev)
{
	int i;
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dev);
	struct hfi1_devdata *dd = priv->dd;

	for (i = 0; i < priv->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];

		netif_napi_del(&rxq->napi);
		hfi1_netdev_deallocate_ctxt(dd, rxq->rcd);
		hfi1_rcd_put(rxq->rcd);
		rxq->rcd = NULL;
	}

	kfree(priv->rxq);
	priv->rxq = NULL;
	priv->num_rx_q = 0;
}
static void enable_queues(struct hfi1_netdev_priv *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];

		dd_dev_info(priv->dd, "enabling queue %d on context %d\n", i,
			    rxq->rcd->ctxt);
		napi_enable(&rxq->napi);
		hfi1_rcvctrl(priv->dd,
			     HFI1_RCVCTRL_CTXT_ENB | HFI1_RCVCTRL_INTRAVAIL_ENB,
			     rxq->rcd);
	}
}
static void disable_queues(struct hfi1_netdev_priv *priv)
{
	int i;

	msix_netdev_synchronize_irq(priv->dd);

	for (i = 0; i < priv->num_rx_q; i++) {
		struct hfi1_netdev_rxq *rxq = &priv->rxq[i];

		dd_dev_info(priv->dd, "disabling queue %d on context %d\n", i,
			    rxq->rcd->ctxt);

		/* wait for napi if it was scheduled */
		hfi1_rcvctrl(priv->dd,
			     HFI1_RCVCTRL_CTXT_DIS | HFI1_RCVCTRL_INTRAVAIL_DIS,
			     rxq->rcd);
		napi_synchronize(&rxq->napi);
		napi_disable(&rxq->napi);
	}
}
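/*
 * Ordering note: disable_queues() synchronizes the netdev MSI-X IRQs
 * first so no new interrupt can schedule NAPI, then disables the receive
 * contexts in hardware, and only then synchronizes and disables each
 * NAPI instance. Reversing this order could let a late interrupt
 * re-schedule a NAPI poll that is being torn down.
 */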
/**
 * hfi1_netdev_rx_init - Increments the netdevs counter. On the first call,
 * it allocates the receive queue data and calls netif_napi_add
 * for each queue.
 *
 * @dd: hfi1 dev data
 */
int hfi1_netdev_rx_init(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
	int res;

	if (atomic_fetch_inc(&priv->netdevs))
		return 0;

	mutex_lock(&hfi1_mutex);
	init_dummy_netdev(dd->dummy_netdev);
	res = hfi1_netdev_rxq_init(dd->dummy_netdev);
	mutex_unlock(&hfi1_mutex);
	return res;
}
/**
 * hfi1_netdev_rx_destroy - Decrements the netdevs counter; when it reaches
 * 0, NAPI is deleted and the receive queue memory is freed.
 *
 * @dd: hfi1 dev data
 */
int hfi1_netdev_rx_destroy(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);

	/* destroy the RX queues only if it is the last netdev going away */
	if (atomic_fetch_add_unless(&priv->netdevs, -1, 0) == 1) {
		mutex_lock(&hfi1_mutex);
		hfi1_netdev_rxq_deinit(dd->dummy_netdev);
		mutex_unlock(&hfi1_mutex);
	}

	return 0;
}
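/*
 * Usage sketch (illustrative callers, not from the source): each netdev
 * client, e.g. VNIC or IPoIB, brackets its lifetime with this pair; only
 * the first init builds the queues and only the last destroy frees them.
 *
 *	hfi1_netdev_rx_init(dd);	// netdevs 0 -> 1, queues allocated
 *	hfi1_netdev_rx_init(dd);	// netdevs 1 -> 2, no-op
 *	hfi1_netdev_rx_destroy(dd);	// netdevs 2 -> 1, no-op
 *	hfi1_netdev_rx_destroy(dd);	// netdevs 1 -> 0, queues freed
 */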
/**
 * hfi1_netdev_alloc - Allocates the netdev and its private data. It is
 * required because the RMT index and MSI-X interrupt can be set only
 * during driver initialization.
 *
 * @dd: hfi1 dev data
 */
int hfi1_netdev_alloc(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_priv *priv;
	const int netdev_size = sizeof(*dd->dummy_netdev) +
		sizeof(struct hfi1_netdev_priv);

	dd_dev_info(dd, "allocating netdev size %d\n", netdev_size);
	dd->dummy_netdev = kcalloc_node(1, netdev_size, GFP_KERNEL, dd->node);

	if (!dd->dummy_netdev)
		return -ENOMEM;

	priv = hfi1_netdev_priv(dd->dummy_netdev);
	priv->dd = dd;
	xa_init(&priv->dev_tbl);
	atomic_set(&priv->enabled, 0);
	atomic_set(&priv->netdevs, 0);

	return 0;
}
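/*
 * Layout note: the private data is allocated in the same block,
 * immediately after struct net_device, so hfi1_netdev_priv() can recover
 * it from the netdev pointer, mirroring the netdev_priv() convention.
 */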
void hfi1_netdev_free(struct hfi1_devdata *dd)
{
	if (dd->dummy_netdev) {
		dd_dev_info(dd, "hfi1 netdev freed\n");
		kfree(dd->dummy_netdev);
		dd->dummy_netdev = NULL;
	}
}
/**
 * hfi1_netdev_enable_queues - This is the NAPI enable function.
 * It enables the NAPI objects associated with the receive queues.
 * Each device that calls it increments an atomic counter;
 * the queues are enabled only on the first call.
 * The disable function decrements the counter and, when it reaches 0,
 * calls napi_disable for every queue.
 *
 * @dd: hfi1 dev data
 */
void hfi1_netdev_enable_queues(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_priv *priv;

	if (!dd->dummy_netdev)
		return;

	priv = hfi1_netdev_priv(dd->dummy_netdev);
	if (atomic_fetch_inc(&priv->enabled))
		return;

	mutex_lock(&hfi1_mutex);
	enable_queues(priv);
	mutex_unlock(&hfi1_mutex);
}
void hfi1_netdev_disable_queues(struct hfi1_devdata *dd)
{
	struct hfi1_netdev_priv *priv;

	if (!dd->dummy_netdev)
		return;

	priv = hfi1_netdev_priv(dd->dummy_netdev);
	/* Only the call that drops the counter to 0 disables the queues. */
	if (atomic_dec_if_positive(&priv->enabled))
		return;

	mutex_lock(&hfi1_mutex);
	disable_queues(priv);
	mutex_unlock(&hfi1_mutex);
}
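/*
 * Illustrative sequence (not from the source): with two active users,
 * only the outermost enable/disable pair touches NAPI and the hardware.
 *
 *	hfi1_netdev_enable_queues(dd);	// enabled 0 -> 1, queues enabled
 *	hfi1_netdev_enable_queues(dd);	// enabled 1 -> 2, no-op
 *	hfi1_netdev_disable_queues(dd);	// enabled 2 -> 1, no-op
 *	hfi1_netdev_disable_queues(dd);	// enabled 1 -> 0, queues disabled
 */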
/**
 * hfi1_netdev_add_data - Registers data with a unique identifier so that
 * it can be requested later; this is needed for VNIC and IPoIB VLANs.
 *
 * This call is protected by the dev_tbl xarray's internal lock.
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 * @data: data to be associated with index
 *
 * Return: 0 on success, otherwise an error.
 */
int hfi1_netdev_add_data(struct hfi1_devdata *dd, int id, void *data)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);

	return xa_insert(&priv->dev_tbl, id, data, GFP_NOWAIT);
}
/**
 * hfi1_netdev_remove_data - Removes the data stored under a previously
 * given id and returns a reference to the removed entry.
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 */
void *hfi1_netdev_remove_data(struct hfi1_devdata *dd, int id)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);

	return xa_erase(&priv->dev_tbl, id);
}
/**
 * hfi1_netdev_get_data - Gets the data stored under the given id.
 *
 * @dd: hfi1 dev data
 * @id: requested integer id up to INT_MAX
 */
void *hfi1_netdev_get_data(struct hfi1_devdata *dd, int id)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);

	return xa_load(&priv->dev_tbl, id);
}
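/*
 * Illustrative round trip (id 10 and the payload pointer p are
 * hypothetical):
 *
 *	hfi1_netdev_add_data(dd, 10, p);	// register p under id 10
 *	p = hfi1_netdev_get_data(dd, 10);	// look it up
 *	p = hfi1_netdev_remove_data(dd, 10);	// unregister; returns p
 */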
/**
 * hfi1_netdev_get_first_data - Gets the first entry with an id greater
 * than or equal to *start_id.
 *
 * @dd: hfi1 dev data
 * @start_id: requested integer id up to INT_MAX; updated to the id of
 *	      the entry found, if any
 */
void *hfi1_netdev_get_first_data(struct hfi1_devdata *dd, int *start_id)
{
	struct hfi1_netdev_priv *priv = hfi1_netdev_priv(dd->dummy_netdev);
	unsigned long index = *start_id;
	void *ret;

	ret = xa_find(&priv->dev_tbl, &index, UINT_MAX, XA_PRESENT);
	*start_id = (int)index;
	return ret;
}
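/*
 * Iteration sketch (illustrative): walk every registered entry in
 * ascending id order.
 *
 *	int id = 0;
 *	void *data;
 *
 *	while ((data = hfi1_netdev_get_first_data(dd, &id))) {
 *		// use 'data', registered under 'id'
 *		id++;
 *	}
 */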