1 /****************************************************************************
2 * Driver for Solarflare network controllers and boards
3 * Copyright 2012-2013 Solarflare Communications Inc.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
10 #include "net_driver.h"
11 #include "ef10_regs.h"
14 #include "mcdi_pcol.h"
16 #include "workarounds.h"
18 #include "ef10_sriov.h"
20 #include <linux/jhash.h>
21 #include <linux/wait.h>
22 #include <linux/workqueue.h>
24 /* Hardware control for EF10 architecture including 'Huntington'. */
26 #define EFX_EF10_DRVGEN_EV 7
32 /* The reserved RSS context value */
33 #define EFX_EF10_RSS_CONTEXT_INVALID 0xffffffff
34 /* The maximum size of a shared RSS context */
35 /* TODO: this should really be from the mcdi protocol export */
36 #define EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE 64UL
38 /* The filter table(s) are managed by firmware and we have write-only
39 * access. When removing filters we must identify them to the
40 * firmware by a 64-bit handle, but this is too wide for Linux kernel
41 * interfaces (32-bit for RX NFC, 16-bit for RFS). Also, we need to
42 * be able to tell in advance whether a requested insertion will
43 * replace an existing filter. Therefore we maintain a software hash
44 * table, which should be at least as large as the hardware hash table.
47 * Huntington has a single 8K filter table shared between all filter
48 * types and both ports. */
50 #define HUNT_FILTER_TBL_ROWS 8192
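/* Illustrative sketch, not the driver's exact field layout: with 8192 rows
 * the table index itself fits the narrow IDs the kernel interfaces need,
 * while the wide firmware handle stays private to this table:
 *
 *	u16 id = (u16)filter_idx;		(small enough for RFS/NFC)
 *	u64 handle = entry[filter_idx].handle;	(firmware-only, 64-bit)
 */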
52 #define EFX_EF10_FILTER_ID_INVALID 0xffff
53 struct efx_ef10_dev_addr {
58 struct efx_ef10_filter_table {
59 /* The RX match field masks supported by this fw & hw, in order of priority */
60 enum efx_filter_match_flags rx_match_flags[
61 MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
62 unsigned int rx_match_count;
65 unsigned long spec; /* pointer to spec plus flag bits */
66 /* BUSY flag indicates that an update is in progress. AUTO_OLD is
67 * used to mark and sweep MAC filters for the device address lists. */
69 #define EFX_EF10_FILTER_FLAG_BUSY 1UL
70 #define EFX_EF10_FILTER_FLAG_AUTO_OLD 2UL
71 #define EFX_EF10_FILTER_FLAGS 3UL
72 u64 handle; /* firmware handle */
74 wait_queue_head_t waitq;
75 /* Shadow of net_device address lists, guarded by mac_lock */
76 #define EFX_EF10_FILTER_DEV_UC_MAX 32
77 #define EFX_EF10_FILTER_DEV_MC_MAX 256
78 struct efx_ef10_dev_addr dev_uc_list[EFX_EF10_FILTER_DEV_UC_MAX];
79 struct efx_ef10_dev_addr dev_mc_list[EFX_EF10_FILTER_DEV_MC_MAX];
82 /* Indices (like efx_ef10_dev_addr.id) for promisc/allmulti filters */
88 /* An arbitrary search limit for the software hash table */
89 #define EFX_EF10_FILTER_SEARCH_LIMIT 200
91 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx);
92 static void efx_ef10_filter_table_remove(struct efx_nic *efx);
94 static int efx_ef10_get_warm_boot_count(struct efx_nic *efx)
98 efx_readd(efx, &reg, ER_DZ_BIU_MC_SFT_STATUS);
99 return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
100 EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
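/* The firmware stamps 0xb007 ("boot") in the upper 16-bit word of
 * ER_DZ_BIU_MC_SFT_STATUS when the count in the lower word is valid;
 * anything else means the MC is not ready yet, hence the -EIO above.
 */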
103 static unsigned int efx_ef10_mem_map_size(struct efx_nic *efx)
107 bar = efx->type->mem_bar;
108 return resource_size(&efx->pci_dev->resource[bar]);
111 static bool efx_ef10_is_vf(struct efx_nic *efx)
113 return efx->type->is_vf;
116 static int efx_ef10_get_pf_index(struct efx_nic *efx)
118 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
119 struct efx_ef10_nic_data *nic_data = efx->nic_data;
123 rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
124 sizeof(outbuf), &outlen);
127 if (outlen < sizeof(outbuf))
130 nic_data->pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
134 #ifdef CONFIG_SFC_SRIOV
135 static int efx_ef10_get_vf_index(struct efx_nic *efx)
137 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
138 struct efx_ef10_nic_data *nic_data = efx->nic_data;
142 rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
143 sizeof(outbuf), &outlen);
146 if (outlen < sizeof(outbuf))
149 nic_data->vf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_VF);
154 static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
156 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_OUT_LEN);
157 struct efx_ef10_nic_data *nic_data = efx->nic_data;
161 BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);
163 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
164 outbuf, sizeof(outbuf), &outlen);
167 if (outlen < sizeof(outbuf)) {
168 netif_err(efx, drv, efx->net_dev,
169 "unable to read datapath firmware capabilities\n");
173 nic_data->datapath_caps =
174 MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
176 /* record the DPCPU firmware IDs to determine VEB vswitching support. */
178 nic_data->rx_dpcpu_fw_id =
179 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_RX_DPCPU_FW_ID);
180 nic_data->tx_dpcpu_fw_id =
181 MCDI_WORD(outbuf, GET_CAPABILITIES_OUT_TX_DPCPU_FW_ID);
183 if (!(nic_data->datapath_caps &
184 (1 << MC_CMD_GET_CAPABILITIES_OUT_TX_TSO_LBN))) {
185 netif_err(efx, drv, efx->net_dev,
186 "current firmware does not support TSO\n");
190 if (!(nic_data->datapath_caps &
191 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_PREFIX_LEN_14_LBN))) {
192 netif_err(efx, probe, efx->net_dev,
193 "current firmware does not support an RX prefix\n");
200 static int efx_ef10_get_sysclk_freq(struct efx_nic *efx)
202 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CLOCK_OUT_LEN);
205 rc = efx_mcdi_rpc(efx, MC_CMD_GET_CLOCK, NULL, 0,
206 outbuf, sizeof(outbuf), NULL);
209 rc = MCDI_DWORD(outbuf, GET_CLOCK_OUT_SYS_FREQ);
210 return rc > 0 ? rc : -ERANGE;
213 static int efx_ef10_get_mac_address_pf(struct efx_nic *efx, u8 *mac_address)
215 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
219 BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);
221 rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
222 outbuf, sizeof(outbuf), &outlen);
225 if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
228 ether_addr_copy(mac_address,
229 MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
233 static int efx_ef10_get_mac_address_vf(struct efx_nic *efx, u8 *mac_address)
235 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_IN_LEN);
236 MCDI_DECLARE_BUF(outbuf, MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMAX);
240 MCDI_SET_DWORD(inbuf, VPORT_GET_MAC_ADDRESSES_IN_VPORT_ID,
241 EVB_PORT_ID_ASSIGNED);
242 rc = efx_mcdi_rpc(efx, MC_CMD_VPORT_GET_MAC_ADDRESSES, inbuf,
243 sizeof(inbuf), outbuf, sizeof(outbuf), &outlen);
247 if (outlen < MC_CMD_VPORT_GET_MAC_ADDRESSES_OUT_LENMIN)
250 num_addrs = MCDI_DWORD(outbuf,
251 VPORT_GET_MAC_ADDRESSES_OUT_MACADDR_COUNT);
253 WARN_ON(num_addrs != 1);
255 ether_addr_copy(mac_address,
256 MCDI_PTR(outbuf, VPORT_GET_MAC_ADDRESSES_OUT_MACADDR));
261 static ssize_t efx_ef10_show_link_control_flag(struct device *dev,
262 struct device_attribute *attr,
265 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
267 return sprintf(buf, "%d\n",
268 ((efx->mcdi->fn_flags) &
269 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
273 static ssize_t efx_ef10_show_primary_flag(struct device *dev,
274 struct device_attribute *attr,
277 struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev));
279 return sprintf(buf, "%d\n",
280 ((efx->mcdi->fn_flags) &
281 (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_PRIMARY))
285 static DEVICE_ATTR(link_control_flag, 0444, efx_ef10_show_link_control_flag,
287 static DEVICE_ATTR(primary_flag, 0444, efx_ef10_show_primary_flag, NULL);
289 static int efx_ef10_probe(struct efx_nic *efx)
291 struct efx_ef10_nic_data *nic_data;
292 struct net_device *net_dev = efx->net_dev;
295 /* We can have one VI for each 8K region. However, until we
296 * use TX option descriptors we need two TX queues per channel. */
298 efx->max_channels = min_t(unsigned int, EFX_MAX_CHANNELS,
300 efx_ef10_mem_map_size(efx) /
301 (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
302 efx->max_tx_channels = efx->max_channels;
303 if (WARN_ON(efx->max_channels == 0))
306 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
309 efx->nic_data = nic_data;
311 /* we assume later that we can copy from this buffer in dwords */
312 BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);
314 rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf,
315 8 + MCDI_CTL_SDU_LEN_MAX_V2, GFP_KERNEL);
319 /* Get the MC's warm boot count. In case it's rebooting right
320 * now, be prepared to retry. */
324 rc = efx_ef10_get_warm_boot_count(efx);
331 nic_data->warm_boot_count = rc;
333 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
335 nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
337 /* In case we're recovering from a crash (kexec), we want to
338 * cancel any outstanding request by the previous user of this
339 * function. We send a special message using the least
340 * significant bits of the 'high' (doorbell) register. */
342 _efx_writed(efx, cpu_to_le32(1), ER_DZ_MC_DB_HWRD);
344 rc = efx_mcdi_init(efx);
348 /* Reset (most) configuration for this function */
349 rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
353 /* Enable event logging */
354 rc = efx_mcdi_log_ctrl(efx, true, false, 0);
358 rc = device_create_file(&efx->pci_dev->dev,
359 &dev_attr_link_control_flag);
363 rc = device_create_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
367 rc = efx_ef10_get_pf_index(efx);
371 rc = efx_ef10_init_datapath_caps(efx);
375 efx->rx_packet_len_offset =
376 ES_DZ_RX_PREFIX_PKTLEN_OFST - ES_DZ_RX_PREFIX_SIZE;
378 rc = efx_mcdi_port_get_number(efx);
382 net_dev->dev_port = rc;
384 rc = efx->type->get_mac_address(efx, efx->net_dev->perm_addr);
388 rc = efx_ef10_get_sysclk_freq(efx);
391 efx->timer_quantum_ns = 1536000 / rc; /* 1536 cycles */
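/* Worked example, assuming GET_CLOCK reports the system clock in MHz:
 * a 200 MHz sysclk gives 1536000 / 200 = 7680 ns per timer quantum.
 */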
393 /* Check whether firmware supports bug 35388 workaround.
394 * First try to enable it, then if we get EPERM, just
395 * ask if it's already enabled. */
397 rc = efx_mcdi_set_workaround(efx, MC_CMD_WORKAROUND_BUG35388, true, NULL);
399 nic_data->workaround_35388 = true;
400 } else if (rc == -EPERM) {
401 unsigned int enabled;
403 rc = efx_mcdi_get_workarounds(efx, NULL, &enabled);
406 nic_data->workaround_35388 = enabled &
407 MC_CMD_GET_WORKAROUNDS_OUT_BUG35388;
408 } else if (rc != -ENOSYS && rc != -ENOENT) {
411 netif_dbg(efx, probe, efx->net_dev,
412 "workaround for bug 35388 is %sabled\n",
413 nic_data->workaround_35388 ? "en" : "dis");
415 rc = efx_mcdi_mon_probe(efx);
416 if (rc && rc != -EPERM)
419 efx_ptp_probe(efx, NULL);
421 #ifdef CONFIG_SFC_SRIOV
422 if ((efx->pci_dev->physfn) && (!efx->pci_dev->is_physfn)) {
423 struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
424 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
426 efx_pf->type->get_mac_address(efx_pf, nic_data->port_id);
429 ether_addr_copy(nic_data->port_id, efx->net_dev->perm_addr);
434 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
436 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
440 efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
443 efx->nic_data = NULL;
447 static int efx_ef10_free_vis(struct efx_nic *efx)
449 MCDI_DECLARE_BUF_ERR(outbuf);
451 int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
452 outbuf, sizeof(outbuf), &outlen);
454 /* -EALREADY means nothing to free, so ignore */
458 efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
465 static void efx_ef10_free_piobufs(struct efx_nic *efx)
467 struct efx_ef10_nic_data *nic_data = efx->nic_data;
468 MCDI_DECLARE_BUF(inbuf, MC_CMD_FREE_PIOBUF_IN_LEN);
472 BUILD_BUG_ON(MC_CMD_FREE_PIOBUF_OUT_LEN != 0);
474 for (i = 0; i < nic_data->n_piobufs; i++) {
475 MCDI_SET_DWORD(inbuf, FREE_PIOBUF_IN_PIOBUF_HANDLE,
476 nic_data->piobuf_handle[i]);
477 rc = efx_mcdi_rpc(efx, MC_CMD_FREE_PIOBUF, inbuf, sizeof(inbuf),
482 nic_data->n_piobufs = 0;
485 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
487 struct efx_ef10_nic_data *nic_data = efx->nic_data;
488 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_PIOBUF_OUT_LEN);
493 BUILD_BUG_ON(MC_CMD_ALLOC_PIOBUF_IN_LEN != 0);
495 for (i = 0; i < n; i++) {
496 rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_PIOBUF, NULL, 0,
497 outbuf, sizeof(outbuf), &outlen);
500 if (outlen < MC_CMD_ALLOC_PIOBUF_OUT_LEN) {
504 nic_data->piobuf_handle[i] =
505 MCDI_DWORD(outbuf, ALLOC_PIOBUF_OUT_PIOBUF_HANDLE);
506 netif_dbg(efx, probe, efx->net_dev,
507 "allocated PIO buffer %u handle %x\n", i,
508 nic_data->piobuf_handle[i]);
511 nic_data->n_piobufs = i;
513 efx_ef10_free_piobufs(efx);
517 static int efx_ef10_link_piobufs(struct efx_nic *efx)
519 struct efx_ef10_nic_data *nic_data = efx->nic_data;
520 _MCDI_DECLARE_BUF(inbuf,
521 max(MC_CMD_LINK_PIOBUF_IN_LEN,
522 MC_CMD_UNLINK_PIOBUF_IN_LEN));
523 struct efx_channel *channel;
524 struct efx_tx_queue *tx_queue;
525 unsigned int offset, index;
528 BUILD_BUG_ON(MC_CMD_LINK_PIOBUF_OUT_LEN != 0);
529 BUILD_BUG_ON(MC_CMD_UNLINK_PIOBUF_OUT_LEN != 0);
531 memset(inbuf, 0, sizeof(inbuf));
533 /* Link a buffer to each VI in the write-combining mapping */
534 for (index = 0; index < nic_data->n_piobufs; ++index) {
535 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_PIOBUF_HANDLE,
536 nic_data->piobuf_handle[index]);
537 MCDI_SET_DWORD(inbuf, LINK_PIOBUF_IN_TXQ_INSTANCE,
538 nic_data->pio_write_vi_base + index);
539 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
540 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
543 netif_err(efx, drv, efx->net_dev,
544 "failed to link VI %u to PIO buffer %u (%d)\n",
545 nic_data->pio_write_vi_base + index, index,
549 netif_dbg(efx, probe, efx->net_dev,
550 "linked VI %u to PIO buffer %u\n",
551 nic_data->pio_write_vi_base + index, index);
554 /* Link a buffer to each TX queue */
555 efx_for_each_channel(channel, efx) {
556 efx_for_each_channel_tx_queue(tx_queue, channel) {
557 /* We assign the PIO buffers to queues in
558 * reverse order to allow for the following special case. */
561 offset = ((efx->tx_channel_offset + efx->n_tx_channels -
562 tx_queue->channel->channel - 1) *
564 index = offset / ER_DZ_TX_PIOBUF_SIZE;
565 offset = offset % ER_DZ_TX_PIOBUF_SIZE;
567 /* When the host page size is 4K, the first
568 * host page in the WC mapping may be within
569 * the same VI page as the last TX queue. We
570 * can only link one buffer to each VI. */
572 if (tx_queue->queue == nic_data->pio_write_vi_base) {
576 MCDI_SET_DWORD(inbuf,
577 LINK_PIOBUF_IN_PIOBUF_HANDLE,
578 nic_data->piobuf_handle[index]);
579 MCDI_SET_DWORD(inbuf,
580 LINK_PIOBUF_IN_TXQ_INSTANCE,
582 rc = efx_mcdi_rpc(efx, MC_CMD_LINK_PIOBUF,
583 inbuf, MC_CMD_LINK_PIOBUF_IN_LEN,
588 /* This is non-fatal; the TX path just
589 * won't use PIO for this queue. */
591 netif_err(efx, drv, efx->net_dev,
592 "failed to link VI %u to PIO buffer %u (%d)\n",
593 tx_queue->queue, index, rc);
594 tx_queue->piobuf = NULL;
597 nic_data->pio_write_base +
598 index * EFX_VI_PAGE_SIZE + offset;
599 tx_queue->piobuf_offset = offset;
600 netif_dbg(efx, probe, efx->net_dev,
601 "linked VI %u to PIO buffer %u offset %x addr %p\n",
602 tx_queue->queue, index,
603 tx_queue->piobuf_offset,
613 MCDI_SET_DWORD(inbuf, UNLINK_PIOBUF_IN_TXQ_INSTANCE,
614 nic_data->pio_write_vi_base + index);
615 efx_mcdi_rpc(efx, MC_CMD_UNLINK_PIOBUF,
616 inbuf, MC_CMD_UNLINK_PIOBUF_IN_LEN,
622 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
624 struct efx_channel *channel;
625 struct efx_tx_queue *tx_queue;
627 /* All our existing PIO buffers went away */
628 efx_for_each_channel(channel, efx)
629 efx_for_each_channel_tx_queue(tx_queue, channel)
630 tx_queue->piobuf = NULL;
633 #else /* !EFX_USE_PIO */
635 static int efx_ef10_alloc_piobufs(struct efx_nic *efx, unsigned int n)
637 return n == 0 ? 0 : -ENOBUFS;
640 static int efx_ef10_link_piobufs(struct efx_nic *efx)
645 static void efx_ef10_free_piobufs(struct efx_nic *efx)
649 static void efx_ef10_forget_old_piobufs(struct efx_nic *efx)
653 #endif /* EFX_USE_PIO */
655 static void efx_ef10_remove(struct efx_nic *efx)
657 struct efx_ef10_nic_data *nic_data = efx->nic_data;
660 #ifdef CONFIG_SFC_SRIOV
661 struct efx_ef10_nic_data *nic_data_pf;
662 struct pci_dev *pci_dev_pf;
663 struct efx_nic *efx_pf;
666 if (efx->pci_dev->is_virtfn) {
667 pci_dev_pf = efx->pci_dev->physfn;
669 efx_pf = pci_get_drvdata(pci_dev_pf);
670 nic_data_pf = efx_pf->nic_data;
671 vf = nic_data_pf->vf + nic_data->vf_index;
674 netif_info(efx, drv, efx->net_dev,
675 "Could not get the PF id from VF\n");
681 efx_mcdi_mon_remove(efx);
683 efx_ef10_rx_free_indir_table(efx);
685 if (nic_data->wc_membase)
686 iounmap(nic_data->wc_membase);
688 rc = efx_ef10_free_vis(efx);
691 if (!nic_data->must_restore_piobufs)
692 efx_ef10_free_piobufs(efx);
694 device_remove_file(&efx->pci_dev->dev, &dev_attr_primary_flag);
695 device_remove_file(&efx->pci_dev->dev, &dev_attr_link_control_flag);
698 efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
702 static int efx_ef10_probe_pf(struct efx_nic *efx)
704 return efx_ef10_probe(efx);
707 int efx_ef10_vadaptor_alloc(struct efx_nic *efx, unsigned int port_id)
709 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_ALLOC_IN_LEN);
711 MCDI_SET_DWORD(inbuf, VADAPTOR_ALLOC_IN_UPSTREAM_PORT_ID, port_id);
712 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_ALLOC, inbuf, sizeof(inbuf),
716 int efx_ef10_vadaptor_free(struct efx_nic *efx, unsigned int port_id)
718 MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_FREE_IN_LEN);
720 MCDI_SET_DWORD(inbuf, VADAPTOR_FREE_IN_UPSTREAM_PORT_ID, port_id);
721 return efx_mcdi_rpc(efx, MC_CMD_VADAPTOR_FREE, inbuf, sizeof(inbuf),
725 int efx_ef10_vport_add_mac(struct efx_nic *efx,
726 unsigned int port_id, u8 *mac)
728 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_ADD_MAC_ADDRESS_IN_LEN);
730 MCDI_SET_DWORD(inbuf, VPORT_ADD_MAC_ADDRESS_IN_VPORT_ID, port_id);
731 ether_addr_copy(MCDI_PTR(inbuf, VPORT_ADD_MAC_ADDRESS_IN_MACADDR), mac);
733 return efx_mcdi_rpc(efx, MC_CMD_VPORT_ADD_MAC_ADDRESS, inbuf,
734 sizeof(inbuf), NULL, 0, NULL);
737 int efx_ef10_vport_del_mac(struct efx_nic *efx,
738 unsigned int port_id, u8 *mac)
740 MCDI_DECLARE_BUF(inbuf, MC_CMD_VPORT_DEL_MAC_ADDRESS_IN_LEN);
742 MCDI_SET_DWORD(inbuf, VPORT_DEL_MAC_ADDRESS_IN_VPORT_ID, port_id);
743 ether_addr_copy(MCDI_PTR(inbuf, VPORT_DEL_MAC_ADDRESS_IN_MACADDR), mac);
745 return efx_mcdi_rpc(efx, MC_CMD_VPORT_DEL_MAC_ADDRESS, inbuf,
746 sizeof(inbuf), NULL, 0, NULL);
749 #ifdef CONFIG_SFC_SRIOV
750 static int efx_ef10_probe_vf(struct efx_nic *efx)
753 struct pci_dev *pci_dev_pf;
755 /* If the parent PF has no VF data structure, it doesn't know about this
756 * VF so fail probe. The VF needs to be re-created. This can happen
757 * if the PF driver is unloaded while the VF is assigned to a guest. */
759 pci_dev_pf = efx->pci_dev->physfn;
761 struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
762 struct efx_ef10_nic_data *nic_data_pf = efx_pf->nic_data;
764 if (!nic_data_pf->vf) {
765 netif_info(efx, drv, efx->net_dev,
766 "The VF cannot link to its parent PF; "
767 "please destroy and re-create the VF\n");
772 rc = efx_ef10_probe(efx);
776 rc = efx_ef10_get_vf_index(efx);
780 if (efx->pci_dev->is_virtfn) {
781 if (efx->pci_dev->physfn) {
782 struct efx_nic *efx_pf =
783 pci_get_drvdata(efx->pci_dev->physfn);
784 struct efx_ef10_nic_data *nic_data_p = efx_pf->nic_data;
785 struct efx_ef10_nic_data *nic_data = efx->nic_data;
787 nic_data_p->vf[nic_data->vf_index].efx = efx;
788 nic_data_p->vf[nic_data->vf_index].pci_dev =
791 netif_info(efx, drv, efx->net_dev,
792 "Could not get the PF id from VF\n");
798 efx_ef10_remove(efx);
802 static int efx_ef10_probe_vf(struct efx_nic *efx __attribute__ ((unused)))
808 static int efx_ef10_alloc_vis(struct efx_nic *efx,
809 unsigned int min_vis, unsigned int max_vis)
811 MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
812 MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
813 struct efx_ef10_nic_data *nic_data = efx->nic_data;
817 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
818 MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
819 rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
820 outbuf, sizeof(outbuf), &outlen);
824 if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
827 netif_dbg(efx, drv, efx->net_dev, "base VI is 0x%03x\n",
828 MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
830 nic_data->vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
831 nic_data->n_allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
835 /* Note that the failure path of this function does not free
836 * resources, as this will be done by efx_ef10_remove(). */
838 static int efx_ef10_dimension_resources(struct efx_nic *efx)
840 struct efx_ef10_nic_data *nic_data = efx->nic_data;
841 unsigned int uc_mem_map_size, wc_mem_map_size;
842 unsigned int min_vis = max(EFX_TXQ_TYPES,
843 efx_separate_tx_channels ? 2 : 1);
844 unsigned int channel_vis, pio_write_vi_base, max_vis;
845 void __iomem *membase;
848 channel_vis = max(efx->n_channels, efx->n_tx_channels * EFX_TXQ_TYPES);
851 /* Try to allocate PIO buffers if wanted and if the full
852 * number of PIO buffers would be sufficient to allocate one
853 * copy-buffer per TX channel. Failure is non-fatal, as there
854 * are only a small number of PIO buffers shared between all
855 * functions of the controller. */
857 if (efx_piobuf_size != 0 &&
858 ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
859 efx->n_tx_channels) {
860 unsigned int n_piobufs =
861 DIV_ROUND_UP(efx->n_tx_channels,
862 ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
864 rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
866 netif_err(efx, probe, efx->net_dev,
867 "failed to allocate PIO buffers (%d)\n", rc);
869 netif_dbg(efx, probe, efx->net_dev,
870 "allocated %u PIO buffers\n", n_piobufs);
873 nic_data->n_piobufs = 0;
876 /* PIO buffers should be mapped with write-combining enabled,
877 * and we want to make single UC and WC mappings rather than
878 * several of each (in fact that's the only option if host
879 * page size is >4K). So we may allocate some extra VIs just
880 * for writing PIO buffers through.
882 * The UC mapping contains (channel_vis - 1) complete VIs and the
883 * first half of the next VI. Then the WC mapping begins with
884 * the second half of this last VI. */
886 uc_mem_map_size = PAGE_ALIGN((channel_vis - 1) * EFX_VI_PAGE_SIZE +
888 if (nic_data->n_piobufs) {
889 /* pio_write_vi_base rounds down to give the number of complete
890 * VIs inside the UC mapping. */
892 pio_write_vi_base = uc_mem_map_size / EFX_VI_PAGE_SIZE;
893 wc_mem_map_size = (PAGE_ALIGN((pio_write_vi_base +
894 nic_data->n_piobufs) *
897 max_vis = pio_write_vi_base + nic_data->n_piobufs;
899 pio_write_vi_base = 0;
901 max_vis = channel_vis;
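/* Worked example (assuming the 8K VI stride and a TX PIO doorbell offset
 * in the first half of the VI page, per the comment above): with 4K host
 * pages and channel_vis = 10, uc_mem_map_size spans nine full VIs plus
 * the first 4K of VI 9, so pio_write_vi_base = 76K / 8K = 9.
 */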
904 /* In case the last attached driver failed to free VIs, do it now */
905 rc = efx_ef10_free_vis(efx);
909 rc = efx_ef10_alloc_vis(efx, min_vis, max_vis);
913 if (nic_data->n_allocated_vis < channel_vis) {
914 netif_info(efx, drv, efx->net_dev,
915 "Could not allocate enough VIs to satisfy RSS"
916 " requirements. Performance may not be optimal.\n");
917 /* We didn't get the VIs to populate our channels.
918 * We could keep what we got but then we'd have more
919 * interrupts than we need.
920 * Instead calculate new max_channels and restart. */
922 efx->max_channels = nic_data->n_allocated_vis;
923 efx->max_tx_channels =
924 nic_data->n_allocated_vis / EFX_TXQ_TYPES;
926 efx_ef10_free_vis(efx);
930 /* If we didn't get enough VIs to map all the PIO buffers, free the PIO buffers. */
933 if (nic_data->n_piobufs &&
934 nic_data->n_allocated_vis <
935 pio_write_vi_base + nic_data->n_piobufs) {
936 netif_dbg(efx, probe, efx->net_dev,
937 "%u VIs are not sufficient to map %u PIO buffers\n",
938 nic_data->n_allocated_vis, nic_data->n_piobufs);
939 efx_ef10_free_piobufs(efx);
942 /* Shrink the original UC mapping of the memory BAR */
943 membase = ioremap_nocache(efx->membase_phys, uc_mem_map_size);
945 netif_err(efx, probe, efx->net_dev,
946 "could not shrink memory BAR to %x\n",
950 iounmap(efx->membase);
951 efx->membase = membase;
953 /* Set up the WC mapping if needed */
954 if (wc_mem_map_size) {
955 nic_data->wc_membase = ioremap_wc(efx->membase_phys +
958 if (!nic_data->wc_membase) {
959 netif_err(efx, probe, efx->net_dev,
960 "could not allocate WC mapping of size %x\n",
964 nic_data->pio_write_vi_base = pio_write_vi_base;
965 nic_data->pio_write_base =
966 nic_data->wc_membase +
967 (pio_write_vi_base * EFX_VI_PAGE_SIZE + ER_DZ_TX_PIOBUF -
970 rc = efx_ef10_link_piobufs(efx);
972 efx_ef10_free_piobufs(efx);
975 netif_dbg(efx, probe, efx->net_dev,
976 "memory BAR at %pa (virtual %p+%x UC, %p+%x WC)\n",
977 &efx->membase_phys, efx->membase, uc_mem_map_size,
978 nic_data->wc_membase, wc_mem_map_size);
983 static int efx_ef10_init_nic(struct efx_nic *efx)
985 struct efx_ef10_nic_data *nic_data = efx->nic_data;
988 if (nic_data->must_check_datapath_caps) {
989 rc = efx_ef10_init_datapath_caps(efx);
992 nic_data->must_check_datapath_caps = false;
995 if (nic_data->must_realloc_vis) {
996 /* We cannot let the number of VIs change now */
997 rc = efx_ef10_alloc_vis(efx, nic_data->n_allocated_vis,
998 nic_data->n_allocated_vis);
1001 nic_data->must_realloc_vis = false;
1004 if (nic_data->must_restore_piobufs && nic_data->n_piobufs) {
1005 rc = efx_ef10_alloc_piobufs(efx, nic_data->n_piobufs);
1007 rc = efx_ef10_link_piobufs(efx);
1009 efx_ef10_free_piobufs(efx);
1012 /* Log an error on failure, but this is non-fatal */
1014 netif_err(efx, drv, efx->net_dev,
1015 "failed to restore PIO buffers (%d)\n", rc);
1016 nic_data->must_restore_piobufs = false;
1019 /* don't fail init if RSS setup doesn't work */
1020 efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
1025 static void efx_ef10_reset_mc_allocations(struct efx_nic *efx)
1027 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1028 #ifdef CONFIG_SFC_SRIOV
1032 /* All our allocations have been reset */
1033 nic_data->must_realloc_vis = true;
1034 nic_data->must_restore_filters = true;
1035 nic_data->must_restore_piobufs = true;
1036 efx_ef10_forget_old_piobufs(efx);
1037 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
1039 /* Driver-created vswitches and vports must be re-created */
1040 nic_data->must_probe_vswitching = true;
1041 nic_data->vport_id = EVB_PORT_ID_ASSIGNED;
1042 #ifdef CONFIG_SFC_SRIOV
1044 for (i = 0; i < efx->vf_count; i++)
1045 nic_data->vf[i].vport_id = 0;
1049 static enum reset_type efx_ef10_map_reset_reason(enum reset_type reason)
1051 if (reason == RESET_TYPE_MC_FAILURE)
1052 return RESET_TYPE_DATAPATH;
1054 return efx_mcdi_map_reset_reason(reason);
1057 static int efx_ef10_map_reset_flags(u32 *flags)
1060 EF10_RESET_PORT = ((ETH_RESET_MAC | ETH_RESET_PHY) <<
1061 ETH_RESET_SHARED_SHIFT),
1062 EF10_RESET_MC = ((ETH_RESET_DMA | ETH_RESET_FILTER |
1063 ETH_RESET_OFFLOAD | ETH_RESET_MAC |
1064 ETH_RESET_PHY | ETH_RESET_MGMT) <<
1065 ETH_RESET_SHARED_SHIFT)
1068 /* We assume for now that our PCI function is permitted to reset everything. */
1072 if ((*flags & EF10_RESET_MC) == EF10_RESET_MC) {
1073 *flags &= ~EF10_RESET_MC;
1074 return RESET_TYPE_WORLD;
1077 if ((*flags & EF10_RESET_PORT) == EF10_RESET_PORT) {
1078 *flags &= ~EF10_RESET_PORT;
1079 return RESET_TYPE_ALL;
1082 /* no invisible reset implemented */
1087 static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
1089 int rc = efx_mcdi_reset(efx, reset_type);
1091 /* Unprivileged functions return -EPERM, but need to return success
1092 * here so that the datapath is brought back up. */
1094 if (reset_type == RESET_TYPE_WORLD && rc == -EPERM)
1097 /* If it was a port reset, trigger reallocation of MC resources.
1098 * Note that on an MC reset nothing needs to be done now because we'll
1099 * detect the MC reset later and handle it then.
1100 * For an FLR, we never get an MC reset event, but the MC has reset all
1101 * resources assigned to us, so we have to trigger reallocation now. */
1103 if ((reset_type == RESET_TYPE_ALL ||
1104 reset_type == RESET_TYPE_MCDI_TIMEOUT) && !rc)
1105 efx_ef10_reset_mc_allocations(efx);
1109 #define EF10_DMA_STAT(ext_name, mcdi_name) \
1110 [EF10_STAT_ ## ext_name] = \
1111 { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
1112 #define EF10_DMA_INVIS_STAT(int_name, mcdi_name) \
1113 [EF10_STAT_ ## int_name] = \
1114 { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
1115 #define EF10_OTHER_STAT(ext_name) \
1116 [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
1117 #define GENERIC_SW_STAT(ext_name) \
1118 [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
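/* As a sketch of the expansion, EF10_DMA_STAT(port_tx_bytes, TX_BYTES)
 * above becomes:
 *
 *	[EF10_STAT_port_tx_bytes] =
 *		{ "port_tx_bytes", 64, 8 * MC_CMD_MAC_TX_BYTES }
 *
 * i.e. a named 64-bit counter whose position in the stats DMA buffer is
 * the firmware's qword index scaled to a byte offset by the factor of 8.
 */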
1120 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
1121 EF10_DMA_STAT(port_tx_bytes, TX_BYTES),
1122 EF10_DMA_STAT(port_tx_packets, TX_PKTS),
1123 EF10_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
1124 EF10_DMA_STAT(port_tx_control, TX_CONTROL_PKTS),
1125 EF10_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
1126 EF10_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
1127 EF10_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
1128 EF10_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
1129 EF10_DMA_STAT(port_tx_64, TX_64_PKTS),
1130 EF10_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
1131 EF10_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
1132 EF10_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
1133 EF10_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
1134 EF10_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
1135 EF10_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
1136 EF10_DMA_STAT(port_rx_bytes, RX_BYTES),
1137 EF10_DMA_INVIS_STAT(port_rx_bytes_minus_good_bytes, RX_BAD_BYTES),
1138 EF10_OTHER_STAT(port_rx_good_bytes),
1139 EF10_OTHER_STAT(port_rx_bad_bytes),
1140 EF10_DMA_STAT(port_rx_packets, RX_PKTS),
1141 EF10_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
1142 EF10_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
1143 EF10_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
1144 EF10_DMA_STAT(port_rx_control, RX_CONTROL_PKTS),
1145 EF10_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
1146 EF10_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
1147 EF10_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
1148 EF10_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
1149 EF10_DMA_STAT(port_rx_64, RX_64_PKTS),
1150 EF10_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
1151 EF10_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
1152 EF10_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
1153 EF10_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
1154 EF10_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
1155 EF10_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
1156 EF10_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
1157 EF10_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
1158 EF10_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
1159 EF10_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
1160 EF10_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
1161 EF10_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
1162 GENERIC_SW_STAT(rx_nodesc_trunc),
1163 GENERIC_SW_STAT(rx_noskb_drops),
1164 EF10_DMA_STAT(port_rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
1165 EF10_DMA_STAT(port_rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
1166 EF10_DMA_STAT(port_rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
1167 EF10_DMA_STAT(port_rx_pm_discard_vfifo_full, PM_DISCARD_VFIFO_FULL),
1168 EF10_DMA_STAT(port_rx_pm_trunc_qbb, PM_TRUNC_QBB),
1169 EF10_DMA_STAT(port_rx_pm_discard_qbb, PM_DISCARD_QBB),
1170 EF10_DMA_STAT(port_rx_pm_discard_mapping, PM_DISCARD_MAPPING),
1171 EF10_DMA_STAT(port_rx_dp_q_disabled_packets, RXDP_Q_DISABLED_PKTS),
1172 EF10_DMA_STAT(port_rx_dp_di_dropped_packets, RXDP_DI_DROPPED_PKTS),
1173 EF10_DMA_STAT(port_rx_dp_streaming_packets, RXDP_STREAMING_PKTS),
1174 EF10_DMA_STAT(port_rx_dp_hlb_fetch, RXDP_HLB_FETCH_CONDITIONS),
1175 EF10_DMA_STAT(port_rx_dp_hlb_wait, RXDP_HLB_WAIT_CONDITIONS),
1176 EF10_DMA_STAT(rx_unicast, VADAPTER_RX_UNICAST_PACKETS),
1177 EF10_DMA_STAT(rx_unicast_bytes, VADAPTER_RX_UNICAST_BYTES),
1178 EF10_DMA_STAT(rx_multicast, VADAPTER_RX_MULTICAST_PACKETS),
1179 EF10_DMA_STAT(rx_multicast_bytes, VADAPTER_RX_MULTICAST_BYTES),
1180 EF10_DMA_STAT(rx_broadcast, VADAPTER_RX_BROADCAST_PACKETS),
1181 EF10_DMA_STAT(rx_broadcast_bytes, VADAPTER_RX_BROADCAST_BYTES),
1182 EF10_DMA_STAT(rx_bad, VADAPTER_RX_BAD_PACKETS),
1183 EF10_DMA_STAT(rx_bad_bytes, VADAPTER_RX_BAD_BYTES),
1184 EF10_DMA_STAT(rx_overflow, VADAPTER_RX_OVERFLOW),
1185 EF10_DMA_STAT(tx_unicast, VADAPTER_TX_UNICAST_PACKETS),
1186 EF10_DMA_STAT(tx_unicast_bytes, VADAPTER_TX_UNICAST_BYTES),
1187 EF10_DMA_STAT(tx_multicast, VADAPTER_TX_MULTICAST_PACKETS),
1188 EF10_DMA_STAT(tx_multicast_bytes, VADAPTER_TX_MULTICAST_BYTES),
1189 EF10_DMA_STAT(tx_broadcast, VADAPTER_TX_BROADCAST_PACKETS),
1190 EF10_DMA_STAT(tx_broadcast_bytes, VADAPTER_TX_BROADCAST_BYTES),
1191 EF10_DMA_STAT(tx_bad, VADAPTER_TX_BAD_PACKETS),
1192 EF10_DMA_STAT(tx_bad_bytes, VADAPTER_TX_BAD_BYTES),
1193 EF10_DMA_STAT(tx_overflow, VADAPTER_TX_OVERFLOW),
1196 #define HUNT_COMMON_STAT_MASK ((1ULL << EF10_STAT_port_tx_bytes) | \
1197 (1ULL << EF10_STAT_port_tx_packets) | \
1198 (1ULL << EF10_STAT_port_tx_pause) | \
1199 (1ULL << EF10_STAT_port_tx_unicast) | \
1200 (1ULL << EF10_STAT_port_tx_multicast) | \
1201 (1ULL << EF10_STAT_port_tx_broadcast) | \
1202 (1ULL << EF10_STAT_port_rx_bytes) | \
1204 EF10_STAT_port_rx_bytes_minus_good_bytes) | \
1205 (1ULL << EF10_STAT_port_rx_good_bytes) | \
1206 (1ULL << EF10_STAT_port_rx_bad_bytes) | \
1207 (1ULL << EF10_STAT_port_rx_packets) | \
1208 (1ULL << EF10_STAT_port_rx_good) | \
1209 (1ULL << EF10_STAT_port_rx_bad) | \
1210 (1ULL << EF10_STAT_port_rx_pause) | \
1211 (1ULL << EF10_STAT_port_rx_control) | \
1212 (1ULL << EF10_STAT_port_rx_unicast) | \
1213 (1ULL << EF10_STAT_port_rx_multicast) | \
1214 (1ULL << EF10_STAT_port_rx_broadcast) | \
1215 (1ULL << EF10_STAT_port_rx_lt64) | \
1216 (1ULL << EF10_STAT_port_rx_64) | \
1217 (1ULL << EF10_STAT_port_rx_65_to_127) | \
1218 (1ULL << EF10_STAT_port_rx_128_to_255) | \
1219 (1ULL << EF10_STAT_port_rx_256_to_511) | \
1220 (1ULL << EF10_STAT_port_rx_512_to_1023) |\
1221 (1ULL << EF10_STAT_port_rx_1024_to_15xx) |\
1222 (1ULL << EF10_STAT_port_rx_15xx_to_jumbo) |\
1223 (1ULL << EF10_STAT_port_rx_gtjumbo) | \
1224 (1ULL << EF10_STAT_port_rx_bad_gtjumbo) |\
1225 (1ULL << EF10_STAT_port_rx_overflow) | \
1226 (1ULL << EF10_STAT_port_rx_nodesc_drops) |\
1227 (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
1228 (1ULL << GENERIC_STAT_rx_noskb_drops))
1230 /* These statistics are only provided by the 10G MAC. For a 10G/40G
1231 * switchable port we do not expose these because they might not
1232 * include all the packets they should. */
1234 #define HUNT_10G_ONLY_STAT_MASK ((1ULL << EF10_STAT_port_tx_control) | \
1235 (1ULL << EF10_STAT_port_tx_lt64) | \
1236 (1ULL << EF10_STAT_port_tx_64) | \
1237 (1ULL << EF10_STAT_port_tx_65_to_127) |\
1238 (1ULL << EF10_STAT_port_tx_128_to_255) |\
1239 (1ULL << EF10_STAT_port_tx_256_to_511) |\
1240 (1ULL << EF10_STAT_port_tx_512_to_1023) |\
1241 (1ULL << EF10_STAT_port_tx_1024_to_15xx) |\
1242 (1ULL << EF10_STAT_port_tx_15xx_to_jumbo))
1244 /* These statistics are only provided by the 40G MAC. For a 10G/40G
1245 * switchable port we do expose these because the errors will otherwise not be counted. */
1248 #define HUNT_40G_EXTRA_STAT_MASK ((1ULL << EF10_STAT_port_rx_align_error) |\
1249 (1ULL << EF10_STAT_port_rx_length_error))
1251 /* These statistics are only provided if the firmware supports the
1252 * capability PM_AND_RXDP_COUNTERS. */
1254 #define HUNT_PM_AND_RXDP_STAT_MASK ( \
1255 (1ULL << EF10_STAT_port_rx_pm_trunc_bb_overflow) | \
1256 (1ULL << EF10_STAT_port_rx_pm_discard_bb_overflow) | \
1257 (1ULL << EF10_STAT_port_rx_pm_trunc_vfifo_full) | \
1258 (1ULL << EF10_STAT_port_rx_pm_discard_vfifo_full) | \
1259 (1ULL << EF10_STAT_port_rx_pm_trunc_qbb) | \
1260 (1ULL << EF10_STAT_port_rx_pm_discard_qbb) | \
1261 (1ULL << EF10_STAT_port_rx_pm_discard_mapping) | \
1262 (1ULL << EF10_STAT_port_rx_dp_q_disabled_packets) | \
1263 (1ULL << EF10_STAT_port_rx_dp_di_dropped_packets) | \
1264 (1ULL << EF10_STAT_port_rx_dp_streaming_packets) | \
1265 (1ULL << EF10_STAT_port_rx_dp_hlb_fetch) | \
1266 (1ULL << EF10_STAT_port_rx_dp_hlb_wait))
1268 static u64 efx_ef10_raw_stat_mask(struct efx_nic *efx)
1270 u64 raw_mask = HUNT_COMMON_STAT_MASK;
1271 u32 port_caps = efx_mcdi_phy_get_caps(efx);
1272 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1274 if (!(efx->mcdi->fn_flags &
1275 1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_LINKCTRL))
1278 if (port_caps & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
1279 raw_mask |= HUNT_40G_EXTRA_STAT_MASK;
1281 raw_mask |= HUNT_10G_ONLY_STAT_MASK;
1283 if (nic_data->datapath_caps &
1284 (1 << MC_CMD_GET_CAPABILITIES_OUT_PM_AND_RXDP_COUNTERS_LBN))
1285 raw_mask |= HUNT_PM_AND_RXDP_STAT_MASK;
1290 static void efx_ef10_get_stat_mask(struct efx_nic *efx, unsigned long *mask)
1292 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1295 raw_mask[0] = efx_ef10_raw_stat_mask(efx);
1297 /* Only show vadaptor stats when EVB capability is present */
1298 if (nic_data->datapath_caps &
1299 (1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN)) {
1300 raw_mask[0] |= ~((1ULL << EF10_STAT_rx_unicast) - 1);
1301 raw_mask[1] = (1ULL << (EF10_STAT_COUNT - 63)) - 1;
1306 #if BITS_PER_LONG == 64
1307 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 2);
1308 mask[0] = raw_mask[0];
1309 mask[1] = raw_mask[1];
1311 BUILD_BUG_ON(BITS_TO_LONGS(EF10_STAT_COUNT) != 3);
1312 mask[0] = raw_mask[0] & 0xffffffff;
1313 mask[1] = raw_mask[0] >> 32;
1314 mask[2] = raw_mask[1] & 0xffffffff;
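/* Sketch of the split on 32-bit longs: bit 35 of raw_mask[0] ends up as
 * bit 3 of mask[1], since mask[1] = raw_mask[0] >> 32 in this branch.
 */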
1318 static size_t efx_ef10_describe_stats(struct efx_nic *efx, u8 *names)
1320 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1322 efx_ef10_get_stat_mask(efx, mask);
1323 return efx_nic_describe_stats(efx_ef10_stat_desc, EF10_STAT_COUNT,
1327 static size_t efx_ef10_update_stats_common(struct efx_nic *efx, u64 *full_stats,
1328 struct rtnl_link_stats64 *core_stats)
1330 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1331 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1332 u64 *stats = nic_data->stats;
1333 size_t stats_count = 0, index;
1335 efx_ef10_get_stat_mask(efx, mask);
1338 for_each_set_bit(index, mask, EF10_STAT_COUNT) {
1339 if (efx_ef10_stat_desc[index].name) {
1340 *full_stats++ = stats[index];
1349 if (nic_data->datapath_caps &
1350 1 << MC_CMD_GET_CAPABILITIES_OUT_EVB_LBN) {
1351 /* Use vadaptor stats. */
1352 core_stats->rx_packets = stats[EF10_STAT_rx_unicast] +
1353 stats[EF10_STAT_rx_multicast] +
1354 stats[EF10_STAT_rx_broadcast];
1355 core_stats->tx_packets = stats[EF10_STAT_tx_unicast] +
1356 stats[EF10_STAT_tx_multicast] +
1357 stats[EF10_STAT_tx_broadcast];
1358 core_stats->rx_bytes = stats[EF10_STAT_rx_unicast_bytes] +
1359 stats[EF10_STAT_rx_multicast_bytes] +
1360 stats[EF10_STAT_rx_broadcast_bytes];
1361 core_stats->tx_bytes = stats[EF10_STAT_tx_unicast_bytes] +
1362 stats[EF10_STAT_tx_multicast_bytes] +
1363 stats[EF10_STAT_tx_broadcast_bytes];
1364 core_stats->rx_dropped = stats[GENERIC_STAT_rx_nodesc_trunc] +
1365 stats[GENERIC_STAT_rx_noskb_drops];
1366 core_stats->multicast = stats[EF10_STAT_rx_multicast];
1367 core_stats->rx_crc_errors = stats[EF10_STAT_rx_bad];
1368 core_stats->rx_fifo_errors = stats[EF10_STAT_rx_overflow];
1369 core_stats->rx_errors = core_stats->rx_crc_errors;
1370 core_stats->tx_errors = stats[EF10_STAT_tx_bad];
1372 /* Use port stats. */
1373 core_stats->rx_packets = stats[EF10_STAT_port_rx_packets];
1374 core_stats->tx_packets = stats[EF10_STAT_port_tx_packets];
1375 core_stats->rx_bytes = stats[EF10_STAT_port_rx_bytes];
1376 core_stats->tx_bytes = stats[EF10_STAT_port_tx_bytes];
1377 core_stats->rx_dropped = stats[EF10_STAT_port_rx_nodesc_drops] +
1378 stats[GENERIC_STAT_rx_nodesc_trunc] +
1379 stats[GENERIC_STAT_rx_noskb_drops];
1380 core_stats->multicast = stats[EF10_STAT_port_rx_multicast];
1381 core_stats->rx_length_errors =
1382 stats[EF10_STAT_port_rx_gtjumbo] +
1383 stats[EF10_STAT_port_rx_length_error];
1384 core_stats->rx_crc_errors = stats[EF10_STAT_port_rx_bad];
1385 core_stats->rx_frame_errors =
1386 stats[EF10_STAT_port_rx_align_error];
1387 core_stats->rx_fifo_errors = stats[EF10_STAT_port_rx_overflow];
1388 core_stats->rx_errors = (core_stats->rx_length_errors +
1389 core_stats->rx_crc_errors +
1390 core_stats->rx_frame_errors);
1396 static int efx_ef10_try_update_nic_stats_pf(struct efx_nic *efx)
1398 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1399 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1400 __le64 generation_start, generation_end;
1401 u64 *stats = nic_data->stats;
1404 efx_ef10_get_stat_mask(efx, mask);
1406 dma_stats = efx->stats_buffer.addr;
1407 nic_data = efx->nic_data;
1409 generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
1410 if (generation_end == EFX_MC_STATS_GENERATION_INVALID)
1413 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
1414 stats, efx->stats_buffer.addr, false);
1416 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
1417 if (generation_end != generation_start)
1420 /* Update derived statistics */
1421 efx_nic_fix_nodesc_drop_stat(efx,
1422 &stats[EF10_STAT_port_rx_nodesc_drops]);
1423 stats[EF10_STAT_port_rx_good_bytes] =
1424 stats[EF10_STAT_port_rx_bytes] -
1425 stats[EF10_STAT_port_rx_bytes_minus_good_bytes];
1426 efx_update_diff_stat(&stats[EF10_STAT_port_rx_bad_bytes],
1427 stats[EF10_STAT_port_rx_bytes_minus_good_bytes]);
1428 efx_update_sw_stats(efx, stats);
1433 static size_t efx_ef10_update_stats_pf(struct efx_nic *efx, u64 *full_stats,
1434 struct rtnl_link_stats64 *core_stats)
1438 /* If we're unlucky enough to read statistics during the DMA, wait
1439 * up to 10ms for it to finish (typically takes <500us). */
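/* Each failed attempt inside the loop is followed by a short delay
 * (udelay(100) in the driver), so 100 retries bound the wait at ~10ms.
 */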
1441 for (retry = 0; retry < 100; ++retry) {
1442 if (efx_ef10_try_update_nic_stats_pf(efx) == 0)
1447 return efx_ef10_update_stats_common(efx, full_stats, core_stats);
1450 static int efx_ef10_try_update_nic_stats_vf(struct efx_nic *efx)
1452 MCDI_DECLARE_BUF(inbuf, MC_CMD_MAC_STATS_IN_LEN);
1453 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1454 DECLARE_BITMAP(mask, EF10_STAT_COUNT);
1455 __le64 generation_start, generation_end;
1456 u64 *stats = nic_data->stats;
1457 u32 dma_len = MC_CMD_MAC_NSTATS * sizeof(u64);
1458 struct efx_buffer stats_buf;
1462 spin_unlock_bh(&efx->stats_lock);
1464 if (in_interrupt()) {
1465 /* If in atomic context, cannot update stats. Just update the
1466 * software stats and return so the caller can continue. */
1468 spin_lock_bh(&efx->stats_lock);
1469 efx_update_sw_stats(efx, stats);
1473 efx_ef10_get_stat_mask(efx, mask);
1475 rc = efx_nic_alloc_buffer(efx, &stats_buf, dma_len, GFP_ATOMIC);
1477 spin_lock_bh(&efx->stats_lock);
1481 dma_stats = stats_buf.addr;
1482 dma_stats[MC_CMD_MAC_GENERATION_END] = EFX_MC_STATS_GENERATION_INVALID;
1484 MCDI_SET_QWORD(inbuf, MAC_STATS_IN_DMA_ADDR, stats_buf.dma_addr);
1485 MCDI_POPULATE_DWORD_1(inbuf, MAC_STATS_IN_CMD,
1486 MAC_STATS_IN_DMA, 1);
1487 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_DMA_LEN, dma_len);
1488 MCDI_SET_DWORD(inbuf, MAC_STATS_IN_PORT_ID, EVB_PORT_ID_ASSIGNED);
1490 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_MAC_STATS, inbuf, sizeof(inbuf),
1492 spin_lock_bh(&efx->stats_lock);
1494 /* Expect ENOENT if DMA queues have not been set up */
1495 if (rc != -ENOENT || atomic_read(&efx->active_queues))
1496 efx_mcdi_display_error(efx, MC_CMD_MAC_STATS,
1497 sizeof(inbuf), NULL, 0, rc);
1501 generation_end = dma_stats[MC_CMD_MAC_GENERATION_END];
1502 if (generation_end == EFX_MC_STATS_GENERATION_INVALID) {
1507 efx_nic_update_stats(efx_ef10_stat_desc, EF10_STAT_COUNT, mask,
1508 stats, stats_buf.addr, false);
1510 generation_start = dma_stats[MC_CMD_MAC_GENERATION_START];
1511 if (generation_end != generation_start) {
1516 efx_update_sw_stats(efx, stats);
1518 efx_nic_free_buffer(efx, &stats_buf);
1522 static size_t efx_ef10_update_stats_vf(struct efx_nic *efx, u64 *full_stats,
1523 struct rtnl_link_stats64 *core_stats)
1525 if (efx_ef10_try_update_nic_stats_vf(efx))
1528 return efx_ef10_update_stats_common(efx, full_stats, core_stats);
1531 static void efx_ef10_push_irq_moderation(struct efx_channel *channel)
1533 struct efx_nic *efx = channel->efx;
1534 unsigned int mode, value;
1535 efx_dword_t timer_cmd;
1537 if (channel->irq_moderation) {
1539 value = channel->irq_moderation - 1;
1545 if (EFX_EF10_WORKAROUND_35388(efx)) {
1546 EFX_POPULATE_DWORD_3(timer_cmd, ERF_DD_EVQ_IND_TIMER_FLAGS,
1547 EFE_DD_EVQ_IND_TIMER_FLAGS,
1548 ERF_DD_EVQ_IND_TIMER_MODE, mode,
1549 ERF_DD_EVQ_IND_TIMER_VAL, value);
1550 efx_writed_page(efx, &timer_cmd, ER_DD_EVQ_INDIRECT,
1553 EFX_POPULATE_DWORD_2(timer_cmd, ERF_DZ_TC_TIMER_MODE, mode,
1554 ERF_DZ_TC_TIMER_VAL, value);
1555 efx_writed_page(efx, &timer_cmd, ER_DZ_EVQ_TMR,
1560 static void efx_ef10_get_wol_vf(struct efx_nic *efx,
1561 struct ethtool_wolinfo *wol) {}
1563 static int efx_ef10_set_wol_vf(struct efx_nic *efx, u32 type)
1568 static void efx_ef10_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol)
1572 memset(&wol->sopass, 0, sizeof(wol->sopass));
1575 static int efx_ef10_set_wol(struct efx_nic *efx, u32 type)
1582 static void efx_ef10_mcdi_request(struct efx_nic *efx,
1583 const efx_dword_t *hdr, size_t hdr_len,
1584 const efx_dword_t *sdu, size_t sdu_len)
1586 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1587 u8 *pdu = nic_data->mcdi_buf.addr;
1589 memcpy(pdu, hdr, hdr_len);
1590 memcpy(pdu + hdr_len, sdu, sdu_len);
1593 /* The hardware provides 'low' and 'high' (doorbell) registers
1594 * for passing the 64-bit address of an MCDI request to
1595 * firmware. However the dwords are swapped by firmware. The
1596 * least significant bits of the doorbell are then 0 for all
1597 * MCDI requests due to alignment. */
1599 _efx_writed(efx, cpu_to_le32((u64)nic_data->mcdi_buf.dma_addr >> 32),
1601 _efx_writed(efx, cpu_to_le32((u32)nic_data->mcdi_buf.dma_addr),
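/* That is, the high dword of the DMA address is written to the 'low'
 * doorbell register and the low dword to the 'high' one, so the
 * firmware's dword swap reassembles the 64-bit address correctly.
 */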
1605 static bool efx_ef10_mcdi_poll_response(struct efx_nic *efx)
1607 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1608 const efx_dword_t hdr = *(const efx_dword_t *)nic_data->mcdi_buf.addr;
1611 return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
1615 efx_ef10_mcdi_read_response(struct efx_nic *efx, efx_dword_t *outbuf,
1616 size_t offset, size_t outlen)
1618 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1619 const u8 *pdu = nic_data->mcdi_buf.addr;
1621 memcpy(outbuf, pdu + offset, outlen);
1624 static void efx_ef10_mcdi_reboot_detected(struct efx_nic *efx)
1626 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1628 /* All our allocations have been reset */
1629 efx_ef10_reset_mc_allocations(efx);
1631 /* The datapath firmware might have been changed */
1632 nic_data->must_check_datapath_caps = true;
1634 /* MAC statistics have been cleared on the NIC; clear the local
1635 * statistic that we update with efx_update_diff_stat(). */
1637 nic_data->stats[EF10_STAT_port_rx_bad_bytes] = 0;
1640 static int efx_ef10_mcdi_poll_reboot(struct efx_nic *efx)
1642 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1645 rc = efx_ef10_get_warm_boot_count(efx);
1647 /* The firmware is presumably in the process of
1648 * rebooting. However, we are supposed to report each
1649 * reboot just once, so we must only do that once we
1650 * can read and store the updated warm boot count. */
1655 if (rc == nic_data->warm_boot_count)
1658 nic_data->warm_boot_count = rc;
1659 efx_ef10_mcdi_reboot_detected(efx);
1664 /* Handle an MSI interrupt
1666 * Handle an MSI hardware interrupt. This routine schedules event
1667 * queue processing. No interrupt acknowledgement cycle is necessary.
1668 * Also, we never need to check that the interrupt is for us, since
1669 * MSI interrupts cannot be shared. */
1671 static irqreturn_t efx_ef10_msi_interrupt(int irq, void *dev_id)
1673 struct efx_msi_context *context = dev_id;
1674 struct efx_nic *efx = context->efx;
1676 netif_vdbg(efx, intr, efx->net_dev,
1677 "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());
1679 if (likely(ACCESS_ONCE(efx->irq_soft_enabled))) {
1680 /* Note test interrupts */
1681 if (context->index == efx->irq_level)
1682 efx->last_irq_cpu = raw_smp_processor_id();
1684 /* Schedule processing of the channel */
1685 efx_schedule_channel_irq(efx->channel[context->index]);
1691 static irqreturn_t efx_ef10_legacy_interrupt(int irq, void *dev_id)
1693 struct efx_nic *efx = dev_id;
1694 bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
1695 struct efx_channel *channel;
1699 /* Read the ISR which also ACKs the interrupts */
1700 efx_readd(efx, &reg, ER_DZ_BIU_INT_ISR);
1701 queues = EFX_DWORD_FIELD(reg, ERF_DZ_ISR_REG);
1706 if (likely(soft_enabled)) {
1707 /* Note test interrupts */
1708 if (queues & (1U << efx->irq_level))
1709 efx->last_irq_cpu = raw_smp_processor_id();
1711 efx_for_each_channel(channel, efx) {
1713 efx_schedule_channel_irq(channel);
1718 netif_vdbg(efx, intr, efx->net_dev,
1719 "IRQ %d on CPU %d status " EFX_DWORD_FMT "\n",
1720 irq, raw_smp_processor_id(), EFX_DWORD_VAL(reg));
1725 static void efx_ef10_irq_test_generate(struct efx_nic *efx)
1727 MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);
1729 BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);
1731 MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
1732 (void) efx_mcdi_rpc(efx, MC_CMD_TRIGGER_INTERRUPT,
1733 inbuf, sizeof(inbuf), NULL, 0, NULL);
1736 static int efx_ef10_tx_probe(struct efx_tx_queue *tx_queue)
1738 return efx_nic_alloc_buffer(tx_queue->efx, &tx_queue->txd.buf,
1739 (tx_queue->ptr_mask + 1) *
1740 sizeof(efx_qword_t),
1744 /* This writes to the TX_DESC_WPTR and also pushes data */
1745 static inline void efx_ef10_push_tx_desc(struct efx_tx_queue *tx_queue,
1746 const efx_qword_t *txd)
1748 unsigned int write_ptr;
1751 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1752 EFX_POPULATE_OWORD_1(reg, ERF_DZ_TX_DESC_WPTR, write_ptr);
1753 reg.qword[0] = *txd;
1754 efx_writeo_page(tx_queue->efx, &reg,
1755 ER_DZ_TX_DESC_UPD, tx_queue->queue);
1758 static void efx_ef10_tx_init(struct efx_tx_queue *tx_queue)
1760 MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 / EFX_BUF_SIZE));
1762 bool csum_offload = tx_queue->queue & EFX_TXQ_TYPE_OFFLOAD;
1763 size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
1764 struct efx_channel *channel = tx_queue->channel;
1765 struct efx_nic *efx = tx_queue->efx;
1766 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1768 dma_addr_t dma_addr;
1772 BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);
1774 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
1775 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
1776 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->queue);
1777 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
1778 MCDI_POPULATE_DWORD_2(inbuf, INIT_TXQ_IN_FLAGS,
1779 INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !csum_offload,
1780 INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload);
1781 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
1782 MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, nic_data->vport_id);
1784 dma_addr = tx_queue->txd.buf.dma_addr;
1786 netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
1787 tx_queue->queue, entries, (u64)dma_addr);
1789 for (i = 0; i < entries; ++i) {
1790 MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
1791 dma_addr += EFX_BUF_SIZE;
1794 inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);
1796 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
1801 /* A previous user of this TX queue might have set us up the
1802 * bomb by writing a descriptor to the TX push collector but
1803 * not the doorbell. (Each collector belongs to a port, not a
1804 * queue or function, so cannot easily be reset.) We must
1805 * attempt to push a no-op descriptor in its place. */
1807 tx_queue->buffer[0].flags = EFX_TX_BUF_OPTION;
1808 tx_queue->insert_count = 1;
1809 txd = efx_tx_desc(tx_queue, 0);
1810 EFX_POPULATE_QWORD_4(*txd,
1811 ESF_DZ_TX_DESC_IS_OPT, true,
1812 ESF_DZ_TX_OPTION_TYPE,
1813 ESE_DZ_TX_OPTION_DESC_CRC_CSUM,
1814 ESF_DZ_TX_OPTION_UDP_TCP_CSUM, csum_offload,
1815 ESF_DZ_TX_OPTION_IP_CSUM, csum_offload);
1816 tx_queue->write_count = 1;
1818 efx_ef10_push_tx_desc(tx_queue, txd);
1823 netdev_WARN(efx->net_dev, "failed to initialise TXQ %d\n",
1827 static void efx_ef10_tx_fini(struct efx_tx_queue *tx_queue)
1829 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
1830 MCDI_DECLARE_BUF_ERR(outbuf);
1831 struct efx_nic *efx = tx_queue->efx;
1835 MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
1838 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
1839 outbuf, sizeof(outbuf), &outlen);
1841 if (rc && rc != -EALREADY)
1847 efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
1848 outbuf, outlen, rc);
1851 static void efx_ef10_tx_remove(struct efx_tx_queue *tx_queue)
1853 efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
1856 /* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
1857 static inline void efx_ef10_notify_tx_desc(struct efx_tx_queue *tx_queue)
1859 unsigned int write_ptr;
1862 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1863 EFX_POPULATE_DWORD_1(reg, ERF_DZ_TX_DESC_WPTR_DWORD, write_ptr);
1864 efx_writed_page(tx_queue->efx, &reg,
1865 ER_DZ_TX_DESC_UPD_DWORD, tx_queue->queue);
1868 static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
1870 unsigned int old_write_count = tx_queue->write_count;
1871 struct efx_tx_buffer *buffer;
1872 unsigned int write_ptr;
1875 tx_queue->xmit_more_available = false;
1876 if (unlikely(tx_queue->write_count == tx_queue->insert_count))
1880 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
1881 buffer = &tx_queue->buffer[write_ptr];
1882 txd = efx_tx_desc(tx_queue, write_ptr);
1883 ++tx_queue->write_count;
1885 /* Create TX descriptor ring entry */
1886 if (buffer->flags & EFX_TX_BUF_OPTION) {
1887 *txd = buffer->option;
1889 BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
1890 EFX_POPULATE_QWORD_3(*txd, ESF_DZ_TX_KER_CONT,
1893 buffer->flags & EFX_TX_BUF_CONT,
1894 ESF_DZ_TX_KER_BYTE_CNT, buffer->len,
1895 ESF_DZ_TX_KER_BUF_ADDR, buffer->dma_addr);
1897 } while (tx_queue->write_count != tx_queue->insert_count);
1899 wmb(); /* Ensure descriptors are written before they are fetched */
1901 if (efx_nic_may_push_tx_desc(tx_queue, old_write_count)) {
1902 txd = efx_tx_desc(tx_queue,
1903 old_write_count & tx_queue->ptr_mask);
1904 efx_ef10_push_tx_desc(tx_queue, txd);
1907 efx_ef10_notify_tx_desc(tx_queue);
1911 static int efx_ef10_alloc_rss_context(struct efx_nic *efx, u32 *context,
1912 bool exclusive, unsigned *context_size)
1914 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_ALLOC_IN_LEN);
1915 MCDI_DECLARE_BUF(outbuf, MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN);
1916 struct efx_ef10_nic_data *nic_data = efx->nic_data;
1919 u32 alloc_type = exclusive ?
1920 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_EXCLUSIVE :
1921 MC_CMD_RSS_CONTEXT_ALLOC_IN_TYPE_SHARED;
1922 unsigned rss_spread = exclusive ?
1924 min(rounddown_pow_of_two(efx->rss_spread),
1925 EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE);
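/* e.g. for a shared context an rss_spread of 48 rounds down to 32 here,
 * since rounddown_pow_of_two(48) == 32, and larger spreads are capped at
 * EFX_EF10_MAX_SHARED_RSS_CONTEXT_SIZE (64).
 */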
1927 if (!exclusive && rss_spread == 1) {
1928 *context = EFX_EF10_RSS_CONTEXT_INVALID;
1934 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_UPSTREAM_PORT_ID,
1935 nic_data->vport_id);
1936 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_TYPE, alloc_type);
1937 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_ALLOC_IN_NUM_QUEUES, rss_spread);
1939 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_ALLOC, inbuf, sizeof(inbuf),
1940 outbuf, sizeof(outbuf), &outlen);
1944 if (outlen < MC_CMD_RSS_CONTEXT_ALLOC_OUT_LEN)
1947 *context = MCDI_DWORD(outbuf, RSS_CONTEXT_ALLOC_OUT_RSS_CONTEXT_ID);
1950 *context_size = rss_spread;
1955 static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
1957 MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_FREE_IN_LEN);
1960 MCDI_SET_DWORD(inbuf, RSS_CONTEXT_FREE_IN_RSS_CONTEXT_ID,
1963 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_FREE, inbuf, sizeof(inbuf),
1968 static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
1969 const u32 *rx_indir_table)
1971 MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
1972 MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
1975 MCDI_SET_DWORD(tablebuf, RSS_CONTEXT_SET_TABLE_IN_RSS_CONTEXT_ID,
1977 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
1978 MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
1980 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
1982 RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
1983 (u8) rx_indir_table[i];
1985 rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_TABLE, tablebuf,
1986 sizeof(tablebuf), NULL, 0, NULL);
1990 MCDI_SET_DWORD(keybuf, RSS_CONTEXT_SET_KEY_IN_RSS_CONTEXT_ID,
1992 BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
1993 MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
1994 for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
1995 MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
1996 efx->rx_hash_key[i];
1998 return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
1999 sizeof(keybuf), NULL, 0, NULL);
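/* A minimal sketch of what was just programmed: each of the 128
 * indirection-table entries holds a queue index below rss_spread; the
 * default layout from ethtool_rxfh_indir_default() is simply
 * i % rss_spread for entry i.
 */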
2002 static void efx_ef10_rx_free_indir_table(struct efx_nic *efx)
2004 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2006 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2007 efx_ef10_free_rss_context(efx, nic_data->rx_rss_context);
2008 nic_data->rx_rss_context = EFX_EF10_RSS_CONTEXT_INVALID;
2011 static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
2012 unsigned *context_size)
2014 u32 new_rx_rss_context;
2015 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2016 int rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
2017 false, context_size);
2019 if (rc != 0)
2020 return rc;
2022 nic_data->rx_rss_context = new_rx_rss_context;
2023 nic_data->rx_rss_context_exclusive = false;
2024 efx_set_default_rx_indir_table(efx);
2025 return 0;
2026 }
2028 static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
2029 const u32 *rx_indir_table)
2031 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2032 int rc;
2033 u32 new_rx_rss_context;
2035 if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID ||
2036 !nic_data->rx_rss_context_exclusive) {
2037 rc = efx_ef10_alloc_rss_context(efx, &new_rx_rss_context,
2038 true, NULL);
2039 if (rc == -EOPNOTSUPP)
2040 return rc;
2041 else if (rc != 0)
2042 goto fail1;
2043 } else {
2044 new_rx_rss_context = nic_data->rx_rss_context;
2045 }
2047 rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
2048 rx_indir_table);
2049 if (rc != 0)
2050 goto fail2;
2052 if (nic_data->rx_rss_context != new_rx_rss_context)
2053 efx_ef10_rx_free_indir_table(efx);
2054 nic_data->rx_rss_context = new_rx_rss_context;
2055 nic_data->rx_rss_context_exclusive = true;
2056 if (rx_indir_table != efx->rx_indir_table)
2057 memcpy(efx->rx_indir_table, rx_indir_table,
2058 sizeof(efx->rx_indir_table));
2059 return 0;
2061 fail2:
2062 if (new_rx_rss_context != nic_data->rx_rss_context)
2063 efx_ef10_free_rss_context(efx, new_rx_rss_context);
2064 fail1:
2065 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2066 return rc;
2067 }
2069 static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
2070 const u32 *rx_indir_table)
2071 {
2072 int rc;
2074 if (efx->rss_spread == 1)
2075 return 0;
2077 rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
2079 if (rc == -ENOBUFS && !user) {
2080 unsigned context_size;
2081 bool mismatch = false;
2082 size_t i;
2084 for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table) && !mismatch;
2085 i++)
2086 mismatch = rx_indir_table[i] !=
2087 ethtool_rxfh_indir_default(i, efx->rss_spread);
2089 rc = efx_ef10_rx_push_shared_rss_config(efx, &context_size);
2090 if (rc == 0) {
2091 if (context_size != efx->rss_spread)
2092 netif_warn(efx, probe, efx->net_dev,
2093 "Could not allocate an exclusive RSS"
2094 " context; allocated a shared one of"
2096 " Wanted %u, got %u.\n",
2097 efx->rss_spread, context_size);
2098 else if (mismatch)
2099 netif_warn(efx, probe, efx->net_dev,
2100 "Could not allocate an exclusive RSS"
2101 " context; allocated a shared one but"
2102 " could not apply custom"
2105 netif_info(efx, probe, efx->net_dev,
2106 "Could not allocate an exclusive RSS"
2107 " context; allocated a shared one.\n");
2113 static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
2114 const u32 *rx_indir_table
2115 __attribute__ ((unused)))
2117 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2119 if (user)
2120 return -EOPNOTSUPP;
2121 if (nic_data->rx_rss_context != EFX_EF10_RSS_CONTEXT_INVALID)
2122 return 0;
2123 return efx_ef10_rx_push_shared_rss_config(efx, NULL);
2124 }
2126 static int efx_ef10_rx_probe(struct efx_rx_queue *rx_queue)
2128 return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
2129 (rx_queue->ptr_mask + 1) *
2130 sizeof(efx_qword_t),
2131 GFP_KERNEL);
2132 }
2134 static void efx_ef10_rx_init(struct efx_rx_queue *rx_queue)
2136 MCDI_DECLARE_BUF(inbuf,
2137 MC_CMD_INIT_RXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
2138 EFX_BUF_SIZE));
2139 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
2140 size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
2141 struct efx_nic *efx = rx_queue->efx;
2142 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2143 size_t inlen;
2144 dma_addr_t dma_addr;
2145 int rc;
2146 int i;
2147 BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);
2149 rx_queue->scatter_n = 0;
2150 rx_queue->scatter_len = 0;
2152 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
2153 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
2154 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
2155 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
2156 efx_rx_queue_index(rx_queue));
2157 MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
2158 INIT_RXQ_IN_FLAG_PREFIX, 1,
2159 INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
2160 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
2161 MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, nic_data->vport_id);
2163 dma_addr = rx_queue->rxd.buf.dma_addr;
2165 netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
2166 efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);
2168 for (i = 0; i < entries; ++i) {
2169 MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
2170 dma_addr += EFX_BUF_SIZE;
2173 inlen = MC_CMD_INIT_RXQ_IN_LEN(entries);
2175 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, inlen,
2176 NULL, 0, NULL);
2177 if (rc)
2178 netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
2179 efx_rx_queue_index(rx_queue));
2180 }
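/* Illustrative arithmetic (assumption: EFX_BUF_SIZE is the 4096-byte
 * buffer-table chunk size used throughout this file): a ring of
 * ptr_mask + 1 descriptors of 8 bytes each needs this many DMA
 * addresses in the INIT_RXQ request, matching the loop above.  The
 * helper name is hypothetical.
 */
static inline size_t efx_ef10_example_rxq_chunks(unsigned int ptr_mask)
{
	return ((size_t)ptr_mask + 1) * sizeof(efx_qword_t) / EFX_BUF_SIZE;
}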
2182 static void efx_ef10_rx_fini(struct efx_rx_queue *rx_queue)
2184 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
2185 MCDI_DECLARE_BUF_ERR(outbuf);
2186 struct efx_nic *efx = rx_queue->efx;
2187 size_t outlen;
2188 int rc;
2190 MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
2191 efx_rx_queue_index(rx_queue));
2193 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
2194 outbuf, sizeof(outbuf), &outlen);
2196 if (rc && rc != -EALREADY)
2197 goto fail;
2199 return;
2201 fail:
2202 efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
2203 outbuf, outlen, rc);
2206 static void efx_ef10_rx_remove(struct efx_rx_queue *rx_queue)
2208 efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
2209 }
2211 /* This creates an entry in the RX descriptor queue */
2212 static void
2213 efx_ef10_build_rx_desc(struct efx_rx_queue *rx_queue, unsigned int index)
2214 {
2215 struct efx_rx_buffer *rx_buf;
2216 efx_qword_t *rxd;
2218 rxd = efx_rx_desc(rx_queue, index);
2219 rx_buf = efx_rx_buffer(rx_queue, index);
2220 EFX_POPULATE_QWORD_2(*rxd,
2221 ESF_DZ_RX_KER_BYTE_CNT, rx_buf->len,
2222 ESF_DZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
2225 static void efx_ef10_rx_write(struct efx_rx_queue *rx_queue)
2227 struct efx_nic *efx = rx_queue->efx;
2228 unsigned int write_count;
2229 efx_dword_t reg;
2231 /* Firmware requires that RX_DESC_WPTR be a multiple of 8 */
2232 write_count = rx_queue->added_count & ~7;
2233 if (rx_queue->notified_count == write_count)
2234 return;
2236 do
2237 efx_ef10_build_rx_desc(
2238 rx_queue,
2239 rx_queue->notified_count & rx_queue->ptr_mask);
2240 while (++rx_queue->notified_count != write_count);
2242 wmb();
2243 EFX_POPULATE_DWORD_1(reg, ERF_DZ_RX_DESC_WPTR,
2244 write_count & rx_queue->ptr_mask);
2245 efx_writed_page(efx, &reg, ER_DZ_RX_DESC_UPD,
2246 efx_rx_queue_index(rx_queue));
2247 }
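/* Worked example of the multiple-of-8 rule above (sketch, not driver
 * code): with added_count == 21 the doorbell value is 21 & ~7 == 16;
 * if notified_count is already 16, nothing is written and no further
 * descriptors are posted until added_count reaches 24.  The helper
 * name is hypothetical.
 */
static inline unsigned int efx_ef10_example_rx_wptr(unsigned int added_count)
{
	return added_count & ~7u;	/* e.g. 21 -> 16, 24 -> 24 */
}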
2249 static efx_mcdi_async_completer efx_ef10_rx_defer_refill_complete;
2251 static void efx_ef10_rx_defer_refill(struct efx_rx_queue *rx_queue)
2253 struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
2254 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
2255 efx_qword_t event;
2257 EFX_POPULATE_QWORD_2(event,
2258 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
2259 ESF_DZ_EV_DATA, EFX_EF10_REFILL);
2261 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
2263 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
2264 * already swapped the data to little-endian order.
2265 */
2266 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
2267 sizeof(efx_qword_t));
2269 efx_mcdi_rpc_async(channel->efx, MC_CMD_DRIVER_EVENT,
2270 inbuf, sizeof(inbuf), 0,
2271 efx_ef10_rx_defer_refill_complete, 0);
2272 }
2274 static void
2275 efx_ef10_rx_defer_refill_complete(struct efx_nic *efx, unsigned long cookie,
2276 int rc, efx_dword_t *outbuf,
2277 size_t outlen_actual)
2278 {
2279 /* nothing to do */
2280 }
2282 static int efx_ef10_ev_probe(struct efx_channel *channel)
2284 return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
2285 (channel->eventq_mask + 1) *
2286 sizeof(efx_qword_t),
2287 GFP_KERNEL);
2288 }
2290 static void efx_ef10_ev_fini(struct efx_channel *channel)
2292 MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
2293 MCDI_DECLARE_BUF_ERR(outbuf);
2294 struct efx_nic *efx = channel->efx;
2295 size_t outlen;
2296 int rc;
2298 MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
2300 rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
2301 outbuf, sizeof(outbuf), &outlen);
2303 if (rc && rc != -EALREADY)
2304 goto fail;
2306 return;
2308 fail:
2309 efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
2310 outbuf, outlen, rc);
2313 static int efx_ef10_ev_init(struct efx_channel *channel)
2315 MCDI_DECLARE_BUF(inbuf,
2316 MC_CMD_INIT_EVQ_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
2317 EFX_BUF_SIZE));
2318 MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_OUT_LEN);
2319 size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
2320 struct efx_nic *efx = channel->efx;
2321 struct efx_ef10_nic_data *nic_data;
2322 bool supports_rx_merge;
2323 size_t inlen, outlen;
2324 unsigned int enabled, implemented;
2325 dma_addr_t dma_addr;
2326 int rc;
2327 int i;
2329 nic_data = efx->nic_data;
2330 supports_rx_merge =
2331 !!(nic_data->datapath_caps &
2332 1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN);
2334 /* Fill event queue with all ones (i.e. empty events) */
2335 memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);
2337 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
2338 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
2339 /* INIT_EVQ expects index in vector table, not absolute */
2340 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
2341 MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
2342 INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
2343 INIT_EVQ_IN_FLAG_RX_MERGE, 1,
2344 INIT_EVQ_IN_FLAG_TX_MERGE, 1,
2345 INIT_EVQ_IN_FLAG_CUT_THRU, !supports_rx_merge);
2346 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
2347 MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
2348 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
2349 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
2350 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
2351 MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
2352 MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);
2354 dma_addr = channel->eventq.buf.dma_addr;
2355 for (i = 0; i < entries; ++i) {
2356 MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
2357 dma_addr += EFX_BUF_SIZE;
2360 inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);
2362 rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
2363 outbuf, sizeof(outbuf), &outlen);
2364 /* IRQ return is ignored */
2365 if (channel->channel || rc)
2366 return rc;
2368 /* Successfully created event queue on channel 0 */
2369 rc = efx_mcdi_get_workarounds(efx, &implemented, &enabled);
2370 if (rc == -ENOSYS) {
2371 /* GET_WORKAROUNDS was implemented before the bug26807
2372 * workaround, thus the latter must be unavailable in this fw.
2373 */
2374 nic_data->workaround_26807 = false;
2375 rc = 0;
2376 } else if (rc) {
2377 goto fail;
2378 } else {
2379 nic_data->workaround_26807 =
2380 !!(enabled & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807);
2382 if (implemented & MC_CMD_GET_WORKAROUNDS_OUT_BUG26807 &&
2383 !nic_data->workaround_26807) {
2384 unsigned int flags;
2386 rc = efx_mcdi_set_workaround(efx,
2387 MC_CMD_WORKAROUND_BUG26807,
2388 true, &flags);
2390 if (!rc) {
2391 if (flags &
2392 1 << MC_CMD_WORKAROUND_EXT_OUT_FLR_DONE_LBN) {
2393 netif_info(efx, drv, efx->net_dev,
2394 "other functions on NIC have been reset\n");
2395 /* MC's boot count has incremented */
2396 ++nic_data->warm_boot_count;
2397 }
2398 nic_data->workaround_26807 = true;
2399 } else if (rc == -EPERM) {
2400 rc = 0;
2401 }
2402 }
2404 if (!rc)
2405 return 0;
2408 fail:
2409 efx_ef10_ev_fini(channel);
2410 return rc;
2411 }
2413 static void efx_ef10_ev_remove(struct efx_channel *channel)
2415 efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
2416 }
2418 static void efx_ef10_handle_rx_wrong_queue(struct efx_rx_queue *rx_queue,
2419 unsigned int rx_queue_label)
2421 struct efx_nic *efx = rx_queue->efx;
2423 netif_info(efx, hw, efx->net_dev,
2424 "rx event arrived on queue %d labeled as queue %u\n",
2425 efx_rx_queue_index(rx_queue), rx_queue_label);
2427 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
2428 }
2430 static void
2431 efx_ef10_handle_rx_bad_lbits(struct efx_rx_queue *rx_queue,
2432 unsigned int actual, unsigned int expected)
2434 unsigned int dropped = (actual - expected) & rx_queue->ptr_mask;
2435 struct efx_nic *efx = rx_queue->efx;
2437 netif_info(efx, hw, efx->net_dev,
2438 "dropped %d events (index=%d expected=%d)\n",
2439 dropped, actual, expected);
2441 efx_schedule_reset(efx, RESET_TYPE_DISABLE);
2442 }
2444 /* A partially received RX packet was aborted; clean up. */
2445 static void efx_ef10_handle_rx_abort(struct efx_rx_queue *rx_queue)
2447 unsigned int rx_desc_ptr;
2449 netif_dbg(rx_queue->efx, hw, rx_queue->efx->net_dev,
2450 "scattered RX aborted (dropping %u buffers)\n",
2451 rx_queue->scatter_n);
2453 rx_desc_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
2455 efx_rx_packet(rx_queue, rx_desc_ptr, rx_queue->scatter_n,
2456 0, EFX_RX_PKT_DISCARD);
2458 rx_queue->removed_count += rx_queue->scatter_n;
2459 rx_queue->scatter_n = 0;
2460 rx_queue->scatter_len = 0;
2461 ++efx_rx_queue_channel(rx_queue)->n_rx_nodesc_trunc;
2462 }
2464 static int efx_ef10_handle_rx_event(struct efx_channel *channel,
2465 const efx_qword_t *event)
2467 unsigned int rx_bytes, next_ptr_lbits, rx_queue_label, rx_l4_class;
2468 unsigned int n_descs, n_packets, i;
2469 struct efx_nic *efx = channel->efx;
2470 struct efx_rx_queue *rx_queue;
2471 bool rx_cont;
2472 u16 flags = 0;
2474 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
2475 return 0;
2477 /* Basic packet information */
2478 rx_bytes = EFX_QWORD_FIELD(*event, ESF_DZ_RX_BYTES);
2479 next_ptr_lbits = EFX_QWORD_FIELD(*event, ESF_DZ_RX_DSC_PTR_LBITS);
2480 rx_queue_label = EFX_QWORD_FIELD(*event, ESF_DZ_RX_QLABEL);
2481 rx_l4_class = EFX_QWORD_FIELD(*event, ESF_DZ_RX_L4_CLASS);
2482 rx_cont = EFX_QWORD_FIELD(*event, ESF_DZ_RX_CONT);
2484 if (EFX_QWORD_FIELD(*event, ESF_DZ_RX_DROP_EVENT))
2485 netdev_WARN(efx->net_dev, "saw RX_DROP_EVENT: event="
2487 EFX_QWORD_VAL(*event));
2489 rx_queue = efx_channel_get_rx_queue(channel);
2491 if (unlikely(rx_queue_label != efx_rx_queue_index(rx_queue)))
2492 efx_ef10_handle_rx_wrong_queue(rx_queue, rx_queue_label);
2494 n_descs = ((next_ptr_lbits - rx_queue->removed_count) &
2495 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
2497 if (n_descs != rx_queue->scatter_n + 1) {
2498 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2500 /* detect rx abort */
2501 if (unlikely(n_descs == rx_queue->scatter_n)) {
2502 if (rx_queue->scatter_n == 0 || rx_bytes != 0)
2503 netdev_WARN(efx->net_dev,
2504 "invalid RX abort: scatter_n=%u event="
2506 rx_queue->scatter_n,
2507 EFX_QWORD_VAL(*event));
2508 efx_ef10_handle_rx_abort(rx_queue);
2509 return 0;
2510 }
2512 /* Check that RX completion merging is valid, i.e.
2513 * the current firmware supports it and this is a
2514 * non-scattered packet.
2515 */
2516 if (!(nic_data->datapath_caps &
2517 (1 << MC_CMD_GET_CAPABILITIES_OUT_RX_BATCHING_LBN)) ||
2518 rx_queue->scatter_n != 0 || rx_cont) {
2519 efx_ef10_handle_rx_bad_lbits(
2520 rx_queue, next_ptr_lbits,
2521 (rx_queue->removed_count +
2522 rx_queue->scatter_n + 1) &
2523 ((1 << ESF_DZ_RX_DSC_PTR_LBITS_WIDTH) - 1));
2524 return 0;
2525 }
2527 /* Merged completion for multiple non-scattered packets */
2528 rx_queue->scatter_n = 1;
2529 rx_queue->scatter_len = 0;
2530 n_packets = n_descs;
2531 ++channel->n_rx_merge_events;
2532 channel->n_rx_merge_packets += n_packets;
2533 flags |= EFX_RX_PKT_PREFIX_LEN;
2534 } else {
2535 ++rx_queue->scatter_n;
2536 rx_queue->scatter_len += rx_bytes;
2537 if (rx_cont)
2538 return 0;
2539 n_packets = 1;
2540 }
2542 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_ECRC_ERR)))
2543 flags |= EFX_RX_PKT_DISCARD;
2545 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_RX_IPCKSUM_ERR))) {
2546 channel->n_rx_ip_hdr_chksum_err += n_packets;
2547 } else if (unlikely(EFX_QWORD_FIELD(*event,
2548 ESF_DZ_RX_TCPUDP_CKSUM_ERR))) {
2549 channel->n_rx_tcp_udp_chksum_err += n_packets;
2550 } else if (rx_l4_class == ESE_DZ_L4_CLASS_TCP ||
2551 rx_l4_class == ESE_DZ_L4_CLASS_UDP) {
2552 flags |= EFX_RX_PKT_CSUMMED;
2553 }
2555 if (rx_l4_class == ESE_DZ_L4_CLASS_TCP)
2556 flags |= EFX_RX_PKT_TCP;
2558 channel->irq_mod_score += 2 * n_packets;
2560 /* Handle received packet(s) */
2561 for (i = 0; i < n_packets; i++) {
2562 efx_rx_packet(rx_queue,
2563 rx_queue->removed_count & rx_queue->ptr_mask,
2564 rx_queue->scatter_n, rx_queue->scatter_len,
2565 flags);
2566 rx_queue->removed_count += rx_queue->scatter_n;
2567 }
2569 rx_queue->scatter_n = 0;
2570 rx_queue->scatter_len = 0;
2572 return n_packets;
2573 }
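/* Worked example of the completion handling above (illustrative): if
 * removed_count is 100 and the event's ESF_DZ_RX_DSC_PTR_LBITS equals
 * (100 + 4) masked to the field width, then n_descs == 4.  With no
 * scatter in progress (scatter_n == 0), RX_CONT clear and the RX
 * batching capability set, that is one merged event covering four
 * single-descriptor packets; otherwise the mismatch is reported as
 * dropped events via efx_ef10_handle_rx_bad_lbits().
 */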
2575 static int
2576 efx_ef10_handle_tx_event(struct efx_channel *channel, efx_qword_t *event)
2577 {
2578 struct efx_nic *efx = channel->efx;
2579 struct efx_tx_queue *tx_queue;
2580 unsigned int tx_ev_desc_ptr;
2581 unsigned int tx_ev_q_label;
2582 int tx_descs = 0;
2584 if (unlikely(ACCESS_ONCE(efx->reset_pending)))
2585 return 0;
2587 if (unlikely(EFX_QWORD_FIELD(*event, ESF_DZ_TX_DROP_EVENT)))
2588 netdev_WARN(efx->net_dev, "saw TX_DROP_EVENT: event="
2589 EFX_QWORD_FMT "\n", EFX_QWORD_VAL(*event));
2590 /* Transmit completion */
2591 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, ESF_DZ_TX_DESCR_INDX);
2592 tx_ev_q_label = EFX_QWORD_FIELD(*event, ESF_DZ_TX_QLABEL);
2593 tx_queue = efx_channel_get_tx_queue(channel,
2594 tx_ev_q_label % EFX_TXQ_TYPES);
2595 tx_descs = ((tx_ev_desc_ptr + 1 - tx_queue->read_count) &
2596 tx_queue->ptr_mask);
2597 efx_xmit_done(tx_queue, tx_ev_desc_ptr & tx_queue->ptr_mask);
2599 return tx_descs;
2600 }
2602 static void
2603 efx_ef10_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
2605 struct efx_nic *efx = channel->efx;
2608 subcode = EFX_QWORD_FIELD(*event, ESF_DZ_DRV_SUB_CODE);
2610 switch (subcode) {
2611 case ESE_DZ_DRV_TIMER_EV:
2612 case ESE_DZ_DRV_WAKE_UP_EV:
2613 break;
2614 case ESE_DZ_DRV_START_UP_EV:
2615 /* Event queue initialisation complete; nothing to do. */
2616 break;
2617 default:
2618 netif_err(efx, hw, efx->net_dev,
2619 "channel %d unknown driver event type %d"
2620 " (data " EFX_QWORD_FMT ")\n",
2621 channel->channel, subcode,
2622 EFX_QWORD_VAL(*event));
2623 break;
2624 }
2625 }
2627 static void efx_ef10_handle_driver_generated_event(struct efx_channel *channel,
2628 efx_qword_t *event)
2629 {
2630 struct efx_nic *efx = channel->efx;
2631 u32 subcode;
2633 subcode = EFX_QWORD_FIELD(*event, EFX_DWORD_0);
2635 switch (subcode) {
2636 case EFX_EF10_TEST:
2637 channel->event_test_cpu = raw_smp_processor_id();
2638 break;
2639 case EFX_EF10_REFILL:
2640 /* The queue must be empty, so we won't receive any rx
2641 * events, so efx_process_channel() won't refill the
2642 * queue. Refill it here.
2643 */
2644 efx_fast_push_rx_descriptors(&channel->rx_queue, true);
2645 break;
2646 default:
2647 netif_err(efx, hw, efx->net_dev,
2648 "channel %d unknown driver event type %u"
2649 " (data " EFX_QWORD_FMT ")\n",
2650 channel->channel, (unsigned) subcode,
2651 EFX_QWORD_VAL(*event));
2652 }
2653 }
2655 static int efx_ef10_ev_process(struct efx_channel *channel, int quota)
2657 struct efx_nic *efx = channel->efx;
2658 efx_qword_t event, *p_event;
2659 unsigned int read_ptr;
2660 int ev_code;
2661 int tx_descs = 0;
2662 int spent = 0;
2664 if (quota <= 0)
2665 return spent;
2667 read_ptr = channel->eventq_read_ptr;
2669 for (;;) {
2670 p_event = efx_event(channel, read_ptr);
2671 event = *p_event;
2673 if (!efx_event_present(&event))
2674 break;
2676 EFX_SET_QWORD(*p_event);
2678 ++read_ptr;
2680 ev_code = EFX_QWORD_FIELD(event, ESF_DZ_EV_CODE);
2682 netif_vdbg(efx, drv, efx->net_dev,
2683 "processing event on %d " EFX_QWORD_FMT "\n",
2684 channel->channel, EFX_QWORD_VAL(event));
2686 switch (ev_code) {
2687 case ESE_DZ_EV_CODE_MCDI_EV:
2688 efx_mcdi_process_event(channel, &event);
2689 break;
2690 case ESE_DZ_EV_CODE_RX_EV:
2691 spent += efx_ef10_handle_rx_event(channel, &event);
2692 if (spent >= quota) {
2693 /* XXX can we split a merged event to
2694 * avoid going over-quota?
2695 */
2696 spent = quota;
2697 goto out;
2698 }
2699 break;
2700 case ESE_DZ_EV_CODE_TX_EV:
2701 tx_descs += efx_ef10_handle_tx_event(channel, &event);
2702 if (tx_descs > efx->txq_entries) {
2703 spent = quota;
2704 goto out;
2705 } else if (++spent == quota) {
2706 break;
2707 }
2708 break;
2709 case ESE_DZ_EV_CODE_DRIVER_EV:
2710 efx_ef10_handle_driver_event(channel, &event);
2711 if (++spent == quota)
2712 goto out;
2713 break;
2714 case EFX_EF10_DRVGEN_EV:
2715 efx_ef10_handle_driver_generated_event(channel, &event);
2716 break;
2717 default:
2718 netif_err(efx, hw, efx->net_dev,
2719 "channel %d unknown event type %d"
2720 " (data " EFX_QWORD_FMT ")\n",
2721 channel->channel, ev_code,
2722 EFX_QWORD_VAL(event));
2723 }
2724 }
2726 out:
2727 channel->eventq_read_ptr = read_ptr;
2728 return spent;
2729 }
2731 static void efx_ef10_ev_read_ack(struct efx_channel *channel)
2733 struct efx_nic *efx = channel->efx;
2734 efx_dword_t rptr;
2736 if (EFX_EF10_WORKAROUND_35388(efx)) {
2737 BUILD_BUG_ON(EFX_MIN_EVQ_SIZE <
2738 (1 << ERF_DD_EVQ_IND_RPTR_WIDTH));
2739 BUILD_BUG_ON(EFX_MAX_EVQ_SIZE >
2740 (1 << 2 * ERF_DD_EVQ_IND_RPTR_WIDTH));
2742 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
2743 EFE_DD_EVQ_IND_RPTR_FLAGS_HIGH,
2744 ERF_DD_EVQ_IND_RPTR,
2745 (channel->eventq_read_ptr &
2746 channel->eventq_mask) >>
2747 ERF_DD_EVQ_IND_RPTR_WIDTH);
2748 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
2749 channel->channel);
2750 EFX_POPULATE_DWORD_2(rptr, ERF_DD_EVQ_IND_RPTR_FLAGS,
2751 EFE_DD_EVQ_IND_RPTR_FLAGS_LOW,
2752 ERF_DD_EVQ_IND_RPTR,
2753 channel->eventq_read_ptr &
2754 ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1));
2755 efx_writed_page(efx, &rptr, ER_DD_EVQ_INDIRECT,
2756 channel->channel);
2757 } else {
2758 EFX_POPULATE_DWORD_1(rptr, ERF_DZ_EVQ_RPTR,
2759 channel->eventq_read_ptr &
2760 channel->eventq_mask);
2761 efx_writed_page(efx, &rptr, ER_DZ_EVQ_RPTR, channel->channel);
2762 }
2763 }
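/* Sketch of the split acknowledgement above (not driver code): under
 * workaround 35388 the read pointer is written as two fields of
 * ERF_DD_EVQ_IND_RPTR_WIDTH bits each, high half first, low half
 * second.  The helper name is hypothetical.
 */
static inline void efx_ef10_example_split_rptr(unsigned int rptr,
					       unsigned int *high,
					       unsigned int *low)
{
	*high = rptr >> ERF_DD_EVQ_IND_RPTR_WIDTH;
	*low = rptr & ((1 << ERF_DD_EVQ_IND_RPTR_WIDTH) - 1);
}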
2765 static void efx_ef10_ev_test_generate(struct efx_channel *channel)
2767 MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
2768 struct efx_nic *efx = channel->efx;
2769 efx_qword_t event;
2770 int rc;
2772 EFX_POPULATE_QWORD_2(event,
2773 ESF_DZ_EV_CODE, EFX_EF10_DRVGEN_EV,
2774 ESF_DZ_EV_DATA, EFX_EF10_TEST);
2776 MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);
2778 /* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
2779 * already swapped the data to little-endian order.
2780 */
2781 memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
2782 sizeof(efx_qword_t));
2784 rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
2785 NULL, 0, NULL);
2786 if (rc != 0)
2787 goto fail;
2789 return;
2791 fail:
2793 netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
2794 }
2796 void efx_ef10_handle_drain_event(struct efx_nic *efx)
2798 if (atomic_dec_and_test(&efx->active_queues))
2799 wake_up(&efx->flush_wq);
2801 WARN_ON(atomic_read(&efx->active_queues) < 0);
2802 }
2804 static int efx_ef10_fini_dmaq(struct efx_nic *efx)
2806 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2807 struct efx_channel *channel;
2808 struct efx_tx_queue *tx_queue;
2809 struct efx_rx_queue *rx_queue;
2810 int pending;
2812 /* If the MC has just rebooted, the TX/RX queues will have already been
2813 * torn down, but efx->active_queues needs to be set to zero.
2814 */
2815 if (nic_data->must_realloc_vis) {
2816 atomic_set(&efx->active_queues, 0);
2817 return 0;
2818 }
2820 /* Do not attempt to write to the NIC during EEH recovery */
2821 if (efx->state != STATE_RECOVERY) {
2822 efx_for_each_channel(channel, efx) {
2823 efx_for_each_channel_rx_queue(rx_queue, channel)
2824 efx_ef10_rx_fini(rx_queue);
2825 efx_for_each_channel_tx_queue(tx_queue, channel)
2826 efx_ef10_tx_fini(tx_queue);
2827 }
2829 wait_event_timeout(efx->flush_wq,
2830 atomic_read(&efx->active_queues) == 0,
2831 msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
2832 pending = atomic_read(&efx->active_queues);
2833 if (pending) {
2834 netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
2835 pending);
2836 return -ETIMEDOUT;
2837 }
2838 }
2840 return 0;
2841 }
2843 static void efx_ef10_prepare_flr(struct efx_nic *efx)
2845 atomic_set(&efx->active_queues, 0);
2846 }
2848 static bool efx_ef10_filter_equal(const struct efx_filter_spec *left,
2849 const struct efx_filter_spec *right)
2851 if ((left->match_flags ^ right->match_flags) |
2852 ((left->flags ^ right->flags) &
2853 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
2854 return false;
2856 return memcmp(&left->outer_vid, &right->outer_vid,
2857 sizeof(struct efx_filter_spec) -
2858 offsetof(struct efx_filter_spec, outer_vid)) == 0;
2859 }
2861 static unsigned int efx_ef10_filter_hash(const struct efx_filter_spec *spec)
2863 BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
2864 return jhash2((const u32 *)&spec->outer_vid,
2865 (sizeof(struct efx_filter_spec) -
2866 offsetof(struct efx_filter_spec, outer_vid)) / 4,
2867 0);
2868 /* XXX should we randomise the initval? */
2869 }
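/* Both efx_ef10_filter_equal() and efx_ef10_filter_hash() treat the
 * spec from outer_vid onwards as one opaque tuple, so they stay
 * consistent by construction.  A sketch of that shared view
 * (illustrative helper, not driver code):
 */
static inline const u32 *
efx_ef10_example_match_tuple(const struct efx_filter_spec *spec,
			     size_t *n_words)
{
	*n_words = (sizeof(*spec) -
		    offsetof(struct efx_filter_spec, outer_vid)) / 4;
	return (const u32 *)&spec->outer_vid;
}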
2871 /* Decide whether a filter should be exclusive or else should allow
2872 * delivery to additional recipients. Currently we decide that
2873 * filters for specific local unicast MAC and IP addresses are
2874 * exclusive.
2875 */
2876 static bool efx_ef10_filter_is_exclusive(const struct efx_filter_spec *spec)
2878 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC &&
2879 !is_multicast_ether_addr(spec->loc_mac))
2880 return true;
2882 if ((spec->match_flags &
2883 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) ==
2884 (EFX_FILTER_MATCH_ETHER_TYPE | EFX_FILTER_MATCH_LOC_HOST)) {
2885 if (spec->ether_type == htons(ETH_P_IP) &&
2886 !ipv4_is_multicast(spec->loc_host[0]))
2887 return true;
2888 if (spec->ether_type == htons(ETH_P_IPV6) &&
2889 ((const u8 *)spec->loc_host)[0] != 0xff)
2890 return true;
2891 }
2893 return false;
2894 }
2896 static struct efx_filter_spec *
2897 efx_ef10_filter_entry_spec(const struct efx_ef10_filter_table *table,
2898 unsigned int filter_idx)
2900 return (struct efx_filter_spec *)(table->entry[filter_idx].spec &
2901 ~EFX_EF10_FILTER_FLAGS);
2902 }
2904 static unsigned int
2905 efx_ef10_filter_entry_flags(const struct efx_ef10_filter_table *table,
2906 unsigned int filter_idx)
2908 return table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAGS;
2909 }
2911 static void
2912 efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
2913 unsigned int filter_idx,
2914 const struct efx_filter_spec *spec,
2915 unsigned int flags)
2916 {
2917 table->entry[filter_idx].spec = (unsigned long)spec | flags;
2918 }
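/* The three accessors above implement pointer tagging: saved specs are
 * kmalloc'd, so their addresses are at least 4-byte aligned and the
 * two low bits are free for the BUSY and AUTO_OLD flags.  A sketch of
 * the round trip, assuming an aligned pointer p:
 *
 *	entry.spec = (unsigned long)p | EFX_EF10_FILTER_FLAG_BUSY;
 *	busy = entry.spec & EFX_EF10_FILTER_FLAG_BUSY;		// true
 *	p    = (void *)(entry.spec & ~EFX_EF10_FILTER_FLAGS);	// original
 */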
2920 static void efx_ef10_filter_push_prep(struct efx_nic *efx,
2921 const struct efx_filter_spec *spec,
2922 efx_dword_t *inbuf, u64 handle,
2925 struct efx_ef10_nic_data *nic_data = efx->nic_data;
2927 memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
2929 if (replacing) {
2930 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2931 MC_CMD_FILTER_OP_IN_OP_REPLACE);
2932 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
2933 } else {
2934 u32 match_fields = 0;
2936 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
2937 efx_ef10_filter_is_exclusive(spec) ?
2938 MC_CMD_FILTER_OP_IN_OP_INSERT :
2939 MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
2941 /* Convert match flags and values. Unlike almost
2942 * everything else in MCDI, these fields are in
2943 * network byte order.
2945 if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
2946 match_fields |=
2947 is_multicast_ether_addr(spec->loc_mac) ?
2948 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
2949 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
2950 #define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
2951 if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
2952 match_fields |= \
2953 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
2954 mcdi_field ## _LBN; \
2955 BUILD_BUG_ON( \
2956 MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
2957 sizeof(spec->gen_field)); \
2958 memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
2959 &spec->gen_field, sizeof(spec->gen_field)); \
2960 }
2961 COPY_FIELD(REM_HOST, rem_host, SRC_IP);
2962 COPY_FIELD(LOC_HOST, loc_host, DST_IP);
2963 COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
2964 COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
2965 COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
2966 COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
2967 COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
2968 COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
2969 COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
2970 COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
2972 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
2973 match_fields);
2974 }
2976 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
2977 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_DEST,
2978 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
2979 MC_CMD_FILTER_OP_IN_RX_DEST_DROP :
2980 MC_CMD_FILTER_OP_IN_RX_DEST_HOST);
2981 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DOMAIN, 0);
2982 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_TX_DEST,
2983 MC_CMD_FILTER_OP_IN_TX_DEST_DEFAULT);
2984 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_QUEUE,
2985 spec->dmaq_id == EFX_FILTER_RX_DMAQ_ID_DROP ?
2986 0 : spec->dmaq_id);
2987 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_MODE,
2988 (spec->flags & EFX_FILTER_FLAG_RX_RSS) ?
2989 MC_CMD_FILTER_OP_IN_RX_MODE_RSS :
2990 MC_CMD_FILTER_OP_IN_RX_MODE_SIMPLE);
2991 if (spec->flags & EFX_FILTER_FLAG_RX_RSS)
2992 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_RX_CONTEXT,
2993 spec->rss_context !=
2994 EFX_FILTER_RSS_CONTEXT_DEFAULT ?
2995 spec->rss_context : nic_data->rx_rss_context);
2996 }
2998 static int efx_ef10_filter_push(struct efx_nic *efx,
2999 const struct efx_filter_spec *spec,
3000 u64 *handle, bool replacing)
3002 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3003 MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
3004 int rc;
3006 efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
3007 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3008 outbuf, sizeof(outbuf), NULL);
3009 if (rc == 0)
3010 *handle = MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
3011 if (rc == -ENOSPC)
3012 rc = -EBUSY; /* to match efx_farch_filter_insert() */
3013 return rc;
3014 }
3016 static int efx_ef10_filter_rx_match_pri(struct efx_ef10_filter_table *table,
3017 enum efx_filter_match_flags match_flags)
3019 unsigned int match_pri;
3021 for (match_pri = 0;
3022 match_pri < table->rx_match_count;
3023 match_pri++)
3024 if (table->rx_match_flags[match_pri] == match_flags)
3025 return match_pri;
3027 return -EPROTONOSUPPORT;
3028 }
3030 static s32 efx_ef10_filter_insert(struct efx_nic *efx,
3031 struct efx_filter_spec *spec,
3032 bool replace_equal)
3033 {
3034 struct efx_ef10_filter_table *table = efx->filter_state;
3035 DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
3036 struct efx_filter_spec *saved_spec;
3037 unsigned int match_pri, hash;
3038 unsigned int priv_flags;
3039 bool replacing = false;
3040 bool is_mc_recip;
3041 s32 rc, ins_index = -1;
3042 DEFINE_WAIT(wait);
3045 /* For now, only support RX filters */
3046 if ((spec->flags & (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)) !=
3047 EFX_FILTER_FLAG_RX)
3048 return -EINVAL;
3050 rc = efx_ef10_filter_rx_match_pri(table, spec->match_flags);
3051 if (rc < 0)
3052 return rc;
3053 match_pri = rc;
3055 hash = efx_ef10_filter_hash(spec);
3056 is_mc_recip = efx_filter_is_mc_recipient(spec);
3057 if (is_mc_recip)
3058 bitmap_zero(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
3060 /* Find any existing filters with the same match tuple or
3061 * else a free slot to insert at. If any of them are busy,
3062 * we have to wait and retry.
3063 */
3064 for (;;) {
3065 unsigned int depth = 1;
3066 unsigned int i;
3068 spin_lock_bh(&efx->filter_lock);
3070 for (;;) {
3071 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3072 saved_spec = efx_ef10_filter_entry_spec(table, i);
3074 if (!saved_spec) {
3075 if (ins_index < 0)
3076 ins_index = i;
3077 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
3078 if (table->entry[i].spec &
3079 EFX_EF10_FILTER_FLAG_BUSY)
3080 break;
3081 if (spec->priority < saved_spec->priority &&
3082 spec->priority != EFX_FILTER_PRI_AUTO) {
3087 /* This is the only one */
3088 if (spec->priority ==
3089 saved_spec->priority &&
3096 } else if (spec->priority >
3097 saved_spec->priority ||
3099 saved_spec->priority &&
3104 __set_bit(depth, mc_rem_map);
3108 /* Once we reach the maximum search depth, use
3109 * the first suitable slot or return -EBUSY if
3110 * there was none.
3111 */
3112 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
3113 if (ins_index < 0) {
3123 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
3124 spin_unlock_bh(&efx->filter_lock);
3125 schedule();
3126 }
3128 found:
3129 /* Create a software table entry if necessary, and mark it
3130 * busy. We might yet fail to insert, but any attempt to
3131 * insert a conflicting filter while we're waiting for the
3132 * firmware must find the busy entry.
3133 */
3134 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
3135 if (saved_spec) {
3136 if (spec->priority == EFX_FILTER_PRI_AUTO &&
3137 saved_spec->priority >= EFX_FILTER_PRI_AUTO) {
3138 /* Just make sure it won't be removed */
3139 if (saved_spec->priority > EFX_FILTER_PRI_AUTO)
3140 saved_spec->flags |= EFX_FILTER_FLAG_RX_OVER_AUTO;
3141 table->entry[ins_index].spec &=
3142 ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
3143 rc = ins_index;
3144 goto out_unlock;
3145 }
3146 replacing = true;
3147 priv_flags = efx_ef10_filter_entry_flags(table, ins_index);
3148 } else {
3149 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
3150 if (!saved_spec) {
3151 rc = -ENOMEM;
3152 goto out_unlock;
3153 }
3154 *saved_spec = *spec;
3155 priv_flags = 0;
3156 }
3157 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
3158 priv_flags | EFX_EF10_FILTER_FLAG_BUSY);
3160 /* Mark lower-priority multicast recipients busy prior to removal */
3161 if (is_mc_recip) {
3162 unsigned int depth, i;
3164 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
3165 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3166 if (test_bit(depth, mc_rem_map))
3167 table->entry[i].spec |=
3168 EFX_EF10_FILTER_FLAG_BUSY;
3169 }
3170 }
3172 spin_unlock_bh(&efx->filter_lock);
3174 rc = efx_ef10_filter_push(efx, spec, &table->entry[ins_index].handle,
3175 replacing);
3177 /* Finalise the software table entry */
3178 spin_lock_bh(&efx->filter_lock);
3179 if (rc == 0) {
3180 if (replacing) {
3181 /* Update the fields that may differ */
3182 if (saved_spec->priority == EFX_FILTER_PRI_AUTO)
3183 saved_spec->flags |=
3184 EFX_FILTER_FLAG_RX_OVER_AUTO;
3185 saved_spec->priority = spec->priority;
3186 saved_spec->flags &= EFX_FILTER_FLAG_RX_OVER_AUTO;
3187 saved_spec->flags |= spec->flags;
3188 saved_spec->rss_context = spec->rss_context;
3189 saved_spec->dmaq_id = spec->dmaq_id;
3190 }
3191 } else if (!replacing) {
3192 kfree(saved_spec);
3193 saved_spec = NULL;
3194 }
3195 efx_ef10_filter_set_entry(table, ins_index, saved_spec, priv_flags);
3197 /* Remove and finalise entries for lower-priority multicast
3198 * recipients
3199 */
3200 if (is_mc_recip) {
3201 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3202 unsigned int depth, i;
3204 memset(inbuf, 0, sizeof(inbuf));
3206 for (depth = 0; depth < EFX_EF10_FILTER_SEARCH_LIMIT; depth++) {
3207 if (!test_bit(depth, mc_rem_map))
3208 continue;
3210 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3211 saved_spec = efx_ef10_filter_entry_spec(table, i);
3212 priv_flags = efx_ef10_filter_entry_flags(table, i);
3214 if (rc == 0) {
3215 spin_unlock_bh(&efx->filter_lock);
3216 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3217 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3218 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3219 table->entry[i].handle);
3220 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
3221 inbuf, sizeof(inbuf),
3222 NULL, 0, NULL);
3223 spin_lock_bh(&efx->filter_lock);
3224 }
3226 if (rc == 0) {
3227 kfree(saved_spec);
3228 saved_spec = NULL;
3229 priv_flags = 0;
3230 } else {
3231 priv_flags &= ~EFX_EF10_FILTER_FLAG_BUSY;
3232 }
3233 efx_ef10_filter_set_entry(table, i, saved_spec,
3234 priv_flags);
3235 }
3236 }
3238 /* If successful, return the inserted filter ID */
3239 if (rc == 0)
3240 rc = match_pri * HUNT_FILTER_TBL_ROWS + ins_index;
3242 wake_up_all(&table->waitq);
3243 out_unlock:
3244 spin_unlock_bh(&efx->filter_lock);
3245 finish_wait(&table->waitq, &wait);
3246 return rc;
3247 }
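/* The returned filter ID above packs the match priority and the table
 * row together: id = match_pri * HUNT_FILTER_TBL_ROWS + ins_index.
 * Illustrative sketches of the two decode directions (mirroring
 * efx_ef10_filter_get_unsafe_id() and the rx_match_pri checks; the
 * helper names are hypothetical):
 */
static inline unsigned int efx_ef10_example_id_to_row(u32 filter_id)
{
	return filter_id % HUNT_FILTER_TBL_ROWS;
}

static inline unsigned int efx_ef10_example_id_to_pri(u32 filter_id)
{
	return filter_id / HUNT_FILTER_TBL_ROWS;
}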
3249 static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
3250 {
3251 /* no need to do anything here on EF10 */
3252 }
3254 /* Remove a filter.
3255 * If !by_index, remove by ID
3256 * If by_index, remove by index
3257 * Filter ID may come from userland and must be range-checked.
3258 */
3259 static int efx_ef10_filter_remove_internal(struct efx_nic *efx,
3260 unsigned int priority_mask,
3261 u32 filter_id, bool by_index)
3263 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
3264 struct efx_ef10_filter_table *table = efx->filter_state;
3265 MCDI_DECLARE_BUF(inbuf,
3266 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
3267 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
3268 struct efx_filter_spec *spec;
3269 DEFINE_WAIT(wait);
3270 int rc;
3272 /* Find the software table entry and mark it busy. Don't
3273 * remove it yet; any attempt to update while we're waiting
3274 * for the firmware must find the busy entry.
3275 */
3276 for (;;) {
3277 spin_lock_bh(&efx->filter_lock);
3278 if (!(table->entry[filter_idx].spec &
3279 EFX_EF10_FILTER_FLAG_BUSY))
3280 break;
3281 prepare_to_wait(&table->waitq, &wait, TASK_UNINTERRUPTIBLE);
3282 spin_unlock_bh(&efx->filter_lock);
3283 schedule();
3284 }
3285 finish_wait(&table->waitq, &wait);
3286 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3287 if (!spec ||
3288 (!by_index &&
3289 efx_ef10_filter_rx_match_pri(table, spec->match_flags) !=
3290 filter_id / HUNT_FILTER_TBL_ROWS)) {
3291 rc = -ENOENT;
3292 goto out_unlock;
3293 }
3295 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO &&
3296 priority_mask == (1U << EFX_FILTER_PRI_AUTO)) {
3297 /* Just remove flags */
3298 spec->flags &= ~EFX_FILTER_FLAG_RX_OVER_AUTO;
3299 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_AUTO_OLD;
3300 rc = 0;
3301 goto out_unlock;
3302 }
3304 if (!(priority_mask & (1U << spec->priority))) {
3305 rc = -ENOENT;
3306 goto out_unlock;
3307 }
3309 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3310 spin_unlock_bh(&efx->filter_lock);
3312 if (spec->flags & EFX_FILTER_FLAG_RX_OVER_AUTO) {
3313 /* Reset to an automatic filter */
3315 struct efx_filter_spec new_spec = *spec;
3317 new_spec.priority = EFX_FILTER_PRI_AUTO;
3318 new_spec.flags = (EFX_FILTER_FLAG_RX |
3319 (efx_rss_enabled(efx) ?
3320 EFX_FILTER_FLAG_RX_RSS : 0));
3321 new_spec.dmaq_id = 0;
3322 new_spec.rss_context = EFX_FILTER_RSS_CONTEXT_DEFAULT;
3323 rc = efx_ef10_filter_push(efx, &new_spec,
3324 &table->entry[filter_idx].handle,
3325 true);
3327 spin_lock_bh(&efx->filter_lock);
3328 if (rc == 0)
3329 *spec = new_spec;
3330 } else {
3331 /* Really remove the filter */
3333 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3334 efx_ef10_filter_is_exclusive(spec) ?
3335 MC_CMD_FILTER_OP_IN_OP_REMOVE :
3336 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3337 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3338 table->entry[filter_idx].handle);
3339 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP,
3340 inbuf, sizeof(inbuf), NULL, 0, NULL);
3342 spin_lock_bh(&efx->filter_lock);
3343 if (rc == 0) {
3344 kfree(spec);
3345 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3346 }
3347 }
3349 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
3350 wake_up_all(&table->waitq);
3351 out_unlock:
3352 spin_unlock_bh(&efx->filter_lock);
3353 finish_wait(&table->waitq, &wait);
3354 return rc;
3355 }
3357 static int efx_ef10_filter_remove_safe(struct efx_nic *efx,
3358 enum efx_filter_priority priority,
3359 u32 filter_id)
3360 {
3361 return efx_ef10_filter_remove_internal(efx, 1U << priority,
3362 filter_id, false);
3363 }
3365 static u32 efx_ef10_filter_get_unsafe_id(struct efx_nic *efx, u32 filter_id)
3366 {
3367 return filter_id % HUNT_FILTER_TBL_ROWS;
3368 }
3370 static int efx_ef10_filter_remove_unsafe(struct efx_nic *efx,
3371 enum efx_filter_priority priority,
3372 u32 filter_id)
3373 {
3374 return efx_ef10_filter_remove_internal(efx, 1U << priority,
3375 filter_id, true);
3376 }
3378 static int efx_ef10_filter_get_safe(struct efx_nic *efx,
3379 enum efx_filter_priority priority,
3380 u32 filter_id, struct efx_filter_spec *spec)
3382 unsigned int filter_idx = filter_id % HUNT_FILTER_TBL_ROWS;
3383 struct efx_ef10_filter_table *table = efx->filter_state;
3384 const struct efx_filter_spec *saved_spec;
3385 int rc;
3387 spin_lock_bh(&efx->filter_lock);
3388 saved_spec = efx_ef10_filter_entry_spec(table, filter_idx);
3389 if (saved_spec && saved_spec->priority == priority &&
3390 efx_ef10_filter_rx_match_pri(table, saved_spec->match_flags) ==
3391 filter_id / HUNT_FILTER_TBL_ROWS) {
3392 *spec = *saved_spec;
3393 rc = 0;
3394 } else {
3395 rc = -ENOENT;
3396 }
3397 spin_unlock_bh(&efx->filter_lock);
3398 return rc;
3399 }
3401 static int efx_ef10_filter_clear_rx(struct efx_nic *efx,
3402 enum efx_filter_priority priority)
3404 unsigned int priority_mask;
3405 int i;
3406 int rc;
3408 priority_mask = (((1U << (priority + 1)) - 1) &
3409 ~(1U << EFX_FILTER_PRI_AUTO));
3411 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
3412 rc = efx_ef10_filter_remove_internal(efx, priority_mask,
3413 i, true);
3414 if (rc && rc != -ENOENT)
3415 return rc;
3416 }
3418 return 0;
3419 }
3421 static u32 efx_ef10_filter_count_rx_used(struct efx_nic *efx,
3422 enum efx_filter_priority priority)
3424 struct efx_ef10_filter_table *table = efx->filter_state;
3425 unsigned int filter_idx;
3426 s32 count = 0;
3428 spin_lock_bh(&efx->filter_lock);
3429 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3430 if (table->entry[filter_idx].spec &&
3431 efx_ef10_filter_entry_spec(table, filter_idx)->priority ==
3432 priority)
3433 ++count;
3434 }
3435 spin_unlock_bh(&efx->filter_lock);
3436 return count;
3437 }
3439 static u32 efx_ef10_filter_get_rx_id_limit(struct efx_nic *efx)
3440 {
3441 struct efx_ef10_filter_table *table = efx->filter_state;
3443 return table->rx_match_count * HUNT_FILTER_TBL_ROWS;
3444 }
3446 static s32 efx_ef10_filter_get_rx_ids(struct efx_nic *efx,
3447 enum efx_filter_priority priority,
3448 u32 *buf, u32 size)
3449 {
3450 struct efx_ef10_filter_table *table = efx->filter_state;
3451 struct efx_filter_spec *spec;
3452 unsigned int filter_idx;
3453 s32 count = 0;
3455 spin_lock_bh(&efx->filter_lock);
3456 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3457 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3458 if (spec && spec->priority == priority) {
3459 if (count == size) {
3460 count = -EMSGSIZE;
3461 break;
3462 }
3463 buf[count++] = (efx_ef10_filter_rx_match_pri(
3464 table, spec->match_flags) *
3465 HUNT_FILTER_TBL_ROWS +
3466 filter_idx);
3467 }
3468 }
3469 spin_unlock_bh(&efx->filter_lock);
3470 return count;
3471 }
3473 #ifdef CONFIG_RFS_ACCEL
3475 static efx_mcdi_async_completer efx_ef10_filter_rfs_insert_complete;
3477 static s32 efx_ef10_filter_rfs_insert(struct efx_nic *efx,
3478 struct efx_filter_spec *spec)
3480 struct efx_ef10_filter_table *table = efx->filter_state;
3481 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3482 struct efx_filter_spec *saved_spec;
3483 unsigned int hash, i, depth = 1;
3484 bool replacing = false;
3485 int ins_index = -1;
3486 u64 cookie;
3487 s32 rc;
3489 /* Must be an RX filter without RSS and not for a multicast
3490 * destination address (RFS only works for connected sockets).
3491 * These restrictions allow us to pass only a tiny amount of
3492 * data through to the completion function.
3493 */
3494 EFX_WARN_ON_PARANOID(spec->flags !=
3495 (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_RX_SCATTER));
3496 EFX_WARN_ON_PARANOID(spec->priority != EFX_FILTER_PRI_HINT);
3497 EFX_WARN_ON_PARANOID(efx_filter_is_mc_recipient(spec));
3499 hash = efx_ef10_filter_hash(spec);
3501 spin_lock_bh(&efx->filter_lock);
3503 /* Find any existing filter with the same match tuple or else
3504 * a free slot to insert at. If an existing filter is busy,
3505 * we have to give up.
3506 */
3507 for (;;) {
3508 i = (hash + depth) & (HUNT_FILTER_TBL_ROWS - 1);
3509 saved_spec = efx_ef10_filter_entry_spec(table, i);
3511 if (!saved_spec) {
3512 if (ins_index < 0)
3513 ins_index = i;
3514 } else if (efx_ef10_filter_equal(spec, saved_spec)) {
3515 if (table->entry[i].spec & EFX_EF10_FILTER_FLAG_BUSY) {
3516 rc = -EBUSY;
3517 goto fail_unlock;
3518 }
3519 if (spec->priority < saved_spec->priority) {
3520 rc = -EPERM;
3521 goto fail_unlock;
3522 }
3523 ins_index = i;
3524 goto found;
3525 }
3527 /* Once we reach the maximum search depth, use the
3528 * first suitable slot or return -EBUSY if there was
3529 * none.
3530 */
3531 if (depth == EFX_EF10_FILTER_SEARCH_LIMIT) {
3532 if (ins_index < 0) {
3533 rc = -EBUSY;
3534 goto fail_unlock;
3535 }
3536 goto found;
3537 }
3539 ++depth;
3540 }
3541 found:
3542 /* Create a software table entry if necessary, and mark it
3543 * busy. We might yet fail to insert, but any attempt to
3544 * insert a conflicting filter while we're waiting for the
3545 * firmware must find the busy entry.
3546 */
3547 saved_spec = efx_ef10_filter_entry_spec(table, ins_index);
3548 if (saved_spec) {
3549 replacing = true;
3550 } else {
3551 saved_spec = kmalloc(sizeof(*spec), GFP_ATOMIC);
3552 if (!saved_spec) {
3553 rc = -ENOMEM;
3554 goto fail_unlock;
3555 }
3556 *saved_spec = *spec;
3557 }
3558 efx_ef10_filter_set_entry(table, ins_index, saved_spec,
3559 EFX_EF10_FILTER_FLAG_BUSY);
3561 spin_unlock_bh(&efx->filter_lock);
3563 /* Pack up the variables needed on completion */
3564 cookie = replacing << 31 | ins_index << 16 | spec->dmaq_id;
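/* Cookie layout, unpacked by efx_ef10_filter_rfs_insert_complete()
 * below:
 *   bit  31     replacing flag
 *   bits 30:16  ins_index (HUNT_FILTER_TBL_ROWS is 8192, so 13 bits
 *               suffice)
 *   bits 15:0   dmaq_id
 */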
3566 efx_ef10_filter_push_prep(efx, spec, inbuf,
3567 table->entry[ins_index].handle, replacing);
3568 efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3569 MC_CMD_FILTER_OP_OUT_LEN,
3570 efx_ef10_filter_rfs_insert_complete, cookie);
3572 return ins_index;
3574 fail_unlock:
3575 spin_unlock_bh(&efx->filter_lock);
3576 return rc;
3577 }
3579 static void
3580 efx_ef10_filter_rfs_insert_complete(struct efx_nic *efx, unsigned long cookie,
3581 int rc, efx_dword_t *outbuf,
3582 size_t outlen_actual)
3584 struct efx_ef10_filter_table *table = efx->filter_state;
3585 unsigned int ins_index, dmaq_id;
3586 struct efx_filter_spec *spec;
3587 bool replacing;
3589 /* Unpack the cookie */
3590 replacing = cookie >> 31;
3591 ins_index = (cookie >> 16) & (HUNT_FILTER_TBL_ROWS - 1);
3592 dmaq_id = cookie & 0xffff;
3594 spin_lock_bh(&efx->filter_lock);
3595 spec = efx_ef10_filter_entry_spec(table, ins_index);
3596 if (rc == 0) {
3597 table->entry[ins_index].handle =
3598 MCDI_QWORD(outbuf, FILTER_OP_OUT_HANDLE);
3599 if (replacing)
3600 spec->dmaq_id = dmaq_id;
3601 } else if (!replacing) {
3602 kfree(spec);
3603 spec = NULL;
3604 }
3605 efx_ef10_filter_set_entry(table, ins_index, spec, 0);
3606 spin_unlock_bh(&efx->filter_lock);
3608 wake_up_all(&table->waitq);
3609 }
3611 static void
3612 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
3613 unsigned long filter_idx,
3614 int rc, efx_dword_t *outbuf,
3615 size_t outlen_actual);
3617 static bool efx_ef10_filter_rfs_expire_one(struct efx_nic *efx, u32 flow_id,
3618 unsigned int filter_idx)
3620 struct efx_ef10_filter_table *table = efx->filter_state;
3621 struct efx_filter_spec *spec =
3622 efx_ef10_filter_entry_spec(table, filter_idx);
3623 MCDI_DECLARE_BUF(inbuf,
3624 MC_CMD_FILTER_OP_IN_HANDLE_OFST +
3625 MC_CMD_FILTER_OP_IN_HANDLE_LEN);
3627 if (!spec ||
3628 (table->entry[filter_idx].spec & EFX_EF10_FILTER_FLAG_BUSY) ||
3629 spec->priority != EFX_FILTER_PRI_HINT ||
3630 !rps_may_expire_flow(efx->net_dev, spec->dmaq_id,
3631 flow_id, filter_idx))
3632 return false;
3634 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3635 MC_CMD_FILTER_OP_IN_OP_REMOVE);
3636 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3637 table->entry[filter_idx].handle);
3638 if (efx_mcdi_rpc_async(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf), 0,
3639 efx_ef10_filter_rfs_expire_complete, filter_idx))
3640 return false;
3642 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3643 return true;
3644 }
3646 static void
3647 efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
3648 unsigned long filter_idx,
3649 int rc, efx_dword_t *outbuf,
3650 size_t outlen_actual)
3652 struct efx_ef10_filter_table *table = efx->filter_state;
3653 struct efx_filter_spec *spec =
3654 efx_ef10_filter_entry_spec(table, filter_idx);
3656 spin_lock_bh(&efx->filter_lock);
3657 if (rc == 0) {
3658 kfree(spec);
3659 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3660 }
3661 table->entry[filter_idx].spec &= ~EFX_EF10_FILTER_FLAG_BUSY;
3662 wake_up_all(&table->waitq);
3663 spin_unlock_bh(&efx->filter_lock);
3664 }
3666 #endif /* CONFIG_RFS_ACCEL */
3668 static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
3670 int match_flags = 0;
3672 #define MAP_FLAG(gen_flag, mcdi_field) { \
3673 u32 old_mcdi_flags = mcdi_flags; \
3674 mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
3675 mcdi_field ## _LBN); \
3676 if (mcdi_flags != old_mcdi_flags) \
3677 match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
3678 }
3679 MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
3680 MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
3681 MAP_FLAG(REM_HOST, SRC_IP);
3682 MAP_FLAG(LOC_HOST, DST_IP);
3683 MAP_FLAG(REM_MAC, SRC_MAC);
3684 MAP_FLAG(REM_PORT, SRC_PORT);
3685 MAP_FLAG(LOC_MAC, DST_MAC);
3686 MAP_FLAG(LOC_PORT, DST_PORT);
3687 MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
3688 MAP_FLAG(INNER_VID, INNER_VLAN);
3689 MAP_FLAG(OUTER_VID, OUTER_VLAN);
3690 MAP_FLAG(IP_PROTO, IP_PROTO);
3693 /* Did we map them all? */
3694 if (mcdi_flags)
3695 return -EINVAL;
3697 return match_flags;
3698 }
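/* Illustrative check (not driver code): a firmware match-flag word is
 * usable only if every bit maps to a driver flag, i.e. the function
 * above consumed all of mcdi_flags and returned a non-negative value.
 * The helper name is hypothetical.
 */
static inline bool efx_ef10_example_mcdi_flags_supported(u32 mcdi_flags)
{
	return efx_ef10_filter_match_flags_from_mcdi(mcdi_flags) >= 0;
}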
3700 static int efx_ef10_filter_table_probe(struct efx_nic *efx)
3702 MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
3703 MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
3704 unsigned int pd_match_pri, pd_match_count;
3705 struct efx_ef10_filter_table *table;
3706 size_t outlen;
3707 int rc;
3709 table = kzalloc(sizeof(*table), GFP_KERNEL);
3710 if (!table)
3711 return -ENOMEM;
3713 /* Find out which RX filter types are supported, and their priorities */
3714 MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
3715 MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
3716 rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
3717 inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
3718 &outlen);
3719 if (rc)
3720 goto fail;
3721 pd_match_count = MCDI_VAR_ARRAY_LEN(
3722 outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
3723 table->rx_match_count = 0;
3725 for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
3726 u32 mcdi_flags =
3727 MCDI_ARRAY_DWORD(
3728 outbuf,
3729 GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
3730 pd_match_pri);
3731 rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
3732 if (rc < 0) {
3733 netif_dbg(efx, probe, efx->net_dev,
3734 "%s: fw flags %#x pri %u not supported in driver\n",
3735 __func__, mcdi_flags, pd_match_pri);
3736 } else {
3737 netif_dbg(efx, probe, efx->net_dev,
3738 "%s: fw flags %#x pri %u supported as driver flags %#x pri %u\n",
3739 __func__, mcdi_flags, pd_match_pri,
3740 rc, table->rx_match_count);
3741 table->rx_match_flags[table->rx_match_count++] = rc;
3742 }
3743 }
3745 table->entry = vzalloc(HUNT_FILTER_TBL_ROWS * sizeof(*table->entry));
3746 if (!table->entry) {
3747 rc = -ENOMEM;
3748 goto fail;
3749 }
3751 table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
3752 table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
3753 table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
3755 efx->filter_state = table;
3756 init_waitqueue_head(&table->waitq);
3758 return 0;
3760 fail:
3761 kfree(table);
3762 return rc;
3763 }
3764 /* Caller must hold efx->filter_sem for read if race against
3765 * efx_ef10_filter_table_remove() is possible
3766 */
3767 static void efx_ef10_filter_table_restore(struct efx_nic *efx)
3769 struct efx_ef10_filter_table *table = efx->filter_state;
3770 struct efx_ef10_nic_data *nic_data = efx->nic_data;
3771 struct efx_filter_spec *spec;
3772 unsigned int filter_idx;
3773 bool failed = false;
3774 int rc;
3776 WARN_ON(!rwsem_is_locked(&efx->filter_sem));
3778 if (!nic_data->must_restore_filters)
3779 return;
3781 if (!table)
3782 return;
3784 spin_lock_bh(&efx->filter_lock);
3786 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3787 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3788 if (!spec)
3789 continue;
3791 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
3792 spin_unlock_bh(&efx->filter_lock);
3794 rc = efx_ef10_filter_push(efx, spec,
3795 &table->entry[filter_idx].handle,
3796 false);
3797 if (rc)
3798 failed = true;
3800 spin_lock_bh(&efx->filter_lock);
3801 if (rc) {
3802 kfree(spec);
3803 efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
3804 } else {
3805 table->entry[filter_idx].spec &=
3806 ~EFX_EF10_FILTER_FLAG_BUSY;
3807 }
3808 }
3810 spin_unlock_bh(&efx->filter_lock);
3812 if (failed)
3813 netif_err(efx, hw, efx->net_dev,
3814 "unable to restore all filters\n");
3815 else
3816 nic_data->must_restore_filters = false;
3817 }
3819 /* Caller must hold efx->filter_sem for write */
3820 static void efx_ef10_filter_table_remove(struct efx_nic *efx)
3822 struct efx_ef10_filter_table *table = efx->filter_state;
3823 MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
3824 struct efx_filter_spec *spec;
3825 unsigned int filter_idx;
3826 int rc;
3828 efx->filter_state = NULL;
3829 if (!table)
3830 return;
3832 for (filter_idx = 0; filter_idx < HUNT_FILTER_TBL_ROWS; filter_idx++) {
3833 spec = efx_ef10_filter_entry_spec(table, filter_idx);
3834 if (!spec)
3835 continue;
3837 MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
3838 efx_ef10_filter_is_exclusive(spec) ?
3839 MC_CMD_FILTER_OP_IN_OP_REMOVE :
3840 MC_CMD_FILTER_OP_IN_OP_UNSUBSCRIBE);
3841 MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE,
3842 table->entry[filter_idx].handle);
3843 rc = efx_mcdi_rpc(efx, MC_CMD_FILTER_OP, inbuf, sizeof(inbuf),
3844 NULL, 0, NULL);
3845 if (rc)
3846 netdev_WARN(efx->net_dev,
3847 "filter_idx=%#x handle=%#llx\n",
3849 table->entry[filter_idx].handle);
3850 kfree(spec);
3851 }
3853 vfree(table->entry);
3854 kfree(table);
3855 }
3857 #define EFX_EF10_FILTER_DO_MARK_OLD(id) \
3858 if (id != EFX_EF10_FILTER_ID_INVALID) { \
3859 filter_idx = efx_ef10_filter_get_unsafe_id(efx, id); \
3860 WARN_ON(!table->entry[filter_idx].spec); \
3861 table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_AUTO_OLD; \
3862 }
3863 static void efx_ef10_filter_mark_old(struct efx_nic *efx)
3865 struct efx_ef10_filter_table *table = efx->filter_state;
3866 unsigned int filter_idx, i;
3868 if (!table)
3869 return;
3871 /* Mark old filters that may need to be removed */
3872 spin_lock_bh(&efx->filter_lock);
3873 for (i = 0; i < table->dev_uc_count; i++)
3874 EFX_EF10_FILTER_DO_MARK_OLD(table->dev_uc_list[i].id);
3875 for (i = 0; i < table->dev_mc_count; i++)
3876 EFX_EF10_FILTER_DO_MARK_OLD(table->dev_mc_list[i].id);
3877 EFX_EF10_FILTER_DO_MARK_OLD(table->ucdef_id);
3878 EFX_EF10_FILTER_DO_MARK_OLD(table->bcast_id);
3879 EFX_EF10_FILTER_DO_MARK_OLD(table->mcdef_id);
3880 spin_unlock_bh(&efx->filter_lock);
3882 #undef EFX_EF10_FILTER_DO_MARK_OLD
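/* The AUTO_OLD machinery above is one half of a mark-and-sweep cycle:
 * efx_ef10_filter_mark_old() marks every filter backing the current
 * address lists, re-insertion through efx_ef10_filter_insert() clears
 * the mark on filters that are renewed, and
 * efx_ef10_filter_remove_old() finally sweeps whatever is still
 * marked.  See efx_ef10_filter_sync_rx_mode() for the full sequence.
 */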
3884 static void efx_ef10_filter_uc_addr_list(struct efx_nic *efx, bool *promisc)
3886 struct efx_ef10_filter_table *table = efx->filter_state;
3887 struct net_device *net_dev = efx->net_dev;
3888 struct netdev_hw_addr *uc;
3889 int addr_count;
3890 unsigned int i;
3892 table->ucdef_id = EFX_EF10_FILTER_ID_INVALID;
3893 addr_count = netdev_uc_count(net_dev);
3894 if (net_dev->flags & IFF_PROMISC)
3895 *promisc = true;
3896 table->dev_uc_count = 1 + addr_count;
3897 ether_addr_copy(table->dev_uc_list[0].addr, net_dev->dev_addr);
3898 i = 1;
3899 netdev_for_each_uc_addr(uc, net_dev) {
3900 if (i >= EFX_EF10_FILTER_DEV_UC_MAX) {
3901 *promisc = true;
3902 break;
3903 }
3904 ether_addr_copy(table->dev_uc_list[i].addr, uc->addr);
3905 table->dev_uc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
3906 i++;
3907 }
3908 }
3910 static void efx_ef10_filter_mc_addr_list(struct efx_nic *efx, bool *promisc)
3912 struct efx_ef10_filter_table *table = efx->filter_state;
3913 struct net_device *net_dev = efx->net_dev;
3914 struct netdev_hw_addr *mc;
3915 unsigned int i, addr_count;
3917 table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
3918 table->bcast_id = EFX_EF10_FILTER_ID_INVALID;
3919 if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI))
3920 *promisc = true;
3922 addr_count = netdev_mc_count(net_dev);
3923 i = 0;
3924 netdev_for_each_mc_addr(mc, net_dev) {
3925 if (i >= EFX_EF10_FILTER_DEV_MC_MAX) {
3926 *promisc = true;
3927 break;
3928 }
3929 ether_addr_copy(table->dev_mc_list[i].addr, mc->addr);
3930 table->dev_mc_list[i].id = EFX_EF10_FILTER_ID_INVALID;
3931 i++;
3932 }
3934 table->dev_mc_count = i;
3935 }
3937 static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
3938 bool multicast, bool rollback)
3940 struct efx_ef10_filter_table *table = efx->filter_state;
3941 struct efx_ef10_dev_addr *addr_list;
3942 enum efx_filter_flags filter_flags;
3943 struct efx_filter_spec spec;
3944 u8 baddr[ETH_ALEN];
3945 unsigned int i, j;
3946 int addr_count;
3947 int rc;
3949 if (multicast) {
3950 addr_list = table->dev_mc_list;
3951 addr_count = table->dev_mc_count;
3952 } else {
3953 addr_list = table->dev_uc_list;
3954 addr_count = table->dev_uc_count;
3955 }
3957 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
3959 /* Insert/renew filters */
3960 for (i = 0; i < addr_count; i++) {
3961 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
3962 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
3963 addr_list[i].addr);
3964 rc = efx_ef10_filter_insert(efx, &spec, true);
3965 if (rc < 0) {
3966 if (rollback) {
3967 netif_info(efx, drv, efx->net_dev,
3968 "efx_ef10_filter_insert failed rc=%d\n",
3970 /* Fall back to promiscuous */
3971 for (j = 0; j < i; j++) {
3972 if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
3974 efx_ef10_filter_remove_unsafe(
3975 efx, EFX_FILTER_PRI_AUTO,
3977 addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
3978 }
3979 return rc;
3980 } else {
3981 /* mark as not inserted, and carry on */
3982 rc = EFX_EF10_FILTER_ID_INVALID;
3983 }
3984 }
3985 addr_list[i].id = efx_ef10_filter_get_unsafe_id(efx, rc);
3986 }
3988 if (multicast && rollback) {
3989 /* Also need an Ethernet broadcast filter */
3990 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
3991 eth_broadcast_addr(baddr);
3992 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC, baddr);
3993 rc = efx_ef10_filter_insert(efx, &spec, true);
3994 if (rc < 0) {
3995 netif_warn(efx, drv, efx->net_dev,
3996 "Broadcast filter insert failed rc=%d\n", rc);
3997 /* Fall back to promiscuous */
3998 for (j = 0; j < i; j++) {
3999 if (addr_list[j].id == EFX_EF10_FILTER_ID_INVALID)
4001 efx_ef10_filter_remove_unsafe(
4002 efx, EFX_FILTER_PRI_AUTO,
4004 addr_list[j].id = EFX_EF10_FILTER_ID_INVALID;
4005 }
4006 return rc;
4007 } else {
4008 table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
4009 }
4010 }
4012 return 0;
4013 }
4015 static int efx_ef10_filter_insert_def(struct efx_nic *efx, bool multicast,
4016 bool rollback)
4017 {
4018 struct efx_ef10_filter_table *table = efx->filter_state;
4019 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4020 enum efx_filter_flags filter_flags;
4021 struct efx_filter_spec spec;
4022 u8 baddr[ETH_ALEN];
4023 int rc;
4025 filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
4027 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
4029 if (multicast)
4030 efx_filter_set_mc_def(&spec);
4031 else
4032 efx_filter_set_uc_def(&spec);
4034 rc = efx_ef10_filter_insert(efx, &spec, true);
4035 if (rc < 0) {
4036 netif_warn(efx, drv, efx->net_dev,
4037 "%scast mismatch filter insert failed rc=%d\n",
4038 multicast ? "Multi" : "Uni", rc);
4039 } else if (multicast) {
4040 table->mcdef_id = efx_ef10_filter_get_unsafe_id(efx, rc);
4041 if (!nic_data->workaround_26807) {
4042 /* Also need an Ethernet broadcast filter */
4043 efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
4044 filter_flags, 0);
4045 eth_broadcast_addr(baddr);
4046 efx_filter_set_eth_local(&spec, EFX_FILTER_VID_UNSPEC,
4048 rc = efx_ef10_filter_insert(efx, &spec, true);
4049 if (rc < 0) {
4050 netif_warn(efx, drv, efx->net_dev,
4051 "Broadcast filter insert failed rc=%d\n",
4052 rc);
4053 if (rollback) {
4054 /* Roll back the mc_def filter */
4055 efx_ef10_filter_remove_unsafe(
4056 efx, EFX_FILTER_PRI_AUTO,
4058 table->mcdef_id = EFX_EF10_FILTER_ID_INVALID;
4059 return rc;
4060 }
4061 } else {
4062 table->bcast_id = efx_ef10_filter_get_unsafe_id(efx, rc);
4063 }
4064 }
4065 rc = 0;
4066 } else {
4067 table->ucdef_id = rc;
4068 rc = 0;
4069 }
4071 return rc;
4072 }
4073 /* Remove filters that weren't renewed. Since nothing else changes the AUTO_OLD
4074 * flag or removes these filters, we don't need to hold the filter_lock while
4075 * scanning for these filters.
4076 */
4077 static void efx_ef10_filter_remove_old(struct efx_nic *efx)
4079 struct efx_ef10_filter_table *table = efx->filter_state;
4080 bool remove_failed = false;
4081 int i;
4083 for (i = 0; i < HUNT_FILTER_TBL_ROWS; i++) {
4084 if (ACCESS_ONCE(table->entry[i].spec) &
4085 EFX_EF10_FILTER_FLAG_AUTO_OLD) {
4086 if (efx_ef10_filter_remove_internal(
4087 efx, 1U << EFX_FILTER_PRI_AUTO,
4088 i, true) < 0)
4089 remove_failed = true;
4090 }
4091 }
4092 WARN_ON(remove_failed);
4093 }
4095 static int efx_ef10_vport_set_mac_address(struct efx_nic *efx)
4097 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4098 u8 mac_old[ETH_ALEN];
4099 int rc, rc2;
4101 /* Only reconfigure a PF-created vport */
4102 if (is_zero_ether_addr(nic_data->vport_mac))
4103 return 0;
4105 efx_device_detach_sync(efx);
4106 efx_net_stop(efx->net_dev);
4107 down_write(&efx->filter_sem);
4108 efx_ef10_filter_table_remove(efx);
4109 up_write(&efx->filter_sem);
4111 rc = efx_ef10_vadaptor_free(efx, nic_data->vport_id);
4112 if (rc)
4113 goto restore_filters;
4115 ether_addr_copy(mac_old, nic_data->vport_mac);
4116 rc = efx_ef10_vport_del_mac(efx, nic_data->vport_id,
4117 nic_data->vport_mac);
4118 if (rc)
4119 goto restore_vadaptor;
4121 rc = efx_ef10_vport_add_mac(efx, nic_data->vport_id,
4122 efx->net_dev->dev_addr);
4123 if (!rc) {
4124 ether_addr_copy(nic_data->vport_mac, efx->net_dev->dev_addr);
4125 } else {
4126 rc2 = efx_ef10_vport_add_mac(efx, nic_data->vport_id, mac_old);
4127 if (rc2) {
4128 /* Failed to add original MAC, so clear vport_mac */
4129 eth_zero_addr(nic_data->vport_mac);
4130 goto reset_nic;
4131 }
4132 }
4134 restore_vadaptor:
4135 rc2 = efx_ef10_vadaptor_alloc(efx, nic_data->vport_id);
4136 if (rc2)
4137 goto reset_nic;
4138 restore_filters:
4139 down_write(&efx->filter_sem);
4140 rc2 = efx_ef10_filter_table_probe(efx);
4141 up_write(&efx->filter_sem);
4142 if (rc2)
4143 goto reset_nic;
4145 rc2 = efx_net_open(efx->net_dev);
4146 if (rc2)
4147 goto reset_nic;
4149 netif_device_attach(efx->net_dev);
4151 return rc;
4153 reset_nic:
4154 netif_err(efx, drv, efx->net_dev,
4155 "Failed to restore when changing MAC address - scheduling reset\n");
4156 efx_schedule_reset(efx, RESET_TYPE_DATAPATH);
4158 return rc ? rc : rc2;
4159 }
4161 /* Caller must hold efx->filter_sem for read if race against
4162 * efx_ef10_filter_table_remove() is possible
4163 */
4164 static void efx_ef10_filter_sync_rx_mode(struct efx_nic *efx)
4166 struct efx_ef10_filter_table *table = efx->filter_state;
4167 struct efx_ef10_nic_data *nic_data = efx->nic_data;
4168 struct net_device *net_dev = efx->net_dev;
4169 bool uc_promisc = false, mc_promisc = false;
4171 if (!efx_dev_registered(efx))
4172 return;
4174 if (!table)
4175 return;
4177 efx_ef10_filter_mark_old(efx);
4179 /* Copy/convert the address lists; add the primary station
4180 * address and broadcast address
4181 */
4182 netif_addr_lock_bh(net_dev);
4183 efx_ef10_filter_uc_addr_list(efx, &uc_promisc);
4184 efx_ef10_filter_mc_addr_list(efx, &mc_promisc);
4185 netif_addr_unlock_bh(net_dev);
4187 /* Insert/renew unicast filters */
4188 if (uc_promisc) {
4189 efx_ef10_filter_insert_def(efx, false, false);
4190 efx_ef10_filter_insert_addr_list(efx, false, false);
4191 } else {
4192 /* If any of the filters failed to insert, fall back to
4193 * promiscuous mode - add in the uc_def filter. But keep
4194 * our individual unicast filters.
4195 */
4196 if (efx_ef10_filter_insert_addr_list(efx, false, false))
4197 efx_ef10_filter_insert_def(efx, false, false);
4198 }
4200 /* Insert/renew multicast filters */
4201 /* If changing promiscuous state with cascaded multicast filters, remove
4202 * old filters first, so that packets are dropped rather than duplicated
4203 */
4204 if (nic_data->workaround_26807 && efx->mc_promisc != mc_promisc)
4205 efx_ef10_filter_remove_old(efx);
4206 if (mc_promisc) {
4207 if (nic_data->workaround_26807) {
4208 /* If we failed to insert promiscuous filters, rollback
4209 * and fall back to individual multicast filters
4210 */
4211 if (efx_ef10_filter_insert_def(efx, true, true)) {
4212 /* Changing promisc state, so remove old filters */
4213 efx_ef10_filter_remove_old(efx);
4214 efx_ef10_filter_insert_addr_list(efx, true, false);
4215 }
4216 } else {
4217 /* If we failed to insert promiscuous filters, don't
4218 * rollback. Regardless, also insert the mc_list
4220 efx_ef10_filter_insert_def(efx, true, false);
4221 efx_ef10_filter_insert_addr_list(efx, true, false);
4224 /* If any filters failed to insert, rollback and fall back to
4225 * promiscuous mode - mc_def filter and maybe broadcast. If
4226 * that fails, roll back again and insert as many of our
4227 * individual multicast filters as we can.
4229 if (efx_ef10_filter_insert_addr_list(efx, true, true)) {
4230 /* Changing promisc state, so remove old filters */
4231 if (nic_data->workaround_26807)
4232 efx_ef10_filter_remove_old(efx);
4233 if (efx_ef10_filter_insert_def(efx, true, true))
4234 efx_ef10_filter_insert_addr_list(efx, true, false);
4238 efx_ef10_filter_remove_old(efx);
4239 efx->mc_promisc = mc_promisc;
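
/* Illustration (not compiled): a caller that can race against
 * efx_ef10_filter_table_remove() is expected to wrap the call above in the
 * filter_sem read lock:
 *
 *      down_read(&efx->filter_sem);
 *      efx_ef10_filter_sync_rx_mode(efx);
 *      up_read(&efx->filter_sem);
 *
 * Paths that destroy or rebuild the table, such as
 * efx_ef10_set_mac_address() below, take the same rwsem with down_write().
 */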
static int efx_ef10_set_mac_address(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_VADAPTOR_SET_MAC_IN_LEN);
        struct efx_ef10_nic_data *nic_data = efx->nic_data;
        bool was_enabled = efx->port_enabled;
        int rc;

        efx_device_detach_sync(efx);
        efx_net_stop(efx->net_dev);
        down_write(&efx->filter_sem);
        efx_ef10_filter_table_remove(efx);

        ether_addr_copy(MCDI_PTR(inbuf, VADAPTOR_SET_MAC_IN_MACADDR),
                        efx->net_dev->dev_addr);
        MCDI_SET_DWORD(inbuf, VADAPTOR_SET_MAC_IN_UPSTREAM_PORT_ID,
                       nic_data->vport_id);
        rc = efx_mcdi_rpc_quiet(efx, MC_CMD_VADAPTOR_SET_MAC, inbuf,
                                sizeof(inbuf), NULL, 0, NULL);

        efx_ef10_filter_table_probe(efx);
        up_write(&efx->filter_sem);
        if (was_enabled)
                efx_net_open(efx->net_dev);
        netif_device_attach(efx->net_dev);

#ifdef CONFIG_SFC_SRIOV
        if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
                struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;

                if (rc == -EPERM) {
                        struct efx_nic *efx_pf;

                        /* Switch to PF and change MAC address on vport */
                        efx_pf = pci_get_drvdata(pci_dev_pf);

                        rc = efx_ef10_sriov_set_vf_mac(efx_pf,
                                                       nic_data->vf_index,
                                                       efx->net_dev->dev_addr);
                } else if (!rc) {
                        struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
                        struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
                        unsigned int i;

                        /* MAC address successfully changed by VF (with MAC
                         * spoofing) so update the parent PF if possible.
                         */
                        for (i = 0; i < efx_pf->vf_count; ++i) {
                                struct ef10_vf *vf = nic_data->vf + i;

                                if (vf->efx == efx) {
                                        ether_addr_copy(vf->mac,
                                                        efx->net_dev->dev_addr);
                                        return 0;
                                }
                        }
                }
        } else
#endif
        if (rc == -EPERM) {
                netif_err(efx, drv, efx->net_dev,
                          "Cannot change MAC address; use sfboot to enable"
                          " mac-spoofing on this interface\n");
        } else if (rc == -ENOSYS && !efx_ef10_is_vf(efx)) {
                /* If the active MCFW does not support MC_CMD_VADAPTOR_SET_MAC
                 * fall-back to the method of changing the MAC address on the
                 * vport.  This only applies to PFs because such versions of
                 * MCFW do not support VFs.
                 */
                rc = efx_ef10_vport_set_mac_address(efx);
        } else {
                efx_mcdi_display_error(efx, MC_CMD_VADAPTOR_SET_MAC,
                                       sizeof(inbuf), NULL, 0, rc);
        }

        return rc;
}
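
/* Illustration (not compiled): both efx_nic_type instances at the end of
 * this file install the function above as their .set_mac_address hook.  The
 * core driver is assumed to invoke it along these lines, after validating
 * and copying the new address (see the real ndo_set_mac_address handler in
 * efx.c):
 *
 *      ether_addr_copy(net_dev->dev_addr, new_addr);
 *      if (efx->type->set_mac_address)
 *              rc = efx->type->set_mac_address(efx);
 */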
static int efx_ef10_mac_reconfigure(struct efx_nic *efx)
{
        efx_ef10_filter_sync_rx_mode(efx);

        return efx_mcdi_set_mac(efx);
}

static int efx_ef10_mac_reconfigure_vf(struct efx_nic *efx)
{
        efx_ef10_filter_sync_rx_mode(efx);

        return 0;
}
static int efx_ef10_start_bist(struct efx_nic *efx, u32 bist_type)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_START_BIST_IN_LEN);

        MCDI_SET_DWORD(inbuf, START_BIST_IN_TYPE, bist_type);
        return efx_mcdi_rpc(efx, MC_CMD_START_BIST, inbuf, sizeof(inbuf),
                            NULL, 0, NULL);
}
/* MC BISTs follow a different poll mechanism to phy BISTs.
 * The BIST is done in the poll handler on the MC, and the MCDI command
 * will block until the BIST is done.
 */
static int efx_ef10_poll_bist(struct efx_nic *efx)
{
        int rc;
        MCDI_DECLARE_BUF(outbuf, MC_CMD_POLL_BIST_OUT_LEN);
        size_t outlen;
        u32 result;

        rc = efx_mcdi_rpc(efx, MC_CMD_POLL_BIST, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc != 0)
                return rc;

        if (outlen < MC_CMD_POLL_BIST_OUT_LEN)
                return -EIO;

        result = MCDI_DWORD(outbuf, POLL_BIST_OUT_RESULT);
        switch (result) {
        case MC_CMD_POLL_BIST_PASSED:
                netif_dbg(efx, hw, efx->net_dev, "BIST passed.\n");
                return 0;
        case MC_CMD_POLL_BIST_TIMEOUT:
                netif_err(efx, hw, efx->net_dev, "BIST timed out\n");
                return -EIO;
        case MC_CMD_POLL_BIST_FAILED:
                netif_err(efx, hw, efx->net_dev, "BIST failed.\n");
                return -EIO;
        default:
                netif_err(efx, hw, efx->net_dev,
                          "BIST returned unknown result %u", result);
                return -EIO;
        }
}

static int efx_ef10_run_bist(struct efx_nic *efx, u32 bist_type)
{
        int rc;

        netif_dbg(efx, drv, efx->net_dev, "starting BIST type %u\n", bist_type);

        rc = efx_ef10_start_bist(efx, bist_type);
        if (rc != 0)
                return rc;

        return efx_ef10_poll_bist(efx);
}
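
/* Note: efx_ef10_run_bist() is synchronous because MC_CMD_POLL_BIST blocks
 * inside the MC until the test completes.  efx_ef10_test_chip() below uses
 * it to fill in the ethtool self-test results, e.g.
 *
 *      tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
 *
 * where 1 means passed and -1 failed, following the usual sfc self-test
 * convention.
 */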
static int
efx_ef10_test_chip(struct efx_nic *efx, struct efx_self_tests *tests)
{
        int rc, rc2;

        efx_reset_down(efx, RESET_TYPE_WORLD);

        rc = efx_mcdi_rpc(efx, MC_CMD_ENABLE_OFFLINE_BIST,
                          NULL, 0, NULL, 0, NULL);
        if (rc != 0)
                goto out;

        tests->memory = efx_ef10_run_bist(efx, MC_CMD_MC_MEM_BIST) ? -1 : 1;
        tests->registers = efx_ef10_run_bist(efx, MC_CMD_REG_BIST) ? -1 : 1;

        rc = efx_mcdi_reset(efx, RESET_TYPE_WORLD);

out:
        if (rc == -EPERM)
                rc = 0;
        rc2 = efx_reset_up(efx, RESET_TYPE_WORLD, rc == 0);
        return rc ? rc : rc2;
}
#ifdef CONFIG_SFC_MTD

struct efx_ef10_nvram_type_info {
        u16 type, type_mask;
        u8 port;
        const char *name;
};

static const struct efx_ef10_nvram_type_info efx_ef10_nvram_types[] = {
        { NVRAM_PARTITION_TYPE_MC_FIRMWARE, 0, 0, "sfc_mcfw" },
        { NVRAM_PARTITION_TYPE_MC_FIRMWARE_BACKUP, 0, 0, "sfc_mcfw_backup" },
        { NVRAM_PARTITION_TYPE_EXPANSION_ROM, 0, 0, "sfc_exp_rom" },
        { NVRAM_PARTITION_TYPE_STATIC_CONFIG, 0, 0, "sfc_static_cfg" },
        { NVRAM_PARTITION_TYPE_DYNAMIC_CONFIG, 0, 0, "sfc_dynamic_cfg" },
        { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT0, 0, 0, "sfc_exp_rom_cfg" },
        { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT1, 0, 1, "sfc_exp_rom_cfg" },
        { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT2, 0, 2, "sfc_exp_rom_cfg" },
        { NVRAM_PARTITION_TYPE_EXPROM_CONFIG_PORT3, 0, 3, "sfc_exp_rom_cfg" },
        { NVRAM_PARTITION_TYPE_LICENSE, 0, 0, "sfc_license" },
        { NVRAM_PARTITION_TYPE_PHY_MIN, 0xff, 0, "sfc_phy_fw" },
};

#define EF10_NVRAM_PARTITION_COUNT ARRAY_SIZE(efx_ef10_nvram_types)
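
/* Note: each entry's type/type_mask pair describes a range of partition
 * types; a candidate matches when (type & ~info->type_mask) == info->type,
 * as tested in efx_ef10_mtd_probe_partition() below.  So the "sfc_phy_fw"
 * entry, with mask 0xff, claims the whole
 * NVRAM_PARTITION_TYPE_PHY_MIN..PHY_MAX block, while the zero-mask entries
 * each match exactly one partition type.
 */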
static int efx_ef10_mtd_probe_partition(struct efx_nic *efx,
                                        struct efx_mcdi_mtd_partition *part,
                                        unsigned int type,
                                        unsigned long *found)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_NVRAM_METADATA_IN_LEN);
        MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_METADATA_OUT_LENMAX);
        const struct efx_ef10_nvram_type_info *info;
        size_t size, erase_size, outlen;
        bool protected;
        int rc;
        unsigned int type_idx;

        for (type_idx = 0; ; type_idx++) {
                if (type_idx == EF10_NVRAM_PARTITION_COUNT)
                        return -ENODEV;
                info = efx_ef10_nvram_types + type_idx;
                if ((type & ~info->type_mask) == info->type)
                        break;
        }
        if (info->port != efx_port_num(efx))
                return -ENODEV;

        rc = efx_mcdi_nvram_info(efx, type, &size, &erase_size, &protected);
        if (rc)
                return rc;
        if (protected)
                return -ENODEV; /* hide it */

        /* If we've already exposed a partition of this type, hide this
         * duplicate.  All operations on MTDs are keyed by the type anyway,
         * so we can't act on the duplicate.
         */
        if (__test_and_set_bit(type_idx, found))
                return -EEXIST;

        part->nvram_type = type;

        MCDI_SET_DWORD(inbuf, NVRAM_METADATA_IN_TYPE, type);
        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_METADATA, inbuf, sizeof(inbuf),
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_NVRAM_METADATA_OUT_LENMIN)
                return -EIO;
        if (MCDI_DWORD(outbuf, NVRAM_METADATA_OUT_FLAGS) &
            (1 << MC_CMD_NVRAM_METADATA_OUT_SUBTYPE_VALID_LBN))
                part->fw_subtype = MCDI_DWORD(outbuf,
                                              NVRAM_METADATA_OUT_SUBTYPE);

        part->common.dev_type_name = "EF10 NVRAM manager";
        part->common.type_name = info->name;

        part->common.mtd.type = MTD_NORFLASH;
        part->common.mtd.flags = MTD_CAP_NORFLASH;
        part->common.mtd.size = size;
        part->common.mtd.erasesize = erase_size;

        return 0;
}
static int efx_ef10_mtd_probe(struct efx_nic *efx)
{
        MCDI_DECLARE_BUF(outbuf, MC_CMD_NVRAM_PARTITIONS_OUT_LENMAX);
        DECLARE_BITMAP(found, EF10_NVRAM_PARTITION_COUNT) = { 0 };
        struct efx_mcdi_mtd_partition *parts;
        size_t outlen, n_parts_total, i, n_parts;
        unsigned int type;
        int rc;

        BUILD_BUG_ON(MC_CMD_NVRAM_PARTITIONS_IN_LEN != 0);
        rc = efx_mcdi_rpc(efx, MC_CMD_NVRAM_PARTITIONS, NULL, 0,
                          outbuf, sizeof(outbuf), &outlen);
        if (rc)
                return rc;
        if (outlen < MC_CMD_NVRAM_PARTITIONS_OUT_LENMIN)
                return -EIO;

        n_parts_total = MCDI_DWORD(outbuf, NVRAM_PARTITIONS_OUT_NUM_PARTITIONS);
        if (n_parts_total >
            MCDI_VAR_ARRAY_LEN(outlen, NVRAM_PARTITIONS_OUT_TYPE_ID))
                return -EIO;

        parts = kcalloc(n_parts_total, sizeof(*parts), GFP_KERNEL);
        if (!parts)
                return -ENOMEM;

        n_parts = 0;
        for (i = 0; i < n_parts_total; i++) {
                type = MCDI_ARRAY_DWORD(outbuf, NVRAM_PARTITIONS_OUT_TYPE_ID,
                                        i);
                rc = efx_ef10_mtd_probe_partition(efx, &parts[n_parts], type,
                                                  found);
                if (rc == -EEXIST || rc == -ENODEV)
                        continue;
                if (rc)
                        goto fail;
                n_parts++;
        }

        rc = efx_mtd_add(efx, &parts[0].common, n_parts, sizeof(*parts));
fail:
        if (rc)
                kfree(parts);
        return rc;
}

#endif /* CONFIG_SFC_MTD */
static void efx_ef10_ptp_write_host_time(struct efx_nic *efx, u32 host_time)
{
        _efx_writed(efx, cpu_to_le32(host_time), ER_DZ_MC_DB_LWRD);
}

static void efx_ef10_ptp_write_host_time_vf(struct efx_nic *efx,
                                            u32 host_time) {}
static int efx_ef10_rx_enable_timestamping(struct efx_channel *channel,
                                           bool temp)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_SUBSCRIBE_LEN);
        int rc;

        if (channel->sync_events_state == SYNC_EVENTS_REQUESTED ||
            channel->sync_events_state == SYNC_EVENTS_VALID ||
            (temp && channel->sync_events_state == SYNC_EVENTS_DISABLED))
                return 0;
        channel->sync_events_state = SYNC_EVENTS_REQUESTED;

        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_SUBSCRIBE);
        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
        MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_SUBSCRIBE_QUEUE,
                       channel->channel);

        rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
                          inbuf, sizeof(inbuf), NULL, 0, NULL);

        if (rc != 0)
                channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
                                                    SYNC_EVENTS_DISABLED;

        return rc;
}
static int efx_ef10_rx_disable_timestamping(struct efx_channel *channel,
                                            bool temp)
{
        MCDI_DECLARE_BUF(inbuf, MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_LEN);
        int rc;

        if (channel->sync_events_state == SYNC_EVENTS_DISABLED ||
            (temp && channel->sync_events_state == SYNC_EVENTS_QUIESCENT))
                return 0;
        if (channel->sync_events_state == SYNC_EVENTS_QUIESCENT) {
                channel->sync_events_state = SYNC_EVENTS_DISABLED;
                return 0;
        }
        channel->sync_events_state = temp ? SYNC_EVENTS_QUIESCENT :
                                            SYNC_EVENTS_DISABLED;

        MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_TIME_EVENT_UNSUBSCRIBE);
        MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
        MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_CONTROL,
                       MC_CMD_PTP_IN_TIME_EVENT_UNSUBSCRIBE_SINGLE);
        MCDI_SET_DWORD(inbuf, PTP_IN_TIME_EVENT_UNSUBSCRIBE_QUEUE,
                       channel->channel);

        rc = efx_mcdi_rpc(channel->efx, MC_CMD_PTP,
                          inbuf, sizeof(inbuf), NULL, 0, NULL);

        return rc;
}
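
/* Note: the two helpers above drive a small per-channel state machine over
 * channel->sync_events_state (presumably the SYNC_EVENTS_* values from
 * net_driver.h):
 *
 *      enable:         DISABLED or QUIESCENT -> REQUESTED
 *                      (a temporary enable leaves DISABLED channels alone)
 *      disable:        REQUESTED, VALID or QUIESCENT -> DISABLED
 *      disable(temp):  REQUESTED or VALID -> QUIESCENT, so a later temporary
 *                      enable (e.g. around a reset) can re-subscribe them
 *
 * REQUESTED is promoted to VALID elsewhere, by the event path, once time
 * sync events start arriving for the queue.
 */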
static int efx_ef10_ptp_set_ts_sync_events(struct efx_nic *efx, bool en,
                                           bool temp)
{
        int (*set)(struct efx_channel *channel, bool temp);
        struct efx_channel *channel;

        set = en ?
              efx_ef10_rx_enable_timestamping :
              efx_ef10_rx_disable_timestamping;

        efx_for_each_channel(channel, efx) {
                int rc = set(channel, temp);
                if (en && rc != 0) {
                        efx_ef10_ptp_set_ts_sync_events(efx, false, temp);
                        return rc;
                }
        }

        return 0;
}
static int efx_ef10_ptp_set_ts_config_vf(struct efx_nic *efx,
                                         struct hwtstamp_config *init)
{
        return -EOPNOTSUPP;
}
static int efx_ef10_ptp_set_ts_config(struct efx_nic *efx,
                                      struct hwtstamp_config *init)
{
        int rc;

        switch (init->rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                efx_ef10_ptp_set_ts_sync_events(efx, false, false);
                /* if TX timestamping is still requested then leave PTP on */
                return efx_ptp_change_mode(efx,
                                           init->tx_type != HWTSTAMP_TX_OFF, 0);
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
                init->rx_filter = HWTSTAMP_FILTER_ALL;
                rc = efx_ptp_change_mode(efx, true, 0);
                if (!rc)
                        rc = efx_ef10_ptp_set_ts_sync_events(efx, true, false);
                if (rc)
                        efx_ptp_change_mode(efx, false, 0);
                return rc;
        default:
                return -ERANGE;
        }
}
const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
        .is_vf = true,
        .mem_bar = EFX_MEM_VF_BAR,
        .mem_map_size = efx_ef10_mem_map_size,
        .probe = efx_ef10_probe_vf,
        .remove = efx_ef10_remove,
        .dimension_resources = efx_ef10_dimension_resources,
        .init = efx_ef10_init_nic,
        .fini = efx_port_dummy_op_void,
        .map_reset_reason = efx_ef10_map_reset_reason,
        .map_reset_flags = efx_ef10_map_reset_flags,
        .reset = efx_ef10_reset,
        .probe_port = efx_mcdi_port_probe,
        .remove_port = efx_mcdi_port_remove,
        .fini_dmaq = efx_ef10_fini_dmaq,
        .prepare_flr = efx_ef10_prepare_flr,
        .finish_flr = efx_port_dummy_op_void,
        .describe_stats = efx_ef10_describe_stats,
        .update_stats = efx_ef10_update_stats_vf,
        .start_stats = efx_port_dummy_op_void,
        .pull_stats = efx_port_dummy_op_void,
        .stop_stats = efx_port_dummy_op_void,
        .set_id_led = efx_mcdi_set_id_led,
        .push_irq_moderation = efx_ef10_push_irq_moderation,
        .reconfigure_mac = efx_ef10_mac_reconfigure_vf,
        .check_mac_fault = efx_mcdi_mac_check_fault,
        .reconfigure_port = efx_mcdi_port_reconfigure,
        .get_wol = efx_ef10_get_wol_vf,
        .set_wol = efx_ef10_set_wol_vf,
        .resume_wol = efx_port_dummy_op_void,
        .mcdi_request = efx_ef10_mcdi_request,
        .mcdi_poll_response = efx_ef10_mcdi_poll_response,
        .mcdi_read_response = efx_ef10_mcdi_read_response,
        .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
        .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
        .irq_enable_master = efx_port_dummy_op_void,
        .irq_test_generate = efx_ef10_irq_test_generate,
        .irq_disable_non_ev = efx_port_dummy_op_void,
        .irq_handle_msi = efx_ef10_msi_interrupt,
        .irq_handle_legacy = efx_ef10_legacy_interrupt,
        .tx_probe = efx_ef10_tx_probe,
        .tx_init = efx_ef10_tx_init,
        .tx_remove = efx_ef10_tx_remove,
        .tx_write = efx_ef10_tx_write,
        .rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
        .rx_probe = efx_ef10_rx_probe,
        .rx_init = efx_ef10_rx_init,
        .rx_remove = efx_ef10_rx_remove,
        .rx_write = efx_ef10_rx_write,
        .rx_defer_refill = efx_ef10_rx_defer_refill,
        .ev_probe = efx_ef10_ev_probe,
        .ev_init = efx_ef10_ev_init,
        .ev_fini = efx_ef10_ev_fini,
        .ev_remove = efx_ef10_ev_remove,
        .ev_process = efx_ef10_ev_process,
        .ev_read_ack = efx_ef10_ev_read_ack,
        .ev_test_generate = efx_ef10_ev_test_generate,
        .filter_table_probe = efx_ef10_filter_table_probe,
        .filter_table_restore = efx_ef10_filter_table_restore,
        .filter_table_remove = efx_ef10_filter_table_remove,
        .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
        .filter_insert = efx_ef10_filter_insert,
        .filter_remove_safe = efx_ef10_filter_remove_safe,
        .filter_get_safe = efx_ef10_filter_get_safe,
        .filter_clear_rx = efx_ef10_filter_clear_rx,
        .filter_count_rx_used = efx_ef10_filter_count_rx_used,
        .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
        .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
        .filter_rfs_insert = efx_ef10_filter_rfs_insert,
        .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
        .mtd_probe = efx_port_dummy_op_int,
#endif
        .ptp_write_host_time = efx_ef10_ptp_write_host_time_vf,
        .ptp_set_ts_config = efx_ef10_ptp_set_ts_config_vf,
#ifdef CONFIG_SFC_SRIOV
        .vswitching_probe = efx_ef10_vswitching_probe_vf,
        .vswitching_restore = efx_ef10_vswitching_restore_vf,
        .vswitching_remove = efx_ef10_vswitching_remove_vf,
        .sriov_get_phys_port_id = efx_ef10_sriov_get_phys_port_id,
#endif
        .get_mac_address = efx_ef10_get_mac_address_vf,
        .set_mac_address = efx_ef10_set_mac_address,

        .revision = EFX_REV_HUNT_A0,
        .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
        .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
        .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
        .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
        .can_rx_scatter = true,
        .always_rx_scatter = true,
        .max_interrupt_mode = EFX_INT_MODE_MSIX,
        .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
        .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                             NETIF_F_RXHASH | NETIF_F_NTUPLE),
        .mcdi_max_ver = 2,
        .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
        .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
                            1 << HWTSTAMP_FILTER_ALL,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
        .is_vf = false,
        .mem_bar = EFX_MEM_BAR,
        .mem_map_size = efx_ef10_mem_map_size,
        .probe = efx_ef10_probe_pf,
        .remove = efx_ef10_remove,
        .dimension_resources = efx_ef10_dimension_resources,
        .init = efx_ef10_init_nic,
        .fini = efx_port_dummy_op_void,
        .map_reset_reason = efx_ef10_map_reset_reason,
        .map_reset_flags = efx_ef10_map_reset_flags,
        .reset = efx_ef10_reset,
        .probe_port = efx_mcdi_port_probe,
        .remove_port = efx_mcdi_port_remove,
        .fini_dmaq = efx_ef10_fini_dmaq,
        .prepare_flr = efx_ef10_prepare_flr,
        .finish_flr = efx_port_dummy_op_void,
        .describe_stats = efx_ef10_describe_stats,
        .update_stats = efx_ef10_update_stats_pf,
        .start_stats = efx_mcdi_mac_start_stats,
        .pull_stats = efx_mcdi_mac_pull_stats,
        .stop_stats = efx_mcdi_mac_stop_stats,
        .set_id_led = efx_mcdi_set_id_led,
        .push_irq_moderation = efx_ef10_push_irq_moderation,
        .reconfigure_mac = efx_ef10_mac_reconfigure,
        .check_mac_fault = efx_mcdi_mac_check_fault,
        .reconfigure_port = efx_mcdi_port_reconfigure,
        .get_wol = efx_ef10_get_wol,
        .set_wol = efx_ef10_set_wol,
        .resume_wol = efx_port_dummy_op_void,
        .test_chip = efx_ef10_test_chip,
        .test_nvram = efx_mcdi_nvram_test_all,
        .mcdi_request = efx_ef10_mcdi_request,
        .mcdi_poll_response = efx_ef10_mcdi_poll_response,
        .mcdi_read_response = efx_ef10_mcdi_read_response,
        .mcdi_poll_reboot = efx_ef10_mcdi_poll_reboot,
        .mcdi_reboot_detected = efx_ef10_mcdi_reboot_detected,
        .irq_enable_master = efx_port_dummy_op_void,
        .irq_test_generate = efx_ef10_irq_test_generate,
        .irq_disable_non_ev = efx_port_dummy_op_void,
        .irq_handle_msi = efx_ef10_msi_interrupt,
        .irq_handle_legacy = efx_ef10_legacy_interrupt,
        .tx_probe = efx_ef10_tx_probe,
        .tx_init = efx_ef10_tx_init,
        .tx_remove = efx_ef10_tx_remove,
        .tx_write = efx_ef10_tx_write,
        .rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
        .rx_probe = efx_ef10_rx_probe,
        .rx_init = efx_ef10_rx_init,
        .rx_remove = efx_ef10_rx_remove,
        .rx_write = efx_ef10_rx_write,
        .rx_defer_refill = efx_ef10_rx_defer_refill,
        .ev_probe = efx_ef10_ev_probe,
        .ev_init = efx_ef10_ev_init,
        .ev_fini = efx_ef10_ev_fini,
        .ev_remove = efx_ef10_ev_remove,
        .ev_process = efx_ef10_ev_process,
        .ev_read_ack = efx_ef10_ev_read_ack,
        .ev_test_generate = efx_ef10_ev_test_generate,
        .filter_table_probe = efx_ef10_filter_table_probe,
        .filter_table_restore = efx_ef10_filter_table_restore,
        .filter_table_remove = efx_ef10_filter_table_remove,
        .filter_update_rx_scatter = efx_ef10_filter_update_rx_scatter,
        .filter_insert = efx_ef10_filter_insert,
        .filter_remove_safe = efx_ef10_filter_remove_safe,
        .filter_get_safe = efx_ef10_filter_get_safe,
        .filter_clear_rx = efx_ef10_filter_clear_rx,
        .filter_count_rx_used = efx_ef10_filter_count_rx_used,
        .filter_get_rx_id_limit = efx_ef10_filter_get_rx_id_limit,
        .filter_get_rx_ids = efx_ef10_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
        .filter_rfs_insert = efx_ef10_filter_rfs_insert,
        .filter_rfs_expire_one = efx_ef10_filter_rfs_expire_one,
#endif
#ifdef CONFIG_SFC_MTD
        .mtd_probe = efx_ef10_mtd_probe,
        .mtd_rename = efx_mcdi_mtd_rename,
        .mtd_read = efx_mcdi_mtd_read,
        .mtd_erase = efx_mcdi_mtd_erase,
        .mtd_write = efx_mcdi_mtd_write,
        .mtd_sync = efx_mcdi_mtd_sync,
#endif
        .ptp_write_host_time = efx_ef10_ptp_write_host_time,
        .ptp_set_ts_sync_events = efx_ef10_ptp_set_ts_sync_events,
        .ptp_set_ts_config = efx_ef10_ptp_set_ts_config,
#ifdef CONFIG_SFC_SRIOV
        .sriov_configure = efx_ef10_sriov_configure,
        .sriov_init = efx_ef10_sriov_init,
        .sriov_fini = efx_ef10_sriov_fini,
        .sriov_wanted = efx_ef10_sriov_wanted,
        .sriov_reset = efx_ef10_sriov_reset,
        .sriov_flr = efx_ef10_sriov_flr,
        .sriov_set_vf_mac = efx_ef10_sriov_set_vf_mac,
        .sriov_set_vf_vlan = efx_ef10_sriov_set_vf_vlan,
        .sriov_set_vf_spoofchk = efx_ef10_sriov_set_vf_spoofchk,
        .sriov_get_vf_config = efx_ef10_sriov_get_vf_config,
        .sriov_set_vf_link_state = efx_ef10_sriov_set_vf_link_state,
        .vswitching_probe = efx_ef10_vswitching_probe_pf,
        .vswitching_restore = efx_ef10_vswitching_restore_pf,
        .vswitching_remove = efx_ef10_vswitching_remove_pf,
#endif
        .get_mac_address = efx_ef10_get_mac_address_pf,
        .set_mac_address = efx_ef10_set_mac_address,

        .revision = EFX_REV_HUNT_A0,
        .max_dma_mask = DMA_BIT_MASK(ESF_DZ_TX_KER_BUF_ADDR_WIDTH),
        .rx_prefix_size = ES_DZ_RX_PREFIX_SIZE,
        .rx_hash_offset = ES_DZ_RX_PREFIX_HASH_OFST,
        .rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
        .can_rx_scatter = true,
        .always_rx_scatter = true,
        .max_interrupt_mode = EFX_INT_MODE_MSIX,
        .timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
        .offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                             NETIF_F_RXHASH | NETIF_F_NTUPLE),
        .mcdi_max_ver = 2,
        .max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
        .hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
                            1 << HWTSTAMP_FILTER_ALL,
};