/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2006-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/crc32.h>
#include "net_driver.h"
#include "bitfield.h"
#include "efx.h"
#include "nic.h"
#include "farch_regs.h"
#include "io.h"
#include "workarounds.h"
/* Falcon-architecture (SFC4000) support */

/**************************************************************************
 *
 * Configurable values
 *
 **************************************************************************
 */
/* This is set to 16 for a good reason.  In summary, if larger than
 * 16, the descriptor cache holds more than a default socket
 * buffer's worth of packets (for UDP we can only have at most one
 * socket buffer's worth outstanding).  This combined with the fact
 * that we only get 1 TX event per descriptor cache means the NIC
 * goes idle.
 */
#define TX_DC_ENTRIES 16
#define TX_DC_ENTRIES_ORDER 1

#define RX_DC_ENTRIES 64
#define RX_DC_ENTRIES_ORDER 3
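
/* Note: the *_ORDER values encode these sizes as (8 << order); see the
 * BUILD_BUG_ON()s in ef4_farch_init_common(), which check 16 == 8 << 1
 * and 64 == 8 << 3 before writing the orders to FR_AZ_TX_DC_CFG and
 * FR_AZ_RX_DC_CFG.
 */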
/* If EF4_MAX_INT_ERRORS internal errors occur within
 * EF4_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
 * disable it.
 */
#define EF4_INT_ERROR_EXPIRE 3600
#define EF4_MAX_INT_ERRORS 5

/* Depth of RX flush request fifo */
#define EF4_RX_FLUSH_COUNT 4
/* Driver generated events */
#define _EF4_CHANNEL_MAGIC_TEST		0x000101
#define _EF4_CHANNEL_MAGIC_FILL		0x000102
#define _EF4_CHANNEL_MAGIC_RX_DRAIN	0x000103
#define _EF4_CHANNEL_MAGIC_TX_DRAIN	0x000104

#define _EF4_CHANNEL_MAGIC(_code, _data)	((_code) << 8 | (_data))
#define _EF4_CHANNEL_MAGIC_CODE(_magic)		((_magic) >> 8)

#define EF4_CHANNEL_MAGIC_TEST(_channel)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TEST, (_channel)->channel)
#define EF4_CHANNEL_MAGIC_FILL(_rx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_FILL,			\
			   ef4_rx_queue_index(_rx_queue))
#define EF4_CHANNEL_MAGIC_RX_DRAIN(_rx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_RX_DRAIN,			\
			   ef4_rx_queue_index(_rx_queue))
#define EF4_CHANNEL_MAGIC_TX_DRAIN(_tx_queue)				\
	_EF4_CHANNEL_MAGIC(_EF4_CHANNEL_MAGIC_TX_DRAIN,			\
			   (_tx_queue)->queue)
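
/* Worked example (illustrative): for channel 3, EF4_CHANNEL_MAGIC_TEST()
 * evaluates to (0x000101 << 8) | 3 = 0x00010103.  _EF4_CHANNEL_MAGIC_CODE()
 * recovers the code 0x000101, and the low 8 bits carry the channel or
 * queue index that generated the event.
 */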
static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic);

/**************************************************************************
 *
 * Hardware access
 *
 **************************************************************************/
static inline void ef4_write_buf_tbl(struct ef4_nic *efx, ef4_qword_t *value,
				     unsigned int index)
{
	ef4_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
			value, index);
}

static bool ef4_masked_compare_oword(const ef4_oword_t *a, const ef4_oword_t *b,
				     const ef4_oword_t *mask)
{
	return ((a->u64[0] ^ b->u64[0]) & mask->u64[0]) ||
		((a->u64[1] ^ b->u64[1]) & mask->u64[1]);
}
int ef4_farch_test_registers(struct ef4_nic *efx,
			     const struct ef4_farch_register_test *regs,
			     size_t n_regs)
{
	unsigned address = 0;
	int i, j;
	ef4_oword_t mask, imask, original, reg, buf;

	for (i = 0; i < n_regs; ++i) {
		address = regs[i].address;
		mask = imask = regs[i].mask;
		EF4_INVERT_OWORD(imask);

		ef4_reado(efx, &original, address);

		/* bit sweep on and off */
		for (j = 0; j < 128; j++) {
			if (!EF4_EXTRACT_OWORD32(mask, j, j))
				continue;

			/* Test this testable bit can be set in isolation */
			EF4_AND_OWORD(reg, original, mask);
			EF4_SET_OWORD32(reg, j, j, 1);

			ef4_writeo(efx, &reg, address);
			ef4_reado(efx, &buf, address);

			if (ef4_masked_compare_oword(&reg, &buf, &mask))
				goto fail;

			/* Test this testable bit can be cleared in isolation */
			EF4_OR_OWORD(reg, original, mask);
			EF4_SET_OWORD32(reg, j, j, 0);

			ef4_writeo(efx, &reg, address);
			ef4_reado(efx, &buf, address);

			if (ef4_masked_compare_oword(&reg, &buf, &mask))
				goto fail;
		}

		ef4_writeo(efx, &original, address);
	}

	return 0;

fail:
	netif_err(efx, hw, efx->net_dev,
		  "wrote "EF4_OWORD_FMT" read "EF4_OWORD_FMT
		  " at address 0x%x mask "EF4_OWORD_FMT"\n", EF4_OWORD_VAL(reg),
		  EF4_OWORD_VAL(buf), address, EF4_OWORD_VAL(mask));
	return -EIO;
}
/**************************************************************************
 *
 * Special buffer handling
 * Special buffers are used for event queues and the TX and RX
 * descriptor rings.
 *
 *************************************************************************/
/*
 * Initialise a special buffer
 *
 * This will define a buffer (previously allocated via
 * ef4_alloc_special_buffer()) in the buffer table, allowing
 * it to be used for event queues, descriptor rings etc.
 */
static void
ef4_init_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_qword_t buf_desc;
	unsigned int index;
	dma_addr_t dma_addr;
	int i;

	EF4_BUG_ON_PARANOID(!buffer->buf.addr);

	/* Write buffer descriptors to NIC */
	for (i = 0; i < buffer->entries; i++) {
		index = buffer->index + i;
		dma_addr = buffer->buf.dma_addr + (i * EF4_BUF_SIZE);
		netif_dbg(efx, probe, efx->net_dev,
			  "mapping special buffer %d at %llx\n",
			  index, (unsigned long long)dma_addr);
		EF4_POPULATE_QWORD_3(buf_desc,
				     FRF_AZ_BUF_ADR_REGION, 0,
				     FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
				     FRF_AZ_BUF_OWNER_ID_FBUF, 0);
		ef4_write_buf_tbl(efx, &buf_desc, index);
	}
}
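
/* Example (illustrative): buffer table entries address 4KB pages, hence
 * FRF_AZ_BUF_ADR_FBUF is dma_addr >> 12.  A two-page ring at DMA address
 * 0x30000000 would program FBUF values 0x30000 and 0x30001 into
 * consecutive buffer table entries.
 */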
/* Unmaps a buffer and clears the buffer table entries */
static void
ef4_fini_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	ef4_oword_t buf_tbl_upd;
	unsigned int start = buffer->index;
	unsigned int end = (buffer->index + buffer->entries - 1);

	if (!buffer->entries)
		return;

	netif_dbg(efx, hw, efx->net_dev, "unmapping special buffers %d-%d\n",
		  buffer->index, buffer->index + buffer->entries - 1);

	EF4_POPULATE_OWORD_4(buf_tbl_upd,
			     FRF_AZ_BUF_UPD_CMD, 0,
			     FRF_AZ_BUF_CLR_CMD, 1,
			     FRF_AZ_BUF_CLR_END_ID, end,
			     FRF_AZ_BUF_CLR_START_ID, start);
	ef4_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
}
/*
 * Allocate a new special buffer
 *
 * This allocates memory for a new buffer, clears it and allocates a
 * new buffer ID range.  It does not write into the buffer table.
 *
 * This call will allocate 4KB buffers, since 8KB buffers can't be
 * used for event queues and descriptor rings.
 */
static int ef4_alloc_special_buffer(struct ef4_nic *efx,
				    struct ef4_special_buffer *buffer,
				    unsigned int len)
{
	len = ALIGN(len, EF4_BUF_SIZE);

	if (ef4_nic_alloc_buffer(efx, &buffer->buf, len, GFP_KERNEL))
		return -ENOMEM;
	buffer->entries = len / EF4_BUF_SIZE;
	BUG_ON(buffer->buf.dma_addr & (EF4_BUF_SIZE - 1));

	/* Select new buffer ID */
	buffer->index = efx->next_buffer_table;
	efx->next_buffer_table += buffer->entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "allocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	return 0;
}
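
/* Sizing example (illustrative): a 1024-entry descriptor ring of 8-byte
 * (qword) descriptors occupies 8192 bytes, i.e. two EF4_BUF_SIZE pages,
 * so buffer->entries == 2 and two consecutive buffer IDs are consumed
 * from efx->next_buffer_table.
 */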
static void
ef4_free_special_buffer(struct ef4_nic *efx, struct ef4_special_buffer *buffer)
{
	if (!buffer->buf.addr)
		return;

	netif_dbg(efx, hw, efx->net_dev,
		  "deallocating special buffers %d-%d at %llx+%x "
		  "(virt %p phys %llx)\n", buffer->index,
		  buffer->index + buffer->entries - 1,
		  (u64)buffer->buf.dma_addr, buffer->buf.len,
		  buffer->buf.addr, (u64)virt_to_phys(buffer->buf.addr));

	ef4_nic_free_buffer(efx, &buffer->buf);
	buffer->entries = 0;
}
/**************************************************************************
 *
 * TX path
 *
 **************************************************************************/
/* This writes to the TX_DESC_WPTR; write pointer for TX descriptor ring */
static inline void ef4_farch_notify_tx_desc(struct ef4_tx_queue *tx_queue)
{
	unsigned write_ptr;
	ef4_dword_t reg;

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EF4_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
	ef4_writed_page(tx_queue->efx, &reg,
			FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
}
/* Write pointer and first descriptor for TX descriptor ring */
static inline void ef4_farch_push_tx_desc(struct ef4_tx_queue *tx_queue,
					  const ef4_qword_t *txd)
{
	unsigned write_ptr;
	ef4_oword_t reg;

	BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
	BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);

	write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
	EF4_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
			     FRF_AZ_TX_DESC_WPTR, write_ptr);
	reg.qword[0] = *txd;
	ef4_writeo_page(tx_queue->efx, &reg,
			FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
}
/* For each entry inserted into the software descriptor ring, create a
 * descriptor in the hardware TX descriptor ring (in host memory), and
 * write a doorbell.
 */
void ef4_farch_tx_write(struct ef4_tx_queue *tx_queue)
{
	struct ef4_tx_buffer *buffer;
	ef4_qword_t *txd;
	unsigned write_ptr;
	unsigned old_write_count = tx_queue->write_count;

	tx_queue->xmit_more_available = false;
	if (unlikely(tx_queue->write_count == tx_queue->insert_count))
		return;

	do {
		write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
		buffer = &tx_queue->buffer[write_ptr];
		txd = ef4_tx_desc(tx_queue, write_ptr);
		++tx_queue->write_count;

		EF4_BUG_ON_PARANOID(buffer->flags & EF4_TX_BUF_OPTION);

		/* Create TX descriptor ring entry */
		BUILD_BUG_ON(EF4_TX_BUF_CONT != 1);
		EF4_POPULATE_QWORD_4(*txd,
				     FSF_AZ_TX_KER_CONT,
				     buffer->flags & EF4_TX_BUF_CONT,
				     FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
				     FSF_AZ_TX_KER_BUF_REGION, 0,
				     FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
	} while (tx_queue->write_count != tx_queue->insert_count);

	wmb(); /* Ensure descriptors are written before they are fetched */

	if (ef4_nic_may_push_tx_desc(tx_queue, old_write_count)) {
		txd = ef4_tx_desc(tx_queue,
				  old_write_count & tx_queue->ptr_mask);
		ef4_farch_push_tx_desc(tx_queue, txd);
		++tx_queue->pushes;
	} else {
		ef4_farch_notify_tx_desc(tx_queue);
	}
}
unsigned int ef4_farch_tx_limit_len(struct ef4_tx_queue *tx_queue,
				    dma_addr_t dma_addr, unsigned int len)
{
	/* Don't cross 4K boundaries with descriptors. */
	unsigned int limit = (~dma_addr & (EF4_PAGE_SIZE - 1)) + 1;

	len = min(limit, len);

	if (EF4_WORKAROUND_5391(tx_queue->efx) && (dma_addr & 0xf))
		len = min_t(unsigned int, len, 512 - (dma_addr & 0xf));

	return len;
}
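
/* Example (illustrative): a fragment whose dma_addr ends in 0xff0 with
 * len 256 gets limit = (~0xff0 & 0xfff) + 1 = 16, so only 16 bytes go in
 * this descriptor and the caller emits the remainder in further
 * descriptors beyond the 4K boundary.
 */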
/* Allocate hardware resources for a TX queue */
int ef4_farch_tx_probe(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	unsigned entries;

	entries = tx_queue->ptr_mask + 1;
	return ef4_alloc_special_buffer(efx, &tx_queue->txd,
					entries * sizeof(ef4_qword_t));
}
void ef4_farch_tx_init(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t reg;

	/* Pin TX descriptor ring */
	ef4_init_special_buffer(efx, &tx_queue->txd);

	/* Push TX descriptor ring to card */
	EF4_POPULATE_OWORD_10(reg,
			      FRF_AZ_TX_DESCQ_EN, 1,
			      FRF_AZ_TX_ISCSI_DDIG_EN, 0,
			      FRF_AZ_TX_ISCSI_HDIG_EN, 0,
			      FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
			      FRF_AZ_TX_DESCQ_EVQ_ID,
			      tx_queue->channel->channel,
			      FRF_AZ_TX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
			      FRF_AZ_TX_DESCQ_SIZE,
			      __ffs(tx_queue->txd.entries),
			      FRF_AZ_TX_DESCQ_TYPE, 0,
			      FRF_BZ_TX_NON_IP_DROP_DIS, 1);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		int csum = tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD;
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
		EF4_SET_OWORD_FIELD(reg, FRF_BZ_TX_TCP_CHKSM_DIS,
				    !csum);
	}

	ef4_writeo_table(efx, &reg, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	if (ef4_nic_rev(efx) < EF4_REV_FALCON_B0) {
		/* Only 128 bits in this register */
		BUILD_BUG_ON(EF4_MAX_TX_QUEUES > 128);

		ef4_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
		if (tx_queue->queue & EF4_TXQ_TYPE_OFFLOAD)
			__clear_bit_le(tx_queue->queue, &reg);
		else
			__set_bit_le(tx_queue->queue, &reg);
		ef4_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
	}

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_POPULATE_OWORD_1(reg,
				     FRF_BZ_TX_PACE,
				     (tx_queue->queue & EF4_TXQ_TYPE_HIGHPRI) ?
				     FFE_BZ_TX_PACE_OFF :
				     FFE_BZ_TX_PACE_RESERVED);
		ef4_writeo_table(efx, &reg, FR_BZ_TX_PACE_TBL,
				 tx_queue->queue);
	}
}
static void ef4_farch_flush_tx_queue(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t tx_flush_descq;

	WARN_ON(atomic_read(&tx_queue->flush_outstanding));
	atomic_set(&tx_queue->flush_outstanding, 1);

	EF4_POPULATE_OWORD_2(tx_flush_descq,
			     FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
	ef4_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
}
void ef4_farch_tx_fini(struct ef4_tx_queue *tx_queue)
{
	struct ef4_nic *efx = tx_queue->efx;
	ef4_oword_t tx_desc_ptr;

	/* Remove TX descriptor ring from card */
	EF4_ZERO_OWORD(tx_desc_ptr);
	ef4_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
			 tx_queue->queue);

	/* Unpin TX descriptor ring */
	ef4_fini_special_buffer(efx, &tx_queue->txd);
}

/* Free buffers backing TX queue */
void ef4_farch_tx_remove(struct ef4_tx_queue *tx_queue)
{
	ef4_free_special_buffer(tx_queue->efx, &tx_queue->txd);
}
/**************************************************************************
 *
 * RX path
 *
 **************************************************************************/
/* This creates an entry in the RX descriptor queue */
static inline void
ef4_farch_build_rx_desc(struct ef4_rx_queue *rx_queue, unsigned index)
{
	struct ef4_rx_buffer *rx_buf;
	ef4_qword_t *rxd;

	rxd = ef4_rx_desc(rx_queue, index);
	rx_buf = ef4_rx_buffer(rx_queue, index);
	EF4_POPULATE_QWORD_3(*rxd,
			     FSF_AZ_RX_KER_BUF_SIZE,
			     rx_buf->len -
			     rx_queue->efx->type->rx_buffer_padding,
			     FSF_AZ_RX_KER_BUF_REGION, 0,
			     FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
}
/* This writes to the RX_DESC_WPTR register for the specified receive
 * descriptor ring.
 */
void ef4_farch_rx_write(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	ef4_dword_t reg;
	unsigned write_ptr;

	while (rx_queue->notified_count != rx_queue->added_count) {
		ef4_farch_build_rx_desc(
			rx_queue,
			rx_queue->notified_count & rx_queue->ptr_mask);
		++rx_queue->notified_count;
	}

	wmb();
	write_ptr = rx_queue->added_count & rx_queue->ptr_mask;
	EF4_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
	ef4_writed_page(efx, &reg, FR_AZ_RX_DESC_UPD_DWORD_P0,
			ef4_rx_queue_index(rx_queue));
}
int ef4_farch_rx_probe(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	unsigned entries;

	entries = rx_queue->ptr_mask + 1;
	return ef4_alloc_special_buffer(efx, &rx_queue->rxd,
					entries * sizeof(ef4_qword_t));
}
void ef4_farch_rx_init(struct ef4_rx_queue *rx_queue)
{
	ef4_oword_t rx_desc_ptr;
	struct ef4_nic *efx = rx_queue->efx;
	bool is_b0 = ef4_nic_rev(efx) >= EF4_REV_FALCON_B0;
	bool iscsi_digest_en = is_b0;
	bool jumbo_en;

	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
	 * DMA to continue after a PCIe page boundary (and scattering
	 * is not possible).  In Falcon B0 and Siena, it enables
	 * scatter.
	 */
	jumbo_en = !is_b0 || efx->rx_scatter;

	netif_dbg(efx, hw, efx->net_dev,
		  "RX queue %d ring in special buffers %d-%d\n",
		  ef4_rx_queue_index(rx_queue), rx_queue->rxd.index,
		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);

	rx_queue->scatter_n = 0;

	/* Pin RX descriptor ring */
	ef4_init_special_buffer(efx, &rx_queue->rxd);

	/* Push RX descriptor ring to card */
	EF4_POPULATE_OWORD_10(rx_desc_ptr,
			      FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
			      FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
			      FRF_AZ_RX_DESCQ_EVQ_ID,
			      ef4_rx_queue_channel(rx_queue)->channel,
			      FRF_AZ_RX_DESCQ_OWNER_ID, 0,
			      FRF_AZ_RX_DESCQ_LABEL,
			      ef4_rx_queue_index(rx_queue),
			      FRF_AZ_RX_DESCQ_SIZE,
			      __ffs(rx_queue->rxd.entries),
			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
			      FRF_AZ_RX_DESCQ_EN, 1);
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));
}
static void ef4_farch_flush_rx_queue(struct ef4_rx_queue *rx_queue)
{
	struct ef4_nic *efx = rx_queue->efx;
	ef4_oword_t rx_flush_descq;

	EF4_POPULATE_OWORD_2(rx_flush_descq,
			     FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
			     FRF_AZ_RX_FLUSH_DESCQ,
			     ef4_rx_queue_index(rx_queue));
	ef4_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
}
void ef4_farch_rx_fini(struct ef4_rx_queue *rx_queue)
{
	ef4_oword_t rx_desc_ptr;
	struct ef4_nic *efx = rx_queue->efx;

	/* Remove RX descriptor ring from card */
	EF4_ZERO_OWORD(rx_desc_ptr);
	ef4_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
			 ef4_rx_queue_index(rx_queue));

	/* Unpin RX descriptor ring */
	ef4_fini_special_buffer(efx, &rx_queue->rxd);
}

/* Free buffers backing RX queue */
void ef4_farch_rx_remove(struct ef4_rx_queue *rx_queue)
{
	ef4_free_special_buffer(rx_queue->efx, &rx_queue->rxd);
}
/**************************************************************************
 *
 * Flush handling
 *
 **************************************************************************/
/* ef4_farch_flush_queues() must be woken up when all flushes are completed,
 * or more RX flushes can be kicked off.
 */
static bool ef4_farch_flush_wake(struct ef4_nic *efx)
{
	/* Ensure that all updates are visible to ef4_farch_flush_queues() */
	smp_mb();

	return (atomic_read(&efx->active_queues) == 0 ||
		(atomic_read(&efx->rxq_flush_outstanding) < EF4_RX_FLUSH_COUNT
		 && atomic_read(&efx->rxq_flush_pending) > 0));
}
static bool ef4_check_tx_flush_complete(struct ef4_nic *efx)
{
	bool i = true;
	ef4_oword_t txd_ptr_tbl;
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_reado_table(efx, &txd_ptr_tbl,
					FR_BZ_TX_DESC_PTR_TBL, tx_queue->queue);
			if (EF4_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_FLUSH) ||
			    EF4_OWORD_FIELD(txd_ptr_tbl,
					    FRF_AZ_TX_DESCQ_EN)) {
				netif_dbg(efx, hw, efx->net_dev,
					  "flush did not complete on TXQ %d\n",
					  tx_queue->queue);
				i = false;
			} else if (atomic_cmpxchg(&tx_queue->flush_outstanding,
						  1, 0)) {
				/* The flush is complete, but we didn't
				 * receive a flush completion event
				 */
				netif_dbg(efx, hw, efx->net_dev,
					  "flush complete on TXQ %d, so drain "
					  "the queue\n", tx_queue->queue);
				/* Don't need to increment active_queues as it
				 * has already been incremented for the queues
				 * which did not drain
				 */
				ef4_farch_magic_event(channel,
						      EF4_CHANNEL_MAGIC_TX_DRAIN(
							      tx_queue));
			}
		}
	}

	return i;
}
/* Flush all the transmit queues, and continue flushing receive queues until
 * they're all flushed. Wait for the DRAIN events to be received so that there
 * are no more RX and TX events left on any channel. */
static int ef4_farch_do_flush(struct ef4_nic *efx)
{
	unsigned timeout = msecs_to_jiffies(5000); /* 5s for all flushes and drains */
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	struct ef4_tx_queue *tx_queue;
	int rc = 0;

	ef4_for_each_channel(channel, efx) {
		ef4_for_each_channel_tx_queue(tx_queue, channel) {
			ef4_farch_flush_tx_queue(tx_queue);
		}
		ef4_for_each_channel_rx_queue(rx_queue, channel) {
			rx_queue->flush_pending = true;
			atomic_inc(&efx->rxq_flush_pending);
		}
	}

	while (timeout && atomic_read(&efx->active_queues) > 0) {
		/* The hardware supports four concurrent rx flushes, each of
		 * which may need to be retried if there is an outstanding
		 * descriptor fetch
		 */
		ef4_for_each_channel(channel, efx) {
			ef4_for_each_channel_rx_queue(rx_queue, channel) {
				if (atomic_read(&efx->rxq_flush_outstanding) >=
				    EF4_RX_FLUSH_COUNT)
					break;

				if (rx_queue->flush_pending) {
					rx_queue->flush_pending = false;
					atomic_dec(&efx->rxq_flush_pending);
					atomic_inc(&efx->rxq_flush_outstanding);
					ef4_farch_flush_rx_queue(rx_queue);
				}
			}
		}

		timeout = wait_event_timeout(efx->flush_wq,
					     ef4_farch_flush_wake(efx),
					     timeout);
	}

	if (atomic_read(&efx->active_queues) &&
	    !ef4_check_tx_flush_complete(efx)) {
		netif_err(efx, hw, efx->net_dev, "failed to flush %d queues "
			  "(rx %d+%d)\n", atomic_read(&efx->active_queues),
			  atomic_read(&efx->rxq_flush_outstanding),
			  atomic_read(&efx->rxq_flush_pending));
		rc = -ETIMEDOUT;

		atomic_set(&efx->active_queues, 0);
		atomic_set(&efx->rxq_flush_pending, 0);
		atomic_set(&efx->rxq_flush_outstanding, 0);
	}

	return rc;
}
int ef4_farch_fini_dmaq(struct ef4_nic *efx)
{
	struct ef4_channel *channel;
	struct ef4_tx_queue *tx_queue;
	struct ef4_rx_queue *rx_queue;
	int rc = 0;

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		/* Only perform flush if DMA is enabled */
		if (efx->pci_dev->is_busmaster) {
			efx->type->prepare_flush(efx);
			rc = ef4_farch_do_flush(efx);
			efx->type->finish_flush(efx);
		}

		ef4_for_each_channel(channel, efx) {
			ef4_for_each_channel_rx_queue(rx_queue, channel)
				ef4_farch_rx_fini(rx_queue);
			ef4_for_each_channel_tx_queue(tx_queue, channel)
				ef4_farch_tx_fini(tx_queue);
		}
	}

	return rc;
}
/* Reset queue and flush accounting after FLR
 *
 * One possible cause of FLR recovery is that DMA may be failing (eg. if bus
 * mastering was disabled), in which case we don't receive (RXQ) flush
 * completion events.  This means that efx->rxq_flush_outstanding remained at 4
 * after the FLR; also, efx->active_queues was non-zero (as no flush completion
 * events were received, and we didn't go through ef4_check_tx_flush_complete())
 * If we don't fix this up, on the next call to ef4_realloc_channels() we won't
 * flush any RX queues because efx->rxq_flush_outstanding is at the limit of 4
 * for batched flush requests; and the efx->active_queues gets messed up because
 * we keep incrementing for the newly initialised queues, but it never went to
 * zero previously.  Then we get a timeout every time we try to restart the
 * queues, as it doesn't go back to zero when we should be flushing the queues.
 */
void ef4_farch_finish_flr(struct ef4_nic *efx)
{
	atomic_set(&efx->rxq_flush_pending, 0);
	atomic_set(&efx->rxq_flush_outstanding, 0);
	atomic_set(&efx->active_queues, 0);
}
/**************************************************************************
 *
 * Event queue processing
 * Event queues are processed by per-channel tasklets.
 *
 **************************************************************************/
/* Update a channel's event queue's read pointer (RPTR) register
 *
 * This writes the EVQ_RPTR_REG register for the specified channel's
 * event queue.
 */
void ef4_farch_ev_read_ack(struct ef4_channel *channel)
{
	ef4_dword_t reg;
	struct ef4_nic *efx = channel->efx;

	EF4_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR,
			     channel->eventq_read_ptr & channel->eventq_mask);

	/* For Falcon A1, EVQ_RPTR_KER is documented as having a step size
	 * of 4 bytes, but it is really 16 bytes just like later revisions.
	 */
	ef4_writed(efx, &reg,
		   efx->type->evq_rptr_tbl_base +
		   FR_BZ_EVQ_RPTR_STEP * channel->channel);
}
/* Use HW to insert a SW defined event */
void ef4_farch_generate_event(struct ef4_nic *efx, unsigned int evq,
			      ef4_qword_t *event)
{
	ef4_oword_t drv_ev_reg;

	BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
		     FRF_AZ_DRV_EV_DATA_WIDTH != 64);
	drv_ev_reg.u32[0] = event->u32[0];
	drv_ev_reg.u32[1] = event->u32[1];
	drv_ev_reg.u32[2] = 0;
	drv_ev_reg.u32[3] = 0;
	EF4_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, evq);
	ef4_writeo(efx, &drv_ev_reg, FR_AZ_DRV_EV);
}
static void ef4_farch_magic_event(struct ef4_channel *channel, u32 magic)
{
	ef4_qword_t event;

	EF4_POPULATE_QWORD_2(event, FSF_AZ_EV_CODE,
			     FSE_AZ_EV_CODE_DRV_GEN_EV,
			     FSF_AZ_DRV_GEN_EV_MAGIC, magic);
	ef4_farch_generate_event(channel->efx, channel->channel, &event);
}
/* Handle a transmit completion event
 *
 * The NIC batches TX completion events; the message we receive is of
 * the form "complete all TX events up to this index".
 */
static int
ef4_farch_handle_tx_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	unsigned int tx_ev_desc_ptr;
	unsigned int tx_ev_q_label;
	struct ef4_tx_queue *tx_queue;
	struct ef4_nic *efx = channel->efx;
	int tx_packets = 0;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return 0;

	if (likely(EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
		/* Transmit completion */
		tx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);
		tx_packets = ((tx_ev_desc_ptr - tx_queue->read_count) &
			      tx_queue->ptr_mask);
		ef4_xmit_done(tx_queue, tx_ev_desc_ptr);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
		/* Rewrite the FIFO write pointer */
		tx_ev_q_label = EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
		tx_queue = ef4_channel_get_tx_queue(
			channel, tx_ev_q_label % EF4_TXQ_TYPES);

		netif_tx_lock(efx->net_dev);
		ef4_farch_notify_tx_desc(tx_queue);
		netif_tx_unlock(efx->net_dev);
	} else if (EF4_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR)) {
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
	} else {
		netif_err(efx, tx_err, efx->net_dev,
			  "channel %d unexpected TX event "
			  EF4_QWORD_FMT"\n", channel->channel,
			  EF4_QWORD_VAL(*event));
	}

	return tx_packets;
}
/* Detect errors included in the rx_evt_pkt_ok bit. */
static u16 ef4_farch_handle_rx_not_ok(struct ef4_rx_queue *rx_queue,
				      const ef4_qword_t *event)
{
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_nic *efx = rx_queue->efx;
	bool __maybe_unused rx_ev_buf_owner_id_err, rx_ev_ip_hdr_chksum_err;
	bool rx_ev_tcp_udp_chksum_err, rx_ev_eth_crc_err;
	bool rx_ev_frm_trunc, rx_ev_drib_nib, rx_ev_tobe_disc;
	bool rx_ev_pause_frm;

	rx_ev_tobe_disc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
	rx_ev_buf_owner_id_err = EF4_QWORD_FIELD(*event,
						 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
	rx_ev_ip_hdr_chksum_err = EF4_QWORD_FIELD(*event,
						  FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
	rx_ev_tcp_udp_chksum_err = EF4_QWORD_FIELD(*event,
						   FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
	rx_ev_eth_crc_err = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
	rx_ev_frm_trunc = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
	rx_ev_drib_nib = ((ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) ?
			  0 : EF4_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
	rx_ev_pause_frm = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);

	/* Count errors that are not in MAC stats.  Ignore expected
	 * checksum errors during self-test. */
	if (rx_ev_frm_trunc)
		++channel->n_rx_frm_trunc;
	else if (rx_ev_tobe_disc)
		++channel->n_rx_tobe_disc;
	else if (!efx->loopback_selftest) {
		if (rx_ev_ip_hdr_chksum_err)
			++channel->n_rx_ip_hdr_chksum_err;
		else if (rx_ev_tcp_udp_chksum_err)
			++channel->n_rx_tcp_udp_chksum_err;
	}

	/* TOBE_DISC is expected on unicast mismatches; don't print out an
	 * error message.  FRM_TRUNC indicates RXDP dropped the packet due
	 * to a FIFO overflow.
	 */
#ifdef DEBUG
	{
	/* Every error apart from tobe_disc and pause_frm */

	bool rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
				rx_ev_buf_owner_id_err | rx_ev_eth_crc_err |
				rx_ev_frm_trunc | rx_ev_ip_hdr_chksum_err);

	if (rx_ev_other_err && net_ratelimit()) {
		netif_dbg(efx, rx_err, efx->net_dev,
			  " RX queue %d unexpected RX event "
			  EF4_QWORD_FMT "%s%s%s%s%s%s%s%s\n",
			  ef4_rx_queue_index(rx_queue), EF4_QWORD_VAL(*event),
			  rx_ev_buf_owner_id_err ? " [OWNER_ID_ERR]" : "",
			  rx_ev_ip_hdr_chksum_err ?
			  " [IP_HDR_CHKSUM_ERR]" : "",
			  rx_ev_tcp_udp_chksum_err ?
			  " [TCP_UDP_CHKSUM_ERR]" : "",
			  rx_ev_eth_crc_err ? " [ETH_CRC_ERR]" : "",
			  rx_ev_frm_trunc ? " [FRM_TRUNC]" : "",
			  rx_ev_drib_nib ? " [DRIB_NIB]" : "",
			  rx_ev_tobe_disc ? " [TOBE_DISC]" : "",
			  rx_ev_pause_frm ? " [PAUSE]" : "");
	}
	}
#endif

	/* The frame must be discarded if any of these are true. */
	return (rx_ev_eth_crc_err | rx_ev_frm_trunc | rx_ev_drib_nib |
		rx_ev_tobe_disc | rx_ev_pause_frm) ?
		EF4_RX_PKT_DISCARD : 0;
}
/* Handle receive events that are not in-order. Return true if this
 * can be handled as a partial packet discard, false if it's more
 * serious.
 */
static bool
ef4_farch_handle_rx_bad_index(struct ef4_rx_queue *rx_queue, unsigned index)
{
	struct ef4_channel *channel = ef4_rx_queue_channel(rx_queue);
	struct ef4_nic *efx = rx_queue->efx;
	unsigned expected, dropped;

	if (rx_queue->scatter_n &&
	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
		      rx_queue->ptr_mask)) {
		++channel->n_rx_nodesc_trunc;
		return true;
	}

	expected = rx_queue->removed_count & rx_queue->ptr_mask;
	dropped = (index - expected) & rx_queue->ptr_mask;
	netif_info(efx, rx_err, efx->net_dev,
		   "dropped %d events (index=%d expected=%d)\n",
		   dropped, index, expected);

	ef4_schedule_reset(efx, EF4_WORKAROUND_5676(efx) ?
			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
	return false;
}
/* Handle a packet received event
 *
 * The NIC gives a "discard" flag if it's a unicast packet with the
 * wrong destination address
 * Also "is multicast" and "matches multicast filter" flags can be used to
 * discard non-matching multicast packets.
 */
static void
ef4_farch_handle_rx_event(struct ef4_channel *channel, const ef4_qword_t *event)
{
	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
	unsigned expected_ptr;
	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
	u16 flags;
	struct ef4_rx_queue *rx_queue;
	struct ef4_nic *efx = channel->efx;

	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
		return;

	rx_ev_cont = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
	rx_ev_sop = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
	WARN_ON(EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
		channel->channel);

	rx_queue = ef4_channel_get_rx_queue(channel);

	rx_ev_desc_ptr = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
			rx_queue->ptr_mask);

	/* Check for partial drops and other errors */
	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
		if (rx_ev_desc_ptr != expected_ptr &&
		    !ef4_farch_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
			return;

		/* Discard all pending fragments */
		if (rx_queue->scatter_n) {
			ef4_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				rx_queue->scatter_n, 0, EF4_RX_PKT_DISCARD);
			rx_queue->removed_count += rx_queue->scatter_n;
			rx_queue->scatter_n = 0;
		}

		/* Return if there is no new fragment */
		if (rx_ev_desc_ptr != expected_ptr)
			return;

		/* Discard new fragment if not SOP */
		if (!rx_ev_sop) {
			ef4_rx_packet(
				rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask,
				1, 0, EF4_RX_PKT_DISCARD);
			++rx_queue->removed_count;
			return;
		}
	}

	++rx_queue->scatter_n;
	if (rx_ev_cont)
		return;

	rx_ev_byte_cnt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
	rx_ev_pkt_ok = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
	rx_ev_hdr_type = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);

	if (likely(rx_ev_pkt_ok)) {
		/* If packet is marked as OK then we can rely on the
		 * hardware checksum and classification.
		 */
		flags = 0;
		switch (rx_ev_hdr_type) {
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP:
			flags |= EF4_RX_PKT_TCP;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP:
			flags |= EF4_RX_PKT_CSUMMED;
			/* fall through */
		case FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER:
		case FSE_AZ_RX_EV_HDR_TYPE_OTHER:
			break;
		}
	} else {
		flags = ef4_farch_handle_rx_not_ok(rx_queue, event);
	}

	/* Detect multicast packets that didn't match the filter */
	rx_ev_mcast_pkt = EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
	if (rx_ev_mcast_pkt) {
		unsigned int rx_ev_mcast_hash_match =
			EF4_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);

		if (unlikely(!rx_ev_mcast_hash_match)) {
			++channel->n_rx_mcast_mismatch;
			flags |= EF4_RX_PKT_DISCARD;
		}
	}

	channel->irq_mod_score += 2;

	/* Handle received packet */
	ef4_rx_packet(rx_queue,
		      rx_queue->removed_count & rx_queue->ptr_mask,
		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
	rx_queue->removed_count += rx_queue->scatter_n;
	rx_queue->scatter_n = 0;
}
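
/* Scatter accounting sketch (illustrative): a packet split over three
 * buffers arrives as two events with JUMBO_CONT set, each only bumping
 * scatter_n, followed by a final event that calls ef4_rx_packet() with
 * n_frags == 3 and the total byte count.
 */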
/* If this flush done event corresponds to a &struct ef4_tx_queue, then
 * send an %EF4_CHANNEL_MAGIC_TX_DRAIN event to drain the event queue
 * of all transmit completions.
 */
static void
ef4_farch_handle_tx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{
	struct ef4_tx_queue *tx_queue;
	int qid;

	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
	if (qid < EF4_TXQ_TYPES * efx->n_tx_channels) {
		tx_queue = ef4_get_tx_queue(efx, qid / EF4_TXQ_TYPES,
					    qid % EF4_TXQ_TYPES);
		if (atomic_cmpxchg(&tx_queue->flush_outstanding, 1, 0)) {
			ef4_farch_magic_event(tx_queue->channel,
					      EF4_CHANNEL_MAGIC_TX_DRAIN(tx_queue));
		}
	}
}
/* If this flush done event corresponds to a &struct ef4_rx_queue: If the flush
 * was successful then send an %EF4_CHANNEL_MAGIC_RX_DRAIN, otherwise add
 * the RX queue back to the mask of RX queues in need of flushing.
 */
static void
ef4_farch_handle_rx_flush_done(struct ef4_nic *efx, ef4_qword_t *event)
{
	struct ef4_channel *channel;
	struct ef4_rx_queue *rx_queue;
	int qid;
	bool failed;

	qid = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
	failed = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
	if (qid >= efx->n_channels)
		return;
	channel = ef4_get_channel(efx, qid);
	if (!ef4_channel_has_rx_queue(channel))
		return;
	rx_queue = ef4_channel_get_rx_queue(channel);

	if (failed) {
		netif_info(efx, hw, efx->net_dev,
			   "RXQ %d flush retry\n", qid);
		rx_queue->flush_pending = true;
		atomic_inc(&efx->rxq_flush_pending);
	} else {
		ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
				      EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue));
	}
	atomic_dec(&efx->rxq_flush_outstanding);
	if (ef4_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}
static void
ef4_farch_handle_drain_event(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;

	WARN_ON(atomic_read(&efx->active_queues) == 0);
	atomic_dec(&efx->active_queues);
	if (ef4_farch_flush_wake(efx))
		wake_up(&efx->flush_wq);
}
static void ef4_farch_handle_generated_event(struct ef4_channel *channel,
					     ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	struct ef4_rx_queue *rx_queue =
		ef4_channel_has_rx_queue(channel) ?
		ef4_channel_get_rx_queue(channel) : NULL;
	unsigned magic, code;

	magic = EF4_QWORD_FIELD(*event, FSF_AZ_DRV_GEN_EV_MAGIC);
	code = _EF4_CHANNEL_MAGIC_CODE(magic);

	if (magic == EF4_CHANNEL_MAGIC_TEST(channel)) {
		channel->event_test_cpu = raw_smp_processor_id();
	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_FILL(rx_queue)) {
		/* The queue must be empty, so we won't receive any rx
		 * events, so ef4_process_channel() won't refill the
		 * queue. Refill it here */
		ef4_fast_push_rx_descriptors(rx_queue, true);
	} else if (rx_queue && magic == EF4_CHANNEL_MAGIC_RX_DRAIN(rx_queue)) {
		ef4_farch_handle_drain_event(channel);
	} else if (code == _EF4_CHANNEL_MAGIC_TX_DRAIN) {
		ef4_farch_handle_drain_event(channel);
	} else {
		netif_dbg(efx, hw, efx->net_dev, "channel %d received "
			  "generated event "EF4_QWORD_FMT"\n",
			  channel->channel, EF4_QWORD_VAL(*event));
	}
}
static void
ef4_farch_handle_driver_event(struct ef4_channel *channel, ef4_qword_t *event)
{
	struct ef4_nic *efx = channel->efx;
	unsigned int ev_sub_code;
	unsigned int ev_sub_data;

	ev_sub_code = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
	ev_sub_data = EF4_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);

	switch (ev_sub_code) {
	case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d TXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		ef4_farch_handle_tx_flush_done(efx, event);
		break;
	case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev, "channel %d RXQ %d flushed\n",
			   channel->channel, ev_sub_data);
		ef4_farch_handle_rx_flush_done(efx, event);
		break;
	case FSE_AZ_EVQ_INIT_DONE_EV:
		netif_dbg(efx, hw, efx->net_dev,
			  "channel %d EVQ %d initialised\n",
			  channel->channel, ev_sub_data);
		break;
	case FSE_AZ_SRM_UPD_DONE_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d SRAM update done\n", channel->channel);
		break;
	case FSE_AZ_WAKE_UP_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RXQ %d wakeup event\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AZ_TIMER_EV:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d RX queue %d timer expired\n",
			   channel->channel, ev_sub_data);
		break;
	case FSE_AA_RX_RECOVER_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "channel %d seen DRIVER RX_RESET event. "
			  "Resetting.\n", channel->channel);
		atomic_inc(&efx->rx_reset);
		ef4_schedule_reset(efx,
				   EF4_WORKAROUND_6555(efx) ?
				   RESET_TYPE_RX_RECOVERY :
				   RESET_TYPE_DISABLE);
		break;
	case FSE_BZ_RX_DSC_ERROR_EV:
		netif_err(efx, rx_err, efx->net_dev,
			  "RX DMA Q %d reports descriptor fetch error."
			  " RX Q %d is disabled.\n", ev_sub_data,
			  ev_sub_data);
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	case FSE_BZ_TX_DSC_ERROR_EV:
		netif_err(efx, tx_err, efx->net_dev,
			  "TX DMA Q %d reports descriptor fetch error."
			  " TX Q %d is disabled.\n", ev_sub_data,
			  ev_sub_data);
		ef4_schedule_reset(efx, RESET_TYPE_DMA_ERROR);
		break;
	default:
		netif_vdbg(efx, hw, efx->net_dev,
			   "channel %d unknown driver event code %d "
			   "data %04x\n", channel->channel, ev_sub_code,
			   ev_sub_data);
	}
}
int ef4_farch_ev_process(struct ef4_channel *channel, int budget)
{
	struct ef4_nic *efx = channel->efx;
	unsigned int read_ptr;
	ef4_qword_t event, *p_event;
	int ev_code;
	int tx_packets = 0;
	int spent = 0;

	if (budget <= 0)
		return spent;

	read_ptr = channel->eventq_read_ptr;

	for (;;) {
		p_event = ef4_event(channel, read_ptr);
		event = *p_event;

		if (!ef4_event_present(&event))
			/* End of events */
			break;

		netif_vdbg(channel->efx, intr, channel->efx->net_dev,
			   "channel %d event is "EF4_QWORD_FMT"\n",
			   channel->channel, EF4_QWORD_VAL(event));

		/* Clear this event by marking it all ones */
		EF4_SET_QWORD(*p_event);

		++read_ptr;

		ev_code = EF4_QWORD_FIELD(event, FSF_AZ_EV_CODE);

		switch (ev_code) {
		case FSE_AZ_EV_CODE_RX_EV:
			ef4_farch_handle_rx_event(channel, &event);
			if (++spent == budget)
				goto out;
			break;
		case FSE_AZ_EV_CODE_TX_EV:
			tx_packets += ef4_farch_handle_tx_event(channel,
								&event);
			if (tx_packets > efx->txq_entries) {
				spent = budget;
				goto out;
			}
			break;
		case FSE_AZ_EV_CODE_DRV_GEN_EV:
			ef4_farch_handle_generated_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_DRIVER_EV:
			ef4_farch_handle_driver_event(channel, &event);
			break;
		case FSE_AZ_EV_CODE_GLOBAL_EV:
			if (efx->type->handle_global_event &&
			    efx->type->handle_global_event(channel, &event))
				break;
			/* else fall through */
		default:
			netif_err(channel->efx, hw, channel->efx->net_dev,
				  "channel %d unknown event type %d (data "
				  EF4_QWORD_FMT ")\n", channel->channel,
				  ev_code, EF4_QWORD_VAL(event));
		}
	}

out:
	channel->eventq_read_ptr = read_ptr;
	return spent;
}
/* Allocate buffer table entries for event queue */
int ef4_farch_ev_probe(struct ef4_channel *channel)
{
	struct ef4_nic *efx = channel->efx;
	unsigned entries;

	entries = channel->eventq_mask + 1;
	return ef4_alloc_special_buffer(efx, &channel->eventq,
					entries * sizeof(ef4_qword_t));
}
int ef4_farch_ev_init(struct ef4_channel *channel)
{
	ef4_oword_t reg;
	struct ef4_nic *efx = channel->efx;

	netif_dbg(efx, hw, efx->net_dev,
		  "channel %d event queue in special buffers %d-%d\n",
		  channel->channel, channel->eventq.index,
		  channel->eventq.index + channel->eventq.entries - 1);

	/* Pin event queue buffer */
	ef4_init_special_buffer(efx, &channel->eventq);

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	/* Push event queue to card */
	EF4_POPULATE_OWORD_3(reg,
			     FRF_AZ_EVQ_EN, 1,
			     FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
			     FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	return 0;
}
void ef4_farch_ev_fini(struct ef4_channel *channel)
{
	ef4_oword_t reg;
	struct ef4_nic *efx = channel->efx;

	/* Remove event queue from card */
	EF4_ZERO_OWORD(reg);
	ef4_writeo_table(efx, &reg, efx->type->evq_ptr_tbl_base,
			 channel->channel);

	/* Unpin event queue */
	ef4_fini_special_buffer(efx, &channel->eventq);
}

/* Free buffers backing event queue */
void ef4_farch_ev_remove(struct ef4_channel *channel)
{
	ef4_free_special_buffer(channel->efx, &channel->eventq);
}
void ef4_farch_ev_test_generate(struct ef4_channel *channel)
{
	ef4_farch_magic_event(channel, EF4_CHANNEL_MAGIC_TEST(channel));
}

void ef4_farch_rx_defer_refill(struct ef4_rx_queue *rx_queue)
{
	ef4_farch_magic_event(ef4_rx_queue_channel(rx_queue),
			      EF4_CHANNEL_MAGIC_FILL(rx_queue));
}
/**************************************************************************
 *
 * Hardware interrupts
 * The hardware interrupt handler does very little work; all the event
 * queue processing is carried out by per-channel tasklets.
 *
 **************************************************************************/
/* Enable/disable/generate interrupts */
static inline void ef4_farch_interrupts(struct ef4_nic *efx,
					bool enabled, bool force)
{
	ef4_oword_t int_en_reg_ker;

	EF4_POPULATE_OWORD_3(int_en_reg_ker,
			     FRF_AZ_KER_INT_LEVE_SEL, efx->irq_level,
			     FRF_AZ_KER_INT_KER, force,
			     FRF_AZ_DRV_INT_EN_KER, enabled);
	ef4_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
}
void ef4_farch_irq_enable_master(struct ef4_nic *efx)
{
	EF4_ZERO_OWORD(*((ef4_oword_t *) efx->irq_status.addr));
	wmb(); /* Ensure interrupt vector is clear before interrupts enabled */

	ef4_farch_interrupts(efx, true, false);
}

void ef4_farch_irq_disable_master(struct ef4_nic *efx)
{
	/* Disable interrupts */
	ef4_farch_interrupts(efx, false, false);
}
/* Generate a test interrupt
 * Interrupt must already have been enabled, otherwise nasty things
 * may happen.
 */
int ef4_farch_irq_test_generate(struct ef4_nic *efx)
{
	ef4_farch_interrupts(efx, true, true);
	return 0;
}
/* Process a fatal interrupt
 * Disable bus mastering ASAP and schedule a reset
 */
irqreturn_t ef4_farch_fatal_interrupt(struct ef4_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	ef4_oword_t fatal_intr;
	int error, mem_perr;

	ef4_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
	error = EF4_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);

	netif_err(efx, hw, efx->net_dev, "SYSTEM ERROR "EF4_OWORD_FMT" status "
		  EF4_OWORD_FMT ": %s\n", EF4_OWORD_VAL(*int_ker),
		  EF4_OWORD_VAL(fatal_intr),
		  error ? "disabling bus mastering" : "no recognised error");

	/* If this is a memory parity error dump which blocks are offending */
	mem_perr = (EF4_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER) ||
		    EF4_OWORD_FIELD(fatal_intr, FRF_AZ_SRM_PERR_INT_KER));
	if (mem_perr) {
		ef4_oword_t reg;
		ef4_reado(efx, &reg, FR_AZ_MEM_STAT);
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR: memory parity error "EF4_OWORD_FMT"\n",
			  EF4_OWORD_VAL(reg));
	}

	/* Disable both devices */
	pci_clear_master(efx->pci_dev);
	if (ef4_nic_is_dual_func(efx))
		pci_clear_master(nic_data->pci_dev2);
	ef4_farch_irq_disable_master(efx);

	/* Count errors and reset or disable the NIC accordingly */
	if (efx->int_error_count == 0 ||
	    time_after(jiffies, efx->int_error_expire)) {
		efx->int_error_count = 0;
		efx->int_error_expire =
			jiffies + EF4_INT_ERROR_EXPIRE * HZ;
	}
	if (++efx->int_error_count < EF4_MAX_INT_ERRORS) {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - reset scheduled\n");
		ef4_schedule_reset(efx, RESET_TYPE_INT_ERROR);
	} else {
		netif_err(efx, hw, efx->net_dev,
			  "SYSTEM ERROR - max number of errors seen."
			  "NIC will be disabled\n");
		ef4_schedule_reset(efx, RESET_TYPE_DISABLE);
	}

	return IRQ_HANDLED;
}
/* Handle a legacy interrupt
 * Acknowledges the interrupt and schedule event queue processing.
 */
irqreturn_t ef4_farch_legacy_interrupt(int irq, void *dev_id)
{
	struct ef4_nic *efx = dev_id;
	bool soft_enabled = ACCESS_ONCE(efx->irq_soft_enabled);
	ef4_oword_t *int_ker = efx->irq_status.addr;
	irqreturn_t result = IRQ_NONE;
	struct ef4_channel *channel;
	ef4_dword_t reg;
	u32 queues;
	int syserr;

	/* Read the ISR which also ACKs the interrupts */
	ef4_readd(efx, &reg, FR_BZ_INT_ISR0);
	queues = EF4_EXTRACT_DWORD(reg, 0, 31);

	/* Legacy interrupts are disabled too late by the EEH kernel
	 * code. Disable them earlier.
	 * If an EEH error occurred, the read will have returned all ones.
	 */
	if (EF4_DWORD_IS_ALL_ONES(reg) && ef4_try_recovery(efx) &&
	    !efx->eeh_disabled_legacy_irq) {
		disable_irq_nosync(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = true;
	}

	/* Handle non-event-queue sources */
	if (queues & (1U << efx->irq_level) && soft_enabled) {
		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return ef4_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	if (queues != 0) {
		efx->irq_zero_count = 0;

		/* Schedule processing of any interrupting queues */
		if (likely(soft_enabled)) {
			ef4_for_each_channel(channel, efx) {
				if (queues & 1)
					ef4_schedule_channel_irq(channel);
				queues >>= 1;
			}
		}
		result = IRQ_HANDLED;

	} else {
		ef4_qword_t *event;

		/* Legacy ISR read can return zero once (SF bug 15783) */

		/* We can't return IRQ_HANDLED more than once on seeing ISR=0
		 * because this might be a shared interrupt. */
		if (efx->irq_zero_count++ == 0)
			result = IRQ_HANDLED;

		/* Ensure we schedule or rearm all event queues */
		if (likely(soft_enabled)) {
			ef4_for_each_channel(channel, efx) {
				event = ef4_event(channel,
						  channel->eventq_read_ptr);
				if (ef4_event_present(event))
					ef4_schedule_channel_irq(channel);
				else
					ef4_farch_ev_read_ack(channel);
			}
		}
	}

	if (result == IRQ_HANDLED)
		netif_vdbg(efx, intr, efx->net_dev,
			   "IRQ %d on CPU %d status " EF4_DWORD_FMT "\n",
			   irq, raw_smp_processor_id(), EF4_DWORD_VAL(reg));

	return result;
}
/* Handle an MSI interrupt
 *
 * Handle an MSI hardware interrupt.  This routine schedules event
 * queue processing.  No interrupt acknowledgement cycle is necessary.
 * Also, we never need to check that the interrupt is for us, since
 * MSI interrupts cannot be shared.
 */
irqreturn_t ef4_farch_msi_interrupt(int irq, void *dev_id)
{
	struct ef4_msi_context *context = dev_id;
	struct ef4_nic *efx = context->efx;
	ef4_oword_t *int_ker = efx->irq_status.addr;
	int syserr;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d status " EF4_OWORD_FMT "\n",
		   irq, raw_smp_processor_id(), EF4_OWORD_VAL(*int_ker));

	if (!likely(ACCESS_ONCE(efx->irq_soft_enabled)))
		return IRQ_HANDLED;

	/* Handle non-event-queue sources */
	if (context->index == efx->irq_level) {
		syserr = EF4_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
		if (unlikely(syserr))
			return ef4_farch_fatal_interrupt(efx);
		efx->last_irq_cpu = raw_smp_processor_id();
	}

	/* Schedule processing of the channel */
	ef4_schedule_channel_irq(efx->channel[context->index]);

	return IRQ_HANDLED;
}
/* Setup RSS indirection table.
 * This maps from the hash value of the packet to RXQ
 */
void ef4_farch_rx_push_indir_table(struct ef4_nic *efx)
{
	size_t i = 0;
	ef4_dword_t dword;

	BUG_ON(ef4_nic_rev(efx) < EF4_REV_FALCON_B0);

	BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
		     FR_BZ_RX_INDIRECTION_TBL_ROWS);

	for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
		EF4_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
				     efx->rx_indir_table[i]);
		ef4_writed(efx, &dword,
			   FR_BZ_RX_INDIRECTION_TBL +
			   FR_BZ_RX_INDIRECTION_TBL_STEP * i);
	}
}
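
/* Usage sketch (assumption: the hardware indexes this 128-row table with
 * low-order bits of the RX flow hash): filling rx_indir_table[] round-robin
 * over four RX queues maps rows 0,1,2,3,4,... to queues 0,1,2,3,0,...,
 * spreading flows evenly once the table has been pushed.
 */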
/* Looks at available SRAM resources and works out how many queues we
 * can support, and where things like descriptor caches should live.
 *
 * SRAM is split up as follows:
 * 0                          buftbl entries for channels
 * efx->vf_buftbl_base        buftbl entries for SR-IOV
 * efx->rx_dc_base            RX descriptor caches
 * efx->tx_dc_base            TX descriptor caches
 */
void ef4_farch_dimension_resources(struct ef4_nic *efx, unsigned sram_lim_qw)
{
	unsigned vi_count;

	/* Account for the buffer table entries backing the datapath channels
	 * and the descriptor caches for those channels.
	 */
	vi_count = max(efx->n_channels, efx->n_tx_channels * EF4_TXQ_TYPES);

	efx->tx_dc_base = sram_lim_qw - vi_count * TX_DC_ENTRIES;
	efx->rx_dc_base = efx->tx_dc_base - vi_count * RX_DC_ENTRIES;
}
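
/* Worked example (illustrative): with vi_count == 4 the TX descriptor
 * caches occupy the 4 * 16 = 64 qwords immediately below sram_lim_qw and
 * the RX caches the 4 * 64 = 256 qwords below those.
 */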
u32 ef4_farch_fpga_ver(struct ef4_nic *efx)
{
	ef4_oword_t altera_build;
	ef4_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
	return EF4_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER);
}
void ef4_farch_init_common(struct ef4_nic *efx)
{
	ef4_oword_t temp;

	/* Set positions of descriptor caches in SRAM. */
	EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, efx->tx_dc_base);
	ef4_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
	EF4_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, efx->rx_dc_base);
	ef4_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);

	/* Set TX descriptor cache size. */
	BUILD_BUG_ON(TX_DC_ENTRIES != (8 << TX_DC_ENTRIES_ORDER));
	EF4_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
	ef4_writeo(efx, &temp, FR_AZ_TX_DC_CFG);

	/* Set RX descriptor cache size.  Set low watermark to size-8, as
	 * this allows most efficient prefetching.
	 */
	BUILD_BUG_ON(RX_DC_ENTRIES != (8 << RX_DC_ENTRIES_ORDER));
	EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
	ef4_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
	EF4_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
	ef4_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);

	/* Program INT_KER address */
	EF4_POPULATE_OWORD_2(temp,
			     FRF_AZ_NORM_INT_VEC_DIS_KER,
			     EF4_INT_MODE_USE_MSI(efx),
			     FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
	ef4_writeo(efx, &temp, FR_AZ_INT_ADR_KER);

	/* Use a valid MSI-X vector */
	efx->irq_level = 0;

	/* Enable all the genuinely fatal interrupts.  (They are still
	 * masked by the overall interrupt mask, controlled by
	 * falcon_interrupts()).
	 *
	 * Note: All other fatal interrupts are enabled
	 */
	EF4_POPULATE_OWORD_3(temp,
			     FRF_AZ_ILL_ADR_INT_KER_EN, 1,
			     FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
			     FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
	EF4_INVERT_OWORD(temp);
	ef4_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);

	/* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
	 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
	 */
	ef4_reado(efx, &temp, FR_AZ_TX_RESERVED);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
	/* Enable SW_EV to inherit in char driver - assume harmless here */
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
	/* Prefetch threshold 2 => fetch when descriptor cache half empty */
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
	/* Disable hardware watchdog which can misfire */
	EF4_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_WD_TMR, 0x3fffff);
	/* Squash TX of packets of 16 bytes or less */
	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0)
		EF4_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
	ef4_writeo(efx, &temp, FR_AZ_TX_RESERVED);

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		EF4_POPULATE_OWORD_4(temp,
				     /* Default values */
				     FRF_BZ_TX_PACE_SB_NOT_AF, 0x15,
				     FRF_BZ_TX_PACE_SB_AF, 0xb,
				     FRF_BZ_TX_PACE_FB_BASE, 0,
				     /* Allow large pace values in the
				      * fast bin. */
				     FRF_BZ_TX_PACE_BIN_TH,
				     FFE_BZ_TX_PACE_RESERVED);
		ef4_writeo(efx, &temp, FR_BZ_TX_PACE);
	}
}
/**************************************************************************
 *
 * Filter tables
 *
 **************************************************************************
 */
1754 /* "Fudge factors" - difference between programmed value and actual depth.
1755 * Due to pipelined implementation we need to program H/W with a value that
1756 * is larger than the hop limit we want.
1758 #define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD 3
1759 #define EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL 1
1761 /* Hard maximum search limit. Hardware will time-out beyond 200-something.
1762 * We also need to avoid infinite loops in ef4_farch_filter_search() when the
1765 #define EF4_FARCH_FILTER_CTL_SRCH_MAX 200
1767 /* Don't try very hard to find space for performance hints, as this is
1768 * counter-productive. */
1769 #define EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX 5
enum ef4_farch_filter_type {
	EF4_FARCH_FILTER_TCP_FULL = 0,
	EF4_FARCH_FILTER_TCP_WILD,
	EF4_FARCH_FILTER_UDP_FULL,
	EF4_FARCH_FILTER_UDP_WILD,
	EF4_FARCH_FILTER_MAC_FULL = 4,
	EF4_FARCH_FILTER_MAC_WILD,
	EF4_FARCH_FILTER_UC_DEF = 8,
	EF4_FARCH_FILTER_MC_DEF,
	EF4_FARCH_FILTER_TYPE_COUNT,	/* number of specific types */
};

enum ef4_farch_filter_table_id {
	EF4_FARCH_FILTER_TABLE_RX_IP = 0,
	EF4_FARCH_FILTER_TABLE_RX_MAC,
	EF4_FARCH_FILTER_TABLE_RX_DEF,
	EF4_FARCH_FILTER_TABLE_TX_MAC,
	EF4_FARCH_FILTER_TABLE_COUNT,
};

enum ef4_farch_filter_index {
	EF4_FARCH_FILTER_INDEX_UC_DEF,
	EF4_FARCH_FILTER_INDEX_MC_DEF,
	EF4_FARCH_FILTER_SIZE_RX_DEF,
};
struct ef4_farch_filter_spec {
	u8	type:4;
	u8	priority:4;
	u8	flags;
	u16	dmaq_id;
	u32	data[3];
};
struct ef4_farch_filter_table {
	enum ef4_farch_filter_table_id id;
	u32		offset;		/* address of table relative to BAR */
	unsigned	size;		/* number of entries */
	unsigned	step;		/* step between entries */
	unsigned	used;		/* number currently used */
	unsigned long	*used_bitmap;
	struct ef4_farch_filter_spec *spec;
	unsigned	search_limit[EF4_FARCH_FILTER_TYPE_COUNT];
};

struct ef4_farch_filter_state {
	struct ef4_farch_filter_table table[EF4_FARCH_FILTER_TABLE_COUNT];
};
static void
ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
				   struct ef4_farch_filter_table *table,
				   unsigned int filter_idx);
/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
 * key derived from the n-tuple.  The initial LFSR state is 0xffff. */
static u16 ef4_farch_filter_hash(u32 key)
{
	u16 tmp;

	/* First 16 rounds */
	tmp = 0x1fff ^ key >> 16;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	tmp = tmp ^ tmp >> 9;
	/* Last 16 rounds */
	tmp = tmp ^ tmp << 13 ^ key;
	tmp = tmp ^ tmp >> 3 ^ tmp >> 6;
	return tmp ^ tmp >> 9;
}
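
/* Note: each xor-fold pair above collapses 16 single-bit steps of the
 * x^16 + x^3 + 1 LFSR into word operations, so the 32-bit key is absorbed
 * in two rounds instead of 32 shifts.
 */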
/* To allow for hash collisions, filter search continues at these
 * increments from the first possible entry selected by the hash. */
static u16 ef4_farch_filter_increment(u32 key)
{
	return key * 2 - 1;
}
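
/* Since (key * 2 - 1) is always odd, the increment is co-prime with the
 * power-of-two table size, so the probe sequence visits every slot before
 * repeating.
 */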
static enum ef4_farch_filter_table_id
ef4_farch_filter_spec_table_id(const struct ef4_farch_filter_spec *spec)
{
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
		     (EF4_FARCH_FILTER_TCP_FULL >> 2));
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
		     (EF4_FARCH_FILTER_TCP_WILD >> 2));
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
		     (EF4_FARCH_FILTER_UDP_FULL >> 2));
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_IP !=
		     (EF4_FARCH_FILTER_UDP_WILD >> 2));
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
		     (EF4_FARCH_FILTER_MAC_FULL >> 2));
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_RX_MAC !=
		     (EF4_FARCH_FILTER_MAC_WILD >> 2));
	BUILD_BUG_ON(EF4_FARCH_FILTER_TABLE_TX_MAC !=
		     EF4_FARCH_FILTER_TABLE_RX_MAC + 2);
	return (spec->type >> 2) + ((spec->flags & EF4_FILTER_FLAG_TX) ? 2 : 0);
}
static void ef4_farch_filter_push_rx_config(struct ef4_nic *efx)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	struct ef4_farch_filter_table *table;
	ef4_oword_t filter_ctl;

	ef4_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);

	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
			    table->search_limit[EF4_FARCH_FILTER_TCP_FULL] +
			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
			    table->search_limit[EF4_FARCH_FILTER_TCP_WILD] +
			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
			    table->search_limit[EF4_FARCH_FILTER_UDP_FULL] +
			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
	EF4_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
			    table->search_limit[EF4_FARCH_FILTER_UDP_WILD] +
			    EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);

	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_MAC];
	if (table->size) {
		EF4_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
			table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EF4_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
			table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		EF4_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_Q_ID,
			table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].dmaq_id);
		EF4_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
			   EF4_FILTER_FLAG_RX_RSS));
		EF4_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_Q_ID,
			table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].dmaq_id);
		EF4_SET_OWORD_FIELD(
			filter_ctl, FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED,
			!!(table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EF4_FILTER_FLAG_RX_RSS));

		/* There is a single bit to enable RX scatter for all
		 * unmatched packets.  Only set it if scatter is
		 * enabled in both filter specs.
		 */
		EF4_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			!!(table->spec[EF4_FARCH_FILTER_INDEX_UC_DEF].flags &
			   table->spec[EF4_FARCH_FILTER_INDEX_MC_DEF].flags &
			   EF4_FILTER_FLAG_RX_SCATTER));
	} else if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		/* We don't expose 'default' filters because unmatched
		 * packets always go to the queue number found in the
		 * RSS table.  But we still need to set the RX scatter
		 * bit here.
		 */
		EF4_SET_OWORD_FIELD(
			filter_ctl, FRF_BZ_SCATTER_ENBL_NO_MATCH_Q,
			efx->rx_scatter);
	}

	ef4_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
}

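/* The values pushed here are the driver's deepest observed probe depth
 * for each filter type plus a per-type fudge constant; the SRCH_FUDGE_*
 * terms are assumed to cover the difference between the driver's depth
 * count and the hardware's interpretation of the search-limit fields.
 */
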
static void ef4_farch_filter_push_tx_limits(struct ef4_nic *efx)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	struct ef4_farch_filter_table *table;
	ef4_oword_t tx_cfg;

	ef4_reado(efx, &tx_cfg, FR_AZ_TX_CFG);

	table = &state->table[EF4_FARCH_FILTER_TABLE_TX_MAC];
	if (table->size) {
		EF4_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE,
			table->search_limit[EF4_FARCH_FILTER_MAC_FULL] +
			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_FULL);
		EF4_SET_OWORD_FIELD(
			tx_cfg, FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE,
			table->search_limit[EF4_FARCH_FILTER_MAC_WILD] +
			EF4_FARCH_FILTER_CTL_SRCH_FUDGE_WILD);
	}

	ef4_writeo(efx, &tx_cfg, FR_AZ_TX_CFG);
}

static int
ef4_farch_filter_from_gen_spec(struct ef4_farch_filter_spec *spec,
			       const struct ef4_filter_spec *gen_spec)
{
	bool is_full = false;

	if ((gen_spec->flags & EF4_FILTER_FLAG_RX_RSS) &&
	    gen_spec->rss_context != EF4_FILTER_RSS_CONTEXT_DEFAULT)
		return -EINVAL;

	spec->priority = gen_spec->priority;
	spec->flags = gen_spec->flags;
	spec->dmaq_id = gen_spec->dmaq_id;

	switch (gen_spec->match_flags) {
	case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
	      EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT |
	      EF4_FILTER_MATCH_REM_HOST | EF4_FILTER_MATCH_REM_PORT):
		is_full = true;
		/* fall through */
	case (EF4_FILTER_MATCH_ETHER_TYPE | EF4_FILTER_MATCH_IP_PROTO |
	      EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT): {
		__be32 rhost, host1, host2;
		__be16 rport, port1, port2;

		EF4_BUG_ON_PARANOID(!(gen_spec->flags & EF4_FILTER_FLAG_RX));

		if (gen_spec->ether_type != htons(ETH_P_IP))
			return -EPROTONOSUPPORT;
		if (gen_spec->loc_port == 0 ||
		    (is_full && gen_spec->rem_port == 0))
			return -EADDRNOTAVAIL;
		switch (gen_spec->ip_proto) {
		case IPPROTO_TCP:
			spec->type = (is_full ? EF4_FARCH_FILTER_TCP_FULL :
				      EF4_FARCH_FILTER_TCP_WILD);
			break;
		case IPPROTO_UDP:
			spec->type = (is_full ? EF4_FARCH_FILTER_UDP_FULL :
				      EF4_FARCH_FILTER_UDP_WILD);
			break;
		default:
			return -EPROTONOSUPPORT;
		}

		/* Filter is constructed in terms of source and destination,
		 * with the odd wrinkle that the ports are swapped in a UDP
		 * wildcard filter.  We need to convert from local and remote
		 * (= zero for wildcard) addresses.
		 */
		rhost = is_full ? gen_spec->rem_host[0] : 0;
		rport = is_full ? gen_spec->rem_port : 0;
		host1 = rhost;
		host2 = gen_spec->loc_host[0];
		if (!is_full && gen_spec->ip_proto == IPPROTO_UDP) {
			port1 = gen_spec->loc_port;
			port2 = rport;
		} else {
			port1 = rport;
			port2 = gen_spec->loc_port;
		}
		spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
		spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
		spec->data[2] = ntohl(host2);

		break;
	}

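	/* Layout note (derived from the assignments just above): read as
	 * one 96-bit value data[2]:data[1]:data[0], the words concatenate
	 * to host2:port2:host1:port1, i.e. destination address and port
	 * followed by source address and port, modulo the UDP wildcard
	 * port swap described in the comment above.
	 */
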
	case EF4_FILTER_MATCH_LOC_MAC | EF4_FILTER_MATCH_OUTER_VID:
		is_full = true;
		/* fall through */
	case EF4_FILTER_MATCH_LOC_MAC:
		spec->type = (is_full ? EF4_FARCH_FILTER_MAC_FULL :
			      EF4_FARCH_FILTER_MAC_WILD);
		spec->data[0] = is_full ? ntohs(gen_spec->outer_vid) : 0;
		spec->data[1] = (gen_spec->loc_mac[2] << 24 |
				 gen_spec->loc_mac[3] << 16 |
				 gen_spec->loc_mac[4] << 8 |
				 gen_spec->loc_mac[5]);
		spec->data[2] = (gen_spec->loc_mac[0] << 8 |
				 gen_spec->loc_mac[1]);
		break;

	case EF4_FILTER_MATCH_LOC_MAC_IG:
		spec->type = (is_multicast_ether_addr(gen_spec->loc_mac) ?
			      EF4_FARCH_FILTER_MC_DEF :
			      EF4_FARCH_FILTER_UC_DEF);
		memset(spec->data, 0, sizeof(spec->data)); /* ensure equality */
		break;

	default:
		return -EPROTONOSUPPORT;
	}

	return 0;
}

static void
ef4_farch_filter_to_gen_spec(struct ef4_filter_spec *gen_spec,
			     const struct ef4_farch_filter_spec *spec)
{
	bool is_full = false;

	/* *gen_spec should be completely initialised, to be consistent
	 * with ef4_filter_init_{rx,tx}() and in case we want to copy
	 * it back to userland.
	 */
	memset(gen_spec, 0, sizeof(*gen_spec));

	gen_spec->priority = spec->priority;
	gen_spec->flags = spec->flags;
	gen_spec->dmaq_id = spec->dmaq_id;

	switch (spec->type) {
	case EF4_FARCH_FILTER_TCP_FULL:
	case EF4_FARCH_FILTER_UDP_FULL:
		is_full = true;
		/* fall through */
	case EF4_FARCH_FILTER_TCP_WILD:
	case EF4_FARCH_FILTER_UDP_WILD: {
		__be32 host1, host2;
		__be16 port1, port2;

		gen_spec->match_flags =
			EF4_FILTER_MATCH_ETHER_TYPE |
			EF4_FILTER_MATCH_IP_PROTO |
			EF4_FILTER_MATCH_LOC_HOST | EF4_FILTER_MATCH_LOC_PORT;
		if (is_full)
			gen_spec->match_flags |= (EF4_FILTER_MATCH_REM_HOST |
						  EF4_FILTER_MATCH_REM_PORT);
		gen_spec->ether_type = htons(ETH_P_IP);
		gen_spec->ip_proto =
			(spec->type == EF4_FARCH_FILTER_TCP_FULL ||
			 spec->type == EF4_FARCH_FILTER_TCP_WILD) ?
			IPPROTO_TCP : IPPROTO_UDP;

		host1 = htonl(spec->data[0] >> 16 | spec->data[1] << 16);
		port1 = htons(spec->data[0]);
		host2 = htonl(spec->data[2]);
		port2 = htons(spec->data[1] >> 16);
		if (spec->flags & EF4_FILTER_FLAG_TX) {
			gen_spec->loc_host[0] = host1;
			gen_spec->rem_host[0] = host2;
		} else {
			gen_spec->loc_host[0] = host2;
			gen_spec->rem_host[0] = host1;
		}
		if (!!(gen_spec->flags & EF4_FILTER_FLAG_TX) ^
		    (!is_full && gen_spec->ip_proto == IPPROTO_UDP)) {
			gen_spec->loc_port = port1;
			gen_spec->rem_port = port2;
		} else {
			gen_spec->loc_port = port2;
			gen_spec->rem_port = port1;
		}

		break;
	}

	case EF4_FARCH_FILTER_MAC_FULL:
		is_full = true;
		/* fall through */
	case EF4_FARCH_FILTER_MAC_WILD:
		gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC;
		if (is_full)
			gen_spec->match_flags |= EF4_FILTER_MATCH_OUTER_VID;
		gen_spec->loc_mac[0] = spec->data[2] >> 8;
		gen_spec->loc_mac[1] = spec->data[2];
		gen_spec->loc_mac[2] = spec->data[1] >> 24;
		gen_spec->loc_mac[3] = spec->data[1] >> 16;
		gen_spec->loc_mac[4] = spec->data[1] >> 8;
		gen_spec->loc_mac[5] = spec->data[1];
		gen_spec->outer_vid = htons(spec->data[0]);
		break;

	case EF4_FARCH_FILTER_UC_DEF:
	case EF4_FARCH_FILTER_MC_DEF:
		gen_spec->match_flags = EF4_FILTER_MATCH_LOC_MAC_IG;
		gen_spec->loc_mac[0] = spec->type == EF4_FARCH_FILTER_MC_DEF;
		break;

	default:
		WARN_ON(1);
		break;
	}
}

static void
ef4_farch_filter_init_rx_auto(struct ef4_nic *efx,
			      struct ef4_farch_filter_spec *spec)
{
	/* If there's only one channel then disable RSS for non VF
	 * traffic, thereby allowing VFs to use RSS when the PF can't.
	 */
	spec->priority = EF4_FILTER_PRI_AUTO;
	spec->flags = (EF4_FILTER_FLAG_RX |
		       (ef4_rss_enabled(efx) ? EF4_FILTER_FLAG_RX_RSS : 0) |
		       (efx->rx_scatter ? EF4_FILTER_FLAG_RX_SCATTER : 0));
	spec->dmaq_id = 0;
}

/* Build a filter entry and return its n-tuple key. */
static u32 ef4_farch_filter_build(ef4_oword_t *filter,
				  struct ef4_farch_filter_spec *spec)
{
	u32 data3;

	switch (ef4_farch_filter_spec_table_id(spec)) {
	case EF4_FARCH_FILTER_TABLE_RX_IP: {
		bool is_udp = (spec->type == EF4_FARCH_FILTER_UDP_FULL ||
			       spec->type == EF4_FARCH_FILTER_UDP_WILD);
		EF4_POPULATE_OWORD_7(
			*filter,
			FRF_BZ_RSS_EN,
			!!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
			FRF_BZ_SCATTER_EN,
			!!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
			FRF_BZ_TCP_UDP, is_udp,
			FRF_BZ_RXQ_ID, spec->dmaq_id,
			EF4_DWORD_2, spec->data[2],
			EF4_DWORD_1, spec->data[1],
			EF4_DWORD_0, spec->data[0]);
		data3 = is_udp;
		break;
	}

	case EF4_FARCH_FILTER_TABLE_RX_MAC: {
		bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
		EF4_POPULATE_OWORD_7(
			*filter,
			FRF_CZ_RMFT_RSS_EN,
			!!(spec->flags & EF4_FILTER_FLAG_RX_RSS),
			FRF_CZ_RMFT_SCATTER_EN,
			!!(spec->flags & EF4_FILTER_FLAG_RX_SCATTER),
			FRF_CZ_RMFT_RXQ_ID, spec->dmaq_id,
			FRF_CZ_RMFT_WILDCARD_MATCH, is_wild,
			FRF_CZ_RMFT_DEST_MAC_HI, spec->data[2],
			FRF_CZ_RMFT_DEST_MAC_LO, spec->data[1],
			FRF_CZ_RMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild;
		break;
	}

	case EF4_FARCH_FILTER_TABLE_TX_MAC: {
		bool is_wild = spec->type == EF4_FARCH_FILTER_MAC_WILD;
		EF4_POPULATE_OWORD_5(*filter,
				     FRF_CZ_TMFT_TXQ_ID, spec->dmaq_id,
				     FRF_CZ_TMFT_WILDCARD_MATCH, is_wild,
				     FRF_CZ_TMFT_SRC_MAC_HI, spec->data[2],
				     FRF_CZ_TMFT_SRC_MAC_LO, spec->data[1],
				     FRF_CZ_TMFT_VLAN_ID, spec->data[0]);
		data3 = is_wild | spec->dmaq_id << 1;
		break;
	}

	default:
		BUG();
	}

	return spec->data[0] ^ spec->data[1] ^ spec->data[2] ^ data3;
}

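/* The key returned above is simply the XOR of the three match-data
 * words with a small type/queue-dependent term (data3), so two specs
 * that compare equal produce the same key; it is this key that
 * ef4_farch_filter_hash() and ef4_farch_filter_increment() consume.
 */
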
static bool ef4_farch_filter_equal(const struct ef4_farch_filter_spec *left,
				   const struct ef4_farch_filter_spec *right)
{
	if (left->type != right->type ||
	    memcmp(left->data, right->data, sizeof(left->data)))
		return false;

	if (left->flags & EF4_FILTER_FLAG_TX &&
	    left->dmaq_id != right->dmaq_id)
		return false;

	return true;
}

/*
 * Construct/deconstruct external filter IDs.  At least the RX filter
 * IDs must be ordered by matching priority, for RX NFC semantics.
 *
 * Deconstruction needs to be robust against invalid IDs so that
 * ef4_filter_remove_id_safe() and ef4_filter_get_filter_safe() can
 * accept user-provided IDs.
 */

#define EF4_FARCH_FILTER_MATCH_PRI_COUNT	5

static const u8 ef4_farch_filter_type_match_pri[EF4_FARCH_FILTER_TYPE_COUNT] = {
	[EF4_FARCH_FILTER_TCP_FULL] = 0,
	[EF4_FARCH_FILTER_UDP_FULL] = 0,
	[EF4_FARCH_FILTER_TCP_WILD] = 1,
	[EF4_FARCH_FILTER_UDP_WILD] = 1,
	[EF4_FARCH_FILTER_MAC_FULL] = 2,
	[EF4_FARCH_FILTER_MAC_WILD] = 3,
	[EF4_FARCH_FILTER_UC_DEF] = 4,
	[EF4_FARCH_FILTER_MC_DEF] = 4,
};

static const enum ef4_farch_filter_table_id ef4_farch_filter_range_table[] = {
	EF4_FARCH_FILTER_TABLE_RX_IP,	/* RX match pri 0 */
	EF4_FARCH_FILTER_TABLE_RX_IP,
	EF4_FARCH_FILTER_TABLE_RX_MAC,
	EF4_FARCH_FILTER_TABLE_RX_MAC,
	EF4_FARCH_FILTER_TABLE_RX_DEF,	/* RX match pri 4 */
	EF4_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 0 */
	EF4_FARCH_FILTER_TABLE_TX_MAC,	/* TX match pri 1 */
};

#define EF4_FARCH_FILTER_INDEX_WIDTH 13
#define EF4_FARCH_FILTER_INDEX_MASK ((1 << EF4_FARCH_FILTER_INDEX_WIDTH) - 1)

static inline u32
ef4_farch_filter_make_id(const struct ef4_farch_filter_spec *spec,
			 unsigned int index)
{
	unsigned int range;

	range = ef4_farch_filter_type_match_pri[spec->type];
	if (!(spec->flags & EF4_FILTER_FLAG_RX))
		range += EF4_FARCH_FILTER_MATCH_PRI_COUNT;

	return range << EF4_FARCH_FILTER_INDEX_WIDTH | index;
}

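/* So an external filter ID looks like this (13-bit index, per the
 * INDEX_WIDTH/INDEX_MASK definitions above):
 *
 *	bits 13 and up:	match-priority range (RX 0-4, then TX 5-6)
 *	bits 12-0:	index of the entry within its hardware table
 *
 * Ordering IDs by range keeps them sorted by matching priority, as the
 * RX NFC semantics mentioned above require.
 */
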
static inline enum ef4_farch_filter_table_id
ef4_farch_filter_id_table_id(u32 id)
{
	unsigned int range = id >> EF4_FARCH_FILTER_INDEX_WIDTH;

	if (range < ARRAY_SIZE(ef4_farch_filter_range_table))
		return ef4_farch_filter_range_table[range];
	else
		return EF4_FARCH_FILTER_TABLE_COUNT; /* invalid */
}

static inline unsigned int ef4_farch_filter_id_index(u32 id)
{
	return id & EF4_FARCH_FILTER_INDEX_MASK;
}

u32 ef4_farch_filter_get_rx_id_limit(struct ef4_nic *efx)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	unsigned int range = EF4_FARCH_FILTER_MATCH_PRI_COUNT - 1;
	enum ef4_farch_filter_table_id table_id;

	do {
		table_id = ef4_farch_filter_range_table[range];
		if (state->table[table_id].size != 0)
			return range << EF4_FARCH_FILTER_INDEX_WIDTH |
				state->table[table_id].size;
	} while (range--);

	return 0;
}

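/* The returned limit is one greater than the largest valid RX filter
 * ID: the highest populated RX range shifted into the range field,
 * plus that table's size.
 */
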
s32 ef4_farch_filter_insert(struct ef4_nic *efx,
			    struct ef4_filter_spec *gen_spec,
			    bool replace_equal)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	struct ef4_farch_filter_table *table;
	struct ef4_farch_filter_spec spec;
	ef4_oword_t filter;
	int rep_index, ins_index;
	unsigned int depth = 0;
	int rc;

	rc = ef4_farch_filter_from_gen_spec(&spec, gen_spec);
	if (rc)
		return rc;

	table = &state->table[ef4_farch_filter_spec_table_id(&spec)];
	if (table->size == 0)
		return -EINVAL;

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: type %d search_limit=%d", __func__, spec.type,
		   table->search_limit[spec.type]);

	if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
		/* One filter spec per type */
		BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_UC_DEF != 0);
		BUILD_BUG_ON(EF4_FARCH_FILTER_INDEX_MC_DEF !=
			     EF4_FARCH_FILTER_MC_DEF - EF4_FARCH_FILTER_UC_DEF);
		rep_index = spec.type - EF4_FARCH_FILTER_UC_DEF;
		ins_index = rep_index;

		spin_lock_bh(&efx->filter_lock);
	} else {
		/* Search concurrently for
		 * (1) a filter to be replaced (rep_index): any filter
		 *	with the same match values, up to the current
		 *	search depth for this type, and
		 * (2) the insertion point (ins_index): (1) or any
		 *	free slot before it or up to the maximum search
		 *	depth for this priority
		 * We fail if we cannot find (2).
		 *
		 * We can stop once either
		 * (a) we find (1), in which case we have definitely
		 *	found (2) as well; or
		 * (b) we have searched exhaustively for (1), and have
		 *	either found (2) or searched exhaustively for it
		 */
		u32 key = ef4_farch_filter_build(&filter, &spec);
		unsigned int hash = ef4_farch_filter_hash(key);
		unsigned int incr = ef4_farch_filter_increment(key);
		unsigned int max_rep_depth = table->search_limit[spec.type];
		unsigned int max_ins_depth =
			spec.priority <= EF4_FILTER_PRI_HINT ?
			EF4_FARCH_FILTER_CTL_SRCH_HINT_MAX :
			EF4_FARCH_FILTER_CTL_SRCH_MAX;
		unsigned int i = hash & (table->size - 1);

		ins_index = -1;
		depth = 1;

		spin_lock_bh(&efx->filter_lock);

		for (;;) {
			if (!test_bit(i, table->used_bitmap)) {
				if (ins_index < 0)
					ins_index = i;
			} else if (ef4_farch_filter_equal(&spec,
							  &table->spec[i])) {
				/* Case (a) */
				if (ins_index < 0)
					ins_index = i;
				rep_index = i;
				break;
			}

			if (depth >= max_rep_depth &&
			    (ins_index >= 0 || depth >= max_ins_depth)) {
				/* Case (b) */
				if (ins_index < 0) {
					rc = -EBUSY;
					goto out;
				}
				rep_index = -1;
				break;
			}

			i = (i + incr) & (table->size - 1);
			++depth;
		}
	}

	/* If we found a filter to be replaced, check whether we
	 * should do so
	 */
	if (rep_index >= 0) {
		struct ef4_farch_filter_spec *saved_spec =
			&table->spec[rep_index];

		if (spec.priority == saved_spec->priority && !replace_equal) {
			rc = -EEXIST;
			goto out;
		}
		if (spec.priority < saved_spec->priority) {
			rc = -EPERM;
			goto out;
		}
		if (saved_spec->priority == EF4_FILTER_PRI_AUTO ||
		    saved_spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO)
			spec.flags |= EF4_FILTER_FLAG_RX_OVER_AUTO;
	}

	/* Insert the filter */
	if (ins_index != rep_index) {
		__set_bit(ins_index, table->used_bitmap);
		++table->used;
	}
	table->spec[ins_index] = spec;

	if (table->id == EF4_FARCH_FILTER_TABLE_RX_DEF) {
		ef4_farch_filter_push_rx_config(efx);
	} else {
		if (table->search_limit[spec.type] < depth) {
			table->search_limit[spec.type] = depth;
			if (spec.flags & EF4_FILTER_FLAG_TX)
				ef4_farch_filter_push_tx_limits(efx);
			else
				ef4_farch_filter_push_rx_config(efx);
		}

		ef4_writeo(efx, &filter,
			   table->offset + table->step * ins_index);

		/* If we were able to replace a filter by inserting
		 * at a lower depth, clear the replaced filter
		 */
		if (ins_index != rep_index && rep_index >= 0)
			ef4_farch_filter_table_clear_entry(efx, table,
							   rep_index);
	}

	netif_vdbg(efx, hw, efx->net_dev,
		   "%s: filter type %d index %d rxq %u set",
		   __func__, spec.type, ins_index, spec.dmaq_id);
	rc = ef4_farch_filter_make_id(&spec, ins_index);

out:
	spin_unlock_bh(&efx->filter_lock);
	return rc;
}

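/* Typical caller usage, as a sketch (assuming the ef4_filter_init_rx()
 * and ef4_filter_set_ipv4_local() helpers declared in filter.h; the
 * rxq_index/ip/port values are placeholders):
 *
 *	struct ef4_filter_spec gen_spec;
 *	s32 id;
 *
 *	ef4_filter_init_rx(&gen_spec, EF4_FILTER_PRI_MANUAL, 0, rxq_index);
 *	ef4_filter_set_ipv4_local(&gen_spec, IPPROTO_TCP, ip, port);
 *	id = ef4_farch_filter_insert(efx, &gen_spec, false);
 *	if (id < 0)
 *		// insertion failed, e.g. -EBUSY if no free slot was found
 */
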
static void
ef4_farch_filter_table_clear_entry(struct ef4_nic *efx,
				   struct ef4_farch_filter_table *table,
				   unsigned int filter_idx)
{
	static ef4_oword_t filter;

	EF4_WARN_ON_PARANOID(!test_bit(filter_idx, table->used_bitmap));
	BUG_ON(table->offset == 0); /* can't clear MAC default filters */

	__clear_bit(filter_idx, table->used_bitmap);
	--table->used;
	memset(&table->spec[filter_idx], 0, sizeof(table->spec[0]));

	ef4_writeo(efx, &filter, table->offset + table->step * filter_idx);

	/* If this filter required a greater search depth than
	 * any other, the search limit for its type can now be
	 * decreased.  However, it is hard to determine that
	 * unless the table has become completely empty - in
	 * which case, all its search limits can be set to 0.
	 */
	if (unlikely(table->used == 0)) {
		memset(table->search_limit, 0, sizeof(table->search_limit));
		if (table->id == EF4_FARCH_FILTER_TABLE_TX_MAC)
			ef4_farch_filter_push_tx_limits(efx);
		else
			ef4_farch_filter_push_rx_config(efx);
	}
}

static int ef4_farch_filter_remove(struct ef4_nic *efx,
				   struct ef4_farch_filter_table *table,
				   unsigned int filter_idx,
				   enum ef4_filter_priority priority)
{
	struct ef4_farch_filter_spec *spec = &table->spec[filter_idx];

	if (!test_bit(filter_idx, table->used_bitmap) ||
	    spec->priority != priority)
		return -ENOENT;

	if (spec->flags & EF4_FILTER_FLAG_RX_OVER_AUTO) {
		ef4_farch_filter_init_rx_auto(efx, spec);
		ef4_farch_filter_push_rx_config(efx);
	} else {
		ef4_farch_filter_table_clear_entry(efx, table, filter_idx);
	}
	return 0;
}

int ef4_farch_filter_remove_safe(struct ef4_nic *efx,
				 enum ef4_filter_priority priority,
				 u32 filter_id)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	enum ef4_farch_filter_table_id table_id;
	struct ef4_farch_filter_table *table;
	unsigned int filter_idx;
	int rc;

	table_id = ef4_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = ef4_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;

	spin_lock_bh(&efx->filter_lock);
	rc = ef4_farch_filter_remove(efx, table, filter_idx, priority);
	spin_unlock_bh(&efx->filter_lock);

	return rc;
}

int ef4_farch_filter_get_safe(struct ef4_nic *efx,
			      enum ef4_filter_priority priority,
			      u32 filter_id, struct ef4_filter_spec *spec_buf)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	enum ef4_farch_filter_table_id table_id;
	struct ef4_farch_filter_table *table;
	struct ef4_farch_filter_spec *spec;
	unsigned int filter_idx;
	int rc;

	table_id = ef4_farch_filter_id_table_id(filter_id);
	if ((unsigned int)table_id >= EF4_FARCH_FILTER_TABLE_COUNT)
		return -ENOENT;
	table = &state->table[table_id];

	filter_idx = ef4_farch_filter_id_index(filter_id);
	if (filter_idx >= table->size)
		return -ENOENT;
	spec = &table->spec[filter_idx];

	spin_lock_bh(&efx->filter_lock);

	if (test_bit(filter_idx, table->used_bitmap) &&
	    spec->priority == priority) {
		ef4_farch_filter_to_gen_spec(spec_buf, spec);
		rc = 0;
	} else {
		rc = -ENOENT;
	}

	spin_unlock_bh(&efx->filter_lock);

	return rc;
}

static void
ef4_farch_filter_table_clear(struct ef4_nic *efx,
			     enum ef4_farch_filter_table_id table_id,
			     enum ef4_filter_priority priority)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	struct ef4_farch_filter_table *table = &state->table[table_id];
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);
	for (filter_idx = 0; filter_idx < table->size; ++filter_idx) {
		if (table->spec[filter_idx].priority != EF4_FILTER_PRI_AUTO)
			ef4_farch_filter_remove(efx, table,
						filter_idx, priority);
	}
	spin_unlock_bh(&efx->filter_lock);
}

int ef4_farch_filter_clear_rx(struct ef4_nic *efx,
			      enum ef4_filter_priority priority)
{
	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_IP,
				     priority);
	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_MAC,
				     priority);
	ef4_farch_filter_table_clear(efx, EF4_FARCH_FILTER_TABLE_RX_DEF,
				     priority);
	return 0;
}

u32 ef4_farch_filter_count_rx_used(struct ef4_nic *efx,
				   enum ef4_filter_priority priority)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	enum ef4_farch_filter_table_id table_id;
	struct ef4_farch_filter_table *table;
	unsigned int filter_idx;
	u32 count = 0;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority)
				++count;
		}
	}

	spin_unlock_bh(&efx->filter_lock);

	return count;
}

s32 ef4_farch_filter_get_rx_ids(struct ef4_nic *efx,
				enum ef4_filter_priority priority,
				u32 *buf, u32 size)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	enum ef4_farch_filter_table_id table_id;
	struct ef4_farch_filter_table *table;
	unsigned int filter_idx;
	s32 count = 0;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];
		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (test_bit(filter_idx, table->used_bitmap) &&
			    table->spec[filter_idx].priority == priority) {
				if (count == size) {
					count = -EMSGSIZE;
					goto out;
				}
				buf[count++] = ef4_farch_filter_make_id(
					&table->spec[filter_idx], filter_idx);
			}
		}
	}
out:
	spin_unlock_bh(&efx->filter_lock);

	return count;
}

/* Restore filter state after reset */
void ef4_farch_filter_table_restore(struct ef4_nic *efx)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	enum ef4_farch_filter_table_id table_id;
	struct ef4_farch_filter_table *table;
	ef4_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];

		/* Check whether this is a regular register table */
		if (table->step == 0)
			continue;

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap))
				continue;
			ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
			ef4_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	ef4_farch_filter_push_rx_config(efx);
	ef4_farch_filter_push_tx_limits(efx);

	spin_unlock_bh(&efx->filter_lock);
}

void ef4_farch_filter_table_remove(struct ef4_nic *efx)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	enum ef4_farch_filter_table_id table_id;

	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
		kfree(state->table[table_id].used_bitmap);
		vfree(state->table[table_id].spec);
	}
	kfree(state);
}

int ef4_farch_filter_table_probe(struct ef4_nic *efx)
{
	struct ef4_farch_filter_state *state;
	struct ef4_farch_filter_table *table;
	unsigned table_id;

	state = kzalloc(sizeof(struct ef4_farch_filter_state), GFP_KERNEL);
	if (!state)
		return -ENOMEM;
	efx->filter_state = state;

	if (ef4_nic_rev(efx) >= EF4_REV_FALCON_B0) {
		table = &state->table[EF4_FARCH_FILTER_TABLE_RX_IP];
		table->id = EF4_FARCH_FILTER_TABLE_RX_IP;
		table->offset = FR_BZ_RX_FILTER_TBL0;
		table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
		table->step = FR_BZ_RX_FILTER_TBL0_STEP;
	}

	for (table_id = 0; table_id < EF4_FARCH_FILTER_TABLE_COUNT; table_id++) {
		table = &state->table[table_id];
		if (table->size == 0)
			continue;
		table->used_bitmap = kcalloc(BITS_TO_LONGS(table->size),
					     sizeof(unsigned long),
					     GFP_KERNEL);
		if (!table->used_bitmap)
			goto fail;
		table->spec = vzalloc(table->size * sizeof(*table->spec));
		if (!table->spec)
			goto fail;
	}

	table = &state->table[EF4_FARCH_FILTER_TABLE_RX_DEF];
	if (table->size) {
		/* RX default filters must always exist */
		struct ef4_farch_filter_spec *spec;
		unsigned i;

		for (i = 0; i < EF4_FARCH_FILTER_SIZE_RX_DEF; i++) {
			spec = &table->spec[i];
			spec->type = EF4_FARCH_FILTER_UC_DEF + i;
			ef4_farch_filter_init_rx_auto(efx, spec);
			__set_bit(i, table->used_bitmap);
		}
	}

	ef4_farch_filter_push_rx_config(efx);

	return 0;

fail:
	ef4_farch_filter_table_remove(efx);
	return -ENOMEM;
}

/* Update scatter enable flags for filters pointing to our own RX queues */
void ef4_farch_filter_update_rx_scatter(struct ef4_nic *efx)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	enum ef4_farch_filter_table_id table_id;
	struct ef4_farch_filter_table *table;
	ef4_oword_t filter;
	unsigned int filter_idx;

	spin_lock_bh(&efx->filter_lock);

	for (table_id = EF4_FARCH_FILTER_TABLE_RX_IP;
	     table_id <= EF4_FARCH_FILTER_TABLE_RX_DEF;
	     table_id++) {
		table = &state->table[table_id];

		for (filter_idx = 0; filter_idx < table->size; filter_idx++) {
			if (!test_bit(filter_idx, table->used_bitmap) ||
			    table->spec[filter_idx].dmaq_id >=
			    efx->n_rx_channels)
				continue;

			if (efx->rx_scatter)
				table->spec[filter_idx].flags |=
					EF4_FILTER_FLAG_RX_SCATTER;
			else
				table->spec[filter_idx].flags &=
					~EF4_FILTER_FLAG_RX_SCATTER;

			if (table_id == EF4_FARCH_FILTER_TABLE_RX_DEF)
				/* Pushed by ef4_farch_filter_push_rx_config() */
				continue;

			ef4_farch_filter_build(&filter, &table->spec[filter_idx]);
			ef4_writeo(efx, &filter,
				   table->offset + table->step * filter_idx);
		}
	}

	ef4_farch_filter_push_rx_config(efx);

	spin_unlock_bh(&efx->filter_lock);
}

#ifdef CONFIG_RFS_ACCEL

s32 ef4_farch_filter_rfs_insert(struct ef4_nic *efx,
				struct ef4_filter_spec *gen_spec)
{
	return ef4_farch_filter_insert(efx, gen_spec, true);
}

bool ef4_farch_filter_rfs_expire_one(struct ef4_nic *efx, u32 flow_id,
				     unsigned int index)
{
	struct ef4_farch_filter_state *state = efx->filter_state;
	struct ef4_farch_filter_table *table =
		&state->table[EF4_FARCH_FILTER_TABLE_RX_IP];

	if (test_bit(index, table->used_bitmap) &&
	    table->spec[index].priority == EF4_FILTER_PRI_HINT &&
	    rps_may_expire_flow(efx->net_dev, table->spec[index].dmaq_id,
				flow_id, index)) {
		ef4_farch_filter_table_clear_entry(efx, table, index);
		return true;
	}

	return false;
}

#endif /* CONFIG_RFS_ACCEL */

void ef4_farch_filter_sync_rx_mode(struct ef4_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct netdev_hw_addr *ha;
	union ef4_multicast_hash *mc_hash = &efx->multicast_hash;
	u32 crc;
	int bit;

	if (!ef4_dev_registered(efx))
		return;

	netif_addr_lock_bh(net_dev);

	efx->unicast_filter = !(net_dev->flags & IFF_PROMISC);

	/* Build multicast hash table */
	if (net_dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) {
		memset(mc_hash, 0xff, sizeof(*mc_hash));
	} else {
		memset(mc_hash, 0x00, sizeof(*mc_hash));
		netdev_for_each_mc_addr(ha, net_dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & (EF4_MCAST_HASH_ENTRIES - 1);
			__set_bit_le(bit, mc_hash);
		}

		/* Broadcast packets go through the multicast hash filter.
		 * ether_crc_le() of the broadcast address is 0xbe2612ff
		 * so we always add bit 0xff to the mask.
		 */
		__set_bit_le(0xff, mc_hash);
	}

	netif_addr_unlock_bh(net_dev);
}
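
/* Worked example of the hash mapping above: the bit index is the CRC
 * masked to the hash width, so with EF4_MCAST_HASH_ENTRIES at 256 (as
 * the 0xff broadcast bit implies), an address whose little-endian CRC
 * is 0xbe2612ff selects bit 0xff; this is why broadcast traffic is
 * covered by always setting that bit.
 */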