2 * This file is part of the Chelsio T4 Ethernet driver for Linux.
4 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 #include <linux/delay.h>
38 #include "t4_values.h"
40 #include "t4fw_version.h"
43 * t4_wait_op_done_val - wait until an operation is completed
44 * @adapter: the adapter performing the operation
45 * @reg: the register to check for completion
46 * @mask: a single-bit field within @reg that indicates completion
47 * @polarity: the value of the field when the operation is completed
48 * @attempts: number of check iterations
49 * @delay: delay in usecs between iterations
50 * @valp: where to store the value of the register at completion time
52 * Wait until an operation is completed by checking a bit in a register
53 * up to @attempts times. If @valp is not NULL the value of the register
54 * at the time it indicated completion is stored there. Returns 0 if the
55 * operation completes and -EAGAIN otherwise.
57 static int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
58 int polarity, int attempts, int delay, u32 *valp)
61 u32 val = t4_read_reg(adapter, reg);
63 if (!!(val & mask) == polarity) {
75 static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
76 int polarity, int attempts, int delay)
78 return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
83 * t4_set_reg_field - set a register field to a value
84 * @adapter: the adapter to program
85 * @addr: the register address
86 * @mask: specifies the portion of the register to modify
87 * @val: the new value for the register field
89 * Sets a register field specified by the supplied mask to the
92 void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
95 u32 v = t4_read_reg(adapter, addr) & ~mask;
97 t4_write_reg(adapter, addr, v | val);
98 (void) t4_read_reg(adapter, addr); /* flush */
102 * t4_read_indirect - read indirectly addressed registers
104 * @addr_reg: register holding the indirect address
105 * @data_reg: register holding the value of the indirect register
106 * @vals: where the read register values are stored
107 * @nregs: how many indirect registers to read
108 * @start_idx: index of first indirect register to read
110 * Reads registers that are accessed indirectly through an address/data
113 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
114 unsigned int data_reg, u32 *vals,
115 unsigned int nregs, unsigned int start_idx)
118 t4_write_reg(adap, addr_reg, start_idx);
119 *vals++ = t4_read_reg(adap, data_reg);
125 * t4_write_indirect - write indirectly addressed registers
127 * @addr_reg: register holding the indirect addresses
128 * @data_reg: register holding the value for the indirect registers
129 * @vals: values to write
130 * @nregs: how many indirect registers to write
131 * @start_idx: address of first indirect register to write
133 * Writes a sequential block of registers that are accessed indirectly
134 * through an address/data register pair.
136 void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
137 unsigned int data_reg, const u32 *vals,
138 unsigned int nregs, unsigned int start_idx)
141 t4_write_reg(adap, addr_reg, start_idx++);
142 t4_write_reg(adap, data_reg, *vals++);
147 * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
148 * mechanism. This guarantees that we get the real value even if we're
149 * operating within a Virtual Machine and the Hypervisor is trapping our
150 * Configuration Space accesses.
152 void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
154 u32 req = FUNCTION_V(adap->pf) | REGISTER_V(reg);
156 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
161 if (is_t4(adap->params.chip))
164 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, req);
165 *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA_A);
167 /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
168 * Configuration Space read. (None of the other fields matter when
169 * ENABLE is 0 so a simple register write is easier than a
170 * read-modify-write via t4_set_reg_field().)
172 t4_write_reg(adap, PCIE_CFG_SPACE_REQ_A, 0);
176 * t4_report_fw_error - report firmware error
179 * The adapter firmware can indicate error conditions to the host.
180 * If the firmware has indicated an error, print out the reason for
181 * the firmware error.
183 static void t4_report_fw_error(struct adapter *adap)
185 static const char *const reason[] = {
186 "Crash", /* PCIE_FW_EVAL_CRASH */
187 "During Device Preparation", /* PCIE_FW_EVAL_PREP */
188 "During Device Configuration", /* PCIE_FW_EVAL_CONF */
189 "During Device Initialization", /* PCIE_FW_EVAL_INIT */
190 "Unexpected Event", /* PCIE_FW_EVAL_UNEXPECTEDEVENT */
191 "Insufficient Airflow", /* PCIE_FW_EVAL_OVERHEAT */
192 "Device Shutdown", /* PCIE_FW_EVAL_DEVICESHUTDOWN */
193 "Reserved", /* reserved */
197 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
198 if (pcie_fw & PCIE_FW_ERR_F) {
199 dev_err(adap->pdev_dev, "Firmware reports adapter error: %s\n",
200 reason[PCIE_FW_EVAL_G(pcie_fw)]);
201 adap->flags &= ~FW_OK;
206 * Get the reply to a mailbox command and store it in @rpl in big-endian order.
208 static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
211 for ( ; nflit; nflit--, mbox_addr += 8)
212 *rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
216 * Handle a FW assertion reported in a mailbox.
218 static void fw_asrt(struct adapter *adap, u32 mbox_addr)
220 struct fw_debug_cmd asrt;
222 get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
223 dev_alert(adap->pdev_dev,
224 "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
225 asrt.u.assert.filename_0_7, be32_to_cpu(asrt.u.assert.line),
226 be32_to_cpu(asrt.u.assert.x), be32_to_cpu(asrt.u.assert.y));
230 * t4_record_mbox - record a Firmware Mailbox Command/Reply in the log
231 * @adapter: the adapter
232 * @cmd: the Firmware Mailbox Command or Reply
233 * @size: command length in bytes
234 * @access: the time (ms) needed to access the Firmware Mailbox
235 * @execute: the time (ms) the command spent being executed
237 static void t4_record_mbox(struct adapter *adapter,
238 const __be64 *cmd, unsigned int size,
239 int access, int execute)
241 struct mbox_cmd_log *log = adapter->mbox_log;
242 struct mbox_cmd *entry;
245 entry = mbox_cmd_log_entry(log, log->cursor++);
246 if (log->cursor == log->size)
249 for (i = 0; i < size / 8; i++)
250 entry->cmd[i] = be64_to_cpu(cmd[i]);
251 while (i < MBOX_LEN / 8)
253 entry->timestamp = jiffies;
254 entry->seqno = log->seqno++;
255 entry->access = access;
256 entry->execute = execute;
260 * t4_wr_mbox_meat_timeout - send a command to FW through the given mailbox
262 * @mbox: index of the mailbox to use
263 * @cmd: the command to write
264 * @size: command length in bytes
265 * @rpl: where to optionally store the reply
266 * @sleep_ok: if true we may sleep while awaiting command completion
267 * @timeout: time to wait for command to finish before timing out
269 * Sends the given command to FW through the selected mailbox and waits
270 * for the FW to execute the command. If @rpl is not %NULL it is used to
271 * store the FW's reply to the command. The command and its optional
272 * reply are of the same length. FW can take up to %FW_CMD_MAX_TIMEOUT ms
273 * to respond. @sleep_ok determines whether we may sleep while awaiting
274 * the response. If sleeping is allowed we use progressive backoff
277 * The return value is 0 on success or a negative errno on failure. A
278 * failure can happen either because we are not able to execute the
279 * command or FW executes it but signals an error. In the latter case
280 * the return value is the error code indicated by FW (negated).
282 int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
283 int size, void *rpl, bool sleep_ok, int timeout)
285 static const int delay[] = {
286 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
289 struct mbox_list entry;
294 int i, ms, delay_idx, ret;
295 const __be64 *p = cmd;
296 u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA_A);
297 u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL_A);
298 __be64 cmd_rpl[MBOX_LEN / 8];
301 if ((size & 15) || size > MBOX_LEN)
305 * If the device is off-line, as in EEH, commands will time out.
306 * Fail them early so we don't waste time waiting.
308 if (adap->pdev->error_state != pci_channel_io_normal)
311 /* If we have a negative timeout, that implies that we can't sleep. */
317 /* Queue ourselves onto the mailbox access list. When our entry is at
318 * the front of the list, we have rights to access the mailbox. So we
319 * wait [for a while] till we're at the front [or bail out with an
322 spin_lock_bh(&adap->mbox_lock);
323 list_add_tail(&entry.list, &adap->mlist.list);
324 spin_unlock_bh(&adap->mbox_lock);
329 for (i = 0; ; i += ms) {
330 /* If we've waited too long, return a busy indication. This
331 * really ought to be based on our initial position in the
332 * mailbox access list but this is a start. We very rearely
333 * contend on access to the mailbox ...
335 pcie_fw = t4_read_reg(adap, PCIE_FW_A);
336 if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
337 spin_lock_bh(&adap->mbox_lock);
338 list_del(&entry.list);
339 spin_unlock_bh(&adap->mbox_lock);
340 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
341 t4_record_mbox(adap, cmd, size, access, ret);
345 /* If we're at the head, break out and start the mailbox
348 if (list_first_entry(&adap->mlist.list, struct mbox_list,
352 /* Delay for a bit before checking again ... */
354 ms = delay[delay_idx]; /* last element may repeat */
355 if (delay_idx < ARRAY_SIZE(delay) - 1)
363 /* Loop trying to get ownership of the mailbox. Return an error
364 * if we can't gain ownership.
366 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
367 for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
368 v = MBOWNER_G(t4_read_reg(adap, ctl_reg));
369 if (v != MBOX_OWNER_DRV) {
370 spin_lock_bh(&adap->mbox_lock);
371 list_del(&entry.list);
372 spin_unlock_bh(&adap->mbox_lock);
373 ret = (v == MBOX_OWNER_FW) ? -EBUSY : -ETIMEDOUT;
374 t4_record_mbox(adap, cmd, size, access, ret);
378 /* Copy in the new mailbox command and send it on its way ... */
379 t4_record_mbox(adap, cmd, size, access, 0);
380 for (i = 0; i < size; i += 8)
381 t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
383 t4_write_reg(adap, ctl_reg, MBMSGVALID_F | MBOWNER_V(MBOX_OWNER_FW));
384 t4_read_reg(adap, ctl_reg); /* flush write */
390 !((pcie_fw = t4_read_reg(adap, PCIE_FW_A)) & PCIE_FW_ERR_F) &&
394 ms = delay[delay_idx]; /* last element may repeat */
395 if (delay_idx < ARRAY_SIZE(delay) - 1)
401 v = t4_read_reg(adap, ctl_reg);
402 if (MBOWNER_G(v) == MBOX_OWNER_DRV) {
403 if (!(v & MBMSGVALID_F)) {
404 t4_write_reg(adap, ctl_reg, 0);
408 get_mbox_rpl(adap, cmd_rpl, MBOX_LEN / 8, data_reg);
409 res = be64_to_cpu(cmd_rpl[0]);
411 if (FW_CMD_OP_G(res >> 32) == FW_DEBUG_CMD) {
412 fw_asrt(adap, data_reg);
413 res = FW_CMD_RETVAL_V(EIO);
415 memcpy(rpl, cmd_rpl, size);
418 t4_write_reg(adap, ctl_reg, 0);
421 t4_record_mbox(adap, cmd_rpl,
422 MBOX_LEN, access, execute);
423 spin_lock_bh(&adap->mbox_lock);
424 list_del(&entry.list);
425 spin_unlock_bh(&adap->mbox_lock);
426 return -FW_CMD_RETVAL_G((int)res);
430 ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -ETIMEDOUT;
431 t4_record_mbox(adap, cmd, size, access, ret);
432 dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
433 *(const u8 *)cmd, mbox);
434 t4_report_fw_error(adap);
435 spin_lock_bh(&adap->mbox_lock);
436 list_del(&entry.list);
437 spin_unlock_bh(&adap->mbox_lock);
442 int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
443 void *rpl, bool sleep_ok)
445 return t4_wr_mbox_meat_timeout(adap, mbox, cmd, size, rpl, sleep_ok,
449 static int t4_edc_err_read(struct adapter *adap, int idx)
451 u32 edc_ecc_err_addr_reg;
454 if (is_t4(adap->params.chip)) {
455 CH_WARN(adap, "%s: T4 NOT supported.\n", __func__);
458 if (idx != 0 && idx != 1) {
459 CH_WARN(adap, "%s: idx %d NOT supported.\n", __func__, idx);
463 edc_ecc_err_addr_reg = EDC_T5_REG(EDC_H_ECC_ERR_ADDR_A, idx);
464 rdata_reg = EDC_T5_REG(EDC_H_BIST_STATUS_RDATA_A, idx);
467 "edc%d err addr 0x%x: 0x%x.\n",
468 idx, edc_ecc_err_addr_reg,
469 t4_read_reg(adap, edc_ecc_err_addr_reg));
471 "bist: 0x%x, status %llx %llx %llx %llx %llx %llx %llx %llx %llx.\n",
473 (unsigned long long)t4_read_reg64(adap, rdata_reg),
474 (unsigned long long)t4_read_reg64(adap, rdata_reg + 8),
475 (unsigned long long)t4_read_reg64(adap, rdata_reg + 16),
476 (unsigned long long)t4_read_reg64(adap, rdata_reg + 24),
477 (unsigned long long)t4_read_reg64(adap, rdata_reg + 32),
478 (unsigned long long)t4_read_reg64(adap, rdata_reg + 40),
479 (unsigned long long)t4_read_reg64(adap, rdata_reg + 48),
480 (unsigned long long)t4_read_reg64(adap, rdata_reg + 56),
481 (unsigned long long)t4_read_reg64(adap, rdata_reg + 64));
487 * t4_memory_rw_init - Get memory window relative offset, base, and size.
489 * @win: PCI-E Memory Window to use
490 * @mtype: memory type: MEM_EDC0, MEM_EDC1, MEM_HMA or MEM_MC
491 * @mem_off: memory relative offset with respect to @mtype.
492 * @mem_base: configured memory base address.
493 * @mem_aperture: configured memory window aperture.
495 * Get the configured memory window's relative offset, base, and size.
497 int t4_memory_rw_init(struct adapter *adap, int win, int mtype, u32 *mem_off,
498 u32 *mem_base, u32 *mem_aperture)
500 u32 edc_size, mc_size, mem_reg;
502 /* Offset into the region of memory which is being accessed
505 * MEM_MC = 2 -- MEM_MC for chips with only 1 memory controller
506 * MEM_MC1 = 3 -- for chips with 2 memory controllers (e.g. T5)
509 edc_size = EDRAM0_SIZE_G(t4_read_reg(adap, MA_EDRAM0_BAR_A));
510 if (mtype == MEM_HMA) {
511 *mem_off = 2 * (edc_size * 1024 * 1024);
512 } else if (mtype != MEM_MC1) {
513 *mem_off = (mtype * (edc_size * 1024 * 1024));
515 mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
516 MA_EXT_MEMORY0_BAR_A));
517 *mem_off = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
520 /* Each PCI-E Memory Window is programmed with a window size -- or
521 * "aperture" -- which controls the granularity of its mapping onto
522 * adapter memory. We need to grab that aperture in order to know
523 * how to use the specified window. The window is also programmed
524 * with the base address of the Memory Window in BAR0's address
525 * space. For T4 this is an absolute PCI-E Bus Address. For T5
526 * the address is relative to BAR0.
528 mem_reg = t4_read_reg(adap,
529 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A,
531 /* a dead adapter will return 0xffffffff for PIO reads */
532 if (mem_reg == 0xffffffff)
535 *mem_aperture = 1 << (WINDOW_G(mem_reg) + WINDOW_SHIFT_X);
536 *mem_base = PCIEOFST_G(mem_reg) << PCIEOFST_SHIFT_X;
537 if (is_t4(adap->params.chip))
538 *mem_base -= adap->t4_bar0;
544 * t4_memory_update_win - Move memory window to specified address.
546 * @win: PCI-E Memory Window to use
547 * @addr: location to move.
549 * Move memory window to specified address.
551 void t4_memory_update_win(struct adapter *adap, int win, u32 addr)
554 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win),
556 /* Read it back to ensure that changes propagate before we
557 * attempt to use the new value.
560 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET_A, win));
564 * t4_memory_rw_residual - Read/Write residual data.
566 * @off: relative offset within residual to start read/write.
567 * @addr: address within indicated memory type.
568 * @buf: host memory buffer
569 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
571 * Read/Write residual data less than 32-bits.
573 void t4_memory_rw_residual(struct adapter *adap, u32 off, u32 addr, u8 *buf,
583 if (dir == T4_MEMORY_READ) {
584 last.word = le32_to_cpu((__force __le32)
585 t4_read_reg(adap, addr));
586 for (bp = (unsigned char *)buf, i = off; i < 4; i++)
587 bp[i] = last.byte[i];
590 for (i = off; i < 4; i++)
592 t4_write_reg(adap, addr,
593 (__force u32)cpu_to_le32(last.word));
598 * t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
600 * @win: PCI-E Memory Window to use
601 * @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
602 * @addr: address within indicated memory type
603 * @len: amount of memory to transfer
604 * @hbuf: host memory buffer
605 * @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
607 * Reads/writes an [almost] arbitrary memory region in the firmware: the
608 * firmware memory address and host buffer must be aligned on 32-bit
609 * boudaries; the length may be arbitrary. The memory is transferred as
610 * a raw byte sequence from/to the firmware's memory. If this memory
611 * contains data structures which contain multi-byte integers, it's the
612 * caller's responsibility to perform appropriate byte order conversions.
614 int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
615 u32 len, void *hbuf, int dir)
617 u32 pos, offset, resid, memoffset;
618 u32 win_pf, mem_aperture, mem_base;
622 /* Argument sanity checks ...
624 if (addr & 0x3 || (uintptr_t)hbuf & 0x3)
628 /* It's convenient to be able to handle lengths which aren't a
629 * multiple of 32-bits because we often end up transferring files to
630 * the firmware. So we'll handle that by normalizing the length here
631 * and then handling any residual transfer at the end.
636 ret = t4_memory_rw_init(adap, win, mtype, &memoffset, &mem_base,
641 /* Determine the PCIE_MEM_ACCESS_OFFSET */
642 addr = addr + memoffset;
644 win_pf = is_t4(adap->params.chip) ? 0 : PFNUM_V(adap->pf);
646 /* Calculate our initial PCI-E Memory Window Position and Offset into
649 pos = addr & ~(mem_aperture - 1);
652 /* Set up initial PCI-E Memory Window to cover the start of our
655 t4_memory_update_win(adap, win, pos | win_pf);
657 /* Transfer data to/from the adapter as long as there's an integral
658 * number of 32-bit transfers to complete.
660 * A note on Endianness issues:
662 * The "register" reads and writes below from/to the PCI-E Memory
663 * Window invoke the standard adapter Big-Endian to PCI-E Link
664 * Little-Endian "swizzel." As a result, if we have the following
665 * data in adapter memory:
667 * Memory: ... | b0 | b1 | b2 | b3 | ...
668 * Address: i+0 i+1 i+2 i+3
670 * Then a read of the adapter memory via the PCI-E Memory Window
675 * [ b3 | b2 | b1 | b0 ]
677 * If this value is stored into local memory on a Little-Endian system
678 * it will show up correctly in local memory as:
680 * ( ..., b0, b1, b2, b3, ... )
682 * But on a Big-Endian system, the store will show up in memory
683 * incorrectly swizzled as:
685 * ( ..., b3, b2, b1, b0, ... )
687 * So we need to account for this in the reads and writes to the
688 * PCI-E Memory Window below by undoing the register read/write
692 if (dir == T4_MEMORY_READ)
693 *buf++ = le32_to_cpu((__force __le32)t4_read_reg(adap,
696 t4_write_reg(adap, mem_base + offset,
697 (__force u32)cpu_to_le32(*buf++));
698 offset += sizeof(__be32);
699 len -= sizeof(__be32);
701 /* If we've reached the end of our current window aperture,
702 * move the PCI-E Memory Window on to the next. Note that
703 * doing this here after "len" may be 0 allows us to set up
704 * the PCI-E Memory Window for a possible final residual
707 if (offset == mem_aperture) {
710 t4_memory_update_win(adap, win, pos | win_pf);
714 /* If the original transfer had a length which wasn't a multiple of
715 * 32-bits, now's where we need to finish off the transfer of the
716 * residual amount. The PCI-E Memory Window has already been moved
717 * above (if necessary) to cover this final transfer.
720 t4_memory_rw_residual(adap, resid, mem_base + offset,
726 /* Return the specified PCI-E Configuration Space register from our Physical
727 * Function. We try first via a Firmware LDST Command since we prefer to let
728 * the firmware own all of these registers, but if that fails we go for it
729 * directly ourselves.
731 u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
733 u32 val, ldst_addrspace;
735 /* If fw_attach != 0, construct and send the Firmware LDST Command to
736 * retrieve the specified PCI-E Configuration Space register.
738 struct fw_ldst_cmd ldst_cmd;
741 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
742 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FUNC_PCIE);
743 ldst_cmd.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
747 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
748 ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS_V(1);
749 ldst_cmd.u.pcie.ctrl_to_fn =
750 (FW_LDST_CMD_LC_F | FW_LDST_CMD_FN_V(adap->pf));
751 ldst_cmd.u.pcie.r = reg;
753 /* If the LDST Command succeeds, return the result, otherwise
754 * fall through to reading it directly ourselves ...
756 ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
759 val = be32_to_cpu(ldst_cmd.u.pcie.data[0]);
761 /* Read the desired Configuration Space register via the PCI-E
762 * Backdoor mechanism.
764 t4_hw_pci_read_cfg4(adap, reg, &val);
768 /* Get the window based on base passed to it.
769 * Window aperture is currently unhandled, but there is no use case for it
772 static u32 t4_get_window(struct adapter *adap, u32 pci_base, u64 pci_mask,
777 if (is_t4(adap->params.chip)) {
780 /* Truncation intentional: we only read the bottom 32-bits of
781 * the 64-bit BAR0/BAR1 ... We use the hardware backdoor
782 * mechanism to read BAR0 instead of using
783 * pci_resource_start() because we could be operating from
784 * within a Virtual Machine which is trapping our accesses to
785 * our Configuration Space and we need to set up the PCI-E
786 * Memory Window decoders with the actual addresses which will
787 * be coming across the PCI-E link.
789 bar0 = t4_read_pcie_cfg4(adap, pci_base);
791 adap->t4_bar0 = bar0;
793 ret = bar0 + memwin_base;
795 /* For T5, only relative offset inside the PCIe BAR is passed */
801 /* Get the default utility window (win0) used by everyone */
802 u32 t4_get_util_window(struct adapter *adap)
804 return t4_get_window(adap, PCI_BASE_ADDRESS_0,
805 PCI_BASE_ADDRESS_MEM_MASK, MEMWIN0_BASE);
808 /* Set up memory window for accessing adapter memory ranges. (Read
809 * back MA register to ensure that changes propagate before we attempt
810 * to use the new values.)
812 void t4_setup_memwin(struct adapter *adap, u32 memwin_base, u32 window)
815 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window),
816 memwin_base | BIR_V(0) |
817 WINDOW_V(ilog2(MEMWIN0_APERTURE) - WINDOW_SHIFT_X));
819 PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN_A, window));
823 * t4_get_regs_len - return the size of the chips register set
824 * @adapter: the adapter
826 * Returns the size of the chip's BAR0 register space.
828 unsigned int t4_get_regs_len(struct adapter *adapter)
830 unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
832 switch (chip_version) {
834 return T4_REGMAP_SIZE;
838 return T5_REGMAP_SIZE;
841 dev_err(adapter->pdev_dev,
842 "Unsupported chip version %d\n", chip_version);
/**
 *	t4_get_regs - read chip registers into provided buffer
 *	@adap: the adapter
 *	@buf: register buffer
 *	@buf_size: size (in bytes) of register buffer
 *
 *	If the provided register buffer isn't large enough for the chip's
 *	full register range, the register dump will be truncated to the
 *	register buffer's size.
 */
void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size)
	/* NOTE(review): this excerpt elides the bodies of the three per-chip
	 * {first_reg, last_reg} address-range tables below (each hundreds of
	 * entries in the full file); only their declarations survive here.
	 */
	static const unsigned int t4_reg_ranges[] = {
	static const unsigned int t5_reg_ranges[] = {
	static const unsigned int t6_reg_ranges[] = {
	u32 *buf_end = (u32 *)((char *)buf + buf_size);
	const unsigned int *reg_ranges;
	int reg_ranges_size, range;
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);

	/* Select the right set of register ranges to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
	/* NOTE(review): the CHELSIO_T4/T5/T6 case labels are elided in this
	 * excerpt; each arm picks the matching table and its entry count.
	 */
		reg_ranges = t4_reg_ranges;
		reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);

		reg_ranges = t5_reg_ranges;
		reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);

		reg_ranges = t6_reg_ranges;
		reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);

	/* default arm: unsupported chip -- log and dump nothing */
		dev_err(adap->pdev_dev,
			"Unsupported chip version %d\n", chip_version);

	/* Clear the register buffer and insert the appropriate register
	 * values selected by the above register ranges.
	 */
	memset(buf, 0, buf_size);
	for (range = 0; range < reg_ranges_size; range += 2) {
		unsigned int reg = reg_ranges[range];
		unsigned int last_reg = reg_ranges[range + 1];
		u32 *bufp = (u32 *)((char *)buf + reg);

		/* Iterate across the register range filling in the register
		 * buffer but don't write past the end of the register buffer.
		 */
		while (reg <= last_reg && bufp < buf_end) {
			*bufp++ = t4_read_reg(adap, reg);
2695 #define EEPROM_STAT_ADDR 0x7bfc
2696 #define VPD_BASE 0x400
2697 #define VPD_BASE_OLD 0
2698 #define VPD_LEN 1024
2699 #define CHELSIO_VPD_UNIQUE_ID 0x82
2702 * t4_eeprom_ptov - translate a physical EEPROM address to virtual
2703 * @phys_addr: the physical EEPROM address
2704 * @fn: the PCI function number
2705 * @sz: size of function-specific area
2707 * Translate a physical EEPROM address to virtual. The first 1K is
2708 * accessed through virtual addresses starting at 31K, the rest is
2709 * accessed through virtual addresses starting at 0.
2711 * The mapping is as follows:
2712 * [0..1K) -> [31K..32K)
2713 * [1K..1K+A) -> [31K-A..31K)
2714 * [1K+A..ES) -> [0..ES-A-1K)
2716 * where A = @fn * @sz, and ES = EEPROM size.
2718 int t4_eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
2721 if (phys_addr < 1024)
2722 return phys_addr + (31 << 10);
2723 if (phys_addr < 1024 + fn)
2724 return 31744 - fn + phys_addr - 1024;
2725 if (phys_addr < EEPROMSIZE)
2726 return phys_addr - 1024 - fn;
2731 * t4_seeprom_wp - enable/disable EEPROM write protection
2732 * @adapter: the adapter
2733 * @enable: whether to enable or disable write protection
2735 * Enables or disables write protection on the serial EEPROM.
2737 int t4_seeprom_wp(struct adapter *adapter, bool enable)
2739 unsigned int v = enable ? 0xc : 0;
2740 int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
2741 return ret < 0 ? ret : 0;
2745 * t4_get_raw_vpd_params - read VPD parameters from VPD EEPROM
2746 * @adapter: adapter to read
2747 * @p: where to store the parameters
2749 * Reads card parameters stored in VPD EEPROM.
2751 int t4_get_raw_vpd_params(struct adapter *adapter, struct vpd_params *p)
2753 int i, ret = 0, addr;
2756 unsigned int vpdr_len, kw_offset, id_len;
2758 vpd = vmalloc(VPD_LEN);
2762 /* Card information normally starts at VPD_BASE but early cards had
2765 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(u32), vpd);
2769 /* The VPD shall have a unique identifier specified by the PCI SIG.
2770 * For chelsio adapters, the identifier is 0x82. The first byte of a VPD
2771 * shall be CHELSIO_VPD_UNIQUE_ID (0x82). The VPD programming software
2772 * is expected to automatically put this entry at the
2773 * beginning of the VPD.
2775 addr = *vpd == CHELSIO_VPD_UNIQUE_ID ? VPD_BASE : VPD_BASE_OLD;
2777 ret = pci_read_vpd(adapter->pdev, addr, VPD_LEN, vpd);
2781 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
2782 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
2787 id_len = pci_vpd_lrdt_size(vpd);
2788 if (id_len > ID_LEN)
2791 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
2793 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
2798 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
2799 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
2800 if (vpdr_len + kw_offset > VPD_LEN) {
2801 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
2806 #define FIND_VPD_KW(var, name) do { \
2807 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
2809 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
2813 var += PCI_VPD_INFO_FLD_HDR_SIZE; \
2816 FIND_VPD_KW(i, "RV");
2817 for (csum = 0; i >= 0; i--)
2821 dev_err(adapter->pdev_dev,
2822 "corrupted VPD EEPROM, actual csum %u\n", csum);
2827 FIND_VPD_KW(ec, "EC");
2828 FIND_VPD_KW(sn, "SN");
2829 FIND_VPD_KW(pn, "PN");
2830 FIND_VPD_KW(na, "NA");
2833 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
2835 memcpy(p->ec, vpd + ec, EC_LEN);
2837 i = pci_vpd_info_field_size(vpd + sn - PCI_VPD_INFO_FLD_HDR_SIZE);
2838 memcpy(p->sn, vpd + sn, min(i, SERNUM_LEN));
2840 i = pci_vpd_info_field_size(vpd + pn - PCI_VPD_INFO_FLD_HDR_SIZE);
2841 memcpy(p->pn, vpd + pn, min(i, PN_LEN));
2843 memcpy(p->na, vpd + na, min(i, MACADDR_LEN));
2844 strim((char *)p->na);
2848 return ret < 0 ? ret : 0;
2852 * t4_get_vpd_params - read VPD parameters & retrieve Core Clock
2853 * @adapter: adapter to read
2854 * @p: where to store the parameters
2856 * Reads card parameters stored in VPD EEPROM and retrieves the Core
2857 * Clock. This can only be called after a connection to the firmware
2860 int t4_get_vpd_params(struct adapter *adapter, struct vpd_params *p)
2862 u32 cclk_param, cclk_val;
2865 /* Grab the raw VPD parameters.
2867 ret = t4_get_raw_vpd_params(adapter, p);
2871 /* Ask firmware for the Core Clock since it knows how to translate the
2872 * Reference Clock ('V2') VPD field into a Core Clock value ...
2874 cclk_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
2875 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_CCLK));
2876 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
2877 1, &cclk_param, &cclk_val);
2887 * t4_get_pfres - retrieve VF resource limits
2888 * @adapter: the adapter
2890 * Retrieves configured resource limits and capabilities for a physical
2891 * function. The results are stored in @adapter->pfres.
2893 int t4_get_pfres(struct adapter *adapter)
2895 struct pf_resources *pfres = &adapter->params.pfres;
2896 struct fw_pfvf_cmd cmd, rpl;
2900 /* Execute PFVF Read command to get VF resource limits; bail out early
2901 * with error on command failure.
2903 memset(&cmd, 0, sizeof(cmd));
2904 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) |
2907 FW_PFVF_CMD_PFN_V(adapter->pf) |
2908 FW_PFVF_CMD_VFN_V(0));
2909 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
2910 v = t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &rpl);
2911 if (v != FW_SUCCESS)
2914 /* Extract PF resource limits and return success.
2916 word = be32_to_cpu(rpl.niqflint_niq);
2917 pfres->niqflint = FW_PFVF_CMD_NIQFLINT_G(word);
2918 pfres->niq = FW_PFVF_CMD_NIQ_G(word);
2920 word = be32_to_cpu(rpl.type_to_neq);
2921 pfres->neq = FW_PFVF_CMD_NEQ_G(word);
2922 pfres->pmask = FW_PFVF_CMD_PMASK_G(word);
2924 word = be32_to_cpu(rpl.tc_to_nexactf);
2925 pfres->tc = FW_PFVF_CMD_TC_G(word);
2926 pfres->nvi = FW_PFVF_CMD_NVI_G(word);
2927 pfres->nexactf = FW_PFVF_CMD_NEXACTF_G(word);
2929 word = be32_to_cpu(rpl.r_caps_to_nethctrl);
2930 pfres->r_caps = FW_PFVF_CMD_R_CAPS_G(word);
2931 pfres->wx_caps = FW_PFVF_CMD_WX_CAPS_G(word);
2932 pfres->nethctrl = FW_PFVF_CMD_NETHCTRL_G(word);
/* serial flash and firmware constants */
enum {
	SF_ATTEMPTS = 10,             /* max retries for SF operations */

	/* flash command opcodes (standard SPI NOR command set) */
	SF_PROG_PAGE    = 2,          /* program page */
	SF_WR_DISABLE   = 4,          /* disable writes */
	SF_RD_STATUS    = 5,          /* read status register */
	SF_WR_ENABLE    = 6,          /* enable writes */
	SF_RD_DATA_FAST = 0xb,        /* read flash */
	SF_RD_ID        = 0x9f,       /* read ID */
	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
};
2952 * sf1_read - read data from the serial flash
2953 * @adapter: the adapter
2954 * @byte_cnt: number of bytes to read
2955 * @cont: whether another operation will be chained
2956 * @lock: whether to lock SF for PL access only
2957 * @valp: where to store the read data
2959 * Reads up to 4 bytes of data from the serial flash. The location of
2960 * the read needs to be specified prior to calling this by issuing the
2961 * appropriate commands to the serial flash.
2963 static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
2964 int lock, u32 *valp)
2968 if (!byte_cnt || byte_cnt > 4)
2970 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2972 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
2973 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1));
2974 ret = t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
2976 *valp = t4_read_reg(adapter, SF_DATA_A);
2981 * sf1_write - write data to the serial flash
2982 * @adapter: the adapter
2983 * @byte_cnt: number of bytes to write
2984 * @cont: whether another operation will be chained
2985 * @lock: whether to lock SF for PL access only
2986 * @val: value to write
2988 * Writes up to 4 bytes of data to the serial flash. The location of
2989 * the write needs to be specified prior to calling this by issuing the
2990 * appropriate commands to the serial flash.
2992 static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
2995 if (!byte_cnt || byte_cnt > 4)
2997 if (t4_read_reg(adapter, SF_OP_A) & SF_BUSY_F)
2999 t4_write_reg(adapter, SF_DATA_A, val);
3000 t4_write_reg(adapter, SF_OP_A, SF_LOCK_V(lock) |
3001 SF_CONT_V(cont) | BYTECNT_V(byte_cnt - 1) | OP_V(1));
3002 return t4_wait_op_done(adapter, SF_OP_A, SF_BUSY_F, 0, SF_ATTEMPTS, 5);
3006 * flash_wait_op - wait for a flash operation to complete
3007 * @adapter: the adapter
3008 * @attempts: max number of polls of the status register
3009 * @delay: delay between polls in ms
3011 * Wait for a flash operation to complete by polling the status register.
3013 static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
3019 if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
3020 (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
3024 if (--attempts == 0)
3032 * t4_read_flash - read words from serial flash
3033 * @adapter: the adapter
3034 * @addr: the start address for the read
3035 * @nwords: how many 32-bit words to read
3036 * @data: where to store the read data
3037 * @byte_oriented: whether to store data as bytes or as words
3039 * Read the specified number of 32-bit words from the serial flash.
3040 * If @byte_oriented is set the read data is stored as a byte array
3041 * (i.e., big-endian), otherwise as 32-bit words in the platform's
3042 * natural endianness.
3044 int t4_read_flash(struct adapter *adapter, unsigned int addr,
3045 unsigned int nwords, u32 *data, int byte_oriented)
3049 if (addr + nwords * sizeof(u32) > adapter->params.sf_size || (addr & 3))
3052 addr = swab32(addr) | SF_RD_DATA_FAST;
3054 if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
3055 (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
3058 for ( ; nwords; nwords--, data++) {
3059 ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
3061 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3065 *data = (__force __u32)(cpu_to_be32(*data));
3071 * t4_write_flash - write up to a page of data to the serial flash
3072 * @adapter: the adapter
3073 * @addr: the start address to write
3074 * @n: length of data to write in bytes
3075 * @data: the data to write
3077 * Writes up to a page of data (256 bytes) to the serial flash starting
3078 * at the given address. All the data must be written to the same page.
3080 static int t4_write_flash(struct adapter *adapter, unsigned int addr,
3081 unsigned int n, const u8 *data)
3085 unsigned int i, c, left, val, offset = addr & 0xff;
3087 if (addr >= adapter->params.sf_size || offset + n > SF_PAGE_SIZE)
3090 val = swab32(addr) | SF_PROG_PAGE;
3092 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3093 (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
3096 for (left = n; left; left -= c) {
3098 for (val = 0, i = 0; i < c; ++i)
3099 val = (val << 8) + *data++;
3101 ret = sf1_write(adapter, c, c != left, 1, val);
3105 ret = flash_wait_op(adapter, 8, 1);
3109 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3111 /* Read the page to verify the write succeeded */
3112 ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
3116 if (memcmp(data - n, (u8 *)buf + offset, n)) {
3117 dev_err(adapter->pdev_dev,
3118 "failed to correctly write the flash page at %#x\n",
3125 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3130 * t4_get_fw_version - read the firmware version
3131 * @adapter: the adapter
3132 * @vers: where to place the version
3134 * Reads the FW version from flash.
3136 int t4_get_fw_version(struct adapter *adapter, u32 *vers)
3138 return t4_read_flash(adapter, FLASH_FW_START +
3139 offsetof(struct fw_hdr, fw_ver), 1,
3144 * t4_get_bs_version - read the firmware bootstrap version
3145 * @adapter: the adapter
3146 * @vers: where to place the version
3148 * Reads the FW Bootstrap version from flash.
3150 int t4_get_bs_version(struct adapter *adapter, u32 *vers)
3152 return t4_read_flash(adapter, FLASH_FWBOOTSTRAP_START +
3153 offsetof(struct fw_hdr, fw_ver), 1,
3158 * t4_get_tp_version - read the TP microcode version
3159 * @adapter: the adapter
3160 * @vers: where to place the version
3162 * Reads the TP microcode version from flash.
3164 int t4_get_tp_version(struct adapter *adapter, u32 *vers)
3166 return t4_read_flash(adapter, FLASH_FW_START +
3167 offsetof(struct fw_hdr, tp_microcode_ver),
3172 * t4_get_exprom_version - return the Expansion ROM version (if any)
3173 * @adapter: the adapter
3174 * @vers: where to place the version
3176 * Reads the Expansion ROM header from FLASH and returns the version
3177 * number (if present) through the @vers return value pointer. We return
3178 * this in the Firmware Version Format since it's convenient. Return
3179 * 0 on success, -ENOENT if no Expansion ROM is present.
3181 int t4_get_exprom_version(struct adapter *adap, u32 *vers)
3183 struct exprom_header {
3184 unsigned char hdr_arr[16]; /* must start with 0x55aa */
3185 unsigned char hdr_ver[4]; /* Expansion ROM version */
3187 u32 exprom_header_buf[DIV_ROUND_UP(sizeof(struct exprom_header),
3191 ret = t4_read_flash(adap, FLASH_EXP_ROM_START,
3192 ARRAY_SIZE(exprom_header_buf), exprom_header_buf,
3197 hdr = (struct exprom_header *)exprom_header_buf;
3198 if (hdr->hdr_arr[0] != 0x55 || hdr->hdr_arr[1] != 0xaa)
3201 *vers = (FW_HDR_FW_VER_MAJOR_V(hdr->hdr_ver[0]) |
3202 FW_HDR_FW_VER_MINOR_V(hdr->hdr_ver[1]) |
3203 FW_HDR_FW_VER_MICRO_V(hdr->hdr_ver[2]) |
3204 FW_HDR_FW_VER_BUILD_V(hdr->hdr_ver[3]));
3209 * t4_get_vpd_version - return the VPD version
3210 * @adapter: the adapter
3211 * @vers: where to place the version
3213 * Reads the VPD via the Firmware interface (thus this can only be called
3214 * once we're ready to issue Firmware commands). The format of the
3215 * VPD version is adapter specific. Returns 0 on success, an error on
3218 * Note that early versions of the Firmware didn't include the ability
3219 * to retrieve the VPD version, so we zero-out the return-value parameter
3220 * in that case to avoid leaving it with garbage in it.
3222 * Also note that the Firmware will return its cached copy of the VPD
3223 * Revision ID, not the actual Revision ID as written in the Serial
3224 * EEPROM. This is only an issue if a new VPD has been written and the
3225 * Firmware/Chip haven't yet gone through a RESET sequence. So it's best
3226 * to defer calling this routine till after a FW_RESET_CMD has been issued
3227 * if the Host Driver will be performing a full adapter initialization.
3229 int t4_get_vpd_version(struct adapter *adapter, u32 *vers)
3234 vpdrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3235 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_VPDREV));
3236 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3237 1, &vpdrev_param, vers);
3244 * t4_get_scfg_version - return the Serial Configuration version
3245 * @adapter: the adapter
3246 * @vers: where to place the version
3248 * Reads the Serial Configuration Version via the Firmware interface
3249 * (thus this can only be called once we're ready to issue Firmware
3250 * commands). The format of the Serial Configuration version is
3251 * adapter specific. Returns 0 on success, an error on failure.
3253 * Note that early versions of the Firmware didn't include the ability
3254 * to retrieve the Serial Configuration version, so we zero-out the
3255 * return-value parameter in that case to avoid leaving it with
3258 * Also note that the Firmware will return its cached copy of the Serial
3259 * Initialization Revision ID, not the actual Revision ID as written in
3260 * the Serial EEPROM. This is only an issue if a new VPD has been written
3261 * and the Firmware/Chip haven't yet gone through a RESET sequence. So
3262 * it's best to defer calling this routine till after a FW_RESET_CMD has
3263 * been issued if the Host Driver will be performing a full adapter
3266 int t4_get_scfg_version(struct adapter *adapter, u32 *vers)
3271 scfgrev_param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3272 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_SCFGREV));
3273 ret = t4_query_params(adapter, adapter->mbox, adapter->pf, 0,
3274 1, &scfgrev_param, vers);
3281 * t4_get_version_info - extract various chip/firmware version information
3282 * @adapter: the adapter
3284 * Reads various chip/firmware version numbers and stores them into the
3285 * adapter Adapter Parameters structure. If any of the efforts fails
3286 * the first failure will be returned, but all of the version numbers
3289 int t4_get_version_info(struct adapter *adapter)
3293 #define FIRST_RET(__getvinfo) \
3295 int __ret = __getvinfo; \
3296 if (__ret && !ret) \
3300 FIRST_RET(t4_get_fw_version(adapter, &adapter->params.fw_vers));
3301 FIRST_RET(t4_get_bs_version(adapter, &adapter->params.bs_vers));
3302 FIRST_RET(t4_get_tp_version(adapter, &adapter->params.tp_vers));
3303 FIRST_RET(t4_get_exprom_version(adapter, &adapter->params.er_vers));
3304 FIRST_RET(t4_get_scfg_version(adapter, &adapter->params.scfg_vers));
3305 FIRST_RET(t4_get_vpd_version(adapter, &adapter->params.vpd_vers));
3312 * t4_dump_version_info - dump all of the adapter configuration IDs
3313 * @adapter: the adapter
3315 * Dumps all of the various bits of adapter configuration version/revision
3316 * IDs information. This is typically called at some point after
3317 * t4_get_version_info() has been called.
3319 void t4_dump_version_info(struct adapter *adapter)
3321 /* Device information */
3322 dev_info(adapter->pdev_dev, "Chelsio %s rev %d\n",
3323 adapter->params.vpd.id,
3324 CHELSIO_CHIP_RELEASE(adapter->params.chip));
3325 dev_info(adapter->pdev_dev, "S/N: %s, P/N: %s\n",
3326 adapter->params.vpd.sn, adapter->params.vpd.pn);
3328 /* Firmware Version */
3329 if (!adapter->params.fw_vers)
3330 dev_warn(adapter->pdev_dev, "No firmware loaded\n");
3332 dev_info(adapter->pdev_dev, "Firmware version: %u.%u.%u.%u\n",
3333 FW_HDR_FW_VER_MAJOR_G(adapter->params.fw_vers),
3334 FW_HDR_FW_VER_MINOR_G(adapter->params.fw_vers),
3335 FW_HDR_FW_VER_MICRO_G(adapter->params.fw_vers),
3336 FW_HDR_FW_VER_BUILD_G(adapter->params.fw_vers));
3338 /* Bootstrap Firmware Version. (Some adapters don't have Bootstrap
3339 * Firmware, so dev_info() is more appropriate here.)
3341 if (!adapter->params.bs_vers)
3342 dev_info(adapter->pdev_dev, "No bootstrap loaded\n");
3344 dev_info(adapter->pdev_dev, "Bootstrap version: %u.%u.%u.%u\n",
3345 FW_HDR_FW_VER_MAJOR_G(adapter->params.bs_vers),
3346 FW_HDR_FW_VER_MINOR_G(adapter->params.bs_vers),
3347 FW_HDR_FW_VER_MICRO_G(adapter->params.bs_vers),
3348 FW_HDR_FW_VER_BUILD_G(adapter->params.bs_vers));
3350 /* TP Microcode Version */
3351 if (!adapter->params.tp_vers)
3352 dev_warn(adapter->pdev_dev, "No TP Microcode loaded\n");
3354 dev_info(adapter->pdev_dev,
3355 "TP Microcode version: %u.%u.%u.%u\n",
3356 FW_HDR_FW_VER_MAJOR_G(adapter->params.tp_vers),
3357 FW_HDR_FW_VER_MINOR_G(adapter->params.tp_vers),
3358 FW_HDR_FW_VER_MICRO_G(adapter->params.tp_vers),
3359 FW_HDR_FW_VER_BUILD_G(adapter->params.tp_vers));
3361 /* Expansion ROM version */
3362 if (!adapter->params.er_vers)
3363 dev_info(adapter->pdev_dev, "No Expansion ROM loaded\n");
3365 dev_info(adapter->pdev_dev,
3366 "Expansion ROM version: %u.%u.%u.%u\n",
3367 FW_HDR_FW_VER_MAJOR_G(adapter->params.er_vers),
3368 FW_HDR_FW_VER_MINOR_G(adapter->params.er_vers),
3369 FW_HDR_FW_VER_MICRO_G(adapter->params.er_vers),
3370 FW_HDR_FW_VER_BUILD_G(adapter->params.er_vers));
3372 /* Serial Configuration version */
3373 dev_info(adapter->pdev_dev, "Serial Configuration version: %#x\n",
3374 adapter->params.scfg_vers);
3377 dev_info(adapter->pdev_dev, "VPD version: %#x\n",
3378 adapter->params.vpd_vers);
3382 * t4_check_fw_version - check if the FW is supported with this driver
3383 * @adap: the adapter
3385 * Checks if an adapter's FW is compatible with the driver. Returns 0
3386 * if there's exact match, a negative error if the version could not be
3387 * read or there's a major version mismatch
3389 int t4_check_fw_version(struct adapter *adap)
3391 int i, ret, major, minor, micro;
3392 int exp_major, exp_minor, exp_micro;
3393 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
3395 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3396 /* Try multiple times before returning error */
3397 for (i = 0; (ret == -EBUSY || ret == -EAGAIN) && i < 3; i++)
3398 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3403 major = FW_HDR_FW_VER_MAJOR_G(adap->params.fw_vers);
3404 minor = FW_HDR_FW_VER_MINOR_G(adap->params.fw_vers);
3405 micro = FW_HDR_FW_VER_MICRO_G(adap->params.fw_vers);
3407 switch (chip_version) {
3409 exp_major = T4FW_MIN_VERSION_MAJOR;
3410 exp_minor = T4FW_MIN_VERSION_MINOR;
3411 exp_micro = T4FW_MIN_VERSION_MICRO;
3414 exp_major = T5FW_MIN_VERSION_MAJOR;
3415 exp_minor = T5FW_MIN_VERSION_MINOR;
3416 exp_micro = T5FW_MIN_VERSION_MICRO;
3419 exp_major = T6FW_MIN_VERSION_MAJOR;
3420 exp_minor = T6FW_MIN_VERSION_MINOR;
3421 exp_micro = T6FW_MIN_VERSION_MICRO;
3424 dev_err(adap->pdev_dev, "Unsupported chip type, %x\n",
3429 if (major < exp_major || (major == exp_major && minor < exp_minor) ||
3430 (major == exp_major && minor == exp_minor && micro < exp_micro)) {
3431 dev_err(adap->pdev_dev,
3432 "Card has firmware version %u.%u.%u, minimum "
3433 "supported firmware is %u.%u.%u.\n", major, minor,
3434 micro, exp_major, exp_minor, exp_micro);
3440 /* Is the given firmware API compatible with the one the driver was compiled
3443 static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
3446 /* short circuit if it's the exact same firmware version */
3447 if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
3450 #define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
3451 if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
3452 SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
3459 /* The firmware in the filesystem is usable, but should it be installed?
3460 * This routine explains itself in detail if it indicates the filesystem
3461 * firmware should be installed.
3463 static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
3468 if (!card_fw_usable) {
3469 reason = "incompatible or unusable";
3474 reason = "older than the version supported with this driver";
3481 dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
3482 "installing firmware %u.%u.%u.%u on card.\n",
3483 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3484 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c), reason,
3485 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3486 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3491 int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
3492 const u8 *fw_data, unsigned int fw_size,
3493 struct fw_hdr *card_fw, enum dev_state state,
3496 int ret, card_fw_usable, fs_fw_usable;
3497 const struct fw_hdr *fs_fw;
3498 const struct fw_hdr *drv_fw;
3500 drv_fw = &fw_info->fw_hdr;
3502 /* Read the header of the firmware on the card */
3503 ret = t4_read_flash(adap, FLASH_FW_START,
3504 sizeof(*card_fw) / sizeof(uint32_t),
3505 (uint32_t *)card_fw, 1);
3507 card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
3509 dev_err(adap->pdev_dev,
3510 "Unable to read card's firmware header: %d\n", ret);
3514 if (fw_data != NULL) {
3515 fs_fw = (const void *)fw_data;
3516 fs_fw_usable = fw_compatible(drv_fw, fs_fw);
3522 if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
3523 (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
3524 /* Common case: the firmware on the card is an exact match and
3525 * the filesystem one is an exact match too, or the filesystem
3526 * one is absent/incompatible.
3528 } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
3529 should_install_fs_fw(adap, card_fw_usable,
3530 be32_to_cpu(fs_fw->fw_ver),
3531 be32_to_cpu(card_fw->fw_ver))) {
3532 ret = t4_fw_upgrade(adap, adap->mbox, fw_data,
3535 dev_err(adap->pdev_dev,
3536 "failed to install firmware: %d\n", ret);
3540 /* Installed successfully, update the cached header too. */
3543 *reset = 0; /* already reset as part of load_fw */
3546 if (!card_fw_usable) {
3549 d = be32_to_cpu(drv_fw->fw_ver);
3550 c = be32_to_cpu(card_fw->fw_ver);
3551 k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
3553 dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
3555 "driver compiled with %d.%d.%d.%d, "
3556 "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
3558 FW_HDR_FW_VER_MAJOR_G(d), FW_HDR_FW_VER_MINOR_G(d),
3559 FW_HDR_FW_VER_MICRO_G(d), FW_HDR_FW_VER_BUILD_G(d),
3560 FW_HDR_FW_VER_MAJOR_G(c), FW_HDR_FW_VER_MINOR_G(c),
3561 FW_HDR_FW_VER_MICRO_G(c), FW_HDR_FW_VER_BUILD_G(c),
3562 FW_HDR_FW_VER_MAJOR_G(k), FW_HDR_FW_VER_MINOR_G(k),
3563 FW_HDR_FW_VER_MICRO_G(k), FW_HDR_FW_VER_BUILD_G(k));
3568 /* We're using whatever's on the card and it's known to be good. */
3569 adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
3570 adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
3577 * t4_flash_erase_sectors - erase a range of flash sectors
3578 * @adapter: the adapter
3579 * @start: the first sector to erase
3580 * @end: the last sector to erase
3582 * Erases the sectors in the given inclusive range.
3584 static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
3588 if (end >= adapter->params.sf_nsec)
3591 while (start <= end) {
3592 if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
3593 (ret = sf1_write(adapter, 4, 0, 1,
3594 SF_ERASE_SECTOR | (start << 8))) != 0 ||
3595 (ret = flash_wait_op(adapter, 14, 500)) != 0) {
3596 dev_err(adapter->pdev_dev,
3597 "erase of flash sector %d failed, error %d\n",
3603 t4_write_reg(adapter, SF_OP_A, 0); /* unlock SF */
3608 * t4_flash_cfg_addr - return the address of the flash configuration file
3609 * @adapter: the adapter
3611 * Return the address within the flash where the Firmware Configuration
3614 unsigned int t4_flash_cfg_addr(struct adapter *adapter)
3616 if (adapter->params.sf_size == 0x100000)
3617 return FLASH_FPGA_CFG_START;
3619 return FLASH_CFG_START;
3622 /* Return TRUE if the specified firmware matches the adapter. I.e. T4
3623 * firmware for T4 adapters, T5 firmware for T5 adapters, etc. We go ahead
3624 * and emit an error message for mismatched firmware to save our caller the
3627 static bool t4_fw_matches_chip(const struct adapter *adap,
3628 const struct fw_hdr *hdr)
3630 /* The expression below will return FALSE for any unsupported adapter
3631 * which will keep us "honest" in the future ...
3633 if ((is_t4(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T4) ||
3634 (is_t5(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T5) ||
3635 (is_t6(adap->params.chip) && hdr->chip == FW_HDR_CHIP_T6))
3638 dev_err(adap->pdev_dev,
3639 "FW image (%d) is not suitable for this adapter (%d)\n",
3640 hdr->chip, CHELSIO_CHIP_VERSION(adap->params.chip));
3645 * t4_load_fw - download firmware
3646 * @adap: the adapter
3647 * @fw_data: the firmware image to write
3650 * Write the supplied firmware image to the card's serial flash.
3652 int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
3657 u8 first_page[SF_PAGE_SIZE];
3658 const __be32 *p = (const __be32 *)fw_data;
3659 const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
3660 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
3661 unsigned int fw_start_sec = FLASH_FW_START_SEC;
3662 unsigned int fw_size = FLASH_FW_MAX_SIZE;
3663 unsigned int fw_start = FLASH_FW_START;
3666 dev_err(adap->pdev_dev, "FW image has no data\n");
3670 dev_err(adap->pdev_dev,
3671 "FW image size not multiple of 512 bytes\n");
3674 if ((unsigned int)be16_to_cpu(hdr->len512) * 512 != size) {
3675 dev_err(adap->pdev_dev,
3676 "FW image size differs from size in FW header\n");
3679 if (size > fw_size) {
3680 dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
3684 if (!t4_fw_matches_chip(adap, hdr))
3687 for (csum = 0, i = 0; i < size / sizeof(csum); i++)
3688 csum += be32_to_cpu(p[i]);
3690 if (csum != 0xffffffff) {
3691 dev_err(adap->pdev_dev,
3692 "corrupted firmware image, checksum %#x\n", csum);
3696 i = DIV_ROUND_UP(size, sf_sec_size); /* # of sectors spanned */
3697 ret = t4_flash_erase_sectors(adap, fw_start_sec, fw_start_sec + i - 1);
3702 * We write the correct version at the end so the driver can see a bad
3703 * version if the FW write fails. Start by writing a copy of the
3704 * first page with a bad version.
3706 memcpy(first_page, fw_data, SF_PAGE_SIZE);
3707 ((struct fw_hdr *)first_page)->fw_ver = cpu_to_be32(0xffffffff);
3708 ret = t4_write_flash(adap, fw_start, SF_PAGE_SIZE, first_page);
3713 for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
3714 addr += SF_PAGE_SIZE;
3715 fw_data += SF_PAGE_SIZE;
3716 ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
3721 ret = t4_write_flash(adap,
3722 fw_start + offsetof(struct fw_hdr, fw_ver),
3723 sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
3726 dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
3729 ret = t4_get_fw_version(adap, &adap->params.fw_vers);
3734 * t4_phy_fw_ver - return current PHY firmware version
3735 * @adap: the adapter
3736 * @phy_fw_ver: return value buffer for PHY firmware version
3738 * Returns the current version of external PHY firmware on the
3741 int t4_phy_fw_ver(struct adapter *adap, int *phy_fw_ver)
3746 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3747 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3748 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3749 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_VERSION));
3750 ret = t4_query_params(adap, adap->mbox, adap->pf, 0, 1,
3759 * t4_load_phy_fw - download port PHY firmware
3760 * @adap: the adapter
3761 * @win: the PCI-E Memory Window index to use for t4_memory_rw()
3762 * @win_lock: the lock to use to guard the memory copy
3763 * @phy_fw_version: function to check PHY firmware versions
3764 * @phy_fw_data: the PHY firmware image to write
3765 * @phy_fw_size: image size
3767 * Transfer the specified PHY firmware to the adapter. If a non-NULL
3768 * @phy_fw_version is supplied, then it will be used to determine if
3769 * it's necessary to perform the transfer by comparing the version
3770 * of any existing adapter PHY firmware with that of the passed in
3771 * PHY firmware image. If @win_lock is non-NULL then it will be used
3772 * around the call to t4_memory_rw() which transfers the PHY firmware
3775 * A negative error number will be returned if an error occurs. If
3776 * version number support is available and there's no need to upgrade
3777 * the firmware, 0 will be returned. If firmware is successfully
3778 * transferred to the adapter, 1 will be retured.
3780 * NOTE: some adapters only have local RAM to store the PHY firmware. As
3781 * a result, a RESET of the adapter would cause that RAM to lose its
3782 * contents. Thus, loading PHY firmware on such adapters must happen
3783 * after any FW_RESET_CMDs ...
3785 int t4_load_phy_fw(struct adapter *adap,
3786 int win, spinlock_t *win_lock,
3787 int (*phy_fw_version)(const u8 *, size_t),
3788 const u8 *phy_fw_data, size_t phy_fw_size)
3790 unsigned long mtype = 0, maddr = 0;
3792 int cur_phy_fw_ver = 0, new_phy_fw_vers = 0;
3795 /* If we have version number support, then check to see if the adapter
3796 * already has up-to-date PHY firmware loaded.
3798 if (phy_fw_version) {
3799 new_phy_fw_vers = phy_fw_version(phy_fw_data, phy_fw_size);
3800 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3804 if (cur_phy_fw_ver >= new_phy_fw_vers) {
3805 CH_WARN(adap, "PHY Firmware already up-to-date, "
3806 "version %#x\n", cur_phy_fw_ver);
3811 /* Ask the firmware where it wants us to copy the PHY firmware image.
3812 * The size of the file requires a special version of the READ coommand
3813 * which will pass the file size via the values field in PARAMS_CMD and
3814 * retrieve the return value from firmware and place it in the same
3817 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3818 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3819 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3820 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3822 ret = t4_query_params_rw(adap, adap->mbox, adap->pf, 0, 1,
3823 ¶m, &val, 1, true);
3827 maddr = (val & 0xff) << 16;
3829 /* Copy the supplied PHY Firmware image to the adapter memory location
3830 * allocated by the adapter firmware.
3833 spin_lock_bh(win_lock);
3834 ret = t4_memory_rw(adap, win, mtype, maddr,
3835 phy_fw_size, (__be32 *)phy_fw_data,
3838 spin_unlock_bh(win_lock);
3842 /* Tell the firmware that the PHY firmware image has been written to
3843 * RAM and it can now start copying it over to the PHYs. The chip
3844 * firmware will RESET the affected PHYs as part of this operation
3845 * leaving them running the new PHY firmware image.
3847 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3848 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PHYFW) |
3849 FW_PARAMS_PARAM_Y_V(adap->params.portvec) |
3850 FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_PHYFW_DOWNLOAD));
3851 ret = t4_set_params_timeout(adap, adap->mbox, adap->pf, 0, 1,
3852 ¶m, &val, 30000);
3854 /* If we have version number support, then check to see that the new
3855 * firmware got loaded properly.
3857 if (phy_fw_version) {
3858 ret = t4_phy_fw_ver(adap, &cur_phy_fw_ver);
3862 if (cur_phy_fw_ver != new_phy_fw_vers) {
3863 CH_WARN(adap, "PHY Firmware did not update: "
3864 "version on adapter %#x, "
3865 "version flashed %#x\n",
3866 cur_phy_fw_ver, new_phy_fw_vers);
3875 * t4_fwcache - firmware cache operation
3876 * @adap: the adapter
3877 * @op : the operation (flush or flush and invalidate)
3879 int t4_fwcache(struct adapter *adap, enum fw_params_param_dev_fwcache op)
3881 struct fw_params_cmd c;
3883 memset(&c, 0, sizeof(c));
3885 cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
3886 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
3887 FW_PARAMS_CMD_PFN_V(adap->pf) |
3888 FW_PARAMS_CMD_VFN_V(0));
3889 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
3891 cpu_to_be32(FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
3892 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_FWCACHE));
3893 c.param[0].val = cpu_to_be32(op);
3895 return t4_wr_mbox(adap, adap->mbox, &c, sizeof(c), NULL);
3898 void t4_cim_read_pif_la(struct adapter *adap, u32 *pif_req, u32 *pif_rsp,
3899 unsigned int *pif_req_wrptr,
3900 unsigned int *pif_rsp_wrptr)
3903 u32 cfg, val, req, rsp;
3905 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3906 if (cfg & LADBGEN_F)
3907 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3909 val = t4_read_reg(adap, CIM_DEBUGSTS_A);
3910 req = POLADBGWRPTR_G(val);
3911 rsp = PILADBGWRPTR_G(val);
3913 *pif_req_wrptr = req;
3915 *pif_rsp_wrptr = rsp;
3917 for (i = 0; i < CIM_PIFLA_SIZE; i++) {
3918 for (j = 0; j < 6; j++) {
3919 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(req) |
3920 PILADBGRDPTR_V(rsp));
3921 *pif_req++ = t4_read_reg(adap, CIM_PO_LA_DEBUGDATA_A);
3922 *pif_rsp++ = t4_read_reg(adap, CIM_PI_LA_DEBUGDATA_A);
3926 req = (req + 2) & POLADBGRDPTR_M;
3927 rsp = (rsp + 2) & PILADBGRDPTR_M;
3929 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3932 void t4_cim_read_ma_la(struct adapter *adap, u32 *ma_req, u32 *ma_rsp)
3937 cfg = t4_read_reg(adap, CIM_DEBUGCFG_A);
3938 if (cfg & LADBGEN_F)
3939 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg ^ LADBGEN_F);
3941 for (i = 0; i < CIM_MALA_SIZE; i++) {
3942 for (j = 0; j < 5; j++) {
3944 t4_write_reg(adap, CIM_DEBUGCFG_A, POLADBGRDPTR_V(idx) |
3945 PILADBGRDPTR_V(idx));
3946 *ma_req++ = t4_read_reg(adap, CIM_PO_LA_MADEBUGDATA_A);
3947 *ma_rsp++ = t4_read_reg(adap, CIM_PI_LA_MADEBUGDATA_A);
3950 t4_write_reg(adap, CIM_DEBUGCFG_A, cfg);
3953 void t4_ulprx_read_la(struct adapter *adap, u32 *la_buf)
3957 for (i = 0; i < 8; i++) {
3958 u32 *p = la_buf + i;
3960 t4_write_reg(adap, ULP_RX_LA_CTL_A, i);
3961 j = t4_read_reg(adap, ULP_RX_LA_WRPTR_A);
3962 t4_write_reg(adap, ULP_RX_LA_RDPTR_A, j);
3963 for (j = 0; j < ULPRX_LA_SIZE; j++, p += 8)
3964 *p = t4_read_reg(adap, ULP_RX_LA_RDDATA_A);
/* Port capabilities we advertise: all speed bits plus autonegotiation */
#define ADVERT_MASK (FW_PORT_CAP32_SPEED_V(FW_PORT_CAP32_SPEED_M) | \
		     FW_PORT_CAP32_ANEG)
3972 * fwcaps16_to_caps32 - convert 16-bit Port Capabilities to 32-bits
3973 * @caps16: a 16-bit Port Capabilities value
3975 * Returns the equivalent 32-bit Port Capabilities value.
3977 static fw_port_cap32_t fwcaps16_to_caps32(fw_port_cap16_t caps16)
3979 fw_port_cap32_t caps32 = 0;
3981 #define CAP16_TO_CAP32(__cap) \
3983 if (caps16 & FW_PORT_CAP_##__cap) \
3984 caps32 |= FW_PORT_CAP32_##__cap; \
3987 CAP16_TO_CAP32(SPEED_100M);
3988 CAP16_TO_CAP32(SPEED_1G);
3989 CAP16_TO_CAP32(SPEED_25G);
3990 CAP16_TO_CAP32(SPEED_10G);
3991 CAP16_TO_CAP32(SPEED_40G);
3992 CAP16_TO_CAP32(SPEED_100G);
3993 CAP16_TO_CAP32(FC_RX);
3994 CAP16_TO_CAP32(FC_TX);
3995 CAP16_TO_CAP32(ANEG);
3996 CAP16_TO_CAP32(FORCE_PAUSE);
3997 CAP16_TO_CAP32(MDIAUTO);
3998 CAP16_TO_CAP32(MDISTRAIGHT);
3999 CAP16_TO_CAP32(FEC_RS);
4000 CAP16_TO_CAP32(FEC_BASER_RS);
4001 CAP16_TO_CAP32(802_3_PAUSE);
4002 CAP16_TO_CAP32(802_3_ASM_DIR);
4004 #undef CAP16_TO_CAP32
4010 * fwcaps32_to_caps16 - convert 32-bit Port Capabilities to 16-bits
4011 * @caps32: a 32-bit Port Capabilities value
4013 * Returns the equivalent 16-bit Port Capabilities value. Note that
4014 * not all 32-bit Port Capabilities can be represented in the 16-bit
4015 * Port Capabilities and some fields/values may not make it.
4017 static fw_port_cap16_t fwcaps32_to_caps16(fw_port_cap32_t caps32)
4019 fw_port_cap16_t caps16 = 0;
4021 #define CAP32_TO_CAP16(__cap) \
4023 if (caps32 & FW_PORT_CAP32_##__cap) \
4024 caps16 |= FW_PORT_CAP_##__cap; \
4027 CAP32_TO_CAP16(SPEED_100M);
4028 CAP32_TO_CAP16(SPEED_1G);
4029 CAP32_TO_CAP16(SPEED_10G);
4030 CAP32_TO_CAP16(SPEED_25G);
4031 CAP32_TO_CAP16(SPEED_40G);
4032 CAP32_TO_CAP16(SPEED_100G);
4033 CAP32_TO_CAP16(FC_RX);
4034 CAP32_TO_CAP16(FC_TX);
4035 CAP32_TO_CAP16(802_3_PAUSE);
4036 CAP32_TO_CAP16(802_3_ASM_DIR);
4037 CAP32_TO_CAP16(ANEG);
4038 CAP32_TO_CAP16(FORCE_PAUSE);
4039 CAP32_TO_CAP16(MDIAUTO);
4040 CAP32_TO_CAP16(MDISTRAIGHT);
4041 CAP32_TO_CAP16(FEC_RS);
4042 CAP32_TO_CAP16(FEC_BASER_RS);
4044 #undef CAP32_TO_CAP16
4049 /* Translate Firmware Port Capabilities Pause specification to Common Code */
4050 static inline enum cc_pause fwcap_to_cc_pause(fw_port_cap32_t fw_pause)
4052 enum cc_pause cc_pause = 0;
4054 if (fw_pause & FW_PORT_CAP32_FC_RX)
4055 cc_pause |= PAUSE_RX;
4056 if (fw_pause & FW_PORT_CAP32_FC_TX)
4057 cc_pause |= PAUSE_TX;
4062 /* Translate Common Code Pause specification into Firmware Port Capabilities */
4063 static inline fw_port_cap32_t cc_to_fwcap_pause(enum cc_pause cc_pause)
4065 fw_port_cap32_t fw_pause = 0;
4067 if (cc_pause & PAUSE_RX)
4068 fw_pause |= FW_PORT_CAP32_FC_RX;
4069 if (cc_pause & PAUSE_TX)
4070 fw_pause |= FW_PORT_CAP32_FC_TX;
4071 if (!(cc_pause & PAUSE_AUTONEG))
4072 fw_pause |= FW_PORT_CAP32_FORCE_PAUSE;
4077 /* Translate Firmware Forward Error Correction specification to Common Code */
4078 static inline enum cc_fec fwcap_to_cc_fec(fw_port_cap32_t fw_fec)
4080 enum cc_fec cc_fec = 0;
4082 if (fw_fec & FW_PORT_CAP32_FEC_RS)
4084 if (fw_fec & FW_PORT_CAP32_FEC_BASER_RS)
4085 cc_fec |= FEC_BASER_RS;
4090 /* Translate Common Code Forward Error Correction specification to Firmware */
4091 static inline fw_port_cap32_t cc_to_fwcap_fec(enum cc_fec cc_fec)
4093 fw_port_cap32_t fw_fec = 0;
4095 if (cc_fec & FEC_RS)
4096 fw_fec |= FW_PORT_CAP32_FEC_RS;
4097 if (cc_fec & FEC_BASER_RS)
4098 fw_fec |= FW_PORT_CAP32_FEC_BASER_RS;
4104 * t4_link_l1cfg - apply link configuration to MAC/PHY
4105 * @adapter: the adapter
4106 * @mbox: the Firmware Mailbox to use
4107 * @port: the Port ID
4108 * @lc: the Port's Link Configuration
4110 * Set up a port's MAC and PHY according to a desired link configuration.
4111 * - If the PHY can auto-negotiate first decide what to advertise, then
4112 * enable/disable auto-negotiation as desired, and reset.
4113 * - If the PHY does not auto-negotiate just reset it.
4114 * - If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
4115 * otherwise do it later based on the outcome of auto-negotiation.
4117 int t4_link_l1cfg_core(struct adapter *adapter, unsigned int mbox,
4118 unsigned int port, struct link_config *lc,
4119 bool sleep_ok, int timeout)
4121 unsigned int fw_caps = adapter->params.fw_caps_support;
4122 fw_port_cap32_t fw_fc, cc_fec, fw_fec, rcap;
4123 struct fw_port_cmd cmd;
4124 unsigned int fw_mdi;
/* Only advertise MDI-Auto if the Port actually supports it. */
4127 fw_mdi = (FW_PORT_CAP32_MDI_V(FW_PORT_CAP32_MDI_AUTO) & lc->pcaps);
4128 /* Convert driver coding of Pause Frame Flow Control settings into the
4131 fw_fc = cc_to_fwcap_pause(lc->requested_fc);
4133 /* Convert Common Code Forward Error Control settings into the
4134 * Firmware's API. If the current Requested FEC has "Automatic"
4135 * (IEEE 802.3) specified, then we use whatever the Firmware
4136 * sent us as part of its IEEE 802.3-based interpretation of
4137 * the Transceiver Module EPROM FEC parameters. Otherwise we
4138 * use whatever is in the current Requested FEC settings.
4140 if (lc->requested_fec & FEC_AUTO)
4141 cc_fec = fwcap_to_cc_fec(lc->def_acaps);
/* (else branch) non-automatic: honor the explicitly requested FEC. */
4143 cc_fec = lc->requested_fec;
4144 fw_fec = cc_to_fwcap_fec(cc_fec);
4146 /* Figure out what our Requested Port Capabilities are going to be.
/* No autonegotiation support at all: advertise everything we have. */
4148 if (!(lc->pcaps & FW_PORT_CAP32_ANEG)) {
4149 rcap = lc->acaps | fw_fc | fw_fec;
4150 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
/* Autonegotiation supported but disabled: force the selected speed. */
4152 } else if (lc->autoneg == AUTONEG_DISABLE) {
4153 rcap = lc->speed_caps | fw_fc | fw_fec | fw_mdi;
4154 lc->fc = lc->requested_fc & ~PAUSE_AUTONEG;
/* (else branch) autonegotiation enabled: advertise full capability set. */
4157 rcap = lc->acaps | fw_fc | fw_fec | fw_mdi;
4160 /* Note that older Firmware doesn't have FW_PORT_CAP32_FORCE_PAUSE, so
4161 * we need to exclude this from this check in order to maintain
4164 if ((rcap & ~lc->pcaps) & ~FW_PORT_CAP32_FORCE_PAUSE) {
4165 dev_err(adapter->pdev_dev,
4166 "Requested Port Capabilities %#x exceed Physical Port Capabilities %#x\n",
4171 /* And send that on to the Firmware ...
4173 memset(&cmd, 0, sizeof(cmd));
4174 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4175 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4176 FW_PORT_CMD_PORTID_V(port));
/* Select the 16-bit or 32-bit L1 Configure action depending on the
 * Port Capabilities width the Firmware negotiated with us.
 */
4177 cmd.action_to_len16 =
4178 cpu_to_be32(FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
4179 ? FW_PORT_ACTION_L1_CFG
4180 : FW_PORT_ACTION_L1_CFG32) |
4182 if (fw_caps == FW_CAPS16)
4183 cmd.u.l1cfg.rcap = cpu_to_be32(fwcaps32_to_caps16(rcap));
/* (else branch) 32-bit capability Firmware takes rcap unconverted. */
4185 cmd.u.l1cfg32.rcap32 = cpu_to_be32(rcap);
4187 ret = t4_wr_mbox_meat_timeout(adapter, mbox, &cmd, sizeof(cmd), NULL,
4190 dev_err(adapter->pdev_dev,
4191 "Requested Port Capabilities %#x rejected, error %d\n",
4199 * t4_restart_aneg - restart autonegotiation
4200 * @adap: the adapter
4201 * @mbox: mbox to use for the FW command
4202 * @port: the port id
4204 * Restarts autonegotiation for the selected port.
4206 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
4208 struct fw_port_cmd c;
4210 memset(&c, 0, sizeof(c));
4211 c.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
4212 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
4213 FW_PORT_CMD_PORTID_V(port));
4215 cpu_to_be32(FW_PORT_CMD_ACTION_V(FW_PORT_ACTION_L1_CFG) |
4217 c.u.l1cfg.rcap = cpu_to_be32(FW_PORT_CAP32_ANEG);
4218 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/* Platform-specific handler invoked for a matching interrupt cause bit. */
4221 typedef void (*int_handler_t)(struct adapter *adap);
/* One entry of a table-driven interrupt action table; a zero mask
 * terminates the table (see t4_handle_intr_status()).
 */
4224 unsigned int mask; /* bits to check in interrupt status */
4225 const char *msg; /* message to print or NULL */
4226 short stat_idx; /* stat counter to increment or -1 */
4227 unsigned short fatal; /* whether the condition reported is fatal */
4228 int_handler_t int_handler; /* platform-specific int handler */
4232 * t4_handle_intr_status - table driven interrupt handler
4233 * @adapter: the adapter that generated the interrupt
4234 * @reg: the interrupt status register to process
4235 * @acts: table of interrupt actions
4237 * A table driven interrupt handler that applies a set of masks to an
4238 * interrupt status word and performs the corresponding actions if the
4239 * interrupts described by the mask have occurred. The actions include
4240 * optionally emitting a warning or alert message. The table is terminated
4241 * by an entry specifying mask 0. Returns the number of fatal interrupt
4244 static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
4245 const struct intr_info *acts)
4248 unsigned int mask = 0;
4249 unsigned int status = t4_read_reg(adapter, reg);
/* Walk the table until the zero-mask terminator entry. */
4251 for ( ; acts->mask; ++acts) {
4252 if (!(status & acts->mask))
/* Fatal conditions are logged at alert level (unconditionally). */
4256 dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4257 status & acts->mask);
/* Non-fatal conditions are rate-limited warnings. */
4258 } else if (acts->msg && printk_ratelimit())
4259 dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
4260 status & acts->mask);
4261 if (acts->int_handler)
4262 acts->int_handler(adapter);
4266 if (status) /* clear processed interrupts */
4267 t4_write_reg(adapter, reg, status);
4272 * Interrupt handler for the PCIE module.
4274 static void pcie_intr_handler(struct adapter *adapter)
/* T4-only: UTL system bus agent status causes. */
4276 static const struct intr_info sysbus_intr_info[] = {
4277 { RNPP_F, "RXNP array parity error", -1, 1 },
4278 { RPCP_F, "RXPC array parity error", -1, 1 },
4279 { RCIP_F, "RXCIF array parity error", -1, 1 },
4280 { RCCP_F, "Rx completions control array parity error", -1, 1 },
4281 { RFTP_F, "RXFT array parity error", -1, 1 },
/* T4-only: UTL PCI Express port status causes. */
4284 static const struct intr_info pcie_port_intr_info[] = {
4285 { TPCP_F, "TXPC array parity error", -1, 1 },
4286 { TNPP_F, "TXNP array parity error", -1, 1 },
4287 { TFTP_F, "TXFT array parity error", -1, 1 },
4288 { TCAP_F, "TXCA array parity error", -1, 1 },
4289 { TCIP_F, "TXCIF array parity error", -1, 1 },
4290 { RCAP_F, "RXCA array parity error", -1, 1 },
4291 { OTDD_F, "outbound request TLP discarded", -1, 1 },
4292 { RDPE_F, "Rx data parity error", -1, 1 },
4293 { TDUE_F, "Tx uncorrectable data error", -1, 1 },
/* T4 PCIE interrupt cause table. */
4296 static const struct intr_info pcie_intr_info[] = {
4297 { MSIADDRLPERR_F, "MSI AddrL parity error", -1, 1 },
4298 { MSIADDRHPERR_F, "MSI AddrH parity error", -1, 1 },
4299 { MSIDATAPERR_F, "MSI data parity error", -1, 1 },
4300 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4301 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4302 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4303 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4304 { PIOCPLPERR_F, "PCI PIO completion FIFO parity error", -1, 1 },
4305 { PIOREQPERR_F, "PCI PIO request FIFO parity error", -1, 1 },
4306 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4307 { CCNTPERR_F, "PCI CMD channel count parity error", -1, 1 },
4308 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4309 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4310 { DCNTPERR_F, "PCI DMA channel count parity error", -1, 1 },
4311 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4312 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4313 { HCNTPERR_F, "PCI HMA channel count parity error", -1, 1 },
4314 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4315 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4316 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4317 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4318 { INTXCLRPERR_F, "PCI INTx clear parity error", -1, 1 },
4319 { MATAGPERR_F, "PCI MA tag parity error", -1, 1 },
4320 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4321 { RXCPLPERR_F, "PCI Rx completion parity error", -1, 1 },
4322 { RXWRPERR_F, "PCI Rx write parity error", -1, 1 },
4323 { RPLPERR_F, "PCI replay buffer parity error", -1, 1 },
4324 { PCIESINT_F, "PCI core secondary fault", -1, 1 },
4325 { PCIEPINT_F, "PCI core primary fault", -1, 1 },
4326 { UNXSPLCPLERR_F, "PCI unexpected split completion error",
/* T5+ PCIE interrupt cause table.
 * NOTE(review): unlike the tables above this one is not declared const —
 * looks like an oversight; confirm nothing writes it and add const.
 */
4331 static struct intr_info t5_pcie_intr_info[] = {
4332 { MSTGRPPERR_F, "Master Response Read Queue parity error",
4334 { MSTTIMEOUTPERR_F, "Master Timeout FIFO parity error", -1, 1 },
4335 { MSIXSTIPERR_F, "MSI-X STI SRAM parity error", -1, 1 },
4336 { MSIXADDRLPERR_F, "MSI-X AddrL parity error", -1, 1 },
4337 { MSIXADDRHPERR_F, "MSI-X AddrH parity error", -1, 1 },
4338 { MSIXDATAPERR_F, "MSI-X data parity error", -1, 1 },
4339 { MSIXDIPERR_F, "MSI-X DI parity error", -1, 1 },
4340 { PIOCPLGRPPERR_F, "PCI PIO completion Group FIFO parity error",
4342 { PIOREQGRPPERR_F, "PCI PIO request Group FIFO parity error",
4344 { TARTAGPERR_F, "PCI PCI target tag FIFO parity error", -1, 1 },
4345 { MSTTAGQPERR_F, "PCI master tag queue parity error", -1, 1 },
4346 { CREQPERR_F, "PCI CMD channel request parity error", -1, 1 },
4347 { CRSPPERR_F, "PCI CMD channel response parity error", -1, 1 },
4348 { DREQWRPERR_F, "PCI DMA channel write request parity error",
4350 { DREQPERR_F, "PCI DMA channel request parity error", -1, 1 },
4351 { DRSPPERR_F, "PCI DMA channel response parity error", -1, 1 },
4352 { HREQWRPERR_F, "PCI HMA channel count parity error", -1, 1 },
4353 { HREQPERR_F, "PCI HMA channel request parity error", -1, 1 },
4354 { HRSPPERR_F, "PCI HMA channel response parity error", -1, 1 },
4355 { CFGSNPPERR_F, "PCI config snoop FIFO parity error", -1, 1 },
4356 { FIDPERR_F, "PCI FID parity error", -1, 1 },
4357 { VFIDPERR_F, "PCI INTx clear parity error", -1, 1 },
4358 { MAGRPPERR_F, "PCI MA group FIFO parity error", -1, 1 },
4359 { PIOTAGPERR_F, "PCI PIO tag parity error", -1, 1 },
4360 { IPRXHDRGRPPERR_F, "PCI IP Rx header group parity error",
4362 { IPRXDATAGRPPERR_F, "PCI IP Rx data group parity error",
4364 { RPLPERR_F, "PCI IP replay buffer parity error", -1, 1 },
4365 { IPSOTPERR_F, "PCI IP SOT buffer parity error", -1, 1 },
4366 { TRGT1GRPPERR_F, "PCI TRGT1 group FIFOs parity error", -1, 1 },
4367 { READRSPERR_F, "Outbound read error", -1, 0 },
/* T4 processes three cause registers; T5+ has a single combined one. */
4373 if (is_t4(adapter->params.chip))
4374 fat = t4_handle_intr_status(adapter,
4375 PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS_A,
4377 t4_handle_intr_status(adapter,
4378 PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS_A,
4379 pcie_port_intr_info) +
4380 t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
4383 fat = t4_handle_intr_status(adapter, PCIE_INT_CAUSE_A,
/* Any fatal cause brings the adapter down. */
4387 t4_fatal_err(adapter);
4391 * TP interrupt handler.
4393 static void tp_intr_handler(struct adapter *adapter)
4395 static const struct intr_info tp_intr_info[] = {
4396 { 0x3fffffff, "TP parity error", -1, 1 },
4397 { FLMTXFLSTEMPTY_F, "TP out of Tx pages", -1, 1 },
4401 if (t4_handle_intr_status(adapter, TP_INT_CAUSE_A, tp_intr_info))
4402 t4_fatal_err(adapter);
4406 * SGE interrupt handler.
4408 static void sge_intr_handler(struct adapter *adapter)
/* Causes common to all chip generations. */
4413 static const struct intr_info sge_intr_info[] = {
4414 { ERR_CPL_EXCEED_IQE_SIZE_F,
4415 "SGE received CPL exceeding IQE size", -1, 1 },
4416 { ERR_INVALID_CIDX_INC_F,
4417 "SGE GTS CIDX increment too large", -1, 0 },
4418 { ERR_CPL_OPCODE_0_F, "SGE received 0-length CPL", -1, 0 },
4419 { DBFIFO_LP_INT_F, NULL, -1, 0, t4_db_full },
4420 { ERR_DATA_CPL_ON_HIGH_QID1_F | ERR_DATA_CPL_ON_HIGH_QID0_F,
4421 "SGE IQID > 1023 received CPL for FL", -1, 0 },
4422 { ERR_BAD_DB_PIDX3_F, "SGE DBP 3 pidx increment too large", -1,
4424 { ERR_BAD_DB_PIDX2_F, "SGE DBP 2 pidx increment too large", -1,
4426 { ERR_BAD_DB_PIDX1_F, "SGE DBP 1 pidx increment too large", -1,
4428 { ERR_BAD_DB_PIDX0_F, "SGE DBP 0 pidx increment too large", -1,
4430 { ERR_ING_CTXT_PRIO_F,
4431 "SGE too many priority ingress contexts", -1, 0 },
4432 { INGRESS_SIZE_ERR_F, "SGE illegal ingress QID", -1, 0 },
4433 { EGRESS_SIZE_ERR_F, "SGE illegal egress QID", -1, 0 },
/* Extra causes that only exist on T4/T5.
 * NOTE(review): could be declared const like sge_intr_info above.
 */
4437 static struct intr_info t4t5_sge_intr_info[] = {
4438 { ERR_DROPPED_DB_F, NULL, -1, 0, t4_db_dropped },
4439 { DBFIFO_HP_INT_F, NULL, -1, 0, t4_db_full },
4440 { ERR_EGR_CTXT_PRIO_F,
4441 "SGE too many priority egress contexts", -1, 0 },
/* Parity errors are reported through two dedicated 32-bit cause regs,
 * combined here into a 64-bit value.
 */
4445 v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1_A) |
4446 ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2_A) << 32);
4448 dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
4449 (unsigned long long)v);
4450 t4_write_reg(adapter, SGE_INT_CAUSE1_A, v);
4451 t4_write_reg(adapter, SGE_INT_CAUSE2_A, v >> 32);
4454 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A, sge_intr_info);
4455 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
4456 v |= t4_handle_intr_status(adapter, SGE_INT_CAUSE3_A,
4457 t4t5_sge_intr_info);
/* Report (and clear) any latched per-queue error indication. */
4459 err = t4_read_reg(adapter, SGE_ERROR_STATS_A);
4460 if (err & ERROR_QID_VALID_F) {
4461 dev_err(adapter->pdev_dev, "SGE error for queue %u\n",
4463 if (err & UNCAPTURED_ERROR_F)
4464 dev_err(adapter->pdev_dev,
4465 "SGE UNCAPTURED_ERROR set (clearing)\n");
4466 t4_write_reg(adapter, SGE_ERROR_STATS_A, ERROR_QID_VALID_F |
4467 UNCAPTURED_ERROR_F);
4471 t4_fatal_err(adapter);
/* Aggregate masks for the CIM outbound/inbound queue parity error bits. */
4474 #define CIM_OBQ_INTR (OBQULP0PARERR_F | OBQULP1PARERR_F | OBQULP2PARERR_F |\
4475 OBQULP3PARERR_F | OBQSGEPARERR_F | OBQNCSIPARERR_F)
4476 #define CIM_IBQ_INTR (IBQTP0PARERR_F | IBQTP1PARERR_F | IBQULPPARERR_F |\
4477 IBQSGEHIPARERR_F | IBQSGELOPARERR_F | IBQNCSIPARERR_F)
4480 * CIM interrupt handler.
4482 static void cim_intr_handler(struct adapter *adapter)
4484 static const struct intr_info cim_intr_info[] = {
4485 { PREFDROPINT_F, "CIM control register prefetch drop", -1, 1 },
4486 { CIM_OBQ_INTR, "CIM OBQ parity error", -1, 1 },
4487 { CIM_IBQ_INTR, "CIM IBQ parity error", -1, 1 },
4488 { MBUPPARERR_F, "CIM mailbox uP parity error", -1, 1 },
4489 { MBHOSTPARERR_F, "CIM mailbox host parity error", -1, 1 },
4490 { TIEQINPARERRINT_F, "CIM TIEQ outgoing parity error", -1, 1 },
4491 { TIEQOUTPARERRINT_F, "CIM TIEQ incoming parity error", -1, 1 },
4492 { TIMER0INT_F, "CIM TIMER0 interrupt", -1, 1 },
4495 static const struct intr_info cim_upintr_info[] = {
4496 { RSVDSPACEINT_F, "CIM reserved space access", -1, 1 },
4497 { ILLTRANSINT_F, "CIM illegal transaction", -1, 1 },
4498 { ILLWRINT_F, "CIM illegal write", -1, 1 },
4499 { ILLRDINT_F, "CIM illegal read", -1, 1 },
4500 { ILLRDBEINT_F, "CIM illegal read BE", -1, 1 },
4501 { ILLWRBEINT_F, "CIM illegal write BE", -1, 1 },
4502 { SGLRDBOOTINT_F, "CIM single read from boot space", -1, 1 },
4503 { SGLWRBOOTINT_F, "CIM single write to boot space", -1, 1 },
4504 { BLKWRBOOTINT_F, "CIM block write to boot space", -1, 1 },
4505 { SGLRDFLASHINT_F, "CIM single read from flash space", -1, 1 },
4506 { SGLWRFLASHINT_F, "CIM single write to flash space", -1, 1 },
4507 { BLKWRFLASHINT_F, "CIM block write to flash space", -1, 1 },
4508 { SGLRDEEPROMINT_F, "CIM single EEPROM read", -1, 1 },
4509 { SGLWREEPROMINT_F, "CIM single EEPROM write", -1, 1 },
4510 { BLKRDEEPROMINT_F, "CIM block EEPROM read", -1, 1 },
4511 { BLKWREEPROMINT_F, "CIM block EEPROM write", -1, 1 },
4512 { SGLRDCTLINT_F, "CIM single read from CTL space", -1, 1 },
4513 { SGLWRCTLINT_F, "CIM single write to CTL space", -1, 1 },
4514 { BLKRDCTLINT_F, "CIM block read from CTL space", -1, 1 },
4515 { BLKWRCTLINT_F, "CIM block write to CTL space", -1, 1 },
4516 { SGLRDPLINT_F, "CIM single read from PL space", -1, 1 },
4517 { SGLWRPLINT_F, "CIM single write to PL space", -1, 1 },
4518 { BLKRDPLINT_F, "CIM block read from PL space", -1, 1 },
4519 { BLKWRPLINT_F, "CIM block write to PL space", -1, 1 },
4520 { REQOVRLOOKUPINT_F, "CIM request FIFO overwrite", -1, 1 },
4521 { RSPOVRLOOKUPINT_F, "CIM response FIFO overwrite", -1, 1 },
4522 { TIMEOUTINT_F, "CIM PIF timeout", -1, 1 },
4523 { TIMEOUTMAINT_F, "CIM PIF MA timeout", -1, 1 },
/* Report a Firmware-detected error first, if one is latched. */
4530 fw_err = t4_read_reg(adapter, PCIE_FW_A);
4531 if (fw_err & PCIE_FW_ERR_F)
4532 t4_report_fw_error(adapter);
4534 /* When the Firmware detects an internal error which normally
4535 * wouldn't raise a Host Interrupt, it forces a CIM Timer0 interrupt
4536 * in order to make sure the Host sees the Firmware Crash. So
4537 * if we have a Timer0 interrupt and don't see a Firmware Crash,
4538 * ignore the Timer0 interrupt.
4541 val = t4_read_reg(adapter, CIM_HOST_INT_CAUSE_A);
4542 if (val & TIMER0INT_F)
4543 if (!(fw_err & PCIE_FW_ERR_F) ||
4544 (PCIE_FW_EVAL_G(fw_err) != PCIE_FW_EVAL_CRASH))
4545 t4_write_reg(adapter, CIM_HOST_INT_CAUSE_A,
4548 fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE_A,
4550 t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE_A,
4553 t4_fatal_err(adapter);
4557 * ULP RX interrupt handler.
4559 static void ulprx_intr_handler(struct adapter *adapter)
4561 static const struct intr_info ulprx_intr_info[] = {
4562 { 0x1800000, "ULPRX context error", -1, 1 },
4563 { 0x7fffff, "ULPRX parity error", -1, 1 },
4567 if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE_A, ulprx_intr_info))
4568 t4_fatal_err(adapter);
4572 * ULP TX interrupt handler.
4574 static void ulptx_intr_handler(struct adapter *adapter)
/* Per-channel PBL bound errors plus a catch-all parity mask. */
4576 static const struct intr_info ulptx_intr_info[] = {
4577 { PBL_BOUND_ERR_CH3_F, "ULPTX channel 3 PBL out of bounds", -1,
4579 { PBL_BOUND_ERR_CH2_F, "ULPTX channel 2 PBL out of bounds", -1,
4581 { PBL_BOUND_ERR_CH1_F, "ULPTX channel 1 PBL out of bounds", -1,
4583 { PBL_BOUND_ERR_CH0_F, "ULPTX channel 0 PBL out of bounds", -1,
4585 { 0xfffffff, "ULPTX parity error", -1, 1 },
4589 if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE_A, ulptx_intr_info))
4590 t4_fatal_err(adapter);
4594 * PM TX interrupt handler.
4596 static void pmtx_intr_handler(struct adapter *adapter)
/* PM TX cause bits and their log messages; all are fatal. */
4598 static const struct intr_info pmtx_intr_info[] = {
4599 { PCMD_LEN_OVFL0_F, "PMTX channel 0 pcmd too large", -1, 1 },
4600 { PCMD_LEN_OVFL1_F, "PMTX channel 1 pcmd too large", -1, 1 },
4601 { PCMD_LEN_OVFL2_F, "PMTX channel 2 pcmd too large", -1, 1 },
4602 { ZERO_C_CMD_ERROR_F, "PMTX 0-length pcmd", -1, 1 },
4603 { PMTX_FRAMING_ERROR_F, "PMTX framing error", -1, 1 },
4604 { OESPI_PAR_ERROR_F, "PMTX oespi parity error", -1, 1 },
4605 { DB_OPTIONS_PAR_ERROR_F, "PMTX db_options parity error",
4607 { ICSPI_PAR_ERROR_F, "PMTX icspi parity error", -1, 1 },
4608 { PMTX_C_PCMD_PAR_ERROR_F, "PMTX c_pcmd parity error", -1, 1},
4612 if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE_A, pmtx_intr_info))
4613 t4_fatal_err(adapter);
4617 * PM RX interrupt handler.
4619 static void pmrx_intr_handler(struct adapter *adapter)
/* PM RX cause bits and their log messages; all are fatal. */
4621 static const struct intr_info pmrx_intr_info[] = {
4622 { ZERO_E_CMD_ERROR_F, "PMRX 0-length pcmd", -1, 1 },
4623 { PMRX_FRAMING_ERROR_F, "PMRX framing error", -1, 1 },
4624 { OCSPI_PAR_ERROR_F, "PMRX ocspi parity error", -1, 1 },
4625 { DB_OPTIONS_PAR_ERROR_F, "PMRX db_options parity error",
4627 { IESPI_PAR_ERROR_F, "PMRX iespi parity error", -1, 1 },
4628 { PMRX_E_PCMD_PAR_ERROR_F, "PMRX e_pcmd parity error", -1, 1},
4632 if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE_A, pmrx_intr_info))
4633 t4_fatal_err(adapter);
4637 * CPL switch interrupt handler.
4639 static void cplsw_intr_handler(struct adapter *adapter)
4641 static const struct intr_info cplsw_intr_info[] = {
4642 { CIM_OP_MAP_PERR_F, "CPLSW CIM op_map parity error", -1, 1 },
4643 { CIM_OVFL_ERROR_F, "CPLSW CIM overflow", -1, 1 },
4644 { TP_FRAMING_ERROR_F, "CPLSW TP framing error", -1, 1 },
4645 { SGE_FRAMING_ERROR_F, "CPLSW SGE framing error", -1, 1 },
4646 { CIM_FRAMING_ERROR_F, "CPLSW CIM framing error", -1, 1 },
4647 { ZERO_SWITCH_ERROR_F, "CPLSW no-switch error", -1, 1 },
4651 if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE_A, cplsw_intr_info))
4652 t4_fatal_err(adapter);
4656 * LE interrupt handler.
4658 static void le_intr_handler(struct adapter *adap)
4660 enum chip_type chip = CHELSIO_CHIP_VERSION(adap->params.chip);
4661 static const struct intr_info le_intr_info[] = {
4662 { LIPMISS_F, "LE LIP miss", -1, 0 },
4663 { LIP0_F, "LE 0 LIP error", -1, 0 },
4664 { PARITYERR_F, "LE parity error", -1, 1 },
4665 { UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4666 { REQQPARERR_F, "LE request queue parity error", -1, 1 },
4670 static struct intr_info t6_le_intr_info[] = {
4671 { T6_LIPMISS_F, "LE LIP miss", -1, 0 },
4672 { T6_LIP0_F, "LE 0 LIP error", -1, 0 },
4673 { TCAMINTPERR_F, "LE parity error", -1, 1 },
4674 { T6_UNKNOWNCMD_F, "LE unknown command", -1, 1 },
4675 { SSRAMINTPERR_F, "LE request queue parity error", -1, 1 },
4679 if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE_A,
4680 (chip <= CHELSIO_T5) ?
4681 le_intr_info : t6_le_intr_info))
4686 * MPS interrupt handler.
4688 static void mps_intr_handler(struct adapter *adapter)
/* One cause table per MPS sub-block; fatal counts are summed below. */
4690 static const struct intr_info mps_rx_intr_info[] = {
4691 { 0xffffff, "MPS Rx parity error", -1, 1 },
4694 static const struct intr_info mps_tx_intr_info[] = {
4695 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4696 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4697 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4699 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4701 { BUBBLE_F, "MPS Tx underflow", -1, 1 },
4702 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4703 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4706 static const struct intr_info t6_mps_tx_intr_info[] = {
4707 { TPFIFO_V(TPFIFO_M), "MPS Tx TP FIFO parity error", -1, 1 },
4708 { NCSIFIFO_F, "MPS Tx NC-SI FIFO parity error", -1, 1 },
4709 { TXDATAFIFO_V(TXDATAFIFO_M), "MPS Tx data FIFO parity error",
4711 { TXDESCFIFO_V(TXDESCFIFO_M), "MPS Tx desc FIFO parity error",
4713 /* MPS Tx Bubble is normal for T6 */
4714 { SECNTERR_F, "MPS Tx SOP/EOP error", -1, 1 },
4715 { FRMERR_F, "MPS Tx framing error", -1, 1 },
4718 static const struct intr_info mps_trc_intr_info[] = {
4719 { FILTMEM_V(FILTMEM_M), "MPS TRC filter parity error", -1, 1 },
4720 { PKTFIFO_V(PKTFIFO_M), "MPS TRC packet FIFO parity error",
4722 { MISCPERR_F, "MPS TRC misc parity error", -1, 1 },
4725 static const struct intr_info mps_stat_sram_intr_info[] = {
4726 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
4729 static const struct intr_info mps_stat_tx_intr_info[] = {
4730 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
4733 static const struct intr_info mps_stat_rx_intr_info[] = {
4734 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
4737 static const struct intr_info mps_cls_intr_info[] = {
4738 { MATCHSRAM_F, "MPS match SRAM parity error", -1, 1 },
4739 { MATCHTCAM_F, "MPS match TCAM parity error", -1, 1 },
4740 { HASHSRAM_F, "MPS hash SRAM parity error", -1, 1 },
/* Sum fatal-cause counts across every MPS sub-block cause register. */
4746 fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE_A,
4748 t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE_A,
4749 is_t6(adapter->params.chip)
4750 ? t6_mps_tx_intr_info
4751 : mps_tx_intr_info) +
4752 t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE_A,
4753 mps_trc_intr_info) +
4754 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM_A,
4755 mps_stat_sram_intr_info) +
4756 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO_A,
4757 mps_stat_tx_intr_info) +
4758 t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO_A,
4759 mps_stat_rx_intr_info) +
4760 t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE_A,
/* Clear the top-level MPS cause and flush the posted write. */
4763 t4_write_reg(adapter, MPS_INT_CAUSE_A, 0);
4764 t4_read_reg(adapter, MPS_INT_CAUSE_A); /* flush */
4766 t4_fatal_err(adapter);
4769 #define MEM_INT_MASK (PERR_INT_CAUSE_F | ECC_CE_INT_CAUSE_F | \
4773 * EDC/MC interrupt handler.
4775 static void mem_intr_handler(struct adapter *adapter, int idx)
/* Human-readable name per memory controller index (idx). */
4777 static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
4779 unsigned int addr, cnt_addr, v;
/* Select the cause / ECC-status register pair for this controller. */
4781 if (idx <= MEM_EDC1) {
4782 addr = EDC_REG(EDC_INT_CAUSE_A, idx);
4783 cnt_addr = EDC_REG(EDC_ECC_STATUS_A, idx);
4784 } else if (idx == MEM_MC) {
4785 if (is_t4(adapter->params.chip)) {
4786 addr = MC_INT_CAUSE_A;
4787 cnt_addr = MC_ECC_STATUS_A;
4789 addr = MC_P_INT_CAUSE_A;
4790 cnt_addr = MC_P_ECC_STATUS_A;
4793 addr = MC_REG(MC_P_INT_CAUSE_A, 1);
4794 cnt_addr = MC_REG(MC_P_ECC_STATUS_A, 1);
4797 v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
4798 if (v & PERR_INT_CAUSE_F)
4799 dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
/* Correctable ECC errors: log (rate-limited) and reset the counter. */
4801 if (v & ECC_CE_INT_CAUSE_F) {
4802 u32 cnt = ECC_CECNT_G(t4_read_reg(adapter, cnt_addr));
4804 t4_edc_err_read(adapter, idx);
4806 t4_write_reg(adapter, cnt_addr, ECC_CECNT_V(ECC_CECNT_M));
4807 if (printk_ratelimit())
4808 dev_warn(adapter->pdev_dev,
4809 "%u %s correctable ECC data error%s\n",
4810 cnt, name[idx], cnt > 1 ? "s" : "");
4812 if (v & ECC_UE_INT_CAUSE_F)
4813 dev_alert(adapter->pdev_dev,
4814 "%s uncorrectable ECC data error\n", name[idx]);
/* Ack the handled causes; parity/uncorrectable errors are fatal. */
4816 t4_write_reg(adapter, addr, v);
4817 if (v & (PERR_INT_CAUSE_F | ECC_UE_INT_CAUSE_F))
4818 t4_fatal_err(adapter);
4822 * MA interrupt handler.
4824 static void ma_intr_handler(struct adapter *adap)
4826 u32 v, status = t4_read_reg(adap, MA_INT_CAUSE_A);
4828 if (status & MEM_PERR_INT_CAUSE_F) {
4829 dev_alert(adap->pdev_dev,
4830 "MA parity error, parity status %#x\n",
4831 t4_read_reg(adap, MA_PARITY_ERROR_STATUS1_A));
/* T5 has a second parity status register worth reporting. */
4832 if (is_t5(adap->params.chip))
4833 dev_alert(adap->pdev_dev,
4834 "MA parity error, parity status %#x\n",
4836 MA_PARITY_ERROR_STATUS2_A));
4838 if (status & MEM_WRAP_INT_CAUSE_F) {
4839 v = t4_read_reg(adap, MA_INT_WRAP_STATUS_A);
4840 dev_alert(adap->pdev_dev, "MA address wrap-around error by "
4841 "client %u to address %#x\n",
4842 MEM_WRAP_CLIENT_NUM_G(v),
4843 MEM_WRAP_ADDRESS_G(v) << 4);
/* Ack all reported causes. */
4845 t4_write_reg(adap, MA_INT_CAUSE_A, status);
4850 * SMB interrupt handler.
4852 static void smb_intr_handler(struct adapter *adap)
4854 static const struct intr_info smb_intr_info[] = {
4855 { MSTTXFIFOPARINT_F, "SMB master Tx FIFO parity error", -1, 1 },
4856 { MSTRXFIFOPARINT_F, "SMB master Rx FIFO parity error", -1, 1 },
4857 { SLVFIFOPARINT_F, "SMB slave FIFO parity error", -1, 1 },
4861 if (t4_handle_intr_status(adap, SMB_INT_CAUSE_A, smb_intr_info))
4866 * NC-SI interrupt handler.
4868 static void ncsi_intr_handler(struct adapter *adap)
4870 static const struct intr_info ncsi_intr_info[] = {
4871 { CIM_DM_PRTY_ERR_F, "NC-SI CIM parity error", -1, 1 },
4872 { MPS_DM_PRTY_ERR_F, "NC-SI MPS parity error", -1, 1 },
4873 { TXFIFO_PRTY_ERR_F, "NC-SI Tx FIFO parity error", -1, 1 },
4874 { RXFIFO_PRTY_ERR_F, "NC-SI Rx FIFO parity error", -1, 1 },
4878 if (t4_handle_intr_status(adap, NCSI_INT_CAUSE_A, ncsi_intr_info))
4883 * XGMAC interrupt handler.
4885 static void xgmac_intr_handler(struct adapter *adap, int port)
4887 u32 v, int_cause_reg;
4889 if (is_t4(adap->params.chip))
4890 int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE_A);
4892 int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE_A);
4894 v = t4_read_reg(adap, int_cause_reg);
4896 v &= TXFIFO_PRTY_ERR_F | RXFIFO_PRTY_ERR_F;
4900 if (v & TXFIFO_PRTY_ERR_F)
4901 dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
4903 if (v & RXFIFO_PRTY_ERR_F)
4904 dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
4906 t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE_A), v);
4911 * PL interrupt handler.
4913 static void pl_intr_handler(struct adapter *adap)
4915 static const struct intr_info pl_intr_info[] = {
4916 { FATALPERR_F, "T4 fatal parity error", -1, 1 },
4917 { PERRVFID_F, "PL VFID_MAP parity error", -1, 1 },
4921 if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE_A, pl_intr_info))
/* Top-level interrupt masks: per-PF causes and globally-owned causes. */
4925 #define PF_INTR_MASK (PFSW_F)
4926 #define GLBL_INTR_MASK (CIM_F | MPS_F | PL_F | PCIE_F | MC_F | EDC0_F | \
4927 EDC1_F | LE_F | TP_F | MA_F | PM_TX_F | PM_RX_F | ULP_RX_F | \
4928 CPL_SWITCH_F | SGE_F | ULP_TX_F | SF_F)
4931 * t4_slow_intr_handler - control path interrupt handler
4932 * @adapter: the adapter
4934 * T4 interrupt handler for non-data global interrupt events, e.g., errors.
4935 * The designation 'slow' is because it involves register reads, while
4936 * data interrupts typically don't involve any MMIOs.
4938 int t4_slow_intr_handler(struct adapter *adapter)
4940 u32 cause = t4_read_reg(adapter, PL_INT_CAUSE_A);
/* Nothing we own is pending. */
4942 if (!(cause & GLBL_INTR_MASK))
/* Dispatch each pending cause bit to its module handler. */
4945 cim_intr_handler(adapter);
4947 mps_intr_handler(adapter);
4949 ncsi_intr_handler(adapter);
4951 pl_intr_handler(adapter);
4953 smb_intr_handler(adapter);
4954 if (cause & XGMAC0_F)
4955 xgmac_intr_handler(adapter, 0);
4956 if (cause & XGMAC1_F)
4957 xgmac_intr_handler(adapter, 1);
4958 if (cause & XGMAC_KR0_F)
4959 xgmac_intr_handler(adapter, 2);
4960 if (cause & XGMAC_KR1_F)
4961 xgmac_intr_handler(adapter, 3);
4963 pcie_intr_handler(adapter);
4965 mem_intr_handler(adapter, MEM_MC);
4966 if (is_t5(adapter->params.chip) && (cause & MC1_F))
4967 mem_intr_handler(adapter, MEM_MC1);
4969 mem_intr_handler(adapter, MEM_EDC0);
4971 mem_intr_handler(adapter, MEM_EDC1);
4973 le_intr_handler(adapter);
4975 tp_intr_handler(adapter);
4977 ma_intr_handler(adapter);
4978 if (cause & PM_TX_F)
4979 pmtx_intr_handler(adapter);
4980 if (cause & PM_RX_F)
4981 pmrx_intr_handler(adapter);
4982 if (cause & ULP_RX_F)
4983 ulprx_intr_handler(adapter);
4984 if (cause & CPL_SWITCH_F)
4985 cplsw_intr_handler(adapter);
4987 sge_intr_handler(adapter);
4988 if (cause & ULP_TX_F)
4989 ulptx_intr_handler(adapter);
4991 /* Clear the interrupts just processed for which we are the master. */
4992 t4_write_reg(adapter, PL_INT_CAUSE_A, cause & GLBL_INTR_MASK);
4993 (void)t4_read_reg(adapter, PL_INT_CAUSE_A); /* flush */
4998 * t4_intr_enable - enable interrupts
4999 * @adapter: the adapter whose interrupts should be enabled
5001 * Enable PF-specific interrupts for the calling function and the top-level
5002 * interrupt concentrator for global interrupts. Interrupts are already
5003 * enabled at each module, here we just enable the roots of the interrupt
5006 * Note: this function should be called only when the driver manages
5007 * non PF-specific interrupts from the various HW modules. Only one PCI
5008 * function at a time should be doing this.
5010 void t4_intr_enable(struct adapter *adapter)
/* PF field location in PL_WHOAMI differs between <=T5 and T6. */
5013 u32 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5014 u32 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
5015 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
/* Doorbell-related SGE causes only exist on T4/T5 (see val). */
5017 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5)
5018 val = ERR_DROPPED_DB_F | ERR_EGR_CTXT_PRIO_F | DBFIFO_HP_INT_F;
5019 t4_write_reg(adapter, SGE_INT_ENABLE3_A, ERR_CPL_EXCEED_IQE_SIZE_F |
5020 ERR_INVALID_CIDX_INC_F | ERR_CPL_OPCODE_0_F |
5021 ERR_DATA_CPL_ON_HIGH_QID1_F | INGRESS_SIZE_ERR_F |
5022 ERR_DATA_CPL_ON_HIGH_QID0_F | ERR_BAD_DB_PIDX3_F |
5023 ERR_BAD_DB_PIDX2_F | ERR_BAD_DB_PIDX1_F |
5024 ERR_BAD_DB_PIDX0_F | ERR_ING_CTXT_PRIO_F |
5025 DBFIFO_LP_INT_F | EGRESS_SIZE_ERR_F | val);
/* Enable the PF-local root, then route global causes to this PF. */
5026 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), PF_INTR_MASK);
5027 t4_set_reg_field(adapter, PL_INT_MAP0_A, 0, 1 << pf);
5031 * t4_intr_disable - disable interrupts
5032 * @adapter: the adapter whose interrupts should be disabled
5034 * Disable interrupts. We only disable the top-level interrupt
5035 * concentrators. The caller must be a PCI function managing global
5038 void t4_intr_disable(struct adapter *adapter)
/* Don't touch hardware if the PCI channel is offline (e.g. during EEH).
 * NOTE(review): the early-return body of this 'if' is not visible in
 * this excerpt -- confirm against the full source.
 */
5042 if (pci_channel_offline(adapter->pdev))
5045 whoami = t4_read_reg(adapter, PL_WHOAMI_A);
5046 pf = CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5 ?
5047 SOURCEPF_G(whoami) : T6_SOURCEPF_G(whoami);
/* Clear the per-PF enable mask and unroute this PF from the global map. */
5049 t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE_A), 0);
5050 t4_set_reg_field(adapter, PL_INT_MAP0_A, 1 << pf, 0);
/* Return the number of RSS lookup-table entries for this chip generation:
 * T4/T5 use the smaller table, T6+ the larger one.
 */
5053 unsigned int t4_chip_rss_size(struct adapter *adap)
5055 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
5056 return RSS_NENTRIES;
5058 return T6_RSS_NENTRIES;
5062 * t4_config_rss_range - configure a portion of the RSS mapping table
5063 * @adapter: the adapter
5064 * @mbox: mbox to use for the FW command
5065 * @viid: virtual interface whose RSS subtable is to be written
5066 * @start: start entry in the table to write
5067 * @n: how many table entries to write
5068 * @rspq: values for the response queue lookup table
5069 * @nrspq: number of values in @rspq
5071 * Programs the selected part of the VI's RSS mapping table with the
5072 * provided values. If @nrspq < @n the supplied values are used repeatedly
5073 * until the full table range is populated.
5075 * The caller must ensure the values in @rspq are in the range allowed for
5078 int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
5079 int start, int n, const u16 *rspq, unsigned int nrspq)
/* rsp walks the caller's queue list; rsp_end marks where to wrap so the
 * supplied values repeat until all @n entries are written.
 */
5082 const u16 *rsp = rspq;
5083 const u16 *rsp_end = rspq + nrspq;
5084 struct fw_rss_ind_tbl_cmd cmd;
5086 memset(&cmd, 0, sizeof(cmd));
5087 cmd.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_IND_TBL_CMD) |
5088 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5089 FW_RSS_IND_TBL_CMD_VIID_V(viid));
5090 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
5092 /* each fw_rss_ind_tbl_cmd takes up to 32 entries */
/* NOTE(review): the surrounding loop construct, the wrap-around labels and
 * the per-command field updates are not visible in this excerpt (lines
 * appear missing) -- confirm against the full source before editing.
 */
5094 int nq = min(n, 32);
5095 __be32 *qp = &cmd.iq0_to_iq2;
5097 cmd.niqid = cpu_to_be16(nq);
5098 cmd.startidx = cpu_to_be16(start);
/* Three queue ids are packed per 32-bit word of the command payload. */
5106 v = FW_RSS_IND_TBL_CMD_IQ0_V(*rsp);
5107 if (++rsp >= rsp_end)
5109 v |= FW_RSS_IND_TBL_CMD_IQ1_V(*rsp);
5110 if (++rsp >= rsp_end)
5112 v |= FW_RSS_IND_TBL_CMD_IQ2_V(*rsp);
5113 if (++rsp >= rsp_end)
5116 *qp++ = cpu_to_be32(v);
5120 ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
5128 * t4_config_glbl_rss - configure the global RSS mode
5129 * @adapter: the adapter
5130 * @mbox: mbox to use for the FW command
5131 * @mode: global RSS mode
5132 * @flags: mode-specific flags
5134 * Sets the global RSS mode.
5136 int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
5139 struct fw_rss_glb_config_cmd c;
5141 memset(&c, 0, sizeof(c));
5142 c.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_RSS_GLB_CONFIG_CMD) |
5143 FW_CMD_REQUEST_F | FW_CMD_WRITE_F);
5144 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
/* Only the two supported modes are encoded; basicvirtual additionally
 * carries the caller's mode-specific flags.
 * NOTE(review): the final 'else' arm (presumably rejecting unknown modes)
 * is not visible in this excerpt -- confirm against the full source.
 */
5145 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
5146 c.u.manual.mode_pkd =
5147 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5148 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
5149 c.u.basicvirtual.mode_pkd =
5150 cpu_to_be32(FW_RSS_GLB_CONFIG_CMD_MODE_V(mode));
5151 c.u.basicvirtual.synmapen_to_hashtoeplitz = cpu_to_be32(flags);
5154 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5158 * t4_config_vi_rss - configure per VI RSS settings
5159 * @adapter: the adapter
5160 * @mbox: mbox to use for the FW command
5163 * @defq: id of the default RSS queue for the VI.
5165 * Configures VI-specific RSS properties.
5167 int t4_config_vi_rss(struct adapter *adapter, int mbox, unsigned int viid,
5168 unsigned int flags, unsigned int defq)
5170 struct fw_rss_vi_config_cmd c;
/* Build and issue a single FW_RSS_VI_CONFIG_CMD: caller flags plus the
 * default queue id go into the basicvirtual union member.
 */
5172 memset(&c, 0, sizeof(c));
5173 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
5174 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
5175 FW_RSS_VI_CONFIG_CMD_VIID_V(viid));
5176 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
5177 c.u.basicvirtual.defaultq_to_udpen = cpu_to_be32(flags |
5178 FW_RSS_VI_CONFIG_CMD_DEFAULTQ_V(defq));
5179 return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
5182 /* Read an RSS table row */
5183 static int rd_rss_row(struct adapter *adap, int row, u32 *val)
/* Trigger a lookup of @row, then poll LKPTBLROWVLD until the row data is
 * valid, returning it via *val.
 * NOTE(review): the trailing arguments of the wait call (attempts/delay/
 * valp) are not visible in this excerpt -- confirm against the full source.
 */
5185 t4_write_reg(adap, TP_RSS_LKP_TABLE_A, 0xfff00000 | row);
5186 return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE_A, LKPTBLROWVLD_F, 1,
5191 * t4_read_rss - read the contents of the RSS mapping table
5192 * @adapter: the adapter
5193 * @map: holds the contents of the RSS mapping table
5195 * Reads the contents of the RSS hash->queue mapping table.
5197 int t4_read_rss(struct adapter *adapter, u16 *map)
5199 int i, ret, nentries;
5202 nentries = t4_chip_rss_size(adapter);
/* Each HW row packs two table entries, so read nentries/2 rows and unpack
 * two queue ids per row into @map.
 * NOTE(review): the 'val' declaration, the error check on 'ret' and the
 * final return are not visible in this excerpt -- confirm with full source.
 */
5203 for (i = 0; i < nentries / 2; ++i) {
5204 ret = rd_rss_row(adapter, i, &val);
5207 *map++ = LKPTBLQUEUE0_G(val);
5208 *map++ = LKPTBLQUEUE1_G(val);
/* True when indirect register access should go through the firmware LDST
 * mailbox interface: firmware is up (FW_OK) and backdoor access (use_bd)
 * has not been requested.
 */
5213 static unsigned int t4_use_ldst(struct adapter *adap)
5215 return (adap->flags & FW_OK) && !adap->use_bd;
5219 * t4_tp_fw_ldst_rw - Access TP indirect register through LDST
5220 * @adap: the adapter
5221 * @cmd: TP fw ldst address space type
5222 * @vals: where the indirect register values are stored/written
5223 * @nregs: how many indirect registers to read/write
5224 * @start_idx: index of first indirect register to read/write
5225 * @rw: Read (1) or Write (0)
5226 * @sleep_ok: if true we may sleep while awaiting command completion
5228 * Access TP indirect registers through LDST
5230 static int t4_tp_fw_ldst_rw(struct adapter *adap, int cmd, u32 *vals,
5231 unsigned int nregs, unsigned int start_index,
5232 unsigned int rw, bool sleep_ok)
5236 struct fw_ldst_cmd c;
/* One FW_LDST command per register: address is start_index + i, and for a
 * write the value travels in the command, for a read it comes back in the
 * reply.  NOTE(review): several lines (op flags, error handling, final
 * return) are not visible in this excerpt -- confirm with the full source.
 */
5238 for (i = 0; i < nregs; i++) {
5239 memset(&c, 0, sizeof(c));
5240 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
5242 (rw ? FW_CMD_READ_F :
5244 FW_LDST_CMD_ADDRSPACE_V(cmd));
5245 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
5247 c.u.addrval.addr = cpu_to_be32(start_index + i);
5248 c.u.addrval.val = rw ? 0 : cpu_to_be32(vals[i]);
5249 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c,
/* On a successful read, copy the returned register value back to caller. */
5255 vals[i] = be32_to_cpu(c.u.addrval.val);
5261 * t4_tp_indirect_rw - Read/Write TP indirect register through LDST or backdoor
5262 * @adap: the adapter
5263 * @reg_addr: Address Register
5264 * @reg_data: Data register
5265 * @buff: where the indirect register values are stored/written
5266 * @nregs: how many indirect registers to read/write
5267 * @start_index: index of first indirect register to read/write
5268 * @rw: READ(1) or WRITE(0)
5269 * @sleep_ok: if true we may sleep while awaiting command completion
5271 * Read/Write TP indirect registers through LDST if possible.
5272 * Else, use backdoor access
5274 static void t4_tp_indirect_rw(struct adapter *adap, u32 reg_addr, u32 reg_data,
5275 u32 *buff, u32 nregs, u32 start_index, int rw,
/* Map the address register to the matching FW LDST address space; unknown
 * registers fall through to direct (backdoor) access.
 * NOTE(review): the switch header, break statements and parts of the
 * LDST-vs-backdoor dispatch are not visible in this excerpt -- confirm
 * against the full source before editing.
 */
5283 cmd = FW_LDST_ADDRSPC_TP_PIO;
5285 case TP_TM_PIO_ADDR_A:
5286 cmd = FW_LDST_ADDRSPC_TP_TM_PIO;
5288 case TP_MIB_INDEX_A:
5289 cmd = FW_LDST_ADDRSPC_TP_MIB;
5292 goto indirect_access;
5295 if (t4_use_ldst(adap))
5296 rc = t4_tp_fw_ldst_rw(adap, cmd, buff, nregs, start_index, rw,
/* Backdoor path: drive the address/data register pair directly. */
5303 t4_read_indirect(adap, reg_addr, reg_data, buff, nregs,
5306 t4_write_indirect(adap, reg_addr, reg_data, buff, nregs,
5312 * t4_tp_pio_read - Read TP PIO registers
5313 * @adap: the adapter
5314 * @buff: where the indirect register values are written
5315 * @nregs: how many indirect registers to read
5316 * @start_index: index of first indirect register to read
5317 * @sleep_ok: if true we may sleep while awaiting command completion
5319 * Read TP PIO Registers
5321 void t4_tp_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5322 u32 start_index, bool sleep_ok)
/* Thin wrapper: rw=1 selects the read direction of t4_tp_indirect_rw. */
5324 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5325 start_index, 1, sleep_ok);
5329 * t4_tp_pio_write - Write TP PIO registers
5330 * @adap: the adapter
5331 * @buff: where the indirect register values are stored
5332 * @nregs: how many indirect registers to write
5333 * @start_index: index of first indirect register to write
5334 * @sleep_ok: if true we may sleep while awaiting command completion
5336 * Write TP PIO Registers
5338 static void t4_tp_pio_write(struct adapter *adap, u32 *buff, u32 nregs,
5339 u32 start_index, bool sleep_ok)
/* Thin wrapper: rw=0 selects the write direction of t4_tp_indirect_rw. */
5341 t4_tp_indirect_rw(adap, TP_PIO_ADDR_A, TP_PIO_DATA_A, buff, nregs,
5342 start_index, 0, sleep_ok);
5346 * t4_tp_tm_pio_read - Read TP TM PIO registers
5347 * @adap: the adapter
5348 * @buff: where the indirect register values are written
5349 * @nregs: how many indirect registers to read
5350 * @start_index: index of first indirect register to read
5351 * @sleep_ok: if true we may sleep while awaiting command completion
5353 * Read TP TM PIO Registers
5355 void t4_tp_tm_pio_read(struct adapter *adap, u32 *buff, u32 nregs,
5356 u32 start_index, bool sleep_ok)
/* Thin wrapper over t4_tp_indirect_rw using the TM PIO register pair. */
5358 t4_tp_indirect_rw(adap, TP_TM_PIO_ADDR_A, TP_TM_PIO_DATA_A, buff,
5359 nregs, start_index, 1, sleep_ok);
5363 * t4_tp_mib_read - Read TP MIB registers
5364 * @adap: the adapter
5365 * @buff: where the indirect register values are written
5366 * @nregs: how many indirect registers to read
5367 * @start_index: index of first indirect register to read
5368 * @sleep_ok: if true we may sleep while awaiting command completion
5370 * Read TP MIB Registers
5372 void t4_tp_mib_read(struct adapter *adap, u32 *buff, u32 nregs, u32 start_index,
/* Thin wrapper over t4_tp_indirect_rw using the MIB index/data pair. */
5375 t4_tp_indirect_rw(adap, TP_MIB_INDEX_A, TP_MIB_DATA_A, buff, nregs,
5376 start_index, 1, sleep_ok);
5380 * t4_read_rss_key - read the global RSS key
5381 * @adap: the adapter
5382 * @key: 10-entry array holding the 320-bit RSS key
5383 * @sleep_ok: if true we may sleep while awaiting command completion
5385 * Reads the global 320-bit RSS key.
5387 void t4_read_rss_key(struct adapter *adap, u32 *key, bool sleep_ok)
/* 10 x 32-bit PIO registers starting at SECRET_KEY0 hold the 320-bit key. */
5389 t4_tp_pio_read(adap, key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5393 * t4_write_rss_key - program one of the RSS keys
5394 * @adap: the adapter
5395 * @key: 10-entry array holding the 320-bit RSS key
5396 * @idx: which RSS key to write
5397 * @sleep_ok: if true we may sleep while awaiting command completion
5399 * Writes one of the RSS keys with the given 320-bit value. If @idx is
5400 * 0..15 the corresponding entry in the RSS key table is written,
5401 * otherwise the global RSS key is written.
5403 void t4_write_rss_key(struct adapter *adap, const u32 *key, int idx,
5406 u8 rss_key_addr_cnt = 16;
5407 u32 vrt = t4_read_reg(adap, TP_RSS_CONFIG_VRT_A);
5409 /* T6 and later: for KeyMode 3 (per-vf and per-vf scramble),
5410 * allows access to key addresses 16-63 by using KeyWrAddrX
5411 * as index[5:4](upper 2) into key table
5413 if ((CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) &&
5414 (vrt & KEYEXTEND_F) && (KEYMODE_G(vrt) == 3))
5415 rss_key_addr_cnt = 32;
/* Write the key material first, then latch it into the table slot @idx
 * (when @idx is a valid table index for this key mode).
 */
5417 t4_tp_pio_write(adap, (void *)key, 10, TP_RSS_SECRET_KEY0_A, sleep_ok);
5419 if (idx >= 0 && idx < rss_key_addr_cnt) {
5420 if (rss_key_addr_cnt > 16)
5421 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5422 KEYWRADDRX_V(idx >> 4) |
5423 T6_VFWRADDR_V(idx) | KEYWREN_F);
5425 t4_write_reg(adap, TP_RSS_CONFIG_VRT_A,
5426 KEYWRADDR_V(idx) | KEYWREN_F);
5431 * t4_read_rss_pf_config - read PF RSS Configuration Table
5432 * @adapter: the adapter
5433 * @index: the entry in the PF RSS table to read
5434 * @valp: where to store the returned value
5435 * @sleep_ok: if true we may sleep while awaiting command completion
5437 * Reads the PF RSS Configuration Table at the specified index and returns
5438 * the value found there.
5440 void t4_read_rss_pf_config(struct adapter *adapter, unsigned int index,
5441 u32 *valp, bool sleep_ok)
/* PF config entries are consecutive PIO registers starting at PF0_CONFIG. */
5443 t4_tp_pio_read(adapter, valp, 1, TP_RSS_PF0_CONFIG_A + index, sleep_ok);
5447 * t4_read_rss_vf_config - read VF RSS Configuration Table
5448 * @adapter: the adapter
5449 * @index: the entry in the VF RSS table to read
5450 * @vfl: where to store the returned VFL
5451 * @vfh: where to store the returned VFH
5452 * @sleep_ok: if true we may sleep while awaiting command completion
5454 * Reads the VF RSS Configuration Table at the specified index and returns
5455 * the (VFL, VFH) values found there.
5457 void t4_read_rss_vf_config(struct adapter *adapter, unsigned int index,
5458 u32 *vfl, u32 *vfh, bool sleep_ok)
5460 u32 vrt, mask, data;
/* The VF write-address field moved/widened on T6+, hence two encodings. */
5462 if (CHELSIO_CHIP_VERSION(adapter->params.chip) <= CHELSIO_T5) {
5463 mask = VFWRADDR_V(VFWRADDR_M);
5464 data = VFWRADDR_V(index);
5466 mask = T6_VFWRADDR_V(T6_VFWRADDR_M);
5467 data = T6_VFWRADDR_V(index);
5470 /* Request that the index'th VF Table values be read into VFL/VFH.
5472 vrt = t4_read_reg(adapter, TP_RSS_CONFIG_VRT_A);
5473 vrt &= ~(VFRDRG_F | VFWREN_F | KEYWREN_F | mask);
5474 vrt |= data | VFRDEN_F;
5475 t4_write_reg(adapter, TP_RSS_CONFIG_VRT_A, vrt);
5477 /* Grab the VFL/VFH values ...
5479 t4_tp_pio_read(adapter, vfl, 1, TP_RSS_VFL_CONFIG_A, sleep_ok);
5480 t4_tp_pio_read(adapter, vfh, 1, TP_RSS_VFH_CONFIG_A, sleep_ok);
5484 * t4_read_rss_pf_map - read PF RSS Map
5485 * @adapter: the adapter
5486 * @sleep_ok: if true we may sleep while awaiting command completion
5488 * Reads the PF RSS Map register and returns its value.
5490 u32 t4_read_rss_pf_map(struct adapter *adapter, bool sleep_ok)
/* NOTE(review): the 'pfmap' declaration and 'return pfmap;' are not
 * visible in this excerpt -- confirm against the full source.
 */
5494 t4_tp_pio_read(adapter, &pfmap, 1, TP_RSS_PF_MAP_A, sleep_ok);
5499 * t4_read_rss_pf_mask - read PF RSS Mask
5500 * @adapter: the adapter
5501 * @sleep_ok: if true we may sleep while awaiting command completion
5503 * Reads the PF RSS Mask register and returns its value.
5505 u32 t4_read_rss_pf_mask(struct adapter *adapter, bool sleep_ok)
/* NOTE(review): the 'pfmask' declaration and 'return pfmask;' are not
 * visible in this excerpt -- confirm against the full source.
 */
5509 t4_tp_pio_read(adapter, &pfmask, 1, TP_RSS_PF_MSK_A, sleep_ok);
5514 * t4_tp_get_tcp_stats - read TP's TCP MIB counters
5515 * @adap: the adapter
5516 * @v4: holds the TCP/IP counter values
5517 * @v6: holds the TCP/IPv6 counter values
5518 * @sleep_ok: if true we may sleep while awaiting command completion
5520 * Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
5521 * Either @v4 or @v6 may be %NULL to skip the corresponding stats.
5523 void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
5524 struct tp_tcp_stats *v6, bool sleep_ok)
/* val[] spans the contiguous MIB register window from OUT_RST through
 * RXT_SEG_LO; the STAT* helpers index into it by register name.
 */
5526 u32 val[TP_MIB_TCP_RXT_SEG_LO_A - TP_MIB_TCP_OUT_RST_A + 1];
5528 #define STAT_IDX(x) ((TP_MIB_TCP_##x##_A) - TP_MIB_TCP_OUT_RST_A)
5529 #define STAT(x) val[STAT_IDX(x)]
5530 #define STAT64(x) (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
/* NOTE(review): the 'if (v4)' / 'if (v6)' guards and the #undef of the
 * helper macros are not visible in this excerpt -- confirm with full source.
 */
5533 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5534 TP_MIB_TCP_OUT_RST_A, sleep_ok);
5535 v4->tcp_out_rsts = STAT(OUT_RST);
5536 v4->tcp_in_segs = STAT64(IN_SEG);
5537 v4->tcp_out_segs = STAT64(OUT_SEG);
5538 v4->tcp_retrans_segs = STAT64(RXT_SEG);
5541 t4_tp_mib_read(adap, val, ARRAY_SIZE(val),
5542 TP_MIB_TCP_V6OUT_RST_A, sleep_ok);
5543 v6->tcp_out_rsts = STAT(OUT_RST);
5544 v6->tcp_in_segs = STAT64(IN_SEG);
5545 v6->tcp_out_segs = STAT64(OUT_SEG);
5546 v6->tcp_retrans_segs = STAT64(RXT_SEG);
5554 * t4_tp_get_err_stats - read TP's error MIB counters
5555 * @adap: the adapter
5556 * @st: holds the counter values
5557 * @sleep_ok: if true we may sleep while awaiting command completion
5559 * Returns the values of TP's error counters.
5561 void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st,
5564 int nchan = adap->params.arch.nchan;
/* One MIB read per per-channel counter group; each group occupies nchan
 * consecutive MIB registers starting at its _0 register.
 */
5566 t4_tp_mib_read(adap, st->mac_in_errs, nchan, TP_MIB_MAC_IN_ERR_0_A,
5568 t4_tp_mib_read(adap, st->hdr_in_errs, nchan, TP_MIB_HDR_IN_ERR_0_A,
5570 t4_tp_mib_read(adap, st->tcp_in_errs, nchan, TP_MIB_TCP_IN_ERR_0_A,
5572 t4_tp_mib_read(adap, st->tnl_cong_drops, nchan,
5573 TP_MIB_TNL_CNG_DROP_0_A, sleep_ok);
5574 t4_tp_mib_read(adap, st->ofld_chan_drops, nchan,
5575 TP_MIB_OFD_CHN_DROP_0_A, sleep_ok);
5576 t4_tp_mib_read(adap, st->tnl_tx_drops, nchan, TP_MIB_TNL_DROP_0_A,
5578 t4_tp_mib_read(adap, st->ofld_vlan_drops, nchan,
5579 TP_MIB_OFD_VLN_DROP_0_A, sleep_ok);
5580 t4_tp_mib_read(adap, st->tcp6_in_errs, nchan,
5581 TP_MIB_TCP_V6IN_ERR_0_A, sleep_ok);
/* ofld_no_neigh and the register that follows it are read as a pair. */
5582 t4_tp_mib_read(adap, &st->ofld_no_neigh, 2, TP_MIB_OFD_ARP_DROP_A,
5587 * t4_tp_get_cpl_stats - read TP's CPL MIB counters
5588 * @adap: the adapter
5589 * @st: holds the counter values
5590 * @sleep_ok: if true we may sleep while awaiting command completion
5592 * Returns the values of TP's CPL counters.
5594 void t4_tp_get_cpl_stats(struct adapter *adap, struct tp_cpl_stats *st,
5597 int nchan = adap->params.arch.nchan;
/* Per-channel CPL request and response counters. */
5599 t4_tp_mib_read(adap, st->req, nchan, TP_MIB_CPL_IN_REQ_0_A, sleep_ok);
5601 t4_tp_mib_read(adap, st->rsp, nchan, TP_MIB_CPL_OUT_RSP_0_A, sleep_ok);
5605 * t4_tp_get_rdma_stats - read TP's RDMA MIB counters
5606 * @adap: the adapter
5607 * @st: holds the counter values
5608 * @sleep_ok: if true we may sleep while awaiting command completion
5610 * Returns the values of TP's RDMA counters.
5612 void t4_tp_get_rdma_stats(struct adapter *adap, struct tp_rdma_stats *st,
/* rqe_dfr_pkt and its neighbouring counter are read as one pair. */
5615 t4_tp_mib_read(adap, &st->rqe_dfr_pkt, 2, TP_MIB_RQE_DFR_PKT_A,
5620 * t4_get_fcoe_stats - read TP's FCoE MIB counters for a port
5621 * @adap: the adapter
5622 * @idx: the port index
5623 * @st: holds the counter values
5624 * @sleep_ok: if true we may sleep while awaiting command completion
5626 * Returns the values of TP's FCoE counters for the selected port.
5628 void t4_get_fcoe_stats(struct adapter *adap, unsigned int idx,
5629 struct tp_fcoe_stats *st, bool sleep_ok)
5633 t4_tp_mib_read(adap, &st->frames_ddp, 1, TP_MIB_FCOE_DDP_0_A + idx,
5636 t4_tp_mib_read(adap, &st->frames_drop, 1,
5637 TP_MIB_FCOE_DROP_0_A + idx, sleep_ok);
/* The byte counter is a HI/LO 32-bit pair per port; combine into u64. */
5639 t4_tp_mib_read(adap, val, 2, TP_MIB_FCOE_BYTE_0_HI_A + 2 * idx,
5642 st->octets_ddp = ((u64)val[0] << 32) | val[1];
5646 * t4_get_usm_stats - read TP's non-TCP DDP MIB counters
5647 * @adap: the adapter
5648 * @st: holds the counter values
5649 * @sleep_ok: if true we may sleep while awaiting command completion
5651 * Returns the values of TP's counters for non-TCP directly-placed packets.
5653 void t4_get_usm_stats(struct adapter *adap, struct tp_usm_stats *st,
/* Four consecutive MIB registers: frames, drops, and an octet HI/LO pair. */
5658 t4_tp_mib_read(adap, val, 4, TP_MIB_USM_PKTS_A, sleep_ok);
5659 st->frames = val[0];
5661 st->octets = ((u64)val[2] << 32) | val[3];
5665 * t4_read_mtu_tbl - returns the values in the HW path MTU table
5666 * @adap: the adapter
5667 * @mtus: where to store the MTU values
5668 * @mtu_log: where to store the MTU base-2 log (may be %NULL)
5670 * Reads the HW path MTU table.
5672 void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
/* MTUINDEX 0xff selects read-back mode: writing the index then reading the
 * same register returns that entry's value and width.
 */
5677 for (i = 0; i < NMTUS; ++i) {
5678 t4_write_reg(adap, TP_MTU_TABLE_A,
5679 MTUINDEX_V(0xff) | MTUVALUE_V(i));
5680 v = t4_read_reg(adap, TP_MTU_TABLE_A);
5681 mtus[i] = MTUVALUE_G(v);
5683 mtu_log[i] = MTUWIDTH_G(v);
5688 * t4_read_cong_tbl - reads the congestion control table
5689 * @adap: the adapter
5690 * @incr: where to store the alpha values
5692 * Reads the additive increments programmed into the HW congestion
5695 void t4_read_cong_tbl(struct adapter *adap, u16 incr[NMTUS][NCCTRL_WIN])
5697 unsigned int mtu, w;
/* ROWINDEX 0xffff selects read-back; the row address is (mtu << 5) | w,
 * and the low 13 bits of the read-back are the additive increment.
 */
5699 for (mtu = 0; mtu < NMTUS; ++mtu)
5700 for (w = 0; w < NCCTRL_WIN; ++w) {
5701 t4_write_reg(adap, TP_CCTRL_TABLE_A,
5702 ROWINDEX_V(0xffff) | (mtu << 5) | w);
5703 incr[mtu][w] = (u16)t4_read_reg(adap,
5704 TP_CCTRL_TABLE_A) & 0x1fff;
5709 * t4_tp_wr_bits_indirect - set/clear bits in an indirect TP register
5710 * @adap: the adapter
5711 * @addr: the indirect TP register address
5712 * @mask: specifies the field within the register to modify
5713 * @val: new value for the field
5715 * Sets a field of an indirect TP register to the given value.
5717 void t4_tp_wr_bits_indirect(struct adapter *adap, unsigned int addr,
5718 unsigned int mask, unsigned int val)
/* Read-modify-write through the TP PIO address/data pair: keep bits
 * outside @mask, substitute @val for the masked field.
 */
5720 t4_write_reg(adap, TP_PIO_ADDR_A, addr);
5721 val |= t4_read_reg(adap, TP_PIO_DATA_A) & ~mask;
5722 t4_write_reg(adap, TP_PIO_DATA_A, val);
5726 * init_cong_ctrl - initialize congestion control parameters
5727 * @a: the alpha values for congestion control
5728 * @b: the beta values for congestion control
5730 * Initialize the congestion control parameters.
5732 static void init_cong_ctrl(unsigned short *a, unsigned short *b)
/* NOTE(review): the remaining a[9..] initializer lines are not visible in
 * this excerpt (lines appear missing) -- confirm against the full source.
 */
5734 a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
5759 b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
5762 b[13] = b[14] = b[15] = b[16] = 3;
5763 b[17] = b[18] = b[19] = b[20] = b[21] = 4;
5764 b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
5769 /* The minimum additive increment value for the congestion control table */
5770 #define CC_MIN_INCR 2U
5773 * t4_load_mtus - write the MTU and congestion control HW tables
5774 * @adap: the adapter
5775 * @mtus: the values for the MTU table
5776 * @alpha: the values for the congestion control alpha parameter
5777 * @beta: the values for the congestion control beta parameter
5779 * Write the HW MTU table with the supplied MTUs and the high-speed
5780 * congestion control table with the supplied alpha, beta, and MTUs.
5781 * We write the two tables together because the additive increments
5782 * depend on the MTUs.
5784 void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
5785 const unsigned short *alpha, const unsigned short *beta)
/* Average packet counts per congestion window used to scale alpha. */
5787 static const unsigned int avg_pkts[NCCTRL_WIN] = {
5788 2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
5789 896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
5790 28672, 40960, 57344, 81920, 114688, 163840, 229376
/* NOTE(review): the rounding adjustment to log2 and parts of the inc
 * computation are not visible in this excerpt -- confirm with full source.
 */
5795 for (i = 0; i < NMTUS; ++i) {
5796 unsigned int mtu = mtus[i];
5797 unsigned int log2 = fls(mtu);
5799 if (!(mtu & ((1 << log2) >> 2))) /* round */
5801 t4_write_reg(adap, TP_MTU_TABLE_A, MTUINDEX_V(i) |
5802 MTUWIDTH_V(log2) | MTUVALUE_V(mtu));
5804 for (w = 0; w < NCCTRL_WIN; ++w) {
/* Additive increment scaled by payload (mtu - 40 byte headers),
 * clamped below by CC_MIN_INCR.
 */
5807 inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
5810 t4_write_reg(adap, TP_CCTRL_TABLE_A, (i << 21) |
5811 (w << 16) | (beta[w] << 13) | inc);
5816 /* Calculates a rate in bytes/s given the number of 256-byte units per 4K core
5817 * clocks. The formula is
5819 * bytes/s = bytes256 * 256 * ClkFreq / 4096
5821 * which is equivalent to
5823 * bytes/s = 62.5 * bytes256 * ClkFreq_ms
5825 static u64 chan_rate(struct adapter *adap, unsigned int bytes256)
5827 u64 v = bytes256 * adap->params.vpd.cclk;
/* 62.5 * v computed in integer arithmetic as 62*v + v/2. */
5829 return v * 62 + v / 2;
5833 * t4_get_chan_txrate - get the current per channel Tx rates
5834 * @adap: the adapter
5835 * @nic_rate: rates for NIC traffic
5836 * @ofld_rate: rates for offloaded traffic
5838 * Return the current Tx rates in bytes/s for NIC and offloaded traffic
5841 void t4_get_chan_txrate(struct adapter *adap, u64 *nic_rate, u64 *ofld_rate)
/* Channels 2/3 exist only on adapters with the full NCHAN channel count. */
5845 v = t4_read_reg(adap, TP_TX_TRATE_A);
5846 nic_rate[0] = chan_rate(adap, TNLRATE0_G(v));
5847 nic_rate[1] = chan_rate(adap, TNLRATE1_G(v));
5848 if (adap->params.arch.nchan == NCHAN) {
5849 nic_rate[2] = chan_rate(adap, TNLRATE2_G(v));
5850 nic_rate[3] = chan_rate(adap, TNLRATE3_G(v));
5853 v = t4_read_reg(adap, TP_TX_ORATE_A);
5854 ofld_rate[0] = chan_rate(adap, OFDRATE0_G(v));
5855 ofld_rate[1] = chan_rate(adap, OFDRATE1_G(v));
5856 if (adap->params.arch.nchan == NCHAN) {
5857 ofld_rate[2] = chan_rate(adap, OFDRATE2_G(v));
5858 ofld_rate[3] = chan_rate(adap, OFDRATE3_G(v));
5863 * t4_set_trace_filter - configure one of the tracing filters
5864 * @adap: the adapter
5865 * @tp: the desired trace filter parameters
5866 * @idx: which filter to configure
5867 * @enable: whether to enable or disable the filter
5869 * Configures one of the tracing filters available in HW. If @enable is
5870 * %0 @tp is not examined and may be %NULL. The user is responsible to
5871 * set the single/multiple trace mode by writing to MPS_TRC_CFG_A register
5873 int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
5874 int idx, int enable)
5876 int i, ofst = idx * 4;
5877 u32 data_reg, mask_reg, cfg;
5878 u32 multitrc = TRCMULTIFILTER_F;
/* Disable path: clearing MATCH_CTL_A turns the tracer off.
 * NOTE(review): the surrounding 'if (!enable)'/return and several error
 * returns are not visible in this excerpt -- confirm with the full source.
 */
5881 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5885 cfg = t4_read_reg(adap, MPS_TRC_CFG_A);
5886 if (cfg & TRCMULTIFILTER_F) {
5887 /* If multiple tracers are enabled, then maximum
5888 * capture size is 2.5KB (FIFO size of a single channel)
5889 * minus 2 flits for CPL_TRACE_PKT header.
5891 if (tp->snap_len > ((10 * 1024 / 4) - (2 * 8)))
5894 /* If multiple tracers are disabled, to avoid deadlocks
5895 * maximum packet capture size of 9600 bytes is recommended.
5896 * Also in this mode, only trace0 can be enabled and running.
5899 if (tp->snap_len > 9600 || idx)
/* Validate the remaining caller-supplied fields against HW field widths. */
5903 if (tp->port > (is_t4(adap->params.chip) ? 11 : 19) || tp->invert > 1 ||
5904 tp->skip_len > TFLENGTH_M || tp->skip_ofst > TFOFFSET_M ||
5905 tp->min_len > TFMINPKTSIZE_M)
5908 /* stop the tracer we'll be changing */
5909 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst, 0);
5911 idx *= (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A);
5912 data_reg = MPS_TRC_FILTER0_MATCH_A + idx;
5913 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + idx;
/* Program match data and (inverted) don't-care mask word by word. */
5915 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5916 t4_write_reg(adap, data_reg, tp->data[i]);
5917 t4_write_reg(adap, mask_reg, ~tp->mask[i]);
5919 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst,
5920 TFCAPTUREMAX_V(tp->snap_len) |
5921 TFMINPKTSIZE_V(tp->min_len));
/* T4 and T5+ use different field encodings in MATCH_CTL_A. */
5922 t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst,
5923 TFOFFSET_V(tp->skip_ofst) | TFLENGTH_V(tp->skip_len) |
5924 (is_t4(adap->params.chip) ?
5925 TFPORT_V(tp->port) | TFEN_F | TFINVERTMATCH_V(tp->invert) :
5926 T5_TFPORT_V(tp->port) | T5_TFEN_F |
5927 T5_TFINVERTMATCH_V(tp->invert)));
5933 * t4_get_trace_filter - query one of the tracing filters
5934 * @adap: the adapter
5935 * @tp: the current trace filter parameters
5936 * @idx: which trace filter to query
5937 * @enabled: non-zero if the filter is enabled
5939 * Returns the current settings of one of the HW tracing filters.
5941 void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
5945 int i, ofst = idx * 4;
5946 u32 data_reg, mask_reg;
5948 ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A_A + ofst);
5949 ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B_A + ofst);
/* T4 and T5+ encode enable/port/invert differently in MATCH_CTL_A. */
5951 if (is_t4(adap->params.chip)) {
5952 *enabled = !!(ctla & TFEN_F);
5953 tp->port = TFPORT_G(ctla);
5954 tp->invert = !!(ctla & TFINVERTMATCH_F);
5956 *enabled = !!(ctla & T5_TFEN_F);
5957 tp->port = T5_TFPORT_G(ctla);
5958 tp->invert = !!(ctla & T5_TFINVERTMATCH_F);
5960 tp->snap_len = TFCAPTUREMAX_G(ctlb);
5961 tp->min_len = TFMINPKTSIZE_G(ctlb);
5962 tp->skip_ofst = TFOFFSET_G(ctla);
5963 tp->skip_len = TFLENGTH_G(ctla);
/* Read back match data, undoing the inverted don't-care mask written by
 * t4_set_trace_filter and masking the data accordingly.
 */
5965 ofst = (MPS_TRC_FILTER1_MATCH_A - MPS_TRC_FILTER0_MATCH_A) * idx;
5966 data_reg = MPS_TRC_FILTER0_MATCH_A + ofst;
5967 mask_reg = MPS_TRC_FILTER0_DONT_CARE_A + ofst;
5969 for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
5970 tp->mask[i] = ~t4_read_reg(adap, mask_reg);
5971 tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
5976 * t4_pmtx_get_stats - returns the HW stats from PMTX
5977 * @adap: the adapter
5978 * @cnt: where to store the count statistics
5979 * @cycles: where to store the cycle statistics
5981 * Returns performance statistics from PMTX.
5983 void t4_pmtx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
/* Select each stat index via STAT_CONFIG, then read the count; the cycle
 * counter is a direct 64-bit read on T4 but two 32-bit indirect reads on
 * later chips.
 */
5988 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
5989 t4_write_reg(adap, PM_TX_STAT_CONFIG_A, i + 1);
5990 cnt[i] = t4_read_reg(adap, PM_TX_STAT_COUNT_A);
5991 if (is_t4(adap->params.chip)) {
5992 cycles[i] = t4_read_reg64(adap, PM_TX_STAT_LSB_A);
5994 t4_read_indirect(adap, PM_TX_DBG_CTRL_A,
5995 PM_TX_DBG_DATA_A, data, 2,
5996 PM_TX_DBG_STAT_MSB_A);
5997 cycles[i] = (((u64)data[0] << 32) | data[1]);
6003 * t4_pmrx_get_stats - returns the HW stats from PMRX
6004 * @adap: the adapter
6005 * @cnt: where to store the count statistics
6006 * @cycles: where to store the cycle statistics
6008 * Returns performance statistics from PMRX.
6010 void t4_pmrx_get_stats(struct adapter *adap, u32 cnt[], u64 cycles[])
/* Mirror of t4_pmtx_get_stats for the PMRX block: select, read count, and
 * read the 64-bit cycle counter directly (T4) or via two indirect reads.
 */
6015 for (i = 0; i < adap->params.arch.pm_stats_cnt; i++) {
6016 t4_write_reg(adap, PM_RX_STAT_CONFIG_A, i + 1);
6017 cnt[i] = t4_read_reg(adap, PM_RX_STAT_COUNT_A);
6018 if (is_t4(adap->params.chip)) {
6019 cycles[i] = t4_read_reg64(adap, PM_RX_STAT_LSB_A);
6021 t4_read_indirect(adap, PM_RX_DBG_CTRL_A,
6022 PM_RX_DBG_DATA_A, data, 2,
6023 PM_RX_DBG_STAT_MSB_A);
6024 cycles[i] = (((u64)data[0] << 32) | data[1]);
6030 * compute_mps_bg_map - compute the MPS Buffer Group Map for a Port
6031 * @adap: the adapter
6032 * @pidx: the port index
6034 * Computes and returns a bitmap indicating which MPS buffer groups are
6035 * associated with the given Port. Bit i is set if buffer group i is
6038 static inline unsigned int compute_mps_bg_map(struct adapter *adapter,
6041 unsigned int chip_version, nports;
6043 chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
6044 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
/* The mapping depends on chip generation and configured port count.
 * NOTE(review): several case labels and default arms are not visible in
 * this excerpt -- confirm against the full source before editing.
 */
6046 switch (chip_version) {
6051 case 2: return 3 << (2 * pidx);
6052 case 4: return 1 << pidx;
6058 case 2: return 1 << (2 * pidx);
/* Unsupported combination: report and let the caller cope. */
6063 dev_err(adapter->pdev_dev, "Need MPS Buffer Group Map for Chip %0x, Nports %d\n",
6064 chip_version, nports);
6070 * t4_get_mps_bg_map - return the buffer groups associated with a port
6071 * @adapter: the adapter
6072 * @pidx: the port index
6074 * Returns a bitmap indicating which MPS buffer groups are associated
6075 * with the given Port. Bit i is set if buffer group i is used by the
6078 unsigned int t4_get_mps_bg_map(struct adapter *adapter, int pidx)
6081 unsigned int nports;
6083 nports = 1 << NUMPORTS_G(t4_read_reg(adapter, MPS_CMN_CTL_A));
6084 if (pidx >= nports) {
6085 CH_WARN(adapter, "MPS Port Index %d >= Nports %d\n",
6090 /* If we've already retrieved/computed this, just return the result.
6092 mps_bg_map = adapter->params.mps_bg_map;
6093 if (mps_bg_map[pidx])
6094 return mps_bg_map[pidx];
6096 /* Newer Firmware can tell us what the MPS Buffer Group Map is.
6097 * If we're talking to such Firmware, let it tell us. If the new
6098 * API isn't supported, revert back to old hardcoded way. The value
6099 * obtained from Firmware is encoded in below format:
6101 * val = (( MPSBGMAP[Port 3] << 24 ) |
6102 * ( MPSBGMAP[Port 2] << 16 ) |
6103 * ( MPSBGMAP[Port 1] << 8 ) |
6104 * ( MPSBGMAP[Port 0] << 0 ))
6106 if (adapter->flags & FW_OK) {
6110 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
6111 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_MPSBGMAP));
/* NOTE(review): '¶m' below looks like a mis-encoded '&param' (HTML
 * entity corruption) -- restore from the original source; left as-is here.
 */
6112 ret = t4_query_params_ns(adapter, adapter->mbox, adapter->pf,
6113 0, 1, ¶m, &val);
6117 /* Store the BG Map for all of the Ports in order to
6118 * avoid more calls to the Firmware in the future.
6120 for (p = 0; p < MAX_NPORTS; p++, val >>= 8)
6121 mps_bg_map[p] = val & 0xff;
6123 return mps_bg_map[pidx];
6127 /* Either we're not talking to the Firmware or we're dealing with
6128 * older Firmware which doesn't support the new API to get the MPS
6129 * Buffer Group Map. Fall back to computing it ourselves.
6131 mps_bg_map[pidx] = compute_mps_bg_map(adapter, pidx);
6132 return mps_bg_map[pidx];
6136 * t4_get_tp_ch_map - return TP ingress channels associated with a port
6137 * @adapter: the adapter
6138 * @pidx: the port index
6140 * Returns a bitmap indicating which TP Ingress Channels are associated
6141 * with a given Port. Bit i is set if TP Ingress Channel i is used by
6144 unsigned int t4_get_tp_ch_map(struct adapter *adap, int pidx)
6146 unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip);
6147 unsigned int nports = 1 << NUMPORTS_G(t4_read_reg(adap, MPS_CMN_CTL_A));
6149 if (pidx >= nports) {
6150 dev_warn(adap->pdev_dev, "TP Port Index %d >= Nports %d\n",
/* NOTE(review): several switch labels / default arms are not visible in
 * this excerpt -- confirm against the full source before editing.
 */
6155 switch (chip_version) {
6158 /* Note that this happens to be the same values as the MPS
6159 * Buffer Group Map for these Chips. But we replicate the code
6160 * here because they're really separate concepts.
6164 case 2: return 3 << (2 * pidx);
6165 case 4: return 1 << pidx;
6172 case 2: return 1 << pidx;
/* Unsupported combination: report and let the caller cope. */
6177 dev_err(adap->pdev_dev, "Need TP Channel Map for Chip %0x, Nports %d\n",
6178 chip_version, nports);
/**
 * t4_get_port_type_description - return Port Type string description
 * @port_type: firmware Port Type enumeration
 *
 * Maps a firmware Port Type enum value to a short human-readable name.
 */
const char *t4_get_port_type_description(enum fw_port_type port_type)
	/* Name table indexed directly by the firmware enum value. */
	static const char *const port_type_description[] = {
	/* Guard against firmware Port Type values newer than this table. */
	if (port_type < ARRAY_SIZE(port_type_description))
		return port_type_description[port_type];
6220 * t4_get_port_stats_offset - collect port stats relative to a previous
6222 * @adap: The adapter
6224 * @stats: Current stats to fill
6225 * @offset: Previous stats snapshot
6227 void t4_get_port_stats_offset(struct adapter *adap, int idx,
6228 struct port_stats *stats,
6229 struct port_stats *offset)
6234 t4_get_port_stats(adap, idx, stats);
6235 for (i = 0, s = (u64 *)stats, o = (u64 *)offset;
6236 i < (sizeof(struct port_stats) / sizeof(u64));
6242 * t4_get_port_stats - collect port statistics
6243 * @adap: the adapter
6244 * @idx: the port index
6245 * @p: the stats structure to fill
6247 * Collect statistics related to the given port from HW.
6249 void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
6251 u32 bgmap = t4_get_mps_bg_map(adap, idx);
6252 u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
6254 #define GET_STAT(name) \
6255 t4_read_reg64(adap, \
6256 (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
6257 T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
6258 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6260 p->tx_octets = GET_STAT(TX_PORT_BYTES);
6261 p->tx_frames = GET_STAT(TX_PORT_FRAMES);
6262 p->tx_bcast_frames = GET_STAT(TX_PORT_BCAST);
6263 p->tx_mcast_frames = GET_STAT(TX_PORT_MCAST);
6264 p->tx_ucast_frames = GET_STAT(TX_PORT_UCAST);
6265 p->tx_error_frames = GET_STAT(TX_PORT_ERROR);
6266 p->tx_frames_64 = GET_STAT(TX_PORT_64B);
6267 p->tx_frames_65_127 = GET_STAT(TX_PORT_65B_127B);
6268 p->tx_frames_128_255 = GET_STAT(TX_PORT_128B_255B);
6269 p->tx_frames_256_511 = GET_STAT(TX_PORT_256B_511B);
6270 p->tx_frames_512_1023 = GET_STAT(TX_PORT_512B_1023B);
6271 p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
6272 p->tx_frames_1519_max = GET_STAT(TX_PORT_1519B_MAX);
6273 p->tx_drop = GET_STAT(TX_PORT_DROP);
6274 p->tx_pause = GET_STAT(TX_PORT_PAUSE);
6275 p->tx_ppp0 = GET_STAT(TX_PORT_PPP0);
6276 p->tx_ppp1 = GET_STAT(TX_PORT_PPP1);
6277 p->tx_ppp2 = GET_STAT(TX_PORT_PPP2);
6278 p->tx_ppp3 = GET_STAT(TX_PORT_PPP3);
6279 p->tx_ppp4 = GET_STAT(TX_PORT_PPP4);
6280 p->tx_ppp5 = GET_STAT(TX_PORT_PPP5);
6281 p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
6282 p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
6284 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6285 if (stat_ctl & COUNTPAUSESTATTX_F)
6286 p->tx_frames_64 -= p->tx_pause;
6287 if (stat_ctl & COUNTPAUSEMCTX_F)
6288 p->tx_mcast_frames -= p->tx_pause;
6290 p->rx_octets = GET_STAT(RX_PORT_BYTES);
6291 p->rx_frames = GET_STAT(RX_PORT_FRAMES);
6292 p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
6293 p->rx_mcast_frames = GET_STAT(RX_PORT_MCAST);
6294 p->rx_ucast_frames = GET_STAT(RX_PORT_UCAST);
6295 p->rx_too_long = GET_STAT(RX_PORT_MTU_ERROR);
6296 p->rx_jabber = GET_STAT(RX_PORT_MTU_CRC_ERROR);
6297 p->rx_fcs_err = GET_STAT(RX_PORT_CRC_ERROR);
6298 p->rx_len_err = GET_STAT(RX_PORT_LEN_ERROR);
6299 p->rx_symbol_err = GET_STAT(RX_PORT_SYM_ERROR);
6300 p->rx_runt = GET_STAT(RX_PORT_LESS_64B);
6301 p->rx_frames_64 = GET_STAT(RX_PORT_64B);
6302 p->rx_frames_65_127 = GET_STAT(RX_PORT_65B_127B);
6303 p->rx_frames_128_255 = GET_STAT(RX_PORT_128B_255B);
6304 p->rx_frames_256_511 = GET_STAT(RX_PORT_256B_511B);
6305 p->rx_frames_512_1023 = GET_STAT(RX_PORT_512B_1023B);
6306 p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
6307 p->rx_frames_1519_max = GET_STAT(RX_PORT_1519B_MAX);
6308 p->rx_pause = GET_STAT(RX_PORT_PAUSE);
6309 p->rx_ppp0 = GET_STAT(RX_PORT_PPP0);
6310 p->rx_ppp1 = GET_STAT(RX_PORT_PPP1);
6311 p->rx_ppp2 = GET_STAT(RX_PORT_PPP2);
6312 p->rx_ppp3 = GET_STAT(RX_PORT_PPP3);
6313 p->rx_ppp4 = GET_STAT(RX_PORT_PPP4);
6314 p->rx_ppp5 = GET_STAT(RX_PORT_PPP5);
6315 p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
6316 p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
6318 if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
6319 if (stat_ctl & COUNTPAUSESTATRX_F)
6320 p->rx_frames_64 -= p->rx_pause;
6321 if (stat_ctl & COUNTPAUSEMCRX_F)
6322 p->rx_mcast_frames -= p->rx_pause;
6325 p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
6326 p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
6327 p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
6328 p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
6329 p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
6330 p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
6331 p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
6332 p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
6339 * t4_get_lb_stats - collect loopback port statistics
6340 * @adap: the adapter
6341 * @idx: the loopback port index
6342 * @p: the stats structure to fill
6344 * Return HW statistics for the given loopback port.
6346 void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
6348 u32 bgmap = t4_get_mps_bg_map(adap, idx);
6350 #define GET_STAT(name) \
6351 t4_read_reg64(adap, \
6352 (is_t4(adap->params.chip) ? \
6353 PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L) : \
6354 T5_PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L)))
6355 #define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
6357 p->octets = GET_STAT(BYTES);
6358 p->frames = GET_STAT(FRAMES);
6359 p->bcast_frames = GET_STAT(BCAST);
6360 p->mcast_frames = GET_STAT(MCAST);
6361 p->ucast_frames = GET_STAT(UCAST);
6362 p->error_frames = GET_STAT(ERROR);
6364 p->frames_64 = GET_STAT(64B);
6365 p->frames_65_127 = GET_STAT(65B_127B);
6366 p->frames_128_255 = GET_STAT(128B_255B);
6367 p->frames_256_511 = GET_STAT(256B_511B);
6368 p->frames_512_1023 = GET_STAT(512B_1023B);
6369 p->frames_1024_1518 = GET_STAT(1024B_1518B);
6370 p->frames_1519_max = GET_STAT(1519B_MAX);
6371 p->drop = GET_STAT(DROP_FRAMES);
6373 p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
6374 p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
6375 p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
6376 p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
6377 p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
6378 p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
6379 p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
6380 p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
6386 /* t4_mk_filtdelwr - create a delete filter WR
6387 * @ftid: the filter ID
6388 * @wr: the filter work request to populate
6389 * @qid: ingress queue to receive the delete notification
6391 * Creates a filter work request to delete the supplied filter. If @qid is
6392 * negative the delete notification is suppressed.
6394 void t4_mk_filtdelwr(unsigned int ftid, struct fw_filter_wr *wr, int qid)
6396 memset(wr, 0, sizeof(*wr));
6397 wr->op_pkd = cpu_to_be32(FW_WR_OP_V(FW_FILTER_WR));
6398 wr->len16_pkd = cpu_to_be32(FW_WR_LEN16_V(sizeof(*wr) / 16));
6399 wr->tid_to_iq = cpu_to_be32(FW_FILTER_WR_TID_V(ftid) |
6400 FW_FILTER_WR_NOREPLY_V(qid < 0));
6401 wr->del_filter_to_l2tix = cpu_to_be32(FW_FILTER_WR_DEL_FILTER_F);
6403 wr->rx_chan_rx_rpl_iq =
6404 cpu_to_be16(FW_FILTER_WR_RX_RPL_IQ_V(qid));
/* Fill in the common header of firmware command @var: the FW_<cmd>_CMD
 * opcode, the REQUEST flag, the READ/WRITE direction flag and the command
 * length in 16-byte units.  Used by the simple FW command wrappers below.
 */
#define INIT_CMD(var, cmd, rd_wr) do { \
	(var).op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_##cmd##_CMD) | \
					FW_CMD_REQUEST_F | \
					FW_CMD_##rd_wr##_F); \
	(var).retval_len16 = cpu_to_be32(FW_LEN16(var)); \
6414 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
6418 struct fw_ldst_cmd c;
6420 memset(&c, 0, sizeof(c));
6421 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_FIRMWARE);
6422 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6426 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6427 c.u.addrval.addr = cpu_to_be32(addr);
6428 c.u.addrval.val = cpu_to_be32(val);
6430 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6434 * t4_mdio_rd - read a PHY register through MDIO
6435 * @adap: the adapter
6436 * @mbox: mailbox to use for the FW command
6437 * @phy_addr: the PHY address
6438 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6439 * @reg: the register to read
6440 * @valp: where to store the value
6442 * Issues a FW command through the given mailbox to read a PHY register.
6444 int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6445 unsigned int mmd, unsigned int reg, u16 *valp)
6449 struct fw_ldst_cmd c;
6451 memset(&c, 0, sizeof(c));
6452 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6453 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6454 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6456 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6457 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6458 FW_LDST_CMD_MMD_V(mmd));
6459 c.u.mdio.raddr = cpu_to_be16(reg);
6461 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
6463 *valp = be16_to_cpu(c.u.mdio.rval);
6468 * t4_mdio_wr - write a PHY register through MDIO
6469 * @adap: the adapter
6470 * @mbox: mailbox to use for the FW command
6471 * @phy_addr: the PHY address
6472 * @mmd: the PHY MMD to access (0 for clause 22 PHYs)
6473 * @reg: the register to write
6474 * @valp: value to write
6476 * Issues a FW command through the given mailbox to write a PHY register.
6478 int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
6479 unsigned int mmd, unsigned int reg, u16 val)
6482 struct fw_ldst_cmd c;
6484 memset(&c, 0, sizeof(c));
6485 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_MDIO);
6486 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6487 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
6489 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6490 c.u.mdio.paddr_mmd = cpu_to_be16(FW_LDST_CMD_PADDR_V(phy_addr) |
6491 FW_LDST_CMD_MMD_V(mmd));
6492 c.u.mdio.raddr = cpu_to_be16(reg);
6493 c.u.mdio.rval = cpu_to_be16(val);
6495 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/**
 * t4_sge_decode_idma_state - decode the idma state
 * @adapter: the adapter
 * @state: the state idma is stuck in
 *
 * Logs the symbolic name of a stuck SGE Ingress DMA state-machine state
 * (the name tables differ per chip generation) plus a few SGE debug
 * registers to help diagnose the hang.
 */
void t4_sge_decode_idma_state(struct adapter *adapter, int state)
	/* T4 state names, indexed by the raw state value. */
	static const char * const t4_decode[] = {
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL_PREP",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
		"IDMA_FL_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATAFL_DONE",
		"IDMA_FL_REQ_HEADERFL_DONE",
	/* T5 state names. */
	static const char * const t5_decode[] = {
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_SEND_FIFO_TO_IMSG",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	/* T6 state names. */
	static const char * const t6_decode[] = {
		"IDMA_PUSH_MORE_CPL_FIFO",
		"IDMA_PUSH_CPL_MSG_HEADER_TO_FIFO",
		"IDMA_SGEFLRFLUSH_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PCIEHDR",
		"IDMA_PHYSADDR_SEND_PAYLOAD_FIRST",
		"IDMA_PHYSADDR_SEND_PAYLOAD",
		"IDMA_FL_REQ_DATA_FL",
		"IDMA_FL_DROP_SEND_INC",
		"IDMA_FL_H_REQ_HEADER_FL",
		"IDMA_FL_H_SEND_PCIEHDR",
		"IDMA_FL_H_PUSH_CPL_FIFO",
		"IDMA_FL_H_SEND_CPL",
		"IDMA_FL_H_SEND_IP_HDR_FIRST",
		"IDMA_FL_H_SEND_IP_HDR",
		"IDMA_FL_H_REQ_NEXT_HEADER_FL",
		"IDMA_FL_H_SEND_NEXT_PCIEHDR",
		"IDMA_FL_H_SEND_IP_HDR_PADDING",
		"IDMA_FL_D_SEND_PCIEHDR",
		"IDMA_FL_D_SEND_CPL_AND_IP_HDR",
		"IDMA_FL_D_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_PCIEHDR",
		"IDMA_FL_PUSH_CPL_FIFO",
		"IDMA_FL_SEND_PAYLOAD_FIRST",
		"IDMA_FL_SEND_PAYLOAD",
		"IDMA_FL_REQ_NEXT_DATA_FL",
		"IDMA_FL_SEND_NEXT_PCIEHDR",
		"IDMA_FL_SEND_PADDING",
		"IDMA_FL_SEND_COMPLETION_TO_IMSG",
	/* SGE debug registers dumped alongside the decoded state. */
	static const u32 sge_regs[] = {
		SGE_DEBUG_DATA_LOW_INDEX_2_A,
		SGE_DEBUG_DATA_LOW_INDEX_3_A,
		SGE_DEBUG_DATA_HIGH_INDEX_10_A,
	const char **sge_idma_decode;
	int sge_idma_decode_nstates;
	unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip);
	/* Select the right set of decode strings to dump depending on the
	 * adapter chip type.
	 */
	switch (chip_version) {
		sge_idma_decode = (const char **)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		sge_idma_decode = (const char **)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
		sge_idma_decode = (const char **)t6_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t6_decode);
		dev_err(adapter->pdev_dev,
			"Unsupported chip version %d\n", chip_version);
	/* NOTE(review): this is_t4() fallback unconditionally re-assigns the
	 * decode table chosen by the switch above (a T6 adapter would end up
	 * using t5_decode).  Looks like leftover code from before the switch
	 * was introduced -- confirm and consider removing.
	 */
	if (is_t4(adapter->params.chip)) {
		sge_idma_decode = (const char **)t4_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t4_decode);
		sge_idma_decode = (const char **)t5_decode;
		sge_idma_decode_nstates = ARRAY_SIZE(t5_decode);
	/* Print the symbolic state if it's in range, else the raw number. */
	if (state < sge_idma_decode_nstates)
		CH_WARN(adapter, "idma state %s\n", sge_idma_decode[state]);
		CH_WARN(adapter, "idma state %d unknown\n", state);
	/* Dump the SGE debug registers that capture the IDMA state. */
	for (i = 0; i < ARRAY_SIZE(sge_regs); i++)
		CH_WARN(adapter, "SGE register %#x value %#x\n",
			sge_regs[i], t4_read_reg(adapter, sge_regs[i]));
6664 * t4_sge_ctxt_flush - flush the SGE context cache
6665 * @adap: the adapter
6666 * @mbox: mailbox to use for the FW command
6667 * @ctx_type: Egress or Ingress
6669 * Issues a FW command through the given mailbox to flush the
6670 * SGE context cache.
6672 int t4_sge_ctxt_flush(struct adapter *adap, unsigned int mbox, int ctxt_type)
6676 struct fw_ldst_cmd c;
6678 memset(&c, 0, sizeof(c));
6679 ldst_addrspace = FW_LDST_CMD_ADDRSPACE_V(ctxt_type == CTXT_EGRESS ?
6680 FW_LDST_ADDRSPC_SGE_EGRC :
6681 FW_LDST_ADDRSPC_SGE_INGC);
6682 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
6683 FW_CMD_REQUEST_F | FW_CMD_READ_F |
6685 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
6686 c.u.idctxt.msg_ctxtflush = cpu_to_be32(FW_LDST_CMD_CTXTFLUSH_F);
6688 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
/**
 * t4_fw_hello - establish communication with FW
 * @adap: the adapter
 * @mbox: mailbox to use for the FW command
 * @evt_mbox: mailbox to receive async FW events
 * @master: specifies the caller's willingness to be the device master
 * @state: returns the current device state (if non-NULL)
 *
 * Issues a command to establish communication with FW.  Returns either
 * an error (negative integer) or the mailbox of the Master PF.
 */
int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
		enum dev_master master, enum dev_state *state)
	struct fw_hello_cmd c;
	unsigned int master_mbox;
	/* Number of times we'll re-issue HELLO on a busy/timeout result. */
	int retries = FW_CMD_HELLO_RETRIES;
	memset(&c, 0, sizeof(c));
	INIT_CMD(c, HELLO, WRITE);
	/* Encode the mastership preference, the async-event mailbox, and a
	 * request to clear any stale initialization state.
	 */
	c.err_to_clearinit = cpu_to_be32(
		FW_HELLO_CMD_MASTERDIS_V(master == MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE_V(master == MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER_V(master == MASTER_MUST ?
					mbox : FW_HELLO_CMD_MBMASTER_M) |
		FW_HELLO_CMD_MBASYNCNOT_V(evt_mbox) |
		FW_HELLO_CMD_STAGE_V(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT_F);
	/*
	 * Issue the HELLO command to the firmware.  If it's not successful
	 * but indicates that we got a "busy" or "timeout" condition, retry
	 * the HELLO until we exhaust our retry limit.  If we do exceed our
	 * retry limit, check to see if the firmware left us any error
	 * information and report that if so.
	 */
	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
	if ((ret == -EBUSY || ret == -ETIMEDOUT) && retries-- > 0)
	/* Retries exhausted: if the firmware latched an error, report it. */
	if (t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_ERR_F)
		t4_report_fw_error(adap);
	/* Decode the reply: Master PF identity and device state. */
	v = be32_to_cpu(c.err_to_clearinit);
	master_mbox = FW_HELLO_CMD_MBMASTER_G(v);
	if (v & FW_HELLO_CMD_ERR_F)
		*state = DEV_STATE_ERR;
	else if (v & FW_HELLO_CMD_INIT_F)
		*state = DEV_STATE_INIT;
		*state = DEV_STATE_UNINIT;
	/*
	 * If we're not the Master PF then we need to wait around for the
	 * Master PF Driver to finish setting up the adapter.
	 *
	 * Note that we also do this wait if we're a non-Master-capable PF and
	 * there is no current Master PF; a Master PF may show up momentarily
	 * and we wouldn't want to fail pointlessly.  (This can happen when an
	 * OS loads lots of different drivers rapidly at the same time).  In
	 * this case, the Master PF returned by the firmware will be
	 * PCIE_FW_MASTER_M so the test below will work ...
	 */
	if ((v & (FW_HELLO_CMD_ERR_F|FW_HELLO_CMD_INIT_F)) == 0 &&
	    master_mbox != mbox) {
		int waiting = FW_CMD_HELLO_TIMEOUT;
		/*
		 * Wait for the firmware to either indicate an error or
		 * initialized state.  If we see either of these we bail out
		 * and report the issue to the caller.  If we exhaust the
		 * "hello timeout" and we haven't exhausted our retries, try
		 * again.  Otherwise bail with a timeout error.
		 */
			/*
			 * If neither Error nor Initialized are indicated
			 * by the firmware keep waiting till we exhaust our
			 * timeout ... and then retry if we haven't exhausted
			 * our retries ...
			 */
			pcie_fw = t4_read_reg(adap, PCIE_FW_A);
			if (!(pcie_fw & (PCIE_FW_ERR_F|PCIE_FW_INIT_F))) {
			/*
			 * We either have an Error or Initialized condition;
			 * report errors preferentially.
			 */
			if (pcie_fw & PCIE_FW_ERR_F)
				*state = DEV_STATE_ERR;
			else if (pcie_fw & PCIE_FW_INIT_F)
				*state = DEV_STATE_INIT;
			/*
			 * If we arrived before a Master PF was selected and
			 * there's now a valid Master PF, grab its identity
			 * for our caller.
			 */
			if (master_mbox == PCIE_FW_MASTER_M &&
			    (pcie_fw & PCIE_FW_MASTER_VLD_F))
				master_mbox = PCIE_FW_MASTER_G(pcie_fw);
6823 * t4_fw_bye - end communication with FW
6824 * @adap: the adapter
6825 * @mbox: mailbox to use for the FW command
6827 * Issues a command to terminate communication with FW.
6829 int t4_fw_bye(struct adapter *adap, unsigned int mbox)
6831 struct fw_bye_cmd c;
6833 memset(&c, 0, sizeof(c));
6834 INIT_CMD(c, BYE, WRITE);
6835 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6839 * t4_init_cmd - ask FW to initialize the device
6840 * @adap: the adapter
6841 * @mbox: mailbox to use for the FW command
6843 * Issues a command to FW to partially initialize the device. This
6844 * performs initialization that generally doesn't depend on user input.
6846 int t4_early_init(struct adapter *adap, unsigned int mbox)
6848 struct fw_initialize_cmd c;
6850 memset(&c, 0, sizeof(c));
6851 INIT_CMD(c, INITIALIZE, WRITE);
6852 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6856 * t4_fw_reset - issue a reset to FW
6857 * @adap: the adapter
6858 * @mbox: mailbox to use for the FW command
6859 * @reset: specifies the type of reset to perform
6861 * Issues a reset command of the specified type to FW.
6863 int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
6865 struct fw_reset_cmd c;
6867 memset(&c, 0, sizeof(c));
6868 INIT_CMD(c, RESET, WRITE);
6869 c.val = cpu_to_be32(reset);
6870 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6874 * t4_fw_halt - issue a reset/halt to FW and put uP into RESET
6875 * @adap: the adapter
6876 * @mbox: mailbox to use for the FW RESET command (if desired)
6877 * @force: force uP into RESET even if FW RESET command fails
6879 * Issues a RESET command to firmware (if desired) with a HALT indication
6880 * and then puts the microprocessor into RESET state. The RESET command
6881 * will only be issued if a legitimate mailbox is provided (mbox <=
6882 * PCIE_FW_MASTER_M).
6884 * This is generally used in order for the host to safely manipulate the
6885 * adapter without fear of conflicting with whatever the firmware might
6886 * be doing. The only way out of this state is to RESTART the firmware
6889 static int t4_fw_halt(struct adapter *adap, unsigned int mbox, int force)
6894 * If a legitimate mailbox is provided, issue a RESET command
6895 * with a HALT indication.
6897 if (mbox <= PCIE_FW_MASTER_M) {
6898 struct fw_reset_cmd c;
6900 memset(&c, 0, sizeof(c));
6901 INIT_CMD(c, RESET, WRITE);
6902 c.val = cpu_to_be32(PIORST_F | PIORSTMODE_F);
6903 c.halt_pkd = cpu_to_be32(FW_RESET_CMD_HALT_F);
6904 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
6908 * Normally we won't complete the operation if the firmware RESET
6909 * command fails but if our caller insists we'll go ahead and put the
6910 * uP into RESET. This can be useful if the firmware is hung or even
6911 * missing ... We'll have to take the risk of putting the uP into
6912 * RESET without the cooperation of firmware in that case.
6914 * We also force the firmware's HALT flag to be on in case we bypassed
6915 * the firmware RESET command above or we're dealing with old firmware
6916 * which doesn't have the HALT capability. This will serve as a flag
6917 * for the incoming firmware to know that it's coming out of a HALT
6918 * rather than a RESET ... if it's new enough to understand that ...
6920 if (ret == 0 || force) {
6921 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, UPCRST_F);
6922 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F,
6927 * And we always return the result of the firmware RESET command
6928 * even when we force the uP into RESET ...
6934 * t4_fw_restart - restart the firmware by taking the uP out of RESET
6935 * @adap: the adapter
6936 * @reset: if we want to do a RESET to restart things
6938 * Restart firmware previously halted by t4_fw_halt(). On successful
6939 * return the previous PF Master remains as the new PF Master and there
6940 * is no need to issue a new HELLO command, etc.
6942 * We do this in two ways:
6944 * 1. If we're dealing with newer firmware we'll simply want to take
6945 * the chip's microprocessor out of RESET. This will cause the
6946 * firmware to start up from its start vector. And then we'll loop
6947 * until the firmware indicates it's started again (PCIE_FW.HALT
6948 * reset to 0) or we timeout.
6950 * 2. If we're dealing with older firmware then we'll need to RESET
6951 * the chip since older firmware won't recognize the PCIE_FW.HALT
6952 * flag and automatically RESET itself on startup.
6954 static int t4_fw_restart(struct adapter *adap, unsigned int mbox, int reset)
6958 * Since we're directing the RESET instead of the firmware
6959 * doing it automatically, we need to clear the PCIE_FW.HALT
6962 t4_set_reg_field(adap, PCIE_FW_A, PCIE_FW_HALT_F, 0);
6965 * If we've been given a valid mailbox, first try to get the
6966 * firmware to do the RESET. If that works, great and we can
6967 * return success. Otherwise, if we haven't been given a
6968 * valid mailbox or the RESET command failed, fall back to
6969 * hitting the chip with a hammer.
6971 if (mbox <= PCIE_FW_MASTER_M) {
6972 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
6974 if (t4_fw_reset(adap, mbox,
6975 PIORST_F | PIORSTMODE_F) == 0)
6979 t4_write_reg(adap, PL_RST_A, PIORST_F | PIORSTMODE_F);
6984 t4_set_reg_field(adap, CIM_BOOT_CFG_A, UPCRST_F, 0);
6985 for (ms = 0; ms < FW_CMD_MAX_TIMEOUT; ) {
6986 if (!(t4_read_reg(adap, PCIE_FW_A) & PCIE_FW_HALT_F))
6997 * t4_fw_upgrade - perform all of the steps necessary to upgrade FW
6998 * @adap: the adapter
6999 * @mbox: mailbox to use for the FW RESET command (if desired)
7000 * @fw_data: the firmware image to write
7002 * @force: force upgrade even if firmware doesn't cooperate
7004 * Perform all of the steps necessary for upgrading an adapter's
7005 * firmware image. Normally this requires the cooperation of the
7006 * existing firmware in order to halt all existing activities
7007 * but if an invalid mailbox token is passed in we skip that step
7008 * (though we'll still put the adapter microprocessor into RESET in
7011 * On successful return the new firmware will have been loaded and
7012 * the adapter will have been fully RESET losing all previous setup
7013 * state. On unsuccessful return the adapter may be completely hosed ...
7014 * positive errno indicates that the adapter is ~probably~ intact, a
7015 * negative errno indicates that things are looking bad ...
7017 int t4_fw_upgrade(struct adapter *adap, unsigned int mbox,
7018 const u8 *fw_data, unsigned int size, int force)
7020 const struct fw_hdr *fw_hdr = (const struct fw_hdr *)fw_data;
7023 if (!t4_fw_matches_chip(adap, fw_hdr))
7026 /* Disable FW_OK flag so that mbox commands with FW_OK flag set
7027 * wont be sent when we are flashing FW.
7029 adap->flags &= ~FW_OK;
7031 ret = t4_fw_halt(adap, mbox, force);
7032 if (ret < 0 && !force)
7035 ret = t4_load_fw(adap, fw_data, size);
7040 * If there was a Firmware Configuration File stored in FLASH,
7041 * there's a good chance that it won't be compatible with the new
7042 * Firmware. In order to prevent difficult to diagnose adapter
7043 * initialization issues, we clear out the Firmware Configuration File
7044 * portion of the FLASH . The user will need to re-FLASH a new
7045 * Firmware Configuration File which is compatible with the new
7046 * Firmware if that's desired.
7048 (void)t4_load_cfg(adap, NULL, 0);
7051 * Older versions of the firmware don't understand the new
7052 * PCIE_FW.HALT flag and so won't know to perform a RESET when they
7053 * restart. So for newly loaded older firmware we'll have to do the
7054 * RESET for it so it starts up on a clean slate. We can tell if
7055 * the newly loaded firmware will handle this right by checking
7056 * its header flags to see if it advertises the capability.
7058 reset = ((be32_to_cpu(fw_hdr->flags) & FW_HDR_FLAGS_RESET_HALT) == 0);
7059 ret = t4_fw_restart(adap, mbox, reset);
7061 /* Grab potentially new Firmware Device Log parameters so we can see
7062 * how healthy the new Firmware is. It's okay to contact the new
7063 * Firmware for these parameters even though, as far as it's
7064 * concerned, we've never said "HELLO" to it ...
7066 (void)t4_init_devlog_params(adap);
7068 adap->flags |= FW_OK;
7073 * t4_fl_pkt_align - return the fl packet alignment
7074 * @adap: the adapter
7076 * T4 has a single field to specify the packing and padding boundary.
7077 * T5 onwards has separate fields for this and hence the alignment for
7078 * next packet offset is maximum of these two.
7081 int t4_fl_pkt_align(struct adapter *adap)
7083 u32 sge_control, sge_control2;
7084 unsigned int ingpadboundary, ingpackboundary, fl_align, ingpad_shift;
7086 sge_control = t4_read_reg(adap, SGE_CONTROL_A);
7088 /* T4 uses a single control field to specify both the PCIe Padding and
7089 * Packing Boundary. T5 introduced the ability to specify these
7090 * separately. The actual Ingress Packet Data alignment boundary
7091 * within Packed Buffer Mode is the maximum of these two
7092 * specifications. (Note that it makes no real practical sense to
7093 * have the Pading Boudary be larger than the Packing Boundary but you
7094 * could set the chip up that way and, in fact, legacy T4 code would
7095 * end doing this because it would initialize the Padding Boundary and
7096 * leave the Packing Boundary initialized to 0 (16 bytes).)
7097 * Padding Boundary values in T6 starts from 8B,
7098 * where as it is 32B for T4 and T5.
7100 if (CHELSIO_CHIP_VERSION(adap->params.chip) <= CHELSIO_T5)
7101 ingpad_shift = INGPADBOUNDARY_SHIFT_X;
7103 ingpad_shift = T6_INGPADBOUNDARY_SHIFT_X;
7105 ingpadboundary = 1 << (INGPADBOUNDARY_G(sge_control) + ingpad_shift);
7107 fl_align = ingpadboundary;
7108 if (!is_t4(adap->params.chip)) {
7109 /* T5 has a weird interpretation of one of the PCIe Packing
7110 * Boundary values. No idea why ...
7112 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
7113 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
7114 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
7115 ingpackboundary = 16;
7117 ingpackboundary = 1 << (ingpackboundary +
7118 INGPACKBOUNDARY_SHIFT_X);
7120 fl_align = max(ingpadboundary, ingpackboundary);
7126 * t4_fixup_host_params - fix up host-dependent parameters
7127 * @adap: the adapter
7128 * @page_size: the host's Base Page Size
7129 * @cache_line_size: the host's Cache Line Size
7131 * Various registers in T4 contain values which are dependent on the
7132 * host's Base Page and Cache Line Sizes. This function will fix all of
7133 * those registers with the appropriate values as passed in ...
7135 int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
7136 unsigned int cache_line_size)
7138 unsigned int page_shift = fls(page_size) - 1;
7139 unsigned int sge_hps = page_shift - 10;
7140 unsigned int stat_len = cache_line_size > 64 ? 128 : 64;
7141 unsigned int fl_align = cache_line_size < 32 ? 32 : cache_line_size;
7142 unsigned int fl_align_log = fls(fl_align) - 1;
7144 t4_write_reg(adap, SGE_HOST_PAGE_SIZE_A,
7145 HOSTPAGESIZEPF0_V(sge_hps) |
7146 HOSTPAGESIZEPF1_V(sge_hps) |
7147 HOSTPAGESIZEPF2_V(sge_hps) |
7148 HOSTPAGESIZEPF3_V(sge_hps) |
7149 HOSTPAGESIZEPF4_V(sge_hps) |
7150 HOSTPAGESIZEPF5_V(sge_hps) |
7151 HOSTPAGESIZEPF6_V(sge_hps) |
7152 HOSTPAGESIZEPF7_V(sge_hps));
7154 if (is_t4(adap->params.chip)) {
7155 t4_set_reg_field(adap, SGE_CONTROL_A,
7156 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7157 EGRSTATUSPAGESIZE_F,
7158 INGPADBOUNDARY_V(fl_align_log -
7159 INGPADBOUNDARY_SHIFT_X) |
7160 EGRSTATUSPAGESIZE_V(stat_len != 64));
7162 unsigned int pack_align;
7163 unsigned int ingpad, ingpack;
7164 unsigned int pcie_cap;
7166 /* T5 introduced the separation of the Free List Padding and
7167 * Packing Boundaries. Thus, we can select a smaller Padding
7168 * Boundary to avoid uselessly chewing up PCIe Link and Memory
7169 * Bandwidth, and use a Packing Boundary which is large enough
7170 * to avoid false sharing between CPUs, etc.
7172 * For the PCI Link, the smaller the Padding Boundary the
7173 * better. For the Memory Controller, a smaller Padding
7174 * Boundary is better until we cross under the Memory Line
7175 * Size (the minimum unit of transfer to/from Memory). If we
7176 * have a Padding Boundary which is smaller than the Memory
7177 * Line Size, that'll involve a Read-Modify-Write cycle on the
7178 * Memory Controller which is never good.
7181 /* We want the Packing Boundary to be based on the Cache Line
7182 * Size in order to help avoid False Sharing performance
7183 * issues between CPUs, etc. We also want the Packing
7184 * Boundary to incorporate the PCI-E Maximum Payload Size. We
7185 * get best performance when the Packing Boundary is a
7186 * multiple of the Maximum Payload Size.
7188 pack_align = fl_align;
7189 pcie_cap = pci_find_capability(adap->pdev, PCI_CAP_ID_EXP);
7191 unsigned int mps, mps_log;
7194 /* The PCIe Device Control Maximum Payload Size field
7195 * [bits 7:5] encodes sizes as powers of 2 starting at
7198 pci_read_config_word(adap->pdev,
7199 pcie_cap + PCI_EXP_DEVCTL,
7201 mps_log = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5) + 7;
7203 if (mps > pack_align)
7207 /* N.B. T5/T6 have a crazy special interpretation of the "0"
7208 * value for the Packing Boundary. This corresponds to 16
7209 * bytes instead of the expected 32 bytes. So if we want 32
7210 * bytes, the best we can really do is 64 bytes ...
7212 if (pack_align <= 16) {
7213 ingpack = INGPACKBOUNDARY_16B_X;
7215 } else if (pack_align == 32) {
7216 ingpack = INGPACKBOUNDARY_64B_X;
7219 unsigned int pack_align_log = fls(pack_align) - 1;
7221 ingpack = pack_align_log - INGPACKBOUNDARY_SHIFT_X;
7222 fl_align = pack_align;
7225 /* Use the smallest Ingress Padding which isn't smaller than
7226 * the Memory Controller Read/Write Size. We'll take that as
7227 * being 8 bytes since we don't know of any system with a
7228 * wider Memory Controller Bus Width.
7230 if (is_t5(adap->params.chip))
7231 ingpad = INGPADBOUNDARY_32B_X;
7233 ingpad = T6_INGPADBOUNDARY_8B_X;
7235 t4_set_reg_field(adap, SGE_CONTROL_A,
7236 INGPADBOUNDARY_V(INGPADBOUNDARY_M) |
7237 EGRSTATUSPAGESIZE_F,
7238 INGPADBOUNDARY_V(ingpad) |
7239 EGRSTATUSPAGESIZE_V(stat_len != 64));
7240 t4_set_reg_field(adap, SGE_CONTROL2_A,
7241 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
7242 INGPACKBOUNDARY_V(ingpack));
7245 * Adjust various SGE Free List Host Buffer Sizes.
7247 * This is something of a crock since we're using fixed indices into
7248 * the array which are also known by the sge.c code and the T4
7249 * Firmware Configuration File. We need to come up with a much better
7250 * approach to managing this array. For now, the first four entries
7255 * 2: Buffer size corresponding to 1500 byte MTU (unpacked mode)
7256 * 3: Buffer size corresponding to 9000 byte MTU (unpacked mode)
7258 * For the single-MTU buffers in unpacked mode we need to include
7259 * space for the SGE Control Packet Shift, 14 byte Ethernet header,
7260 * possible 4 byte VLAN tag, all rounded up to the next Ingress Packet
7261 * Padding boundary. All of these are accommodated in the Factory
7262 * Default Firmware Configuration File but we need to adjust it for
7263 * this host's cache line size.
7265 t4_write_reg(adap, SGE_FL_BUFFER_SIZE0_A, page_size);
7266 t4_write_reg(adap, SGE_FL_BUFFER_SIZE2_A,
7267 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE2_A) + fl_align-1)
7269 t4_write_reg(adap, SGE_FL_BUFFER_SIZE3_A,
7270 (t4_read_reg(adap, SGE_FL_BUFFER_SIZE3_A) + fl_align-1)
7273 t4_write_reg(adap, ULP_RX_TDDP_PSZ_A, HPZ0_V(page_shift - 12));
7279 * t4_fw_initialize - ask FW to initialize the device
7280 * @adap: the adapter
7281 * @mbox: mailbox to use for the FW command
7283 * Issues a command to FW to partially initialize the device. This
7284 * performs initialization that generally doesn't depend on user input.
7286 int t4_fw_initialize(struct adapter *adap, unsigned int mbox)
7288 struct fw_initialize_cmd c;
7290 memset(&c, 0, sizeof(c));
7291 INIT_CMD(c, INITIALIZE, WRITE);
7292 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7296 * t4_query_params_rw - query FW or device parameters
7297 * @adap: the adapter
7298 * @mbox: mailbox to use for the FW command
7301 * @nparams: the number of parameters
7302 * @params: the parameter names
7303 * @val: the parameter values
7304 * @rw: Write and read flag
7305 * @sleep_ok: if true, we may sleep awaiting mbox cmd completion
7307 * Reads the value of FW or device parameters. Up to 7 parameters can be
7310 int t4_query_params_rw(struct adapter *adap, unsigned int mbox, unsigned int pf,
7311 unsigned int vf, unsigned int nparams, const u32 *params,
7312 u32 *val, int rw, bool sleep_ok)
7315 struct fw_params_cmd c;
7316 __be32 *p = &c.param[0].mnem;
7321 memset(&c, 0, sizeof(c));
7322 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7323 FW_CMD_REQUEST_F | FW_CMD_READ_F |
7324 FW_PARAMS_CMD_PFN_V(pf) |
7325 FW_PARAMS_CMD_VFN_V(vf));
7326 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7328 for (i = 0; i < nparams; i++) {
7329 *p++ = cpu_to_be32(*params++);
7331 *p = cpu_to_be32(*(val + i));
7335 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7337 for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
7338 *val++ = be32_to_cpu(*p);
7342 int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7343 unsigned int vf, unsigned int nparams, const u32 *params,
7346 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7350 int t4_query_params_ns(struct adapter *adap, unsigned int mbox, unsigned int pf,
7351 unsigned int vf, unsigned int nparams, const u32 *params,
7354 return t4_query_params_rw(adap, mbox, pf, vf, nparams, params, val, 0,
7359 * t4_set_params_timeout - sets FW or device parameters
7360 * @adap: the adapter
7361 * @mbox: mailbox to use for the FW command
7364 * @nparams: the number of parameters
7365 * @params: the parameter names
7366 * @val: the parameter values
7367 * @timeout: the timeout time
7369 * Sets the value of FW or device parameters. Up to 7 parameters can be
7370 * specified at once.
7372 int t4_set_params_timeout(struct adapter *adap, unsigned int mbox,
7373 unsigned int pf, unsigned int vf,
7374 unsigned int nparams, const u32 *params,
7375 const u32 *val, int timeout)
7377 struct fw_params_cmd c;
7378 __be32 *p = &c.param[0].mnem;
7383 memset(&c, 0, sizeof(c));
7384 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PARAMS_CMD) |
7385 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7386 FW_PARAMS_CMD_PFN_V(pf) |
7387 FW_PARAMS_CMD_VFN_V(vf));
7388 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7391 *p++ = cpu_to_be32(*params++);
7392 *p++ = cpu_to_be32(*val++);
7395 return t4_wr_mbox_timeout(adap, mbox, &c, sizeof(c), NULL, timeout);
7399 * t4_set_params - sets FW or device parameters
7400 * @adap: the adapter
7401 * @mbox: mailbox to use for the FW command
7404 * @nparams: the number of parameters
7405 * @params: the parameter names
7406 * @val: the parameter values
7408 * Sets the value of FW or device parameters. Up to 7 parameters can be
7409 * specified at once.
7411 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
7412 unsigned int vf, unsigned int nparams, const u32 *params,
7415 return t4_set_params_timeout(adap, mbox, pf, vf, nparams, params, val,
7416 FW_CMD_MAX_TIMEOUT);
7420 * t4_cfg_pfvf - configure PF/VF resource limits
7421 * @adap: the adapter
7422 * @mbox: mailbox to use for the FW command
7423 * @pf: the PF being configured
7424 * @vf: the VF being configured
7425 * @txq: the max number of egress queues
7426 * @txq_eth_ctrl: the max number of egress Ethernet or control queues
7427 * @rxqi: the max number of interrupt-capable ingress queues
7428 * @rxq: the max number of interruptless ingress queues
7429 * @tc: the PCI traffic class
7430 * @vi: the max number of virtual interfaces
7431 * @cmask: the channel access rights mask for the PF/VF
7432 * @pmask: the port access rights mask for the PF/VF
7433 * @nexact: the maximum number of exact MPS filters
7434 * @rcaps: read capabilities
7435 * @wxcaps: write/execute capabilities
7437 * Configures resource limits and capabilities for a physical or virtual
7440 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
7441 unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
7442 unsigned int rxqi, unsigned int rxq, unsigned int tc,
7443 unsigned int vi, unsigned int cmask, unsigned int pmask,
7444 unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
7446 struct fw_pfvf_cmd c;
7448 memset(&c, 0, sizeof(c));
7449 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_PFVF_CMD) | FW_CMD_REQUEST_F |
7450 FW_CMD_WRITE_F | FW_PFVF_CMD_PFN_V(pf) |
7451 FW_PFVF_CMD_VFN_V(vf));
7452 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7453 c.niqflint_niq = cpu_to_be32(FW_PFVF_CMD_NIQFLINT_V(rxqi) |
7454 FW_PFVF_CMD_NIQ_V(rxq));
7455 c.type_to_neq = cpu_to_be32(FW_PFVF_CMD_CMASK_V(cmask) |
7456 FW_PFVF_CMD_PMASK_V(pmask) |
7457 FW_PFVF_CMD_NEQ_V(txq));
7458 c.tc_to_nexactf = cpu_to_be32(FW_PFVF_CMD_TC_V(tc) |
7459 FW_PFVF_CMD_NVI_V(vi) |
7460 FW_PFVF_CMD_NEXACTF_V(nexact));
7461 c.r_caps_to_nethctrl = cpu_to_be32(FW_PFVF_CMD_R_CAPS_V(rcaps) |
7462 FW_PFVF_CMD_WX_CAPS_V(wxcaps) |
7463 FW_PFVF_CMD_NETHCTRL_V(txq_eth_ctrl));
7464 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
7468 * t4_alloc_vi - allocate a virtual interface
7469 * @adap: the adapter
7470 * @mbox: mailbox to use for the FW command
7471 * @port: physical port associated with the VI
7472 * @pf: the PF owning the VI
7473 * @vf: the VF owning the VI
7474 * @nmac: number of MAC addresses needed (1 to 5)
7475 * @mac: the MAC addresses of the VI
7476 * @rss_size: size of RSS table slice associated with this VI
7478 * Allocates a virtual interface for the given physical port. If @mac is
7479 * not %NULL it contains the MAC addresses of the VI as assigned by FW.
7480 * @mac should be large enough to hold @nmac Ethernet addresses, they are
7481 * stored consecutively so the space needed is @nmac * 6 bytes.
7482 * Returns a negative error number or the non-negative VI id.
7484 int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
7485 unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
7486 unsigned int *rss_size)
7491 memset(&c, 0, sizeof(c));
7492 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) | FW_CMD_REQUEST_F |
7493 FW_CMD_WRITE_F | FW_CMD_EXEC_F |
7494 FW_VI_CMD_PFN_V(pf) | FW_VI_CMD_VFN_V(vf));
7495 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_ALLOC_F | FW_LEN16(c));
7496 c.portid_pkd = FW_VI_CMD_PORTID_V(port);
7499 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7504 memcpy(mac, c.mac, sizeof(c.mac));
7507 memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
7510 memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
7513 memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
7516 memcpy(mac + 6, c.nmac0, sizeof(c.nmac0));
7520 *rss_size = FW_VI_CMD_RSSSIZE_G(be16_to_cpu(c.rsssize_pkd));
7521 return FW_VI_CMD_VIID_G(be16_to_cpu(c.type_viid));
7525 * t4_free_vi - free a virtual interface
7526 * @adap: the adapter
7527 * @mbox: mailbox to use for the FW command
7528 * @pf: the PF owning the VI
7529 * @vf: the VF owning the VI
7530 * @viid: virtual interface identifiler
7532 * Free a previously allocated virtual interface.
7534 int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
7535 unsigned int vf, unsigned int viid)
7539 memset(&c, 0, sizeof(c));
7540 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_VI_CMD) |
7543 FW_VI_CMD_PFN_V(pf) |
7544 FW_VI_CMD_VFN_V(vf));
7545 c.alloc_to_len16 = cpu_to_be32(FW_VI_CMD_FREE_F | FW_LEN16(c));
7546 c.type_viid = cpu_to_be16(FW_VI_CMD_VIID_V(viid));
7548 return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
7552 * t4_set_rxmode - set Rx properties of a virtual interface
7553 * @adap: the adapter
7554 * @mbox: mailbox to use for the FW command
7556 * @mtu: the new MTU or -1
7557 * @promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
7558 * @all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
7559 * @bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
7560 * @vlanex: 1 to enable HW VLAN extraction, 0 to disable it, -1 no change
7561 * @sleep_ok: if true we may sleep while awaiting command completion
7563 * Sets Rx properties of a virtual interface.
7565 int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
7566 int mtu, int promisc, int all_multi, int bcast, int vlanex,
7569 struct fw_vi_rxmode_cmd c;
7571 /* convert to FW values */
7573 mtu = FW_RXMODE_MTU_NO_CHG;
7575 promisc = FW_VI_RXMODE_CMD_PROMISCEN_M;
7577 all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_M;
7579 bcast = FW_VI_RXMODE_CMD_BROADCASTEN_M;
7581 vlanex = FW_VI_RXMODE_CMD_VLANEXEN_M;
7583 memset(&c, 0, sizeof(c));
7584 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_RXMODE_CMD) |
7585 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7586 FW_VI_RXMODE_CMD_VIID_V(viid));
7587 c.retval_len16 = cpu_to_be32(FW_LEN16(c));
7589 cpu_to_be32(FW_VI_RXMODE_CMD_MTU_V(mtu) |
7590 FW_VI_RXMODE_CMD_PROMISCEN_V(promisc) |
7591 FW_VI_RXMODE_CMD_ALLMULTIEN_V(all_multi) |
7592 FW_VI_RXMODE_CMD_BROADCASTEN_V(bcast) |
7593 FW_VI_RXMODE_CMD_VLANEXEN_V(vlanex));
7594 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
7598 * t4_free_encap_mac_filt - frees MPS entry at given index
7599 * @adap: the adapter
7601 * @idx: index of MPS entry to be freed
7602 * @sleep_ok: call is allowed to sleep
7604 * Frees the MPS entry at supplied index
7606 * Returns a negative error number or zero on success
7608 int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
7609 int idx, bool sleep_ok)
7611 struct fw_vi_mac_exact *p;
7612 u8 addr[] = {0, 0, 0, 0, 0, 0};
7613 struct fw_vi_mac_cmd c;
7617 memset(&c, 0, sizeof(c));
7618 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7619 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7621 FW_VI_MAC_CMD_VIID_V(viid));
7622 exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC);
7623 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7627 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7628 FW_VI_MAC_CMD_IDX_V(idx));
7629 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7630 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7635 * t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
7636 * @adap: the adapter
7638 * @addr: the MAC address
7640 * @idx: index of the entry in mps tcam
7641 * @lookup_type: MAC address for inner (1) or outer (0) header
7642 * @port_id: the port index
7643 * @sleep_ok: call is allowed to sleep
7645 * Removes the mac entry at the specified index using raw mac interface.
7647 * Returns a negative error number on failure.
7649 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
7650 const u8 *addr, const u8 *mask, unsigned int idx,
7651 u8 lookup_type, u8 port_id, bool sleep_ok)
7653 struct fw_vi_mac_cmd c;
7654 struct fw_vi_mac_raw *p = &c.u.raw;
7657 memset(&c, 0, sizeof(c));
7658 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7659 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7661 FW_VI_MAC_CMD_VIID_V(viid));
7662 val = FW_CMD_LEN16_V(1) |
7663 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7664 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7665 FW_CMD_LEN16_V(val));
7667 p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx) |
7668 FW_VI_MAC_ID_BASED_FREE);
7670 /* Lookup Type. Outer header: 0, Inner header: 1 */
7671 p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7672 DATAPORTNUM_V(port_id));
7673 /* Lookup mask and port mask */
7674 p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7675 DATAPORTNUM_V(DATAPORTNUM_M));
7677 /* Copy the address and the mask */
7678 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7679 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7681 return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7685 * t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
7686 * @adap: the adapter
7688 * @mac: the MAC address
7690 * @vni: the VNI id for the tunnel protocol
7691 * @vni_mask: mask for the VNI id
7692 * @dip_hit: to enable DIP match for the MPS entry
7693 * @lookup_type: MAC address for inner (1) or outer (0) header
7694 * @sleep_ok: call is allowed to sleep
7696 * Allocates an MPS entry with specified MAC address and VNI value.
7698 * Returns a negative error number or the allocated index for this mac.
7700 int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
7701 const u8 *addr, const u8 *mask, unsigned int vni,
7702 unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
7705 struct fw_vi_mac_cmd c;
7706 struct fw_vi_mac_vni *p = c.u.exact_vni;
7710 memset(&c, 0, sizeof(c));
7711 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7712 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7713 FW_VI_MAC_CMD_VIID_V(viid));
7714 val = FW_CMD_LEN16_V(1) |
7715 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI);
7716 c.freemacs_to_len16 = cpu_to_be32(val);
7717 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7718 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
7719 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7720 memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
7722 p->lookup_type_to_vni =
7723 cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) |
7724 FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) |
7725 FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type));
7726 p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask));
7727 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7729 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
7734 * t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
7735 * @adap: the adapter
7737 * @mac: the MAC address
7739 * @idx: index at which to add this entry
7740 * @port_id: the port index
7741 * @lookup_type: MAC address for inner (1) or outer (0) header
7742 * @sleep_ok: call is allowed to sleep
7744 * Adds the mac entry at the specified index using raw mac interface.
7746 * Returns a negative error number or the allocated index for this mac.
7748 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
7749 const u8 *addr, const u8 *mask, unsigned int idx,
7750 u8 lookup_type, u8 port_id, bool sleep_ok)
7753 struct fw_vi_mac_cmd c;
7754 struct fw_vi_mac_raw *p = &c.u.raw;
7757 memset(&c, 0, sizeof(c));
7758 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7759 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7760 FW_VI_MAC_CMD_VIID_V(viid));
7761 val = FW_CMD_LEN16_V(1) |
7762 FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_RAW);
7763 c.freemacs_to_len16 = cpu_to_be32(val);
7765 /* Specify that this is an inner mac address */
7766 p->raw_idx_pkd = cpu_to_be32(FW_VI_MAC_CMD_RAW_IDX_V(idx));
7768 /* Lookup Type. Outer header: 0, Inner header: 1 */
7769 p->data0_pkd = cpu_to_be32(DATALKPTYPE_V(lookup_type) |
7770 DATAPORTNUM_V(port_id));
7771 /* Lookup mask and port mask */
7772 p->data0m_pkd = cpu_to_be64(DATALKPTYPE_V(DATALKPTYPE_M) |
7773 DATAPORTNUM_V(DATAPORTNUM_M));
7775 /* Copy the address and the mask */
7776 memcpy((u8 *)&p->data1[0] + 2, addr, ETH_ALEN);
7777 memcpy((u8 *)&p->data1m[0] + 2, mask, ETH_ALEN);
7779 ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
7781 ret = FW_VI_MAC_CMD_RAW_IDX_G(be32_to_cpu(p->raw_idx_pkd));
7790 * t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
7791 * @adap: the adapter
7792 * @mbox: mailbox to use for the FW command
7794 * @free: if true any existing filters for this VI id are first removed
7795 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7796 * @addr: the MAC address(es)
7797 * @idx: where to store the index of each allocated filter
7798 * @hash: pointer to hash address filter bitmap
7799 * @sleep_ok: call is allowed to sleep
7801 * Allocates an exact-match filter for each of the supplied addresses and
7802 * sets it to the corresponding address. If @idx is not %NULL it should
7803 * have at least @naddr entries, each of which will be set to the index of
7804 * the filter allocated for the corresponding MAC address. If a filter
7805 * could not be allocated for an address its index is set to 0xffff.
7806 * If @hash is not %NULL addresses that fail to allocate an exact filter
7807 * are hashed and update the hash filter bitmap pointed at by @hash.
7809 * Returns a negative error number or the number of filters allocated.
7811 int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
7812 unsigned int viid, bool free, unsigned int naddr,
7813 const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
7815 int offset, ret = 0;
7816 struct fw_vi_mac_cmd c;
7817 unsigned int nfilters = 0;
7818 unsigned int max_naddr = adap->params.arch.mps_tcam_size;
7819 unsigned int rem = naddr;
7821 if (naddr > max_naddr)
7824 for (offset = 0; offset < naddr ; /**/) {
7825 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact) ?
7826 rem : ARRAY_SIZE(c.u.exact));
7827 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7828 u.exact[fw_naddr]), 16);
7829 struct fw_vi_mac_exact *p;
7832 memset(&c, 0, sizeof(c));
7833 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7836 FW_CMD_EXEC_V(free) |
7837 FW_VI_MAC_CMD_VIID_V(viid));
7838 c.freemacs_to_len16 =
7839 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(free) |
7840 FW_CMD_LEN16_V(len16));
7842 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7844 cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7845 FW_VI_MAC_CMD_IDX_V(
7846 FW_VI_MAC_ADD_MAC));
7847 memcpy(p->macaddr, addr[offset + i],
7848 sizeof(p->macaddr));
7851 /* It's okay if we run out of space in our MAC address arena.
7852 * Some of the addresses we submit may get stored so we need
7853 * to run through the reply to see what the results were ...
7855 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7856 if (ret && ret != -FW_ENOMEM)
7859 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7860 u16 index = FW_VI_MAC_CMD_IDX_G(
7861 be16_to_cpu(p->valid_to_idx));
7864 idx[offset + i] = (index >= max_naddr ?
7866 if (index < max_naddr)
7870 hash_mac_addr(addr[offset + i]));
7878 if (ret == 0 || ret == -FW_ENOMEM)
7884 * t4_free_mac_filt - frees exact-match filters of given MAC addresses
7885 * @adap: the adapter
7886 * @mbox: mailbox to use for the FW command
7888 * @naddr: the number of MAC addresses to allocate filters for (up to 7)
7889 * @addr: the MAC address(es)
7890 * @sleep_ok: call is allowed to sleep
7892 * Frees the exact-match filter for each of the supplied addresses
7894 * Returns a negative error number or the number of filters freed.
7896 int t4_free_mac_filt(struct adapter *adap, unsigned int mbox,
7897 unsigned int viid, unsigned int naddr,
7898 const u8 **addr, bool sleep_ok)
7900 int offset, ret = 0;
7901 struct fw_vi_mac_cmd c;
7902 unsigned int nfilters = 0;
7903 unsigned int max_naddr = is_t4(adap->params.chip) ?
7904 NUM_MPS_CLS_SRAM_L_INSTANCES :
7905 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
7906 unsigned int rem = naddr;
7908 if (naddr > max_naddr)
7911 for (offset = 0; offset < (int)naddr ; /**/) {
7912 unsigned int fw_naddr = (rem < ARRAY_SIZE(c.u.exact)
7914 : ARRAY_SIZE(c.u.exact));
7915 size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
7916 u.exact[fw_naddr]), 16);
7917 struct fw_vi_mac_exact *p;
7920 memset(&c, 0, sizeof(c));
7921 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7925 FW_VI_MAC_CMD_VIID_V(viid));
7926 c.freemacs_to_len16 =
7927 cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
7928 FW_CMD_LEN16_V(len16));
7930 for (i = 0, p = c.u.exact; i < (int)fw_naddr; i++, p++) {
7931 p->valid_to_idx = cpu_to_be16(
7932 FW_VI_MAC_CMD_VALID_F |
7933 FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_MAC_BASED_FREE));
7934 memcpy(p->macaddr, addr[offset+i], sizeof(p->macaddr));
7937 ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
7941 for (i = 0, p = c.u.exact; i < fw_naddr; i++, p++) {
7942 u16 index = FW_VI_MAC_CMD_IDX_G(
7943 be16_to_cpu(p->valid_to_idx));
7945 if (index < max_naddr)
7959 * t4_change_mac - modifies the exact-match filter for a MAC address
7960 * @adap: the adapter
7961 * @mbox: mailbox to use for the FW command
7963 * @idx: index of existing filter for old value of MAC address, or -1
7964 * @addr: the new MAC address value
7965 * @persist: whether a new MAC allocation should be persistent
7966 * @add_smt: if true also add the address to the HW SMT
7968 * Modifies an exact-match filter and sets it to the new MAC address.
7969 * Note that in general it is not possible to modify the value of a given
7970 * filter so the generic way to modify an address filter is to free the one
7971 * being used by the old address value and allocate a new filter for the
7972 * new address value. @idx can be -1 if the address is a new addition.
7974 * Returns a negative error number or the index of the filter with the new
7977 int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
7978 int idx, const u8 *addr, bool persist, bool add_smt)
7981 struct fw_vi_mac_cmd c;
7982 struct fw_vi_mac_exact *p = c.u.exact;
7983 unsigned int max_mac_addr = adap->params.arch.mps_tcam_size;
7985 if (idx < 0) /* new allocation */
7986 idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
7987 mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
7989 memset(&c, 0, sizeof(c));
7990 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
7991 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
7992 FW_VI_MAC_CMD_VIID_V(viid));
7993 c.freemacs_to_len16 = cpu_to_be32(FW_CMD_LEN16_V(1));
7994 p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
7995 FW_VI_MAC_CMD_SMAC_RESULT_V(mode) |
7996 FW_VI_MAC_CMD_IDX_V(idx));
7997 memcpy(p->macaddr, addr, sizeof(p->macaddr));
7999 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
8001 ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
8002 if (ret >= max_mac_addr)
8009 * t4_set_addr_hash - program the MAC inexact-match hash filter
8010 * @adap: the adapter
8011 * @mbox: mailbox to use for the FW command
8013 * @ucast: whether the hash filter should also match unicast addresses
8014 * @vec: the value to be written to the hash filter
8015 * @sleep_ok: call is allowed to sleep
8017 * Sets the 64-bit inexact-match hash filter for a virtual interface.
8019 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
8020 bool ucast, u64 vec, bool sleep_ok)
8022 struct fw_vi_mac_cmd c;
8024 memset(&c, 0, sizeof(c));
8025 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
8026 FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
8027 FW_VI_ENABLE_CMD_VIID_V(viid));
8028 c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_HASHVECEN_F |
8029 FW_VI_MAC_CMD_HASHUNIEN_V(ucast) |
8031 c.u.hash.hashvec = cpu_to_be64(vec);
8032 return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
8036 * t4_enable_vi_params - enable/disable a virtual interface
8037 * @adap: the adapter
8038 * @mbox: mailbox to use for the FW command
8040 * @rx_en: 1=enable Rx, 0=disable Rx
8041 * @tx_en: 1=enable Tx, 0=disable Tx
8042 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8044 * Enables/disables a virtual interface. Note that setting DCB Enable
8045 * only makes sense when enabling a Virtual Interface ...
8047 int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
8048 unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
8050 struct fw_vi_enable_cmd c;
8052 memset(&c, 0, sizeof(c));
8053 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
8054 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8055 FW_VI_ENABLE_CMD_VIID_V(viid));
8056 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_IEN_V(rx_en) |
8057 FW_VI_ENABLE_CMD_EEN_V(tx_en) |
8058 FW_VI_ENABLE_CMD_DCB_INFO_V(dcb_en) |
8060 return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
8064 * t4_enable_vi - enable/disable a virtual interface
8065 * @adap: the adapter
8066 * @mbox: mailbox to use for the FW command
8068 * @rx_en: 1=enable Rx, 0=disable Rx
8069 * @tx_en: 1=enable Tx, 0=disable Tx
8071 * Enables/disables a virtual interface.
8073 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
8074 bool rx_en, bool tx_en)
8076 return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
8080 * t4_enable_pi_params - enable/disable a Port's Virtual Interface
8081 * @adap: the adapter
8082 * @mbox: mailbox to use for the FW command
8083 * @pi: the Port Information structure
8084 * @rx_en: 1=enable Rx, 0=disable Rx
8085 * @tx_en: 1=enable Tx, 0=disable Tx
8086 * @dcb_en: 1=enable delivery of Data Center Bridging messages.
8088 * Enables/disables a Port's Virtual Interface. Note that setting DCB
8089 * Enable only makes sense when enabling a Virtual Interface ...
8090 * If the Virtual Interface enable/disable operation is successful,
8091 * we notify the OS-specific code of a potential Link Status change
8092 * via the OS Contract API t4_os_link_changed().
8094 int t4_enable_pi_params(struct adapter *adap, unsigned int mbox,
8095 struct port_info *pi,
8096 bool rx_en, bool tx_en, bool dcb_en)
8098 int ret = t4_enable_vi_params(adap, mbox, pi->viid,
8099 rx_en, tx_en, dcb_en);
8102 t4_os_link_changed(adap, pi->port_id,
8103 rx_en && tx_en && pi->link_cfg.link_ok);
8108 * t4_identify_port - identify a VI's port by blinking its LED
8109 * @adap: the adapter
8110 * @mbox: mailbox to use for the FW command
8112 * @nblinks: how many times to blink LED at 2.5 Hz
8114 * Identifies a VI's port by blinking its LED.
8116 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
8117 unsigned int nblinks)
8119 struct fw_vi_enable_cmd c;
8121 memset(&c, 0, sizeof(c));
8122 c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_ENABLE_CMD) |
8123 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8124 FW_VI_ENABLE_CMD_VIID_V(viid));
8125 c.ien_to_len16 = cpu_to_be32(FW_VI_ENABLE_CMD_LED_F | FW_LEN16(c));
8126 c.blinkdur = cpu_to_be16(nblinks);
8127 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8131 * t4_iq_stop - stop an ingress queue and its FLs
8132 * @adap: the adapter
8133 * @mbox: mailbox to use for the FW command
8134 * @pf: the PF owning the queues
8135 * @vf: the VF owning the queues
8136 * @iqtype: the ingress queue type (FW_IQ_TYPE_FL_INT_CAP, etc.)
8137 * @iqid: ingress queue id
8138 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8139 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8141 * Stops an ingress queue and its associated FLs, if any. This causes
8142 * any current or future data/messages destined for these queues to be
8145 int t4_iq_stop(struct adapter *adap, unsigned int mbox, unsigned int pf,
8146 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8147 unsigned int fl0id, unsigned int fl1id)
8151 memset(&c, 0, sizeof(c));
8152 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
8153 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
8154 FW_IQ_CMD_VFN_V(vf));
8155 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_IQSTOP_F | FW_LEN16(c));
8156 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
8157 c.iqid = cpu_to_be16(iqid);
8158 c.fl0id = cpu_to_be16(fl0id);
8159 c.fl1id = cpu_to_be16(fl1id);
8160 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8164 * t4_iq_free - free an ingress queue and its FLs
8165 * @adap: the adapter
8166 * @mbox: mailbox to use for the FW command
8167 * @pf: the PF owning the queues
8168 * @vf: the VF owning the queues
8169 * @iqtype: the ingress queue type
8170 * @iqid: ingress queue id
8171 * @fl0id: FL0 queue id or 0xffff if no attached FL0
8172 * @fl1id: FL1 queue id or 0xffff if no attached FL1
8174 * Frees an ingress queue and its associated FLs, if any.
8176 int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8177 unsigned int vf, unsigned int iqtype, unsigned int iqid,
8178 unsigned int fl0id, unsigned int fl1id)
8182 memset(&c, 0, sizeof(c));
8183 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_IQ_CMD) | FW_CMD_REQUEST_F |
8184 FW_CMD_EXEC_F | FW_IQ_CMD_PFN_V(pf) |
8185 FW_IQ_CMD_VFN_V(vf));
8186 c.alloc_to_len16 = cpu_to_be32(FW_IQ_CMD_FREE_F | FW_LEN16(c));
8187 c.type_to_iqandstindex = cpu_to_be32(FW_IQ_CMD_TYPE_V(iqtype));
8188 c.iqid = cpu_to_be16(iqid);
8189 c.fl0id = cpu_to_be16(fl0id);
8190 c.fl1id = cpu_to_be16(fl1id);
8191 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8195 * t4_eth_eq_free - free an Ethernet egress queue
8196 * @adap: the adapter
8197 * @mbox: mailbox to use for the FW command
8198 * @pf: the PF owning the queue
8199 * @vf: the VF owning the queue
8200 * @eqid: egress queue id
8202 * Frees an Ethernet egress queue.
8204 int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8205 unsigned int vf, unsigned int eqid)
8207 struct fw_eq_eth_cmd c;
8209 memset(&c, 0, sizeof(c));
8210 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_ETH_CMD) |
8211 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8212 FW_EQ_ETH_CMD_PFN_V(pf) |
8213 FW_EQ_ETH_CMD_VFN_V(vf));
8214 c.alloc_to_len16 = cpu_to_be32(FW_EQ_ETH_CMD_FREE_F | FW_LEN16(c));
8215 c.eqid_pkd = cpu_to_be32(FW_EQ_ETH_CMD_EQID_V(eqid));
8216 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8220 * t4_ctrl_eq_free - free a control egress queue
8221 * @adap: the adapter
8222 * @mbox: mailbox to use for the FW command
8223 * @pf: the PF owning the queue
8224 * @vf: the VF owning the queue
8225 * @eqid: egress queue id
8227 * Frees a control egress queue.
8229 int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8230 unsigned int vf, unsigned int eqid)
8232 struct fw_eq_ctrl_cmd c;
8234 memset(&c, 0, sizeof(c));
8235 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_CTRL_CMD) |
8236 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8237 FW_EQ_CTRL_CMD_PFN_V(pf) |
8238 FW_EQ_CTRL_CMD_VFN_V(vf));
8239 c.alloc_to_len16 = cpu_to_be32(FW_EQ_CTRL_CMD_FREE_F | FW_LEN16(c));
8240 c.cmpliqid_eqid = cpu_to_be32(FW_EQ_CTRL_CMD_EQID_V(eqid));
8241 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
8245 * t4_ofld_eq_free - free an offload egress queue
8246 * @adap: the adapter
8247 * @mbox: mailbox to use for the FW command
8248 * @pf: the PF owning the queue
8249 * @vf: the VF owning the queue
8250 * @eqid: egress queue id
8252 * Frees a control egress queue.
8254 int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
8255 unsigned int vf, unsigned int eqid)
8257 struct fw_eq_ofld_cmd c;
8259 memset(&c, 0, sizeof(c));
8260 c.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_EQ_OFLD_CMD) |
8261 FW_CMD_REQUEST_F | FW_CMD_EXEC_F |
8262 FW_EQ_OFLD_CMD_PFN_V(pf) |
8263 FW_EQ_OFLD_CMD_VFN_V(vf));
8264 c.alloc_to_len16 = cpu_to_be32(FW_EQ_OFLD_CMD_FREE_F | FW_LEN16(c));
8265 c.eqid_pkd = cpu_to_be32(FW_EQ_OFLD_CMD_EQID_V(eqid));
8266 return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
/**
 *	t4_link_down_rc_str - return a string for a Link Down Reason Code
 *	@link_down_rc: Link Down Reason Code
 *
 *	Returns a string representation of the Link Down Reason Code.
 */
static const char *t4_link_down_rc_str(unsigned char link_down_rc)
{
	/* Indexed by the firmware's Link Down Reason Code. */
	static const char * const reason[] = {
		"Link Down",
		"Remote Fault",
		"Auto-negotiation Failure",
		"Reserved",
		"Insufficient Airflow",
		"Unable To Determine Reason",
		"No RX Signal Detected",
		"Reserved",
	};

	/* Out-of-range codes get a catch-all string rather than an OOB read. */
	return (link_down_rc < ARRAY_SIZE(reason)
		? reason[link_down_rc]
		: "Bad Reason Code");
}
8296 * Return the highest speed set in the port capabilities, in Mb/s.
8298 static unsigned int fwcap_to_speed(fw_port_cap32_t caps)
8300 #define TEST_SPEED_RETURN(__caps_speed, __speed) \
8302 if (caps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8306 TEST_SPEED_RETURN(400G, 400000);
8307 TEST_SPEED_RETURN(200G, 200000);
8308 TEST_SPEED_RETURN(100G, 100000);
8309 TEST_SPEED_RETURN(50G, 50000);
8310 TEST_SPEED_RETURN(40G, 40000);
8311 TEST_SPEED_RETURN(25G, 25000);
8312 TEST_SPEED_RETURN(10G, 10000);
8313 TEST_SPEED_RETURN(1G, 1000);
8314 TEST_SPEED_RETURN(100M, 100);
8316 #undef TEST_SPEED_RETURN
8322 * fwcap_to_fwspeed - return highest speed in Port Capabilities
8323 * @acaps: advertised Port Capabilities
8325 * Get the highest speed for the port from the advertised Port
8326 * Capabilities. It will be either the highest speed from the list of
8327 * speeds or whatever user has set using ethtool.
8329 static fw_port_cap32_t fwcap_to_fwspeed(fw_port_cap32_t acaps)
8331 #define TEST_SPEED_RETURN(__caps_speed) \
8333 if (acaps & FW_PORT_CAP32_SPEED_##__caps_speed) \
8334 return FW_PORT_CAP32_SPEED_##__caps_speed; \
8337 TEST_SPEED_RETURN(400G);
8338 TEST_SPEED_RETURN(200G);
8339 TEST_SPEED_RETURN(100G);
8340 TEST_SPEED_RETURN(50G);
8341 TEST_SPEED_RETURN(40G);
8342 TEST_SPEED_RETURN(25G);
8343 TEST_SPEED_RETURN(10G);
8344 TEST_SPEED_RETURN(1G);
8345 TEST_SPEED_RETURN(100M);
8347 #undef TEST_SPEED_RETURN
8353 * lstatus_to_fwcap - translate old lstatus to 32-bit Port Capabilities
8354 * @lstatus: old FW_PORT_ACTION_GET_PORT_INFO lstatus value
8356 * Translates old FW_PORT_ACTION_GET_PORT_INFO lstatus field into new
8357 * 32-bit Port Capabilities value.
8359 static fw_port_cap32_t lstatus_to_fwcap(u32 lstatus)
8361 fw_port_cap32_t linkattr = 0;
8363 /* Unfortunately the format of the Link Status in the old
8364 * 16-bit Port Information message isn't the same as the
8365 * 16-bit Port Capabilities bitfield used everywhere else ...
8367 if (lstatus & FW_PORT_CMD_RXPAUSE_F)
8368 linkattr |= FW_PORT_CAP32_FC_RX;
8369 if (lstatus & FW_PORT_CMD_TXPAUSE_F)
8370 linkattr |= FW_PORT_CAP32_FC_TX;
8371 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
8372 linkattr |= FW_PORT_CAP32_SPEED_100M;
8373 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
8374 linkattr |= FW_PORT_CAP32_SPEED_1G;
8375 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
8376 linkattr |= FW_PORT_CAP32_SPEED_10G;
8377 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_25G))
8378 linkattr |= FW_PORT_CAP32_SPEED_25G;
8379 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
8380 linkattr |= FW_PORT_CAP32_SPEED_40G;
8381 if (lstatus & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100G))
8382 linkattr |= FW_PORT_CAP32_SPEED_100G;
8388 * t4_handle_get_port_info - process a FW reply message
8389 * @pi: the port info
8390 * @rpl: start of the FW message
8392 * Processes a GET_PORT_INFO FW reply message.
8394 void t4_handle_get_port_info(struct port_info *pi, const __be64 *rpl)
8396 const struct fw_port_cmd *cmd = (const void *)rpl;
8397 int action = FW_PORT_CMD_ACTION_G(be32_to_cpu(cmd->action_to_len16));
8398 struct adapter *adapter = pi->adapter;
8399 struct link_config *lc = &pi->link_cfg;
8400 int link_ok, linkdnrc;
8401 enum fw_port_type port_type;
8402 enum fw_port_module_type mod_type;
8403 unsigned int speed, fc, fec;
8404 fw_port_cap32_t pcaps, acaps, lpacaps, linkattr;
8406 /* Extract the various fields from the Port Information message.
8409 case FW_PORT_ACTION_GET_PORT_INFO: {
8410 u32 lstatus = be32_to_cpu(cmd->u.info.lstatus_to_modtype);
8412 link_ok = (lstatus & FW_PORT_CMD_LSTATUS_F) != 0;
8413 linkdnrc = FW_PORT_CMD_LINKDNRC_G(lstatus);
8414 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
8415 mod_type = FW_PORT_CMD_MODTYPE_G(lstatus);
8416 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.pcap));
8417 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.acap));
8418 lpacaps = fwcaps16_to_caps32(be16_to_cpu(cmd->u.info.lpacap));
8419 linkattr = lstatus_to_fwcap(lstatus);
8423 case FW_PORT_ACTION_GET_PORT_INFO32: {
8426 lstatus32 = be32_to_cpu(cmd->u.info32.lstatus32_to_cbllen32);
8427 link_ok = (lstatus32 & FW_PORT_CMD_LSTATUS32_F) != 0;
8428 linkdnrc = FW_PORT_CMD_LINKDNRC32_G(lstatus32);
8429 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
8430 mod_type = FW_PORT_CMD_MODTYPE32_G(lstatus32);
8431 pcaps = be32_to_cpu(cmd->u.info32.pcaps32);
8432 acaps = be32_to_cpu(cmd->u.info32.acaps32);
8433 lpacaps = be32_to_cpu(cmd->u.info32.lpacaps32);
8434 linkattr = be32_to_cpu(cmd->u.info32.linkattr32);
8439 dev_err(adapter->pdev_dev, "Handle Port Information: Bad Command/Action %#x\n",
8440 be32_to_cpu(cmd->action_to_len16));
8444 fec = fwcap_to_cc_fec(acaps);
8445 fc = fwcap_to_cc_pause(linkattr);
8446 speed = fwcap_to_speed(linkattr);
8448 lc->new_module = false;
8449 lc->redo_l1cfg = false;
8451 if (mod_type != pi->mod_type) {
8452 /* With the newer SFP28 and QSFP28 Transceiver Module Types,
8453 * various fundamental Port Capabilities which used to be
8454 * immutable can now change radically. We can now have
8455 * Speeds, Auto-Negotiation, Forward Error Correction, etc.
8456 * all change based on what Transceiver Module is inserted.
8457 * So we need to record the Physical "Port" Capabilities on
8458 * every Transceiver Module change.
8462 /* When a new Transceiver Module is inserted, the Firmware
8463 * will examine its i2c EPROM to determine its type and
8464 * general operating parameters including things like Forward
8465 * Error Control, etc. Various IEEE 802.3 standards dictate
8466 * how to interpret these i2c values to determine default
8467 * "sutomatic" settings. We record these for future use when
8468 * the user explicitly requests these standards-based values.
8470 lc->def_acaps = acaps;
8472 /* Some versions of the early T6 Firmware "cheated" when
8473 * handling different Transceiver Modules by changing the
8474 * underlaying Port Type reported to the Host Drivers. As
8475 * such we need to capture whatever Port Type the Firmware
8476 * sends us and record it in case it's different from what we
8477 * were told earlier. Unfortunately, since Firmware is
8478 * forever, we'll need to keep this code here forever, but in
8479 * later T6 Firmware it should just be an assignment of the
8480 * same value already recorded.
8482 pi->port_type = port_type;
8484 pi->mod_type = mod_type;
8486 lc->new_module = t4_is_inserted_mod_type(mod_type);
8487 t4_os_portmod_changed(adapter, pi->port_id);
8490 if (link_ok != lc->link_ok || speed != lc->speed ||
8491 fc != lc->fc || fec != lc->fec) { /* something changed */
8492 if (!link_ok && lc->link_ok) {
8493 lc->link_down_rc = linkdnrc;
8494 dev_warn(adapter->pdev_dev, "Port %d link down, reason: %s\n",
8495 pi->tx_chan, t4_link_down_rc_str(linkdnrc));
8497 lc->link_ok = link_ok;
8502 lc->lpacaps = lpacaps;
8503 lc->acaps = acaps & ADVERT_MASK;
8505 if (!(lc->acaps & FW_PORT_CAP32_ANEG)) {
8506 lc->autoneg = AUTONEG_DISABLE;
8507 } else if (lc->acaps & FW_PORT_CAP32_ANEG) {
8508 lc->autoneg = AUTONEG_ENABLE;
8510 /* When Autoneg is disabled, user needs to set
8512 * Similar to cxgb4_ethtool.c: set_link_ksettings
8515 lc->speed_caps = fwcap_to_fwspeed(acaps);
8516 lc->autoneg = AUTONEG_DISABLE;
8519 t4_os_link_changed(adapter, pi->port_id, link_ok);
8522 if (lc->new_module && lc->redo_l1cfg) {
8523 struct link_config old_lc;
8526 /* Save the current L1 Configuration and restore it if an
8527 * error occurs. We probably should fix the l1_cfg*()
8528 * routines not to change the link_config when an error
8532 ret = t4_link_l1cfg_ns(adapter, adapter->mbox, pi->lport, lc);
8535 dev_warn(adapter->pdev_dev,
8536 "Attempt to update new Transceiver Module settings failed\n");
8539 lc->new_module = false;
8540 lc->redo_l1cfg = false;
8544 * t4_update_port_info - retrieve and update port information if changed
8545 * @pi: the port_info
8547 * We issue a Get Port Information Command to the Firmware and, if
8548 * successful, we check to see if anything is different from what we
8549 * last recorded and update things accordingly.
8551 int t4_update_port_info(struct port_info *pi)
8553 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8554 struct fw_port_cmd port_cmd;
8557 memset(&port_cmd, 0, sizeof(port_cmd));
8558 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8559 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8560 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8561 port_cmd.action_to_len16 = cpu_to_be32(
8562 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
8563 ? FW_PORT_ACTION_GET_PORT_INFO
8564 : FW_PORT_ACTION_GET_PORT_INFO32) |
8565 FW_LEN16(port_cmd));
8566 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8567 &port_cmd, sizeof(port_cmd), &port_cmd);
8571 t4_handle_get_port_info(pi, (__be64 *)&port_cmd);
8576 * t4_get_link_params - retrieve basic link parameters for given port
8578 * @link_okp: value return pointer for link up/down
8579 * @speedp: value return pointer for speed (Mb/s)
8580 * @mtup: value return pointer for mtu
8582 * Retrieves basic link parameters for a port: link up/down, speed (Mb/s),
8583 * and MTU for a specified port. A negative error is returned on
8584 * failure; 0 on success.
8586 int t4_get_link_params(struct port_info *pi, unsigned int *link_okp,
8587 unsigned int *speedp, unsigned int *mtup)
8589 unsigned int fw_caps = pi->adapter->params.fw_caps_support;
8590 struct fw_port_cmd port_cmd;
8591 unsigned int action, link_ok, speed, mtu;
8592 fw_port_cap32_t linkattr;
8595 memset(&port_cmd, 0, sizeof(port_cmd));
8596 port_cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
8597 FW_CMD_REQUEST_F | FW_CMD_READ_F |
8598 FW_PORT_CMD_PORTID_V(pi->tx_chan));
8599 action = (fw_caps == FW_CAPS16
8600 ? FW_PORT_ACTION_GET_PORT_INFO
8601 : FW_PORT_ACTION_GET_PORT_INFO32);
8602 port_cmd.action_to_len16 = cpu_to_be32(
8603 FW_PORT_CMD_ACTION_V(action) |
8604 FW_LEN16(port_cmd));
8605 ret = t4_wr_mbox(pi->adapter, pi->adapter->mbox,
8606 &port_cmd, sizeof(port_cmd), &port_cmd);
8610 if (action == FW_PORT_ACTION_GET_PORT_INFO) {
8611 u32 lstatus = be32_to_cpu(port_cmd.u.info.lstatus_to_modtype);
8613 link_ok = !!(lstatus & FW_PORT_CMD_LSTATUS_F);
8614 linkattr = lstatus_to_fwcap(lstatus);
8615 mtu = be16_to_cpu(port_cmd.u.info.mtu);
8618 be32_to_cpu(port_cmd.u.info32.lstatus32_to_cbllen32);
8620 link_ok = !!(lstatus32 & FW_PORT_CMD_LSTATUS32_F);
8621 linkattr = be32_to_cpu(port_cmd.u.info32.linkattr32);
8622 mtu = FW_PORT_CMD_MTU32_G(
8623 be32_to_cpu(port_cmd.u.info32.auxlinfo32_mtu32));
8625 speed = fwcap_to_speed(linkattr);
8627 *link_okp = link_ok;
8628 *speedp = fwcap_to_speed(linkattr);
8635 * t4_handle_fw_rpl - process a FW reply message
8636 * @adap: the adapter
8637 * @rpl: start of the FW message
8639 * Processes a FW message, such as link state change messages.
8641 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
8643 u8 opcode = *(const u8 *)rpl;
8645 /* This might be a port command ... this simplifies the following
8646 * conditionals ... We can get away with pre-dereferencing
8647 * action_to_len16 because it's in the first 16 bytes and all messages
8648 * will be at least that long.
8650 const struct fw_port_cmd *p = (const void *)rpl;
8651 unsigned int action =
8652 FW_PORT_CMD_ACTION_G(be32_to_cpu(p->action_to_len16));
8654 if (opcode == FW_PORT_CMD &&
8655 (action == FW_PORT_ACTION_GET_PORT_INFO ||
8656 action == FW_PORT_ACTION_GET_PORT_INFO32)) {
8658 int chan = FW_PORT_CMD_PORTID_G(be32_to_cpu(p->op_to_portid));
8659 struct port_info *pi = NULL;
8661 for_each_port(adap, i) {
8662 pi = adap2pinfo(adap, i);
8663 if (pi->tx_chan == chan)
8667 t4_handle_get_port_info(pi, rpl);
8669 dev_warn(adap->pdev_dev, "Unknown firmware reply %d\n",
8676 static void get_pci_mode(struct adapter *adapter, struct pci_params *p)
8680 if (pci_is_pcie(adapter->pdev)) {
8681 pcie_capability_read_word(adapter->pdev, PCI_EXP_LNKSTA, &val);
8682 p->speed = val & PCI_EXP_LNKSTA_CLS;
8683 p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
8688 * init_link_config - initialize a link's SW state
8689 * @lc: pointer to structure holding the link state
8690 * @pcaps: link Port Capabilities
8691 * @acaps: link current Advertised Port Capabilities
8693 * Initializes the SW state maintained for each link, including the link's
8694 * capabilities and default speed/flow-control/autonegotiation settings.
8696 static void init_link_config(struct link_config *lc, fw_port_cap32_t pcaps,
8697 fw_port_cap32_t acaps)
8700 lc->def_acaps = acaps;
8704 lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
8706 /* For Forward Error Control, we default to whatever the Firmware
8707 * tells us the Link is currently advertising.
8709 lc->requested_fec = FEC_AUTO;
8710 lc->fec = fwcap_to_cc_fec(lc->def_acaps);
8712 /* If the Port is capable of Auto-Negtotiation, initialize it as
8713 * "enabled" and copy over all of the Physical Port Capabilities
8714 * to the Advertised Port Capabilities. Otherwise mark it as
8715 * Auto-Negotiate disabled and select the highest supported speed
8716 * for the link. Note parallel structure in t4_link_l1cfg_core()
8717 * and t4_handle_get_port_info().
8719 if (lc->pcaps & FW_PORT_CAP32_ANEG) {
8720 lc->acaps = lc->pcaps & ADVERT_MASK;
8721 lc->autoneg = AUTONEG_ENABLE;
8722 lc->requested_fc |= PAUSE_AUTONEG;
8725 lc->autoneg = AUTONEG_DISABLE;
8726 lc->speed_caps = fwcap_to_fwspeed(acaps);
8730 #define CIM_PF_NOACCESS 0xeeeeeeee
8732 int t4_wait_dev_ready(void __iomem *regs)
8736 whoami = readl(regs + PL_WHOAMI_A);
8737 if (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS)
8741 whoami = readl(regs + PL_WHOAMI_A);
8742 return (whoami != 0xffffffff && whoami != CIM_PF_NOACCESS ? 0 : -EIO);
8746 u32 vendor_and_model_id;
8750 static int t4_get_flash_params(struct adapter *adap)
8752 /* Table for non-Numonix supported flash parts. Numonix parts are left
8753 * to the preexisting code. All flash parts have 64KB sectors.
8755 static struct flash_desc supported_flash[] = {
8756 { 0x150201, 4 << 20 }, /* Spansion 4MB S25FL032P */
8759 unsigned int part, manufacturer;
8760 unsigned int density, size = 0;
8764 /* Issue a Read ID Command to the Flash part. We decode supported
8765 * Flash parts and their sizes from this. There's a newer Query
8766 * Command which can retrieve detailed geometry information but many
8767 * Flash parts don't support it.
8770 ret = sf1_write(adap, 1, 1, 0, SF_RD_ID);
8772 ret = sf1_read(adap, 3, 0, 1, &flashid);
8773 t4_write_reg(adap, SF_OP_A, 0); /* unlock SF */
8777 /* Check to see if it's one of our non-standard supported Flash parts.
8779 for (part = 0; part < ARRAY_SIZE(supported_flash); part++)
8780 if (supported_flash[part].vendor_and_model_id == flashid) {
8781 adap->params.sf_size = supported_flash[part].size_mb;
8782 adap->params.sf_nsec =
8783 adap->params.sf_size / SF_SEC_SIZE;
8787 /* Decode Flash part size. The code below looks repetative with
8788 * common encodings, but that's not guaranteed in the JEDEC
8789 * specification for the Read JADEC ID command. The only thing that
8790 * we're guaranteed by the JADEC specification is where the
8791 * Manufacturer ID is in the returned result. After that each
8792 * Manufacturer ~could~ encode things completely differently.
8793 * Note, all Flash parts must have 64KB sectors.
8795 manufacturer = flashid & 0xff;
8796 switch (manufacturer) {
8797 case 0x20: { /* Micron/Numonix */
8798 /* This Density -> Size decoding table is taken from Micron
8801 density = (flashid >> 16) & 0xff;
8803 case 0x14: /* 1MB */
8806 case 0x15: /* 2MB */
8809 case 0x16: /* 4MB */
8812 case 0x17: /* 8MB */
8815 case 0x18: /* 16MB */
8818 case 0x19: /* 32MB */
8821 case 0x20: /* 64MB */
8824 case 0x21: /* 128MB */
8827 case 0x22: /* 256MB */
8833 case 0x9d: { /* ISSI -- Integrated Silicon Solution, Inc. */
8834 /* This Density -> Size decoding table is taken from ISSI
8837 density = (flashid >> 16) & 0xff;
8839 case 0x16: /* 32 MB */
8842 case 0x17: /* 64MB */
8848 case 0xc2: { /* Macronix */
8849 /* This Density -> Size decoding table is taken from Macronix
8852 density = (flashid >> 16) & 0xff;
8854 case 0x17: /* 8MB */
8857 case 0x18: /* 16MB */
8863 case 0xef: { /* Winbond */
8864 /* This Density -> Size decoding table is taken from Winbond
8867 density = (flashid >> 16) & 0xff;
8869 case 0x17: /* 8MB */
8872 case 0x18: /* 16MB */
8880 /* If we didn't recognize the FLASH part, that's no real issue: the
8881 * Hardware/Software contract says that Hardware will _*ALWAYS*_
8882 * use a FLASH part which is at least 4MB in size and has 64KB
8883 * sectors. The unrecognized FLASH part is likely to be much larger
8884 * than 4MB, but that's all we really need.
8887 dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
8892 /* Store decoded Flash size and fall through into vetting code. */
8893 adap->params.sf_size = size;
8894 adap->params.sf_nsec = size / SF_SEC_SIZE;
8897 if (adap->params.sf_size < FLASH_MIN_SIZE)
8898 dev_warn(adap->pdev_dev, "WARNING: Flash Part ID %#x, size %#x < %#x\n",
8899 flashid, adap->params.sf_size, FLASH_MIN_SIZE);
8904 * t4_prep_adapter - prepare SW and HW for operation
8905 * @adapter: the adapter
8906 * @reset: if true perform a HW reset
8908 * Initialize adapter SW state for the various HW modules, set initial
8909 * values for some adapter tunables, take PHYs out of reset, and
8910 * initialize the MDIO interface.
8912 int t4_prep_adapter(struct adapter *adapter)
8918 get_pci_mode(adapter, &adapter->params.pci);
8919 pl_rev = REV_G(t4_read_reg(adapter, PL_REV_A));
8921 ret = t4_get_flash_params(adapter);
8923 dev_err(adapter->pdev_dev, "error %d identifying flash\n", ret);
8927 /* Retrieve adapter's device ID
8929 pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
8930 ver = device_id >> 12;
8931 adapter->params.chip = 0;
8934 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
8935 adapter->params.arch.sge_fl_db = DBPRIO_F;
8936 adapter->params.arch.mps_tcam_size =
8937 NUM_MPS_CLS_SRAM_L_INSTANCES;
8938 adapter->params.arch.mps_rplc_size = 128;
8939 adapter->params.arch.nchan = NCHAN;
8940 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
8941 adapter->params.arch.vfcount = 128;
8942 /* Congestion map is for 4 channels so that
8943 * MPS can have 4 priority per port.
8945 adapter->params.arch.cng_ch_bits_log = 2;
8948 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
8949 adapter->params.arch.sge_fl_db = DBPRIO_F | DBTYPE_F;
8950 adapter->params.arch.mps_tcam_size =
8951 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8952 adapter->params.arch.mps_rplc_size = 128;
8953 adapter->params.arch.nchan = NCHAN;
8954 adapter->params.arch.pm_stats_cnt = PM_NSTATS;
8955 adapter->params.arch.vfcount = 128;
8956 adapter->params.arch.cng_ch_bits_log = 2;
8959 adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T6, pl_rev);
8960 adapter->params.arch.sge_fl_db = 0;
8961 adapter->params.arch.mps_tcam_size =
8962 NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
8963 adapter->params.arch.mps_rplc_size = 256;
8964 adapter->params.arch.nchan = 2;
8965 adapter->params.arch.pm_stats_cnt = T6_PM_NSTATS;
8966 adapter->params.arch.vfcount = 256;
8967 /* Congestion map will be for 2 channels so that
8968 * MPS can have 8 priority per port.
8970 adapter->params.arch.cng_ch_bits_log = 3;
8973 dev_err(adapter->pdev_dev, "Device %d is not supported\n",
8978 adapter->params.cim_la_size = CIMLA_SIZE;
8979 init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
8982 * Default port for debugging in case we can't reach FW.
8984 adapter->params.nports = 1;
8985 adapter->params.portvec = 1;
8986 adapter->params.vpd.cclk = 50000;
8988 /* Set PCIe completion timeout to 4 seconds. */
8989 pcie_capability_clear_and_set_word(adapter->pdev, PCI_EXP_DEVCTL2,
8990 PCI_EXP_DEVCTL2_COMP_TIMEOUT, 0xd);
8995 * t4_shutdown_adapter - shut down adapter, host & wire
8996 * @adapter: the adapter
8998 * Perform an emergency shutdown of the adapter and stop it from
8999 * continuing any further communication on the ports or DMA to the
9000 * host. This is typically used when the adapter and/or firmware
9001 * have crashed and we want to prevent any further accidental
9002 * communication with the rest of the world. This will also force
9003 * the port Link Status to go down -- if register writes work --
9004 * which should help our peers figure out that we're down.
9006 int t4_shutdown_adapter(struct adapter *adapter)
9010 t4_intr_disable(adapter);
9011 t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
9012 for_each_port(adapter, port) {
9013 u32 a_port_cfg = is_t4(adapter->params.chip) ?
9014 PORT_REG(port, XGMAC_PORT_CFG_A) :
9015 T5_PORT_REG(port, MAC_PORT_CFG_A);
9017 t4_write_reg(adapter, a_port_cfg,
9018 t4_read_reg(adapter, a_port_cfg)
9019 & ~SIGNAL_DET_V(1));
9021 t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
9027 * t4_bar2_sge_qregs - return BAR2 SGE Queue register information
9028 * @adapter: the adapter
9029 * @qid: the Queue ID
9030 * @qtype: the Ingress or Egress type for @qid
9031 * @user: true if this request is for a user mode queue
9032 * @pbar2_qoffset: BAR2 Queue Offset
9033 * @pbar2_qid: BAR2 Queue ID or 0 for Queue ID inferred SGE Queues
9035 * Returns the BAR2 SGE Queue Registers information associated with the
9036 * indicated Absolute Queue ID. These are passed back in return value
9037 * pointers. @qtype should be T4_BAR2_QTYPE_EGRESS for Egress Queue
9038 * and T4_BAR2_QTYPE_INGRESS for Ingress Queues.
9040 * This may return an error which indicates that BAR2 SGE Queue
9041 * registers aren't available. If an error is not returned, then the
9042 * following values are returned:
9044 * *@pbar2_qoffset: the BAR2 Offset of the @qid Registers
9045 * *@pbar2_qid: the BAR2 SGE Queue ID or 0 of @qid
9047 * If the returned BAR2 Queue ID is 0, then BAR2 SGE registers which
9048 * require the "Inferred Queue ID" ability may be used. E.g. the
9049 * Write Combining Doorbell Buffer. If the BAR2 Queue ID is not 0,
9050 * then these "Inferred Queue ID" register may not be used.
9052 int t4_bar2_sge_qregs(struct adapter *adapter,
9054 enum t4_bar2_qtype qtype,
9057 unsigned int *pbar2_qid)
9059 unsigned int page_shift, page_size, qpp_shift, qpp_mask;
9060 u64 bar2_page_offset, bar2_qoffset;
9061 unsigned int bar2_qid, bar2_qid_offset, bar2_qinferred;
9063 /* T4 doesn't support BAR2 SGE Queue registers for kernel mode queues */
9064 if (!user && is_t4(adapter->params.chip))
9067 /* Get our SGE Page Size parameters.
9069 page_shift = adapter->params.sge.hps + 10;
9070 page_size = 1 << page_shift;
9072 /* Get the right Queues per Page parameters for our Queue.
9074 qpp_shift = (qtype == T4_BAR2_QTYPE_EGRESS
9075 ? adapter->params.sge.eq_qpp
9076 : adapter->params.sge.iq_qpp);
9077 qpp_mask = (1 << qpp_shift) - 1;
9079 /* Calculate the basics of the BAR2 SGE Queue register area:
9080 * o The BAR2 page the Queue registers will be in.
9081 * o The BAR2 Queue ID.
9082 * o The BAR2 Queue ID Offset into the BAR2 page.
9084 bar2_page_offset = ((u64)(qid >> qpp_shift) << page_shift);
9085 bar2_qid = qid & qpp_mask;
9086 bar2_qid_offset = bar2_qid * SGE_UDB_SIZE;
9088 /* If the BAR2 Queue ID Offset is less than the Page Size, then the
9089 * hardware will infer the Absolute Queue ID simply from the writes to
9090 * the BAR2 Queue ID Offset within the BAR2 Page (and we need to use a
9091 * BAR2 Queue ID of 0 for those writes). Otherwise, we'll simply
9092 * write to the first BAR2 SGE Queue Area within the BAR2 Page with
9093 * the BAR2 Queue ID and the hardware will infer the Absolute Queue ID
9094 * from the BAR2 Page and BAR2 Queue ID.
9096 * One important censequence of this is that some BAR2 SGE registers
9097 * have a "Queue ID" field and we can write the BAR2 SGE Queue ID
9098 * there. But other registers synthesize the SGE Queue ID purely
9099 * from the writes to the registers -- the Write Combined Doorbell
9100 * Buffer is a good example. These BAR2 SGE Registers are only
9101 * available for those BAR2 SGE Register areas where the SGE Absolute
9102 * Queue ID can be inferred from simple writes.
9104 bar2_qoffset = bar2_page_offset;
9105 bar2_qinferred = (bar2_qid_offset < page_size);
9106 if (bar2_qinferred) {
9107 bar2_qoffset += bar2_qid_offset;
9111 *pbar2_qoffset = bar2_qoffset;
9112 *pbar2_qid = bar2_qid;
9117 * t4_init_devlog_params - initialize adapter->params.devlog
9118 * @adap: the adapter
9120 * Initialize various fields of the adapter's Firmware Device Log
9121 * Parameters structure.
9123 int t4_init_devlog_params(struct adapter *adap)
9125 struct devlog_params *dparams = &adap->params.devlog;
9127 unsigned int devlog_meminfo;
9128 struct fw_devlog_cmd devlog_cmd;
9131 /* If we're dealing with newer firmware, the Device Log Paramerters
9132 * are stored in a designated register which allows us to access the
9133 * Device Log even if we can't talk to the firmware.
9136 t4_read_reg(adap, PCIE_FW_REG(PCIE_FW_PF_A, PCIE_FW_PF_DEVLOG));
9138 unsigned int nentries, nentries128;
9140 dparams->memtype = PCIE_FW_PF_DEVLOG_MEMTYPE_G(pf_dparams);
9141 dparams->start = PCIE_FW_PF_DEVLOG_ADDR16_G(pf_dparams) << 4;
9143 nentries128 = PCIE_FW_PF_DEVLOG_NENTRIES128_G(pf_dparams);
9144 nentries = (nentries128 + 1) * 128;
9145 dparams->size = nentries * sizeof(struct fw_devlog_e);
9150 /* Otherwise, ask the firmware for it's Device Log Parameters.
9152 memset(&devlog_cmd, 0, sizeof(devlog_cmd));
9153 devlog_cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_DEVLOG_CMD) |
9154 FW_CMD_REQUEST_F | FW_CMD_READ_F);
9155 devlog_cmd.retval_len16 = cpu_to_be32(FW_LEN16(devlog_cmd));
9156 ret = t4_wr_mbox(adap, adap->mbox, &devlog_cmd, sizeof(devlog_cmd),
9162 be32_to_cpu(devlog_cmd.memtype_devlog_memaddr16_devlog);
9163 dparams->memtype = FW_DEVLOG_CMD_MEMTYPE_DEVLOG_G(devlog_meminfo);
9164 dparams->start = FW_DEVLOG_CMD_MEMADDR16_DEVLOG_G(devlog_meminfo) << 4;
9165 dparams->size = be32_to_cpu(devlog_cmd.memsize_devlog);
9171 * t4_init_sge_params - initialize adap->params.sge
9172 * @adapter: the adapter
9174 * Initialize various fields of the adapter's SGE Parameters structure.
9176 int t4_init_sge_params(struct adapter *adapter)
9178 struct sge_params *sge_params = &adapter->params.sge;
9180 unsigned int s_hps, s_qpp;
9182 /* Extract the SGE Page Size for our PF.
9184 hps = t4_read_reg(adapter, SGE_HOST_PAGE_SIZE_A);
9185 s_hps = (HOSTPAGESIZEPF0_S +
9186 (HOSTPAGESIZEPF1_S - HOSTPAGESIZEPF0_S) * adapter->pf);
9187 sge_params->hps = ((hps >> s_hps) & HOSTPAGESIZEPF0_M);
9189 /* Extract the SGE Egress and Ingess Queues Per Page for our PF.
9191 s_qpp = (QUEUESPERPAGEPF0_S +
9192 (QUEUESPERPAGEPF1_S - QUEUESPERPAGEPF0_S) * adapter->pf);
9193 qpp = t4_read_reg(adapter, SGE_EGRESS_QUEUES_PER_PAGE_PF_A);
9194 sge_params->eq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
9195 qpp = t4_read_reg(adapter, SGE_INGRESS_QUEUES_PER_PAGE_PF_A);
9196 sge_params->iq_qpp = ((qpp >> s_qpp) & QUEUESPERPAGEPF0_M);
9202 * t4_init_tp_params - initialize adap->params.tp
9203 * @adap: the adapter
9204 * @sleep_ok: if true we may sleep while awaiting command completion
9206 * Initialize various fields of the adapter's TP Parameters structure.
9208 int t4_init_tp_params(struct adapter *adap, bool sleep_ok)
9213 v = t4_read_reg(adap, TP_TIMER_RESOLUTION_A);
9214 adap->params.tp.tre = TIMERRESOLUTION_G(v);
9215 adap->params.tp.dack_re = DELAYEDACKRESOLUTION_G(v);
9217 /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
9218 for (chan = 0; chan < NCHAN; chan++)
9219 adap->params.tp.tx_modq[chan] = chan;
9221 /* Cache the adapter's Compressed Filter Mode and global Incress
9224 t4_tp_pio_read(adap, &adap->params.tp.vlan_pri_map, 1,
9225 TP_VLAN_PRI_MAP_A, sleep_ok);
9226 t4_tp_pio_read(adap, &adap->params.tp.ingress_config, 1,
9227 TP_INGRESS_CONFIG_A, sleep_ok);
9229 /* For T6, cache the adapter's compressed error vector
9230 * and passing outer header info for encapsulated packets.
9232 if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
9233 v = t4_read_reg(adap, TP_OUT_CONFIG_A);
9234 adap->params.tp.rx_pkt_encap = (v & CRXPKTENC_F) ? 1 : 0;
9237 /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
9238 * shift positions of several elements of the Compressed Filter Tuple
9239 * for this adapter which we need frequently ...
9241 adap->params.tp.fcoe_shift = t4_filter_field_shift(adap, FCOE_F);
9242 adap->params.tp.port_shift = t4_filter_field_shift(adap, PORT_F);
9243 adap->params.tp.vnic_shift = t4_filter_field_shift(adap, VNIC_ID_F);
9244 adap->params.tp.vlan_shift = t4_filter_field_shift(adap, VLAN_F);
9245 adap->params.tp.tos_shift = t4_filter_field_shift(adap, TOS_F);
9246 adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
9248 adap->params.tp.ethertype_shift = t4_filter_field_shift(adap,
9250 adap->params.tp.macmatch_shift = t4_filter_field_shift(adap,
9252 adap->params.tp.matchtype_shift = t4_filter_field_shift(adap,
9254 adap->params.tp.frag_shift = t4_filter_field_shift(adap,
9257 /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
9258 * represents the presence of an Outer VLAN instead of a VNIC ID.
9260 if ((adap->params.tp.ingress_config & VNIC_F) == 0)
9261 adap->params.tp.vnic_shift = -1;
9263 v = t4_read_reg(adap, LE_3_DB_HASH_MASK_GEN_IPV4_T6_A);
9264 adap->params.tp.hash_filter_mask = v;
9265 v = t4_read_reg(adap, LE_4_DB_HASH_MASK_GEN_IPV4_T6_A);
9266 adap->params.tp.hash_filter_mask |= ((u64)v << 32);
9271 * t4_filter_field_shift - calculate filter field shift
9272 * @adap: the adapter
9273 * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
9275 * Return the shift position of a filter field within the Compressed
9276 * Filter Tuple. The filter field is specified via its selection bit
 9277 * within TP_VLAN_PRI_MAP (filter mode). E.g. F_VLAN.
9279 int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
9281 unsigned int filter_mode = adap->params.tp.vlan_pri_map;
9285 if ((filter_mode & filter_sel) == 0)
9288 for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
9289 switch (filter_mode & sel) {
9291 field_shift += FT_FCOE_W;
9294 field_shift += FT_PORT_W;
9297 field_shift += FT_VNIC_ID_W;
9300 field_shift += FT_VLAN_W;
9303 field_shift += FT_TOS_W;
9306 field_shift += FT_PROTOCOL_W;
9309 field_shift += FT_ETHERTYPE_W;
9312 field_shift += FT_MACMATCH_W;
9315 field_shift += FT_MPSHITTYPE_W;
9317 case FRAGMENTATION_F:
9318 field_shift += FT_FRAGMENTATION_W;
9325 int t4_init_rss_mode(struct adapter *adap, int mbox)
9328 struct fw_rss_vi_config_cmd rvc;
9330 memset(&rvc, 0, sizeof(rvc));
9332 for_each_port(adap, i) {
9333 struct port_info *p = adap2pinfo(adap, i);
9336 cpu_to_be32(FW_CMD_OP_V(FW_RSS_VI_CONFIG_CMD) |
9337 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9338 FW_RSS_VI_CONFIG_CMD_VIID_V(p->viid));
9339 rvc.retval_len16 = cpu_to_be32(FW_LEN16(rvc));
9340 ret = t4_wr_mbox(adap, mbox, &rvc, sizeof(rvc), &rvc);
9343 p->rss_mode = be32_to_cpu(rvc.u.basicvirtual.defaultq_to_udpen);
9349 * t4_init_portinfo - allocate a virtual interface and initialize port_info
9350 * @pi: the port_info
9351 * @mbox: mailbox to use for the FW command
9352 * @port: physical port associated with the VI
9353 * @pf: the PF owning the VI
9354 * @vf: the VF owning the VI
9355 * @mac: the MAC address of the VI
9357 * Allocates a virtual interface for the given physical port. If @mac is
9358 * not %NULL it contains the MAC address of the VI as assigned by FW.
9359 * @mac should be large enough to hold an Ethernet address.
9360 * Returns < 0 on error.
9362 int t4_init_portinfo(struct port_info *pi, int mbox,
9363 int port, int pf, int vf, u8 mac[])
9365 struct adapter *adapter = pi->adapter;
9366 unsigned int fw_caps = adapter->params.fw_caps_support;
9367 struct fw_port_cmd cmd;
9368 unsigned int rss_size;
9369 enum fw_port_type port_type;
9371 fw_port_cap32_t pcaps, acaps;
9374 /* If we haven't yet determined whether we're talking to Firmware
9375 * which knows the new 32-bit Port Capabilities, it's time to find
9376 * out now. This will also tell new Firmware to send us Port Status
9377 * Updates using the new 32-bit Port Capabilities version of the
9378 * Port Information message.
9380 if (fw_caps == FW_CAPS_UNKNOWN) {
9383 param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_PFVF) |
9384 FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_PFVF_PORT_CAPS32));
9386 ret = t4_set_params(adapter, mbox, pf, vf, 1, ¶m, &val);
9387 fw_caps = (ret == 0 ? FW_CAPS32 : FW_CAPS16);
9388 adapter->params.fw_caps_support = fw_caps;
9391 memset(&cmd, 0, sizeof(cmd));
9392 cmd.op_to_portid = cpu_to_be32(FW_CMD_OP_V(FW_PORT_CMD) |
9393 FW_CMD_REQUEST_F | FW_CMD_READ_F |
9394 FW_PORT_CMD_PORTID_V(port));
9395 cmd.action_to_len16 = cpu_to_be32(
9396 FW_PORT_CMD_ACTION_V(fw_caps == FW_CAPS16
9397 ? FW_PORT_ACTION_GET_PORT_INFO
9398 : FW_PORT_ACTION_GET_PORT_INFO32) |
9400 ret = t4_wr_mbox(pi->adapter, mbox, &cmd, sizeof(cmd), &cmd);
9404 /* Extract the various fields from the Port Information message.
9406 if (fw_caps == FW_CAPS16) {
9407 u32 lstatus = be32_to_cpu(cmd.u.info.lstatus_to_modtype);
9409 port_type = FW_PORT_CMD_PTYPE_G(lstatus);
9410 mdio_addr = ((lstatus & FW_PORT_CMD_MDIOCAP_F)
9411 ? FW_PORT_CMD_MDIOADDR_G(lstatus)
9413 pcaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.pcap));
9414 acaps = fwcaps16_to_caps32(be16_to_cpu(cmd.u.info.acap));
9416 u32 lstatus32 = be32_to_cpu(cmd.u.info32.lstatus32_to_cbllen32);
9418 port_type = FW_PORT_CMD_PORTTYPE32_G(lstatus32);
9419 mdio_addr = ((lstatus32 & FW_PORT_CMD_MDIOCAP32_F)
9420 ? FW_PORT_CMD_MDIOADDR32_G(lstatus32)
9422 pcaps = be32_to_cpu(cmd.u.info32.pcaps32);
9423 acaps = be32_to_cpu(cmd.u.info32.acaps32);
9426 ret = t4_alloc_vi(pi->adapter, mbox, port, pf, vf, 1, mac, &rss_size);
9433 pi->rss_size = rss_size;
9435 pi->port_type = port_type;
9436 pi->mdio_addr = mdio_addr;
9437 pi->mod_type = FW_PORT_MOD_TYPE_NA;
9439 init_link_config(&pi->link_cfg, pcaps, acaps);
9443 int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
9448 for_each_port(adap, i) {
9449 struct port_info *pi = adap2pinfo(adap, i);
9451 while ((adap->params.portvec & (1 << j)) == 0)
9454 ret = t4_init_portinfo(pi, mbox, j, pf, vf, addr);
9458 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
9465 * t4_read_cimq_cfg - read CIM queue configuration
9466 * @adap: the adapter
9467 * @base: holds the queue base addresses in bytes
9468 * @size: holds the queue sizes in bytes
9469 * @thres: holds the queue full thresholds in bytes
9471 * Returns the current configuration of the CIM queues, starting with
9472 * the IBQs, then the OBQs.
9474 void t4_read_cimq_cfg(struct adapter *adap, u16 *base, u16 *size, u16 *thres)
9477 int cim_num_obq = is_t4(adap->params.chip) ?
9478 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9480 for (i = 0; i < CIM_NUM_IBQ; i++) {
9481 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, IBQSELECT_F |
9483 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9484 /* value is in 256-byte units */
9485 *base++ = CIMQBASE_G(v) * 256;
9486 *size++ = CIMQSIZE_G(v) * 256;
9487 *thres++ = QUEFULLTHRSH_G(v) * 8; /* 8-byte unit */
9489 for (i = 0; i < cim_num_obq; i++) {
9490 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9492 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9493 /* value is in 256-byte units */
9494 *base++ = CIMQBASE_G(v) * 256;
9495 *size++ = CIMQSIZE_G(v) * 256;
9500 * t4_read_cim_ibq - read the contents of a CIM inbound queue
9501 * @adap: the adapter
9502 * @qid: the queue index
9503 * @data: where to store the queue contents
9504 * @n: capacity of @data in 32-bit words
9506 * Reads the contents of the selected CIM queue starting at address 0 up
9507 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9508 * error and the number of 32-bit words actually read on success.
9510 int t4_read_cim_ibq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9512 int i, err, attempts;
9514 const unsigned int nwords = CIM_IBQ_SIZE * 4;
9516 if (qid > 5 || (n & 3))
9519 addr = qid * nwords;
9523 /* It might take 3-10ms before the IBQ debug read access is allowed.
9524 * Wait for 1 Sec with a delay of 1 usec.
9528 for (i = 0; i < n; i++, addr++) {
9529 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, IBQDBGADDR_V(addr) |
9531 err = t4_wait_op_done(adap, CIM_IBQ_DBG_CFG_A, IBQDBGBUSY_F, 0,
9535 *data++ = t4_read_reg(adap, CIM_IBQ_DBG_DATA_A);
9537 t4_write_reg(adap, CIM_IBQ_DBG_CFG_A, 0);
9542 * t4_read_cim_obq - read the contents of a CIM outbound queue
9543 * @adap: the adapter
9544 * @qid: the queue index
9545 * @data: where to store the queue contents
9546 * @n: capacity of @data in 32-bit words
9548 * Reads the contents of the selected CIM queue starting at address 0 up
9549 * to the capacity of @data. @n must be a multiple of 4. Returns < 0 on
9550 * error and the number of 32-bit words actually read on success.
9552 int t4_read_cim_obq(struct adapter *adap, unsigned int qid, u32 *data, size_t n)
9555 unsigned int addr, v, nwords;
9556 int cim_num_obq = is_t4(adap->params.chip) ?
9557 CIM_NUM_OBQ : CIM_NUM_OBQ_T5;
9559 if ((qid > (cim_num_obq - 1)) || (n & 3))
9562 t4_write_reg(adap, CIM_QUEUE_CONFIG_REF_A, OBQSELECT_F |
9563 QUENUMSELECT_V(qid));
9564 v = t4_read_reg(adap, CIM_QUEUE_CONFIG_CTRL_A);
9566 addr = CIMQBASE_G(v) * 64; /* muliple of 256 -> muliple of 4 */
9567 nwords = CIMQSIZE_G(v) * 64; /* same */
9571 for (i = 0; i < n; i++, addr++) {
9572 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, OBQDBGADDR_V(addr) |
9574 err = t4_wait_op_done(adap, CIM_OBQ_DBG_CFG_A, OBQDBGBUSY_F, 0,
9578 *data++ = t4_read_reg(adap, CIM_OBQ_DBG_DATA_A);
9580 t4_write_reg(adap, CIM_OBQ_DBG_CFG_A, 0);
9585 * t4_cim_read - read a block from CIM internal address space
9586 * @adap: the adapter
9587 * @addr: the start address within the CIM address space
9588 * @n: number of words to read
9589 * @valp: where to store the result
 9591 * Reads a block of 4-byte words from the CIM internal address space.
9593 int t4_cim_read(struct adapter *adap, unsigned int addr, unsigned int n,
9598 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9601 for ( ; !ret && n--; addr += 4) {
9602 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr);
9603 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
9606 *valp++ = t4_read_reg(adap, CIM_HOST_ACC_DATA_A);
9612 * t4_cim_write - write a block into CIM internal address space
9613 * @adap: the adapter
9614 * @addr: the start address within the CIM address space
9615 * @n: number of words to write
9616 * @valp: set of values to write
 9618 * Writes a block of 4-byte words into the CIM internal address space.
9620 int t4_cim_write(struct adapter *adap, unsigned int addr, unsigned int n,
9621 const unsigned int *valp)
9625 if (t4_read_reg(adap, CIM_HOST_ACC_CTRL_A) & HOSTBUSY_F)
9628 for ( ; !ret && n--; addr += 4) {
9629 t4_write_reg(adap, CIM_HOST_ACC_DATA_A, *valp++);
9630 t4_write_reg(adap, CIM_HOST_ACC_CTRL_A, addr | HOSTWRITE_F);
9631 ret = t4_wait_op_done(adap, CIM_HOST_ACC_CTRL_A, HOSTBUSY_F,
/* Convenience wrapper: write a single word into the CIM address space. */
static int t4_cim_write1(struct adapter *adap, unsigned int addr,
			 unsigned int val)
{
	return t4_cim_write(adap, addr, 1, &val);
}
9644 * t4_cim_read_la - read CIM LA capture buffer
9645 * @adap: the adapter
9646 * @la_buf: where to store the LA data
9647 * @wrptr: the HW write pointer within the capture buffer
9649 * Reads the contents of the CIM LA buffer with the most recent entry at
9650 * the end of the returned data and with the entry at @wrptr first.
9651 * We try to leave the LA in the running state we find it in.
9653 int t4_cim_read_la(struct adapter *adap, u32 *la_buf, unsigned int *wrptr)
9656 unsigned int cfg, val, idx;
9658 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &cfg);
9662 if (cfg & UPDBGLAEN_F) { /* LA is running, freeze it */
9663 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A, 0);
9668 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9672 idx = UPDBGLAWRPTR_G(val);
9676 for (i = 0; i < adap->params.cim_la_size; i++) {
9677 ret = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9678 UPDBGLARDPTR_V(idx) | UPDBGLARDEN_F);
9681 ret = t4_cim_read(adap, UP_UP_DBG_LA_CFG_A, 1, &val);
9684 if (val & UPDBGLARDEN_F) {
9688 ret = t4_cim_read(adap, UP_UP_DBG_LA_DATA_A, 1, &la_buf[i]);
9692 /* Bits 0-3 of UpDbgLaRdPtr can be between 0000 to 1001 to
9693 * identify the 32-bit portion of the full 312-bit data
9695 if (is_t6(adap->params.chip) && (idx & 0xf) >= 9)
9696 idx = (idx & 0xff0) + 0x10;
9699 /* address can't exceed 0xfff */
9700 idx &= UPDBGLARDPTR_M;
9703 if (cfg & UPDBGLAEN_F) {
9704 int r = t4_cim_write1(adap, UP_UP_DBG_LA_CFG_A,
9705 cfg & ~UPDBGLARDEN_F);
9713 * t4_tp_read_la - read TP LA capture buffer
9714 * @adap: the adapter
9715 * @la_buf: where to store the LA data
9716 * @wrptr: the HW write pointer within the capture buffer
9718 * Reads the contents of the TP LA buffer with the most recent entry at
9719 * the end of the returned data and with the entry at @wrptr first.
9720 * We leave the LA in the running state we find it in.
9722 void t4_tp_read_la(struct adapter *adap, u64 *la_buf, unsigned int *wrptr)
9724 bool last_incomplete;
9725 unsigned int i, cfg, val, idx;
9727 cfg = t4_read_reg(adap, TP_DBG_LA_CONFIG_A) & 0xffff;
9728 if (cfg & DBGLAENABLE_F) /* freeze LA */
9729 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
9730 adap->params.tp.la_mask | (cfg ^ DBGLAENABLE_F));
9732 val = t4_read_reg(adap, TP_DBG_LA_CONFIG_A);
9733 idx = DBGLAWPTR_G(val);
9734 last_incomplete = DBGLAMODE_G(val) >= 2 && (val & DBGLAWHLF_F) == 0;
9735 if (last_incomplete)
9736 idx = (idx + 1) & DBGLARPTR_M;
9741 val &= ~DBGLARPTR_V(DBGLARPTR_M);
9742 val |= adap->params.tp.la_mask;
9744 for (i = 0; i < TPLA_SIZE; i++) {
9745 t4_write_reg(adap, TP_DBG_LA_CONFIG_A, DBGLARPTR_V(idx) | val);
9746 la_buf[i] = t4_read_reg64(adap, TP_DBG_LA_DATAL_A);
9747 idx = (idx + 1) & DBGLARPTR_M;
9750 /* Wipe out last entry if it isn't valid */
9751 if (last_incomplete)
9752 la_buf[TPLA_SIZE - 1] = ~0ULL;
9754 if (cfg & DBGLAENABLE_F) /* restore running state */
9755 t4_write_reg(adap, TP_DBG_LA_CONFIG_A,
9756 cfg | adap->params.tp.la_mask);
9759 /* SGE Hung Ingress DMA Warning Threshold time and Warning Repeat Rate (in
9760 * seconds). If we find one of the SGE Ingress DMA State Machines in the same
9761 * state for more than the Warning Threshold then we'll issue a warning about
9762 * a potential hang. We'll repeat the warning as the SGE Ingress DMA Channel
9763 * appears to be hung every Warning Repeat second till the situation clears.
9764 * If the situation clears, we'll note that as well.
9766 #define SGE_IDMA_WARN_THRESH 1
9767 #define SGE_IDMA_WARN_REPEAT 300
9770 * t4_idma_monitor_init - initialize SGE Ingress DMA Monitor
9771 * @adapter: the adapter
9772 * @idma: the adapter IDMA Monitor state
9774 * Initialize the state of an SGE Ingress DMA Monitor.
9776 void t4_idma_monitor_init(struct adapter *adapter,
9777 struct sge_idma_monitor_state *idma)
9779 /* Initialize the state variables for detecting an SGE Ingress DMA
9780 * hang. The SGE has internal counters which count up on each clock
9781 * tick whenever the SGE finds its Ingress DMA State Engines in the
9782 * same state they were on the previous clock tick. The clock used is
9783 * the Core Clock so we have a limit on the maximum "time" they can
9784 * record; typically a very small number of seconds. For instance,
9785 * with a 600MHz Core Clock, we can only count up to a bit more than
9786 * 7s. So we'll synthesize a larger counter in order to not run the
9787 * risk of having the "timers" overflow and give us the flexibility to
9788 * maintain a Hung SGE State Machine of our own which operates across
9789 * a longer time frame.
9791 idma->idma_1s_thresh = core_ticks_per_usec(adapter) * 1000000; /* 1s */
9792 idma->idma_stalled[0] = 0;
9793 idma->idma_stalled[1] = 0;
9797 * t4_idma_monitor - monitor SGE Ingress DMA state
9798 * @adapter: the adapter
9799 * @idma: the adapter IDMA Monitor state
9800 * @hz: number of ticks/second
9801 * @ticks: number of ticks since the last IDMA Monitor call
9803 void t4_idma_monitor(struct adapter *adapter,
9804 struct sge_idma_monitor_state *idma,
9807 int i, idma_same_state_cnt[2];
9809 /* Read the SGE Debug Ingress DMA Same State Count registers. These
9810 * are counters inside the SGE which count up on each clock when the
9811 * SGE finds its Ingress DMA State Engines in the same states they
9812 * were in the previous clock. The counters will peg out at
9813 * 0xffffffff without wrapping around so once they pass the 1s
9814 * threshold they'll stay above that till the IDMA state changes.
9816 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 13);
9817 idma_same_state_cnt[0] = t4_read_reg(adapter, SGE_DEBUG_DATA_HIGH_A);
9818 idma_same_state_cnt[1] = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9820 for (i = 0; i < 2; i++) {
9821 u32 debug0, debug11;
9823 /* If the Ingress DMA Same State Counter ("timer") is less
9824 * than 1s, then we can reset our synthesized Stall Timer and
9825 * continue. If we have previously emitted warnings about a
9826 * potential stalled Ingress Queue, issue a note indicating
9827 * that the Ingress Queue has resumed forward progress.
9829 if (idma_same_state_cnt[i] < idma->idma_1s_thresh) {
9830 if (idma->idma_stalled[i] >= SGE_IDMA_WARN_THRESH * hz)
9831 dev_warn(adapter->pdev_dev, "SGE idma%d, queue %u, "
9832 "resumed after %d seconds\n",
9833 i, idma->idma_qid[i],
9834 idma->idma_stalled[i] / hz);
9835 idma->idma_stalled[i] = 0;
9839 /* Synthesize an SGE Ingress DMA Same State Timer in the Hz
9840 * domain. The first time we get here it'll be because we
9841 * passed the 1s Threshold; each additional time it'll be
9842 * because the RX Timer Callback is being fired on its regular
9845 * If the stall is below our Potential Hung Ingress Queue
9846 * Warning Threshold, continue.
9848 if (idma->idma_stalled[i] == 0) {
9849 idma->idma_stalled[i] = hz;
9850 idma->idma_warn[i] = 0;
9852 idma->idma_stalled[i] += ticks;
9853 idma->idma_warn[i] -= ticks;
9856 if (idma->idma_stalled[i] < SGE_IDMA_WARN_THRESH * hz)
9859 /* We'll issue a warning every SGE_IDMA_WARN_REPEAT seconds.
9861 if (idma->idma_warn[i] > 0)
9863 idma->idma_warn[i] = SGE_IDMA_WARN_REPEAT * hz;
9865 /* Read and save the SGE IDMA State and Queue ID information.
9866 * We do this every time in case it changes across time ...
9867 * can't be too careful ...
9869 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 0);
9870 debug0 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9871 idma->idma_state[i] = (debug0 >> (i * 9)) & 0x3f;
9873 t4_write_reg(adapter, SGE_DEBUG_INDEX_A, 11);
9874 debug11 = t4_read_reg(adapter, SGE_DEBUG_DATA_LOW_A);
9875 idma->idma_qid[i] = (debug11 >> (i * 16)) & 0xffff;
9877 dev_warn(adapter->pdev_dev, "SGE idma%u, queue %u, potentially stuck in "
9878 "state %u for %d seconds (debug0=%#x, debug11=%#x)\n",
9879 i, idma->idma_qid[i], idma->idma_state[i],
9880 idma->idma_stalled[i] / hz,
9882 t4_sge_decode_idma_state(adapter, idma->idma_state[i]);
9887 * t4_load_cfg - download config file
9888 * @adap: the adapter
9889 * @cfg_data: the cfg text file to write
9890 * @size: text file size
9892 * Write the supplied config text file to the card's serial flash.
9894 int t4_load_cfg(struct adapter *adap, const u8 *cfg_data, unsigned int size)
9896 int ret, i, n, cfg_addr;
9898 unsigned int flash_cfg_start_sec;
9899 unsigned int sf_sec_size = adap->params.sf_size / adap->params.sf_nsec;
9901 cfg_addr = t4_flash_cfg_addr(adap);
9906 flash_cfg_start_sec = addr / SF_SEC_SIZE;
9908 if (size > FLASH_CFG_MAX_SIZE) {
9909 dev_err(adap->pdev_dev, "cfg file too large, max is %u bytes\n",
9910 FLASH_CFG_MAX_SIZE);
9914 i = DIV_ROUND_UP(FLASH_CFG_MAX_SIZE, /* # of sectors spanned */
9916 ret = t4_flash_erase_sectors(adap, flash_cfg_start_sec,
9917 flash_cfg_start_sec + i - 1);
9918 /* If size == 0 then we're simply erasing the FLASH sectors associated
9919 * with the on-adapter Firmware Configuration File.
9921 if (ret || size == 0)
9924 /* this will write to the flash up to SF_PAGE_SIZE at a time */
9925 for (i = 0; i < size; i += SF_PAGE_SIZE) {
9926 if ((size - i) < SF_PAGE_SIZE)
9930 ret = t4_write_flash(adap, addr, n, cfg_data);
9934 addr += SF_PAGE_SIZE;
9935 cfg_data += SF_PAGE_SIZE;
9940 dev_err(adap->pdev_dev, "config file %s failed %d\n",
9941 (size == 0 ? "clear" : "download"), ret);
 9946 * t4_set_vf_mac_acl - Set MAC address for the specified VF
9947 * @adapter: The adapter
9948 * @vf: one of the VFs instantiated by the specified PF
9949 * @naddr: the number of MAC addresses
9950 * @addr: the MAC address(es) to be set to the specified VF
9952 int t4_set_vf_mac_acl(struct adapter *adapter, unsigned int vf,
9953 unsigned int naddr, u8 *addr)
9955 struct fw_acl_mac_cmd cmd;
9957 memset(&cmd, 0, sizeof(cmd));
9958 cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_MAC_CMD) |
9961 FW_ACL_MAC_CMD_PFN_V(adapter->pf) |
9962 FW_ACL_MAC_CMD_VFN_V(vf));
9964 /* Note: Do not enable the ACL */
9965 cmd.en_to_len16 = cpu_to_be32((unsigned int)FW_LEN16(cmd));
9968 switch (adapter->pf) {
9970 memcpy(cmd.macaddr3, addr, sizeof(cmd.macaddr3));
9973 memcpy(cmd.macaddr2, addr, sizeof(cmd.macaddr2));
9976 memcpy(cmd.macaddr1, addr, sizeof(cmd.macaddr1));
9979 memcpy(cmd.macaddr0, addr, sizeof(cmd.macaddr0));
9983 return t4_wr_mbox(adapter, adapter->mbox, &cmd, sizeof(cmd), &cmd);
9987 * t4_read_pace_tbl - read the pace table
9988 * @adap: the adapter
9989 * @pace_vals: holds the returned values
9991 * Returns the values of TP's pace table in microseconds.
9993 void t4_read_pace_tbl(struct adapter *adap, unsigned int pace_vals[NTX_SCHED])
9997 for (i = 0; i < NTX_SCHED; i++) {
9998 t4_write_reg(adap, TP_PACE_TABLE_A, 0xffff0000 + i);
9999 v = t4_read_reg(adap, TP_PACE_TABLE_A);
10000 pace_vals[i] = dack_ticks_to_usec(adap, v);
10005 * t4_get_tx_sched - get the configuration of a Tx HW traffic scheduler
10006 * @adap: the adapter
10007 * @sched: the scheduler index
10008 * @kbps: the byte rate in Kbps
10009 * @ipg: the interpacket delay in tenths of nanoseconds
10010 * @sleep_ok: if true we may sleep while awaiting command completion
10012 * Return the current configuration of a HW Tx scheduler.
10014 void t4_get_tx_sched(struct adapter *adap, unsigned int sched,
10015 unsigned int *kbps, unsigned int *ipg, bool sleep_ok)
10017 unsigned int v, addr, bpt, cpt;
10020 addr = TP_TX_MOD_Q1_Q0_RATE_LIMIT_A - sched / 2;
10021 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10024 bpt = (v >> 8) & 0xff;
10027 *kbps = 0; /* scheduler disabled */
10029 v = (adap->params.vpd.cclk * 1000) / cpt; /* ticks/s */
10030 *kbps = (v * bpt) / 125;
10034 addr = TP_TX_MOD_Q1_Q0_TIMER_SEPARATOR_A - sched / 2;
10035 t4_tp_tm_pio_read(adap, &v, 1, addr, sleep_ok);
10039 *ipg = (10000 * v) / core_ticks_per_usec(adap);
10043 /* t4_sge_ctxt_rd - read an SGE context through FW
10044 * @adap: the adapter
10045 * @mbox: mailbox to use for the FW command
10046 * @cid: the context id
10047 * @ctype: the context type
10048 * @data: where to store the context data
10050 * Issues a FW command through the given mailbox to read an SGE context.
10052 int t4_sge_ctxt_rd(struct adapter *adap, unsigned int mbox, unsigned int cid,
10053 enum ctxt_type ctype, u32 *data)
10055 struct fw_ldst_cmd c;
10058 if (ctype == CTXT_FLM)
10059 ret = FW_LDST_ADDRSPC_SGE_FLMC;
10061 ret = FW_LDST_ADDRSPC_SGE_CONMC;
10063 memset(&c, 0, sizeof(c));
10064 c.op_to_addrspace = cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
10065 FW_CMD_REQUEST_F | FW_CMD_READ_F |
10066 FW_LDST_CMD_ADDRSPACE_V(ret));
10067 c.cycles_to_len16 = cpu_to_be32(FW_LEN16(c));
10068 c.u.idctxt.physid = cpu_to_be32(cid);
10070 ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
10072 data[0] = be32_to_cpu(c.u.idctxt.ctxt_data0);
10073 data[1] = be32_to_cpu(c.u.idctxt.ctxt_data1);
10074 data[2] = be32_to_cpu(c.u.idctxt.ctxt_data2);
10075 data[3] = be32_to_cpu(c.u.idctxt.ctxt_data3);
10076 data[4] = be32_to_cpu(c.u.idctxt.ctxt_data4);
10077 data[5] = be32_to_cpu(c.u.idctxt.ctxt_data5);
10083 * t4_sge_ctxt_rd_bd - read an SGE context bypassing FW
10084 * @adap: the adapter
10085 * @cid: the context id
10086 * @ctype: the context type
10087 * @data: where to store the context data
10089 * Reads an SGE context directly, bypassing FW. This is only for
10090 * debugging when FW is unavailable.
10092 int t4_sge_ctxt_rd_bd(struct adapter *adap, unsigned int cid,
10093 enum ctxt_type ctype, u32 *data)
10097 t4_write_reg(adap, SGE_CTXT_CMD_A, CTXTQID_V(cid) | CTXTTYPE_V(ctype));
10098 ret = t4_wait_op_done(adap, SGE_CTXT_CMD_A, BUSY_F, 0, 3, 1);
10100 for (i = SGE_CTXT_DATA0_A; i <= SGE_CTXT_DATA5_A; i += 4)
10101 *data++ = t4_read_reg(adap, i);
10105 int t4_sched_params(struct adapter *adapter, int type, int level, int mode,
10106 int rateunit, int ratemode, int channel, int class,
10107 int minrate, int maxrate, int weight, int pktsize)
10109 struct fw_sched_cmd cmd;
10111 memset(&cmd, 0, sizeof(cmd));
10112 cmd.op_to_write = cpu_to_be32(FW_CMD_OP_V(FW_SCHED_CMD) |
10115 cmd.retval_len16 = cpu_to_be32(FW_LEN16(cmd));
10117 cmd.u.params.sc = FW_SCHED_SC_PARAMS;
10118 cmd.u.params.type = type;
10119 cmd.u.params.level = level;
10120 cmd.u.params.mode = mode;
10121 cmd.u.params.ch = channel;
10122 cmd.u.params.cl = class;
10123 cmd.u.params.unit = rateunit;
10124 cmd.u.params.rate = ratemode;
10125 cmd.u.params.min = cpu_to_be32(minrate);
10126 cmd.u.params.max = cpu_to_be32(maxrate);
10127 cmd.u.params.weight = cpu_to_be16(weight);
10128 cmd.u.params.pktsize = cpu_to_be16(pktsize);
10130 return t4_wr_mbox_meat(adapter, adapter->mbox, &cmd, sizeof(cmd),
10135 * t4_i2c_rd - read I2C data from adapter
10136 * @adap: the adapter
10137 * @port: Port number if per-port device; <0 if not
10138 * @devid: per-port device ID or absolute device ID
10139 * @offset: byte offset into device I2C space
10140 * @len: byte length of I2C space data
10141 * @buf: buffer in which to return I2C data
10143 * Reads the I2C data from the indicated device and location.
10145 int t4_i2c_rd(struct adapter *adap, unsigned int mbox, int port,
10146 unsigned int devid, unsigned int offset,
10147 unsigned int len, u8 *buf)
10149 struct fw_ldst_cmd ldst_cmd, ldst_rpl;
10150 unsigned int i2c_max = sizeof(ldst_cmd.u.i2c.data);
10153 if (len > I2C_PAGE_SIZE)
10156 /* Dont allow reads that spans multiple pages */
10157 if (offset < I2C_PAGE_SIZE && offset + len > I2C_PAGE_SIZE)
10160 memset(&ldst_cmd, 0, sizeof(ldst_cmd));
10161 ldst_cmd.op_to_addrspace =
10162 cpu_to_be32(FW_CMD_OP_V(FW_LDST_CMD) |
10165 FW_LDST_CMD_ADDRSPACE_V(FW_LDST_ADDRSPC_I2C));
10166 ldst_cmd.cycles_to_len16 = cpu_to_be32(FW_LEN16(ldst_cmd));
10167 ldst_cmd.u.i2c.pid = (port < 0 ? 0xff : port);
10168 ldst_cmd.u.i2c.did = devid;
10171 unsigned int i2c_len = (len < i2c_max) ? len : i2c_max;
10173 ldst_cmd.u.i2c.boffset = offset;
10174 ldst_cmd.u.i2c.blen = i2c_len;
10176 ret = t4_wr_mbox(adap, mbox, &ldst_cmd, sizeof(ldst_cmd),
10181 memcpy(buf, ldst_rpl.u.i2c.data, i2c_len);
10191 * t4_set_vlan_acl - Set a VLAN id for the specified VF
10192 * @adapter: the adapter
10193 * @mbox: mailbox to use for the FW command
10194 * @vf: one of the VFs instantiated by the specified PF
10195 * @vlan: The vlanid to be set
10197 int t4_set_vlan_acl(struct adapter *adap, unsigned int mbox, unsigned int vf,
10200 struct fw_acl_vlan_cmd vlan_cmd;
10201 unsigned int enable;
10203 enable = (vlan ? FW_ACL_VLAN_CMD_EN_F : 0);
10204 memset(&vlan_cmd, 0, sizeof(vlan_cmd));
10205 vlan_cmd.op_to_vfn = cpu_to_be32(FW_CMD_OP_V(FW_ACL_VLAN_CMD) |
10209 FW_ACL_VLAN_CMD_PFN_V(adap->pf) |
10210 FW_ACL_VLAN_CMD_VFN_V(vf));
10211 vlan_cmd.en_to_len16 = cpu_to_be32(enable | FW_LEN16(vlan_cmd));
10212 /* Drop all packets that donot match vlan id */
10213 vlan_cmd.dropnovlan_fm = FW_ACL_VLAN_CMD_FM_F;
10215 vlan_cmd.nvlan = 1;
10216 vlan_cmd.vlanid[0] = cpu_to_be16(vlan);
10219 return t4_wr_mbox(adap, adap->mbox, &vlan_cmd, sizeof(vlan_cmd), NULL);