/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/of.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nic"
#define DRV_VERSION	"1.0"

struct nicpf {
	struct pci_dev		*pdev;
	u8			node;
	unsigned int		flags;
	u8			num_vf_en;	/* No of VF enabled */
	bool			vf_enabled[MAX_NUM_VFS_SUPPORTED];
	void __iomem		*reg_base;	/* Register start address */
	u8			num_sqs_en;	/* Secondary qsets enabled */
	u64			nicvf[MAX_NUM_VFS_SUPPORTED];
	u8			vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
	u8			pqs_vf[MAX_NUM_VFS_SUPPORTED];
	bool			sqs_used[MAX_NUM_VFS_SUPPORTED];
	struct pkind_cfg	pkind;
#define	NIC_SET_VF_LMAC_MAP(bgx, lmac)		(((bgx & 0xF) << 4) | (lmac & 0xF))
#define	NIC_GET_BGX_FROM_VF_LMAC_MAP(map)	((map >> 4) & 0xF)
#define	NIC_GET_LMAC_FROM_VF_LMAC_MAP(map)	(map & 0xF)
	u8			vf_lmac_map[MAX_LMAC];
	struct delayed_work	dwork;
	struct workqueue_struct	*check_link;
	u8			link[MAX_LMAC];
	u8			duplex[MAX_LMAC];
	u32			speed[MAX_LMAC];
	u16			cpi_base[MAX_NUM_VFS_SUPPORTED];
	u16			rssi_base[MAX_NUM_VFS_SUPPORTED];
	u16			rss_ind_tbl_size;
	bool			mbx_lock[MAX_NUM_VFS_SUPPORTED];

	/* MSI-X */
	bool			msix_enabled;
	u8			num_vec;
	struct msix_entry	msix_entries[NIC_PF_MSIX_VECTORS];
	bool			irq_allocated[NIC_PF_MSIX_VECTORS];
};
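
/* A worked example of the VF::LMAC map encoding above, using illustrative
 * values: NIC_SET_VF_LMAC_MAP(1, 2) = ((1 & 0xF) << 4) | (2 & 0xF) = 0x12,
 * from which NIC_GET_BGX_FROM_VF_LMAC_MAP(0x12) recovers BGX 1 and
 * NIC_GET_LMAC_FROM_VF_LMAC_MAP(0x12) recovers LMAC 2.
 */
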
static inline bool pass1_silicon(struct nicpf *nic)
{
	return nic->pdev->revision < 8;
}

/* Supported devices */
static const struct pci_device_id nic_id_table[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, PCI_DEVICE_ID_THUNDER_NIC_PF) },
	{ 0, }	/* end of table */
};

MODULE_AUTHOR("Sunil Goutham");
MODULE_DESCRIPTION("Cavium Thunder NIC Physical Function Driver");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, nic_id_table);

/* The Cavium ThunderX network controller can *only* be found in SoCs
 * containing the ThunderX ARM64 CPU implementation.  All accesses to the device
 * registers on this platform are implicitly strongly ordered with respect
 * to memory accesses. So writeq_relaxed() and readq_relaxed() are safe to use
 * with no memory barriers in this driver.  The readq()/writeq() functions add
 * explicit ordering operations, which in this case are redundant and only
 * add overhead.
 */
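
/* (For reference: on arm64 the ordered accessors expand to roughly
 * writeq(v, c) == __iowmb() + writeq_relaxed(v, c) and
 * readq(c) == readq_relaxed(c) + __iormb(); the barrier names are an
 * assumption about the arm64 implementation, shown only to illustrate
 * the overhead being avoided.)
 */
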
/* Register read/write APIs */
static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
{
	writeq_relaxed(val, nic->reg_base + offset);
}

static u64 nic_reg_read(struct nicpf *nic, u64 offset)
{
	return readq_relaxed(nic->reg_base + offset);
}

/* PF -> VF mailbox communication APIs */
static void nic_enable_mbx_intr(struct nicpf *nic)
{
	/* Enable mailbox interrupt for all 128 VFs */
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S, ~0ull);
	nic_reg_write(nic, NIC_PF_MAILBOX_ENA_W1S + sizeof(u64), ~0ull);
}

static void nic_clear_mbx_intr(struct nicpf *nic, int vf, int mbx_reg)
{
	nic_reg_write(nic, NIC_PF_MAILBOX_INT + (mbx_reg << 3), BIT_ULL(vf));
}

static u64 nic_get_mbx_addr(int vf)
{
	return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
}
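
/* For example, assuming NIC_VF_NUM_SHIFT is 21 (i.e. a 2MB per-VF register
 * stride; nic_reg.h holds the authoritative value), VF3's mailbox window
 * would start at NIC_PF_VF_0_127_MAILBOX_0_1 + (3 << 21).
 */
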
/* Send a mailbox message to VF
 * @vf: vf to which this message is to be sent
 * @mbx: Message to be sent
 */
static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
{
	void __iomem *mbx_addr = nic->reg_base + nic_get_mbx_addr(vf);
	u64 *msg = (u64 *)mbx;

	/* In first revision HW, mbox interrupt is triggered
	 * when PF writes to MBOX(1), in next revisions when
	 * PF writes to MBOX(0)
	 */
	if (pass1_silicon(nic)) {
		/* see the comment for nic_reg_write()/nic_reg_read()
		 * functions above
		 */
		writeq_relaxed(msg[0], mbx_addr);
		writeq_relaxed(msg[1], mbx_addr + 8);
	} else {
		writeq_relaxed(msg[1], mbx_addr + 8);
		writeq_relaxed(msg[0], mbx_addr);
	}
}

/* Responds to VF's READY message with VF's
 * ID, node, MAC address etc.
 * @vf: VF which sent READY message
 */
static void nic_mbx_send_ready(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	int bgx_idx, lmac;
	const char *mac;

	mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
	mbx.nic_cfg.vf_id = vf;

	mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;

	if (vf < MAX_LMAC) {
		bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

		mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
		if (mac)
			ether_addr_copy((u8 *)&mbx.nic_cfg.mac_addr, mac);
	}
	mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
	mbx.nic_cfg.node_id = nic->node;

	mbx.nic_cfg.loopback_supported = vf < MAX_LMAC;

	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* ACKs VF's mailbox message
 * @vf: VF to which ACK is to be sent
 */
static void nic_mbx_send_ack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_ACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* NACKs VF's mailbox message to indicate that the PF was not
 * able to complete the action
 * @vf: VF to which NACK is to be sent
 */
static void nic_mbx_send_nack(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_NACK;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* Flush all in flight receive packets to memory and
 * bring down an active RQ
 */
static int nic_rcv_queue_sw_sync(struct nicpf *nic)
{
	u16 timeout = ~0x00;

	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
	/* Wait till sync cycle is finished */
	while (timeout) {
		if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
			break;
		timeout--;
	}
	nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
	if (!timeout) {
		dev_err(&nic->pdev->dev, "Receive queue software sync failed");
		return 1;
	}
	return 0;
}

/* Get BGX Rx/Tx stats and respond to VF's request */
static void nic_get_bgx_stats(struct nicpf *nic, struct bgx_stats_msg *bgx)
{
	int bgx_idx, lmac;
	union nic_mbx mbx = {};

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[bgx->vf_id]);

	mbx.bgx_stats.msg = NIC_MBOX_MSG_BGX_STATS;
	mbx.bgx_stats.vf_id = bgx->vf_id;
	mbx.bgx_stats.rx = bgx->rx;
	mbx.bgx_stats.idx = bgx->idx;
	if (bgx->rx)
		mbx.bgx_stats.stats = bgx_get_rx_stats(nic->node, bgx_idx,
						       lmac, bgx->idx);
	else
		mbx.bgx_stats.stats = bgx_get_tx_stats(nic->node, bgx_idx,
						       lmac, bgx->idx);
	nic_send_msg_to_vf(nic, bgx->vf_id, &mbx);
}

/* Update hardware min/max frame size */
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
{
	if ((new_frs > NIC_HW_MAX_FRS) || (new_frs < NIC_HW_MIN_FRS)) {
		dev_err(&nic->pdev->dev,
			"Invalid MTU setting from VF%d rejected, should be between %d and %d\n",
			vf, NIC_HW_MIN_FRS, NIC_HW_MAX_FRS);
		return 1;
	}

	if (new_frs <= nic->pkind.maxlen)
		return 0;

	nic->pkind.maxlen = new_frs;
	nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *(u64 *)&nic->pkind);
	return 0;
}

/* Set minimum transmit packet size */
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
{
	int lmac;
	u64 lmac_cfg;

	/* Max value that can be set is 60 */
	if (size > 60)
		size = 60;

	for (lmac = 0; lmac < (MAX_BGX_PER_CN88XX * MAX_LMAC_PER_BGX); lmac++) {
		lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
		lmac_cfg &= ~(0xF << 2);
		lmac_cfg |= ((size / 4) << 2);
		nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
	}
}
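
/* The pad-size field written above is 4 bits wide and counts 4-byte units,
 * which is why the value is clamped to 60: size = 60 encodes as 60 / 4 = 15
 * (0xF), the largest value the field can hold.
 */
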
/* Function to check number of LMACs present and set VF::LMAC mapping.
 * Mapping will be used while initializing channels.
 */
static void nic_set_lmac_vf_mapping(struct nicpf *nic)
{
	unsigned int bgx_map = bgx_get_map(nic->node);
	int bgx, next_bgx_lmac = 0;
	int lmac, lmac_cnt = 0;
	u64 lmac_credit;

	nic->num_vf_en = 0;

	for (bgx = 0; bgx < NIC_MAX_BGX; bgx++) {
		if (!(bgx_map & (1 << bgx)))
			continue;
		lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
		for (lmac = 0; lmac < lmac_cnt; lmac++)
			nic->vf_lmac_map[next_bgx_lmac++] =
				NIC_SET_VF_LMAC_MAP(bgx, lmac);
		nic->num_vf_en += lmac_cnt;

		/* Program LMAC credits */
		lmac_credit = (1ull << 1);	/* channel credit enable */
		lmac_credit |= (0x1ff << 2);	/* Max outstanding pkt count */
		/* 48KB BGX Tx buffer size, each unit is of size 16bytes */
		lmac_credit |= (((((48 * 1024) / lmac_cnt) -
				NIC_HW_MAX_FRS) / 16) << 12);
		lmac = bgx * MAX_LMAC_PER_BGX;
		for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
			nic_reg_write(nic,
				      NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
				      lmac_credit);
	}
}
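
/* Illustrative credit arithmetic for the loop above, assuming
 * NIC_HW_MAX_FRS is 9190 bytes: with lmac_cnt = 4 LMACs sharing the 48KB
 * BGX Tx buffer, each LMAC gets ((48 * 1024) / 4 - 9190) / 16 = 193
 * sixteen-byte credit units programmed into bits 12 and up of the register.
 */
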
static void nic_init_hw(struct nicpf *nic)
{
	int i;
	u64 cqm_cfg;

	/* Enable NIC HW block */
	nic_reg_write(nic, NIC_PF_CFG, 0x3);

	/* Enable backpressure */
	nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);

	/* Disable TNS mode on both interfaces */
	nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG,
		      (NIC_TNS_BYPASS_MODE << 7) | BGX0_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8),
		      (NIC_TNS_BYPASS_MODE << 7) | BGX1_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG,
		      (1ULL << 63) | BGX0_BLOCK);
	nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
		      (1ULL << 63) | BGX1_BLOCK);

	/* PKIND configuration */
	nic->pkind.minlen = 0;
	nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
	nic->pkind.lenerr_en = 1;
	nic->pkind.rx_hdr = 0;
	nic->pkind.hdr_sl = 0;

	for (i = 0; i < NIC_MAX_PKIND; i++)
		nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3),
			      *(u64 *)&nic->pkind);

	nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);

	/* Timer config */
	nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);

	/* Enable VLAN ethertype matching and stripping */
	nic_reg_write(nic, NIC_PF_RX_ETYPE_0_7,
		      (2 << 19) | (ETYPE_ALG_VLAN_STRIP << 16) | ETH_P_8021Q);

	/* Check if HW expected value is higher (could be in future chips) */
	cqm_cfg = nic_reg_read(nic, NIC_PF_CQM_CFG);
	if (cqm_cfg < NICPF_CQM_MIN_DROP_LEVEL)
		nic_reg_write(nic, NIC_PF_CQM_CFG, NICPF_CQM_MIN_DROP_LEVEL);
}

/* Channel parse index configuration */
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
{
	u32 vnic, bgx, lmac, chan;
	u32 padd, cpi_count = 0;
	u64 cpi_base, cpi, rssi_base, rssi;
	u8  qset, rq_idx = 0;

	vnic = cfg->vf_id;
	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);

	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	cpi_base = (lmac * NIC_MAX_CPI_PER_LMAC) + (bgx * NIC_CPI_PER_BGX);
	rssi_base = (lmac * nic->rss_ind_tbl_size) + (bgx * NIC_RSSI_PER_BGX);

	/* Rx channel configuration */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
		      (1ull << 63) | (vnic << 0));
	nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
		      ((u64)cfg->cpi_alg << 62) | (cpi_base << 48));

	if (cfg->cpi_alg == CPI_ALG_NONE)
		cpi_count = 1;
	else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
		cpi_count = 8;
	else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
		cpi_count = 16;
	else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6 bits DSCP */
		cpi_count = NIC_MAX_CPI_PER_LMAC;

	/* RSS Qset, Qidx mapping */
	qset = cfg->vf_id;
	rssi = rssi_base;
	for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | rq_idx);
		rq_idx++;
	}

	rssi = 0;
	cpi = cpi_base;
	for (; cpi < (cpi_base + cpi_count); cpi++) {
		/* Determine port to channel adder */
		if (cfg->cpi_alg != CPI_ALG_DIFF)
			padd = cpi % cpi_count;
		else
			padd = cpi % 8; /* 3 bits CS out of 6 bits DSCP */

		/* Leave RSS_SIZE as '0' to disable RSS */
		if (pass1_silicon(nic)) {
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (padd << 16) |
				      (rssi_base + rssi));
		} else {
			/* Set MPI_ALG to '0' to disable MCAM parsing */
			nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
				      (padd << 16));
			/* MPI index is same as CPI if MPI_ALG is not enabled */
			nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
				      (vnic << 24) | (rssi_base + rssi));
		}

		if ((rssi + 1) >= cfg->rq_cnt)
			continue;

		if (cfg->cpi_alg == CPI_ALG_VLAN)
			rssi++;
		else if (cfg->cpi_alg == CPI_ALG_VLAN16)
			rssi = ((cpi - cpi_base) & 0xe) >> 1;
		else if (cfg->cpi_alg == CPI_ALG_DIFF)
			rssi = ((cpi - cpi_base) & 0x38) >> 3;
	}
	nic->cpi_base[cfg->vf_id] = cpi_base;
	nic->rssi_base[cfg->vf_id] = rssi_base;
}
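
/* Worked example of the CPI walk above, assuming CPI_ALG_VLAN and
 * rq_cnt = 2: cpi_count = 8, so CPIs cpi_base..cpi_base + 7 are programmed;
 * padd takes each value 0..7 (one per VLAN PCP, assuming cpi_base is
 * 8-aligned), the first CPI maps to RSS entry rssi_base and the remaining
 * seven map to rssi_base + 1.
 */
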
/* Responds to VF with its RSS indirection table size */
static void nic_send_rss_size(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};

	mbx.rss_size.msg = NIC_MBOX_MSG_RSS_SIZE;
	mbx.rss_size.ind_tbl_size = nic->rss_ind_tbl_size;
	nic_send_msg_to_vf(nic, vf, &mbx);
}

/* Receive side scaling configuration
 * configure:
 * - RSS index
 * - indirection table, i.e. hash::RQ mapping
 * - number of hash bits to consider
 */
static void nic_config_rss(struct nicpf *nic, struct rss_cfg_msg *cfg)
{
	u8  qset, idx = 0;
	u64 cpi_cfg, cpi_base, rssi_base, rssi;
	u64 idx_addr;

	rssi_base = nic->rssi_base[cfg->vf_id] + cfg->tbl_offset;

	rssi = rssi_base;
	qset = cfg->vf_id;

	for (; rssi < (rssi_base + cfg->tbl_len); rssi++) {
		u8 svf = cfg->ind_tbl[idx] >> 3;

		if (svf)
			qset = nic->vf_sqs[cfg->vf_id][svf - 1];
		else
			qset = cfg->vf_id;
		nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
			      (qset << 3) | (cfg->ind_tbl[idx] & 0x7));
		idx++;
	}

	cpi_base = nic->cpi_base[cfg->vf_id];
	if (pass1_silicon(nic))
		idx_addr = NIC_PF_CPI_0_2047_CFG;
	else
		idx_addr = NIC_PF_MPI_0_2047_CFG;
	cpi_cfg = nic_reg_read(nic, idx_addr | (cpi_base << 3));
	cpi_cfg &= ~(0xFULL << 20);
	cpi_cfg |= (cfg->hash_bits << 20);
	nic_reg_write(nic, idx_addr | (cpi_base << 3), cpi_cfg);
}

/* 4 level transmit side scheduler configuration
 * for TNS bypass mode
 *
 * Sample configuration for SQ0
 * VNIC0-SQ0 -> TL4(0)   -> TL3[0]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC1-SQ0 -> TL4(8)   -> TL3[2]   -> TL2[0]  -> TL1[0] -> BGX0
 * VNIC2-SQ0 -> TL4(16)  -> TL3[4]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC3-SQ0 -> TL4(24)  -> TL3[6]   -> TL2[1]  -> TL1[0] -> BGX0
 * VNIC4-SQ0 -> TL4(512) -> TL3[128] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC5-SQ0 -> TL4(520) -> TL3[130] -> TL2[32] -> TL1[1] -> BGX1
 * VNIC6-SQ0 -> TL4(528) -> TL3[132] -> TL2[33] -> TL1[1] -> BGX1
 * VNIC7-SQ0 -> TL4(536) -> TL3[134] -> TL2[33] -> TL1[1] -> BGX1
 */
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
			       struct sq_cfg_msg *sq)
{
	u32 bgx, lmac, chan;
	u32 tl2, tl3, tl4;
	u32 rr_quantum;
	u8 sq_idx = sq->sq_num;
	u8 pqs_vnic;

	if (sq->sqs_mode)
		pqs_vnic = nic->pqs_vf[vnic];
	else
		pqs_vnic = vnic;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);

	/* 24 bytes for FCS, IPG and preamble */
	rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);

	tl4 = (lmac * NIC_TL4_PER_LMAC) + (bgx * NIC_TL4_PER_BGX);
	tl4 += sq_idx;
	if (sq->sqs_mode)
		tl4 += vnic * 8;

	tl3 = tl4 / (NIC_MAX_TL4 / NIC_MAX_TL3);
	nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
		      ((u64)vnic << NIC_QS_ID_SHIFT) |
		      ((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
	nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
		      ((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);

	nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
	chan = (lmac * MAX_BGX_CHANS_PER_LMAC) + (bgx * NIC_CHANS_PER_INF);
	nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
	/* Enable backpressure on the channel */
	nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);

	tl2 = tl3 >> 2;
	nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
	nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
	/* No priorities as of now */
	nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
}
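
/* Example rr_quantum arithmetic from above, assuming NIC_HW_MAX_FRS is
 * 9190 bytes: (9190 + 24) / 4 = 2303 quantum units, i.e. the round-robin
 * weight covers one maximum-sized frame plus FCS, IPG and preamble.
 */
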
/* Send primary nicvf pointer to secondary QS's VF */
static void nic_send_pnicvf(struct nicpf *nic, int sqs)
{
	union nic_mbx mbx = {};

	mbx.nicvf.msg = NIC_MBOX_MSG_PNICVF_PTR;
	mbx.nicvf.nicvf = nic->nicvf[nic->pqs_vf[sqs]];
	nic_send_msg_to_vf(nic, sqs, &mbx);
}

/* Send SQS's nicvf pointer to primary QS's VF */
static void nic_send_snicvf(struct nicpf *nic, struct nicvf_ptr *nicvf)
{
	union nic_mbx mbx = {};
	int sqs_id = nic->vf_sqs[nicvf->vf_id][nicvf->sqs_id];

	mbx.nicvf.msg = NIC_MBOX_MSG_SNICVF_PTR;
	mbx.nicvf.sqs_id = nicvf->sqs_id;
	mbx.nicvf.nicvf = nic->nicvf[sqs_id];
	nic_send_msg_to_vf(nic, nicvf->vf_id, &mbx);
}

/* Find next available Qset that can be assigned as a
 * secondary Qset to a VF.
 */
static int nic_nxt_avail_sqs(struct nicpf *nic)
{
	int sqs;

	for (sqs = 0; sqs < nic->num_sqs_en; sqs++) {
		if (!nic->sqs_used[sqs]) {
			nic->sqs_used[sqs] = true;
			return sqs + nic->num_vf_en;
		}
	}
	return -1;
}

/* Allocate additional Qsets for requested VF */
static void nic_alloc_sqs(struct nicpf *nic, struct sqs_alloc *sqs)
{
	union nic_mbx mbx = {};
	int idx, alloc_qs = 0;
	int sqs_id;

	if (!nic->num_sqs_en)
		goto send_mbox;

	for (idx = 0; idx < sqs->qs_count; idx++) {
		sqs_id = nic_nxt_avail_sqs(nic);
		if (sqs_id < 0)
			break;
		nic->vf_sqs[sqs->vf_id][idx] = sqs_id;
		nic->pqs_vf[sqs_id] = sqs->vf_id;
		alloc_qs++;
	}

send_mbox:
	mbx.sqs_alloc.msg = NIC_MBOX_MSG_ALLOC_SQS;
	mbx.sqs_alloc.vf_id = sqs->vf_id;
	mbx.sqs_alloc.qs_count = alloc_qs;
	nic_send_msg_to_vf(nic, sqs->vf_id, &mbx);
}

static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
{
	int bgx_idx, lmac_idx;

	if (lbk->vf_id >= MAX_LMAC)
		return -1;

	bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
	lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);

	bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);

	return 0;
}

static void nic_enable_vf(struct nicpf *nic, int vf, bool enable)
{
	int bgx, lmac;

	nic->vf_enabled[vf] = enable;

	if (vf >= nic->num_vf_en)
		return;

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);

	bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, enable);
}

/* Interrupt handler to handle mailbox messages from VFs */
static void nic_handle_mbx_intr(struct nicpf *nic, int vf)
{
	union nic_mbx mbx = {};
	u64 *mbx_data;
	u64 mbx_addr;
	u64 reg_addr;
	u64 cfg;
	int bgx, lmac;
	int i;
	int ret = 0;

	nic->mbx_lock[vf] = true;

	mbx_addr = nic_get_mbx_addr(vf);
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nic_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	dev_dbg(&nic->pdev->dev, "%s: Mailbox msg %d from VF%d\n",
		__func__, mbx.msg.msg, vf);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic_mbx_send_ready(nic, vf);
		if (vf < MAX_LMAC) {
			nic->link[vf] = 0;
			nic->duplex[vf] = 0;
			nic->speed[vf] = 0;
		}
		ret = 1;
		break;
	case NIC_MBOX_MSG_QS_CFG:
		reg_addr = NIC_PF_QSET_0_127_CFG |
			   (mbx.qs.num << NIC_QS_ID_SHIFT);
		cfg = mbx.qs.cfg;
		/* Check if it's a secondary Qset */
		if (vf >= nic->num_vf_en) {
			cfg = cfg & (~0x7FULL);
			/* Assign this Qset to primary Qset's VF */
			cfg |= nic->pqs_vf[vf];
		}
		nic_reg_write(nic, reg_addr, cfg);
		break;
	case NIC_MBOX_MSG_RQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_BP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_RQ_SW_SYNC:
		ret = nic_rcv_queue_sw_sync(nic);
		break;
	case NIC_MBOX_MSG_RQ_DROP_CFG:
		reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
			   (mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.rq.cfg);
		break;
	case NIC_MBOX_MSG_SQ_CFG:
		reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
			   (mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
			   (mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
		nic_reg_write(nic, reg_addr, mbx.sq.cfg);
		nic_tx_channel_cfg(nic, mbx.qs.num, &mbx.sq);
		break;
	case NIC_MBOX_MSG_SET_MAC:
		if (vf >= nic->num_vf_en)
			break;
		lmac = mbx.mac.vf_id;
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
		bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
		break;
	case NIC_MBOX_MSG_SET_MAX_FRS:
		ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
					mbx.frs.vf_id);
		break;
	case NIC_MBOX_MSG_CPI_CFG:
		nic_config_cpi(nic, &mbx.cpi_cfg);
		break;
	case NIC_MBOX_MSG_RSS_SIZE:
		nic_send_rss_size(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_RSS_CFG:
	case NIC_MBOX_MSG_RSS_CFG_CONT:
		nic_config_rss(nic, &mbx.rss_cfg);
		break;
	case NIC_MBOX_MSG_CFG_DONE:
		/* Last message of VF config msg sequence */
		nic_enable_vf(nic, vf, true);
		goto unlock;
	case NIC_MBOX_MSG_SHUTDOWN:
		/* First msg in VF teardown sequence */
		if (vf >= nic->num_vf_en)
			nic->sqs_used[vf - nic->num_vf_en] = false;
		nic->pqs_vf[vf] = 0;
		nic_enable_vf(nic, vf, false);
		break;
	case NIC_MBOX_MSG_ALLOC_SQS:
		nic_alloc_sqs(nic, &mbx.sqs_alloc);
		goto unlock;
	case NIC_MBOX_MSG_NICVF_PTR:
		nic->nicvf[vf] = mbx.nicvf.nicvf;
		break;
	case NIC_MBOX_MSG_PNICVF_PTR:
		nic_send_pnicvf(nic, vf);
		goto unlock;
	case NIC_MBOX_MSG_SNICVF_PTR:
		nic_send_snicvf(nic, &mbx.nicvf);
		goto unlock;
	case NIC_MBOX_MSG_BGX_STATS:
		nic_get_bgx_stats(nic, &mbx.bgx_stats);
		goto unlock;
	case NIC_MBOX_MSG_LOOPBACK:
		ret = nic_config_loopback(nic, &mbx.lbk);
		break;
	default:
		dev_err(&nic->pdev->dev,
			"Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
		break;
	}

	if (!ret)
		nic_mbx_send_ack(nic, vf);
	else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
		nic_mbx_send_nack(nic, vf);
unlock:
	nic->mbx_lock[vf] = false;
}
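
/* A typical VF bring-up as seen by this handler (sequence inferred from the
 * cases above, not a normative protocol spec): READY first, then
 * QS_CFG/RQ_CFG/SQ_CFG and CPI_CFG (optionally RSS_SIZE and RSS_CFG), and
 * finally CFG_DONE, which enables the VF and skips the usual ACK reply.
 */
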
static void nic_mbx_intr_handler(struct nicpf *nic, int mbx)
{
	u64 intr;
	u8  vf, vf_per_mbx_reg = 64;

	intr = nic_reg_read(nic, NIC_PF_MAILBOX_INT + (mbx << 3));
	dev_dbg(&nic->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
	for (vf = 0; vf < vf_per_mbx_reg; vf++) {
		if (intr & (1ULL << vf)) {
			dev_dbg(&nic->pdev->dev, "Intr from VF %d\n",
				vf + (mbx * vf_per_mbx_reg));

			nic_handle_mbx_intr(nic, vf + (mbx * vf_per_mbx_reg));
			nic_clear_mbx_intr(nic, vf, mbx);
		}
	}
}

static irqreturn_t nic_mbx0_intr_handler(int irq, void *nic_irq)
{
	struct nicpf *nic = (struct nicpf *)nic_irq;

	nic_mbx_intr_handler(nic, 0);

	return IRQ_HANDLED;
}

static irqreturn_t nic_mbx1_intr_handler(int irq, void *nic_irq)
{
	struct nicpf *nic = (struct nicpf *)nic_irq;

	nic_mbx_intr_handler(nic, 1);

	return IRQ_HANDLED;
}

static int nic_enable_msix(struct nicpf *nic)
{
	int i, ret;

	nic->num_vec = NIC_PF_MSIX_VECTORS;

	for (i = 0; i < nic->num_vec; i++)
		nic->msix_entries[i].entry = i;

	ret = pci_enable_msix(nic->pdev, nic->msix_entries, nic->num_vec);
	if (ret) {
		dev_err(&nic->pdev->dev,
			"Request for #%d msix vectors failed\n",
			nic->num_vec);
		return ret;
	}

	nic->msix_enabled = 1;
	return 0;
}

static void nic_disable_msix(struct nicpf *nic)
{
	if (nic->msix_enabled) {
		pci_disable_msix(nic->pdev);
		nic->msix_enabled = 0;
		nic->num_vec = 0;
	}
}

static void nic_free_all_interrupts(struct nicpf *nic)
{
	int irq;

	for (irq = 0; irq < nic->num_vec; irq++) {
		if (nic->irq_allocated[irq])
			free_irq(nic->msix_entries[irq].vector, nic);
		nic->irq_allocated[irq] = false;
	}
}

static int nic_register_interrupts(struct nicpf *nic)
{
	int ret;

	/* Enable MSI-X */
	ret = nic_enable_msix(nic);
	if (ret)
		return ret;

	/* Register mailbox interrupt handlers */
	ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX0].vector,
			  nic_mbx0_intr_handler, 0, "NIC Mbox0", nic);
	if (ret)
		goto fail;

	nic->irq_allocated[NIC_PF_INTR_ID_MBOX0] = true;

	ret = request_irq(nic->msix_entries[NIC_PF_INTR_ID_MBOX1].vector,
			  nic_mbx1_intr_handler, 0, "NIC Mbox1", nic);
	if (ret)
		goto fail;

	nic->irq_allocated[NIC_PF_INTR_ID_MBOX1] = true;

	/* Enable mailbox interrupt */
	nic_enable_mbx_intr(nic);
	return 0;

fail:
	dev_err(&nic->pdev->dev, "Request irq failed\n");
	nic_free_all_interrupts(nic);
	return ret;
}

static void nic_unregister_interrupts(struct nicpf *nic)
{
	nic_free_all_interrupts(nic);
	nic_disable_msix(nic);
}

static int nic_num_sqs_en(struct nicpf *nic, int vf_en)
{
	int pos, sqs_per_vf = MAX_SQS_PER_VF_SINGLE_NODE;
	u16 total_vf;

	/* Check if it's a multi-node environment */
	if (nr_node_ids > 1)
		sqs_per_vf = MAX_SQS_PER_VF;

	pos = pci_find_ext_capability(nic->pdev, PCI_EXT_CAP_ID_SRIOV);
	pci_read_config_word(nic->pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf);
	return min(total_vf - vf_en, vf_en * sqs_per_vf);
}
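
/* Illustrative sizing with assumed numbers: vf_en = 8 primary VFs, SR-IOV
 * TotalVFs = 128 and sqs_per_vf = 11 would yield min(128 - 8, 8 * 11) = 88
 * secondary Qsets; the real limits come from the MAX_SQS_PER_VF* values
 * in nic.h.
 */
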
static int nic_sriov_init(struct pci_dev *pdev, struct nicpf *nic)
{
	int pos = 0;
	int vf_en;
	int err;
	u16 total_vf_cnt;

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
	if (!pos) {
		dev_err(&pdev->dev, "SRIOV capability is not found in PCIe config space\n");
		return -ENODEV;
	}

	pci_read_config_word(pdev, (pos + PCI_SRIOV_TOTAL_VF), &total_vf_cnt);
	if (total_vf_cnt < nic->num_vf_en)
		nic->num_vf_en = total_vf_cnt;

	if (!total_vf_cnt)
		return 0;

	vf_en = nic->num_vf_en;
	nic->num_sqs_en = nic_num_sqs_en(nic, nic->num_vf_en);
	vf_en += nic->num_sqs_en;

	err = pci_enable_sriov(pdev, vf_en);
	if (err) {
		dev_err(&pdev->dev, "SRIOV enable failed, num VF is %d\n",
			vf_en);
		nic->num_vf_en = 0;
		return err;
	}

	dev_info(&pdev->dev, "SRIOV enabled, number of VF available %d\n",
		 vf_en);

	nic->flags |= NIC_SRIOV_ENABLED;
	return 0;
}

/* Poll for BGX LMAC link status and update corresponding VF
 * if there is a change; valid only if the internal L2 switch
 * is not present, otherwise the VF link is always treated as up
 */
static void nic_poll_for_link(struct work_struct *work)
{
	union nic_mbx mbx = {};
	struct nicpf *nic;
	struct bgx_link_status link;
	u8 vf, bgx, lmac;

	nic = container_of(work, struct nicpf, dwork.work);

	mbx.link_status.msg = NIC_MBOX_MSG_BGX_LINK_CHANGE;

	for (vf = 0; vf < nic->num_vf_en; vf++) {
		/* Poll only if VF is UP */
		if (!nic->vf_enabled[vf])
			continue;

		/* Get BGX, LMAC indices for the VF */
		bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
		/* Get interface link status */
		bgx_get_lmac_link_state(nic->node, bgx, lmac, &link);

		/* Inform VF only if link status changed */
		if (nic->link[vf] == link.link_up)
			continue;

		if (!nic->mbx_lock[vf]) {
			nic->link[vf] = link.link_up;
			nic->duplex[vf] = link.duplex;
			nic->speed[vf] = link.speed;

			/* Send a mbox message to VF with current link status */
			mbx.link_status.link_up = link.link_up;
			mbx.link_status.duplex = link.duplex;
			mbx.link_status.speed = link.speed;
			nic_send_msg_to_vf(nic, vf, &mbx);
		}
	}
	queue_delayed_work(nic->check_link, &nic->dwork, HZ * 2);
}

static int nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	struct nicpf *nic;
	int err;

	BUILD_BUG_ON(sizeof(union nic_mbx) > 16);

	nic = devm_kzalloc(dev, sizeof(*nic), GFP_KERNEL);
	if (!nic)
		return -ENOMEM;

	pci_set_drvdata(pdev, nic);

	nic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Failed to enable PCI device\n");
		pci_set_drvdata(pdev, NULL);
		return err;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(dev, "PCI request regions failed 0x%x\n", err);
		goto err_disable_device;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get usable DMA configuration\n");
		goto err_release_regions;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(48));
	if (err) {
		dev_err(dev, "Unable to get 48-bit DMA for consistent allocations\n");
		goto err_release_regions;
	}

	/* MAP PF's configuration registers */
	nic->reg_base = pcim_iomap(pdev, PCI_CFG_REG_BAR_NUM, 0);
	if (!nic->reg_base) {
		dev_err(dev, "Cannot map config register space, aborting\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	nic->node = nic_get_node_id(pdev);

	nic_set_lmac_vf_mapping(nic);

	/* Initialize hardware */
	nic_init_hw(nic);

	/* Set RSS TBL size for each VF */
	nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;

	/* Register interrupts */
	err = nic_register_interrupts(nic);
	if (err)
		goto err_release_regions;

	/* Configure SRIOV */
	err = nic_sriov_init(pdev, nic);
	if (err)
		goto err_unregister_interrupts;

	/* Register a physical link status poll fn() */
	nic->check_link = alloc_workqueue("check_link_status",
					  WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
	if (!nic->check_link) {
		err = -ENOMEM;
		goto err_disable_sriov;
	}

	INIT_DELAYED_WORK(&nic->dwork, nic_poll_for_link);
	queue_delayed_work(nic->check_link, &nic->dwork, 0);

	return 0;

err_disable_sriov:
	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);
err_unregister_interrupts:
	nic_unregister_interrupts(nic);
err_release_regions:
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

static void nic_remove(struct pci_dev *pdev)
{
	struct nicpf *nic = pci_get_drvdata(pdev);

	if (!nic)
		return;

	if (nic->flags & NIC_SRIOV_ENABLED)
		pci_disable_sriov(pdev);

	if (nic->check_link) {
		/* Destroy work Queue */
		cancel_delayed_work_sync(&nic->dwork);
		destroy_workqueue(nic->check_link);
	}

	nic_unregister_interrupts(nic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static struct pci_driver nic_driver = {
	.name = DRV_NAME,
	.id_table = nic_id_table,
	.probe = nic_probe,
	.remove = nic_remove,
};

static int __init nic_init_module(void)
{
	pr_info("%s, ver %s\n", DRV_NAME, DRV_VERSION);

	return pci_register_driver(&nic_driver);
}

static void __exit nic_cleanup_module(void)
{
	pci_unregister_driver(&nic_driver);
}

module_init(nic_init_module);
module_exit(nic_cleanup_module);