GNU Linux-libre 6.8.9-gnu
[releases.git] / drivers / net / ethernet / marvell / octeon_ep / octep_cnxk_pf.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell Octeon EP (EndPoint) Ethernet Driver
3  *
4  * Copyright (C) 2020 Marvell.
5  *
6  */
7
8 #include <linux/pci.h>
9 #include <linux/netdevice.h>
10 #include <linux/etherdevice.h>
11
12 #include "octep_config.h"
13 #include "octep_main.h"
14 #include "octep_regs_cnxk_pf.h"
15
/* We support up to 128 PFs in the control mailbox */
17 #define CTRL_MBOX_MAX_PF        128
18 #define CTRL_MBOX_SZ            ((size_t)(0x400000 / CTRL_MBOX_MAX_PF))
19
/* Names of Hardware non-queue generic interrupts.
 * These are used as MSI-X vector names (e.g. in /proc/interrupts),
 * so every entry must be unique.
 */
static char *cnxk_non_ioq_msix_names[] = {
	"epf_ire_rint",
	"epf_ore_rint",
	"epf_vfire_rint",
	"epf_rsvd0",
	"epf_vfore_rint",
	"epf_rsvd1",
	"epf_mbox_rint",
	"epf_rsvd2_0",
	"epf_rsvd2_1",
	"epf_dma_rint",
	"epf_dma_vf_rint",
	"epf_rsvd3",
	"epf_pp_vf_rint",
	"epf_rsvd4",	/* was a duplicate "epf_rsvd3"; renamed to keep names unique */
	"epf_misc_rint",
	"epf_rsvd5",
	/* Next 16 are for OEI_RINT */
	"epf_oei_rint0",
	"epf_oei_rint1",
	"epf_oei_rint2",
	"epf_oei_rint3",
	"epf_oei_rint4",
	"epf_oei_rint5",
	"epf_oei_rint6",
	"epf_oei_rint7",
	"epf_oei_rint8",
	"epf_oei_rint9",
	"epf_oei_rint10",
	"epf_oei_rint11",
	"epf_oei_rint12",
	"epf_oei_rint13",
	"epf_oei_rint14",
	"epf_oei_rint15",
	/* IOQ interrupt */
	"octeon_ep"
};
58
/* Dump useful hardware CSRs for debug purpose.
 * @oct: octeon device
 * @qno: PF-relative ring number whose IQ/OQ registers are printed
 */
static void cnxk_dump_regs(struct octep_device *oct, int qno)
{
	struct device *dev = &oct->pdev->dev;

	/* Input (Tx) ring CSRs: doorbell, control, enable, base/size, counters */
	dev_info(dev, "IQ-%d register dump\n", qno);
	dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_IN_INSTR_DBELL(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(qno)));
	dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_IN_CONTROL(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(qno)));
	dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_IN_ENABLE(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(qno)));
	dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_IN_INSTR_BADDR(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(qno)));
	dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_IN_INSTR_RSIZE(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(qno)));
	dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_IN_CNTS(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_IN_CNTS(qno)));
	dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_IN_INT_LEVELS(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(qno)));
	dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_IN_PKT_CNT(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_IN_PKT_CNT(qno)));
	dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_IN_BYTE_CNT(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_IN_BYTE_CNT(qno)));

	/* Output (Rx) ring CSRs, plus the ring's last error type */
	dev_info(dev, "OQ-%d register dump\n", qno);
	dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_OUT_SLIST_DBELL(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(qno)));
	dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_OUT_CONTROL(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(qno)));
	dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_OUT_ENABLE(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(qno)));
	dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_OUT_SLIST_BADDR(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(qno)));
	dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_OUT_SLIST_RSIZE(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(qno)));
	dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_OUT_CNTS(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_OUT_CNTS(qno)));
	dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_OUT_INT_LEVELS(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(qno)));
	dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_OUT_PKT_CNT(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_OUT_PKT_CNT(qno)));
	dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_OUT_BYTE_CNT(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_OUT_BYTE_CNT(qno)));
	dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
		 qno, CNXK_SDP_R_ERR_TYPE(qno),
		 octep_read_csr64(oct, CNXK_SDP_R_ERR_TYPE(qno)));
}
125
/* Reset Hardware Tx queue.
 * @oct:  octeon device
 * @q_no: PF-relative ring number
 *
 * Disables the ring, zeroes its count/config CSRs, then clears any
 * pending doorbell count. Always returns 0.
 */
static int cnxk_reset_iq(struct octep_device *oct, int q_no)
{
	struct octep_config *conf = oct->conf;
	u64 val = 0ULL;

	dev_dbg(&oct->pdev->dev, "Reset PF IQ-%d\n", q_no);

	/* Get absolute queue number */
	q_no += conf->pf_ring_cfg.srn;

	/* Disable the Tx/Instruction Ring */
	octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(q_no), val);

	/* clear the Instruction Ring packet/byte counts and doorbell CSRs */
	octep_write_csr64(oct, CNXK_SDP_R_IN_CNTS(q_no), val);
	octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(q_no), val);
	octep_write_csr64(oct, CNXK_SDP_R_IN_PKT_CNT(q_no), val);
	octep_write_csr64(oct, CNXK_SDP_R_IN_BYTE_CNT(q_no), val);
	octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(q_no), val);
	octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(q_no), val);

	/* All-ones write to the doorbell register; same pattern is used in
	 * octep_enable_iq_cnxk_pf() to flush pending doorbell counts.
	 */
	val = 0xFFFFFFFF;
	octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(q_no), val);

	return 0;
}
153
/* Reset Hardware Rx queue.
 * @oct:  octeon device
 * @q_no: PF-relative ring number
 */
static void cnxk_reset_oq(struct octep_device *oct, int q_no)
{
	u64 val = 0ULL;

	/* Convert to absolute ring number */
	q_no += CFG_GET_PORTS_PF_SRN(oct->conf);

	/* Disable Output (Rx) Ring */
	octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(q_no), val);
	octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(q_no), val);
	octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(q_no), val);
	octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(q_no), val);

	/* Clear count CSRs: write back the value just read — presumably
	 * write-1-to-clear semantics (NOTE(review): confirm against SDP spec)
	 */
	val = octep_read_csr(oct, CNXK_SDP_R_OUT_CNTS(q_no));
	octep_write_csr(oct, CNXK_SDP_R_OUT_CNTS(q_no), val);

	octep_write_csr64(oct, CNXK_SDP_R_OUT_PKT_CNT(q_no), 0xFFFFFFFFFULL);
	octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(q_no), 0xFFFFFFFF);
}
174
175 /* Reset all hardware Tx/Rx queues */
176 static void octep_reset_io_queues_cnxk_pf(struct octep_device *oct)
177 {
178         struct pci_dev *pdev = oct->pdev;
179         int q;
180
181         dev_dbg(&pdev->dev, "Reset OCTEP_CNXK PF IO Queues\n");
182
183         for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
184                 cnxk_reset_iq(oct, q);
185                 cnxk_reset_oq(oct, q);
186         }
187 }
188
189 /* Initialize windowed addresses to access some hardware registers */
190 static void octep_setup_pci_window_regs_cnxk_pf(struct octep_device *oct)
191 {
192         u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
193
194         oct->pci_win_regs.pci_win_wr_addr = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_WR_ADDR64);
195         oct->pci_win_regs.pci_win_rd_addr = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_RD_ADDR64);
196         oct->pci_win_regs.pci_win_wr_data = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_WR_DATA64);
197         oct->pci_win_regs.pci_win_rd_data = (u8 __iomem *)(bar0_pciaddr + CNXK_SDP_WIN_RD_DATA64);
198 }
199
/* Configure Hardware mapping: inform hardware which rings belong to PF. */
static void octep_configure_ring_mapping_cnxk_pf(struct octep_device *oct)
{
	struct octep_config *conf = oct->conf;
	struct pci_dev *pdev = oct->pdev;
	u64 pf_srn = CFG_GET_PORTS_PF_SRN(oct->conf);
	int q;

	for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(conf); q++) {
		u64 regval = 0;

		/* NOTE(review): the literal 8 written into the EPF field when
		 * pcie_port is non-zero presumably selects the second endpoint
		 * PF — confirm against the CNXK SDP EPVF ring mapping spec.
		 */
		if (oct->pcie_port)
			regval = 8 << CNXK_SDP_FUNC_SEL_EPF_BIT_POS;

		octep_write_csr64(oct, CNXK_SDP_EPVF_RING(pf_srn + q), regval);

		/* Read back and log the value actually latched by hardware */
		regval = octep_read_csr64(oct, CNXK_SDP_EPVF_RING(pf_srn + q));
		dev_dbg(&pdev->dev, "Write SDP_EPVF_RING[0x%llx] = 0x%llx\n",
			CNXK_SDP_EPVF_RING(pf_srn + q), regval);
	}
}
221
/* Initialize configuration limits and initial active config.
 * Reads ring/VF layout from hardware CSRs, fills in driver defaults for
 * IQ/OQ sizing and interrupt moderation, and locates this PF's control
 * mailbox region in BAR4.
 */
static void octep_init_config_cnxk_pf(struct octep_device *oct)
{
	struct octep_config *conf = oct->conf;
	struct pci_dev *pdev = oct->pdev;
	u8 link = 0;
	u64 val;
	int pos;

	/* Read ring configuration:
	 * PF ring count, number of VFs and rings per VF supported
	 */
	val = octep_read_csr64(oct, CNXK_SDP_EPF_RINFO);
	dev_info(&pdev->dev, "SDP_EPF_RINFO[0x%x]:0x%llx\n", CNXK_SDP_EPF_RINFO, val);
	conf->sriov_cfg.max_rings_per_vf = CNXK_SDP_EPF_RINFO_RPVF(val);
	conf->sriov_cfg.active_rings_per_vf = conf->sriov_cfg.max_rings_per_vf;
	conf->sriov_cfg.max_vfs = CNXK_SDP_EPF_RINFO_NVFS(val);
	conf->sriov_cfg.active_vfs = conf->sriov_cfg.max_vfs;
	conf->sriov_cfg.vf_srn = CNXK_SDP_EPF_RINFO_SRN(val);

	/* PF ring start number and rings-per-PF for this PCIe port */
	val = octep_read_csr64(oct, CNXK_SDP_MAC_PF_RING_CTL(oct->pcie_port));
	dev_info(&pdev->dev, "SDP_MAC_PF_RING_CTL[%d]:0x%llx\n", oct->pcie_port, val);
	conf->pf_ring_cfg.srn =  CNXK_SDP_MAC_PF_RING_CTL_SRN(val);
	conf->pf_ring_cfg.max_io_rings = CNXK_SDP_MAC_PF_RING_CTL_RPPF(val);
	conf->pf_ring_cfg.active_io_rings = conf->pf_ring_cfg.max_io_rings;
	dev_info(&pdev->dev, "pf_srn=%u rpvf=%u nvfs=%u rppf=%u\n",
		 conf->pf_ring_cfg.srn, conf->sriov_cfg.active_rings_per_vf,
		 conf->sriov_cfg.active_vfs, conf->pf_ring_cfg.active_io_rings);

	/* Driver defaults for input (Tx) queues */
	conf->iq.num_descs = OCTEP_IQ_MAX_DESCRIPTORS;
	conf->iq.instr_type = OCTEP_64BYTE_INSTR;
	conf->iq.db_min = OCTEP_DB_MIN;
	conf->iq.intr_threshold = OCTEP_IQ_INTR_THRESHOLD;

	/* Driver defaults for output (Rx) queues */
	conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;
	conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
	conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
	conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
	conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
	conf->oq.wmark = OCTEP_OQ_WMARK_MIN;

	/* One MSI-X vector per IO ring, plus the fixed non-IOQ vectors */
	conf->msix_cfg.non_ioq_msix = CNXK_NUM_NON_IOQ_INTR;
	conf->msix_cfg.ioq_msix = conf->pf_ring_cfg.active_io_rings;
	conf->msix_cfg.non_ioq_msix_names = cnxk_non_ioq_msix_names;

	/* NOTE(review): the SR-IOV Function Dependency Link byte combined
	 * with this PF's slot gives a per-PF devfn-style index used to
	 * carve a CTRL_MBOX_SZ slice out of BAR4 — confirm this indexing
	 * scheme against the firmware's control mbox layout.
	 */
	pos = pci_find_ext_capability(oct->pdev, PCI_EXT_CAP_ID_SRIOV);
	if (pos) {
		pci_read_config_byte(oct->pdev,
				     pos + PCI_SRIOV_FUNC_LINK,
				     &link);
		link = PCI_DEVFN(PCI_SLOT(oct->pdev->devfn), link);
	}
	conf->ctrl_mbox_cfg.barmem_addr = (void __iomem *)oct->mmio[2].hw_addr +
					   CNXK_PEM_BAR4_INDEX_OFFSET +
					   (link * CTRL_MBOX_SZ);

	/* Firmware heartbeat monitoring defaults */
	conf->fw_info.hb_interval = OCTEP_DEFAULT_FW_HB_INTERVAL;
	conf->fw_info.hb_miss_count = OCTEP_DEFAULT_FW_HB_MISS_COUNT;
}
281
/* Setup registers for a hardware Tx Queue.
 * @oct:   octeon device
 * @iq_no: PF-relative ring number
 *
 * Waits for the ring to go IDLE, programs control bits and the ring's
 * base/size, caches the MMIO addresses the fast path needs, and sets
 * the interrupt threshold.
 */
static void octep_setup_iq_regs_cnxk_pf(struct octep_device *oct, int iq_no)
{
	struct octep_iq *iq = oct->iq[iq_no];
	u32 reset_instr_cnt;
	u64 reg_val;

	/* Convert to absolute ring number */
	iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
	reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no));

	/* wait for IDLE to set to 1.
	 * NOTE(review): no timeout — a ring that never goes idle would spin
	 * forever; confirm hardware guarantees IDLE after disable.
	 */
	if (!(reg_val & CNXK_R_IN_CTL_IDLE)) {
		do {
			reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no));
		} while (!(reg_val & CNXK_R_IN_CTL_IDLE));
	}

	/* Read size, 64-byte instruction mode and ESR control bits */
	reg_val |= CNXK_R_IN_CTL_RDSIZE;
	reg_val |= CNXK_R_IN_CTL_IS_64B;
	reg_val |= CNXK_R_IN_CTL_ESR;
	octep_write_csr64(oct, CNXK_SDP_R_IN_CONTROL(iq_no), reg_val);

	/* Write the start of the input queue's ring and its size  */
	octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_BADDR(iq_no),
			  iq->desc_ring_dma);
	octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_RSIZE(iq_no),
			  iq->max_count);

	/* Remember the doorbell & instruction count register addr
	 * for this queue
	 */
	iq->doorbell_reg = oct->mmio[0].hw_addr +
			   CNXK_SDP_R_IN_INSTR_DBELL(iq_no);
	iq->inst_cnt_reg = oct->mmio[0].hw_addr +
			   CNXK_SDP_R_IN_CNTS(iq_no);
	iq->intr_lvl_reg = oct->mmio[0].hw_addr +
			   CNXK_SDP_R_IN_INT_LEVELS(iq_no);

	/* Store the current instruction counter (used in flush_iq calculation) */
	reset_instr_cnt = readl(iq->inst_cnt_reg);
	writel(reset_instr_cnt, iq->inst_cnt_reg);

	/* INTR_THRESHOLD is set to max(FFFFFFFF) to disable the INTR */
	reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & 0xffffffff;
	octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
}
328
/* Setup registers for a hardware Rx Queue.
 * @oct:   octeon device
 * @oq_no: PF-relative ring number
 *
 * Waits for IDLE, programs ring control/base/size, the receive buffer
 * size, interrupt moderation thresholds and the backpressure watermark,
 * and caches the MMIO addresses used by the Rx fast path.
 */
static void octep_setup_oq_regs_cnxk_pf(struct octep_device *oct, int oq_no)
{
	u64 reg_val;
	u64 oq_ctl = 0ULL;
	u32 time_threshold = 0;
	struct octep_oq *oq = oct->oq[oq_no];

	/* Convert to absolute ring number */
	oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
	reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));

	/* wait for IDLE to set to 1.
	 * NOTE(review): unbounded poll loop — same caveat as the IQ setup.
	 */
	if (!(reg_val & CNXK_R_OUT_CTL_IDLE)) {
		do {
			reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));
		} while (!(reg_val & CNXK_R_OUT_CTL_IDLE));
	}

	/* Clear IMODE and all ROR/NSR/ES attribute bits, then set ES_P only
	 * (per the CNXK_R_OUT_CTL_* bit definitions)
	 */
	reg_val &= ~(CNXK_R_OUT_CTL_IMODE);
	reg_val &= ~(CNXK_R_OUT_CTL_ROR_P);
	reg_val &= ~(CNXK_R_OUT_CTL_NSR_P);
	reg_val &= ~(CNXK_R_OUT_CTL_ROR_I);
	reg_val &= ~(CNXK_R_OUT_CTL_NSR_I);
	reg_val &= ~(CNXK_R_OUT_CTL_ES_I);
	reg_val &= ~(CNXK_R_OUT_CTL_ROR_D);
	reg_val &= ~(CNXK_R_OUT_CTL_NSR_D);
	reg_val &= ~(CNXK_R_OUT_CTL_ES_D);
	reg_val |= (CNXK_R_OUT_CTL_ES_P);

	octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), reg_val);
	octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_BADDR(oq_no),
			  oq->desc_ring_dma);
	octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_RSIZE(oq_no),
			  oq->max_count);

	oq_ctl = octep_read_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no));

	/* Clear the ISIZE and BSIZE (22-0) */
	oq_ctl &= ~0x7fffffULL;

	/* Populate the BSIZE (15-0) */
	oq_ctl |= (oq->buffer_size & 0xffff);
	octep_write_csr64(oct, CNXK_SDP_R_OUT_CONTROL(oq_no), oq_ctl);

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	oq->pkts_sent_reg = oct->mmio[0].hw_addr + CNXK_SDP_R_OUT_CNTS(oq_no);
	oq->pkts_credit_reg = oct->mmio[0].hw_addr +
			      CNXK_SDP_R_OUT_SLIST_DBELL(oq_no);

	/* INT_LEVELS: time threshold in the upper 32 bits, packet threshold
	 * in the lower 32 bits
	 */
	time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
	reg_val = ((u64)time_threshold << 32) |
		  CFG_GET_OQ_INTR_PKT(oct->conf);
	octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);

	/* set watermark for backpressure */
	reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no));
	reg_val &= ~0xFFFFFFFFULL;
	reg_val |= CFG_GET_OQ_WMARK(oct->conf);
	octep_write_csr64(oct, CNXK_SDP_R_OUT_WMARK(oq_no), reg_val);
}
389
390 /* Setup registers for a PF mailbox */
391 static void octep_setup_mbox_regs_cnxk_pf(struct octep_device *oct, int q_no)
392 {
393         struct octep_mbox *mbox = oct->mbox[q_no];
394
395         /* PF to VF DATA reg. PF writes into this reg */
396         mbox->pf_vf_data_reg = oct->mmio[0].hw_addr + CNXK_SDP_MBOX_PF_VF_DATA(q_no);
397
398         /* VF to PF DATA reg. PF reads from this reg */
399         mbox->vf_pf_data_reg = oct->mmio[0].hw_addr + CNXK_SDP_MBOX_VF_PF_DATA(q_no);
400 }
401
402 static void octep_poll_pfvf_mailbox_cnxk_pf(struct octep_device *oct)
403 {
404         u32 vf, active_vfs, active_rings_per_vf, vf_mbox_queue;
405         u64 reg0;
406
407         reg0 = octep_read_csr64(oct, CNXK_SDP_EPF_MBOX_RINT(0));
408         if (reg0) {
409                 active_vfs = CFG_GET_ACTIVE_VFS(oct->conf);
410                 active_rings_per_vf = CFG_GET_ACTIVE_RPVF(oct->conf);
411                 for (vf = 0; vf < active_vfs; vf++) {
412                         vf_mbox_queue = vf * active_rings_per_vf;
413                         if (!(reg0 & (0x1UL << vf_mbox_queue)))
414                                 continue;
415
416                         if (!oct->mbox[vf_mbox_queue]) {
417                                 dev_err(&oct->pdev->dev, "bad mbox vf %d\n", vf);
418                                 continue;
419                         }
420                         schedule_work(&oct->mbox[vf_mbox_queue]->wk.work);
421                 }
422                 if (reg0)
423                         octep_write_csr64(oct, CNXK_SDP_EPF_MBOX_RINT(0), reg0);
424         }
425 }
426
427 static irqreturn_t octep_pfvf_mbox_intr_handler_cnxk_pf(void *dev)
428 {
429         struct octep_device *oct = (struct octep_device *)dev;
430
431         octep_poll_pfvf_mailbox_cnxk_pf(oct);
432         return IRQ_HANDLED;
433 }
434
435 /* Poll OEI events like heartbeat */
436 static void octep_poll_oei_cnxk_pf(struct octep_device *oct)
437 {
438         u64 reg0;
439
440         /* Check for OEI INTR */
441         reg0 = octep_read_csr64(oct, CNXK_SDP_EPF_OEI_RINT);
442         if (reg0) {
443                 octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT, reg0);
444                 if (reg0 & CNXK_SDP_EPF_OEI_RINT_DATA_BIT_MBOX)
445                         queue_work(octep_wq, &oct->ctrl_mbox_task);
446                 if (reg0 & CNXK_SDP_EPF_OEI_RINT_DATA_BIT_HBEAT)
447                         atomic_set(&oct->hb_miss_cnt, 0);
448         }
449 }
450
451 /* OEI interrupt handler */
452 static irqreturn_t octep_oei_intr_handler_cnxk_pf(void *dev)
453 {
454         struct octep_device *oct = (struct octep_device *)dev;
455
456         octep_poll_oei_cnxk_pf(oct);
457         return IRQ_HANDLED;
458 }
459
/* Process non-ioq interrupts required to keep pf interface running.
 * OEI_RINT is needed for control mailbox
 * MBOX_RINT is needed for pfvf mailbox
 */
static void octep_poll_non_ioq_interrupts_cnxk_pf(struct octep_device *oct)
{
	/* Each poll routine reads and acknowledges its own interrupt bits */
	octep_poll_pfvf_mailbox_cnxk_pf(oct);
	octep_poll_oei_cnxk_pf(oct);
}
469
/* Interrupt handler for input ring error interrupts. */
static irqreturn_t octep_ire_intr_handler_cnxk_pf(void *dev)
{
	struct octep_device *oct = (struct octep_device *)dev;
	struct pci_dev *pdev = oct->pdev;
	u64 reg_val = 0;
	int i = 0;

	/* Check for IRERR INTR */
	reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_IRERR_RINT);
	if (reg_val) {
		dev_info(&pdev->dev,
			 "received IRERR_RINT intr: 0x%llx\n", reg_val);
		/* Acknowledge the summary interrupt before scanning rings */
		octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT, reg_val);

		/* Log and clear the per-ring error type of each active ring */
		for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
			reg_val = octep_read_csr64(oct,
						   CNXK_SDP_R_ERR_TYPE(i));
			if (reg_val) {
				dev_info(&pdev->dev,
					 "Received err type on IQ-%d: 0x%llx\n",
					 i, reg_val);
				octep_write_csr64(oct, CNXK_SDP_R_ERR_TYPE(i),
						  reg_val);
			}
		}
	}
	return IRQ_HANDLED;
}
499
/* Interrupt handler for output ring error interrupts. */
static irqreturn_t octep_ore_intr_handler_cnxk_pf(void *dev)
{
	struct octep_device *oct = (struct octep_device *)dev;
	struct pci_dev *pdev = oct->pdev;
	u64 reg_val = 0;
	int i = 0;

	/* Check for ORERR INTR */
	reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_ORERR_RINT);
	if (reg_val) {
		dev_info(&pdev->dev,
			 "Received ORERR_RINT intr: 0x%llx\n", reg_val);
		/* Acknowledge the summary interrupt before scanning rings */
		octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT, reg_val);
		/* Log and clear the per-ring error type of each active ring */
		for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++) {
			reg_val = octep_read_csr64(oct, CNXK_SDP_R_ERR_TYPE(i));
			if (reg_val) {
				dev_info(&pdev->dev,
					 "Received err type on OQ-%d: 0x%llx\n",
					 i, reg_val);
				octep_write_csr64(oct, CNXK_SDP_R_ERR_TYPE(i),
						  reg_val);
			}
		}
	}
	return IRQ_HANDLED;
}
527
528 /* Interrupt handler for vf input ring error interrupts. */
529 static irqreturn_t octep_vfire_intr_handler_cnxk_pf(void *dev)
530 {
531         struct octep_device *oct = (struct octep_device *)dev;
532         struct pci_dev *pdev = oct->pdev;
533         u64 reg_val = 0;
534
535         /* Check for VFIRE INTR */
536         reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT(0));
537         if (reg_val) {
538                 dev_info(&pdev->dev,
539                          "Received VFIRE_RINT intr: 0x%llx\n", reg_val);
540                 octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT(0), reg_val);
541         }
542         return IRQ_HANDLED;
543 }
544
545 /* Interrupt handler for vf output ring error interrupts. */
546 static irqreturn_t octep_vfore_intr_handler_cnxk_pf(void *dev)
547 {
548         struct octep_device *oct = (struct octep_device *)dev;
549         struct pci_dev *pdev = oct->pdev;
550         u64 reg_val = 0;
551
552         /* Check for VFORE INTR */
553         reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_VFORE_RINT(0));
554         if (reg_val) {
555                 dev_info(&pdev->dev,
556                          "Received VFORE_RINT intr: 0x%llx\n", reg_val);
557                 octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT(0), reg_val);
558         }
559         return IRQ_HANDLED;
560 }
561
562 /* Interrupt handler for dpi dma related interrupts. */
563 static irqreturn_t octep_dma_intr_handler_cnxk_pf(void *dev)
564 {
565         struct octep_device *oct = (struct octep_device *)dev;
566         u64 reg_val = 0;
567
568         /* Check for DMA INTR */
569         reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_DMA_RINT);
570         if (reg_val)
571                 octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT, reg_val);
572
573         return IRQ_HANDLED;
574 }
575
576 /* Interrupt handler for dpi dma transaction error interrupts for VFs  */
577 static irqreturn_t octep_dma_vf_intr_handler_cnxk_pf(void *dev)
578 {
579         struct octep_device *oct = (struct octep_device *)dev;
580         struct pci_dev *pdev = oct->pdev;
581         u64 reg_val = 0;
582
583         /* Check for DMA VF INTR */
584         reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT(0));
585         if (reg_val) {
586                 dev_info(&pdev->dev,
587                          "Received DMA_VF_RINT intr: 0x%llx\n", reg_val);
588                 octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT(0), reg_val);
589         }
590         return IRQ_HANDLED;
591 }
592
593 /* Interrupt handler for pp transaction error interrupts for VFs  */
594 static irqreturn_t octep_pp_vf_intr_handler_cnxk_pf(void *dev)
595 {
596         struct octep_device *oct = (struct octep_device *)dev;
597         struct pci_dev *pdev = oct->pdev;
598         u64 reg_val = 0;
599
600         /* Check for PPVF INTR */
601         reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT(0));
602         if (reg_val) {
603                 dev_info(&pdev->dev,
604                          "Received PP_VF_RINT intr: 0x%llx\n", reg_val);
605                 octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT(0), reg_val);
606         }
607         return IRQ_HANDLED;
608 }
609
610 /* Interrupt handler for mac related interrupts. */
611 static irqreturn_t octep_misc_intr_handler_cnxk_pf(void *dev)
612 {
613         struct octep_device *oct = (struct octep_device *)dev;
614         struct pci_dev *pdev = oct->pdev;
615         u64 reg_val = 0;
616
617         /* Check for MISC INTR */
618         reg_val = octep_read_csr64(oct, CNXK_SDP_EPF_MISC_RINT);
619         if (reg_val) {
620                 dev_info(&pdev->dev,
621                          "Received MISC_RINT intr: 0x%llx\n", reg_val);
622                 octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT, reg_val);
623         }
624         return IRQ_HANDLED;
625 }
626
627 /* Interrupts handler for all reserved interrupts. */
628 static irqreturn_t octep_rsvd_intr_handler_cnxk_pf(void *dev)
629 {
630         struct octep_device *oct = (struct octep_device *)dev;
631         struct pci_dev *pdev = oct->pdev;
632
633         dev_info(&pdev->dev, "Reserved interrupts raised; Ignore\n");
634         return IRQ_HANDLED;
635 }
636
637 /* Tx/Rx queue interrupt handler */
638 static irqreturn_t octep_ioq_intr_handler_cnxk_pf(void *data)
639 {
640         struct octep_ioq_vector *vector = (struct octep_ioq_vector *)data;
641         struct octep_oq *oq = vector->oq;
642
643         napi_schedule_irqoff(oq->napi);
644         return IRQ_HANDLED;
645 }
646
/* soft reset.
 * Unlocks the PCI window write mask, marks firmware status RUNNING to
 * work around a hardware bug, then triggers a chip domain reset and
 * waits for it to take effect. Always returns 0.
 */
static int octep_soft_reset_cnxk_pf(struct octep_device *oct)
{
	dev_info(&oct->pdev->dev, "CNXKXX: Doing soft reset\n");

	/* Allow windowed writes to all byte lanes */
	octep_write_csr64(oct, CNXK_SDP_WIN_WR_MASK_REG, 0xFF);

	/* Firmware status CSR is supposed to be cleared by
	 * core domain reset, but due to a hw bug, it is not.
	 * Set it to RUNNING right before reset so that it is not
	 * left in READY (1) state after a reset.  This is required
	 * in addition to the early setting to handle the case where
	 * the OcteonTX is unexpectedly reset, reboots, and then
	 * the module is removed.
	 */
	OCTEP_PCI_WIN_WRITE(oct, CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL),
			    FW_STATUS_RUNNING);

	/* Set chip domain reset bit */
	OCTEP_PCI_WIN_WRITE(oct, CNXK_RST_CHIP_DOMAIN_W1S, 1);
	/* Wait till Octeon resets. */
	mdelay(10);
	/* restore the  reset value.
	 * NOTE(review): this writes the same 0xFF as before the reset, so
	 * "restore" re-asserts the write mask after reset rather than
	 * restoring a saved value — confirm this is the intent.
	 */
	octep_write_csr64(oct, CNXK_SDP_WIN_WR_MASK_REG, 0xFF);

	return 0;
}
674
675 /* Re-initialize Octeon hardware registers */
676 static void octep_reinit_regs_cnxk_pf(struct octep_device *oct)
677 {
678         u32 i;
679
680         for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
681                 oct->hw_ops.setup_iq_regs(oct, i);
682
683         for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
684                 oct->hw_ops.setup_oq_regs(oct, i);
685
686         oct->hw_ops.enable_interrupts(oct);
687         oct->hw_ops.enable_io_queues(oct);
688
689         for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
690                 writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
691 }
692
/* Enable all interrupts */
static void octep_enable_interrupts_cnxk_pf(struct octep_device *oct)
{
	u64 intr_mask = 0ULL;
	int srn, num_rings, i;

	srn = CFG_GET_PORTS_PF_SRN(oct->conf);
	num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);

	/* Build a mask covering this PF's rings [srn, srn + num_rings) */
	for (i = 0; i < num_rings; i++)
		intr_mask |= (0x1ULL << (srn + i));

	/* _W1S registers: writing 1 sets the corresponding enable bit */
	octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1S, intr_mask);
	octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1S, intr_mask);
	/* OEI, VF and mailbox interrupts are enabled for all bits */
	octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT_ENA_W1S, -1ULL);

	octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT_ENA_W1S(0), -1ULL);
	octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT_ENA_W1S(0), -1ULL);

	octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT_ENA_W1S, intr_mask);
	octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT_ENA_W1S, intr_mask);
	octep_write_csr64(oct, CNXK_SDP_EPF_MBOX_RINT_ENA_W1S(0), -1ULL);

	octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1S(0), -1ULL);
	octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT_ENA_W1S(0), -1ULL);
}
719
/* Disable all interrupts; mirror image of octep_enable_interrupts_cnxk_pf() */
static void octep_disable_interrupts_cnxk_pf(struct octep_device *oct)
{
	u64 intr_mask = 0ULL;
	int srn, num_rings, i;

	srn = CFG_GET_PORTS_PF_SRN(oct->conf);
	num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);

	/* Build a mask covering this PF's rings [srn, srn + num_rings) */
	for (i = 0; i < num_rings; i++)
		intr_mask |= (0x1ULL << (srn + i));

	/* _W1C registers: writing 1 clears the corresponding enable bit */
	octep_write_csr64(oct, CNXK_SDP_EPF_IRERR_RINT_ENA_W1C, intr_mask);
	octep_write_csr64(oct, CNXK_SDP_EPF_ORERR_RINT_ENA_W1C, intr_mask);
	octep_write_csr64(oct, CNXK_SDP_EPF_OEI_RINT_ENA_W1C, -1ULL);

	octep_write_csr64(oct, CNXK_SDP_EPF_VFIRE_RINT_ENA_W1C(0), -1ULL);
	octep_write_csr64(oct, CNXK_SDP_EPF_VFORE_RINT_ENA_W1C(0), -1ULL);

	octep_write_csr64(oct, CNXK_SDP_EPF_MISC_RINT_ENA_W1C, intr_mask);
	octep_write_csr64(oct, CNXK_SDP_EPF_DMA_RINT_ENA_W1C, intr_mask);
	octep_write_csr64(oct, CNXK_SDP_EPF_MBOX_RINT_ENA_W1C(0), -1ULL);

	octep_write_csr64(oct, CNXK_SDP_EPF_DMA_VF_RINT_ENA_W1C(0), -1ULL);
	octep_write_csr64(oct, CNXK_SDP_EPF_PP_VF_RINT_ENA_W1C(0), -1ULL);
}
746
747 /* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
748 static u32 octep_update_iq_read_index_cnxk_pf(struct octep_iq *iq)
749 {
750         u32 pkt_in_done = readl(iq->inst_cnt_reg);
751         u32 last_done, new_idx;
752
753         last_done = pkt_in_done - iq->pkt_in_done;
754         iq->pkt_in_done = pkt_in_done;
755
756         new_idx = (iq->octep_read_index + last_done) % iq->max_count;
757
758         return new_idx;
759 }
760
761 /* Enable a hardware Tx Queue */
762 static void octep_enable_iq_cnxk_pf(struct octep_device *oct, int iq_no)
763 {
764         u64 loop = HZ;
765         u64 reg_val;
766
767         iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
768
769         octep_write_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(iq_no), 0xFFFFFFFF);
770
771         while (octep_read_csr64(oct, CNXK_SDP_R_IN_INSTR_DBELL(iq_no)) &&
772                loop--) {
773                 schedule_timeout_interruptible(1);
774         }
775
776         reg_val = octep_read_csr64(oct,  CNXK_SDP_R_IN_INT_LEVELS(iq_no));
777         reg_val |= (0x1ULL << 62);
778         octep_write_csr64(oct, CNXK_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
779
780         reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no));
781         reg_val |= 0x1ULL;
782         octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no), reg_val);
783 }
784
785 /* Enable a hardware Rx Queue */
786 static void octep_enable_oq_cnxk_pf(struct octep_device *oct, int oq_no)
787 {
788         u64 reg_val = 0ULL;
789
790         oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
791
792         reg_val = octep_read_csr64(oct,  CNXK_SDP_R_OUT_INT_LEVELS(oq_no));
793         reg_val |= (0x1ULL << 62);
794         octep_write_csr64(oct, CNXK_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);
795
796         octep_write_csr64(oct, CNXK_SDP_R_OUT_SLIST_DBELL(oq_no), 0xFFFFFFFF);
797
798         reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no));
799         reg_val |= 0x1ULL;
800         octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no), reg_val);
801 }
802
803 /* Enable all hardware Tx/Rx Queues assined to PF */
804 static void octep_enable_io_queues_cnxk_pf(struct octep_device *oct)
805 {
806         u8 q;
807
808         for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
809                 octep_enable_iq_cnxk_pf(oct, q);
810                 octep_enable_oq_cnxk_pf(oct, q);
811         }
812 }
813
814 /* Disable a hardware Tx Queue assined to PF */
815 static void octep_disable_iq_cnxk_pf(struct octep_device *oct, int iq_no)
816 {
817         u64 reg_val = 0ULL;
818
819         iq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
820
821         reg_val = octep_read_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no));
822         reg_val &= ~0x1ULL;
823         octep_write_csr64(oct, CNXK_SDP_R_IN_ENABLE(iq_no), reg_val);
824 }
825
826 /* Disable a hardware Rx Queue assined to PF */
827 static void octep_disable_oq_cnxk_pf(struct octep_device *oct, int oq_no)
828 {
829         u64 reg_val = 0ULL;
830
831         oq_no += CFG_GET_PORTS_PF_SRN(oct->conf);
832         reg_val = octep_read_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no));
833         reg_val &= ~0x1ULL;
834         octep_write_csr64(oct, CNXK_SDP_R_OUT_ENABLE(oq_no), reg_val);
835 }
836
837 /* Disable all hardware Tx/Rx Queues assined to PF */
838 static void octep_disable_io_queues_cnxk_pf(struct octep_device *oct)
839 {
840         int q = 0;
841
842         for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
843                 octep_disable_iq_cnxk_pf(oct, q);
844                 octep_disable_oq_cnxk_pf(oct, q);
845         }
846 }
847
848 /* Dump hardware registers (including Tx/Rx queues) for debugging. */
849 static void octep_dump_registers_cnxk_pf(struct octep_device *oct)
850 {
851         u8 srn, num_rings, q;
852
853         srn = CFG_GET_PORTS_PF_SRN(oct->conf);
854         num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
855
856         for (q = srn; q < srn + num_rings; q++)
857                 cnxk_dump_regs(oct, q);
858 }
859
860 /**
861  * octep_device_setup_cnxk_pf() - Setup Octeon device.
862  *
863  * @oct: Octeon device private data structure.
864  *
865  * - initialize hardware operations.
866  * - get target side pcie port number for the device.
867  * - setup window access to hardware registers.
868  * - set initial configuration and max limits.
869  * - setup hardware mapping of rings to the PF device.
870  */
871 void octep_device_setup_cnxk_pf(struct octep_device *oct)
872 {
873         oct->hw_ops.setup_iq_regs = octep_setup_iq_regs_cnxk_pf;
874         oct->hw_ops.setup_oq_regs = octep_setup_oq_regs_cnxk_pf;
875         oct->hw_ops.setup_mbox_regs = octep_setup_mbox_regs_cnxk_pf;
876
877         oct->hw_ops.mbox_intr_handler = octep_pfvf_mbox_intr_handler_cnxk_pf;
878         oct->hw_ops.oei_intr_handler = octep_oei_intr_handler_cnxk_pf;
879         oct->hw_ops.ire_intr_handler = octep_ire_intr_handler_cnxk_pf;
880         oct->hw_ops.ore_intr_handler = octep_ore_intr_handler_cnxk_pf;
881         oct->hw_ops.vfire_intr_handler = octep_vfire_intr_handler_cnxk_pf;
882         oct->hw_ops.vfore_intr_handler = octep_vfore_intr_handler_cnxk_pf;
883         oct->hw_ops.dma_intr_handler = octep_dma_intr_handler_cnxk_pf;
884         oct->hw_ops.dma_vf_intr_handler = octep_dma_vf_intr_handler_cnxk_pf;
885         oct->hw_ops.pp_vf_intr_handler = octep_pp_vf_intr_handler_cnxk_pf;
886         oct->hw_ops.misc_intr_handler = octep_misc_intr_handler_cnxk_pf;
887         oct->hw_ops.rsvd_intr_handler = octep_rsvd_intr_handler_cnxk_pf;
888         oct->hw_ops.ioq_intr_handler = octep_ioq_intr_handler_cnxk_pf;
889         oct->hw_ops.soft_reset = octep_soft_reset_cnxk_pf;
890         oct->hw_ops.reinit_regs = octep_reinit_regs_cnxk_pf;
891
892         oct->hw_ops.enable_interrupts = octep_enable_interrupts_cnxk_pf;
893         oct->hw_ops.disable_interrupts = octep_disable_interrupts_cnxk_pf;
894         oct->hw_ops.poll_non_ioq_interrupts = octep_poll_non_ioq_interrupts_cnxk_pf;
895
896         oct->hw_ops.update_iq_read_idx = octep_update_iq_read_index_cnxk_pf;
897
898         oct->hw_ops.enable_iq = octep_enable_iq_cnxk_pf;
899         oct->hw_ops.enable_oq = octep_enable_oq_cnxk_pf;
900         oct->hw_ops.enable_io_queues = octep_enable_io_queues_cnxk_pf;
901
902         oct->hw_ops.disable_iq = octep_disable_iq_cnxk_pf;
903         oct->hw_ops.disable_oq = octep_disable_oq_cnxk_pf;
904         oct->hw_ops.disable_io_queues = octep_disable_io_queues_cnxk_pf;
905         oct->hw_ops.reset_io_queues = octep_reset_io_queues_cnxk_pf;
906
907         oct->hw_ops.dump_registers = octep_dump_registers_cnxk_pf;
908
909         octep_setup_pci_window_regs_cnxk_pf(oct);
910
911         oct->pcie_port = octep_read_csr64(oct, CNXK_SDP_MAC_NUMBER) & 0xff;
912         dev_info(&oct->pdev->dev,
913                  "Octeon device using PCIE Port %d\n", oct->pcie_port);
914
915         octep_init_config_cnxk_pf(oct);
916         octep_configure_ring_mapping_cnxk_pf(oct);
917
918         /* Firmware status CSR is supposed to be cleared by
919          * core domain reset, but due to IPBUPEM-38842, it is not.
920          * Set it to RUNNING early in boot, so that unexpected resets
921          * leave it in a state that is not READY (1).
922          */
923         OCTEP_PCI_WIN_WRITE(oct, CNXK_PEMX_PFX_CSX_PFCFGX(0, 0, CNXK_PCIEEP_VSECST_CTL),
924                             FW_STATUS_RUNNING);
925 }