/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "cn23xx_pf_device.h"
#include "octeon_main.h"
#include "octeon_mailbox.h"

#define RESET_NOTDONE 0
#define RESET_DONE 1

/* Change the value of SLI Packet Input Jabber Register to allow
 * VXLAN TSO packets which can be 64424 bytes, exceeding the
 * MAX_GSO_SIZE we supplied to the kernel
 */
#define CN23XX_INPUT_JABBER 64600
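/* Sanity arithmetic, not taken from the HRM: 64600 - 64424 = 176 bytes of
 * headroom above the largest expected VXLAN TSO frame, presumably to absorb
 * encapsulation overhead.
 */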

void cn23xx_dump_pf_initialized_regs(struct octeon_device *oct)
{
        int i = 0;
        u32 regval = 0;
        struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;

        /* In cn23xx_soft_reset */
        dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%llx\n",
                "CN23XX_WIN_WR_MASK_REG", CVM_CAST64(CN23XX_WIN_WR_MASK_REG),
                CVM_CAST64(octeon_read_csr64(oct, CN23XX_WIN_WR_MASK_REG)));
        dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
                "CN23XX_SLI_SCRATCH1", CVM_CAST64(CN23XX_SLI_SCRATCH1),
                CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)));
        dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
                "CN23XX_RST_SOFT_RST", CN23XX_RST_SOFT_RST,
                lio_pci_readq(oct, CN23XX_RST_SOFT_RST));

        /* In cn23xx_set_dpi_regs */
        dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
                "CN23XX_DPI_DMA_CONTROL", CN23XX_DPI_DMA_CONTROL,
                lio_pci_readq(oct, CN23XX_DPI_DMA_CONTROL));

        for (i = 0; i < 6; i++) {
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_DPI_DMA_ENG_ENB", i,
                        CN23XX_DPI_DMA_ENG_ENB(i),
                        lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_ENB(i)));
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_DPI_DMA_ENG_BUF", i,
                        CN23XX_DPI_DMA_ENG_BUF(i),
                        lio_pci_readq(oct, CN23XX_DPI_DMA_ENG_BUF(i)));
        }

        dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n", "CN23XX_DPI_CTL",
                CN23XX_DPI_CTL, lio_pci_readq(oct, CN23XX_DPI_CTL));

        /* In cn23xx_setup_pcie_mps and cn23xx_setup_pcie_mrrs */
        pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
        dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
                "CN23XX_CONFIG_PCIE_DEVCTL",
                CVM_CAST64(CN23XX_CONFIG_PCIE_DEVCTL), CVM_CAST64(regval));

        dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                "CN23XX_DPI_SLI_PRTX_CFG", oct->pcie_port,
                CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port),
                lio_pci_readq(oct, CN23XX_DPI_SLI_PRTX_CFG(oct->pcie_port)));

        /* In cn23xx_specific_regs_setup */
        dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                "CN23XX_SLI_S2M_PORTX_CTL", oct->pcie_port,
                CVM_CAST64(CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port)),
                CVM_CAST64(octeon_read_csr64(
                        oct, CN23XX_SLI_S2M_PORTX_CTL(oct->pcie_port))));

        dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
                "CN23XX_SLI_RING_RST", CVM_CAST64(CN23XX_SLI_PKT_IOQ_RING_RST),
                (u64)octeon_read_csr64(oct, CN23XX_SLI_PKT_IOQ_RING_RST));

        /* In cn23xx_setup_global_mac_regs */
        for (i = 0; i < CN23XX_MAX_MACS; i++) {
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_PKT_MAC_RINFO64", i,
                        CVM_CAST64(CN23XX_SLI_PKT_MAC_RINFO64(i, oct->pf_num)),
                        CVM_CAST64(octeon_read_csr64
                                (oct, CN23XX_SLI_PKT_MAC_RINFO64
                                        (i, oct->pf_num))));
        }

        /* In cn23xx_setup_global_input_regs */
        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_IQ_PKT_CONTROL64", i,
                        CVM_CAST64(CN23XX_SLI_IQ_PKT_CONTROL64(i)),
                        CVM_CAST64(octeon_read_csr64
                                (oct, CN23XX_SLI_IQ_PKT_CONTROL64(i))));
        }

        /* In cn23xx_setup_global_output_regs */
        dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
                "CN23XX_SLI_OQ_WMARK", CVM_CAST64(CN23XX_SLI_OQ_WMARK),
                CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_OQ_WMARK)));

        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_OQ_PKT_CONTROL", i,
                        CVM_CAST64(CN23XX_SLI_OQ_PKT_CONTROL(i)),
                        CVM_CAST64(octeon_read_csr(
                                oct, CN23XX_SLI_OQ_PKT_CONTROL(i))));
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_OQ_PKT_INT_LEVELS", i,
                        CVM_CAST64(CN23XX_SLI_OQ_PKT_INT_LEVELS(i)),
                        CVM_CAST64(octeon_read_csr64(
                                oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(i))));
        }

        /* In cn23xx_enable_interrupt and cn23xx_disable_interrupt */
        dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
                "cn23xx->intr_enb_reg64",
                CVM_CAST64((long)(cn23xx->intr_enb_reg64)),
                CVM_CAST64(readq(cn23xx->intr_enb_reg64)));

        dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
                "cn23xx->intr_sum_reg64",
                CVM_CAST64((long)(cn23xx->intr_sum_reg64)),
                CVM_CAST64(readq(cn23xx->intr_sum_reg64)));

        /* In cn23xx_setup_iq_regs */
        for (i = 0; i < CN23XX_MAX_INPUT_QUEUES; i++) {
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_IQ_BASE_ADDR64", i,
                        CVM_CAST64(CN23XX_SLI_IQ_BASE_ADDR64(i)),
                        CVM_CAST64(octeon_read_csr64(
                                oct, CN23XX_SLI_IQ_BASE_ADDR64(i))));
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_IQ_SIZE", i,
                        CVM_CAST64(CN23XX_SLI_IQ_SIZE(i)),
                        CVM_CAST64(octeon_read_csr
                                (oct, CN23XX_SLI_IQ_SIZE(i))));
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_IQ_DOORBELL", i,
                        CVM_CAST64(CN23XX_SLI_IQ_DOORBELL(i)),
                        CVM_CAST64(octeon_read_csr64(
                                oct, CN23XX_SLI_IQ_DOORBELL(i))));
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_IQ_INSTR_COUNT64", i,
                        CVM_CAST64(CN23XX_SLI_IQ_INSTR_COUNT64(i)),
                        CVM_CAST64(octeon_read_csr64(
                                oct, CN23XX_SLI_IQ_INSTR_COUNT64(i))));
        }

        /* In cn23xx_setup_oq_regs */
        for (i = 0; i < CN23XX_MAX_OUTPUT_QUEUES; i++) {
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_OQ_BASE_ADDR64", i,
                        CVM_CAST64(CN23XX_SLI_OQ_BASE_ADDR64(i)),
                        CVM_CAST64(octeon_read_csr64(
                                oct, CN23XX_SLI_OQ_BASE_ADDR64(i))));
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_OQ_SIZE", i,
                        CVM_CAST64(CN23XX_SLI_OQ_SIZE(i)),
                        CVM_CAST64(octeon_read_csr
                                (oct, CN23XX_SLI_OQ_SIZE(i))));
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_OQ_BUFF_INFO_SIZE", i,
                        CVM_CAST64(CN23XX_SLI_OQ_BUFF_INFO_SIZE(i)),
                        CVM_CAST64(octeon_read_csr(
                                oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(i))));
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_OQ_PKTS_SENT", i,
                        CVM_CAST64(CN23XX_SLI_OQ_PKTS_SENT(i)),
                        CVM_CAST64(octeon_read_csr64(
                                oct, CN23XX_SLI_OQ_PKTS_SENT(i))));
                dev_dbg(&oct->pci_dev->dev, "%s(%d)[%llx] : 0x%016llx\n",
                        "CN23XX_SLI_OQ_PKTS_CREDIT", i,
                        CVM_CAST64(CN23XX_SLI_OQ_PKTS_CREDIT(i)),
                        CVM_CAST64(octeon_read_csr64(
                                oct, CN23XX_SLI_OQ_PKTS_CREDIT(i))));
        }

        dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
                "CN23XX_SLI_PKT_TIME_INT",
                CVM_CAST64(CN23XX_SLI_PKT_TIME_INT),
                CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_TIME_INT)));
        dev_dbg(&oct->pci_dev->dev, "%s[%llx] : 0x%016llx\n",
                "CN23XX_SLI_PKT_CNT_INT",
                CVM_CAST64(CN23XX_SLI_PKT_CNT_INT),
                CVM_CAST64(octeon_read_csr64(oct, CN23XX_SLI_PKT_CNT_INT)));
}

static int cn23xx_pf_soft_reset(struct octeon_device *oct)
{
        octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);

        dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: BIST enabled for CN23XX soft reset\n",
                oct->octeon_id);

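        /* Write a sentinel value; a successful soft reset clears SCRATCH1,
         * so a nonzero readback below means the reset did not complete.
         */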
        octeon_write_csr64(oct, CN23XX_SLI_SCRATCH1, 0x1234ULL);

        /* Initiate chip-wide soft reset */
        lio_pci_readq(oct, CN23XX_RST_SOFT_RST);
        lio_pci_writeq(oct, 1, CN23XX_RST_SOFT_RST);

        /* Wait for 100ms as Octeon resets. */
        mdelay(100);

        if (octeon_read_csr64(oct, CN23XX_SLI_SCRATCH1)) {
                dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Soft reset failed\n",
                        oct->octeon_id);
                return 1;
        }

        dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Reset completed\n",
                oct->octeon_id);

        /* Restore the reset value */
        octeon_write_csr64(oct, CN23XX_WIN_WR_MASK_REG, 0xFF);

        return 0;
}

static void cn23xx_enable_error_reporting(struct octeon_device *oct)
{
        u32 regval;
        u32 uncorrectable_err_mask, correctable_err_status;

        pci_read_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, &regval);
        if (regval & CN23XX_CONFIG_PCIE_DEVCTL_MASK) {
                uncorrectable_err_mask = 0;
                correctable_err_status = 0;
                pci_read_config_dword(oct->pci_dev,
                                      CN23XX_CONFIG_PCIE_UNCORRECT_ERR_MASK,
                                      &uncorrectable_err_mask);
                pci_read_config_dword(oct->pci_dev,
                                      CN23XX_CONFIG_PCIE_CORRECT_ERR_STATUS,
                                      &correctable_err_status);
                dev_err(&oct->pci_dev->dev, "PCI-E Fatal error detected;\n"
                                 "\tdev_ctl_status_reg = 0x%08x\n"
                                 "\tuncorrectable_error_mask_reg = 0x%08x\n"
                                 "\tcorrectable_error_status_reg = 0x%08x\n",
                            regval, uncorrectable_err_mask,
                            correctable_err_status);
        }

        regval |= 0xf; /* Enable correctable, non-fatal, fatal and UR
                        * error reporting
                        */

        dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: Enabling PCI-E error reporting..\n",
                oct->octeon_id);
        pci_write_config_dword(oct->pci_dev, CN23XX_CONFIG_PCIE_DEVCTL, regval);
}

static u32 cn23xx_coprocessor_clock(struct octeon_device *oct)
{
        /* Bits 29:24 of RST_BOOT[PNR_MUL] hold the reference-clock
         * multiplier for SLI.
         */

        /* TBD: get the info in Hand-shake */
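        /* Worked example with an assumed multiplier (not from the source):
         * PNR_MUL = 12 gives 12 * 50 = 600, i.e. a 600 MHz SLI clock.
         */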
        return (((lio_pci_readq(oct, CN23XX_RST_BOOT) >> 24) & 0x3f) * 50);
}

u32 cn23xx_pf_get_oq_ticks(struct octeon_device *oct, u32 time_intr_in_us)
{
        /* This gives the SLI clock per microsec */
        u32 oqticks_per_us = cn23xx_coprocessor_clock(oct);

        oct->pfvf_hsword.coproc_tics_per_us = oqticks_per_us;

        /* This gives the clock cycles per millisecond */
        oqticks_per_us *= 1000;

        /* This gives the oq ticks (1024 core clock cycles) per millisecond */
        oqticks_per_us /= 1024;

        /* time_intr is in microseconds. The next 2 steps give the oq ticks
         * corresponding to time_intr.
         */
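        /* Worked example with assumed numbers (600 MHz SLI clock,
         * time_intr_in_us = 100): 600 * 1000 / 1024 = 585 oq ticks per ms,
         * then 585 * 100 / 1000 = 58 oq ticks for a 100 us interval.
         */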
        oqticks_per_us *= time_intr_in_us;
        oqticks_per_us /= 1000;

        return oqticks_per_us;
}

static void cn23xx_setup_global_mac_regs(struct octeon_device *oct)
{
        u16 mac_no = oct->pcie_port;
        u16 pf_num = oct->pf_num;
        u64 reg_val;
        u64 temp;

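        /* SLI_PKT_MAC()_PF()_RINFO field layout, as used by the field
         * comments below: SRN <6:0>, TRS <23:16>, RPVF <39:32>, NVFS <55:48>.
         */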
        /* Program SRN and TRS for each MAC (0..3) */

        dev_dbg(&oct->pci_dev->dev, "%s:Using pcie port %d\n",
                __func__, mac_no);
        /* By default, map all 64 IOQs to a single MAC */

        reg_val =
            octeon_read_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num));

        if (oct->rev_id == OCTEON_CN23XX_REV_1_1) {
                /* setting SRN <6:0> */
                reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
        } else {
                /* setting SRN <6:0> */
                reg_val = pf_num * CN23XX_MAX_RINGS_PER_PF;
        }

        /* setting TRS <23:16> */
        reg_val = reg_val |
                  (oct->sriov_info.trs << CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS);
        /* setting RPVF <39:32> */
        temp = oct->sriov_info.rings_per_vf & 0xff;
        reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_RPVF_BIT_POS);

        /* setting NVFS <55:48> */
        temp = oct->sriov_info.max_vfs & 0xff;
        reg_val |= (temp << CN23XX_PKT_MAC_CTL_RINFO_NVFS_BIT_POS);

        /* write these settings to MAC register */
        octeon_write_csr64(oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num),
                           reg_val);

        dev_dbg(&oct->pci_dev->dev, "SLI_PKT_MAC(%d)_PF(%d)_RINFO : 0x%016llx\n",
                mac_no, pf_num, (u64)octeon_read_csr64
                (oct, CN23XX_SLI_PKT_MAC_RINFO64(mac_no, pf_num)));
}

static int cn23xx_reset_io_queues(struct octeon_device *oct)
{
        int ret_val = 0;
        u64 d64;
        u32 q_no, srn, ern;
        u32 loop = 1000;

        srn = oct->sriov_info.pf_srn;
        ern = srn + oct->sriov_info.num_pf_rings;

        /* As per the HRM register description, s/w can't write 0 to ENB;
         * to turn a queue off, the RST bit must be set instead.
         */

        /* Reset the Enable bit for all the 64 IQs. */
        for (q_no = srn; q_no < ern; q_no++) {
                /* set RST bit to 1. This bit applies to both IQ and OQ */
                d64 = octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
                d64 = d64 | CN23XX_PKT_INPUT_CTL_RST;
                octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), d64);
        }

        /* Wait until the RST bit is clear or the RST and QUIET bits are set */
        for (q_no = srn; q_no < ern; q_no++) {
                u64 reg_val = octeon_read_csr64(oct,
                                        CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
                while ((READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) &&
                       !(READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_QUIET) &&
                       loop--) {
                        WRITE_ONCE(reg_val, octeon_read_csr64(
                            oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
                }
                if (!loop) {
                        dev_err(&oct->pci_dev->dev,
                                "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
                                q_no);
                        return -1;
                }
                WRITE_ONCE(reg_val, READ_ONCE(reg_val) &
                        ~CN23XX_PKT_INPUT_CTL_RST);
                octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
                                   READ_ONCE(reg_val));

                WRITE_ONCE(reg_val, octeon_read_csr64(
                           oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
                if (READ_ONCE(reg_val) & CN23XX_PKT_INPUT_CTL_RST) {
                        dev_err(&oct->pci_dev->dev,
                                "clearing the reset failed for qno: %u\n",
                                q_no);
                        ret_val = -1;
                }
        }

        return ret_val;
}

static int cn23xx_pf_setup_global_input_regs(struct octeon_device *oct)
{
        struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
        struct octeon_instr_queue *iq;
        u64 intr_threshold, reg_val;
        u32 q_no, ern, srn;
        u64 pf_num;
        u64 vf_num;

        pf_num = oct->pf_num;

        srn = oct->sriov_info.pf_srn;
        ern = srn + oct->sriov_info.num_pf_rings;

        if (cn23xx_reset_io_queues(oct))
                return -1;

        /* Set the MAC_NUM and PVF_NUM in the IQ_PKT_CONTROL reg
         * for all queues. Only the PF can set these bits.
         * Bits 29:30 indicate the MAC num.
         * Bits 32:47 indicate the PVF num.
         */
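        /* Example encoding, assuming only the bit positions quoted in the
         * comment above (MAC field at bit 29, PVF field starting at bit 32):
         * with oct->pcie_port = 1, vf_num = 2 and pf_num = 0, reg_val becomes
         * (1ULL << 29) | (2ULL << 32) = 0x220000000.
         */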
        for (q_no = 0; q_no < ern; q_no++) {
                reg_val = (u64)oct->pcie_port << CN23XX_PKT_INPUT_CTL_MAC_NUM_POS;

                /* for VF assigned queues. */
                if (q_no < oct->sriov_info.pf_srn) {
                        vf_num = q_no / oct->sriov_info.rings_per_vf;
                        vf_num += 1; /* VF1, VF2, ... */
                } else {
                        vf_num = 0;
                }

                reg_val |= vf_num << CN23XX_PKT_INPUT_CTL_VF_NUM_POS;
                reg_val |= pf_num << CN23XX_PKT_INPUT_CTL_PF_NUM_POS;

                octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
                                   reg_val);
        }

        /* Select ES, RO, NS, RDSIZE, DPTR Format#0 for
         * PF queues
         */
        for (q_no = srn; q_no < ern; q_no++) {
                void __iomem *inst_cnt_reg;

                iq = oct->instr_queue[q_no];
                if (iq)
                        inst_cnt_reg = iq->inst_cnt_reg;
                else
                        inst_cnt_reg = (u8 *)oct->mmio[0].hw_addr +
                                       CN23XX_SLI_IQ_INSTR_COUNT64(q_no);

                reg_val =
                    octeon_read_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

                reg_val |= CN23XX_PKT_INPUT_CTL_MASK;

                octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
                                   reg_val);

                /* Set WMARK level for triggering PI_INT */
                /* intr_threshold = CN23XX_DEF_IQ_INTR_THRESHOLD & */
                intr_threshold = CFG_GET_IQ_INTR_PKT(cn23xx->conf) &
                                 CN23XX_PKT_IN_DONE_WMARK_MASK;

                writeq((readq(inst_cnt_reg) &
                        ~(CN23XX_PKT_IN_DONE_WMARK_MASK <<
                          CN23XX_PKT_IN_DONE_WMARK_BIT_POS)) |
                       (intr_threshold << CN23XX_PKT_IN_DONE_WMARK_BIT_POS),
                       inst_cnt_reg);
        }
        return 0;
}

static void cn23xx_pf_setup_global_output_regs(struct octeon_device *oct)
{
        u32 reg_val;
        u32 q_no, ern, srn;
        u64 time_threshold;

        struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;

        srn = oct->sriov_info.pf_srn;
        ern = srn + oct->sriov_info.num_pf_rings;

        if (CFG_GET_IS_SLI_BP_ON(cn23xx->conf)) {
                octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 32);
        } else {
                /* Set Output queue watermark to 0 to disable backpressure */
                octeon_write_csr64(oct, CN23XX_SLI_OQ_WMARK, 0);
        }

        for (q_no = srn; q_no < ern; q_no++) {
                reg_val = octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));

                /* clear IPTR */
                reg_val &= ~CN23XX_PKT_OUTPUT_CTL_IPTR;

                /* set DPTR */
                reg_val |= CN23XX_PKT_OUTPUT_CTL_DPTR;

                /* reset BMODE */
                reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_BMODE);

                /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
                 * for Output Queue ScatterList
                 * reset ROR_P, NSR_P
                 */
                reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR_P);
                reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR_P);

#ifdef __LITTLE_ENDIAN_BITFIELD
                reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ES_P);
#else
                reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES_P);
#endif
                /* No Relaxed Ordering, No Snoop, 64-bit Byte swap
                 * for Output Queue Data
                 * reset ROR, NSR
                 */
                reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_ROR);
                reg_val &= ~(CN23XX_PKT_OUTPUT_CTL_NSR);
                /* set the ES bit */
                reg_val |= (CN23XX_PKT_OUTPUT_CTL_ES);

                /* write all the selected settings */
                octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no), reg_val);

                /* These interrupts are enabled in the
                 * oct->fn_list.enable_interrupt() routine, which is called
                 * after IOQ init. Set up the interrupt packet and time
                 * thresholds for all the OQs here.
                 */
                time_threshold = cn23xx_pf_get_oq_ticks(
                    oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));

                octeon_write_csr64(oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(q_no),
                                   (CFG_GET_OQ_INTR_PKT(cn23xx->conf) |
                                    (time_threshold << 32)));
        }

        /* Set the watermark level for PKO backpressure */
        writeq(0x40, (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_WMARK);

        /* Disable the feature that puts OQs into reset when a ring has no
         * doorbells pending; enabling it would cause head-of-line blocking.
         */
        /* Do this only for revisions 1.0 and 1.1 */
        if ((oct->rev_id == OCTEON_CN23XX_REV_1_0) ||
            (oct->rev_id == OCTEON_CN23XX_REV_1_1))
                writeq(readq((u8 *)oct->mmio[0].hw_addr +
                                     CN23XX_SLI_GBL_CONTROL) | 0x2,
                       (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_GBL_CONTROL);

        /* Enable channel-level backpressure */
        if (oct->pf_num)
                writeq(0xffffffffffffffffULL,
                       (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN2_W1S);
        else
                writeq(0xffffffffffffffffULL,
                       (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OUT_BP_EN_W1S);
}

static int cn23xx_setup_pf_device_regs(struct octeon_device *oct)
{
        cn23xx_enable_error_reporting(oct);

        /* program the MAC(0..3)_RINFO before setting up input/output regs */
        cn23xx_setup_global_mac_regs(oct);

        if (cn23xx_pf_setup_global_input_regs(oct))
                return -1;

        cn23xx_pf_setup_global_output_regs(oct);

        /* The default error timeout value should be 0x200000 to avoid a host
         * hang when an invalid register is read.
         */
        octeon_write_csr64(oct, CN23XX_SLI_WINDOW_CTL,
                           CN23XX_SLI_WINDOW_CTL_DEFAULT);

        /* set SLI_PKT_IN_JABBER to handle large VXLAN packets */
        octeon_write_csr64(oct, CN23XX_SLI_PKT_IN_JABBER, CN23XX_INPUT_JABBER);
        return 0;
}

static void cn23xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
        struct octeon_instr_queue *iq = oct->instr_queue[iq_no];
        u64 pkt_in_done;

        iq_no += oct->sriov_info.pf_srn;

        /* Write the start of the input queue's ring and its size */
        octeon_write_csr64(oct, CN23XX_SLI_IQ_BASE_ADDR64(iq_no),
                           iq->base_addr_dma);
        octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count);

        /* Remember the doorbell & instruction count register addr
         * for this queue
         */
        iq->doorbell_reg =
            (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_DOORBELL(iq_no);
        iq->inst_cnt_reg =
            (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_IQ_INSTR_COUNT64(iq_no);
        dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
                iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

        /* Store the current instruction counter (used in flush_iq
         * calculation)
         */
        pkt_in_done = readq(iq->inst_cnt_reg);

        if (oct->msix_on) {
                /* Set CINT_ENB to enable IQ interrupt */
                writeq((pkt_in_done | CN23XX_INTR_CINT_ENB),
                       iq->inst_cnt_reg);
        } else {
                /* Clear the count by writing back what we read, but don't
                 * enable interrupts
                 */
                writeq(pkt_in_done, iq->inst_cnt_reg);
        }

        iq->reset_instr_cnt = 0;
}

static void cn23xx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
        u32 reg_val;
        struct octeon_droq *droq = oct->droq[oq_no];
        struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
        u64 time_threshold;
        u64 cnt_threshold;

        oq_no += oct->sriov_info.pf_srn;

        octeon_write_csr64(oct, CN23XX_SLI_OQ_BASE_ADDR64(oq_no),
                           droq->desc_ring_dma);
        octeon_write_csr(oct, CN23XX_SLI_OQ_SIZE(oq_no), droq->max_count);

        octeon_write_csr(oct, CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
                         droq->buffer_size);

        /* Get the mapped address of the pkts_sent and pkts_credit regs */
        droq->pkts_sent_reg =
            (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_SENT(oq_no);
        droq->pkts_credit_reg =
            (u8 *)oct->mmio[0].hw_addr + CN23XX_SLI_OQ_PKTS_CREDIT(oq_no);

        if (!oct->msix_on) {
                /* Enable this output queue to generate Packet Timer Interrupt
                 */
                reg_val =
                    octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
                reg_val |= CN23XX_PKT_OUTPUT_CTL_TENB;
                octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
                                 reg_val);

                /* Enable this output queue to generate Packet Count Interrupt
                 */
                reg_val =
                    octeon_read_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no));
                reg_val |= CN23XX_PKT_OUTPUT_CTL_CENB;
                octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(oq_no),
                                 reg_val);
        } else {
                time_threshold = cn23xx_pf_get_oq_ticks(
                    oct, (u32)CFG_GET_OQ_INTR_TIME(cn23xx->conf));
                cnt_threshold = (u32)CFG_GET_OQ_INTR_PKT(cn23xx->conf);

                octeon_write_csr64(
                    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(oq_no),
                    ((time_threshold << 32 | cnt_threshold)));
        }
}

static void cn23xx_pf_mbox_thread(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct octeon_mbox *mbox = (struct octeon_mbox *)wk->ctxptr;
        struct octeon_device *oct = mbox->oct_dev;
        u64 mbox_int_val, val64;
        u32 q_no, i;

        if (oct->rev_id < OCTEON_CN23XX_REV_1_1) {
                /* read and clear by writing 1 */
                mbox_int_val = readq(mbox->mbox_int_reg);
                writeq(mbox_int_val, mbox->mbox_int_reg);

                for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
                        q_no = i * oct->sriov_info.rings_per_vf;

                        val64 = readq(oct->mbox[q_no]->mbox_write_reg);

                        if (val64 && (val64 != OCTEON_PFVFACK)) {
                                if (octeon_mbox_read(oct->mbox[q_no]))
                                        octeon_mbox_process_message(
                                            oct->mbox[q_no]);
                        }
                }

                schedule_delayed_work(&wk->work, msecs_to_jiffies(10));
        } else {
                octeon_mbox_process_message(mbox);
        }
}

static int cn23xx_setup_pf_mbox(struct octeon_device *oct)
{
        struct octeon_mbox *mbox = NULL;
        u16 mac_no = oct->pcie_port;
        u16 pf_num = oct->pf_num;
        u32 q_no, i;

        if (!oct->sriov_info.max_vfs)
                return 0;

        for (i = 0; i < oct->sriov_info.max_vfs; i++) {
                q_no = i * oct->sriov_info.rings_per_vf;

                mbox = vzalloc(sizeof(*mbox));
                if (!mbox)
                        goto free_mbox;

                spin_lock_init(&mbox->lock);

                mbox->oct_dev = oct;

                mbox->q_no = q_no;

                mbox->state = OCTEON_MBOX_STATE_IDLE;

                /* PF mbox interrupt reg */
                mbox->mbox_int_reg = (u8 *)oct->mmio[0].hw_addr +
                                     CN23XX_SLI_MAC_PF_MBOX_INT(mac_no, pf_num);

                /* PF writes into SIG0 reg */
                mbox->mbox_write_reg = (u8 *)oct->mmio[0].hw_addr +
                                       CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 0);

                /* PF reads from SIG1 reg */
                mbox->mbox_read_reg = (u8 *)oct->mmio[0].hw_addr +
                                      CN23XX_SLI_PKT_PF_VF_MBOX_SIG(q_no, 1);

                /* Mailbox thread creation */
                INIT_DELAYED_WORK(&mbox->mbox_poll_wk.work,
                                  cn23xx_pf_mbox_thread);
                mbox->mbox_poll_wk.ctxptr = (void *)mbox;

                oct->mbox[q_no] = mbox;

                writeq(OCTEON_PFVFSIG, mbox->mbox_read_reg);
        }

        if (oct->rev_id < OCTEON_CN23XX_REV_1_1)
                schedule_delayed_work(&oct->mbox[0]->mbox_poll_wk.work,
                                      msecs_to_jiffies(0));

        return 0;

free_mbox:
        while (i) {
                i--;
                vfree(oct->mbox[i]);
        }

        return 1;
}

static int cn23xx_free_pf_mbox(struct octeon_device *oct)
{
        u32 q_no, i;

        if (!oct->sriov_info.max_vfs)
                return 0;

        for (i = 0; i < oct->sriov_info.max_vfs; i++) {
                q_no = i * oct->sriov_info.rings_per_vf;
                cancel_delayed_work_sync(
                    &oct->mbox[q_no]->mbox_poll_wk.work);
                vfree(oct->mbox[q_no]);
        }

        return 0;
}

static int cn23xx_enable_io_queues(struct octeon_device *oct)
{
        u64 reg_val;
        u32 srn, ern, q_no;
        u32 loop = 1000;

        srn = oct->sriov_info.pf_srn;
        ern = srn + oct->num_iqs;

        for (q_no = srn; q_no < ern; q_no++) {
                /* set the corresponding IQ IS_64B bit */
                if (oct->io_qmask.iq64B & BIT_ULL(q_no - srn)) {
                        reg_val = octeon_read_csr64(
                            oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
                        reg_val = reg_val | CN23XX_PKT_INPUT_CTL_IS_64B;
                        octeon_write_csr64(
                            oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
                }

                /* set the corresponding IQ ENB bit */
                if (oct->io_qmask.iq & BIT_ULL(q_no - srn)) {
                        /* IOQs are in reset by default in PEM2 mode,
                         * clearing reset bit
                         */
                        reg_val = octeon_read_csr64(
                            oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));

                        if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
                                while ((reg_val & CN23XX_PKT_INPUT_CTL_RST) &&
                                       !(reg_val &
                                         CN23XX_PKT_INPUT_CTL_QUIET) &&
                                       --loop) {
                                        reg_val = octeon_read_csr64(
                                            oct,
                                            CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
                                }
                                if (!loop) {
                                        dev_err(&oct->pci_dev->dev,
                                                "clearing the reset reg failed or setting the quiet reg failed for qno: %u\n",
                                                q_no);
                                        return -1;
                                }
                                reg_val = reg_val & ~CN23XX_PKT_INPUT_CTL_RST;
                                octeon_write_csr64(
                                    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
                                    reg_val);

                                reg_val = octeon_read_csr64(
                                    oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
                                if (reg_val & CN23XX_PKT_INPUT_CTL_RST) {
                                        dev_err(&oct->pci_dev->dev,
                                                "clearing the reset failed for qno: %u\n",
                                                q_no);
                                        return -1;
                                }
                        }
                        reg_val = octeon_read_csr64(
                            oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no));
                        reg_val = reg_val | CN23XX_PKT_INPUT_CTL_RING_ENB;
                        octeon_write_csr64(
                            oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no), reg_val);
                }
        }
        for (q_no = srn; q_no < ern; q_no++) {
                u32 reg_val;
                /* set the corresponding OQ ENB bit */
                if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {
                        reg_val = octeon_read_csr(
                            oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no));
                        reg_val = reg_val | CN23XX_PKT_OUTPUT_CTL_RING_ENB;
                        octeon_write_csr(oct, CN23XX_SLI_OQ_PKT_CONTROL(q_no),
                                         reg_val);
                }
        }
        return 0;
}

static void cn23xx_disable_io_queues(struct octeon_device *oct)
{
        int q_no, loop;
        u64 d64;
        u32 d32;
        u32 srn, ern;

        srn = oct->sriov_info.pf_srn;
        ern = srn + oct->num_iqs;

        /*** Disable Input Queues. ***/
        for (q_no = srn; q_no < ern; q_no++) {
                loop = HZ;

                /* start the Reset for a particular ring */
                WRITE_ONCE(d64, octeon_read_csr64(
                           oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no)));
                WRITE_ONCE(d64, READ_ONCE(d64) &
                                        (~(CN23XX_PKT_INPUT_CTL_RING_ENB)));
                WRITE_ONCE(d64, READ_ONCE(d64) | CN23XX_PKT_INPUT_CTL_RST);
                octeon_write_csr64(oct, CN23XX_SLI_IQ_PKT_CONTROL64(q_no),
                                   READ_ONCE(d64));

                /* Wait until hardware indicates that the particular IQ
                 * is out of reset.
                 */
                WRITE_ONCE(d64, octeon_read_csr64(
                                        oct, CN23XX_SLI_PKT_IOQ_RING_RST));
                while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
                        WRITE_ONCE(d64, octeon_read_csr64(
                                        oct, CN23XX_SLI_PKT_IOQ_RING_RST));
                        schedule_timeout_uninterruptible(1);
                }

                /* Reset the doorbell register for this Input Queue. */
                octeon_write_csr(oct, CN23XX_SLI_IQ_DOORBELL(q_no), 0xFFFFFFFF);
                while (octeon_read_csr64(oct, CN23XX_SLI_IQ_DOORBELL(q_no)) &&
                       loop--) {
                        schedule_timeout_uninterruptible(1);
                }
        }

        /*** Disable Output Queues. ***/
        for (q_no = srn; q_no < ern; q_no++) {
                loop = HZ;

                /* Wait until hardware indicates that the particular IQ
                 * is out of reset. Note that SLI_PKT_RING_RST is
                 * common to both IQs and OQs.
                 */
                WRITE_ONCE(d64, octeon_read_csr64(
                                        oct, CN23XX_SLI_PKT_IOQ_RING_RST));
                while (!(READ_ONCE(d64) & BIT_ULL(q_no)) && loop--) {
                        WRITE_ONCE(d64, octeon_read_csr64(
                                        oct, CN23XX_SLI_PKT_IOQ_RING_RST));
                        schedule_timeout_uninterruptible(1);
                }

                /* Reset the doorbell register for this Output Queue. */
                octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_CREDIT(q_no),
                                 0xFFFFFFFF);
                while (octeon_read_csr64(oct,
                                         CN23XX_SLI_OQ_PKTS_CREDIT(q_no)) &&
                       loop--) {
                        schedule_timeout_uninterruptible(1);
                }

                /* clear the SLI_PKT(0..63)_CNTS[CNT] reg value */
                WRITE_ONCE(d32, octeon_read_csr(
                                        oct, CN23XX_SLI_OQ_PKTS_SENT(q_no)));
                octeon_write_csr(oct, CN23XX_SLI_OQ_PKTS_SENT(q_no),
                                 READ_ONCE(d32));
        }
}

static u64 cn23xx_pf_msix_interrupt_handler(void *dev)
{
        struct octeon_ioq_vector *ioq_vector = (struct octeon_ioq_vector *)dev;
        struct octeon_device *oct = ioq_vector->oct_dev;
        u64 pkts_sent;
        u64 ret = 0;
        struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];

        dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);

        if (!droq) {
                dev_err(&oct->pci_dev->dev, "23XX bringup FIXME: oct pfnum:%d ioq_vector->ioq_num :%d droq is NULL\n",
                        oct->pf_num, ioq_vector->ioq_num);
                return 0;
        }

        pkts_sent = readq(droq->pkts_sent_reg);

        /* If our device has interrupted, then proceed. Also check for
         * all f's, which the PCI read returns if the interrupt was
         * triggered on an error.
         */
        if (!pkts_sent || (pkts_sent == 0xFFFFFFFFFFFFFFFFULL))
                return ret;

        /* Write the count reg in sli_pkt_cnts to clear these interrupts. */
        if ((pkts_sent & CN23XX_INTR_PO_INT) ||
            (pkts_sent & CN23XX_INTR_PI_INT)) {
                if (pkts_sent & CN23XX_INTR_PO_INT)
                        ret |= MSIX_PO_INT;
        }

        if (pkts_sent & CN23XX_INTR_PI_INT)
                /* We will clear the count when we update the read_index. */
                ret |= MSIX_PI_INT;

        /* The PF never needs to handle an MSI-X mailbox interrupt here;
         * mailbox interrupts arrive on the last MSI-X vector.
         */
        return ret;
}

static void cn23xx_handle_pf_mbox_intr(struct octeon_device *oct)
{
        struct delayed_work *work;
        u64 mbox_int_val;
        u32 i, q_no;

        mbox_int_val = readq(oct->mbox[0]->mbox_int_reg);

        for (i = 0; i < oct->sriov_info.num_vfs_alloced; i++) {
                q_no = i * oct->sriov_info.rings_per_vf;

                if (mbox_int_val & BIT_ULL(q_no)) {
                        writeq(BIT_ULL(q_no),
                               oct->mbox[0]->mbox_int_reg);
                        if (octeon_mbox_read(oct->mbox[q_no])) {
                                work = &oct->mbox[q_no]->mbox_poll_wk.work;
                                schedule_delayed_work(work,
                                                      msecs_to_jiffies(0));
                        }
                }
        }
}

static irqreturn_t cn23xx_interrupt_handler(void *dev)
{
        struct octeon_device *oct = (struct octeon_device *)dev;
        struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
        u64 intr64;

        dev_dbg(&oct->pci_dev->dev, "In %s octeon_dev @ %p\n", __func__, oct);
        intr64 = readq(cn23xx->intr_sum_reg64);

        oct->int_status = 0;

        if (intr64 & CN23XX_INTR_ERR)
                dev_err(&oct->pci_dev->dev, "OCTEON[%d]: Error Intr: 0x%016llx\n",
                        oct->octeon_id, CVM_CAST64(intr64));

        /* When a VF writes into the MBOX_SIG2 reg, this interrupt is set
         * in the PF
         */
        if (intr64 & CN23XX_INTR_VF_MBOX)
                cn23xx_handle_pf_mbox_intr(oct);

        if (oct->msix_on != LIO_FLAG_MSIX_ENABLED) {
                if (intr64 & CN23XX_INTR_PKT_DATA)
                        oct->int_status |= OCT_DEV_INTR_PKT_DATA;
        }

        if (intr64 & (CN23XX_INTR_DMA0_FORCE))
                oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;
        if (intr64 & (CN23XX_INTR_DMA1_FORCE))
                oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;

        /* Clear the current interrupts */
        writeq(intr64, cn23xx->intr_sum_reg64);

        return IRQ_HANDLED;
}

static void cn23xx_bar1_idx_setup(struct octeon_device *oct, u64 core_addr,
                                  u32 idx, int valid)
{
        u64 bar1;
        u64 reg_adr;

        if (!valid) {
                reg_adr = lio_pci_readq(
                        oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
                WRITE_ONCE(bar1, reg_adr);
                lio_pci_writeq(oct, (READ_ONCE(bar1) & 0xFFFFFFFEULL),
                               CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
                reg_adr = lio_pci_readq(
                        oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
                WRITE_ONCE(bar1, reg_adr);
                return;
        }

        /* The PEM(0..3)_BAR1_INDEX(0..15)[ADDR_IDX]<23:4> stores
         * bits <41:22> of the Core Addr
         */
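        /* Illustrative mapping (example value, not from the source): for
         * core_addr = 0x100000000, (core_addr >> 22) << 4 = 0x4000, placing
         * address bits <41:22> into ADDR_IDX<23:4> before the mask is ORed
         * in below.
         */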
        lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
                       CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));

        WRITE_ONCE(bar1, lio_pci_readq(
                   oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx)));
}

static void cn23xx_bar1_idx_write(struct octeon_device *oct, u32 idx, u32 mask)
{
        lio_pci_writeq(oct, mask,
                       CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
}

static u32 cn23xx_bar1_idx_read(struct octeon_device *oct, u32 idx)
{
        return (u32)lio_pci_readq(
            oct, CN23XX_PEM_BAR1_INDEX_REG(oct->pcie_port, idx));
}

/* always call with lock held */
static u32 cn23xx_update_read_index(struct octeon_instr_queue *iq)
{
        u32 new_idx;
        u32 last_done;
        u32 pkt_in_done = readl(iq->inst_cnt_reg);

        last_done = pkt_in_done - iq->pkt_in_done;
        iq->pkt_in_done = pkt_in_done;

        /* Modulo of the new index with the IQ size will give us
         * the new index.  The iq->reset_instr_cnt is always zero for
         * cn23xx, so no extra adjustments are needed.
         */
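        /* Wrap-around example (illustrative numbers): with max_count = 1024,
         * octeon_read_index = 1020 and last_done = 10,
         * new_idx = (1020 + 10) % 1024 = 6.
         */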
        new_idx = (iq->octeon_read_index +
                   (u32)(last_done & CN23XX_PKT_IN_DONE_CNT_MASK)) %
                  iq->max_count;

        return new_idx;
}

static void cn23xx_enable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
{
        struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
        u64 intr_val = 0;

        /* Split the single write into multiple writes based on the flag. */
        /* Enable Interrupt */
        if (intr_flag == OCTEON_ALL_INTR) {
                writeq(cn23xx->intr_mask64, cn23xx->intr_enb_reg64);
        } else if (intr_flag & OCTEON_OUTPUT_INTR) {
                intr_val = readq(cn23xx->intr_enb_reg64);
                intr_val |= CN23XX_INTR_PKT_DATA;
                writeq(intr_val, cn23xx->intr_enb_reg64);
        } else if ((intr_flag & OCTEON_MBOX_INTR) &&
                   (oct->sriov_info.max_vfs > 0)) {
                if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
                        intr_val = readq(cn23xx->intr_enb_reg64);
                        intr_val |= CN23XX_INTR_VF_MBOX;
                        writeq(intr_val, cn23xx->intr_enb_reg64);
                }
        }
}

static void cn23xx_disable_pf_interrupt(struct octeon_device *oct, u8 intr_flag)
{
        struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
        u64 intr_val = 0;

        /* Disable Interrupts */
        if (intr_flag == OCTEON_ALL_INTR) {
                writeq(0, cn23xx->intr_enb_reg64);
        } else if (intr_flag & OCTEON_OUTPUT_INTR) {
                intr_val = readq(cn23xx->intr_enb_reg64);
                intr_val &= ~CN23XX_INTR_PKT_DATA;
                writeq(intr_val, cn23xx->intr_enb_reg64);
        } else if ((intr_flag & OCTEON_MBOX_INTR) &&
                   (oct->sriov_info.max_vfs > 0)) {
                if (oct->rev_id >= OCTEON_CN23XX_REV_1_1) {
                        intr_val = readq(cn23xx->intr_enb_reg64);
                        intr_val &= ~CN23XX_INTR_VF_MBOX;
                        writeq(intr_val, cn23xx->intr_enb_reg64);
                }
        }
}

static void cn23xx_get_pcie_qlmport(struct octeon_device *oct)
{
        oct->pcie_port = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;

        dev_dbg(&oct->pci_dev->dev, "OCTEON: CN23xx uses PCIE Port %d\n",
                oct->pcie_port);
}

static int cn23xx_get_pf_num(struct octeon_device *oct)
{
        u32 fdl_bit = 0;
        u64 pkt0_in_ctl, d64;
        int pfnum, mac, trs, ret;

        ret = 0;

        /* Read the Function Dependency Link reg to get the function number */
        if (pci_read_config_dword(oct->pci_dev, CN23XX_PCIE_SRIOV_FDL,
                                  &fdl_bit) == 0) {
                oct->pf_num = ((fdl_bit >> CN23XX_PCIE_SRIOV_FDL_BIT_POS) &
                               CN23XX_PCIE_SRIOV_FDL_MASK);
        } else {
                ret = -EINVAL;

                /* Under some virtual environments, extended PCI regs are
                 * inaccessible, in which case the above read will have failed.
                 * In this case, read the PF number from the
                 * SLI_PKT0_INPUT_CONTROL reg (written by f/w)
                 */
                pkt0_in_ctl = octeon_read_csr64(oct,
                                                CN23XX_SLI_IQ_PKT_CONTROL64(0));
                pfnum = (pkt0_in_ctl >> CN23XX_PKT_INPUT_CTL_PF_NUM_POS) &
                        CN23XX_PKT_INPUT_CTL_PF_NUM_MASK;
                mac = (octeon_read_csr(oct, CN23XX_SLI_MAC_NUMBER)) & 0xff;

                /* Validate the PF num by reading RINFO; f/w writes
                 * RINFO.trs == 1
                 */
                d64 = octeon_read_csr64(oct,
                                        CN23XX_SLI_PKT_MAC_RINFO64(mac, pfnum));
                trs = (int)(d64 >> CN23XX_PKT_MAC_CTL_RINFO_TRS_BIT_POS) & 0xff;
                if (trs == 1) {
                        dev_err(&oct->pci_dev->dev,
                                "OCTEON: error reading PCI cfg space pfnum, re-read %u\n",
                                pfnum);
                        oct->pf_num = pfnum;
                        ret = 0;
                } else {
                        dev_err(&oct->pci_dev->dev,
                                "OCTEON: error reading PCI cfg space pfnum; could not ascertain PF number\n");
                }
        }

        return ret;
}

static void cn23xx_setup_reg_address(struct octeon_device *oct)
{
        u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
        struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;

        oct->reg_list.pci_win_wr_addr_hi =
            (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_HI);
        oct->reg_list.pci_win_wr_addr_lo =
            (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR_LO);
        oct->reg_list.pci_win_wr_addr =
            (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_ADDR64);

        oct->reg_list.pci_win_rd_addr_hi =
            (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_HI);
        oct->reg_list.pci_win_rd_addr_lo =
            (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR_LO);
        oct->reg_list.pci_win_rd_addr =
            (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_ADDR64);

        oct->reg_list.pci_win_wr_data_hi =
            (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_HI);
        oct->reg_list.pci_win_wr_data_lo =
            (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA_LO);
        oct->reg_list.pci_win_wr_data =
            (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_WR_DATA64);

        oct->reg_list.pci_win_rd_data_hi =
            (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_HI);
        oct->reg_list.pci_win_rd_data_lo =
            (u32 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA_LO);
        oct->reg_list.pci_win_rd_data =
            (u64 __iomem *)(bar0_pciaddr + CN23XX_WIN_RD_DATA64);

        cn23xx_get_pcie_qlmport(oct);

        cn23xx->intr_mask64 = CN23XX_INTR_MASK;
        if (!oct->msix_on)
                cn23xx->intr_mask64 |= CN23XX_INTR_PKT_TIME;
        if (oct->rev_id >= OCTEON_CN23XX_REV_1_1)
                cn23xx->intr_mask64 |= CN23XX_INTR_VF_MBOX;

        cn23xx->intr_sum_reg64 =
            bar0_pciaddr +
            CN23XX_SLI_MAC_PF_INT_SUM64(oct->pcie_port, oct->pf_num);
        cn23xx->intr_enb_reg64 =
            bar0_pciaddr +
            CN23XX_SLI_MAC_PF_INT_ENB64(oct->pcie_port, oct->pf_num);
}

int cn23xx_sriov_config(struct octeon_device *oct)
{
        struct octeon_cn23xx_pf *cn23xx = (struct octeon_cn23xx_pf *)oct->chip;
        u32 max_rings, total_rings, max_vfs, rings_per_vf;
        u32 pf_srn, num_pf_rings;
        u32 max_possible_vfs;

        cn23xx->conf =
                (struct octeon_config *)oct_get_config_info(oct, LIO_23XX);
        switch (oct->rev_id) {
        case OCTEON_CN23XX_REV_1_0:
                max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_0;
                max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_0;
                break;
        case OCTEON_CN23XX_REV_1_1:
                max_rings = CN23XX_MAX_RINGS_PER_PF_PASS_1_1;
                max_possible_vfs = CN23XX_MAX_VFS_PER_PF_PASS_1_1;
                break;
        default:
                max_rings = CN23XX_MAX_RINGS_PER_PF;
                max_possible_vfs = CN23XX_MAX_VFS_PER_PF;
                break;
        }

        if (oct->sriov_info.num_pf_rings)
                num_pf_rings = oct->sriov_info.num_pf_rings;
        else
                num_pf_rings = num_present_cpus();

#ifdef CONFIG_PCI_IOV
        max_vfs = min_t(u32,
                        (max_rings - num_pf_rings), max_possible_vfs);
        rings_per_vf = 1;
#else
        max_vfs = 0;
        rings_per_vf = 0;
#endif

        total_rings = num_pf_rings + max_vfs;

        /* The first ring of the PF */
        pf_srn = total_rings - num_pf_rings;
1291
1292         oct->sriov_info.trs = total_rings;
1293         oct->sriov_info.max_vfs = max_vfs;
1294         oct->sriov_info.rings_per_vf = rings_per_vf;
1295         oct->sriov_info.pf_srn = pf_srn;
1296         oct->sriov_info.num_pf_rings = num_pf_rings;
1297         dev_notice(&oct->pci_dev->dev, "trs:%d max_vfs:%d rings_per_vf:%d pf_srn:%d num_pf_rings:%d\n",
1298                    oct->sriov_info.trs, oct->sriov_info.max_vfs,
1299                    oct->sriov_info.rings_per_vf, oct->sriov_info.pf_srn,
1300                    oct->sriov_info.num_pf_rings);
1301
1302         oct->sriov_info.sriov_enabled = 0;
1303
1304         return 0;
1305 }
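
/*
 * Worked example of the ring split computed above, with illustrative
 * numbers only (the real bounds come from the CN23XX_MAX_* macros):
 * given max_rings = 64 and 8 present CPUs, num_pf_rings = 8, so with
 * CONFIG_PCI_IOV and max_possible_vfs >= 56 this yields max_vfs = 56
 * and rings_per_vf = 1.  Then total_rings = 8 + 56 = 64 and
 * pf_srn = 64 - 8 = 56: the VFs take rings 0..55, one ring each, and
 * the PF starts at ring 56 and owns the last eight rings, 56..63.
 */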

int setup_cn23xx_octeon_pf_device(struct octeon_device *oct)
{
        u32 data32;
        u64 BAR0, BAR1;

        pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_0, &data32);
        BAR0 = (u64)(data32 & ~0xf);
        pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_1, &data32);
        BAR0 |= ((u64)data32 << 32);
        pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_2, &data32);
        BAR1 = (u64)(data32 & ~0xf);
        pci_read_config_dword(oct->pci_dev, PCI_BASE_ADDRESS_3, &data32);
        BAR1 |= ((u64)data32 << 32);

        if (!BAR0 || !BAR1) {
                if (!BAR0)
                        dev_err(&oct->pci_dev->dev, "device BAR0 unassigned\n");
                if (!BAR1)
                        dev_err(&oct->pci_dev->dev, "device BAR1 unassigned\n");
                return 1;
        }

        if (octeon_map_pci_barx(oct, 0, 0))
                return 1;

        if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
                dev_err(&oct->pci_dev->dev, "%s CN23XX BAR1 map failed\n",
                        __func__);
                octeon_unmap_pci_barx(oct, 0);
                return 1;
        }

        /* Unmap both BARs on failure so this error path does not leak
         * the mappings established above.
         */
        if (cn23xx_get_pf_num(oct) != 0) {
                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);
                return 1;
        }

        if (cn23xx_sriov_config(oct)) {
                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);
                return 1;
        }

        octeon_write_csr64(oct, CN23XX_SLI_MAC_CREDIT_CNT, 0x3F802080802080ULL);

        oct->fn_list.setup_iq_regs = cn23xx_setup_iq_regs;
        oct->fn_list.setup_oq_regs = cn23xx_setup_oq_regs;
        oct->fn_list.setup_mbox = cn23xx_setup_pf_mbox;
        oct->fn_list.free_mbox = cn23xx_free_pf_mbox;

        oct->fn_list.process_interrupt_regs = cn23xx_interrupt_handler;
        oct->fn_list.msix_interrupt_handler = cn23xx_pf_msix_interrupt_handler;

        oct->fn_list.soft_reset = cn23xx_pf_soft_reset;
        oct->fn_list.setup_device_regs = cn23xx_setup_pf_device_regs;
        oct->fn_list.update_iq_read_idx = cn23xx_update_read_index;

        oct->fn_list.bar1_idx_setup = cn23xx_bar1_idx_setup;
        oct->fn_list.bar1_idx_write = cn23xx_bar1_idx_write;
        oct->fn_list.bar1_idx_read = cn23xx_bar1_idx_read;

        oct->fn_list.enable_interrupt = cn23xx_enable_pf_interrupt;
        oct->fn_list.disable_interrupt = cn23xx_disable_pf_interrupt;

        oct->fn_list.enable_io_queues = cn23xx_enable_io_queues;
        oct->fn_list.disable_io_queues = cn23xx_disable_io_queues;

        cn23xx_setup_reg_address(oct);

        oct->coproc_clock_rate = 1000000ULL * cn23xx_coprocessor_clock(oct);

        return 0;
}
EXPORT_SYMBOL_GPL(setup_cn23xx_octeon_pf_device);
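
/*
 * Hedged usage sketch: once setup_cn23xx_octeon_pf_device() has filled
 * in fn_list, chip-independent code dispatches through those pointers
 * rather than calling the cn23xx_* functions directly.  The wrapper
 * below is illustrative only, not part of the driver:
 */
static int example_reset_and_init_regs(struct octeon_device *oct)
{
        if (oct->fn_list.soft_reset(oct))
                return 1;

        return oct->fn_list.setup_device_regs(oct);
}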

int validate_cn23xx_pf_config_info(struct octeon_device *oct,
                                   struct octeon_config *conf23xx)
{
        if (CFG_GET_IQ_MAX_Q(conf23xx) > CN23XX_MAX_INPUT_QUEUES) {
                dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
                        __func__, CFG_GET_IQ_MAX_Q(conf23xx),
                        CN23XX_MAX_INPUT_QUEUES);
                return 1;
        }

        if (CFG_GET_OQ_MAX_Q(conf23xx) > CN23XX_MAX_OUTPUT_QUEUES) {
                dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
                        __func__, CFG_GET_OQ_MAX_Q(conf23xx),
                        CN23XX_MAX_OUTPUT_QUEUES);
                return 1;
        }

        if (CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_32BYTE_INSTR &&
            CFG_GET_IQ_INSTR_TYPE(conf23xx) != OCTEON_64BYTE_INSTR) {
                dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
                        __func__);
                return 1;
        }

        if (!CFG_GET_OQ_REFILL_THRESHOLD(conf23xx)) {
                dev_err(&oct->pci_dev->dev, "%s: Invalid refill threshold for OQ\n",
                        __func__);
                return 1;
        }

        if (!CFG_GET_OQ_INTR_TIME(conf23xx)) {
                dev_err(&oct->pci_dev->dev, "%s: Invalid interrupt time for OQ\n",
                        __func__);
                return 1;
        }

        return 0;
}

int cn23xx_fw_loaded(struct octeon_device *oct)
{
        u64 val;

        /* If there's more than one active PF on this NIC, then that
         * implies that the NIC firmware is loaded and running.  This check
         * prevents a rare false negative that might occur if we only relied
         * on checking the SCR2_BIT_FW_LOADED flag.  The false negative would
         * happen if the PF driver sees SCR2_BIT_FW_LOADED as cleared even
         * though the firmware was already loaded but still booting and has yet
         * to set SCR2_BIT_FW_LOADED.
         */
        if (atomic_read(oct->adapter_refcount) > 1)
                return 1;

        val = octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);
        return (val >> SCR2_BIT_FW_LOADED) & 1ULL;
}
EXPORT_SYMBOL_GPL(cn23xx_fw_loaded);
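
/*
 * A minimal sketch of how a probe path could consume cn23xx_fw_loaded().
 * example_request_and_load_fw() is a hypothetical helper standing in for
 * the driver's firmware-download path, not a real function here.
 */
static int example_request_and_load_fw(struct octeon_device *oct); /* hypothetical */

static int example_maybe_load_firmware(struct octeon_device *oct)
{
        if (cn23xx_fw_loaded(oct)) {
                /* Another PF (or an earlier boot) already loaded the
                 * firmware; skip the download and attach to the running
                 * image.
                 */
                return 0;
        }

        return example_request_and_load_fw(oct);
}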

void cn23xx_tell_vf_its_macaddr_changed(struct octeon_device *oct, int vfidx,
                                        u8 *mac)
{
        if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vfidx)) {
                struct octeon_mbox_cmd mbox_cmd;

                mbox_cmd.msg.u64 = 0;
                mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
                mbox_cmd.msg.s.resp_needed = 0;
                mbox_cmd.msg.s.cmd = OCTEON_PF_CHANGED_VF_MACADDR;
                mbox_cmd.msg.s.len = 1;
                mbox_cmd.recv_len = 0;
                mbox_cmd.recv_status = 0;
                mbox_cmd.fn = NULL;
                mbox_cmd.fn_arg = NULL;
                ether_addr_copy(mbox_cmd.msg.s.params, mac);
                mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
                octeon_mbox_write(oct, &mbox_cmd);
        }
}
EXPORT_SYMBOL_GPL(cn23xx_tell_vf_its_macaddr_changed);
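
/*
 * Hedged caller sketch, loosely modeled on a .ndo_set_vf_mac-style
 * handler (the driver's real handler lives elsewhere in the LiquidIO
 * code; this simplified version is illustrative only):
 */
static int example_set_vf_mac(struct octeon_device *oct, int vfidx,
                              const u8 *mac)
{
        if (!is_valid_ether_addr(mac))
                return -EINVAL;

        if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
                return -EINVAL;

        /* ... program the new unicast filter for the VF's rings here ... */

        /* If the VF driver is loaded, tell it the PF changed its MAC. */
        cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, (u8 *)mac);

        return 0;
}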

static void
cn23xx_get_vf_stats_callback(struct octeon_device *oct,
                             struct octeon_mbox_cmd *cmd, void *arg)
{
        struct oct_vf_stats_ctx *ctx = arg;

        memcpy(ctx->stats, cmd->data, sizeof(struct oct_vf_stats));
        atomic_set(&ctx->status, 1);
}

int cn23xx_get_vf_stats(struct octeon_device *oct, int vfidx,
                        struct oct_vf_stats *stats)
{
        u32 timeout = HZ; /* 1 second, in jiffies */
        struct octeon_mbox_cmd mbox_cmd;
        struct oct_vf_stats_ctx ctx;
        u32 count = 0, ret;

        if (!(oct->sriov_info.vf_drv_loaded_mask & (1ULL << vfidx)))
                return -1;

        if (sizeof(struct oct_vf_stats) > sizeof(mbox_cmd.data))
                return -1;

        mbox_cmd.msg.u64 = 0;
        mbox_cmd.msg.s.type = OCTEON_MBOX_REQUEST;
        mbox_cmd.msg.s.resp_needed = 1;
        mbox_cmd.msg.s.cmd = OCTEON_GET_VF_STATS;
        mbox_cmd.msg.s.len = 1;
        mbox_cmd.q_no = vfidx * oct->sriov_info.rings_per_vf;
        mbox_cmd.recv_len = 0;
        mbox_cmd.recv_status = 0;
        mbox_cmd.fn = cn23xx_get_vf_stats_callback;
        ctx.stats = stats;
        atomic_set(&ctx.status, 0);
        mbox_cmd.fn_arg = (void *)&ctx;
        memset(mbox_cmd.data, 0, sizeof(mbox_cmd.data));
        octeon_mbox_write(oct, &mbox_cmd);

        do {
                schedule_timeout_uninterruptible(1);
        } while ((atomic_read(&ctx.status) == 0) && (count++ < timeout));

        ret = atomic_read(&ctx.status);
        if (ret == 0) {
                octeon_mbox_cancel(oct, 0);
                dev_err(&oct->pci_dev->dev, "Unable to get stats from VF-%d, timed out\n",
                        vfidx);
                return -1;
        }

        return 0;
}
EXPORT_SYMBOL_GPL(cn23xx_get_vf_stats);
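
/*
 * Hedged usage sketch: a .ndo_get_vf_stats-style caller copies the
 * mailbox results into the kernel's struct ifla_vf_stats (from
 * <linux/if_link.h>).  The oct_vf_stats field names below are assumed
 * to mirror the usual rx/tx packet and byte counters; simplified from
 * the netdev path.
 */
static int example_ndo_get_vf_stats(struct octeon_device *oct, int vfidx,
                                    struct ifla_vf_stats *vf_stats)
{
        struct oct_vf_stats stats;

        memset(&stats, 0, sizeof(stats));
        if (cn23xx_get_vf_stats(oct, vfidx, &stats))
                return -EIO;

        vf_stats->rx_packets = stats.rx_packets;
        vf_stats->tx_packets = stats.tx_packets;
        vf_stats->rx_bytes = stats.rx_bytes;
        vf_stats->tx_bytes = stats.tx_bytes;

        return 0;
}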