GNU Linux-libre 4.19.245-gnu1
[releases.git] / drivers / infiniband / hw / bnxt_re / qplib_rcfw.c
1 /*
2  * Broadcom NetXtreme-E RoCE driver.
3  *
4  * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
5  * Broadcom refers to Broadcom Limited and/or its subsidiaries.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * BSD license below:
12  *
13  * Redistribution and use in source and binary forms, with or without
14  * modification, are permitted provided that the following conditions
15  * are met:
16  *
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in
21  *    the documentation and/or other materials provided with the
22  *    distribution.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
26  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
27  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
28  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
31  * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
32  * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
33  * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
34  * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  *
36  * Description: RDMA Controller HW interface
37  */
38 #include <linux/interrupt.h>
39 #include <linux/spinlock.h>
40 #include <linux/pci.h>
41 #include <linux/prefetch.h>
42 #include <linux/delay.h>
43
44 #include "roce_hsi.h"
45 #include "qplib_res.h"
46 #include "qplib_rcfw.h"
47 #include "qplib_sp.h"
48 #include "qplib_fp.h"
49
50 static void bnxt_qplib_service_creq(unsigned long data);
51
52 /* Hardware communication channel */
53 static int __wait_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
54 {
55         u16 cbit;
56         int rc;
57
58         cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
59         rc = wait_event_timeout(rcfw->waitq,
60                                 !test_bit(cbit, rcfw->cmdq_bitmap),
61                                 msecs_to_jiffies(RCFW_CMD_WAIT_TIME_MS));
62         return rc ? 0 : -ETIMEDOUT;
63 };
64
65 static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie)
66 {
67         u32 count = RCFW_BLOCKED_CMD_WAIT_COUNT;
68         u16 cbit;
69
70         cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
71         if (!test_bit(cbit, rcfw->cmdq_bitmap))
72                 goto done;
73         do {
74                 mdelay(1); /* 1m sec */
75                 bnxt_qplib_service_creq((unsigned long)rcfw);
76         } while (test_bit(cbit, rcfw->cmdq_bitmap) && --count);
77 done:
78         return count ? 0 : -ETIMEDOUT;
79 };
80
/* __send_message() - queue one RCFW command on the CMDQ and ring the
 * CMDQ doorbell.
 *
 * @rcfw:     RCFW channel the command goes out on
 * @req:      command buffer; req->cmd_size is in 16-byte cmdqe units
 * @resp:     caller's response buffer; zeroed here and filled in later by
 *            the CREQ completion path (see bnxt_qplib_process_qp_event())
 * @sb:       optional side buffer (struct bnxt_qplib_rcfw_sbuf *) for
 *            commands that return bulk data via DMA
 * @is_block: nonzero when the caller will poll for completion; encoded
 *            into the cookie so the completion path skips the wakeup
 *
 * Return: 0 once the command has been queued (completion is asynchronous),
 * or -EINVAL/-ETIMEDOUT/-EAGAIN/-EBUSY when the command is rejected.
 */
static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
			  struct creq_base *resp, void *sb, u8 is_block)
{
	struct bnxt_qplib_cmdqe *cmdqe, **cmdq_ptr;
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct bnxt_qplib_crsq *crsqe;
	u32 sw_prod, cmdq_prod;
	unsigned long flags;
	u32 size, opcode;
	u16 cookie, cbit;
	u8 *preq;

	/* Before the FW is initialized only the bootstrap opcodes are
	 * allowed through; everything else is rejected outright.
	 */
	opcode = req->opcode;
	if (!test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    (opcode != CMDQ_BASE_OPCODE_QUERY_FUNC &&
	     opcode != CMDQ_BASE_OPCODE_INITIALIZE_FW &&
	     opcode != CMDQ_BASE_OPCODE_QUERY_VERSION)) {
		dev_err(&rcfw->pdev->dev,
			"QPLIB: RCFW not initialized, reject opcode 0x%x",
			opcode);
		return -EINVAL;
	}

	/* INITIALIZE_FW must only ever be sent once per channel */
	if (test_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags) &&
	    opcode == CMDQ_BASE_OPCODE_INITIALIZE_FW) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW already initialized!");
		return -EINVAL;
	}

	/* Once a command has timed out the channel is considered dead */
	if (test_bit(FIRMWARE_TIMED_OUT, &rcfw->flags))
		return -ETIMEDOUT;

	/* Cmdq are in 16-byte units, each request can consume 1 or more
	 * cmdqe
	 */
	spin_lock_irqsave(&cmdq->lock, flags);
	if (req->cmd_size >= HWQ_FREE_SLOTS(cmdq)) {
		dev_err(&rcfw->pdev->dev, "QPLIB: RCFW: CMDQ is full!");
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EAGAIN;
	}


	/* The cookie identifies this command in the completion; cbit is the
	 * matching slot in the outstanding-command bitmap / crsqe table.
	 */
	cookie = rcfw->seq_num & RCFW_MAX_COOKIE_VALUE;
	cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
	if (is_block)
		cookie |= RCFW_CMD_IS_BLOCKING;

	set_bit(cbit, rcfw->cmdq_bitmap);
	req->cookie = cpu_to_le16(cookie);
	crsqe = &rcfw->crsqe_tbl[cbit];
	/* A non-NULL resp means the previous command using this slot has
	 * not completed yet; back off and let the caller retry.
	 */
	if (crsqe->resp) {
		spin_unlock_irqrestore(&cmdq->lock, flags);
		return -EBUSY;
	}
	memset(resp, 0, sizeof(*resp));
	crsqe->resp = (struct creq_qp_event *)resp;
	crsqe->resp->cookie = req->cookie;
	crsqe->req_size = req->cmd_size;
	if (req->resp_size && sb) {
		struct bnxt_qplib_rcfw_sbuf *sbuf = sb;

		/* Side-buffer size is reported to FW in cmdqe units */
		req->resp_addr = cpu_to_le64(sbuf->dma_addr);
		req->resp_size = (sbuf->size + BNXT_QPLIB_CMDQE_UNITS - 1) /
				  BNXT_QPLIB_CMDQE_UNITS;
	}

	/* Scatter the request over as many cmdqe slots as cmd_size needs */
	cmdq_ptr = (struct bnxt_qplib_cmdqe **)cmdq->pbl_ptr;
	preq = (u8 *)req;
	size = req->cmd_size * BNXT_QPLIB_CMDQE_UNITS;
	do {
		/* Locate the next cmdq slot */
		sw_prod = HWQ_CMP(cmdq->prod, cmdq);
		cmdqe = &cmdq_ptr[get_cmdq_pg(sw_prod)][get_cmdq_idx(sw_prod)];
		if (!cmdqe) {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: RCFW request failed with no cmdqe!");
			goto done;
		}
		/* Copy a segment of the req cmd to the cmdq */
		memset(cmdqe, 0, sizeof(*cmdqe));
		memcpy(cmdqe, preq, min_t(u32, size, sizeof(*cmdqe)));
		preq += min_t(u32, size, sizeof(*cmdqe));
		size -= min_t(u32, size, sizeof(*cmdqe));
		cmdq->prod++;
		rcfw->seq_num++;
	} while (size > 0);

	rcfw->seq_num++;

	cmdq_prod = cmdq->prod;
	if (test_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags)) {
		/* The very first doorbell write
		 * is required to set this flag
		 * which prompts the FW to reset
		 * its internal pointers
		 */
		cmdq_prod |= BIT(FIRMWARE_FIRST_FLAG);
		clear_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
	}

	/* ring CMDQ DB */
	wmb();		/* order the cmdqe writes before the doorbell */
	writel(cmdq_prod, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_prod_off);
	writel(RCFW_CMDQ_TRIG_VAL, rcfw->cmdq_bar_reg_iomem +
	       rcfw->cmdq_bar_reg_trig_off);
done:
	spin_unlock_irqrestore(&cmdq->lock, flags);
	/* Return the CREQ response pointer */
	return 0;
}
193
/* bnxt_qplib_rcfw_send_message() - send an RCFW command and wait for its
 * completion.
 *
 * Retries __send_message() up to 0xFF times while the queue is full
 * (-EAGAIN) or the completion slot is still occupied (-EBUSY), then waits
 * for the response either by polling (@is_block) or by sleeping on the
 * waitqueue.  On timeout the FIRMWARE_TIMED_OUT flag is set, which causes
 * all subsequent commands on this channel to be rejected.
 *
 * Return: 0 on success, a negative errno on send failure or timeout, or
 * -EFAULT when the firmware completed the command with a nonzero status.
 */
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req,
				 struct creq_base *resp,
				 void *sb, u8 is_block)
{
	struct creq_qp_event *evnt = (struct creq_qp_event *)resp;
	u16 cookie;
	u8 opcode, retry_cnt = 0xFF;
	int rc = 0;

	do {
		opcode = req->opcode;
		rc = __send_message(rcfw, req, resp, sb, is_block);
		/* __send_message() stamped the cookie into the request */
		cookie = le16_to_cpu(req->cookie) & RCFW_MAX_COOKIE_VALUE;
		if (!rc)
			break;

		if (!retry_cnt || (rc != -EAGAIN && rc != -EBUSY)) {
			/* send failed */
			dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x send failed",
				cookie, opcode);
			return rc;
		}
		/* Blocking callers may not sleep; spin-delay instead */
		is_block ? mdelay(1) : usleep_range(500, 1000);

	} while (retry_cnt--);

	if (is_block)
		rc = __block_for_resp(rcfw, cookie);
	else
		rc = __wait_for_resp(rcfw, cookie);
	if (rc) {
		/* timed out */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x timedout (%d)msec",
			cookie, opcode, RCFW_CMD_WAIT_TIME_MS);
		/* Poison the channel: no further commands are accepted */
		set_bit(FIRMWARE_TIMED_OUT, &rcfw->flags);
		return rc;
	}

	if (evnt->status) {
		/* failed with status */
		dev_err(&rcfw->pdev->dev, "QPLIB: cmdq[%#x]=%#x status %#x",
			cookie, opcode, evnt->status);
		rc = -EFAULT;
	}

	return rc;
}
242 /* Completions */
243 static int bnxt_qplib_process_func_event(struct bnxt_qplib_rcfw *rcfw,
244                                          struct creq_func_event *func_event)
245 {
246         switch (func_event->event) {
247         case CREQ_FUNC_EVENT_EVENT_TX_WQE_ERROR:
248                 break;
249         case CREQ_FUNC_EVENT_EVENT_TX_DATA_ERROR:
250                 break;
251         case CREQ_FUNC_EVENT_EVENT_RX_WQE_ERROR:
252                 break;
253         case CREQ_FUNC_EVENT_EVENT_RX_DATA_ERROR:
254                 break;
255         case CREQ_FUNC_EVENT_EVENT_CQ_ERROR:
256                 break;
257         case CREQ_FUNC_EVENT_EVENT_TQM_ERROR:
258                 break;
259         case CREQ_FUNC_EVENT_EVENT_CFCQ_ERROR:
260                 break;
261         case CREQ_FUNC_EVENT_EVENT_CFCS_ERROR:
262                 /* SRQ ctx error, call srq_handler??
263                  * But there's no SRQ handle!
264                  */
265                 break;
266         case CREQ_FUNC_EVENT_EVENT_CFCC_ERROR:
267                 break;
268         case CREQ_FUNC_EVENT_EVENT_CFCM_ERROR:
269                 break;
270         case CREQ_FUNC_EVENT_EVENT_TIM_ERROR:
271                 break;
272         case CREQ_FUNC_EVENT_EVENT_VF_COMM_REQUEST:
273                 break;
274         case CREQ_FUNC_EVENT_EVENT_RESOURCE_EXHAUSTED:
275                 break;
276         default:
277                 return -EINVAL;
278         }
279         return 0;
280 }
281
/* bnxt_qplib_process_qp_event() - handle one QP-type CREQ entry.
 *
 * Two flavors arrive here: asynchronous QP error notifications (dispatched
 * to the registered aeq_handler) and command responses, which are matched
 * by cookie against the outstanding crsqe table and copied into the
 * sender's response buffer.  Called from bnxt_qplib_service_creq() with
 * creq->lock held.
 */
static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
				       struct creq_qp_event *qp_event)
{
	struct bnxt_qplib_hwq *cmdq = &rcfw->cmdq;
	struct creq_qp_error_notification *err_event;
	struct bnxt_qplib_crsq *crsqe;
	unsigned long flags;
	struct bnxt_qplib_qp *qp;
	u16 cbit, blocked = 0;
	u16 cookie;
	__le16  mcookie;
	u32 qp_id;

	switch (qp_event->event) {
	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
		/* Async QP error: look up the QP by its xid and push the
		 * event to the upper layer's AEQ handler.
		 */
		err_event = (struct creq_qp_error_notification *)qp_event;
		qp_id = le32_to_cpu(err_event->xid);
		qp = rcfw->qp_tbl[qp_id].qp_handle;
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: Received QP error notification");
		dev_dbg(&rcfw->pdev->dev,
			"QPLIB: qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
			qp_id, err_event->req_err_state_reason,
			err_event->res_err_state_reason);
		/* QP may already have been destroyed; nothing to do then */
		if (!qp)
			break;
		bnxt_qplib_mark_qp_error(qp);
		rcfw->aeq_handler(rcfw, qp_event, qp);
		break;
	default:
		/*
		 * Command Response
		 * cmdq->lock needs to be acquired to synchronize
		 * the command send and completion reaping. This function
		 * is always called with creq->lock held. Using
		 * the nested variant of spin_lock.
		 *
		 */

		spin_lock_irqsave_nested(&cmdq->lock, flags,
					 SINGLE_DEPTH_NESTING);
		/* The cookie carries the blocking flag in its high bit;
		 * strip it to recover the crsqe slot index.
		 */
		cookie = le16_to_cpu(qp_event->cookie);
		mcookie = qp_event->cookie;
		blocked = cookie & RCFW_CMD_IS_BLOCKING;
		cookie &= RCFW_MAX_COOKIE_VALUE;
		cbit = cookie % RCFW_MAX_OUTSTANDING_CMD;
		crsqe = &rcfw->crsqe_tbl[cbit];
		if (crsqe->resp &&
		    crsqe->resp->cookie  == mcookie) {
			/* Hand the completion to the sender and free the
			 * slot for reuse.
			 */
			memcpy(crsqe->resp, qp_event, sizeof(*qp_event));
			crsqe->resp = NULL;
		} else {
			dev_err(&rcfw->pdev->dev,
				"QPLIB: CMD %s resp->cookie = %#x, evnt->cookie = %#x",
				crsqe->resp ? "mismatch" : "collision",
				crsqe->resp ? crsqe->resp->cookie : 0, mcookie);
		}
		if (!test_and_clear_bit(cbit, rcfw->cmdq_bitmap))
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: CMD bit %d was not requested", cbit);
		/* Retire the cmdqe slots this command consumed */
		cmdq->cons += crsqe->req_size;
		crsqe->req_size = 0;

		/* Blocking senders poll; only sleeping senders need a wake */
		if (!blocked)
			wake_up(&rcfw->waitq);
		spin_unlock_irqrestore(&cmdq->lock, flags);
	}
	return 0;
}
351
352 /* SP - CREQ Completion handlers */
/* SP - CREQ Completion handlers */
/* bnxt_qplib_service_creq() - tasklet body that drains the CREQ ring.
 *
 * @data: the struct bnxt_qplib_rcfw * cast to unsigned long (tasklet ABI).
 *
 * Consumes up to CREQ_ENTRY_POLL_BUDGET valid entries, dispatching QP
 * events and function events, then advances the consumer index and
 * re-arms the CREQ doorbell.  Also called synchronously from
 * __block_for_resp() when a caller is polling for a command completion.
 */
static void bnxt_qplib_service_creq(unsigned long data)
{
	struct bnxt_qplib_rcfw *rcfw = (struct bnxt_qplib_rcfw *)data;
	struct bnxt_qplib_hwq *creq = &rcfw->creq;
	struct creq_base *creqe, **creq_ptr;
	u32 sw_cons, raw_cons;
	unsigned long flags;
	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;

	/* Service the CREQ until budget is over */
	spin_lock_irqsave(&creq->lock, flags);
	raw_cons = creq->cons;
	while (budget > 0) {
		sw_cons = HWQ_CMP(raw_cons, creq);
		creq_ptr = (struct creq_base **)creq->pbl_ptr;
		creqe = &creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
		if (!CREQ_CMP_VALID(creqe, raw_cons, creq->max_elements))
			break;
		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = creqe->type & CREQ_BASE_TYPE_MASK;
		switch (type) {
		case CREQ_BASE_TYPE_QP_EVENT:
			bnxt_qplib_process_qp_event
				(rcfw, (struct creq_qp_event *)creqe);
			rcfw->creq_qp_event_processed++;
			break;
		case CREQ_BASE_TYPE_FUNC_EVENT:
			if (!bnxt_qplib_process_func_event
			    (rcfw, (struct creq_func_event *)creqe))
				rcfw->creq_func_event_processed++;
			else
				dev_warn
				(&rcfw->pdev->dev, "QPLIB:aeqe:%#x Not handled",
				 type);
			break;
		default:
			dev_warn(&rcfw->pdev->dev, "QPLIB: creqe with ");
			dev_warn(&rcfw->pdev->dev,
				 "QPLIB: op_event = 0x%x not handled", type);
			break;
		}
		raw_cons++;
		budget--;
	}

	/* Only touch the doorbell if we actually consumed something */
	if (creq->cons != raw_cons) {
		creq->cons = raw_cons;
		CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, raw_cons,
			      creq->max_elements);
	}
	spin_unlock_irqrestore(&creq->lock, flags);
}
409
410 static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
411 {
412         struct bnxt_qplib_rcfw *rcfw = dev_instance;
413         struct bnxt_qplib_hwq *creq = &rcfw->creq;
414         struct creq_base **creq_ptr;
415         u32 sw_cons;
416
417         /* Prefetch the CREQ element */
418         sw_cons = HWQ_CMP(creq->cons, creq);
419         creq_ptr = (struct creq_base **)rcfw->creq.pbl_ptr;
420         prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
421
422         tasklet_schedule(&rcfw->worker);
423
424         return IRQ_HANDLED;
425 }
426
427 /* RCFW */
428 int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw)
429 {
430         struct cmdq_deinitialize_fw req;
431         struct creq_deinitialize_fw_resp resp;
432         u16 cmd_flags = 0;
433         int rc;
434
435         RCFW_CMD_PREP(req, DEINITIALIZE_FW, cmd_flags);
436         rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
437                                           NULL, 0);
438         if (rc)
439                 return rc;
440
441         clear_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
442         return 0;
443 }
444
445 static int __get_pbl_pg_idx(struct bnxt_qplib_pbl *pbl)
446 {
447         return (pbl->pg_size == ROCE_PG_SIZE_4K ?
448                                       CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K :
449                 pbl->pg_size == ROCE_PG_SIZE_8K ?
450                                       CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8K :
451                 pbl->pg_size == ROCE_PG_SIZE_64K ?
452                                       CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_64K :
453                 pbl->pg_size == ROCE_PG_SIZE_2M ?
454                                       CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_2M :
455                 pbl->pg_size == ROCE_PG_SIZE_8M ?
456                                       CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_8M :
457                 pbl->pg_size == ROCE_PG_SIZE_1G ?
458                                       CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_1G :
459                                       CMDQ_INITIALIZE_FW_QPC_PG_SIZE_PG_4K);
460 }
461
462 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
463                          struct bnxt_qplib_ctx *ctx, int is_virtfn)
464 {
465         struct cmdq_initialize_fw req;
466         struct creq_initialize_fw_resp resp;
467         u16 cmd_flags = 0, level;
468         int rc;
469
470         RCFW_CMD_PREP(req, INITIALIZE_FW, cmd_flags);
471         /* Supply (log-base-2-of-host-page-size - base-page-shift)
472          * to bono to adjust the doorbell page sizes.
473          */
474         req.log2_dbr_pg_size = cpu_to_le16(PAGE_SHIFT -
475                                            RCFW_DBR_BASE_PAGE_SHIFT);
476         /*
477          * VFs need not setup the HW context area, PF
478          * shall setup this area for VF. Skipping the
479          * HW programming
480          */
481         if (is_virtfn)
482                 goto skip_ctx_setup;
483
484         level = ctx->qpc_tbl.level;
485         req.qpc_pg_size_qpc_lvl = (level << CMDQ_INITIALIZE_FW_QPC_LVL_SFT) |
486                                 __get_pbl_pg_idx(&ctx->qpc_tbl.pbl[level]);
487         level = ctx->mrw_tbl.level;
488         req.mrw_pg_size_mrw_lvl = (level << CMDQ_INITIALIZE_FW_MRW_LVL_SFT) |
489                                 __get_pbl_pg_idx(&ctx->mrw_tbl.pbl[level]);
490         level = ctx->srqc_tbl.level;
491         req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
492                                 __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
493         level = ctx->cq_tbl.level;
494         req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
495                                 __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
496         level = ctx->srqc_tbl.level;
497         req.srq_pg_size_srq_lvl = (level << CMDQ_INITIALIZE_FW_SRQ_LVL_SFT) |
498                                 __get_pbl_pg_idx(&ctx->srqc_tbl.pbl[level]);
499         level = ctx->cq_tbl.level;
500         req.cq_pg_size_cq_lvl = (level << CMDQ_INITIALIZE_FW_CQ_LVL_SFT) |
501                                 __get_pbl_pg_idx(&ctx->cq_tbl.pbl[level]);
502         level = ctx->tim_tbl.level;
503         req.tim_pg_size_tim_lvl = (level << CMDQ_INITIALIZE_FW_TIM_LVL_SFT) |
504                                   __get_pbl_pg_idx(&ctx->tim_tbl.pbl[level]);
505         level = ctx->tqm_pde_level;
506         req.tqm_pg_size_tqm_lvl = (level << CMDQ_INITIALIZE_FW_TQM_LVL_SFT) |
507                                   __get_pbl_pg_idx(&ctx->tqm_pde.pbl[level]);
508
509         req.qpc_page_dir =
510                 cpu_to_le64(ctx->qpc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
511         req.mrw_page_dir =
512                 cpu_to_le64(ctx->mrw_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
513         req.srq_page_dir =
514                 cpu_to_le64(ctx->srqc_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
515         req.cq_page_dir =
516                 cpu_to_le64(ctx->cq_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
517         req.tim_page_dir =
518                 cpu_to_le64(ctx->tim_tbl.pbl[PBL_LVL_0].pg_map_arr[0]);
519         req.tqm_page_dir =
520                 cpu_to_le64(ctx->tqm_pde.pbl[PBL_LVL_0].pg_map_arr[0]);
521
522         req.number_of_qp = cpu_to_le32(ctx->qpc_tbl.max_elements);
523         req.number_of_mrw = cpu_to_le32(ctx->mrw_tbl.max_elements);
524         req.number_of_srq = cpu_to_le32(ctx->srqc_tbl.max_elements);
525         req.number_of_cq = cpu_to_le32(ctx->cq_tbl.max_elements);
526
527         req.max_qp_per_vf = cpu_to_le32(ctx->vf_res.max_qp_per_vf);
528         req.max_mrw_per_vf = cpu_to_le32(ctx->vf_res.max_mrw_per_vf);
529         req.max_srq_per_vf = cpu_to_le32(ctx->vf_res.max_srq_per_vf);
530         req.max_cq_per_vf = cpu_to_le32(ctx->vf_res.max_cq_per_vf);
531         req.max_gid_per_vf = cpu_to_le32(ctx->vf_res.max_gid_per_vf);
532
533 skip_ctx_setup:
534         req.stat_ctx_id = cpu_to_le32(ctx->stats.fw_id);
535         rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp,
536                                           NULL, 0);
537         if (rc)
538                 return rc;
539         set_bit(FIRMWARE_INITIALIZED_FLAG, &rcfw->flags);
540         return 0;
541 }
542
543 void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
544 {
545         kfree(rcfw->qp_tbl);
546         kfree(rcfw->crsqe_tbl);
547         bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->cmdq);
548         bnxt_qplib_free_hwq(rcfw->pdev, &rcfw->creq);
549         rcfw->pdev = NULL;
550 }
551
552 int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
553                                   struct bnxt_qplib_rcfw *rcfw,
554                                   int qp_tbl_sz)
555 {
556         rcfw->pdev = pdev;
557         rcfw->creq.max_elements = BNXT_QPLIB_CREQE_MAX_CNT;
558         if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->creq, NULL, 0,
559                                       &rcfw->creq.max_elements,
560                                       BNXT_QPLIB_CREQE_UNITS, 0, PAGE_SIZE,
561                                       HWQ_TYPE_L2_CMPL)) {
562                 dev_err(&rcfw->pdev->dev,
563                         "QPLIB: HW channel CREQ allocation failed");
564                 goto fail;
565         }
566         rcfw->cmdq.max_elements = BNXT_QPLIB_CMDQE_MAX_CNT;
567         if (bnxt_qplib_alloc_init_hwq(rcfw->pdev, &rcfw->cmdq, NULL, 0,
568                                       &rcfw->cmdq.max_elements,
569                                       BNXT_QPLIB_CMDQE_UNITS, 0, PAGE_SIZE,
570                                       HWQ_TYPE_CTX)) {
571                 dev_err(&rcfw->pdev->dev,
572                         "QPLIB: HW channel CMDQ allocation failed");
573                 goto fail;
574         }
575
576         rcfw->crsqe_tbl = kcalloc(rcfw->cmdq.max_elements,
577                                   sizeof(*rcfw->crsqe_tbl), GFP_KERNEL);
578         if (!rcfw->crsqe_tbl)
579                 goto fail;
580
581         rcfw->qp_tbl_size = qp_tbl_sz;
582         rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
583                                GFP_KERNEL);
584         if (!rcfw->qp_tbl)
585                 goto fail;
586
587         return 0;
588
589 fail:
590         bnxt_qplib_free_rcfw_channel(rcfw);
591         return -ENOMEM;
592 }
593
594 void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill)
595 {
596         tasklet_disable(&rcfw->worker);
597         /* Mask h/w interrupts */
598         CREQ_DB(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
599                 rcfw->creq.max_elements);
600         /* Sync with last running IRQ-handler */
601         synchronize_irq(rcfw->vector);
602         if (kill)
603                 tasklet_kill(&rcfw->worker);
604
605         if (rcfw->requested) {
606                 free_irq(rcfw->vector, rcfw);
607                 rcfw->requested = false;
608         }
609 }
610
611 void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw)
612 {
613         unsigned long indx;
614
615         bnxt_qplib_rcfw_stop_irq(rcfw, true);
616
617         iounmap(rcfw->cmdq_bar_reg_iomem);
618         iounmap(rcfw->creq_bar_reg_iomem);
619
620         indx = find_first_bit(rcfw->cmdq_bitmap, rcfw->bmap_size);
621         if (indx != rcfw->bmap_size)
622                 dev_err(&rcfw->pdev->dev,
623                         "QPLIB: disabling RCFW with pending cmd-bit %lx", indx);
624         kfree(rcfw->cmdq_bitmap);
625         rcfw->bmap_size = 0;
626
627         rcfw->cmdq_bar_reg_iomem = NULL;
628         rcfw->creq_bar_reg_iomem = NULL;
629         rcfw->aeq_handler = NULL;
630         rcfw->vector = 0;
631 }
632
633 int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
634                               bool need_init)
635 {
636         int rc;
637
638         if (rcfw->requested)
639                 return -EFAULT;
640
641         rcfw->vector = msix_vector;
642         if (need_init)
643                 tasklet_init(&rcfw->worker,
644                              bnxt_qplib_service_creq, (unsigned long)rcfw);
645         else
646                 tasklet_enable(&rcfw->worker);
647         rc = request_irq(rcfw->vector, bnxt_qplib_creq_irq, 0,
648                          "bnxt_qplib_creq", rcfw);
649         if (rc)
650                 return rc;
651         rcfw->requested = true;
652         CREQ_DB_REARM(rcfw->creq_bar_reg_iomem, rcfw->creq.cons,
653                       rcfw->creq.max_elements);
654
655         return 0;
656 }
657
658 int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
659                                    struct bnxt_qplib_rcfw *rcfw,
660                                    int msix_vector,
661                                    int cp_bar_reg_off, int virt_fn,
662                                    int (*aeq_handler)(struct bnxt_qplib_rcfw *,
663                                                       void *, void *))
664 {
665         resource_size_t res_base;
666         struct cmdq_init init;
667         u16 bmap_size;
668         int rc;
669
670         /* General */
671         rcfw->seq_num = 0;
672         set_bit(FIRMWARE_FIRST_FLAG, &rcfw->flags);
673         bmap_size = BITS_TO_LONGS(RCFW_MAX_OUTSTANDING_CMD *
674                                   sizeof(unsigned long));
675         rcfw->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL);
676         if (!rcfw->cmdq_bitmap)
677                 return -ENOMEM;
678         rcfw->bmap_size = bmap_size;
679
680         /* CMDQ */
681         rcfw->cmdq_bar_reg = RCFW_COMM_PCI_BAR_REGION;
682         res_base = pci_resource_start(pdev, rcfw->cmdq_bar_reg);
683         if (!res_base)
684                 return -ENOMEM;
685
686         rcfw->cmdq_bar_reg_iomem = ioremap_nocache(res_base +
687                                               RCFW_COMM_BASE_OFFSET,
688                                               RCFW_COMM_SIZE);
689         if (!rcfw->cmdq_bar_reg_iomem) {
690                 dev_err(&rcfw->pdev->dev,
691                         "QPLIB: CMDQ BAR region %d mapping failed",
692                         rcfw->cmdq_bar_reg);
693                 return -ENOMEM;
694         }
695
696         rcfw->cmdq_bar_reg_prod_off = virt_fn ? RCFW_VF_COMM_PROD_OFFSET :
697                                         RCFW_PF_COMM_PROD_OFFSET;
698
699         rcfw->cmdq_bar_reg_trig_off = RCFW_COMM_TRIG_OFFSET;
700
701         /* CREQ */
702         rcfw->creq_bar_reg = RCFW_COMM_CONS_PCI_BAR_REGION;
703         res_base = pci_resource_start(pdev, rcfw->creq_bar_reg);
704         if (!res_base)
705                 dev_err(&rcfw->pdev->dev,
706                         "QPLIB: CREQ BAR region %d resc start is 0!",
707                         rcfw->creq_bar_reg);
708         rcfw->creq_bar_reg_iomem = ioremap_nocache(res_base + cp_bar_reg_off,
709                                                    4);
710         if (!rcfw->creq_bar_reg_iomem) {
711                 dev_err(&rcfw->pdev->dev,
712                         "QPLIB: CREQ BAR region %d mapping failed",
713                         rcfw->creq_bar_reg);
714                 iounmap(rcfw->cmdq_bar_reg_iomem);
715                 rcfw->cmdq_bar_reg_iomem = NULL;
716                 return -ENOMEM;
717         }
718         rcfw->creq_qp_event_processed = 0;
719         rcfw->creq_func_event_processed = 0;
720
721         if (aeq_handler)
722                 rcfw->aeq_handler = aeq_handler;
723         init_waitqueue_head(&rcfw->waitq);
724
725         rc = bnxt_qplib_rcfw_start_irq(rcfw, msix_vector, true);
726         if (rc) {
727                 dev_err(&rcfw->pdev->dev,
728                         "QPLIB: Failed to request IRQ for CREQ rc = 0x%x", rc);
729                 bnxt_qplib_disable_rcfw_channel(rcfw);
730                 return rc;
731         }
732
733         init.cmdq_pbl = cpu_to_le64(rcfw->cmdq.pbl[PBL_LVL_0].pg_map_arr[0]);
734         init.cmdq_size_cmdq_lvl = cpu_to_le16(
735                 ((BNXT_QPLIB_CMDQE_MAX_CNT << CMDQ_INIT_CMDQ_SIZE_SFT) &
736                  CMDQ_INIT_CMDQ_SIZE_MASK) |
737                 ((rcfw->cmdq.level << CMDQ_INIT_CMDQ_LVL_SFT) &
738                  CMDQ_INIT_CMDQ_LVL_MASK));
739         init.creq_ring_id = cpu_to_le16(rcfw->creq_ring_id);
740
741         /* Write to the Bono mailbox register */
742         __iowrite32_copy(rcfw->cmdq_bar_reg_iomem, &init, sizeof(init) / 4);
743         return 0;
744 }
745
746 struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
747                 struct bnxt_qplib_rcfw *rcfw,
748                 u32 size)
749 {
750         struct bnxt_qplib_rcfw_sbuf *sbuf;
751
752         sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC);
753         if (!sbuf)
754                 return NULL;
755
756         sbuf->size = size;
757         sbuf->sb = dma_zalloc_coherent(&rcfw->pdev->dev, sbuf->size,
758                                        &sbuf->dma_addr, GFP_ATOMIC);
759         if (!sbuf->sb)
760                 goto bail;
761
762         return sbuf;
763 bail:
764         kfree(sbuf);
765         return NULL;
766 }
767
768 void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
769                                struct bnxt_qplib_rcfw_sbuf *sbuf)
770 {
771         if (sbuf->sb)
772                 dma_free_coherent(&rcfw->pdev->dev, sbuf->size,
773                                   sbuf->sb, sbuf->dma_addr);
774         kfree(sbuf);
775 }