2 * Huawei HiNIC PCI Express Linux driver
3 * Copyright(c) 2017 Huawei Technologies Co., Ltd
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 #include <linux/kernel.h>
17 #include <linux/types.h>
18 #include <linux/errno.h>
19 #include <linux/pci.h>
20 #include <linux/device.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/spinlock.h>
24 #include <linux/sizes.h>
25 #include <linux/atomic.h>
26 #include <linux/log2.h>
28 #include <linux/completion.h>
29 #include <linux/err.h>
30 #include <asm/byteorder.h>
31 #include <asm/barrier.h>
33 #include "hinic_common.h"
34 #include "hinic_hw_if.h"
35 #include "hinic_hw_eqs.h"
36 #include "hinic_hw_mgmt.h"
37 #include "hinic_hw_wqe.h"
38 #include "hinic_hw_wq.h"
39 #include "hinic_hw_cmdq.h"
40 #include "hinic_hw_io.h"
41 #include "hinic_hw_dev.h"
/* Field extraction for a cmdq CEQ element: cmdq type lives in bits [2:0] */
43 #define CMDQ_CEQE_TYPE_SHIFT 0
45 #define CMDQ_CEQE_TYPE_MASK 0x7
47 #define CMDQ_CEQE_GET(val, member) \
48 (((val) >> CMDQ_CEQE_##member##_SHIFT) \
49 & CMDQ_CEQE_##member##_MASK)
/* HW error code reported in the WQE status word: bits [23:20] */
51 #define CMDQ_WQE_ERRCODE_VAL_SHIFT 20
53 #define CMDQ_WQE_ERRCODE_VAL_MASK 0xF
55 #define CMDQ_WQE_ERRCODE_GET(val, member) \
56 (((val) >> CMDQ_WQE_ERRCODE_##member##_SHIFT) \
57 & CMDQ_WQE_ERRCODE_##member##_MASK)
/* Doorbell offset: low 8 bits of the producer index, 8-byte stride */
59 #define CMDQ_DB_PI_OFF(pi) (((u16)LOWER_8_BITS(pi)) << 3)
61 #define CMDQ_DB_ADDR(db_base, pi) ((db_base) + CMDQ_DB_PI_OFF(pi))
/* The cmdq header is the first dword of every cmdq WQE */
63 #define CMDQ_WQE_HEADER(wqe) ((struct hinic_cmdq_header *)(wqe))
/* HW toggles the busy bit in ctrl_info when it completes a WQE */
65 #define CMDQ_WQE_COMPLETED(ctrl_info) \
66 HINIC_CMDQ_CTRL_GET(ctrl_info, HW_BUSY_BIT)
/* First 8 bytes of a WQE must reach memory last (see cmdq_wqe_fill()) */
68 #define FIRST_DATA_TO_WRITE_LAST sizeof(u64)
70 #define CMDQ_DB_OFF SZ_2K
/* WQE basic block and WQE sizes are equal, so a WQE is never shadowed */
72 #define CMDQ_WQEBB_SIZE 64
73 #define CMDQ_WQE_SIZE 64
74 #define CMDQ_DEPTH SZ_4K
76 #define CMDQ_WQ_PAGE_SIZE SZ_4K
78 #define WQE_LCMD_SIZE 64
79 #define WQE_SCMD_SIZE 64
81 #define COMPLETE_LEN 3
/* Sync command timeout, in milliseconds (used via msecs_to_jiffies()) */
83 #define CMDQ_TIMEOUT 1000
85 #define CMDQ_PFN(addr, page_size) ((addr) >> (ilog2(page_size)))
/* cmdq[] is indexed by cmdq_type, so stepping back by cmdq_type
 * recovers element 0 and thus the containing hinic_cmdqs
 */
87 #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
88 struct hinic_cmdqs, cmdq[0])
90 #define cmdqs_to_func_to_io(cmdqs) container_of(cmdqs, \
91 struct hinic_func_to_io, \
/* How the HW reports completion: direct 64-bit value vs. SGE response */
99 enum completion_format {
/* Buffer-descriptor section lengths, in 8-byte units */
110 BUFDESC_LCMD_LEN = 2, /* 16 bytes - 2(8 byte unit) */
111 BUFDESC_SCMD_LEN = 3, /* 24 bytes - 3(8 byte unit) */
/* Control section lengths, in 8-byte units */
115 CTRL_SECT_LEN = 1, /* 4 bytes (ctrl) - 1(8 byte unit) */
116 CTRL_DIRECT_SECT_LEN = 2, /* 12 bytes (ctrl + rsvd) - 2(8 byte unit) */
/* Short-command opcodes carried in the ctrl CMD field */
119 enum cmdq_scmd_type {
120 CMDQ_SET_ARM_CMD = 2,
/* Synchronous command types: choose the completion format */
124 CMDQ_CMD_SYNC_DIRECT_RESP = 0,
125 CMDQ_CMD_SYNC_SGE_RESP = 1,
/* Whether a CEQ completion event is requested for the WQE */
128 enum completion_request {
134 * hinic_alloc_cmdq_buf - alloc buffer for sending command
136 * @cmdq_buf: the buffer returned in this struct
138 * Return 0 - Success, negative - Failure
140 int hinic_alloc_cmdq_buf(struct hinic_cmdqs *cmdqs,
141 struct hinic_cmdq_buf *cmdq_buf)
143 struct hinic_hwif *hwif = cmdqs->hwif;
144 struct pci_dev *pdev = hwif->pdev;
/* DMA-coherent buffer from the per-cmdqs pool; dma_addr is filled in
 * for use in the WQE SGE
 */
146 cmdq_buf->buf = pci_pool_alloc(cmdqs->cmdq_buf_pool, GFP_KERNEL,
147 &cmdq_buf->dma_addr);
148 if (!cmdq_buf->buf) {
149 dev_err(&pdev->dev, "Failed to allocate cmd from the pool\n");
157 * hinic_free_cmdq_buf - free buffer
159 * @cmdq_buf: the buffer to free that is in this struct
161 void hinic_free_cmdq_buf(struct hinic_cmdqs *cmdqs,
162 struct hinic_cmdq_buf *cmdq_buf)
/* Return the DMA buffer to the pool it was allocated from */
164 pci_pool_free(cmdqs->cmdq_buf_pool, cmdq_buf->buf, cmdq_buf->dma_addr);
/* Map a bufdesc length (from the WQE header) back to the WQE size in bytes */
167 static unsigned int cmdq_wqe_size_from_bdlen(enum bufdesc_len len)
169 unsigned int wqe_size = 0;
172 case BUFDESC_LCMD_LEN:
173 wqe_size = WQE_LCMD_SIZE;
175 case BUFDESC_SCMD_LEN:
176 wqe_size = WQE_SCMD_SIZE;
/* Point the WQE completion section at the caller's response buffer (SGE) */
183 static void cmdq_set_sge_completion(struct hinic_cmdq_completion *completion,
184 struct hinic_cmdq_buf *buf_out)
186 struct hinic_sge_resp *sge_resp = &completion->sge_resp;
188 hinic_set_sge(&sge_resp->sge, buf_out->dma_addr, buf_out->size);
/* Fill the ctrl word, header word and saved_data of a cmdq WQE.
 * @wrapped selects the toggled-wrapped bit so HW can detect ring wrap.
 * DATA_SGE means a long-command WQE; otherwise a direct (short) WQE.
 */
191 static void cmdq_prepare_wqe_ctrl(struct hinic_cmdq_wqe *wqe, int wrapped,
192 enum hinic_cmd_ack_type ack_type,
193 enum hinic_mod_type mod, u8 cmd, u16 prod_idx,
194 enum completion_format complete_format,
195 enum data_format data_format,
196 enum bufdesc_len buf_len)
198 struct hinic_cmdq_wqe_lcmd *wqe_lcmd;
199 struct hinic_cmdq_wqe_scmd *wqe_scmd;
200 enum ctrl_sect_len ctrl_len;
201 struct hinic_ctrl *ctrl;
204 if (data_format == DATA_SGE) {
205 wqe_lcmd = &wqe->wqe_lcmd;
/* clear stale status before HW writes the new one */
207 wqe_lcmd->status.status_info = 0;
208 ctrl = &wqe_lcmd->ctrl;
209 ctrl_len = CTRL_SECT_LEN;
211 wqe_scmd = &wqe->direct_wqe.wqe_scmd;
213 wqe_scmd->status.status_info = 0;
214 ctrl = &wqe_scmd->ctrl;
215 ctrl_len = CTRL_DIRECT_SECT_LEN;
218 ctrl->ctrl_info = HINIC_CMDQ_CTRL_SET(prod_idx, PI) |
219 HINIC_CMDQ_CTRL_SET(cmd, CMD) |
220 HINIC_CMDQ_CTRL_SET(mod, MOD) |
221 HINIC_CMDQ_CTRL_SET(ack_type, ACK_TYPE);
223 CMDQ_WQE_HEADER(wqe)->header_info =
224 HINIC_CMDQ_WQE_HEADER_SET(buf_len, BUFDESC_LEN) |
225 HINIC_CMDQ_WQE_HEADER_SET(complete_format, COMPLETE_FMT) |
226 HINIC_CMDQ_WQE_HEADER_SET(data_format, DATA_FMT) |
227 HINIC_CMDQ_WQE_HEADER_SET(CEQ_SET, COMPLETE_REQ) |
228 HINIC_CMDQ_WQE_HEADER_SET(COMPLETE_LEN, COMPLETE_SECT_LEN) |
229 HINIC_CMDQ_WQE_HEADER_SET(ctrl_len, CTRL_LEN) |
230 HINIC_CMDQ_WQE_HEADER_SET(wrapped, TOGGLED_WRAPPED);
/* saved_data tracks whether this WQE is an ARM command, so the CEQ
 * handler can dispatch it differently (see cmdq_ceq_handler())
 */
232 saved_data = CMDQ_WQE_HEADER(wqe)->saved_data;
233 saved_data = HINIC_SAVED_DATA_CLEAR(saved_data, ARM);
235 if ((cmd == CMDQ_SET_ARM_CMD) && (mod == HINIC_MOD_COMM))
236 CMDQ_WQE_HEADER(wqe)->saved_data |=
237 HINIC_SAVED_DATA_SET(1, ARM);
/* NOTE(review): this unconditional store would overwrite the ARM bit
 * OR-ed in just above unless an else clause (not visible in this
 * excerpt) guards it - verify against the full source
 */
239 CMDQ_WQE_HEADER(wqe)->saved_data = saved_data;
/* Point the long-command WQE buffer descriptor at the input DMA buffer */
242 static void cmdq_set_lcmd_bufdesc(struct hinic_cmdq_wqe_lcmd *wqe_lcmd,
243 struct hinic_cmdq_buf *buf_in)
245 hinic_set_sge(&wqe_lcmd->buf_desc.sge, buf_in->dma_addr, buf_in->size);
/* Copy the command payload inline into a direct (short-command) WQE.
 * NOTE(review): in_size is not bounds-checked here - caller must ensure
 * it fits buf_desc.data (validated upstream by cmdq_params_valid()).
 */
248 static void cmdq_set_direct_wqe_data(struct hinic_cmdq_direct_wqe *wqe,
249 void *buf_in, u32 in_size)
251 struct hinic_cmdq_wqe_scmd *wqe_scmd = &wqe->wqe_scmd;
253 wqe_scmd->buf_desc.buf_len = in_size;
254 memcpy(wqe_scmd->buf_desc.data, buf_in, in_size);
/* Build a complete long-command WQE: completion section, ctrl/header,
 * then the SGE buffer descriptor for buf_in.
 */
257 static void cmdq_set_lcmd_wqe(struct hinic_cmdq_wqe *wqe,
258 enum cmdq_cmd_type cmd_type,
259 struct hinic_cmdq_buf *buf_in,
260 struct hinic_cmdq_buf *buf_out, int wrapped,
261 enum hinic_cmd_ack_type ack_type,
262 enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
264 struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
265 enum completion_format complete_format;
268 case CMDQ_CMD_SYNC_SGE_RESP:
269 complete_format = COMPLETE_SGE;
270 cmdq_set_sge_completion(&wqe_lcmd->completion, buf_out);
272 case CMDQ_CMD_SYNC_DIRECT_RESP:
273 complete_format = COMPLETE_DIRECT;
/* HW writes the 64-bit response directly into the WQE */
274 wqe_lcmd->completion.direct_resp = 0;
278 cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd,
279 prod_idx, complete_format, DATA_SGE,
282 cmdq_set_lcmd_bufdesc(wqe_lcmd, buf_in);
/* Build a complete direct (short-command) WQE: completion section,
 * ctrl/header, then the inline payload copy of buf_in.
 */
285 static void cmdq_set_direct_wqe(struct hinic_cmdq_wqe *wqe,
286 enum cmdq_cmd_type cmd_type,
287 void *buf_in, u16 in_size,
288 struct hinic_cmdq_buf *buf_out, int wrapped,
289 enum hinic_cmd_ack_type ack_type,
290 enum hinic_mod_type mod, u8 cmd, u16 prod_idx)
292 struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
293 enum completion_format complete_format;
294 struct hinic_cmdq_wqe_scmd *wqe_scmd;
296 wqe_scmd = &direct_wqe->wqe_scmd;
299 case CMDQ_CMD_SYNC_SGE_RESP:
300 complete_format = COMPLETE_SGE;
301 cmdq_set_sge_completion(&wqe_scmd->completion, buf_out);
303 case CMDQ_CMD_SYNC_DIRECT_RESP:
304 complete_format = COMPLETE_DIRECT;
305 wqe_scmd->completion.direct_resp = 0;
309 cmdq_prepare_wqe_ctrl(wqe, wrapped, ack_type, mod, cmd, prod_idx,
310 complete_format, DATA_DIRECT, BUFDESC_SCMD_LEN);
312 cmdq_set_direct_wqe_data(direct_wqe, buf_in, in_size);
/* Copy a prepared WQE into the live ring. The first 8 bytes contain the
 * header with the toggled-wrapped/busy bits, so they must be written
 * last - otherwise HW could consume a half-written WQE.
 */
315 static void cmdq_wqe_fill(void *dst, void *src)
317 memcpy(dst + FIRST_DATA_TO_WRITE_LAST, src + FIRST_DATA_TO_WRITE_LAST,
318 CMDQ_WQE_SIZE - FIRST_DATA_TO_WRITE_LAST);
320 wmb(); /* The first 8 bytes should be written last */
322 *(u64 *)dst = *(u64 *)src;
/* Compose the doorbell word: high 8 bits of PI, control path, cmdq type */
325 static void cmdq_fill_db(u32 *db_info,
326 enum hinic_cmdq_type cmdq_type, u16 prod_idx)
328 *db_info = HINIC_CMDQ_DB_INFO_SET(UPPER_8_BITS(prod_idx), HI_PROD_IDX) |
329 HINIC_CMDQ_DB_INFO_SET(HINIC_CTRL_PATH, PATH) |
330 HINIC_CMDQ_DB_INFO_SET(cmdq_type, CMDQ_TYPE) |
331 HINIC_CMDQ_DB_INFO_SET(HINIC_DB_CMDQ_TYPE, DB_TYPE);
/* Ring the cmdq doorbell: low 8 bits of PI select the doorbell address,
 * the written word carries the rest (see cmdq_fill_db()).
 */
334 static void cmdq_set_db(struct hinic_cmdq *cmdq,
335 enum hinic_cmdq_type cmdq_type, u16 prod_idx)
339 cmdq_fill_db(&db_info, cmdq_type, prod_idx);
341 /* The data that is written to HW should be in Big Endian Format */
342 db_info = cpu_to_be32(db_info);
/* ensure the WQE is globally visible before HW is told about it */
344 wmb(); /* write all before the doorbell */
346 writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
/* Send a long-command WQE synchronously and wait (up to CMDQ_TIMEOUT ms)
 * for the CEQ handler to complete it; the 64-bit direct response is
 * returned through *resp. Returns 0 on success, negative on failure.
 */
349 static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
350 enum hinic_mod_type mod, u8 cmd,
351 struct hinic_cmdq_buf *buf_in,
354 struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
355 u16 curr_prod_idx, next_prod_idx;
356 int errcode, wrapped, num_wqebbs;
357 struct hinic_wq *wq = cmdq->wq;
358 struct hinic_hw_wqe *hw_wqe;
359 struct completion done;
361 /* Keep doorbell index correct. bh - for tasklet(ceq). */
362 spin_lock_bh(&cmdq->cmdq_lock);
364 /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
365 hw_wqe = hinic_get_wqe(wq, WQE_LCMD_SIZE, &curr_prod_idx);
366 if (IS_ERR(hw_wqe)) {
367 spin_unlock_bh(&cmdq->cmdq_lock);
371 curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
/* snapshot wrap state for this WQE; toggle it when PI wraps the ring */
373 wrapped = cmdq->wrapped;
375 num_wqebbs = ALIGN(WQE_LCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
376 next_prod_idx = curr_prod_idx + num_wqebbs;
377 if (next_prod_idx >= wq->q_depth) {
378 cmdq->wrapped = !cmdq->wrapped;
379 next_prod_idx -= wq->q_depth;
/* publish per-slot completion hooks for the CEQ handler */
382 cmdq->errcode[curr_prod_idx] = &errcode;
384 init_completion(&done);
385 cmdq->done[curr_prod_idx] = &done;
/* build the WQE in a stack copy first, then commit it atomically */
387 cmdq_set_lcmd_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in, NULL,
388 wrapped, HINIC_CMD_ACK_TYPE_CMDQ, mod, cmd,
391 /* The data that is written to HW should be in Big Endian Format */
392 hinic_cpu_to_be32(&cmdq_wqe, WQE_LCMD_SIZE);
394 /* CMDQ WQE is not shadow, therefore wqe will be written to wq */
395 cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
397 cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
399 spin_unlock_bh(&cmdq->cmdq_lock);
401 if (!wait_for_completion_timeout(&done,
402 msecs_to_jiffies(CMDQ_TIMEOUT))) {
/* timed out: unhook the stack-based errcode/done pointers so a
 * late CEQ completion cannot write to a dead stack frame
 */
403 spin_lock_bh(&cmdq->cmdq_lock);
405 if (cmdq->errcode[curr_prod_idx] == &errcode)
406 cmdq->errcode[curr_prod_idx] = NULL;
408 if (cmdq->done[curr_prod_idx] == &done)
409 cmdq->done[curr_prod_idx] = NULL;
411 spin_unlock_bh(&cmdq->cmdq_lock);
416 smp_rmb(); /* read error code after completion */
419 struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &curr_cmdq_wqe->wqe_lcmd;
/* NOTE(review): converting a device-written big-endian value is
 * spelled be64_to_cpu(); cpu_to_be64() performs the same byte swap
 * but annotates the direction backwards for sparse - verify
 */
421 *resp = cpu_to_be64(wqe_lcmd->completion.direct_resp);
/* Post a SET_ARM direct WQE (fire-and-forget: no done/errcode hooks are
 * registered; the CEQ handler recognizes it via the saved_data ARM bit).
 * NOTE(review): uses spin_lock(), not spin_lock_bh(), on cmdq_lock -
 * safe only because it is called from the CEQ tasklet context itself.
 */
430 static int cmdq_set_arm_bit(struct hinic_cmdq *cmdq, void *buf_in,
433 struct hinic_cmdq_wqe *curr_cmdq_wqe, cmdq_wqe;
434 u16 curr_prod_idx, next_prod_idx;
435 struct hinic_wq *wq = cmdq->wq;
436 struct hinic_hw_wqe *hw_wqe;
437 int wrapped, num_wqebbs;
439 /* Keep doorbell index correct */
440 spin_lock(&cmdq->cmdq_lock);
442 /* WQE_SIZE = WQEBB_SIZE, we will get the wq element and not shadow*/
443 hw_wqe = hinic_get_wqe(wq, WQE_SCMD_SIZE, &curr_prod_idx);
444 if (IS_ERR(hw_wqe)) {
445 spin_unlock(&cmdq->cmdq_lock);
449 curr_cmdq_wqe = &hw_wqe->cmdq_wqe;
451 wrapped = cmdq->wrapped;
453 num_wqebbs = ALIGN(WQE_SCMD_SIZE, wq->wqebb_size) / wq->wqebb_size;
454 next_prod_idx = curr_prod_idx + num_wqebbs;
455 if (next_prod_idx >= wq->q_depth) {
456 cmdq->wrapped = !cmdq->wrapped;
457 next_prod_idx -= wq->q_depth;
460 cmdq_set_direct_wqe(&cmdq_wqe, CMDQ_CMD_SYNC_DIRECT_RESP, buf_in,
461 in_size, NULL, wrapped, HINIC_CMD_ACK_TYPE_CMDQ,
462 HINIC_MOD_COMM, CMDQ_SET_ARM_CMD, curr_prod_idx);
464 /* The data that is written to HW should be in Big Endian Format */
465 hinic_cpu_to_be32(&cmdq_wqe, WQE_SCMD_SIZE);
467 /* cmdq wqe is not shadow, therefore wqe will be written to wq */
468 cmdq_wqe_fill(curr_cmdq_wqe, &cmdq_wqe);
470 cmdq_set_db(cmdq, HINIC_CMDQ_SYNC, next_prod_idx);
472 spin_unlock(&cmdq->cmdq_lock);
/* Reject command buffers larger than the HW cmdq data limit */
476 static int cmdq_params_valid(struct hinic_cmdq_buf *buf_in)
478 if (buf_in->size > HINIC_CMDQ_MAX_DATA_SIZE)
485 * hinic_cmdq_direct_resp - send command with direct data as resp
487 * @mod: module on the card that will handle the command
489 * @buf_in: the buffer for the command
490 * @resp: the response to return
492 * Return 0 - Success, negative - Failure
494 int hinic_cmdq_direct_resp(struct hinic_cmdqs *cmdqs,
495 enum hinic_mod_type mod, u8 cmd,
496 struct hinic_cmdq_buf *buf_in, u64 *resp)
498 struct hinic_hwif *hwif = cmdqs->hwif;
499 struct pci_dev *pdev = hwif->pdev;
/* validate the input buffer size before touching the ring */
502 err = cmdq_params_valid(buf_in);
504 dev_err(&pdev->dev, "Invalid CMDQ parameters\n");
/* all synchronous commands go through the SYNC cmdq */
508 return cmdq_sync_cmd_direct_resp(&cmdqs->cmdq[HINIC_CMDQ_SYNC],
509 mod, cmd, buf_in, resp);
513 * hinic_set_arm_bit - set arm bit for enable interrupt again
515 * @q_type: type of queue to set the arm bit for
516 * @q_id: the queue number
518 * Return 0 - Success, negative - Failure
520 int hinic_set_arm_bit(struct hinic_cmdqs *cmdqs,
521 enum hinic_set_arm_qtype q_type, u32 q_id)
523 struct hinic_cmdq *cmdq = &cmdqs->cmdq[HINIC_CMDQ_SYNC];
524 struct hinic_hwif *hwif = cmdqs->hwif;
525 struct pci_dev *pdev = hwif->pdev;
526 struct hinic_cmdq_arm_bit arm_bit;
529 arm_bit.q_type = q_type;
/* NOTE(review): arm_bit.q_id is not assigned in this excerpt before the
 * command is sent - verify the assignment exists in the full source
 */
532 err = cmdq_set_arm_bit(cmdq, &arm_bit, sizeof(arm_bit));
534 dev_err(&pdev->dev, "Failed to set arm for qid %d\n", q_id);
/* Clear the HW busy bit of a completed WQE so the slot can be reused.
 * The WQE layout (lcmd vs scmd) is recovered from the bufdesc length
 * stored in the big-endian header word.
 */
541 static void clear_wqe_complete_bit(struct hinic_cmdq *cmdq,
542 struct hinic_cmdq_wqe *wqe)
544 u32 header_info = be32_to_cpu(CMDQ_WQE_HEADER(wqe)->header_info);
545 unsigned int bufdesc_len, wqe_size;
546 struct hinic_ctrl *ctrl;
548 bufdesc_len = HINIC_CMDQ_WQE_HEADER_GET(header_info, BUFDESC_LEN);
549 wqe_size = cmdq_wqe_size_from_bdlen(bufdesc_len);
550 if (wqe_size == WQE_LCMD_SIZE) {
551 struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &wqe->wqe_lcmd;
553 ctrl = &wqe_lcmd->ctrl;
555 struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
556 struct hinic_cmdq_wqe_scmd *wqe_scmd;
558 wqe_scmd = &direct_wqe->wqe_scmd;
559 ctrl = &wqe_scmd->ctrl;
562 /* clear HW busy bit */
565 wmb(); /* verify wqe is clear */
569 * cmdq_arm_ceq_handler - cmdq completion event handler for arm command
570 * @cmdq: the cmdq of the arm command
571 * @wqe: the wqe of the arm command
573 * Return 0 - Success, negative - Failure
575 static int cmdq_arm_ceq_handler(struct hinic_cmdq *cmdq,
576 struct hinic_cmdq_wqe *wqe)
578 struct hinic_cmdq_direct_wqe *direct_wqe = &wqe->direct_wqe;
579 struct hinic_cmdq_wqe_scmd *wqe_scmd;
580 struct hinic_ctrl *ctrl;
583 wqe_scmd = &direct_wqe->wqe_scmd;
584 ctrl = &wqe_scmd->ctrl;
585 ctrl_info = be32_to_cpu(ctrl->ctrl_info);
587 /* HW should toggle the HW BUSY BIT */
588 if (!CMDQ_WQE_COMPLETED(ctrl_info))
/* reclaim the slot and advance the consumer index */
591 clear_wqe_complete_bit(cmdq, wqe);
593 hinic_put_wqe(cmdq->wq, WQE_SCMD_SIZE);
/* Deliver the HW error code to the waiter, if one is still registered
 * for this producer-index slot (it may have timed out and unhooked)
 */
597 static void cmdq_update_errcode(struct hinic_cmdq *cmdq, u16 prod_idx,
600 if (cmdq->errcode[prod_idx])
601 *cmdq->errcode[prod_idx] = errcode;
605 * cmdq_sync_cmd_handler - cmdq completion event handler for sync command
606 * @cmdq: the cmdq of the command
607 * @cons_idx: the consumer index to update the error code for
608 * @errcode: the error code
610 static void cmdq_sync_cmd_handler(struct hinic_cmdq *cmdq, u16 cons_idx,
/* slot keys are recorded by producer index, which equals the consumer
 * index of the completed WQE
 */
613 u16 prod_idx = cons_idx;
615 spin_lock(&cmdq->cmdq_lock);
616 cmdq_update_errcode(cmdq, prod_idx, errcode);
618 wmb(); /* write all before update for the command request */
620 if (cmdq->done[prod_idx])
621 complete(cmdq->done[prod_idx]);
622 spin_unlock(&cmdq->cmdq_lock);
/* Handle a completed long-command WQE: extract the HW error code, wake
 * the synchronous waiter, then free the WQE slot.
 */
625 static int cmdq_cmd_ceq_handler(struct hinic_cmdq *cmdq, u16 ci,
626 struct hinic_cmdq_wqe *cmdq_wqe)
628 struct hinic_cmdq_wqe_lcmd *wqe_lcmd = &cmdq_wqe->wqe_lcmd;
629 struct hinic_status *status = &wqe_lcmd->status;
630 struct hinic_ctrl *ctrl = &wqe_lcmd->ctrl;
/* not yet completed by HW - stop processing this queue */
633 if (!CMDQ_WQE_COMPLETED(be32_to_cpu(ctrl->ctrl_info)))
636 errcode = CMDQ_WQE_ERRCODE_GET(be32_to_cpu(status->status_info), VAL);
638 cmdq_sync_cmd_handler(cmdq, ci, errcode);
640 clear_wqe_complete_bit(cmdq, cmdq_wqe);
641 hinic_put_wqe(cmdq->wq, WQE_LCMD_SIZE);
646 * cmdq_ceq_handler - cmdq completion event handler
647 * @handle: private data for the handler(cmdqs)
648 * @ceqe_data: ceq element data
650 static void cmdq_ceq_handler(void *handle, u32 ceqe_data)
652 enum hinic_cmdq_type cmdq_type = CMDQ_CEQE_GET(ceqe_data, TYPE);
653 struct hinic_cmdqs *cmdqs = (struct hinic_cmdqs *)handle;
654 struct hinic_cmdq *cmdq = &cmdqs->cmdq[cmdq_type];
655 struct hinic_cmdq_header *header;
656 struct hinic_hw_wqe *hw_wqe;
657 int err, set_arm = 0;
661 /* Read the smallest wqe size for getting wqe size */
662 while ((hw_wqe = hinic_read_wqe(cmdq->wq, WQE_SCMD_SIZE, &ci))) {
666 header = CMDQ_WQE_HEADER(&hw_wqe->cmdq_wqe);
667 saved_data = be32_to_cpu(header->saved_data);
/* ARM WQEs are short commands with no registered waiter */
669 if (HINIC_SAVED_DATA_GET(saved_data, ARM)) {
670 /* arm_bit was set until here */
673 if (cmdq_arm_ceq_handler(cmdq, &hw_wqe->cmdq_wqe))
/* regular command: re-read at the full long-command size */
678 hw_wqe = hinic_read_wqe(cmdq->wq, WQE_LCMD_SIZE, &ci);
682 if (cmdq_cmd_ceq_handler(cmdq, ci, &hw_wqe->cmdq_wqe))
/* re-arm the CEQ so further completions raise interrupts */
688 struct hinic_hwif *hwif = cmdqs->hwif;
689 struct pci_dev *pdev = hwif->pdev;
691 err = hinic_set_arm_bit(cmdqs, HINIC_SET_ARM_CMDQ, cmdq_type);
693 dev_err(&pdev->dev, "Failed to set arm for CMDQ\n");
698 * cmdq_init_queue_ctxt - init the queue ctxt of a cmdq
699 * @cmdq_ctxt: cmdq ctxt to initialize
701 * @cmdq_pages: the memory of the queue
703 static void cmdq_init_queue_ctxt(struct hinic_cmdq_ctxt *cmdq_ctxt,
704 struct hinic_cmdq *cmdq,
705 struct hinic_cmdq_pages *cmdq_pages)
707 struct hinic_cmdq_ctxt_info *ctxt_info = &cmdq_ctxt->ctxt_info;
708 u64 wq_first_page_paddr, cmdq_first_block_paddr, pfn;
709 struct hinic_cmdqs *cmdqs = cmdq_to_cmdqs(cmdq);
710 struct hinic_wq *wq = cmdq->wq;
712 /* The data in the HW is in Big Endian Format */
713 wq_first_page_paddr = be64_to_cpu(*wq->block_vaddr);
/* PFN of the first WQ page, in wq-page-size units */
715 pfn = CMDQ_PFN(wq_first_page_paddr, wq->wq_page_size);
717 ctxt_info->curr_wqe_page_pfn =
718 HINIC_CMDQ_CTXT_PAGE_INFO_SET(pfn, CURR_WQE_PAGE_PFN) |
719 HINIC_CMDQ_CTXT_PAGE_INFO_SET(HINIC_CEQ_ID_CMDQ, EQ_ID) |
720 HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_ARM) |
721 HINIC_CMDQ_CTXT_PAGE_INFO_SET(1, CEQ_EN) |
722 HINIC_CMDQ_CTXT_PAGE_INFO_SET(cmdq->wrapped, WRAPPED);
724 /* block PFN - Read Modify Write */
725 cmdq_first_block_paddr = cmdq_pages->page_paddr;
727 pfn = CMDQ_PFN(cmdq_first_block_paddr, wq->wq_page_size);
729 ctxt_info->wq_block_pfn =
730 HINIC_CMDQ_CTXT_BLOCK_INFO_SET(pfn, WQ_BLOCK_PFN) |
731 HINIC_CMDQ_CTXT_BLOCK_INFO_SET(atomic_read(&wq->cons_idx), CI);
733 cmdq_ctxt->func_idx = HINIC_HWIF_FUNC_IDX(cmdqs->hwif);
734 cmdq_ctxt->cmdq_type = cmdq->cmdq_type;
738 * init_cmdq - initialize cmdq
740 * @wq: the wq attaced to the cmdq
741 * @q_type: the cmdq type of the cmdq
742 * @db_area: doorbell area for the cmdq
744 * Return 0 - Success, negative - Failure
746 static int init_cmdq(struct hinic_cmdq *cmdq, struct hinic_wq *wq,
747 enum hinic_cmdq_type q_type, void __iomem *db_area)
752 cmdq->cmdq_type = q_type;
755 spin_lock_init(&cmdq->cmdq_lock);
/* per-slot completion/errcode pointer tables, one entry per WQEBB */
757 cmdq->done = vzalloc(wq->q_depth * sizeof(*cmdq->done));
761 cmdq->errcode = vzalloc(wq->q_depth * sizeof(*cmdq->errcode));
762 if (!cmdq->errcode) {
/* doorbell register lives at a fixed offset inside the db area */
767 cmdq->db_base = db_area + CMDQ_DB_OFF;
776 * free_cmdq - Free cmdq
777 * @cmdq: the cmdq to free
779 static void free_cmdq(struct hinic_cmdq *cmdq)
/* NOTE(review): only errcode is freed in this excerpt; verify the
 * matching vfree(cmdq->done) exists in the full source
 */
781 vfree(cmdq->errcode);
786 * init_cmdqs_ctxt - write the cmdq ctxt to HW after init all cmdq
787 * @hwdev: the NIC HW device
788 * @cmdqs: cmdqs to write the ctxts for
789 * &db_area: db_area for all the cmdqs
791 * Return 0 - Success, negative - Failure
793 static int init_cmdqs_ctxt(struct hinic_hwdev *hwdev,
794 struct hinic_cmdqs *cmdqs, void __iomem **db_area)
796 struct hinic_hwif *hwif = hwdev->hwif;
797 enum hinic_cmdq_type type, cmdq_type;
798 struct hinic_cmdq_ctxt *cmdq_ctxts;
799 struct pci_dev *pdev = hwif->pdev;
800 struct hinic_pfhwdev *pfhwdev;
801 size_t cmdq_ctxts_size;
/* only PF/PPF may program cmdq contexts through the mgmt channel */
804 if (!HINIC_IS_PF(hwif) && !HINIC_IS_PPF(hwif)) {
805 dev_err(&pdev->dev, "Unsupported PCI function type\n");
809 cmdq_ctxts_size = HINIC_MAX_CMDQ_TYPES * sizeof(*cmdq_ctxts);
810 cmdq_ctxts = devm_kzalloc(&pdev->dev, cmdq_ctxts_size, GFP_KERNEL);
814 pfhwdev = container_of(hwdev, struct hinic_pfhwdev, hwdev);
/* first pass: init every cmdq and build its ctxt locally */
816 cmdq_type = HINIC_CMDQ_SYNC;
817 for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
818 err = init_cmdq(&cmdqs->cmdq[cmdq_type],
819 &cmdqs->saved_wqs[cmdq_type], cmdq_type,
822 dev_err(&pdev->dev, "Failed to initialize cmdq\n");
826 cmdq_init_queue_ctxt(&cmdq_ctxts[cmdq_type],
827 &cmdqs->cmdq[cmdq_type],
831 /* Write the CMDQ ctxts */
832 cmdq_type = HINIC_CMDQ_SYNC;
833 for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++) {
834 err = hinic_msg_to_mgmt(&pfhwdev->pf_to_mgmt, HINIC_MOD_COMM,
835 HINIC_COMM_CMD_CMDQ_CTXT_SET,
836 &cmdq_ctxts[cmdq_type],
837 sizeof(cmdq_ctxts[cmdq_type]),
838 NULL, NULL, HINIC_MGMT_MSG_SYNC);
840 dev_err(&pdev->dev, "Failed to set CMDQ CTXT type = %d\n",
842 goto err_write_cmdq_ctxt;
846 devm_kfree(&pdev->dev, cmdq_ctxts);
/* error path: free every cmdq initialized so far, then the ctxt array */
850 cmdq_type = HINIC_MAX_CMDQ_TYPES;
853 for (type = HINIC_CMDQ_SYNC; type < cmdq_type; type++)
854 free_cmdq(&cmdqs->cmdq[type]);
856 devm_kfree(&pdev->dev, cmdq_ctxts);
861 * hinic_init_cmdqs - init all cmdqs
862 * @cmdqs: cmdqs to init
863 * @hwif: HW interface for accessing cmdqs
864 * @db_area: doorbell areas for all the cmdqs
866 * Return 0 - Success, negative - Failure
868 int hinic_init_cmdqs(struct hinic_cmdqs *cmdqs, struct hinic_hwif *hwif,
869 void __iomem **db_area)
871 struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
872 struct pci_dev *pdev = hwif->pdev;
873 struct hinic_hwdev *hwdev;
874 size_t saved_wqs_size;
/* DMA pool for command buffers handed out by hinic_alloc_cmdq_buf() */
879 cmdqs->cmdq_buf_pool = pci_pool_create("hinic_cmdq", pdev,
881 HINIC_CMDQ_BUF_SIZE, 0);
882 if (!cmdqs->cmdq_buf_pool)
885 saved_wqs_size = HINIC_MAX_CMDQ_TYPES * sizeof(struct hinic_wq);
886 cmdqs->saved_wqs = devm_kzalloc(&pdev->dev, saved_wqs_size, GFP_KERNEL);
887 if (!cmdqs->saved_wqs) {
/* all cmdq types share one WQ allocation sized for the largest WQE */
892 max_wqe_size = WQE_LCMD_SIZE;
893 err = hinic_wqs_cmdq_alloc(&cmdqs->cmdq_pages, cmdqs->saved_wqs, hwif,
894 HINIC_MAX_CMDQ_TYPES, CMDQ_WQEBB_SIZE,
895 CMDQ_WQ_PAGE_SIZE, CMDQ_DEPTH, max_wqe_size);
897 dev_err(&pdev->dev, "Failed to allocate CMDQ wqs\n");
901 hwdev = container_of(func_to_io, struct hinic_hwdev, func_to_io);
902 err = init_cmdqs_ctxt(hwdev, cmdqs, db_area);
904 dev_err(&pdev->dev, "Failed to write cmdq ctxt\n");
/* start receiving cmdq completion events */
908 hinic_ceq_register_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ, cmdqs,
/* error unwind: release resources in reverse order of acquisition */
913 hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
914 HINIC_MAX_CMDQ_TYPES);
917 devm_kfree(&pdev->dev, cmdqs->saved_wqs);
920 pci_pool_destroy(cmdqs->cmdq_buf_pool);
925 * hinic_free_cmdqs - free all cmdqs
926 * @cmdqs: cmdqs to free
928 void hinic_free_cmdqs(struct hinic_cmdqs *cmdqs)
930 struct hinic_func_to_io *func_to_io = cmdqs_to_func_to_io(cmdqs);
931 struct hinic_hwif *hwif = cmdqs->hwif;
932 struct pci_dev *pdev = hwif->pdev;
933 enum hinic_cmdq_type cmdq_type;
/* stop completion events first so nothing touches the queues below */
935 hinic_ceq_unregister_cb(&func_to_io->ceqs, HINIC_CEQ_CMDQ);
937 cmdq_type = HINIC_CMDQ_SYNC;
938 for (; cmdq_type < HINIC_MAX_CMDQ_TYPES; cmdq_type++)
939 free_cmdq(&cmdqs->cmdq[cmdq_type]);
/* teardown mirrors hinic_init_cmdqs() in reverse order */
941 hinic_wqs_cmdq_free(&cmdqs->cmdq_pages, cmdqs->saved_wqs,
942 HINIC_MAX_CMDQ_TYPES);
944 devm_kfree(&pdev->dev, cmdqs->saved_wqs);
946 pci_pool_destroy(cmdqs->cmdq_buf_pool);