/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_cxt.h"
#include "qed_dev_api.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_int.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sp.h"
#include "qed_sriov.h"
#include "qed_roce.h"
/***************************************************************************
* Structures & Definitions
***************************************************************************/

#define SPQ_HIGH_PRI_RESERVE_DEFAULT	(1)
#define SPQ_BLOCK_SLEEP_LENGTH		(1000)
/***************************************************************************
* Blocking Imp. (BLOCK/EBLOCK mode)
***************************************************************************/
static void qed_spq_blocking_cb(struct qed_hwfn *p_hwfn,
				void *cookie,
				union event_ring_data *data, u8 fw_return_code)
{
	struct qed_spq_comp_done *comp_done;

	comp_done = (struct qed_spq_comp_done *)cookie;

	comp_done->done = 0x1;
	comp_done->fw_return_code = fw_return_code;

	/* make update visible to waiting thread */
	smp_wmb();
}
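
/* Poll for the blocking callback above to flag completion, sleeping between
 * polls; if the ramrod never completes, request an MCP drain and poll once
 * more before giving up.
 */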
static int qed_spq_block(struct qed_hwfn *p_hwfn,
			 struct qed_spq_entry *p_ent,
			 u8 *p_fw_ret)
{
	int sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	struct qed_spq_comp_done *comp_done;
	int rc;

	comp_done = (struct qed_spq_comp_done *)p_ent->comp_cb.cookie;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n");
	rc = qed_mcp_drain(p_hwfn, p_hwfn->p_main_ptt);
	if (rc)
		DP_NOTICE(p_hwfn, "MCP drain failed\n");

	/* Retry after drain */
	sleep_count = SPQ_BLOCK_SLEEP_LENGTH;
	while (sleep_count) {
		/* validate we receive completion update */
		smp_rmb();
		if (comp_done->done == 1) {
			if (p_fw_ret)
				*p_fw_ret = comp_done->fw_return_code;
			return 0;
		}
		usleep_range(5000, 10000);
		sleep_count--;
	}

	if (comp_done->done == 1) {
		if (p_fw_ret)
			*p_fw_ret = comp_done->fw_return_code;
		return 0;
	}

	DP_NOTICE(p_hwfn, "Ramrod is stuck, MCP drain failed\n");

	return -EBUSY;
}
/***************************************************************************
* SPQ entries inner API
***************************************************************************/
static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
			      struct qed_spq_entry *p_ent)
{
	switch (p_ent->comp_mode) {
	case QED_SPQ_MODE_EBLOCK:
	case QED_SPQ_MODE_BLOCK:
		p_ent->comp_cb.function = qed_spq_blocking_cb;
		break;
	case QED_SPQ_MODE_CB:
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown SPQE completion mode %d\n",
			  p_ent->comp_mode);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Ramrod header: [CID 0x%08x CMD 0x%02x protocol 0x%02x] Data pointer: [%08x:%08x] Completion Mode: %s\n",
		   p_ent->elem.hdr.cid,
		   p_ent->elem.hdr.cmd_id,
		   p_ent->elem.hdr.protocol_id,
		   p_ent->elem.data_ptr.hi,
		   p_ent->elem.data_ptr.lo,
		   D_TRINE(p_ent->comp_mode, QED_SPQ_MODE_EBLOCK,
			   QED_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK",
			   "MODE_CB"));

	return 0;
}
/***************************************************************************
* HSI access
***************************************************************************/
static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
				  struct qed_spq *p_spq)
{
	u16 pq;
	struct qed_cxt_info cxt_info;
	struct core_conn_context *p_cxt;
	union qed_qm_pq_params pq_params;
	int rc;

	cxt_info.iid = p_spq->cid;

	rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
	if (rc < 0) {
		DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
			  p_spq->cid);
		return;
	}

	p_cxt = cxt_info.p_cxt;

	SET_FIELD(p_cxt->xstorm_ag_context.flags10,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags1,
		  XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
	SET_FIELD(p_cxt->xstorm_ag_context.flags9,
		  XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);

	/* QM physical queue */
	memset(&pq_params, 0, sizeof(pq_params));
	pq_params.core.tc = LB_TC;
	pq = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
	p_cxt->xstorm_ag_context.physical_q0 = cpu_to_le16(pq);

	p_cxt->xstorm_st_context.spq_base_lo =
		DMA_LO_LE(p_spq->chain.p_phys_addr);
	p_cxt->xstorm_st_context.spq_base_hi =
		DMA_HI_LE(p_spq->chain.p_phys_addr);

	DMA_REGPAIR_LE(p_cxt->xstorm_st_context.consolid_base_addr,
		       p_hwfn->p_consq->chain.p_phys_addr);
}
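
/* Copy a prepared SPQ element into the ring and ring the XCM doorbell so
 * firmware sees the new producer value.
 */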
static int qed_spq_hw_post(struct qed_hwfn *p_hwfn,
			   struct qed_spq *p_spq, struct qed_spq_entry *p_ent)
{
	struct qed_chain *p_chain = &p_hwfn->p_spq->chain;
	u16 echo = qed_chain_get_prod_idx(p_chain);
	struct slow_path_element *elem;
	struct core_db_data db;

	p_ent->elem.hdr.echo = cpu_to_le16(echo);
	elem = qed_chain_produce(p_chain);
	if (!elem) {
		DP_NOTICE(p_hwfn, "Failed to produce from SPQ chain\n");
		return -EINVAL;
	}

	*elem = p_ent->elem; /* struct assignment */

	/* send a doorbell on the slow hwfn session */
	memset(&db, 0, sizeof(db));
	SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET);
	SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL,
		  DQ_XCM_CORE_SPQ_PROD_CMD);
	db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
	db.spq_prod = cpu_to_le16(qed_chain_get_prod_idx(p_chain));

	/* make sure the SPQE is updated before the doorbell */
	wmb();

	DOORBELL(p_hwfn, qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db);

	/* make sure the doorbell was rung */
	wmb();

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x agg_params: %02x, prod: %04x\n",
		   qed_db_addr(p_spq->cid, DQ_DEMS_LEGACY),
		   p_spq->cid, db.params, db.agg_flags,
		   qed_chain_get_prod_idx(p_chain));

	return 0;
}
/***************************************************************************
* Asynchronous events
***************************************************************************/
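
/* Dispatch EQ entries flagged as asynchronous to their owning protocol:
 * RoCE events and common (SRIOV) events are handled here; anything else
 * is reported as unknown.
 */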
static int qed_async_event_completion(struct qed_hwfn *p_hwfn,
				      struct event_ring_entry *p_eqe)
{
	switch (p_eqe->protocol_id) {
	case PROTOCOLID_ROCE:
		qed_async_roce_event(p_hwfn, p_eqe);
		return 0;
	case PROTOCOLID_COMMON:
		return qed_sriov_eqe_event(p_hwfn,
					   p_eqe->opcode,
					   p_eqe->echo, &p_eqe->data);
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown Async completion for protocol: %d\n",
			  p_eqe->protocol_id);
		return -EINVAL;
	}
}
/***************************************************************************
* EQ API
***************************************************************************/
void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod)
{
	u32 addr = GTT_BAR0_MAP_REG_USDM_RAM +
		   USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id);

	REG_WR16(p_hwfn, addr, prod);

	/* keep prod updates ordered */
	mmiowb();
}
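
/* Slowpath status-block callback: consume all EQ entries up to the
 * firmware consumer snapshot and then publish the new EQ producer.
 */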
int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie)
{
	struct qed_eq *p_eq = cookie;
	struct qed_chain *p_chain = &p_eq->chain;
	int rc = 0;

	/* take a snapshot of the FW consumer */
	u16 fw_cons_idx = le16_to_cpu(*p_eq->p_fw_cons);

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx);

	/* Need to guarantee the fw_cons index we use points to a usable
	 * element (to comply with our chain), so our macros would comply
	 */
	if ((fw_cons_idx & qed_chain_get_usable_per_page(p_chain)) ==
	    qed_chain_get_usable_per_page(p_chain))
		fw_cons_idx += qed_chain_get_unusable_per_page(p_chain);

	/* Complete current segment of eq entries */
	while (fw_cons_idx != qed_chain_get_cons_idx(p_chain)) {
		struct event_ring_entry *p_eqe = qed_chain_consume(p_chain);

		if (!p_eqe) {
			rc = -EINVAL;
			break;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "op %x prot %x res0 %x echo %x fwret %x flags %x\n",
			   p_eqe->opcode,
			   p_eqe->protocol_id,
			   p_eqe->reserved0,
			   le16_to_cpu(p_eqe->echo),
			   p_eqe->fw_return_code,
			   p_eqe->flags);

		if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) {
			if (qed_async_event_completion(p_hwfn, p_eqe))
				rc = -EINVAL;
		} else if (qed_spq_completion(p_hwfn,
					      p_eqe->echo,
					      p_eqe->fw_return_code,
					      &p_eqe->data)) {
			rc = -EINVAL;
		}

		qed_chain_recycle_consumed(p_chain);
	}

	qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));

	return rc;
}
struct qed_eq *qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
{
	struct qed_eq *p_eq;

	/* Allocate EQ struct */
	p_eq = kzalloc(sizeof(*p_eq), GFP_KERNEL);
	if (!p_eq)
		return NULL;

	/* Allocate and initialize EQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    num_elem,
			    sizeof(union event_ring_element),
			    &p_eq->chain))
		goto eq_allocate_fail;

	/* register EQ completion on the SP SB */
	qed_int_register_cb(p_hwfn, qed_eq_completion,
			    p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons);

	return p_eq;

eq_allocate_fail:
	qed_eq_free(p_hwfn, p_eq);
	return NULL;
}
void qed_eq_setup(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	qed_chain_reset(&p_eq->chain);
}
void qed_eq_free(struct qed_hwfn *p_hwfn, struct qed_eq *p_eq)
{
	if (!p_eq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_eq->chain);
	kfree(p_eq);
}
/***************************************************************************
* CQE API - manipulate EQ functionality
***************************************************************************/
static int qed_cqe_completion(struct qed_hwfn *p_hwfn,
			      struct eth_slow_path_rx_cqe *cqe,
			      enum protocol_type protocol)
{
	if (IS_VF(p_hwfn->cdev))
		return 0;

	/* @@@tmp - it's possible we'll eventually want to handle some
	 * actual commands that can arrive here, but for now this is only
	 * used to complete the ramrod using the echo value on the cqe
	 */
	return qed_spq_completion(p_hwfn, cqe->echo, 0, NULL);
}

int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn,
			   struct eth_slow_path_rx_cqe *cqe)
{
	int rc;

	rc = qed_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to handle RXQ CQE [cmd 0x%02x]\n",
			  cqe->ramrod_cmd_id);

	return rc;
}
/***************************************************************************
* Slow hwfn Queue (spq)
***************************************************************************/
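
/* (Re)initialize the SPQ: rebuild the free pool over the DMA'd ramrod
 * shadow entries, clear the statistics and completion bitmap, acquire the
 * CORE connection CID and program the SPQ context for hardware.
 */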
void qed_spq_setup(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_virt = NULL;
	dma_addr_t p_phys = 0;
	u32 i, capacity;

	INIT_LIST_HEAD(&p_spq->pending);
	INIT_LIST_HEAD(&p_spq->completion_pending);
	INIT_LIST_HEAD(&p_spq->free_pool);
	INIT_LIST_HEAD(&p_spq->unlimited_pending);
	spin_lock_init(&p_spq->lock);

	/* SPQ empty pool */
	p_phys = p_spq->p_phys + offsetof(struct qed_spq_entry, ramrod);
	p_virt = p_spq->p_virt;

	capacity = qed_chain_get_capacity(&p_spq->chain);
	for (i = 0; i < capacity; i++) {
		DMA_REGPAIR_LE(p_virt->elem.data_ptr, p_phys);

		list_add_tail(&p_virt->list, &p_spq->free_pool);

		p_virt++;
		p_phys += sizeof(struct qed_spq_entry);
	}

	/* Statistics */
	p_spq->normal_count = 0;
	p_spq->comp_count = 0;
	p_spq->comp_sent_count = 0;
	p_spq->unlimited_pending_count = 0;

	bitmap_zero(p_spq->p_comp_bitmap, SPQ_RING_SIZE);
	p_spq->comp_bitmap_idx = 0;

	/* SPQ cid, cannot fail */
	qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid);
	qed_spq_hw_initialize(p_hwfn, p_spq);

	/* reset the chain itself */
	qed_chain_reset(&p_spq->chain);
}
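
/* Allocate the SPQ: a single-page ring of slow_path_element plus a
 * DMA-coherent array of qed_spq_entry holding the ramrod data each ring
 * element points at.
 */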
int qed_spq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_spq_entry *p_virt = NULL;
	struct qed_spq *p_spq = NULL;
	dma_addr_t p_phys = 0;
	u32 capacity;

	/* SPQ struct */
	p_spq = kzalloc(sizeof(struct qed_spq), GFP_KERNEL);
	if (!p_spq)
		return -ENOMEM;

	/* SPQ ring */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_SINGLE,
			    QED_CHAIN_CNT_TYPE_U16,
			    0,	/* N/A when the mode is SINGLE */
			    sizeof(struct slow_path_element),
			    &p_spq->chain))
		goto spq_allocate_fail;

	/* allocate and fill the SPQ elements (incl. ramrod data list) */
	capacity = qed_chain_get_capacity(&p_spq->chain);
	p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
				    capacity * sizeof(struct qed_spq_entry),
				    &p_phys, GFP_KERNEL);
	if (!p_virt)
		goto spq_allocate_fail;

	p_spq->p_virt = p_virt;
	p_spq->p_phys = p_phys;
	p_hwfn->p_spq = p_spq;

	return 0;

spq_allocate_fail:
	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
	return -ENOMEM;
}
void qed_spq_free(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	u32 capacity;

	if (!p_spq)
		return;

	if (p_spq->p_virt) {
		capacity = qed_chain_get_capacity(&p_spq->chain);
		dma_free_coherent(&p_hwfn->cdev->pdev->dev,
				  capacity * sizeof(struct qed_spq_entry),
				  p_spq->p_virt, p_spq->p_phys);
	}

	qed_chain_free(p_hwfn->cdev, &p_spq->chain);
	kfree(p_spq);
}
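
/* Hand out an SPQ entry: reuse one from the free pool when available,
 * otherwise allocate a transient entry that will go through the
 * unlimited_pending list.
 */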
int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;
	int rc = 0;

	spin_lock_bh(&p_spq->lock);

	if (list_empty(&p_spq->free_pool)) {
		p_ent = kzalloc(sizeof(*p_ent), GFP_ATOMIC);
		if (!p_ent) {
			DP_NOTICE(p_hwfn,
				  "Failed to allocate an SPQ entry for a pending ramrod\n");
			rc = -ENOMEM;
			goto out_unlock;
		}
		p_ent->queue = &p_spq->unlimited_pending;
	} else {
		p_ent = list_first_entry(&p_spq->free_pool,
					 struct qed_spq_entry, list);
		list_del(&p_ent->list);
		p_ent->queue = &p_spq->pending;
	}

	*pp_ent = p_ent;

out_unlock:
	spin_unlock_bh(&p_spq->lock);
	return rc;
}
/* Locked variant; Should be called while the SPQ lock is taken */
static void __qed_spq_return_entry(struct qed_hwfn *p_hwfn,
				   struct qed_spq_entry *p_ent)
{
	list_add_tail(&p_ent->list, &p_hwfn->p_spq->free_pool);
}

void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent)
{
	spin_lock_bh(&p_hwfn->p_spq->lock);
	__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_hwfn->p_spq->lock);
}
/**
 * @brief qed_spq_add_entry - adds a new entry to the pending
 *        list. Should be used while lock is being held.
 *
 * Adds an entry to the pending list if there is room (an empty
 * element is available in the free_pool), or else places the
 * entry in the unlimited_pending pool.
 *
 * @param p_hwfn
 * @param p_ent
 * @param priority
 *
 * @return int
 */
static int qed_spq_add_entry(struct qed_hwfn *p_hwfn,
			     struct qed_spq_entry *p_ent,
			     enum spq_priority priority)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;

	if (p_ent->queue == &p_spq->unlimited_pending) {
		if (list_empty(&p_spq->free_pool)) {
			list_add_tail(&p_ent->list, &p_spq->unlimited_pending);
			p_spq->unlimited_pending_count++;

			return 0;
		} else {
			struct qed_spq_entry *p_en2;

			p_en2 = list_first_entry(&p_spq->free_pool,
						 struct qed_spq_entry, list);
			list_del(&p_en2->list);

			/* Copy the ring element physical pointer to the new
			 * entry, since we are about to override the entire ring
			 * entry and don't want to lose the pointer.
			 */
			p_ent->elem.data_ptr = p_en2->elem.data_ptr;

			*p_en2 = *p_ent;

			/* EBLOCK responsible to free the allocated p_ent */
			if (p_ent->comp_mode != QED_SPQ_MODE_EBLOCK)
				kfree(p_ent);
			else
				p_ent->post_ent = p_en2;

			p_ent = p_en2;
		}
	}

	/* entry is to be placed in 'pending' queue */
	switch (priority) {
	case QED_SPQ_PRIORITY_NORMAL:
		list_add_tail(&p_ent->list, &p_spq->pending);
		p_spq->normal_count++;
		break;
	case QED_SPQ_PRIORITY_HIGH:
		list_add(&p_ent->list, &p_spq->pending);
		p_spq->high_count++;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
/***************************************************************************
* Accessor
***************************************************************************/
u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->p_spq)
		return 0xffffffff;	/* illegal */
	return p_hwfn->p_spq->cid;
}
/***************************************************************************
* Posting new Ramrods
***************************************************************************/
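
/* Post entries from the given list to hardware as long as at least
 * keep_reserve ring elements remain free for high-priority ramrods.
 */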
static int qed_spq_post_list(struct qed_hwfn *p_hwfn,
			     struct list_head *head, u32 keep_reserve)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	int rc;

	while (qed_chain_get_elem_left(&p_spq->chain) > keep_reserve &&
	       !list_empty(head)) {
		struct qed_spq_entry *p_ent =
			list_first_entry(head, struct qed_spq_entry, list);
		list_del(&p_ent->list);
		list_add_tail(&p_ent->list, &p_spq->completion_pending);
		p_spq->comp_sent_count++;

		rc = qed_spq_hw_post(p_hwfn, p_spq, p_ent);
		if (rc) {
			list_del(&p_ent->list);
			__qed_spq_return_entry(p_hwfn, p_ent);
			return rc;
		}
	}

	return 0;
}
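
/* Move entries from unlimited_pending into the pending list while the
 * free pool has room, then post the pending list to hardware.
 */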
static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
	struct qed_spq *p_spq = p_hwfn->p_spq;
	struct qed_spq_entry *p_ent = NULL;

	while (!list_empty(&p_spq->free_pool)) {
		if (list_empty(&p_spq->unlimited_pending))
			break;

		p_ent = list_first_entry(&p_spq->unlimited_pending,
					 struct qed_spq_entry, list);
		if (!p_ent)
			return -EINVAL;

		list_del(&p_ent->list);

		qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	}

	return qed_spq_post_list(p_hwfn, &p_spq->pending,
				 SPQ_HIGH_PRI_RESERVE_DEFAULT);
}
/* Avoid overriding of SPQ entries when getting out-of-order completions, by
 * marking the completions in a bitmap and increasing the chain consumer only
 * for the first successive completed entries.
 */
static void qed_spq_comp_bmap_update(struct qed_hwfn *p_hwfn, __le16 echo)
{
	u16 pos = le16_to_cpu(echo) % SPQ_RING_SIZE;
	struct qed_spq *p_spq = p_hwfn->p_spq;

	__set_bit(pos, p_spq->p_comp_bitmap);
	while (test_bit(p_spq->comp_bitmap_idx,
			p_spq->p_comp_bitmap)) {
		__clear_bit(p_spq->comp_bitmap_idx,
			    p_spq->p_comp_bitmap);
		p_spq->comp_bitmap_idx++;
		qed_chain_return_produced(&p_spq->chain);
	}
}
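
/* Post a single ramrod. The entry is filled, queued and flushed to the
 * ring under the SPQ lock; for EBLOCK mode the caller then sleeps in
 * qed_spq_block() until the matching EQE arrives and returns the entry
 * itself, while other modes are cleaned up from the EQ handler.
 */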
int qed_spq_post(struct qed_hwfn *p_hwfn,
		 struct qed_spq_entry *p_ent, u8 *fw_return_code)
{
	int rc = 0;
	struct qed_spq *p_spq = p_hwfn ? p_hwfn->p_spq : NULL;
	bool b_ret_ent = true;

	if (!p_hwfn)
		return -EINVAL;

	if (!p_ent) {
		DP_NOTICE(p_hwfn, "Got a NULL pointer\n");
		return -EINVAL;
	}

	/* Complete the entry */
	rc = qed_spq_fill_entry(p_hwfn, p_ent);

	spin_lock_bh(&p_spq->lock);

	/* Check return value after LOCK is taken for cleaner error flow */
	if (rc)
		goto spq_post_fail;

	/* Add the request to the pending queue */
	rc = qed_spq_add_entry(p_hwfn, p_ent, p_ent->priority);
	if (rc)
		goto spq_post_fail;

	rc = qed_spq_pend_post(p_hwfn);
	if (rc) {
		/* Since it's possible that pending failed for a different
		 * entry [although unlikely], the failed entry was already
		 * dealt with; No need to return it here.
		 */
		b_ret_ent = false;
		goto spq_post_fail;
	}

	spin_unlock_bh(&p_spq->lock);

	if (p_ent->comp_mode == QED_SPQ_MODE_EBLOCK) {
		/* For entries in QED BLOCK mode, the completion code cannot
		 * perform the necessary cleanup - if it did, we couldn't
		 * access p_ent here to see whether it's successful or not.
		 * Thus, after gaining the answer perform the cleanup here.
		 */
		rc = qed_spq_block(p_hwfn, p_ent, fw_return_code);

		if (p_ent->queue == &p_spq->unlimited_pending) {
			struct qed_spq_entry *p_post_ent = p_ent->post_ent;

			kfree(p_ent);

			/* Return the entry which was actually posted */
			p_ent = p_post_ent;
		}

		if (rc)
			goto spq_post_fail2;

		/* return to pool */
		qed_spq_return_entry(p_hwfn, p_ent);
	}

	return rc;

spq_post_fail2:
	spin_lock_bh(&p_spq->lock);
	list_del(&p_ent->list);
	qed_spq_comp_bmap_update(p_hwfn, p_ent->elem.hdr.echo);

spq_post_fail:
	/* return to the free pool */
	if (b_ret_ent)
		__qed_spq_return_entry(p_hwfn, p_ent);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
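
/* EQ-driven completion of a previously posted ramrod: locate the entry
 * whose echo matches the EQE, run its completion callback and then try
 * to post any ramrods that were waiting for ring space.
 */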
int qed_spq_completion(struct qed_hwfn *p_hwfn,
		       __le16 echo,
		       u8 fw_return_code,
		       union event_ring_data *p_data)
{
	struct qed_spq *p_spq;
	struct qed_spq_entry *p_ent = NULL;
	struct qed_spq_entry *tmp;
	struct qed_spq_entry *found = NULL;
	int rc;

	if (!p_hwfn)
		return -EINVAL;

	p_spq = p_hwfn->p_spq;
	if (!p_spq)
		return -EINVAL;

	spin_lock_bh(&p_spq->lock);
	list_for_each_entry_safe(p_ent, tmp, &p_spq->completion_pending, list) {
		if (p_ent->elem.hdr.echo == echo) {
			list_del(&p_ent->list);
			qed_spq_comp_bmap_update(p_hwfn, echo);
			p_spq->comp_count++;
			found = p_ent;
			break;
		}

		/* This is relatively uncommon - depends on scenarios
		 * which have multiple per-PF sent ramrods.
		 */
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got completion for echo %04x - doesn't match echo %04x in completion pending list\n",
			   le16_to_cpu(echo),
			   le16_to_cpu(p_ent->elem.hdr.echo));
	}

	/* Release lock before callback, as callback may post
	 * an additional ramrod.
	 */
	spin_unlock_bh(&p_spq->lock);

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Failed to find an entry this EQE [echo %04x] completes\n",
			  le16_to_cpu(echo));
		return -EEXIST;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
		   "Complete EQE [echo %04x]: func %p cookie %p)\n",
		   le16_to_cpu(echo),
		   p_ent->comp_cb.function, p_ent->comp_cb.cookie);
	if (found->comp_cb.function)
		found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data,
					fw_return_code);
	else
		DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
			   "Got a completion without a callback function\n");

	if (found->comp_mode != QED_SPQ_MODE_EBLOCK)
		/* EBLOCK is responsible for returning its own entry into the
		 * free list.
		 */
		qed_spq_return_entry(p_hwfn, found);

	/* Attempt to post pending requests */
	spin_lock_bh(&p_spq->lock);
	rc = qed_spq_pend_post(p_hwfn);
	spin_unlock_bh(&p_spq->lock);

	return rc;
}
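
/* Consolidation queue (ConsQ): a firmware-consumed PBL chain whose base
 * address is programmed into the SPQ context in qed_spq_hw_initialize().
 */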
struct qed_consq *qed_consq_alloc(struct qed_hwfn *p_hwfn)
{
	struct qed_consq *p_consq;

	/* Allocate ConsQ struct */
	p_consq = kzalloc(sizeof(*p_consq), GFP_KERNEL);
	if (!p_consq)
		return NULL;

	/* Allocate and initialize ConsQ chain */
	if (qed_chain_alloc(p_hwfn->cdev,
			    QED_CHAIN_USE_TO_PRODUCE,
			    QED_CHAIN_MODE_PBL,
			    QED_CHAIN_CNT_TYPE_U16,
			    QED_CHAIN_PAGE_SIZE / 0x80,
			    0x80, &p_consq->chain))
		goto consq_allocate_fail;

	return p_consq;

consq_allocate_fail:
	qed_consq_free(p_hwfn, p_consq);
	return NULL;
}
void qed_consq_setup(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	qed_chain_reset(&p_consq->chain);
}
void qed_consq_free(struct qed_hwfn *p_hwfn, struct qed_consq *p_consq)
{
	if (!p_consq)
		return;
	qed_chain_free(p_hwfn->cdev, &p_consq->chain);
	kfree(p_consq);
}