// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, Intel Corporation. */

#include "ice_common.h"

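/* Fill a struct ice_ctl_q_info with the register offsets and masks for one
 * control queue pair (send/ATQ and receive/ARQ), selected by the register
 * prefix (PF_FW for the AdminQ, PF_MBX for the PF-VF Mailbox).
 */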
#define ICE_CQ_INIT_REGS(qinfo, prefix)					\
do {									\
	(qinfo)->sq.head = prefix##_ATQH;				\
	(qinfo)->sq.tail = prefix##_ATQT;				\
	(qinfo)->sq.len = prefix##_ATQLEN;				\
	(qinfo)->sq.bah = prefix##_ATQBAH;				\
	(qinfo)->sq.bal = prefix##_ATQBAL;				\
	(qinfo)->sq.len_mask = prefix##_ATQLEN_ATQLEN_M;		\
	(qinfo)->sq.len_ena_mask = prefix##_ATQLEN_ATQENABLE_M;	\
	(qinfo)->sq.len_crit_mask = prefix##_ATQLEN_ATQCRIT_M;		\
	(qinfo)->sq.head_mask = prefix##_ATQH_ATQH_M;			\
	(qinfo)->rq.head = prefix##_ARQH;				\
	(qinfo)->rq.tail = prefix##_ARQT;				\
	(qinfo)->rq.len = prefix##_ARQLEN;				\
	(qinfo)->rq.bah = prefix##_ARQBAH;				\
	(qinfo)->rq.bal = prefix##_ARQBAL;				\
	(qinfo)->rq.len_mask = prefix##_ARQLEN_ARQLEN_M;		\
	(qinfo)->rq.len_ena_mask = prefix##_ARQLEN_ARQENABLE_M;	\
	(qinfo)->rq.len_crit_mask = prefix##_ARQLEN_ARQCRIT_M;		\
	(qinfo)->rq.head_mask = prefix##_ARQH_ARQH_M;			\
} while (0)

/**
 * ice_adminq_init_regs - Initialize AdminQ registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_adminq_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;

	ICE_CQ_INIT_REGS(cq, PF_FW);
}

/**
 * ice_mailbox_init_regs - Initialize Mailbox registers
 * @hw: pointer to the hardware structure
 *
 * This assumes the alloc_sq and alloc_rq functions have already been called
 */
static void ice_mailbox_init_regs(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->mailboxq;

	ICE_CQ_INIT_REGS(cq, PF_MBX);
}

/**
 * ice_check_sq_alive - check if the Control Send Queue is enabled
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if Queue is enabled else false.
 */
bool ice_check_sq_alive(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* check both queue-length and queue-enable fields */
	if (cq->sq.len && cq->sq.len_mask && cq->sq.len_ena_mask)
		return (rd32(hw, cq->sq.len) & (cq->sq.len_mask |
						cq->sq.len_ena_mask)) ==
			(cq->num_sq_entries | cq->sq.len_ena_mask);

	return false;
}

/**
 * ice_alloc_ctrlq_sq_ring - Allocate Control Transmit Queue (ATQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_sq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_sq_entries * sizeof(struct ice_aq_desc);

	cq->sq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->sq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->sq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->sq.desc_buf.size = size;

	cq->sq.cmd_buf = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				      sizeof(struct ice_sq_cd), GFP_KERNEL);
	if (!cq->sq.cmd_buf) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.desc_buf.size,
				   cq->sq.desc_buf.va, cq->sq.desc_buf.pa);
		cq->sq.desc_buf.va = NULL;
		cq->sq.desc_buf.pa = 0;
		cq->sq.desc_buf.size = 0;
		return ICE_ERR_NO_MEMORY;
	}

	return 0;
}

/**
 * ice_alloc_ctrlq_rq_ring - Allocate Control Receive Queue (ARQ) rings
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_ctrlq_rq_ring(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	size_t size = cq->num_rq_entries * sizeof(struct ice_aq_desc);

	cq->rq.desc_buf.va = dmam_alloc_coherent(ice_hw_to_dev(hw), size,
						 &cq->rq.desc_buf.pa,
						 GFP_KERNEL | __GFP_ZERO);
	if (!cq->rq.desc_buf.va)
		return ICE_ERR_NO_MEMORY;
	cq->rq.desc_buf.size = size;

	return 0;
}

/**
 * ice_free_cq_ring - Free control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring
 *
 * This assumes the posted buffers have already been cleaned
 * and de-allocated
 */
static void ice_free_cq_ring(struct ice_hw *hw, struct ice_ctl_q_ring *ring)
{
	dmam_free_coherent(ice_hw_to_dev(hw), ring->desc_buf.size,
			   ring->desc_buf.va, ring->desc_buf.pa);
	ring->desc_buf.va = NULL;
	ring->desc_buf.pa = 0;
	ring->desc_buf.size = 0;
}

/**
 * ice_alloc_rq_bufs - Allocate pre-posted buffers for the ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_rq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* We'll be allocating the buffer info memory first, then we can
	 * allocate the mapped buffers for the event processing
	 */
	cq->rq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_rq_entries,
				       sizeof(cq->rq.desc_buf), GFP_KERNEL);
	if (!cq->rq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->rq.r.rq_bi = (struct ice_dma_mem *)cq->rq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_rq_entries; i++) {
		struct ice_aq_desc *desc;
		struct ice_dma_mem *bi;

		bi = &cq->rq.r.rq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->rq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_rq_bufs;
		bi->size = cq->rq_buf_size;

		/* now configure the descriptors for use */
		desc = ICE_CTL_Q_DESC(cq->rq, i);

		desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (cq->rq_buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
		desc->opcode = 0;
		/* This is in accordance with Admin queue design, there is no
		 * register for buffer size configuration
		 */
		desc->datalen = cpu_to_le16(bi->size);
		desc->retval = 0;
		desc->cookie_high = 0;
		desc->cookie_low = 0;
		desc->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(bi->pa));
		desc->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(bi->pa));
		desc->params.generic.param0 = 0;
		desc->params.generic.param1 = 0;
	}
	return 0;

unwind_alloc_rq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->rq.r.rq_bi[i].size,
				   cq->rq.r.rq_bi[i].va, cq->rq.r.rq_bi[i].pa);
		cq->rq.r.rq_bi[i].va = NULL;
		cq->rq.r.rq_bi[i].pa = 0;
		cq->rq.r.rq_bi[i].size = 0;
	}
	cq->rq.r.rq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->rq.dma_head);
	cq->rq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}

/**
 * ice_alloc_sq_bufs - Allocate empty buffer structs for the ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 */
static enum ice_status
ice_alloc_sq_bufs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	int i;

	/* No mapped memory needed yet, just the buffer info structures */
	cq->sq.dma_head = devm_kcalloc(ice_hw_to_dev(hw), cq->num_sq_entries,
				       sizeof(cq->sq.desc_buf), GFP_KERNEL);
	if (!cq->sq.dma_head)
		return ICE_ERR_NO_MEMORY;
	cq->sq.r.sq_bi = (struct ice_dma_mem *)cq->sq.dma_head;

	/* allocate the mapped buffers */
	for (i = 0; i < cq->num_sq_entries; i++) {
		struct ice_dma_mem *bi;

		bi = &cq->sq.r.sq_bi[i];
		bi->va = dmam_alloc_coherent(ice_hw_to_dev(hw),
					     cq->sq_buf_size, &bi->pa,
					     GFP_KERNEL | __GFP_ZERO);
		if (!bi->va)
			goto unwind_alloc_sq_bufs;
		bi->size = cq->sq_buf_size;
	}
	return 0;

unwind_alloc_sq_bufs:
	/* don't try to free the one that failed... */
	i--;
	for (; i >= 0; i--) {
		dmam_free_coherent(ice_hw_to_dev(hw), cq->sq.r.sq_bi[i].size,
				   cq->sq.r.sq_bi[i].va, cq->sq.r.sq_bi[i].pa);
		cq->sq.r.sq_bi[i].va = NULL;
		cq->sq.r.sq_bi[i].pa = 0;
		cq->sq.r.sq_bi[i].size = 0;
	}
	cq->sq.r.sq_bi = NULL;
	devm_kfree(ice_hw_to_dev(hw), cq->sq.dma_head);
	cq->sq.dma_head = NULL;

	return ICE_ERR_NO_MEMORY;
}
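
/**
 * ice_cfg_cq_regs - program base registers for one control queue ring
 * @hw: pointer to the hardware structure
 * @ring: pointer to the specific control queue ring (send or receive)
 * @num_entries: number of descriptors the ring holds
 *
 * Clears head/tail, programs the length (with the enable bit) and the base
 * address registers, then reads back the base-address-low register to
 * confirm the write reached hardware.
 */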
static enum ice_status
ice_cfg_cq_regs(struct ice_hw *hw, struct ice_ctl_q_ring *ring, u16 num_entries)
{
	/* Clear Head and Tail */
	wr32(hw, ring->head, 0);
	wr32(hw, ring->tail, 0);

	/* set starting point */
	wr32(hw, ring->len, (num_entries | ring->len_ena_mask));
	wr32(hw, ring->bal, lower_32_bits(ring->desc_buf.pa));
	wr32(hw, ring->bah, upper_32_bits(ring->desc_buf.pa));

	/* Check one register to verify that config was applied */
	if (rd32(hw, ring->bal) != lower_32_bits(ring->desc_buf.pa))
		return ICE_ERR_AQ_ERROR;

	return 0;
}

/**
 * ice_cfg_sq_regs - configure Control ATQ registers
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the transmit queue
 */
static enum ice_status
ice_cfg_sq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	return ice_cfg_cq_regs(hw, &cq->sq, cq->num_sq_entries);
}

/**
 * ice_cfg_rq_regs - configure Control ARQ register
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * Configure base address and length registers for the receive (event queue)
 */
static enum ice_status
ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status status;

	status = ice_cfg_cq_regs(hw, &cq->rq, cq->num_rq_entries);
	if (status)
		return status;

	/* Update tail in the HW to post pre-allocated buffers */
	wr32(hw, cq->rq.tail, (u32)(cq->num_rq_entries - 1));

	return 0;
}
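
/* Release every DMA buffer posted to one ring of a control queue, then the
 * command details array and the DMA head allocation. Written as a macro so
 * the sq/rq member names can be pasted from the 'ring' argument.
 */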
#define ICE_FREE_CQ_BUFS(hw, qi, ring)					\
do {									\
	/* free descriptors */						\
	if ((qi)->ring.r.ring##_bi) {					\
		int i;							\
									\
		for (i = 0; i < (qi)->num_##ring##_entries; i++)	\
			if ((qi)->ring.r.ring##_bi[i].pa) {		\
				dmam_free_coherent(ice_hw_to_dev(hw),	\
					(qi)->ring.r.ring##_bi[i].size,	\
					(qi)->ring.r.ring##_bi[i].va,	\
					(qi)->ring.r.ring##_bi[i].pa);	\
				(qi)->ring.r.ring##_bi[i].va = NULL;	\
				(qi)->ring.r.ring##_bi[i].pa = 0;	\
				(qi)->ring.r.ring##_bi[i].size = 0;	\
			}						\
	}								\
	/* free the buffer info list */					\
	if ((qi)->ring.cmd_buf)						\
		devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf);	\
	/* free DMA head */						\
	devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head);		\
} while (0)

/**
 * ice_init_sq - main initialization routine for Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * This is the main initialization routine for the Control Send Queue
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->sq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->sq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_sq_entries || !cq->sq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->sq.next_to_use = 0;
	cq->sq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_sq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_sq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_sq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->sq.count = cq->num_sq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_init_rq - initialize ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main initialization routine for the Admin Receive (Event) Queue.
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *
 * Do *NOT* hold the lock when calling this as the memory allocation routines
 * called are not going to be atomic context safe
 */
static enum ice_status ice_init_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code;

	if (cq->rq.count > 0) {
		/* queue already initialized */
		ret_code = ICE_ERR_NOT_READY;
		goto init_ctrlq_exit;
	}

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->rq_buf_size) {
		ret_code = ICE_ERR_CFG;
		goto init_ctrlq_exit;
	}

	cq->rq.next_to_use = 0;
	cq->rq.next_to_clean = 0;

	/* allocate the ring memory */
	ret_code = ice_alloc_ctrlq_rq_ring(hw, cq);
	if (ret_code)
		goto init_ctrlq_exit;

	/* allocate buffers in the rings */
	ret_code = ice_alloc_rq_bufs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* initialize base registers */
	ret_code = ice_cfg_rq_regs(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_rings;

	/* success! */
	cq->rq.count = cq->num_rq_entries;
	goto init_ctrlq_exit;

init_ctrlq_free_rings:
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

init_ctrlq_exit:
	return ret_code;
}

/**
 * ice_shutdown_sq - shutdown the Control ATQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Transmit Queue
 */
static enum ice_status
ice_shutdown_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->sq_lock);

	if (!cq->sq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_sq_out;
	}

	/* Stop firmware AdminQ processing */
	wr32(hw, cq->sq.head, 0);
	wr32(hw, cq->sq.tail, 0);
	wr32(hw, cq->sq.len, 0);
	wr32(hw, cq->sq.bal, 0);
	wr32(hw, cq->sq.bah, 0);

	cq->sq.count = 0;	/* to indicate uninitialized queue */

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, sq);
	ice_free_cq_ring(hw, &cq->sq);

shutdown_sq_out:
	mutex_unlock(&cq->sq_lock);
	return ret_code;
}

/**
 * ice_aq_ver_check - Check the reported AQ API version.
 * @hw: pointer to the hardware structure
 *
 * Checks if the driver should load on a given AQ API version.
 *
 * Return: 'true' if the driver should attempt to load. 'false' otherwise.
 */
static bool ice_aq_ver_check(struct ice_hw *hw)
{
	if (hw->api_maj_ver > EXP_FW_API_VER_MAJOR) {
		/* Major API version is newer than expected, don't load */
		dev_warn(ice_hw_to_dev(hw),
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		return false;
	} else if (hw->api_maj_ver == EXP_FW_API_VER_MAJOR) {
		if (hw->api_min_ver > (EXP_FW_API_VER_MINOR + 2))
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
		else if ((hw->api_min_ver + 2) < EXP_FW_API_VER_MINOR)
			dev_info(ice_hw_to_dev(hw),
				 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	} else {
		/* Major API version is older than expected, log a warning */
		dev_info(ice_hw_to_dev(hw),
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
	}
	return true;
}

/**
 * ice_shutdown_rq - shutdown Control ARQ
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * The main shutdown routine for the Control Receive Queue
 */
static enum ice_status
ice_shutdown_rq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	enum ice_status ret_code = 0;

	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ret_code = ICE_ERR_NOT_READY;
		goto shutdown_rq_out;
	}

	/* Stop Control Queue processing */
	wr32(hw, cq->rq.head, 0);
	wr32(hw, cq->rq.tail, 0);
	wr32(hw, cq->rq.len, 0);
	wr32(hw, cq->rq.bal, 0);
	wr32(hw, cq->rq.bah, 0);

	/* set rq.count to 0 to indicate uninitialized queue */
	cq->rq.count = 0;

	/* free ring buffers and the ring itself */
	ICE_FREE_CQ_BUFS(hw, cq, rq);
	ice_free_cq_ring(hw, &cq->rq);

shutdown_rq_out:
	mutex_unlock(&cq->rq_lock);
	return ret_code;
}

/**
 * ice_init_check_adminq - Check version for Admin Queue to know if it's alive
 * @hw: pointer to the hardware structure
 */
static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
{
	struct ice_ctl_q_info *cq = &hw->adminq;
	enum ice_status status;

	status = ice_aq_get_fw_ver(hw, NULL);
	if (status)
		goto init_ctrlq_free_rq;

	if (!ice_aq_ver_check(hw)) {
		status = ICE_ERR_FW_API_VER;
		goto init_ctrlq_free_rq;
	}

	return 0;

init_ctrlq_free_rq:
	ice_shutdown_rq(hw, cq);
	ice_shutdown_sq(hw, cq);
	return status;
}

/**
 * ice_init_ctrlq - main initialization routine for any control Queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks
 */
static enum ice_status ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;
	enum ice_status ret_code;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		ice_adminq_init_regs(hw);
		cq = &hw->adminq;
		break;
	case ICE_CTL_Q_MAILBOX:
		ice_mailbox_init_regs(hw);
		cq = &hw->mailboxq;
		break;
	default:
		return ICE_ERR_PARAM;
	}
	cq->qtype = q_type;

	/* verify input for valid configuration */
	if (!cq->num_rq_entries || !cq->num_sq_entries ||
	    !cq->rq_buf_size || !cq->sq_buf_size) {
		return ICE_ERR_CFG;
	}

	/* setup SQ command write back timeout */
	cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT;

	/* allocate the ATQ */
	ret_code = ice_init_sq(hw, cq);
	if (ret_code)
		return ret_code;

	/* allocate the ARQ */
	ret_code = ice_init_rq(hw, cq);
	if (ret_code)
		goto init_ctrlq_free_sq;

	/* success! */
	return 0;

init_ctrlq_free_sq:
	ice_shutdown_sq(hw, cq);
	return ret_code;
}

/**
 * ice_shutdown_ctrlq - shutdown routine for any control queue
 * @hw: pointer to the hardware structure
 * @q_type: specific Control queue type
 *
 * NOTE: this function does not destroy the control queue locks.
 */
static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
{
	struct ice_ctl_q_info *cq;

	switch (q_type) {
	case ICE_CTL_Q_ADMIN:
		cq = &hw->adminq;
		if (ice_check_sq_alive(hw, cq))
			ice_aq_q_shutdown(hw, true);
		break;
	case ICE_CTL_Q_MAILBOX:
		cq = &hw->mailboxq;
		break;
	default:
		return;
	}

	ice_shutdown_sq(hw, cq);
	ice_shutdown_rq(hw, cq);
}

/**
 * ice_shutdown_all_ctrlq - shutdown routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * NOTE: this function does not destroy the control queue locks. The driver
 * may call this at runtime to shutdown and later restart control queues, such
 * as in response to a reset event.
 */
void ice_shutdown_all_ctrlq(struct ice_hw *hw)
{
	/* Shutdown FW admin queue */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
	/* Shutdown PF-VF Mailbox */
	ice_shutdown_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * NOTE: this function does not initialize the controlq locks.
 */
enum ice_status ice_init_all_ctrlq(struct ice_hw *hw)
{
	enum ice_status status;
	u32 retry = 0;

	/* Init FW admin queue */
	do {
		status = ice_init_ctrlq(hw, ICE_CTL_Q_ADMIN);
		if (status)
			return status;

		status = ice_init_check_adminq(hw);
		if (status != ICE_ERR_AQ_FW_CRITICAL)
			break;

		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Retry Admin Queue init due to FW critical error\n");
		ice_shutdown_ctrlq(hw, ICE_CTL_Q_ADMIN);
		msleep(ICE_CTL_Q_ADMIN_INIT_MSEC);
	} while (retry++ < ICE_CTL_Q_ADMIN_INIT_TIMEOUT);

	if (status)
		return status;

	/* Init Mailbox queue */
	return ice_init_ctrlq(hw, ICE_CTL_Q_MAILBOX);
}

/**
 * ice_init_ctrlq_locks - Initialize locks for a control queue
 * @cq: pointer to the control queue
 *
 * Initializes the send and receive queue locks for a given control queue.
 */
static void ice_init_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_init(&cq->sq_lock);
	mutex_init(&cq->rq_lock);
}

/**
 * ice_create_all_ctrlq - main initialization routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * Prior to calling this function, the driver *MUST* set the following fields
 * in the cq->structure for all control queues:
 *     - cq->num_sq_entries
 *     - cq->num_rq_entries
 *     - cq->rq_buf_size
 *     - cq->sq_buf_size
 *
 * This function creates all the control queue locks and then calls
 * ice_init_all_ctrlq. It should be called once during driver load. If the
 * driver needs to re-initialize control queues at run time it should call
 * ice_init_all_ctrlq instead.
 */
enum ice_status ice_create_all_ctrlq(struct ice_hw *hw)
{
	ice_init_ctrlq_locks(&hw->adminq);
	ice_init_ctrlq_locks(&hw->mailboxq);

	return ice_init_all_ctrlq(hw);
}

/**
 * ice_destroy_ctrlq_locks - Destroy locks for a control queue
 * @cq: pointer to the control queue
 *
 * Destroys the send and receive queue locks for a given control queue.
 */
static void ice_destroy_ctrlq_locks(struct ice_ctl_q_info *cq)
{
	mutex_destroy(&cq->sq_lock);
	mutex_destroy(&cq->rq_lock);
}

/**
 * ice_destroy_all_ctrlq - exit routine for all control queues
 * @hw: pointer to the hardware structure
 *
 * This function shuts down all the control queues and then destroys the
 * control queue locks. It should be called once during driver unload. The
 * driver should call ice_shutdown_all_ctrlq if it needs to shut down and
 * reinitialize control queues, such as in response to a reset event.
 */
void ice_destroy_all_ctrlq(struct ice_hw *hw)
{
	/* shut down all the control queues first */
	ice_shutdown_all_ctrlq(hw);

	ice_destroy_ctrlq_locks(&hw->adminq);
	ice_destroy_ctrlq_locks(&hw->mailboxq);
}

/**
 * ice_clean_sq - cleans Admin send queue (ATQ)
 * @hw: pointer to the hardware structure
 * @cq: pointer to the specific Control queue
 *
 * returns the number of free desc
 */
static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	struct ice_ctl_q_ring *sq = &cq->sq;
	u16 ntc = sq->next_to_clean;
	struct ice_sq_cd *details;
	struct ice_aq_desc *desc;

	desc = ICE_CTL_Q_DESC(*sq, ntc);
	details = ICE_CTL_Q_DETAILS(*sq, ntc);

	while (rd32(hw, cq->sq.head) != ntc) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "ntc %d head %d.\n", ntc, rd32(hw, cq->sq.head));
		memset(desc, 0, sizeof(*desc));
		memset(details, 0, sizeof(*details));
		ntc++;
		if (ntc == sq->count)
			ntc = 0;
		desc = ICE_CTL_Q_DESC(*sq, ntc);
		details = ICE_CTL_Q_DETAILS(*sq, ntc);
	}

	sq->next_to_clean = ntc;

	return ICE_CTL_Q_DESC_UNUSED(sq);
}

/**
 * ice_debug_cq - dump control queue descriptor
 * @hw: pointer to the hardware structure
 * @desc: pointer to control queue descriptor
 * @buf: pointer to command buffer
 * @buf_len: max length of buf
 *
 * Dumps debug log about control command with descriptor contents.
 */
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
	struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
	u16 len;

	if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
	    !((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
		return;

	if (!desc)
		return;

	len = le16_to_cpu(cq_desc->datalen);

	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
		  le16_to_cpu(cq_desc->opcode),
		  le16_to_cpu(cq_desc->flags),
		  le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->cookie_high),
		  le32_to_cpu(cq_desc->cookie_low));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.param0),
		  le32_to_cpu(cq_desc->params.generic.param1));
	ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
		  le32_to_cpu(cq_desc->params.generic.addr_high),
		  le32_to_cpu(cq_desc->params.generic.addr_low));
	if (buf && cq_desc->datalen != 0) {
		ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
		if (buf_len < len)
			len = buf_len;

		ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len);
	}
}

/**
 * ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 *
 * Returns true if the firmware has processed all descriptors on the
 * admin send queue. Returns false if there are still requests pending.
 */
static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
{
	/* AQ designers suggest use of head for better
	 * timing reliability than DD bit
	 */
	return rd32(hw, cq->sq.head) == cq->sq.next_to_use;
}

/**
 * ice_sq_send_cmd - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command (non DMA mem)
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * This is the main send command routine for the ATQ. It runs the queue,
 * cleans the queue, etc.
 */
enum ice_status
ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		struct ice_aq_desc *desc, void *buf, u16 buf_size,
		struct ice_sq_cd *cd)
{
	struct ice_dma_mem *dma_buf = NULL;
	struct ice_aq_desc *desc_on_ring;
	bool cmd_completed = false;
	enum ice_status status = 0;
	struct ice_sq_cd *details;
	u32 total_delay = 0;
	u16 retval = 0;
	u32 val = 0;

	/* if reset is in progress return a soft error */
	if (hw->reset_ongoing)
		return ICE_ERR_RESET_ONGOING;
	mutex_lock(&cq->sq_lock);

	cq->sq_last_status = ICE_AQ_RC_OK;

	if (!cq->sq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Send queue not initialized.\n");
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	if ((buf && !buf_size) || (!buf && buf_size)) {
		status = ICE_ERR_PARAM;
		goto sq_send_command_error;
	}

	if (buf) {
		if (buf_size > cq->sq_buf_size) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Invalid buffer size for Control Send queue: %d.\n",
				  buf_size);
			status = ICE_ERR_INVAL_SIZE;
			goto sq_send_command_error;
		}

		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_BUF);
		if (buf_size > ICE_AQ_LG_BUF)
			desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	}

	val = rd32(hw, cq->sq.head);
	if (val >= cq->num_sq_entries) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "head overrun at %d in the Control Send Queue ring\n",
			  val);
		status = ICE_ERR_AQ_EMPTY;
		goto sq_send_command_error;
	}

	details = ICE_CTL_Q_DETAILS(cq->sq, cq->sq.next_to_use);
	if (cd)
		*details = *cd;
	else
		memset(details, 0, sizeof(*details));

	/* Call clean and check queue available function to reclaim the
	 * descriptors that were processed by FW/MBX; the function returns the
	 * number of desc available. The clean function called here could be
	 * called in a separate thread in case of asynchronous completions.
	 */
	if (ice_clean_sq(hw, cq) == 0) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Error: Control Send Queue is full.\n");
		status = ICE_ERR_AQ_FULL;
		goto sq_send_command_error;
	}

	/* initialize the temp desc pointer with the right desc */
	desc_on_ring = ICE_CTL_Q_DESC(cq->sq, cq->sq.next_to_use);

	/* if the desc is available copy the temp desc to the right place */
	memcpy(desc_on_ring, desc, sizeof(*desc_on_ring));

	/* if buf is not NULL assume indirect command */
	if (buf) {
		dma_buf = &cq->sq.r.sq_bi[cq->sq.next_to_use];
		/* copy the user buf into the respective DMA buf */
		memcpy(dma_buf->va, buf, buf_size);
		desc_on_ring->datalen = cpu_to_le16(buf_size);

		/* Update the address values in the desc with the pa value
		 * for respective buffer
		 */
		desc_on_ring->params.generic.addr_high =
			cpu_to_le32(upper_32_bits(dma_buf->pa));
		desc_on_ring->params.generic.addr_low =
			cpu_to_le32(lower_32_bits(dma_buf->pa));
	}

	/* Debug desc and buffer */
	ice_debug(hw, ICE_DBG_AQ_DESC,
		  "ATQ: Control Send queue desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);

	(cq->sq.next_to_use)++;
	if (cq->sq.next_to_use == cq->sq.count)
		cq->sq.next_to_use = 0;
	wr32(hw, cq->sq.tail, cq->sq.next_to_use);
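
	/* Poll until the firmware-written SQ head catches up with next_to_use
	 * (see ice_sq_done()) or the configured command timeout expires,
	 * waiting ICE_CTL_Q_SQ_CMD_USEC between reads.
	 */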
	do {
		if (ice_sq_done(hw, cq))
			break;

		udelay(ICE_CTL_Q_SQ_CMD_USEC);
		total_delay++;
	} while (total_delay < cq->sq_cmd_timeout);

	/* if ready, copy the desc back to temp */
	if (ice_sq_done(hw, cq)) {
		memcpy(desc, desc_on_ring, sizeof(*desc));
		if (buf) {
			/* get returned length to copy */
			u16 copy_size = le16_to_cpu(desc->datalen);

			if (copy_size > buf_size) {
				ice_debug(hw, ICE_DBG_AQ_MSG,
					  "Return len %d > than buf len %d\n",
					  copy_size, buf_size);
				status = ICE_ERR_AQ_ERROR;
			} else {
				memcpy(buf, dma_buf->va, copy_size);
			}
		}
		retval = le16_to_cpu(desc->retval);
		if (retval) {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue command 0x%04X completed with error 0x%X\n",
				  le16_to_cpu(desc->opcode),
				  retval);

			/* strip off FW internal code */
			retval &= 0xff;
		}
		cmd_completed = true;
		if (!status && retval != ICE_AQ_RC_OK)
			status = ICE_ERR_AQ_ERROR;
		cq->sq_last_status = (enum ice_aq_err)retval;
	}

	ice_debug(hw, ICE_DBG_AQ_MSG,
		  "ATQ: desc and buffer writeback:\n");

	ice_debug_cq(hw, (void *)desc, buf, buf_size);

	/* save writeback AQ if requested */
	if (details->wb_desc)
		memcpy(details->wb_desc, desc_on_ring,
		       sizeof(*details->wb_desc));

	/* update the error if time out occurred */
	if (!cmd_completed) {
		if (rd32(hw, cq->rq.len) & cq->rq.len_crit_mask ||
		    rd32(hw, cq->sq.len) & cq->sq.len_crit_mask) {
			ice_debug(hw, ICE_DBG_AQ_MSG, "Critical FW error.\n");
			status = ICE_ERR_AQ_FW_CRITICAL;
		} else {
			ice_debug(hw, ICE_DBG_AQ_MSG,
				  "Control Send Queue Writeback timeout.\n");
			status = ICE_ERR_AQ_TIMEOUT;
		}
	}

sq_send_command_error:
	mutex_unlock(&cq->sq_lock);
	return status;
}

/**
 * ice_fill_dflt_direct_cmd_desc - AQ descriptor helper function
 * @desc: pointer to the temp descriptor (non DMA mem)
 * @opcode: the opcode can be used to decide which flags to turn off or on
 *
 * Fill the desc with default values
 */
void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode)
{
	/* zero out the desc */
	memset(desc, 0, sizeof(*desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flags = cpu_to_le16(ICE_AQ_FLAG_SI);
}
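
/* Example (sketch, not part of the original file): a typical direct command
 * fills a stack descriptor with ice_fill_dflt_direct_cmd_desc() and sends it
 * on the AdminQ with no data buffer:
 *
 *	struct ice_aq_desc desc;
 *
 *	ice_fill_dflt_direct_cmd_desc(&desc, opcode);
 *	status = ice_sq_send_cmd(hw, &hw->adminq, &desc, NULL, 0, NULL);
 */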

/**
 * ice_clean_rq_elem - clean one Control Receive Queue element
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @e: event info from the receive descriptor, includes any buffers
 * @pending: number of events that could be left to process
 *
 * This function cleans one Admin Receive Queue element and returns
 * the contents through e. It can also return how many events are
 * left to process through 'pending'.
 */
enum ice_status
ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		  struct ice_rq_event_info *e, u16 *pending)
{
	u16 ntc = cq->rq.next_to_clean;
	enum ice_status ret_code = 0;
	struct ice_aq_desc *desc;
	struct ice_dma_mem *bi;
	u16 desc_idx;
	u16 datalen;
	u16 flags;
	u16 ntu;

	/* pre-clean the event info */
	memset(&e->desc, 0, sizeof(e->desc));

	/* take the lock before we start messing with the ring */
	mutex_lock(&cq->rq_lock);

	if (!cq->rq.count) {
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive queue not initialized.\n");
		ret_code = ICE_ERR_AQ_EMPTY;
		goto clean_rq_elem_err;
	}

	/* set next_to_use to head */
	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);

	if (ntu == ntc) {
		/* nothing to do - shouldn't need to update ring's values */
		ret_code = ICE_ERR_AQ_NO_WORK;
		goto clean_rq_elem_out;
	}

	/* now clean the next descriptor */
	desc = ICE_CTL_Q_DESC(cq->rq, ntc);
	desc_idx = ntc;

	cq->rq_last_status = (enum ice_aq_err)le16_to_cpu(desc->retval);
	flags = le16_to_cpu(desc->flags);
	if (flags & ICE_AQ_FLAG_ERR) {
		ret_code = ICE_ERR_AQ_ERROR;
		ice_debug(hw, ICE_DBG_AQ_MSG,
			  "Control Receive Queue Event 0x%04X received with error 0x%X\n",
			  le16_to_cpu(desc->opcode),
			  cq->rq_last_status);
	}
	memcpy(&e->desc, desc, sizeof(e->desc));
	datalen = le16_to_cpu(desc->datalen);
	e->msg_len = min_t(u16, datalen, e->buf_len);
	if (e->msg_buf && e->msg_len)
		memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);

	ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");

	ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);

	/* Restore the original datalen and buffer address in the desc,
	 * FW updates datalen to indicate the event message size
	 */
	bi = &cq->rq.r.rq_bi[ntc];
	memset(desc, 0, sizeof(*desc));

	desc->flags = cpu_to_le16(ICE_AQ_FLAG_BUF);
	if (cq->rq_buf_size > ICE_AQ_LG_BUF)
		desc->flags |= cpu_to_le16(ICE_AQ_FLAG_LB);
	desc->datalen = cpu_to_le16(bi->size);
	desc->params.generic.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
	desc->params.generic.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

	/* set tail = the last cleaned desc index. */
	wr32(hw, cq->rq.tail, ntc);
	/* ntc is updated to tail + 1 */
	ntc++;
	if (ntc == cq->num_rq_entries)
		ntc = 0;
	cq->rq.next_to_clean = ntc;
	cq->rq.next_to_use = ntu;

clean_rq_elem_out:
	/* Set pending if needed, unlock and return */
	if (pending) {
		/* re-read HW head to calculate actual pending messages */
		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
	}
clean_rq_elem_err:
	mutex_unlock(&cq->rq_lock);

	return ret_code;
}