/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ena_com.h"

/*****************************************************************************/
/*****************************************************************************/

/* Timeout in micro-sec */
#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define MIN_ENA_VER (((ENA_COMMON_SPEC_VERSION_MAJOR) << \
		ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) \
		| (ENA_COMMON_SPEC_VERSION_MINOR))

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_REGS_ADMIN_INTR_MASK 1

/*****************************************************************************/
/*****************************************************************************/
/*****************************************************************************/

enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};

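/* Note on the structures above: the admin queue submits commands through a
 * DMA-coherent submission queue (SQ) and receives responses on a completion
 * queue (CQ). Each in-flight command is tracked by a struct ena_comp_ctx,
 * indexed by the command id that the device echoes back in the completion
 * descriptor.
 */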
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				       struct ena_common_mem_addr *ena_addr,
				       dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	sq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
					  GFP_KERNEL);

	if (!sq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	cq->entries = dma_zalloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
					  GFP_KERNEL);

	if (!cq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_zalloc_coherent(dev->dmadev, size, &aenq->dma_addr,
					    GFP_KERNEL);

	if (!aenq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
				     struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(!queue->comp_ctx)) {
		pr_err("Completion context is NULL\n");
		return NULL;
	}

	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}

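/* Submit a command to the admin SQ. Must be called with the queue lock held
 * (see ena_com_submit_admin_cmd below). The SQ is a contiguous ring: the
 * producer phase bit is flipped every time the tail wraps, which lets the
 * device distinguish new descriptors from stale ones, and the doorbell write
 * at the end notifies the device of the new tail.
 */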
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		pr_debug("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}

static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (unlikely(IS_ERR(comp_ctx)))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}

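/* Allocate the IO SQ descriptor ring. The first allocation attempt is made
 * on the requested NUMA node (by temporarily overriding the device node);
 * if that fails, a second attempt falls back to any node. Host placement
 * uses DMA-coherent memory, while device placement (LLQ) keeps descriptors
 * in device memory and only needs a host-side staging buffer, hence the
 * devm_kzalloc() path.
 */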
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->dma_addr_bits = ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_sq->desc_addr.phys_addr,
					    GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_zalloc_coherent(ena_dev->dmadev, size,
						    &io_sq->desc_addr.phys_addr,
						    GFP_KERNEL);
		}
	} else {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		}
	}

	if (!io_sq->desc_addr.virt_addr) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, size,
				    &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_zalloc_coherent(ena_dev->dmadev, size,
					    &io_cq->cdesc_addr.phys_addr,
					    GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}

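/* Drain the admin CQ. Ownership of a completion entry is indicated by its
 * phase bit: the device writes entries with the current phase, and the
 * driver consumes entries until the phase bit no longer matches, flipping
 * its expected phase each time the head wraps around the ring.
 */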
static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("admin command failed[%u]\n", comp_status);

	if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
		return -EINVAL;

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	}

	return 0;
}

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags, timeout;
	int ret;

	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);

	while (1) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			pr_err("Wait for completion (polling) timeout\n");
			/* ENA didn't have any completion */
			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		msleep(100);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		pr_err("Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

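/* Interrupt-driven variant of the wait above: sleep on the completion
 * object until the MSI-X handler (ena_com_admin_q_comp_intr_handler ->
 * ena_com_handle_admin_completion) marks the command done, then classify
 * any timeout as either a missing completion or a missing interrupt.
 */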
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(
					    admin_queue->completion_timeout));

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the device didn't get any msi-x interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED)
			pr_err("The ena device has a completion but the driver didn't receive a MSI-X interrupt (cmd %d)\n",
			       comp_ctx->cmd_opcode);
		else
			pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);

		admin_queue->running_state = false;
		ret = -ETIME;
		goto err;
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

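/* Note on "readless" register access: a read request is posted to the
 * device through the ENA_REGS_MMIO_REG_READ register together with a
 * sequence number, and the device DMAs the register value back into
 * read_resp. Seeding req_id with seq_num + 0xDEAD poisons the response
 * buffer so a stale response can never match the new sequence number.
 */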
/* This method reads a hardware device register through posted writes
 * and waiting for a response.
 * On timeout the function will return ENA_MMIO_READ_TIMEOUT
 */
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags;
	u32 timeout = mmio_read->reg_read_to;

	might_sleep();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	/* make sure read_resp->req_id gets updated before the hw can write
	 * to it
	 */
	wmb();

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == timeout)) {
		pr_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}

/* There are two ways to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST)
			dma_free_coherent(ena_dev->dmadev, size,
					  io_sq->desc_addr.virt_addr,
					  io_sq->desc_addr.phys_addr);
		else
			devm_kfree(ena_dev->dmadev, io_sq->desc_addr.virt_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
			exp_state)
			return 0;

		/* The resolution of the timeout is 100ms */
		msleep(100);
	}

	return -ETIME;
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes are always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		pr_debug("Feature %d isn't supported\n", feature_id);
		return -EOPNOTSUPP;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;

	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to submit get_feature command %d error: %d\n",
		       feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0);
}

static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		(ena_dev->rss).hash_key;

	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
	/* The key is stored in the device as an array of u32 elements, and
	 * the API expects the key to be passed in the same format, so the
	 * byte size of our array is divided by 4.
	 */
	hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
}

int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
{
	return ena_dev->rss.hash_func;
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_key =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				    &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_zalloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				    &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("indirect table size doesn't fit. requested size: %d while min is %d and max is %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_zalloc_coherent(ena_dev->dmadev, tbl_size,
				    &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}

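/* The create/destroy IO queue helpers below are thin wrappers that build an
 * admin command descriptor, execute it synchronously through
 * ena_com_execute_admin_command(), and translate the completion into the
 * io_sq/io_cq bookkeeping (queue index, doorbell and LLQ offsets).
 */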
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
	u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
	struct ena_rss *rss = &ena_dev->rss;
	u8 idx;
	u16 i;

	for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
		dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;
		idx = (u8)rss->rss_ind_tbl[i].cq_idx;

		if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
	}

	return 0;
}

static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	size_t size;

	size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

	ena_dev->intr_moder_tbl =
		devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
	if (!ena_dev->intr_moder_tbl)
		return -ENOMEM;

	ena_com_config_default_interrupt_moderation_table(ena_dev);

	return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
	unsigned int i;

	if (!intr_delay_resolution) {
		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = 1;
	}
	ena_dev->intr_delay_resolution = intr_delay_resolution;

	/* update Rx */
	for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
		intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}

/*****************************************************************************/
/*******************************      API       ******************************/
/*****************************************************************************/

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (unlikely(IS_ERR(comp_ctx))) {
		if (comp_ctx == ERR_PTR(-ENODEV))
			pr_debug("Failed to submit command [%ld]\n",
				 PTR_ERR(comp_ctx));
		else
			pr_err("Failed to submit command [%ld]\n",
			       PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			pr_err("Failed to process command. ret = %d\n", ret);
		else
			pr_debug("Failed to process command. ret = %d\n", ret);
	}
	return ret;
}

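/* For reference, a minimal sketch of how callers in this file use
 * ena_com_execute_admin_command() (see e.g. ena_com_set_dev_mtu() below);
 * the command/response layouts come from the admin defs header:
 *
 *	struct ena_admin_set_feat_cmd cmd;
 *	struct ena_admin_set_feat_resp resp;
 *
 *	memset(&cmd, 0x0, sizeof(cmd));
 *	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 *	ret = ena_com_execute_admin_command(admin_queue,
 *					    (struct ena_admin_aq_entry *)&cmd,
 *					    sizeof(cmd),
 *					    (struct ena_admin_acq_entry *)&resp,
 *					    sizeof(resp));
 */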
int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d; the max is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		msleep(20);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: %x asked flag: %x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * as high as the driver expects
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	if (ver < MIN_ENA_VER) {
		pr_err("ENA version is lower than the minimal version the driver supports\n");
		return -1;
	}

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_zalloc_coherent(ena_dev->dmadev,
				    sizeof(*mmio_read->read_resp),
				    &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		return -ENOMEM;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

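/* Bring-up sequence for the admin queue: verify the device is ready,
 * allocate the completion contexts and the SQ/CQ rings, program the ring
 * base addresses and the AQ/ACQ capability registers (depth + entry size),
 * and finally initialize the AENQ. Any failure unwinds through
 * ena_com_admin_destroy().
 */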
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers,
		       bool init_spinlock)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	if (init_spinlock)
		spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			min_t(u32, ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

int ena_com_get_link_params(struct ena_com_dev *ena_dev,
			    struct ena_admin_get_feat_resp *resp)
{
	return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
}

int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
			      struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_DEVICE_ATTRIBUTES);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
	       sizeof(get_resp.u.dev_attr));
	ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_MAX_QUEUES_NUM);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
	       sizeof(get_resp.u.max_queue));
	ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_AENQ_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
	       sizeof(get_resp.u.aenq));

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (rc)
		return rc;

	memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
	       sizeof(get_resp.u.offload));

	/* Driver hints isn't a mandatory admin command, so in case the
	 * command isn't supported set driver hints to 0
	 */
	rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);

	if (!rc)
		memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
		       sizeof(get_resp.u.hw_hints));
	else if (rc == -EOPNOTSUPP)
		memset(&get_feat_ctx->hw_hints, 0x0,
		       sizeof(get_feat_ctx->hw_hints));
	else
		return rc;

	return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
	ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

/* ena_handle_specific_aenq_event:
 * return the handler that is relevant to the specific event group
 */
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
						     u16 group)
{
	struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

	if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
		return aenq_handlers->handlers[group];

	return aenq_handlers->unimplemented_handler;
}

/* ena_aenq_intr_handler:
 * handles the aenq incoming events.
 * pop events from the queue and apply the specific handler
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
	struct ena_admin_aenq_entry *aenq_e;
	struct ena_admin_aenq_common_desc *aenq_common;
	struct ena_com_aenq *aenq = &dev->aenq;
	ena_aenq_handler handler_cb;
	u16 masked_head, processed = 0;
	u8 phase;

	masked_head = aenq->head & (aenq->q_depth - 1);
	phase = aenq->phase;
	aenq_e = &aenq->entries[masked_head]; /* Get first entry */
	aenq_common = &aenq_e->aenq_common_desc;

	/* Go over all the events */
	while ((READ_ONCE(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom,
			 (u64)aenq_common->timestamp_low +
				 ((u64)aenq_common->timestamp_high << 32));

		/* Handle specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e); /* call the actual event handler */

		/* Get next event entry */
		masked_head++;
		processed++;

		if (unlikely(masked_head == aenq->q_depth)) {
			masked_head = 0;
			phase = !phase;
		}
		aenq_e = &aenq->entries[masked_head];
		aenq_common = &aenq_e->aenq_common_desc;
	}

	aenq->head += processed;
	aenq->phase = phase;

	/* Don't update aenq doorbell if there weren't any processed events */
	if (!processed)
		return;

	/* write the aenq doorbell after all AENQ descriptors were read */
	mb();
	writel((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

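/* Device reset handshake: set the reset bit (with the reset reason encoded
 * in the same register), wait for the device to acknowledge by raising
 * RESET_IN_PROGRESS, clear the bit, and wait for the acknowledgment to
 * drop. The admin command timeout is then re-derived from the capability
 * register (100ms resolution), falling back to ADMIN_CMD_TIMEOUT_US.
 */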
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
		      enum ena_regs_reset_reason_types reset_reason)
{
	u32 stat, timeout, cap, reset_val;
	int rc;

	stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
	cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

	if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
		     (cap == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read32 timeout occurred\n");
		return -ETIME;
	}

	if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
		pr_err("Device isn't ready, can't reset device\n");
		return -EINVAL;
	}

	timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
		ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
	if (timeout == 0) {
		pr_err("Invalid timeout value\n");
		return -EINVAL;
	}

	/* start reset */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	rc = wait_for_reset_state(ena_dev, timeout,
				  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
	if (rc != 0) {
		pr_err("Reset indication didn't turn on\n");
		return rc;
	}

	/* reset done */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
	rc = wait_for_reset_state(ena_dev, timeout, 0);
	if (rc != 0) {
		pr_err("Reset indication didn't turn off\n");
		return rc;
	}

	timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
		ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

	return 0;
}

static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
			     struct ena_com_stats_ctx *ctx,
			     enum ena_admin_get_stats_type type)
{
	struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
	struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
	struct ena_com_admin_queue *admin_queue;
	int ret;

	admin_queue = &ena_dev->admin_queue;

	get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
	get_cmd->aq_common_descriptor.flags = 0;
	get_cmd->type = type;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)get_cmd,
					    sizeof(*get_cmd),
					    (struct ena_admin_acq_entry *)get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to get stats. error: %d\n", ret);

	return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
				struct ena_admin_basic_stats *stats)
{
	struct ena_com_stats_ctx ctx;
	int ret;

	memset(&ctx, 0x0, sizeof(ctx));
	ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
	if (likely(ret == 0))
		memcpy(stats, &ctx.get_resp.basic_stats,
		       sizeof(ctx.get_resp.basic_stats));

	return ret;
}

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
		pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_MTU;
	cmd.u.mtu.mtu = mtu;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);

	return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
				 struct ena_admin_feature_offload_desc *offload)
{
	int ret;
	struct ena_admin_get_feat_resp resp;

	ret = ena_com_get_feature(ena_dev, &resp,
				  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
	if (unlikely(ret)) {
		pr_err("Failed to get offload capabilities %d\n", ret);
		return ret;
	}

	memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

	return 0;
}

int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_FUNCTION);
		return -EOPNOTSUPP;
	}

	/* Validate hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
	if (unlikely(ret))
		return ret;

	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
	cmd.u.flow_hash_func.init_val = rss->hash_init_val;
	cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_key_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = sizeof(*rss->hash_key);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret)) {
		pr_err("Failed to set hash function %d. error: %d\n",
		       rss->hash_func, ret);
		return -EINVAL;
	}

	return 0;
}

int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
			       enum ena_admin_hash_functions func,
			       const u8 *key, u16 key_len, u32 init_val)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	/* Make sure size is a mult of DWs */
	if (unlikely(key_len & 0x3))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
		pr_err("Flow hash function %d isn't supported\n", func);
		return -EOPNOTSUPP;
	}

	switch (func) {
	case ENA_ADMIN_TOEPLITZ:
		if (key) {
			if (key_len != sizeof(hash_key->key)) {
				pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
				       key_len, sizeof(hash_key->key));
				return -EINVAL;
			}
			memcpy(hash_key->key, key, key_len);
			rss->hash_init_val = init_val;
			hash_key->keys_num = key_len >> 2;
		}
		break;
	case ENA_ADMIN_CRC32:
		rss->hash_init_val = init_val;
		break;
	default:
		pr_err("Invalid hash function (%d)\n", func);
		return -EINVAL;
	}

	rss->hash_func = func;
	rc = ena_com_set_hash_function(ena_dev);

	/* Restore the old function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);

	return rc;
}

int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
			      enum ena_admin_hash_functions *func,
			      u8 *key)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		rss->hash_key;
	int rc;

	if (unlikely(!func))
		return -EINVAL;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_FUNCTION,
				    rss->hash_key_dma_addr,
				    sizeof(*rss->hash_key));
	if (unlikely(rc))
		return rc;

	/* ffs() returns 1 in case the lsb is set */
	rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
	if (rss->hash_func)
		rss->hash_func--;

	*func = rss->hash_func;

	if (key)
		memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

	return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
			  enum ena_admin_flow_hash_proto proto,
			  u16 *fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	int rc;

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_HASH_INPUT,
				    rss->hash_ctrl_dma_addr,
				    sizeof(*rss->hash_ctrl));
	if (unlikely(rc))
		return rc;

	if (fields)
		*fields = rss->hash_ctrl->selected_fields[proto].fields;

	return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_INPUT)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_HASH_INPUT);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
	cmd.u.flow_hash_input.enabled_input_sort =
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
		ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->hash_ctrl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}
	cmd.control_buffer.length = sizeof(*hash_ctrl);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));
	if (unlikely(ret))
		pr_err("Failed to set hash input. error: %d\n", ret);

	return ret;
}

int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl =
		rss->hash_ctrl;
	u16 available_fields = 0;
	int rc, i;

	/* Get the supported hash input */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
	if (unlikely(rc))
		return rc;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
		ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
		ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

	hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
		ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

	for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
		available_fields = hash_ctrl->selected_fields[i].fields &
				   hash_ctrl->supported_fields[i].fields;
		if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
			return -EOPNOTSUPP;
		}
	}

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
			   enum ena_admin_flow_hash_proto proto,
			   u16 hash_fields)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
	u16 supported_fields;
	int rc;

	if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
		pr_err("Invalid proto num (%u)\n", proto);
		return -EINVAL;
	}

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
	if (unlikely(rc))
		return rc;

	/* Make sure all the fields are supported */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}

	hash_ctrl->selected_fields[proto].fields = hash_fields;

	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
}

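/* Usage sketch (editor's addition): selecting a 4-tuple hash for UDP4,
 * equivalent in spirit to "ethtool -N <if> rx-flow-hash udp4 sdfn".
 *
 *	rc = ena_com_fill_hash_ctrl(ena_dev, ENA_ADMIN_RSS_UDP4,
 *				    ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
 *				    ENA_ADMIN_RSS_L4_SP | ENA_ADMIN_RSS_L4_DP);
 */
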
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
				      u16 entry_idx, u16 entry_value)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
		return -EINVAL;

	if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
		return -EINVAL;

	rss->host_rss_ind_tbl[entry_idx] = entry_value;

	return 0;
}

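/* Fill sketch (editor's addition): spreading the table round-robin across
 * the RX queues. tbl_log_size is the value the caller passed to
 * ena_com_rss_init(); num_queues is a caller-side assumption.
 *
 *	for (i = 0; i < (1 << tbl_log_size); i++) {
 *		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
 *						       i % num_queues);
 *		if (unlikely(rc))
 *			break;
 *	}
 */
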
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	if (!ena_com_check_supported_feature_id(
		    ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
		pr_debug("Feature %d isn't supported\n",
			 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
		return -EOPNOTSUPP;
	}

	ret = ena_com_ind_tbl_convert_to_device(ena_dev);
	if (ret) {
		pr_err("Failed to convert host indirection table to device table\n");
		return ret;
	}

	memset(&cmd, 0x0, sizeof(cmd));

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags =
		ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
	cmd.u.ind_table.size = rss->tbl_log_size;
	cmd.u.ind_table.inline_index = 0xFFFFFFFF;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.control_buffer.address,
				   rss->rss_ind_tbl_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set indirect table. error: %d\n", ret);

	return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	u32 tbl_size;
	int i, rc;

	tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rc = ena_com_get_feature_ex(ena_dev, &get_resp,
				    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
				    rss->rss_ind_tbl_dma_addr,
				    tbl_size);
	if (unlikely(rc))
		return rc;

	if (!ind_tbl)
		return 0;

	rc = ena_com_ind_tbl_convert_from_device(ena_dev);
	if (unlikely(rc))
		return rc;

	for (i = 0; i < (1 << rss->tbl_log_size); i++)
		ind_tbl[i] = rss->host_rss_ind_tbl[i];

	return 0;
}

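/* Readback sketch (editor's addition), e.g. for an ethtool .get_rxfh
 * handler; indir must hold (1 << tbl_log_size) u32 entries.
 *
 *	rc = ena_com_indirect_table_get(ena_dev, indir);
 */
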
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
	int rc;

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

	rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
	if (unlikely(rc))
		goto err_indr_tbl;

	rc = ena_com_hash_key_allocate(ena_dev);
	if (unlikely(rc))
		goto err_hash_key;

	ena_com_hash_key_fill_default_key(ena_dev);

	rc = ena_com_hash_ctrl_init(ena_dev);
	if (unlikely(rc))
		goto err_hash_ctrl;

	return 0;

err_hash_ctrl:
	ena_com_hash_key_destroy(ena_dev);
err_hash_key:
	ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:

	return rc;
}

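/* Bring-up sketch (editor's addition): the order a driver would typically
 * follow. ENA_DEFAULT_RSS_TABLE_LOG_SIZE is an assumed caller-side
 * constant, and the table-fill step is elided.
 *
 *	rc = ena_com_rss_init(ena_dev, ENA_DEFAULT_RSS_TABLE_LOG_SIZE);
 *	...fill the indirection table entries...
 *	rc = ena_com_set_default_hash_ctrl(ena_dev);
 *	rc = ena_com_indirect_table_set(ena_dev);
 */
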
void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
	ena_com_indirect_table_destroy(ena_dev);
	ena_com_hash_key_destroy(ena_dev);
	ena_com_hash_ctrl_destroy(ena_dev);

	memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->host_info =
		dma_zalloc_coherent(ena_dev->dmadev, SZ_4K,
				    &host_attr->host_info_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->host_info))
		return -ENOMEM;

	return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
				u32 debug_area_size)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	host_attr->debug_area_virt_addr =
		dma_zalloc_coherent(ena_dev->dmadev, debug_area_size,
				    &host_attr->debug_area_dma_addr, GFP_KERNEL);
	if (unlikely(!host_attr->debug_area_virt_addr)) {
		host_attr->debug_area_size = 0;
		return -ENOMEM;
	}

	host_attr->debug_area_size = debug_area_size;

	return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->host_info) {
		dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
				  host_attr->host_info_dma_addr);
		host_attr->host_info = NULL;
	}
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;

	if (host_attr->debug_area_virt_addr) {
		dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
				  host_attr->debug_area_virt_addr,
				  host_attr->debug_area_dma_addr);
		host_attr->debug_area_virt_addr = NULL;
	}
}

int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
	struct ena_host_attribute *host_attr = &ena_dev->host_attr;
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat
	 * so ena_com can't check if the feature is supported.
	 */

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.debug_ba,
				   host_attr->debug_area_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_mem_addr_set(ena_dev,
				   &cmd.u.host_attr.os_info_ba,
				   host_attr->host_info_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set host attributes: %d\n", ret);

	return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
	return ena_com_check_supported_feature_id(ena_dev,
						  ENA_ADMIN_INTERRUPT_MODERATION);
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
						      u32 tx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
		ena_dev->intr_delay_resolution;

	return 0;
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
						      u32 rx_coalesce_usecs)
{
	if (!ena_dev->intr_delay_resolution) {
		pr_err("Illegal interrupt delay granularity value\n");
		return -EFAULT;
	}

	/* We use the LOWEST entry of the moderation table for storing the
	 * nonadaptive interrupt coalescing values
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;

	return 0;
}

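/* Worked example (editor's note): with intr_delay_resolution == 4, i.e.
 * one device unit equals 4 usecs, a request of rx_coalesce_usecs == 64
 * stores 64 / 4 == 16 in the LOWEST entry; the nonadaptive getter below
 * returns that stored device-unit value unchanged.
 */
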
void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	if (ena_dev->intr_moder_tbl)
		devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
	ena_dev->intr_moder_tbl = NULL;
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
	struct ena_admin_get_feat_resp get_resp;
	u16 delay_resolution;
	int rc;

	rc = ena_com_get_feature(ena_dev, &get_resp,
				 ENA_ADMIN_INTERRUPT_MODERATION);

	if (rc) {
		if (rc == -EOPNOTSUPP) {
			pr_debug("Feature %d isn't supported\n",
				 ENA_ADMIN_INTERRUPT_MODERATION);
			rc = 0;
		} else {
			pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
			       rc);
		}

		/* no moderation supported, disable adaptive support */
		ena_com_disable_adaptive_moderation(ena_dev);
		return rc;
	}

	rc = ena_com_init_interrupt_moderation_table(ena_dev);
	if (rc)
		goto err;

	/* if moderation is supported by the device, enable adaptive moderation */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
	ena_com_enable_adaptive_moderation(ena_dev);

	return 0;
err:
	ena_com_destroy_interrupt_moderation(ena_dev);

	return rc;
}

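/* Probe-path sketch (editor's addition): a single call suffices, since
 * -EOPNOTSUPP from the device is converted to success above and adaptive
 * moderation is simply left disabled.
 *
 *	rc = ena_com_init_interrupt_moderation(ena_dev);
 *	if (rc)
 *		goto err_moderation;	(hypothetical unwind label)
 */
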
void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (!intr_moder_tbl)
		return;

	intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		ENA_INTR_LOWEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
		ENA_INTR_LOWEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
		ENA_INTR_LOWEST_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
		ENA_INTR_LOW_USECS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
		ENA_INTR_LOW_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
		ENA_INTR_LOW_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
		ENA_INTR_MID_USECS;
	intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
		ENA_INTR_MID_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
		ENA_INTR_MID_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
		ENA_INTR_HIGH_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
		ENA_INTR_HIGH_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
		ENA_INTR_HIGH_BYTES;

	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
		ENA_INTR_HIGHEST_USECS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
		ENA_INTR_HIGHEST_PKTS;
	intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
		ENA_INTR_HIGHEST_BYTES;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
	return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (intr_moder_tbl)
		return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;

	return 0;
}

void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
					enum ena_intr_moder_level level,
					struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		intr_moder_tbl[level].intr_moder_interval /=
			ena_dev->intr_delay_resolution;
	intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* use hardcoded value until ethtool supports bytecount parameter */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
}

void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
				       enum ena_intr_moder_level level,
				       struct ena_intr_moder_entry *entry)
{
	struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;

	if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
		return;

	entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
	if (ena_dev->intr_delay_resolution)
		entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
	entry->pkts_per_interval =
		intr_moder_tbl[level].pkts_per_interval;
	entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
}
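/* Tuning sketch (editor's addition): overriding one adaptive level. The
 * numeric values are illustrative only; the interval is given in usecs
 * and scaled to device units by the setter above.
 *
 *	struct ena_intr_moder_entry entry = {
 *		.intr_moder_interval = 64,
 *		.pkts_per_interval = 128,
 *		.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED,
 *	};
 *
 *	ena_com_init_intr_moderation_entry(ena_dev, ENA_INTR_MODER_MID,
 *					   &entry);
 */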