/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
10 #include <linux/dma-mapping.h>
11 #include <linux/slab.h>
12 #include <linux/pci.h>
13 #include <linux/device.h>
14 #include <linux/err.h>
15 #include <linux/dma-direction.h>
16 #include "hclge_cmd.h"
18 #include "hclge_main.h"
/* True when @ring is the command send queue (CSQ); the only other
 * ring type is the command receive queue (CRQ).
 */
#define hclge_is_csq(ring) ((ring)->flag & HCLGE_TYPE_CSQ)
/* DMA direction for a ring's descriptors: the CSQ carries commands to
 * the device, the CRQ carries events from it.
 */
#define hclge_ring_to_dma_dir(ring) (hclge_is_csq(ring) ? \
	DMA_TO_DEVICE : DMA_FROM_DEVICE)
/* struct device backing a cmq ring, as needed by the DMA mapping API. */
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
25 static int hclge_ring_space(struct hclge_cmq_ring *ring)
27 int ntu = ring->next_to_use;
28 int ntc = ring->next_to_clean;
29 int used = (ntu - ntc + ring->desc_num) % ring->desc_num;
31 return ring->desc_num - used - 1;
34 static int hclge_alloc_cmd_desc(struct hclge_cmq_ring *ring)
36 int size = ring->desc_num * sizeof(struct hclge_desc);
38 ring->desc = kzalloc(size, GFP_KERNEL);
42 ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
43 size, DMA_BIDIRECTIONAL);
44 if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
45 ring->desc_dma_addr = 0;
54 static void hclge_free_cmd_desc(struct hclge_cmq_ring *ring)
56 dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
57 ring->desc_num * sizeof(ring->desc[0]),
60 ring->desc_dma_addr = 0;
65 static int hclge_init_cmd_queue(struct hclge_dev *hdev, int ring_type)
67 struct hclge_hw *hw = &hdev->hw;
68 struct hclge_cmq_ring *ring =
69 (ring_type == HCLGE_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
72 ring->flag = ring_type;
75 ret = hclge_alloc_cmd_desc(ring);
77 dev_err(&hdev->pdev->dev, "descriptor %s alloc error %d\n",
78 (ring_type == HCLGE_TYPE_CSQ) ? "CSQ" : "CRQ", ret);
82 ring->next_to_clean = 0;
83 ring->next_to_use = 0;
88 void hclge_cmd_setup_basic_desc(struct hclge_desc *desc,
89 enum hclge_opcode_type opcode, bool is_read)
91 memset((void *)desc, 0, sizeof(struct hclge_desc));
92 desc->opcode = cpu_to_le16(opcode);
93 desc->flag = cpu_to_le16(HCLGE_CMD_FLAG_NO_INTR | HCLGE_CMD_FLAG_IN);
96 desc->flag |= cpu_to_le16(HCLGE_CMD_FLAG_WR);
98 desc->flag &= cpu_to_le16(~HCLGE_CMD_FLAG_WR);
101 static void hclge_cmd_config_regs(struct hclge_cmq_ring *ring)
103 dma_addr_t dma = ring->desc_dma_addr;
104 struct hclge_dev *hdev = ring->dev;
105 struct hclge_hw *hw = &hdev->hw;
107 if (ring->flag == HCLGE_TYPE_CSQ) {
108 hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_L_REG,
110 hclge_write_dev(hw, HCLGE_NIC_CSQ_BASEADDR_H_REG,
111 (u32)((dma >> 31) >> 1));
112 hclge_write_dev(hw, HCLGE_NIC_CSQ_DEPTH_REG,
113 (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
114 HCLGE_NIC_CMQ_ENABLE);
115 hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, 0);
116 hclge_write_dev(hw, HCLGE_NIC_CSQ_HEAD_REG, 0);
118 hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_L_REG,
120 hclge_write_dev(hw, HCLGE_NIC_CRQ_BASEADDR_H_REG,
121 (u32)((dma >> 31) >> 1));
122 hclge_write_dev(hw, HCLGE_NIC_CRQ_DEPTH_REG,
123 (ring->desc_num >> HCLGE_NIC_CMQ_DESC_NUM_S) |
124 HCLGE_NIC_CMQ_ENABLE);
125 hclge_write_dev(hw, HCLGE_NIC_CRQ_TAIL_REG, 0);
126 hclge_write_dev(hw, HCLGE_NIC_CRQ_HEAD_REG, 0);
130 static void hclge_cmd_init_regs(struct hclge_hw *hw)
132 hclge_cmd_config_regs(&hw->cmq.csq);
133 hclge_cmd_config_regs(&hw->cmq.crq);
136 static int hclge_cmd_csq_clean(struct hclge_hw *hw)
138 struct hclge_cmq_ring *csq = &hw->cmq.csq;
139 u16 ntc = csq->next_to_clean;
140 struct hclge_desc *desc;
144 desc = &csq->desc[ntc];
145 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
147 while (head != ntc) {
148 memset(desc, 0, sizeof(*desc));
150 if (ntc == csq->desc_num)
152 desc = &csq->desc[ntc];
155 csq->next_to_clean = ntc;
160 static int hclge_cmd_csq_done(struct hclge_hw *hw)
162 u32 head = hclge_read_dev(hw, HCLGE_NIC_CSQ_HEAD_REG);
163 return head == hw->cmq.csq.next_to_use;
166 static bool hclge_is_special_opcode(u16 opcode)
168 u16 spec_opcode[3] = {0x0030, 0x0031, 0x0032};
171 for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
172 if (spec_opcode[i] == opcode)
180 * hclge_cmd_send - send command to command queue
181 * @hw: pointer to the hw struct
182 * @desc: prefilled descriptor for describing the command
183 * @num : the number of descriptors to be sent
185 * This is the main send command for command queue, it
186 * sends the queue, cleans the queue, etc
188 int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
190 struct hclge_dev *hdev = (struct hclge_dev *)hw->back;
191 struct hclge_desc *desc_to_use;
192 bool complete = false;
196 u16 opcode, desc_ret;
199 spin_lock_bh(&hw->cmq.csq.lock);
201 if (num > hclge_ring_space(&hw->cmq.csq)) {
202 spin_unlock_bh(&hw->cmq.csq.lock);
207 * Record the location of desc in the ring for this time
208 * which will be use for hardware to write back
210 ntc = hw->cmq.csq.next_to_use;
211 opcode = desc[0].opcode;
212 while (handle < num) {
213 desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
214 *desc_to_use = desc[handle];
215 (hw->cmq.csq.next_to_use)++;
216 if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
217 hw->cmq.csq.next_to_use = 0;
221 /* Write to hardware */
222 hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
225 * If the command is sync, wait for the firmware to write back,
226 * if multi descriptors to be sent, use the first one to check
228 if (HCLGE_SEND_SYNC(desc->flag)) {
230 if (hclge_cmd_csq_done(hw))
234 } while (timeout < hw->cmq.tx_timeout);
237 if (hclge_cmd_csq_done(hw)) {
240 while (handle < num) {
241 /* Get the result of hardware write back */
242 desc_to_use = &hw->cmq.csq.desc[ntc];
243 desc[handle] = *desc_to_use;
244 pr_debug("Get cmd desc:\n");
246 if (likely(!hclge_is_special_opcode(opcode)))
247 desc_ret = desc[handle].retval;
249 desc_ret = desc[0].retval;
251 if ((enum hclge_cmd_return_status)desc_ret ==
252 HCLGE_CMD_EXEC_SUCCESS)
254 else if (desc_ret == HCLGE_CMD_NOT_SUPPORTED)
255 retval = -EOPNOTSUPP;
258 hw->cmq.last_status = (enum hclge_cmd_status)desc_ret;
261 if (ntc == hw->cmq.csq.desc_num)
269 /* Clean the command send queue */
270 handle = hclge_cmd_csq_clean(hw);
272 dev_warn(&hdev->pdev->dev,
273 "cleaned %d, need to clean %d\n", handle, num);
276 spin_unlock_bh(&hw->cmq.csq.lock);
281 enum hclge_cmd_status hclge_cmd_query_firmware_version(struct hclge_hw *hw,
284 struct hclge_query_version *resp;
285 struct hclge_desc desc;
288 hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_QUERY_FW_VER, 1);
289 resp = (struct hclge_query_version *)desc.data;
291 ret = hclge_cmd_send(hw, &desc, 1);
293 *version = le32_to_cpu(resp->firmware);
298 int hclge_cmd_init(struct hclge_dev *hdev)
303 /* Setup the queue entries for use cmd queue */
304 hdev->hw.cmq.csq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
305 hdev->hw.cmq.crq.desc_num = HCLGE_NIC_CMQ_DESC_NUM;
307 /* Setup the lock for command queue */
308 spin_lock_init(&hdev->hw.cmq.csq.lock);
309 spin_lock_init(&hdev->hw.cmq.crq.lock);
311 /* Setup Tx write back timeout */
312 hdev->hw.cmq.tx_timeout = HCLGE_CMDQ_TX_TIMEOUT;
314 /* Setup queue rings */
315 ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CSQ);
317 dev_err(&hdev->pdev->dev,
318 "CSQ ring setup error %d\n", ret);
322 ret = hclge_init_cmd_queue(hdev, HCLGE_TYPE_CRQ);
324 dev_err(&hdev->pdev->dev,
325 "CRQ ring setup error %d\n", ret);
329 hclge_cmd_init_regs(&hdev->hw);
331 ret = hclge_cmd_query_firmware_version(&hdev->hw, &version);
333 dev_err(&hdev->pdev->dev,
334 "firmware version query failed %d\n", ret);
337 hdev->fw_version = version;
339 dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
343 hclge_free_cmd_desc(&hdev->hw.cmq.csq);
347 static void hclge_destroy_queue(struct hclge_cmq_ring *ring)
349 spin_lock_bh(&ring->lock);
350 hclge_free_cmd_desc(ring);
351 spin_unlock_bh(&ring->lock);
354 void hclge_destroy_cmd_queue(struct hclge_hw *hw)
356 hclge_destroy_queue(&hw->cmq.csq);
357 hclge_destroy_queue(&hw->cmq.crq);