/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/qed/qed_chain.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define QED_BAR_ACQUIRE_TIMEOUT 1000

/* Invalid values */
#define QED_BAR_INVALID_OFFSET          (cpu_to_le32(-1))

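/* Editorial note (derived from the code below, not from vendor docs):
 * each qed_ptt describes one slot of the PF's external BAR window array.
 * pxp.offset selects which GRC address range the window currently maps,
 * idx identifies the window, and hwfn_id records the owning hw-function.
 */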
struct qed_ptt {
        struct list_head        list_entry;
        unsigned int            idx;
        struct pxp_ptt_entry    pxp;
        u8                      hwfn_id;
};

struct qed_ptt_pool {
        struct list_head        free_list;
        spinlock_t              lock; /* ptt synchronized access */
        struct qed_ptt          ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM];
};

int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn)
{
        struct qed_ptt_pool *p_pool = kmalloc(sizeof(*p_pool), GFP_KERNEL);
        int i;

        if (!p_pool)
                return -ENOMEM;

        INIT_LIST_HEAD(&p_pool->free_list);
        for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
                p_pool->ptts[i].idx = i;
                p_pool->ptts[i].pxp.offset = QED_BAR_INVALID_OFFSET;
                p_pool->ptts[i].pxp.pretend.control = 0;
                p_pool->ptts[i].hwfn_id = p_hwfn->my_id;
                if (i >= RESERVED_PTT_MAX)
                        list_add(&p_pool->ptts[i].list_entry,
                                 &p_pool->free_list);
        }

        p_hwfn->p_ptt_pool = p_pool;
        spin_lock_init(&p_pool->lock);

        return 0;
}

void qed_ptt_invalidate(struct qed_hwfn *p_hwfn)
{
        struct qed_ptt *p_ptt;
        int i;

        for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) {
                p_ptt = &p_hwfn->p_ptt_pool->ptts[i];
                p_ptt->pxp.offset = QED_BAR_INVALID_OFFSET;
        }
}

void qed_ptt_pool_free(struct qed_hwfn *p_hwfn)
{
        kfree(p_hwfn->p_ptt_pool);
        p_hwfn->p_ptt_pool = NULL;
}

struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn)
{
        struct qed_ptt *p_ptt;
        unsigned int i;

        /* Take the free PTT from the list */
        for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) {
                spin_lock_bh(&p_hwfn->p_ptt_pool->lock);

                if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) {
                        p_ptt = list_first_entry(&p_hwfn->p_ptt_pool->free_list,
                                                 struct qed_ptt, list_entry);
                        list_del(&p_ptt->list_entry);

                        spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);

                        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                                   "allocated ptt %d\n", p_ptt->idx);
                        return p_ptt;
                }

                spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
                usleep_range(1000, 2000);
        }

        DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n");
        return NULL;
}

void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        spin_lock_bh(&p_hwfn->p_ptt_pool->lock);
        list_add(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list);
        spin_unlock_bh(&p_hwfn->p_ptt_pool->lock);
}
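
/* Illustrative usage sketch (editorial addition, not part of the driver):
 * callers bracket GRC accesses with acquire/release.  "grc_addr" and
 * "val" below are placeholders, not real register offsets or values.
 *
 *      struct qed_ptt *p_ptt = qed_ptt_acquire(p_hwfn);
 *
 *      if (!p_ptt)
 *              return -EBUSY;
 *
 *      qed_wr(p_hwfn, p_ptt, grc_addr, val);
 *      val = qed_rd(p_hwfn, p_ptt, grc_addr);
 *      qed_ptt_release(p_hwfn, p_ptt);
 */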

u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        /* The HW is using DWORDS and we need to translate it to Bytes */
        return le32_to_cpu(p_ptt->pxp.offset) << 2;
}

static u32 qed_ptt_config_addr(struct qed_ptt *p_ptt)
{
        return PXP_PF_WINDOW_ADMIN_PER_PF_START +
               p_ptt->idx * sizeof(struct pxp_ptt_entry);
}

u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt)
{
        return PXP_EXTERNAL_BAR_PF_WINDOW_START +
               p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE;
}

void qed_ptt_set_win(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt, u32 new_hw_addr)
{
        u32 prev_hw_addr;

        prev_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);

        if (new_hw_addr == prev_hw_addr)
                return;

        /* Update PTT entry in admin window */
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "Updating PTT entry %d to offset 0x%x\n",
                   p_ptt->idx, new_hw_addr);

        /* The HW is using DWORDS and the address is in Bytes */
        p_ptt->pxp.offset = cpu_to_le32(new_hw_addr >> 2);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, offset),
               le32_to_cpu(p_ptt->pxp.offset));
}

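/* Translate a GRC address into a BAR address that can be used for a
 * direct register access.  If the address does not fall inside the
 * window the PTT currently maps, the window is moved first and the
 * access starts at offset 0 of the relocated window.  (Descriptive
 * comment added for readability; it restates the logic below.)
 */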
static u32 qed_set_ptt(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt, u32 hw_addr)
{
        u32 win_hw_addr = qed_ptt_get_hw_addr(p_hwfn, p_ptt);
        u32 offset;

        offset = hw_addr - win_hw_addr;

        if (p_ptt->hwfn_id != p_hwfn->my_id)
                DP_NOTICE(p_hwfn,
                          "ptt[%d] of hwfn[%02x] is used by hwfn[%02x]!\n",
                          p_ptt->idx, p_ptt->hwfn_id, p_hwfn->my_id);

        /* Verify the address is within the window */
        if (hw_addr < win_hw_addr ||
            offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) {
                qed_ptt_set_win(p_hwfn, p_ptt, hw_addr);
                offset = 0;
        }

        return qed_ptt_get_bar_addr(p_ptt) + offset;
}

struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn,
                                     enum reserved_ptts ptt_idx)
{
        if (ptt_idx >= RESERVED_PTT_MAX) {
                DP_NOTICE(p_hwfn,
                          "Requested PTT %d is out of range\n", ptt_idx);
                return NULL;
        }

        return &p_hwfn->p_ptt_pool->ptts[ptt_idx];
}

void qed_wr(struct qed_hwfn *p_hwfn,
            struct qed_ptt *p_ptt,
            u32 hw_addr, u32 val)
{
        u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);

        REG_WR(p_hwfn, bar_addr, val);
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
                   bar_addr, hw_addr, val);
}

u32 qed_rd(struct qed_hwfn *p_hwfn,
           struct qed_ptt *p_ptt,
           u32 hw_addr)
{
        u32 bar_addr = qed_set_ptt(p_hwfn, p_ptt, hw_addr);
        u32 val = REG_RD(p_hwfn, bar_addr);

        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n",
                   bar_addr, hw_addr, val);

        return val;
}

static void qed_memcpy_hw(struct qed_hwfn *p_hwfn,
                          struct qed_ptt *p_ptt,
                          void *addr, u32 hw_addr, size_t n, bool to_device)
{
        u32 dw_count, *host_addr, hw_offset;
        size_t quota, done = 0;
        u32 __iomem *reg_addr;

        while (done < n) {
                quota = min_t(size_t, n - done,
                              PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE);

                if (IS_PF(p_hwfn->cdev)) {
                        qed_ptt_set_win(p_hwfn, p_ptt, hw_addr + done);
                        hw_offset = qed_ptt_get_bar_addr(p_ptt);
                } else {
                        hw_offset = hw_addr + done;
                }

                dw_count = quota / 4;
                host_addr = (u32 *)((u8 *)addr + done);
                reg_addr = (u32 __iomem *)REG_ADDR(p_hwfn, hw_offset);
                if (to_device)
                        while (dw_count--)
                                DIRECT_REG_WR(reg_addr++, *host_addr++);
                else
                        while (dw_count--)
                                *host_addr++ = DIRECT_REG_RD(reg_addr++);

                done += quota;
        }
}

void qed_memcpy_from(struct qed_hwfn *p_hwfn,
                     struct qed_ptt *p_ptt, void *dest, u32 hw_addr, size_t n)
{
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n",
                   hw_addr, dest, hw_addr, (unsigned long)n);

        qed_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false);
}

void qed_memcpy_to(struct qed_hwfn *p_hwfn,
                   struct qed_ptt *p_ptt, u32 hw_addr, void *src, size_t n)
{
        DP_VERBOSE(p_hwfn, NETIF_MSG_HW,
                   "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n",
                   hw_addr, hw_addr, src, (unsigned long)n);

        qed_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true);
}
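
/* Illustrative usage sketch (editorial addition, not part of the driver):
 * moving a small buffer between host memory and a GRC region through the
 * PTT window.  "grc_addr" is a placeholder offset.
 *
 *      u32 buf[16];
 *
 *      qed_memcpy_from(p_hwfn, p_ptt, buf, grc_addr, sizeof(buf));
 *      qed_memcpy_to(p_hwfn, p_ptt, grc_addr, buf, sizeof(buf));
 */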

void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);

        /* Every pretend undoes previous pretends, including
         * any previous port pretend.
         */
        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
                fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);

        p_ptt->pxp.pretend.control = cpu_to_le16(control);
        p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_pretend(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt, u8 port_id)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        p_ptt->pxp.pretend.control = cpu_to_le16(control);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
        u16 control = 0;

        SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0);
        SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);

        p_ptt->pxp.pretend.control = cpu_to_le16(control);

        REG_WR(p_hwfn,
               qed_ptt_config_addr(p_ptt) +
               offsetof(struct pxp_ptt_entry, pretend),
               *(u32 *)&p_ptt->pxp.pretend);
}

u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
{
        u32 concrete_fid = 0;

        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id);
        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid);
        SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1);

        return concrete_fid;
}
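
/* Illustrative usage sketch (editorial addition, not part of the driver):
 * accessing a VF's view of the GRC from the PF by pretending to be that
 * VF and then reverting to the PF's own fid.  The u16 cast is needed
 * because qed_vfid_to_concrete() returns u32 while qed_fid_pretend()
 * takes u16; "vfid" is a placeholder.
 *
 *      u32 concrete_fid = qed_vfid_to_concrete(p_hwfn, vfid);
 *
 *      qed_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid);
 *      ... GRC accesses through p_ptt now target the VF ...
 *      qed_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
 */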

/* DMAE */
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
                            const u8 is_src_type_grc,
                            const u8 is_dst_type_grc,
                            struct qed_dmae_params *p_params)
{
        u16 opcode_b = 0;
        u32 opcode = 0;

        /* Whether the source is the PCIe or the GRC.
         * 0- The source is the PCIe
         * 1- The source is the GRC.
         */
        opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
                                   : DMAE_CMD_SRC_MASK_PCIE) <<
                   DMAE_CMD_SRC_SHIFT;
        opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) <<
                   DMAE_CMD_SRC_PF_ID_SHIFT);

        /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
        opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
                                   : DMAE_CMD_DST_MASK_PCIE) <<
                   DMAE_CMD_DST_SHIFT;
        opcode |= ((p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) <<
                   DMAE_CMD_DST_PF_ID_SHIFT);

        /* Whether to write a completion word to the completion destination:
         * 0-Do not write a completion word
         * 1-Write the completion word
         */
        opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
        opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
                   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

        if (p_params->flags & QED_DMAE_FLAG_COMPLETION_DST)
                opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);

        opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);

        opcode |= ((p_hwfn->port_id) << DMAE_CMD_PORT_ID_SHIFT);

        /* reset source address in next go */
        opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
                   DMAE_CMD_SRC_ADDR_RESET_SHIFT);

        /* reset dest address in next go */
        opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
                   DMAE_CMD_DST_ADDR_RESET_SHIFT);

        /* SRC/DST VFID: all 1's - pf, otherwise VF id */
        if (p_params->flags & QED_DMAE_FLAG_VF_SRC) {
                opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
                opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
        } else {
                opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
                            DMAE_CMD_SRC_VF_ID_SHIFT;
        }

        if (p_params->flags & QED_DMAE_FLAG_VF_DST) {
                opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
                opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
        } else {
                opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
        }

        p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
        p_hwfn->dmae_info.p_dmae_cmd->opcode_b = cpu_to_le16(opcode_b);
}

u32 qed_dmae_idx_to_go_cmd(u8 idx)
{
        /* All the DMAE 'go' registers form an array in internal memory */
        return DMAE_REG_GO_C0 + (idx << 2);
}

static int qed_dmae_post_command(struct qed_hwfn *p_hwfn,
                                 struct qed_ptt *p_ptt)
{
        struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd;
        u8 idx_cmd = p_hwfn->dmae_info.channel, i;
        int qed_status = 0;

        /* verify address is not NULL */
        if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) ||
             ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) {
                DP_NOTICE(p_hwfn,
                          "source or destination address 0 idx_cmd=%d\n"
                          "opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                          idx_cmd,
                          le32_to_cpu(p_command->opcode),
                          le16_to_cpu(p_command->opcode_b),
                          le16_to_cpu(p_command->length_dw),
                          le32_to_cpu(p_command->src_addr_hi),
                          le32_to_cpu(p_command->src_addr_lo),
                          le32_to_cpu(p_command->dst_addr_hi),
                          le32_to_cpu(p_command->dst_addr_lo));

                return -EINVAL;
        }

        DP_VERBOSE(p_hwfn,
                   NETIF_MSG_HW,
                   "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x] len=0x%x src=0x%x:%x dst=0x%x:%x\n",
                   idx_cmd,
                   le32_to_cpu(p_command->opcode),
                   le16_to_cpu(p_command->opcode_b),
                   le16_to_cpu(p_command->length_dw),
                   le32_to_cpu(p_command->src_addr_hi),
                   le32_to_cpu(p_command->src_addr_lo),
                   le32_to_cpu(p_command->dst_addr_hi),
                   le32_to_cpu(p_command->dst_addr_lo));

        /* Copy the command to DMAE - this needs to be done before every
         * call, since the source/dest addresses are not reset.
         * The first 9 DWs are the command registers, the 10th DW is the
         * GO register, and the rest are result registers
         * (which are read only by the client).
         */
        for (i = 0; i < DMAE_CMD_SIZE; i++) {
                u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ?
                           *(((u32 *)p_command) + i) : 0;

                qed_wr(p_hwfn, p_ptt,
                       DMAE_REG_CMD_MEM +
                       (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) +
                       (i * sizeof(u32)), data);
        }

        qed_wr(p_hwfn, p_ptt, qed_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE);

        return qed_status;
}

int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn)
{
        dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr;
        struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd;
        u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer;
        u32 **p_comp = &p_hwfn->dmae_info.p_completion_word;

        *p_comp = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     sizeof(u32), p_addr, GFP_KERNEL);
        if (!*p_comp)
                goto err;

        p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr;
        *p_cmd = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                    sizeof(struct dmae_cmd),
                                    p_addr, GFP_KERNEL);
        if (!*p_cmd)
                goto err;

        p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        *p_buff = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
                                     sizeof(u32) * DMAE_MAX_RW_SIZE,
                                     p_addr, GFP_KERNEL);
        if (!*p_buff)
                goto err;

        p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id;

        return 0;
err:
        qed_dmae_info_free(p_hwfn);
        return -ENOMEM;
}

void qed_dmae_info_free(struct qed_hwfn *p_hwfn)
{
        dma_addr_t p_phys;

        /* Just make sure no one is in the middle */
        mutex_lock(&p_hwfn->dmae_info.mutex);

        if (p_hwfn->dmae_info.p_completion_word) {
                p_phys = p_hwfn->dmae_info.completion_word_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(u32),
                                  p_hwfn->dmae_info.p_completion_word, p_phys);
                p_hwfn->dmae_info.p_completion_word = NULL;
        }

        if (p_hwfn->dmae_info.p_dmae_cmd) {
                p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(struct dmae_cmd),
                                  p_hwfn->dmae_info.p_dmae_cmd, p_phys);
                p_hwfn->dmae_info.p_dmae_cmd = NULL;
        }

        if (p_hwfn->dmae_info.p_intermediate_buffer) {
                p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
                dma_free_coherent(&p_hwfn->cdev->pdev->dev,
                                  sizeof(u32) * DMAE_MAX_RW_SIZE,
                                  p_hwfn->dmae_info.p_intermediate_buffer,
                                  p_phys);
                p_hwfn->dmae_info.p_intermediate_buffer = NULL;
        }

        mutex_unlock(&p_hwfn->dmae_info.mutex);
}

static int qed_dmae_operation_wait(struct qed_hwfn *p_hwfn)
{
        u32 wait_cnt_limit = 10000, wait_cnt = 0;
        int qed_status = 0;

        barrier();
        while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) {
                udelay(DMAE_MIN_WAIT_TIME);
                if (++wait_cnt > wait_cnt_limit) {
                        DP_NOTICE(p_hwfn->cdev,
                                  "Timed-out waiting for operation to complete. Completion word is 0x%08x expected 0x%08x.\n",
                                  *p_hwfn->dmae_info.p_completion_word,
                                  DMAE_COMPLETION_VAL);
                        qed_status = -EBUSY;
                        break;
                }

                /* to sync the completion_word since we are not
                 * using the volatile keyword for p_completion_word
                 */
                barrier();
        }

        if (qed_status == 0)
                *p_hwfn->dmae_info.p_completion_word = 0;

        return qed_status;
}

static int qed_dmae_execute_sub_operation(struct qed_hwfn *p_hwfn,
                                          struct qed_ptt *p_ptt,
                                          u64 src_addr,
                                          u64 dst_addr,
                                          u8 src_type,
                                          u8 dst_type,
                                          u32 length_dw)
{
        dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        int qed_status = 0;

        switch (src_type) {
        case QED_DMAE_ADDRESS_GRC:
        case QED_DMAE_ADDRESS_HOST_PHYS:
                cmd->src_addr_hi = cpu_to_le32(upper_32_bits(src_addr));
                cmd->src_addr_lo = cpu_to_le32(lower_32_bits(src_addr));
                break;
        /* for virtual source addresses we use the intermediate buffer. */
        case QED_DMAE_ADDRESS_HOST_VIRT:
                cmd->src_addr_hi = cpu_to_le32(upper_32_bits(phys));
                cmd->src_addr_lo = cpu_to_le32(lower_32_bits(phys));
                memcpy(&p_hwfn->dmae_info.p_intermediate_buffer[0],
                       (void *)(uintptr_t)src_addr,
                       length_dw * sizeof(u32));
                break;
        default:
                return -EINVAL;
        }

        switch (dst_type) {
        case QED_DMAE_ADDRESS_GRC:
        case QED_DMAE_ADDRESS_HOST_PHYS:
                cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(dst_addr));
                cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(dst_addr));
                break;
        /* for virtual destination addresses we use the intermediate buffer. */
        case QED_DMAE_ADDRESS_HOST_VIRT:
                cmd->dst_addr_hi = cpu_to_le32(upper_32_bits(phys));
                cmd->dst_addr_lo = cpu_to_le32(lower_32_bits(phys));
                break;
        default:
                return -EINVAL;
        }

        cmd->length_dw = cpu_to_le16((u16)length_dw);

        qed_dmae_post_command(p_hwfn, p_ptt);

        qed_status = qed_dmae_operation_wait(p_hwfn);

        if (qed_status) {
                DP_NOTICE(p_hwfn,
                          "qed_dmae_host2grc: Wait Failed. source_addr 0x%llx, grc_addr 0x%llx, size_in_dwords 0x%x\n",
                          src_addr, dst_addr, length_dw);
                return qed_status;
        }

        if (dst_type == QED_DMAE_ADDRESS_HOST_VIRT)
                memcpy((void *)(uintptr_t)(dst_addr),
                       &p_hwfn->dmae_info.p_intermediate_buffer[0],
                       length_dw * sizeof(u32));

        return 0;
}

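/* Perform a DMAE transfer, transparently splitting it into sub-operations
 * of at most DMAE_MAX_RW_SIZE dwords each.  (Descriptive comment added
 * for readability; it summarizes the loop below.)
 */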
static int qed_dmae_execute_command(struct qed_hwfn *p_hwfn,
                                    struct qed_ptt *p_ptt,
                                    u64 src_addr, u64 dst_addr,
                                    u8 src_type, u8 dst_type,
                                    u32 size_in_dwords,
                                    struct qed_dmae_params *p_params)
{
        dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr;
        u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0;
        struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd;
        u64 src_addr_split = 0, dst_addr_split = 0;
        u16 length_limit = DMAE_MAX_RW_SIZE;
        int qed_status = 0;
        u32 offset = 0;

        qed_dmae_opcode(p_hwfn,
                        (src_type == QED_DMAE_ADDRESS_GRC),
                        (dst_type == QED_DMAE_ADDRESS_GRC),
                        p_params);

        cmd->comp_addr_lo = cpu_to_le32(lower_32_bits(phys));
        cmd->comp_addr_hi = cpu_to_le32(upper_32_bits(phys));
        cmd->comp_val = cpu_to_le32(DMAE_COMPLETION_VAL);

        /* Check if the grc_addr is valid, i.e. < MAX_GRC_OFFSET */
        cnt_split = size_in_dwords / length_limit;
        length_mod = size_in_dwords % length_limit;

        src_addr_split = src_addr;
        dst_addr_split = dst_addr;

        for (i = 0; i <= cnt_split; i++) {
                offset = length_limit * i;

                if (!(p_params->flags & QED_DMAE_FLAG_RW_REPL_SRC)) {
                        if (src_type == QED_DMAE_ADDRESS_GRC)
                                src_addr_split = src_addr + offset;
                        else
                                src_addr_split = src_addr + (offset * 4);
                }

                if (dst_type == QED_DMAE_ADDRESS_GRC)
                        dst_addr_split = dst_addr + offset;
                else
                        dst_addr_split = dst_addr + (offset * 4);

                length_cur = (cnt_split == i) ? length_mod : length_limit;

                /* might be zero on last iteration */
                if (!length_cur)
                        continue;

                qed_status = qed_dmae_execute_sub_operation(p_hwfn,
                                                            p_ptt,
                                                            src_addr_split,
                                                            dst_addr_split,
                                                            src_type,
                                                            dst_type,
                                                            length_cur);
                if (qed_status) {
                        DP_NOTICE(p_hwfn,
                                  "qed_dmae_execute_sub_operation Failed with error 0x%x. source_addr 0x%llx, destination addr 0x%llx, size_in_dwords 0x%x\n",
                                  qed_status, src_addr, dst_addr, length_cur);
                        break;
                }
        }

        return qed_status;
}

int qed_dmae_host2grc(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                  u64 source_addr, u32 grc_addr, u32 size_in_dwords, u32 flags)
{
        u32 grc_addr_in_dw = grc_addr / sizeof(u32);
        struct qed_dmae_params params;
        int rc;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = flags;

        mutex_lock(&p_hwfn->dmae_info.mutex);

        rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
                                      grc_addr_in_dw,
                                      QED_DMAE_ADDRESS_HOST_VIRT,
                                      QED_DMAE_ADDRESS_GRC,
                                      size_in_dwords, &params);

        mutex_unlock(&p_hwfn->dmae_info.mutex);

        return rc;
}

int qed_dmae_grc2host(struct qed_hwfn *p_hwfn,
                      struct qed_ptt *p_ptt,
                      u32 grc_addr,
                      dma_addr_t dest_addr, u32 size_in_dwords, u32 flags)
{
        u32 grc_addr_in_dw = grc_addr / sizeof(u32);
        struct qed_dmae_params params;
        int rc;

        memset(&params, 0, sizeof(struct qed_dmae_params));
        params.flags = flags;

        mutex_lock(&p_hwfn->dmae_info.mutex);

        rc = qed_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw,
                                      dest_addr, QED_DMAE_ADDRESS_GRC,
                                      QED_DMAE_ADDRESS_HOST_VIRT,
                                      size_in_dwords, &params);

        mutex_unlock(&p_hwfn->dmae_info.mutex);

        return rc;
}

int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
                       struct qed_ptt *p_ptt,
                       dma_addr_t source_addr,
                       dma_addr_t dest_addr,
                       u32 size_in_dwords, struct qed_dmae_params *p_params)
{
        int rc;

        mutex_lock(&(p_hwfn->dmae_info.mutex));

        rc = qed_dmae_execute_command(p_hwfn, p_ptt, source_addr,
                                      dest_addr,
                                      QED_DMAE_ADDRESS_HOST_PHYS,
                                      QED_DMAE_ADDRESS_HOST_PHYS,
                                      size_in_dwords, p_params);

        mutex_unlock(&(p_hwfn->dmae_info.mutex));

        return rc;
}
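
/* Illustrative usage sketch (editorial addition, not part of the driver):
 * DMA-ing a host buffer into GRC memory.  "p_virt" and "grc_addr" are
 * placeholders; a flags value of 0 requests the default behaviour.
 *
 *      rc = qed_dmae_host2grc(p_hwfn, p_ptt, (u64)(uintptr_t)p_virt,
 *                             grc_addr, size / sizeof(u32), 0);
 *      if (rc)
 *              DP_NOTICE(p_hwfn, "DMAE transfer failed\n");
 */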

u16 qed_get_qm_pq(struct qed_hwfn *p_hwfn,
                  enum protocol_type proto, union qed_qm_pq_params *p_params)
{
        u16 pq_id = 0;

        if ((proto == PROTOCOLID_CORE ||
             proto == PROTOCOLID_ETH ||
             proto == PROTOCOLID_ISCSI ||
             proto == PROTOCOLID_ROCE) && !p_params) {
                DP_NOTICE(p_hwfn,
                          "Protocol %d received NULL PQ params\n", proto);
                return 0;
        }

        switch (proto) {
        case PROTOCOLID_CORE:
                if (p_params->core.tc == LB_TC)
                        pq_id = p_hwfn->qm_info.pure_lb_pq;
                else if (p_params->core.tc == OOO_LB_TC)
                        pq_id = p_hwfn->qm_info.ooo_pq;
                else
                        pq_id = p_hwfn->qm_info.offload_pq;
                break;
        case PROTOCOLID_ETH:
                pq_id = p_params->eth.tc;
                if (p_params->eth.is_vf)
                        pq_id += p_hwfn->qm_info.vf_queues_offset +
                                 p_params->eth.vf_id;
                break;
        case PROTOCOLID_ISCSI:
                if (p_params->iscsi.q_idx == 1)
                        pq_id = p_hwfn->qm_info.pure_ack_pq;
                break;
        case PROTOCOLID_ROCE:
                if (p_params->roce.dcqcn)
                        pq_id = p_params->roce.qpid;
                else
                        pq_id = p_hwfn->qm_info.offload_pq;
                if (pq_id > p_hwfn->qm_info.num_pf_rls)
                        pq_id = p_hwfn->qm_info.offload_pq;
                break;
        default:
                pq_id = 0;
        }

        pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, QED_PQ);

        return pq_id;
}