// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include <linux/delay.h>
#include "i40e_alloc.h"
#include "i40e_register.h"
#include "i40e_prototype.h"

static void i40e_resume_aq(struct i40e_hw *hw);

/**
 *  i40e_alloc_adminq_asq_ring - Allocate Admin Queue send rings
 *  @hw: pointer to the hardware structure
 **/
static int i40e_alloc_adminq_asq_ring(struct i40e_hw *hw)
{
        int ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.asq.desc_buf,
                                         (hw->aq.num_asq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);
        if (ret_code)
                return ret_code;

        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.cmd_buf,
                                          (hw->aq.num_asq_entries *
                                          sizeof(struct i40e_asq_cmd_details)));
        if (ret_code) {
                i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
                return ret_code;
        }

        return ret_code;
}

/**
 *  i40e_alloc_adminq_arq_ring - Allocate Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 **/
static int i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
{
        int ret_code;

        ret_code = i40e_allocate_dma_mem(hw, &hw->aq.arq.desc_buf,
                                         (hw->aq.num_arq_entries *
                                         sizeof(struct i40e_aq_desc)),
                                         I40E_ADMINQ_DESC_ALIGNMENT);

        return ret_code;
}
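
/* Note: unlike i40e_alloc_adminq_asq_ring() above, the ARQ allocates no
 * i40e_asq_cmd_details array; a received event carries all of its state
 * in the descriptor and its attached DMA buffer.
 */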

/**
 *  i40e_free_adminq_asq - Free Admin Queue send rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted send buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_asq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}

/**
 *  i40e_free_adminq_arq - Free Admin Queue receive rings
 *  @hw: pointer to the hardware structure
 *
 *  This assumes the posted receive buffers have already been cleaned
 *  and de-allocated
 **/
static void i40e_free_adminq_arq(struct i40e_hw *hw)
{
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);
}

/**
 *  i40e_alloc_arq_bufs - Allocate pre-posted buffers for the receive queue
 *  @hw: pointer to the hardware structure
 **/
static int i40e_alloc_arq_bufs(struct i40e_hw *hw)
{
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        int ret_code;
        int i;

        /* We'll be allocating the buffer info memory first, then we can
         * allocate the mapped buffers for the event processing
         */

        /* buffer_info structures do not need alignment */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.arq.dma_head,
                (hw->aq.num_arq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_arq_bufs;
        hw->aq.arq.r.arq_bi = (struct i40e_dma_mem *)hw->aq.arq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_arq_entries; i++) {
                bi = &hw->aq.arq.r.arq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 hw->aq.arq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_arq_bufs;

                /* now configure the descriptors for use */
                desc = I40E_ADMINQ_DESC(hw->aq.arq, i);

                desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
                if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                        desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
                desc->opcode = 0;
                /* This is in accordance with Admin queue design, there is no
                 * register for buffer size configuration
                 */
                desc->datalen = cpu_to_le16((u16)bi->size);
                desc->retval = 0;
                desc->cookie_high = 0;
                desc->cookie_low = 0;
                desc->params.external.addr_high =
                        cpu_to_le32(upper_32_bits(bi->pa));
                desc->params.external.addr_low =
                        cpu_to_le32(lower_32_bits(bi->pa));
                desc->params.external.param0 = 0;
                desc->params.external.param1 = 0;
        }

alloc_arq_bufs:
        return ret_code;

unwind_alloc_arq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);

        return ret_code;
}

/**
 *  i40e_alloc_asq_bufs - Allocate empty buffer structs for the send queue
 *  @hw: pointer to the hardware structure
 **/
static int i40e_alloc_asq_bufs(struct i40e_hw *hw)
{
        struct i40e_dma_mem *bi;
        int ret_code;
        int i;

        /* No mapped memory needed yet, just the buffer info structures */
        ret_code = i40e_allocate_virt_mem(hw, &hw->aq.asq.dma_head,
                (hw->aq.num_asq_entries * sizeof(struct i40e_dma_mem)));
        if (ret_code)
                goto alloc_asq_bufs;
        hw->aq.asq.r.asq_bi = (struct i40e_dma_mem *)hw->aq.asq.dma_head.va;

        /* allocate the mapped buffers */
        for (i = 0; i < hw->aq.num_asq_entries; i++) {
                bi = &hw->aq.asq.r.asq_bi[i];
                ret_code = i40e_allocate_dma_mem(hw, bi,
                                                 hw->aq.asq_buf_size,
                                                 I40E_ADMINQ_DESC_ALIGNMENT);
                if (ret_code)
                        goto unwind_alloc_asq_bufs;
        }
alloc_asq_bufs:
        return ret_code;

unwind_alloc_asq_bufs:
        /* don't try to free the one that failed... */
        i--;
        for (; i >= 0; i--)
                i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);

        return ret_code;
}

/**
 *  i40e_free_arq_bufs - Free receive queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_arq_bufs(struct i40e_hw *hw)
{
        int i;

        /* free descriptors */
        for (i = 0; i < hw->aq.num_arq_entries; i++)
                i40e_free_dma_mem(hw, &hw->aq.arq.r.arq_bi[i]);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.arq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.arq.dma_head);
}

/**
 *  i40e_free_asq_bufs - Free send queue buffer info elements
 *  @hw: pointer to the hardware structure
 **/
static void i40e_free_asq_bufs(struct i40e_hw *hw)
{
        int i;

        /* only unmap if the address is non-NULL */
        for (i = 0; i < hw->aq.num_asq_entries; i++)
                if (hw->aq.asq.r.asq_bi[i].pa)
                        i40e_free_dma_mem(hw, &hw->aq.asq.r.asq_bi[i]);

        /* free the buffer info list */
        i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);

        /* free the descriptor memory */
        i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);

        /* free the dma header */
        i40e_free_virt_mem(hw, &hw->aq.asq.dma_head);
}

/**
 *  i40e_config_asq_regs - configure ASQ registers
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the transmit queue
 **/
static int i40e_config_asq_regs(struct i40e_hw *hw)
{
        int ret_code = 0;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, I40E_PF_ATQH, 0);
        wr32(hw, I40E_PF_ATQT, 0);

        /* set starting point */
        wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
                                  I40E_PF_ATQLEN_ATQENABLE_MASK));
        wr32(hw, I40E_PF_ATQBAL, lower_32_bits(hw->aq.asq.desc_buf.pa));
        wr32(hw, I40E_PF_ATQBAH, upper_32_bits(hw->aq.asq.desc_buf.pa));

        /* Check one register to verify that config was applied */
        reg = rd32(hw, I40E_PF_ATQBAL);
        if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
                ret_code = -EIO;

        return ret_code;
}

/**
 *  i40e_config_arq_regs - ARQ register configuration
 *  @hw: pointer to the hardware structure
 *
 *  Configure base address and length registers for the receive (event) queue
 **/
static int i40e_config_arq_regs(struct i40e_hw *hw)
{
        int ret_code = 0;
        u32 reg = 0;

        /* Clear Head and Tail */
        wr32(hw, I40E_PF_ARQH, 0);
        wr32(hw, I40E_PF_ARQT, 0);

        /* set starting point */
        wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
                                  I40E_PF_ARQLEN_ARQENABLE_MASK));
        wr32(hw, I40E_PF_ARQBAL, lower_32_bits(hw->aq.arq.desc_buf.pa));
        wr32(hw, I40E_PF_ARQBAH, upper_32_bits(hw->aq.arq.desc_buf.pa));

        /* Update tail in the HW to post pre-allocated buffers */
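        /* Tail is written as num_arq_entries - 1, the index of the last
         * descriptor handed to firmware; i40e_clean_arq_element() keeps
         * advancing it as buffers are recycled back into the ring.
         */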
        wr32(hw, I40E_PF_ARQT, hw->aq.num_arq_entries - 1);

        /* Check one register to verify that config was applied */
        reg = rd32(hw, I40E_PF_ARQBAL);
        if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
                ret_code = -EIO;

        return ret_code;
}

/**
 *  i40e_init_asq - main initialization routine for ASQ
 *  @hw: pointer to the hardware structure
 *
 *  This is the main initialization routine for the Admin Send Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.asq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static int i40e_init_asq(struct i40e_hw *hw)
{
        int ret_code = 0;

        if (hw->aq.asq.count > 0) {
                /* queue already initialized */
                ret_code = -EBUSY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_asq_entries == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = -EIO;
                goto init_adminq_exit;
        }

        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_asq_ring(hw);
        if (ret_code)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_asq_bufs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_asq_regs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.asq.count = hw->aq.num_asq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_asq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_init_arq - initialize ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main initialization routine for the Admin Receive (Event) Queue.
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *
 *  Do *NOT* hold the lock when calling this as the memory allocation routines
 *  called are not going to be atomic context safe
 **/
static int i40e_init_arq(struct i40e_hw *hw)
{
        int ret_code = 0;

        if (hw->aq.arq.count > 0) {
                /* queue already initialized */
                ret_code = -EBUSY;
                goto init_adminq_exit;
        }

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.arq_buf_size == 0)) {
                ret_code = -EIO;
                goto init_adminq_exit;
        }

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        /* allocate the ring memory */
        ret_code = i40e_alloc_adminq_arq_ring(hw);
        if (ret_code)
                goto init_adminq_exit;

        /* allocate buffers in the rings */
        ret_code = i40e_alloc_arq_bufs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* initialize base registers */
        ret_code = i40e_config_arq_regs(hw);
        if (ret_code)
                goto init_adminq_free_rings;

        /* success! */
        hw->aq.arq.count = hw->aq.num_arq_entries;
        goto init_adminq_exit;

init_adminq_free_rings:
        i40e_free_adminq_arq(hw);

init_adminq_exit:
        return ret_code;
}

/**
 *  i40e_shutdown_asq - shutdown the ASQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Send Queue
 **/
static int i40e_shutdown_asq(struct i40e_hw *hw)
{
        int ret_code = 0;

        mutex_lock(&hw->aq.asq_mutex);

        if (hw->aq.asq.count == 0) {
                ret_code = -EBUSY;
                goto shutdown_asq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, I40E_PF_ATQH, 0);
        wr32(hw, I40E_PF_ATQT, 0);
        wr32(hw, I40E_PF_ATQLEN, 0);
        wr32(hw, I40E_PF_ATQBAL, 0);
        wr32(hw, I40E_PF_ATQBAH, 0);

        hw->aq.asq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_asq_bufs(hw);

shutdown_asq_out:
        mutex_unlock(&hw->aq.asq_mutex);
        return ret_code;
}

/**
 *  i40e_shutdown_arq - shutdown ARQ
 *  @hw: pointer to the hardware structure
 *
 *  The main shutdown routine for the Admin Receive Queue
 **/
static int i40e_shutdown_arq(struct i40e_hw *hw)
{
        int ret_code = 0;

        mutex_lock(&hw->aq.arq_mutex);

        if (hw->aq.arq.count == 0) {
                ret_code = -EBUSY;
                goto shutdown_arq_out;
        }

        /* Stop firmware AdminQ processing */
        wr32(hw, I40E_PF_ARQH, 0);
        wr32(hw, I40E_PF_ARQT, 0);
        wr32(hw, I40E_PF_ARQLEN, 0);
        wr32(hw, I40E_PF_ARQBAL, 0);
        wr32(hw, I40E_PF_ARQBAH, 0);

        hw->aq.arq.count = 0; /* to indicate uninitialized queue */

        /* free ring buffers */
        i40e_free_arq_bufs(hw);

shutdown_arq_out:
        mutex_unlock(&hw->aq.arq_mutex);
        return ret_code;
}

/**
 *  i40e_set_hw_caps - set HW flags
 *  @hw: pointer to the hardware structure
 **/
static void i40e_set_hw_caps(struct i40e_hw *hw)
{
        bitmap_zero(hw->caps, I40E_HW_CAPS_NBITS);

        switch (hw->mac.type) {
        case I40E_MAC_XL710:
                if (i40e_is_aq_api_ver_ge(hw, 1,
                                          I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
                        set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);
                        set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);
                        /* The ability to RX (not drop) 802.1ad frames */
                        set_bit(I40E_HW_CAP_802_1AD, hw->caps);
                }
                if (i40e_is_aq_api_ver_ge(hw, 1, 5)) {
                        /* Supported in FW API version higher than 1.4 */
                        set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
                }
                if (i40e_is_fw_ver_lt(hw, 4, 33)) {
                        set_bit(I40E_HW_CAP_RESTART_AUTONEG, hw->caps);
                        /* No DCB support for FW < v4.33 */
                        set_bit(I40E_HW_CAP_NO_DCB_SUPPORT, hw->caps);
                }
                if (i40e_is_fw_ver_lt(hw, 4, 3)) {
                        /* Disable FW LLDP if FW < v4.3 */
                        set_bit(I40E_HW_CAP_STOP_FW_LLDP, hw->caps);
                }
                if (i40e_is_fw_ver_ge(hw, 4, 40)) {
                        /* Use the FW Set LLDP MIB API if FW >= v4.40 */
                        set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
                }
                if (i40e_is_fw_ver_ge(hw, 6, 0)) {
                        /* Enable PTP L4 if FW >= v6.0 */
                        set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
                }
                break;
        case I40E_MAC_X722:
                set_bit(I40E_HW_CAP_AQ_SRCTL_ACCESS_ENABLE, hw->caps);
                set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);
                set_bit(I40E_HW_CAP_RSS_AQ, hw->caps);
                set_bit(I40E_HW_CAP_128_QP_RSS, hw->caps);
                set_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
                set_bit(I40E_HW_CAP_WB_ON_ITR, hw->caps);
                set_bit(I40E_HW_CAP_MULTI_TCP_UDP_RSS_PCTYPE, hw->caps);
                set_bit(I40E_HW_CAP_NO_PCI_LINK_CHECK, hw->caps);
                set_bit(I40E_HW_CAP_USE_SET_LLDP_MIB, hw->caps);
                set_bit(I40E_HW_CAP_GENEVE_OFFLOAD, hw->caps);
                set_bit(I40E_HW_CAP_PTP_L4, hw->caps);
                set_bit(I40E_HW_CAP_WOL_MC_MAGIC_PKT_WAKE, hw->caps);
                set_bit(I40E_HW_CAP_OUTER_UDP_CSUM, hw->caps);

                if (rd32(hw, I40E_GLQF_FDEVICTENA(1)) !=
                    I40E_FDEVICT_PCTYPE_DEFAULT) {
                        hw_warn(hw, "FD EVICT PCTYPES are not right, disable FD HW EVICT\n");
                        clear_bit(I40E_HW_CAP_ATR_EVICT, hw->caps);
                }

                if (i40e_is_aq_api_ver_ge(hw, 1,
                                          I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
                        set_bit(I40E_HW_CAP_FW_LLDP_STOPPABLE, hw->caps);

                if (i40e_is_aq_api_ver_ge(hw, 1,
                                          I40E_MINOR_VER_GET_LINK_INFO_X722))
                        set_bit(I40E_HW_CAP_AQ_PHY_ACCESS, hw->caps);

                if (i40e_is_aq_api_ver_ge(hw, 1,
                                          I40E_MINOR_VER_FW_REQUEST_FEC_X722))
                        set_bit(I40E_HW_CAP_X722_FEC_REQUEST, hw->caps);

                fallthrough;
        default:
                break;
        }

        /* Newer versions of firmware require lock when reading the NVM */
        if (i40e_is_aq_api_ver_ge(hw, 1, 5))
                set_bit(I40E_HW_CAP_NVM_READ_REQUIRES_LOCK, hw->caps);

        /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
        if (i40e_is_aq_api_ver_ge(hw, 1, 7))
                set_bit(I40E_HW_CAP_802_1AD, hw->caps);

        if (i40e_is_aq_api_ver_ge(hw, 1, 8))
                set_bit(I40E_HW_CAP_FW_LLDP_PERSISTENT, hw->caps);

        if (i40e_is_aq_api_ver_ge(hw, 1, 9))
                set_bit(I40E_HW_CAP_AQ_PHY_ACCESS_EXTENDED, hw->caps);
}
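
/* Illustrative (hypothetical caller, not part of this file): hw->caps is a
 * plain bitmap, so consumers test the flags set above with the regular
 * bitops helpers, e.g.:
 *
 *      if (test_bit(I40E_HW_CAP_PTP_L4, hw->caps))
 *              ...enable L4 timestamping...
 */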

/**
 *  i40e_init_adminq - main initialization routine for Admin Queue
 *  @hw: pointer to the hardware structure
 *
 *  Prior to calling this function, drivers *MUST* set the following fields
 *  in the hw->aq structure:
 *     - hw->aq.num_asq_entries
 *     - hw->aq.num_arq_entries
 *     - hw->aq.arq_buf_size
 *     - hw->aq.asq_buf_size
 **/
int i40e_init_adminq(struct i40e_hw *hw)
{
        u16 cfg_ptr, oem_hi, oem_lo;
        u16 eetrack_lo, eetrack_hi;
        int retry = 0;
        int ret_code;

        /* verify input for valid configuration */
        if ((hw->aq.num_arq_entries == 0) ||
            (hw->aq.num_asq_entries == 0) ||
            (hw->aq.arq_buf_size == 0) ||
            (hw->aq.asq_buf_size == 0)) {
                ret_code = -EIO;
                goto init_adminq_exit;
        }

        /* setup ASQ command write back timeout */
        hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;

        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code)
                goto init_adminq_destroy_locks;

        /* allocate the ARQ */
        ret_code = i40e_init_arq(hw);
        if (ret_code)
                goto init_adminq_free_asq;

        /* There are some cases where the firmware may not be quite ready
         * for AdminQ operations, so we retry the AdminQ setup a few times
         * if we see timeouts in this first AQ call.
         */
        do {
                ret_code = i40e_aq_get_firmware_version(hw,
                                                        &hw->aq.fw_maj_ver,
                                                        &hw->aq.fw_min_ver,
                                                        &hw->aq.fw_build,
                                                        &hw->aq.api_maj_ver,
                                                        &hw->aq.api_min_ver,
                                                        NULL);
                if (ret_code != -EIO)
                        break;
                retry++;
                msleep(100);
                i40e_resume_aq(hw);
        } while (retry < 10);
        if (ret_code != 0)
                goto init_adminq_free_arq;
        /* Some features were introduced in different FW API versions
         * for different MAC types.
         */
        i40e_set_hw_caps(hw);

        /* get the NVM version info */
        i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
                           &hw->nvm.version);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_LO, &eetrack_lo);
        i40e_read_nvm_word(hw, I40E_SR_NVM_EETRACK_HI, &eetrack_hi);
        hw->nvm.eetrack = (eetrack_hi << 16) | eetrack_lo;
        i40e_read_nvm_word(hw, I40E_SR_BOOT_CONFIG_PTR, &cfg_ptr);
        i40e_read_nvm_word(hw, (cfg_ptr + I40E_NVM_OEM_VER_OFF),
                           &oem_hi);
        i40e_read_nvm_word(hw, (cfg_ptr + (I40E_NVM_OEM_VER_OFF + 1)),
                           &oem_lo);
        hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;

        if (i40e_is_aq_api_ver_ge(hw, I40E_FW_API_VERSION_MAJOR + 1, 0)) {
                ret_code = -EIO;
                goto init_adminq_free_arq;
        }

        /* pre-emptive resource lock release */
        i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
        hw->nvm_release_on_done = false;
        hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;

        ret_code = 0;

        /* success! */
        goto init_adminq_exit;

init_adminq_free_arq:
        i40e_shutdown_arq(hw);
init_adminq_free_asq:
        i40e_shutdown_asq(hw);
init_adminq_destroy_locks:

init_adminq_exit:
        return ret_code;
}
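
/* Illustrative sketch (modeled on the PF probe path; the constants shown
 * are the caller's choice): before i40e_init_adminq() is called, the
 * driver fills in the mandatory hw->aq fields, roughly:
 *
 *      hw->aq.num_arq_entries = I40E_AQ_LEN;
 *      hw->aq.num_asq_entries = I40E_AQ_LEN;
 *      hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *      hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
 *
 *      err = i40e_init_adminq(hw);
 */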

/**
 *  i40e_shutdown_adminq - shutdown routine for the Admin Queue
 *  @hw: pointer to the hardware structure
 **/
void i40e_shutdown_adminq(struct i40e_hw *hw)
{
        if (i40e_check_asq_alive(hw))
                i40e_aq_queue_shutdown(hw, true);

        i40e_shutdown_asq(hw);
        i40e_shutdown_arq(hw);

        if (hw->nvm_buff.va)
                i40e_free_virt_mem(hw, &hw->nvm_buff);
}

/**
 *  i40e_clean_asq - cleans Admin send queue
 *  @hw: pointer to the hardware structure
 *
 *  returns the number of free desc
 **/
static u16 i40e_clean_asq(struct i40e_hw *hw)
{
        struct i40e_adminq_ring *asq = &(hw->aq.asq);
        struct i40e_asq_cmd_details *details;
        u16 ntc = asq->next_to_clean;
        struct i40e_aq_desc desc_cb;
        struct i40e_aq_desc *desc;

        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, I40E_PF_ATQH) != ntc) {
                i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
                           "ntc %d head %d.\n", ntc, rd32(hw, I40E_PF_ATQH));

                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
                        desc_cb = *desc;
                        cb_func(hw, &desc_cb);
                }
                memset(desc, 0, sizeof(*desc));
                memset(details, 0, sizeof(*details));
                ntc++;
                if (ntc == asq->count)
                        ntc = 0;
                desc = I40E_ADMINQ_DESC(*asq, ntc);
                details = I40E_ADMINQ_DETAILS(*asq, ntc);
        }

        asq->next_to_clean = ntc;

        return I40E_DESC_UNUSED(asq);
}

/**
 *  i40e_asq_done - check if FW has processed the Admin Send Queue
 *  @hw: pointer to the hw struct
 *
 *  Returns true if the firmware has processed all descriptors on the
 *  admin send queue. Returns false if there are still requests pending.
 **/
static bool i40e_asq_done(struct i40e_hw *hw)
{
        /* AQ designers suggest use of head for better
         * timing reliability than DD bit
         */
        return rd32(hw, I40E_PF_ATQH) == hw->aq.asq.next_to_use;
}

/**
 *  i40e_asq_send_command_atomic_exec - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *  @is_atomic_context: is the function called in an atomic context?
 *
 *  This is the main send command driver routine for the Admin Queue send
 *  queue.  It runs the queue, cleans the queue, etc.
 **/
static int
i40e_asq_send_command_atomic_exec(struct i40e_hw *hw,
                                  struct i40e_aq_desc *desc,
                                  void *buff, /* can be NULL */
                                  u16  buff_size,
                                  struct i40e_asq_cmd_details *cmd_details,
                                  bool is_atomic_context)
{
        struct i40e_dma_mem *dma_buff = NULL;
        struct i40e_asq_cmd_details *details;
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
        int status = 0;
        u32  val = 0;

        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Admin queue not initialized.\n");
                status = -EIO;
                goto asq_send_command_error;
        }

        hw->aq.asq_last_status = I40E_AQ_RC_OK;

        val = rd32(hw, I40E_PF_ATQH);
        if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: head overrun at %d\n", val);
                status = -ENOSPC;
                goto asq_send_command_error;
        }

        details = I40E_ADMINQ_DETAILS(hw->aq.asq, hw->aq.asq.next_to_use);
        if (cmd_details) {
                *details = *cmd_details;

                /* If the cmd_details are defined copy the cookie.  The
                 * cpu_to_le32 is not needed here because the data is ignored
                 * by the FW, only used by the driver
                 */
                if (details->cookie) {
                        desc->cookie_high =
                                cpu_to_le32(upper_32_bits(details->cookie));
                        desc->cookie_low =
                                cpu_to_le32(lower_32_bits(details->cookie));
                }
        } else {
                memset(details, 0, sizeof(struct i40e_asq_cmd_details));
        }

        /* clear requested flags and then set additional flags if defined */
        desc->flags &= ~cpu_to_le16(details->flags_dis);
        desc->flags |= cpu_to_le16(details->flags_ena);

        if (buff_size > hw->aq.asq_buf_size) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Invalid buffer size: %d.\n",
                           buff_size);
                status = -EINVAL;
                goto asq_send_command_error;
        }

        if (details->postpone && !details->async) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Async flag not set along with postpone flag\n");
                status = -EINVAL;
                goto asq_send_command_error;
        }

        /* call clean and check queue available function to reclaim the
         * descriptors that were processed by FW, the function returns the
         * number of desc available
         */
        /* the clean function called here could be called in a separate thread
         * in case of asynchronous completions
         */
        if (i40e_clean_asq(hw) == 0) {
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQTX: Error queue is full.\n");
                status = -ENOSPC;
                goto asq_send_command_error;
        }

        /* initialize the temp desc pointer with the right desc */
        desc_on_ring = I40E_ADMINQ_DESC(hw->aq.asq, hw->aq.asq.next_to_use);

        /* if the desc is available copy the temp desc to the right place */
        *desc_on_ring = *desc;

        /* if buff is not NULL assume indirect command */
        if (buff != NULL) {
                dma_buff = &(hw->aq.asq.r.asq_bi[hw->aq.asq.next_to_use]);
                /* copy the user buff into the respective DMA buff */
                memcpy(dma_buff->va, buff, buff_size);
                desc_on_ring->datalen = cpu_to_le16(buff_size);

                /* Update the address values in the desc with the pa value
                 * for respective buffer
                 */
                desc_on_ring->params.external.addr_high =
                                cpu_to_le32(upper_32_bits(dma_buff->pa));
                desc_on_ring->params.external.addr_low =
                                cpu_to_le32(lower_32_bits(dma_buff->pa));
        }

        /* bump the tail */
        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
                      buff, buff_size);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
                hw->aq.asq.next_to_use = 0;
        if (!details->postpone)
                wr32(hw, I40E_PF_ATQT, hw->aq.asq.next_to_use);

        /* if cmd_details are not defined or async flag is not set,
         * we need to wait for desc write back
         */
        if (!details->async && !details->postpone) {
                u32 total_delay = 0;

                do {
                        /* AQ designers suggest use of head for better
                         * timing reliability than DD bit
                         */
                        if (i40e_asq_done(hw))
                                break;

                        if (is_atomic_context)
                                udelay(50);
                        else
                                usleep_range(40, 60);

                        total_delay += 50;
                } while (total_delay < hw->aq.asq_cmd_timeout);
        }

        /* if ready, copy the desc back to temp */
        if (i40e_asq_done(hw)) {
                *desc = *desc_on_ring;
                if (buff != NULL)
                        memcpy(buff, dma_buff->va, buff_size);
                retval = le16_to_cpu(desc->retval);
                if (retval != 0) {
                        i40e_debug(hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);

                        /* strip off FW internal code */
                        retval &= 0xff;
                }
                cmd_completed = true;
                if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
                        status = 0;
                else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
                        status = -EBUSY;
                else
                        status = -EIO;
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }

        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
                   "AQTX: desc and buffer writeback:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);

        /* save writeback aq if requested */
        if (details->wb_desc)
                *details->wb_desc = *desc_on_ring;

        /* update the error if a timeout occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
                if (rd32(hw, I40E_PF_ATQLEN) & I40E_GL_ATQLEN_ATQCRIT_MASK) {
                        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: AQ Critical error.\n");
                        status = -EIO;
                } else {
                        i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Writeback timeout.\n");
                        status = -EIO;
                }
        }

asq_send_command_error:
        return status;
}

/**
 *  i40e_asq_send_command_atomic - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *  @is_atomic_context: is the function called in an atomic context?
 *
 *  Acquires the lock and calls the main send command execution
 *  routine.
 **/
int
i40e_asq_send_command_atomic(struct i40e_hw *hw,
                             struct i40e_aq_desc *desc,
                             void *buff, /* can be NULL */
                             u16  buff_size,
                             struct i40e_asq_cmd_details *cmd_details,
                             bool is_atomic_context)
{
        int status;

        mutex_lock(&hw->aq.asq_mutex);
        status = i40e_asq_send_command_atomic_exec(hw, desc, buff, buff_size,
                                                   cmd_details,
                                                   is_atomic_context);

        mutex_unlock(&hw->aq.asq_mutex);
        return status;
}

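/**
 *  i40e_asq_send_command - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *
 *  Convenience wrapper around i40e_asq_send_command_atomic() for callers
 *  that never run in atomic context.
 **/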
int
i40e_asq_send_command(struct i40e_hw *hw, struct i40e_aq_desc *desc,
                      void *buff, /* can be NULL */ u16  buff_size,
                      struct i40e_asq_cmd_details *cmd_details)
{
        return i40e_asq_send_command_atomic(hw, desc, buff, buff_size,
                                            cmd_details, false);
}

/**
 *  i40e_asq_send_command_atomic_v2 - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *  @is_atomic_context: is the function called in an atomic context?
 *  @aq_status: pointer to Admin Queue status return value
 *
 *  Acquires the lock and calls the main send command execution
 *  routine. Returns the last Admin Queue status in aq_status
 *  to avoid race conditions in access to hw->aq.asq_last_status.
 **/
int
i40e_asq_send_command_atomic_v2(struct i40e_hw *hw,
                                struct i40e_aq_desc *desc,
                                void *buff, /* can be NULL */
                                u16  buff_size,
                                struct i40e_asq_cmd_details *cmd_details,
                                bool is_atomic_context,
                                enum i40e_admin_queue_err *aq_status)
{
        int status;

        mutex_lock(&hw->aq.asq_mutex);
        status = i40e_asq_send_command_atomic_exec(hw, desc, buff,
                                                   buff_size,
                                                   cmd_details,
                                                   is_atomic_context);
        if (aq_status)
                *aq_status = hw->aq.asq_last_status;
        mutex_unlock(&hw->aq.asq_mutex);
        return status;
}

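/**
 *  i40e_asq_send_command_v2 - send command to Admin Queue
 *  @hw: pointer to the hw struct
 *  @desc: prefilled descriptor describing the command (non DMA mem)
 *  @buff: buffer to use for indirect commands
 *  @buff_size: size of buffer for indirect commands
 *  @cmd_details: pointer to command details structure
 *  @aq_status: pointer to Admin Queue status return value
 *
 *  Non-atomic wrapper around i40e_asq_send_command_atomic_v2(); also
 *  returns the last Admin Queue status in aq_status.
 **/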
int
i40e_asq_send_command_v2(struct i40e_hw *hw, struct i40e_aq_desc *desc,
                         void *buff, /* can be NULL */ u16  buff_size,
                         struct i40e_asq_cmd_details *cmd_details,
                         enum i40e_admin_queue_err *aq_status)
{
        return i40e_asq_send_command_atomic_v2(hw, desc, buff, buff_size,
                                               cmd_details, false, aq_status);
}

/**
 *  i40e_fill_default_direct_cmd_desc - AQ descriptor helper function
 *  @desc:     pointer to the temp descriptor (non DMA mem)
 *  @opcode:   the opcode can be used to decide which flags to turn off or on
 *
 *  Fill the desc with default values
 **/
void i40e_fill_default_direct_cmd_desc(struct i40e_aq_desc *desc,
                                       u16 opcode)
{
        /* zero out the desc */
        memset((void *)desc, 0, sizeof(struct i40e_aq_desc));
        desc->opcode = cpu_to_le16(opcode);
        desc->flags = cpu_to_le16(I40E_AQ_FLAG_SI);
}
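
/* Illustrative usage (hypothetical, for reference only): a direct command
 * is a stack-allocated descriptor filled by this helper and handed to the
 * send path, roughly:
 *
 *      struct i40e_aq_desc desc;
 *      int status;
 *
 *      i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_queue_shutdown);
 *      status = i40e_asq_send_command(hw, &desc, NULL, 0, NULL);
 */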

/**
 *  i40e_clean_arq_element - clean one element of the Admin Receive Queue
 *  @hw: pointer to the hw struct
 *  @e: event info from the receive descriptor, includes any buffers
 *  @pending: number of events that could be left to process
 *
 *  This function cleans one Admin Receive Queue element and returns
 *  the contents through e.  It can also return how many events are
 *  left to process through 'pending'
 **/
int i40e_clean_arq_element(struct i40e_hw *hw,
                           struct i40e_arq_event_info *e,
                           u16 *pending)
{
        u16 ntc = hw->aq.arq.next_to_clean;
        struct i40e_aq_desc *desc;
        struct i40e_dma_mem *bi;
        int ret_code = 0;
        u16 desc_idx;
        u16 datalen;
        u16 flags;
        u16 ntu;

        /* pre-clean the event info */
        memset(&e->desc, 0, sizeof(e->desc));

        /* take the lock before we start messing with the ring */
        mutex_lock(&hw->aq.arq_mutex);

        if (hw->aq.arq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Admin queue not initialized.\n");
                ret_code = -EIO;
                goto clean_arq_element_err;
        }

        /* set next_to_use to head */
        ntu = rd32(hw, I40E_PF_ARQH) & I40E_PF_ARQH_ARQH_MASK;
        if (ntu == ntc) {
                /* nothing to do - shouldn't need to update ring's values */
                ret_code = -EALREADY;
                goto clean_arq_element_out;
        }

        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;

        hw->aq.arq_last_status =
                (enum i40e_admin_queue_err)le16_to_cpu(desc->retval);
        flags = le16_to_cpu(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
                ret_code = -EIO;
                i40e_debug(hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
        }

        e->desc = *desc;
        datalen = le16_to_cpu(desc->datalen);
        e->msg_len = min(datalen, e->buf_len);
        if (e->msg_buf != NULL && (e->msg_len != 0))
                memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
                       e->msg_len);

        i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
                      hw->aq.arq_buf_size);

        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
         */
        bi = &hw->aq.arq.r.arq_bi[ntc];
        memset((void *)desc, 0, sizeof(struct i40e_aq_desc));

        desc->flags = cpu_to_le16(I40E_AQ_FLAG_BUF);
        if (hw->aq.arq_buf_size > I40E_AQ_LARGE_BUF)
                desc->flags |= cpu_to_le16(I40E_AQ_FLAG_LB);
        desc->datalen = cpu_to_le16((u16)bi->size);
        desc->params.external.addr_high = cpu_to_le32(upper_32_bits(bi->pa));
        desc->params.external.addr_low = cpu_to_le32(lower_32_bits(bi->pa));

        /* set tail = the last cleaned desc index. */
        wr32(hw, I40E_PF_ARQT, ntc);
        /* ntc is updated to tail + 1 */
        ntc++;
        if (ntc == hw->aq.num_arq_entries)
                ntc = 0;
        hw->aq.arq.next_to_clean = ntc;
        hw->aq.arq.next_to_use = ntu;

        i40e_nvmupd_check_wait_event(hw, le16_to_cpu(e->desc.opcode), &e->desc);
clean_arq_element_out:
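        /* ntu (head) may have wrapped past ntc, so the outstanding event
         * count below is the ntc-to-ntu distance modulo the ring size.
         */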
        /* Set pending if needed, unlock and return */
        if (pending)
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
clean_arq_element_err:
        mutex_unlock(&hw->aq.arq_mutex);

        return ret_code;
}
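
/* Illustrative drain loop (simplified sketch of a service-task caller;
 * buffer handling and opcode dispatch are placeholders):
 *
 *      struct i40e_arq_event_info event = {};
 *      u16 pending;
 *      int ret;
 *
 *      event.buf_len = I40E_MAX_AQ_BUF_SIZE;
 *      event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
 *      do {
 *              ret = i40e_clean_arq_element(hw, &event, &pending);
 *              if (ret == -EALREADY)
 *                      break;
 *              ...dispatch on le16_to_cpu(event.desc.opcode)...
 *      } while (pending);
 *      kfree(event.msg_buf);
 */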
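
/**
 *  i40e_resume_aq - reprogram the AQ registers after a PF reset
 *  @hw: pointer to the hardware structure
 *
 *  The AQ registers are cleared by a PF reset; restore them from the
 *  still-allocated ring memory so the init-time retry loop can proceed.
 **/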
static void i40e_resume_aq(struct i40e_hw *hw)
{
        /* Registers are reset after PF reset */
        hw->aq.asq.next_to_use = 0;
        hw->aq.asq.next_to_clean = 0;

        i40e_config_asq_regs(hw);

        hw->aq.arq.next_to_use = 0;
        hw->aq.arq.next_to_clean = 0;

        i40e_config_arq_regs(hw);
}