/* GNU Linux-libre 4.4.283-gnu1
 * releases.git: drivers/net/ethernet/intel/i40e/i40e_main.c
 */
1 /*******************************************************************************
2  *
3  * Intel Ethernet Controller XL710 Family Linux Driver
4  * Copyright(c) 2013 - 2015 Intel Corporation.
5  *
6  * This program is free software; you can redistribute it and/or modify it
7  * under the terms and conditions of the GNU General Public License,
8  * version 2, as published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope it will be useful, but WITHOUT
11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
13  * more details.
14  *
15  * You should have received a copy of the GNU General Public License along
16  * with this program.  If not, see <http://www.gnu.org/licenses/>.
17  *
18  * The full GNU General Public License is included in this distribution in
19  * the file called "COPYING".
20  *
21  * Contact Information:
22  * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
23  * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24  *
25  ******************************************************************************/
26
27 /* Local includes */
28 #include "i40e.h"
29 #include "i40e_diag.h"
30 #ifdef CONFIG_I40E_VXLAN
31 #include <net/vxlan.h>
32 #endif
33
/* driver identification strings, exported for ethtool/modinfo */
const char i40e_driver_name[] = "i40e";
static const char i40e_driver_string[] =
			"Intel(R) Ethernet Connection XL710 Network Driver";

/* suffix appended to the version string for in-kernel builds */
#define DRV_KERN "-k"

#define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 3
#define DRV_VERSION_BUILD 46
/* "major.minor.build-k", assembled at preprocessing time */
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
	     __stringify(DRV_VERSION_MINOR) "." \
	     __stringify(DRV_VERSION_BUILD)    DRV_KERN
const char i40e_driver_version_str[] = DRV_VERSION;
static const char i40e_copyright[] = "Copyright (c) 2013 - 2014 Intel Corporation.";
48
/* forward declarations for routines defined later in this file */
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi);
static void i40e_handle_reset_warning(struct i40e_pf *pf);
static int i40e_add_vsi(struct i40e_vsi *vsi);
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi);
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit);
static int i40e_setup_misc_vector(struct i40e_pf *pf);
static void i40e_determine_queue_usage(struct i40e_pf *pf);
static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
static void i40e_fdir_sb_setup(struct i40e_pf *pf);
static int i40e_veb_get_bw_info(struct i40e_veb *veb);
60
61 /* i40e_pci_tbl - PCI Device ID Table
62  *
63  * Last entry must be all 0s
64  *
65  * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
66  *   Class, Class Mask, private data (not used) }
67  */
68 static const struct pci_device_id i40e_pci_tbl[] = {
69         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_XL710), 0},
70         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QEMU), 0},
71         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_A), 0},
72         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_B), 0},
73         {PCI_VDEVICE(INTEL, I40E_DEV_ID_KX_C), 0},
74         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_A), 0},
75         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_B), 0},
76         {PCI_VDEVICE(INTEL, I40E_DEV_ID_QSFP_C), 0},
77         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T), 0},
78         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T4), 0},
79         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
80         {PCI_VDEVICE(INTEL, I40E_DEV_ID_SFP_X722), 0},
81         {PCI_VDEVICE(INTEL, I40E_DEV_ID_1G_BASE_T_X722), 0},
82         {PCI_VDEVICE(INTEL, I40E_DEV_ID_10G_BASE_T_X722), 0},
83         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2), 0},
84         {PCI_VDEVICE(INTEL, I40E_DEV_ID_20G_KR2_A), 0},
85         /* required last entry */
86         {0, }
87 };
88 MODULE_DEVICE_TABLE(pci, i40e_pci_tbl);
89
/* upper bound on SR-IOV virtual functions supported by this driver */
#define I40E_MAX_VF_COUNT 128
/* "debug" module parameter; -1 keeps the driver's default level */
static int debug = -1;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");

MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
MODULE_DESCRIPTION("Intel(R) Ethernet Connection XL710 Network Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
99
100 /**
101  * i40e_allocate_dma_mem_d - OS specific memory alloc for shared code
102  * @hw:   pointer to the HW structure
103  * @mem:  ptr to mem struct to fill out
104  * @size: size of memory requested
105  * @alignment: what to align the allocation to
106  **/
107 int i40e_allocate_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem,
108                             u64 size, u32 alignment)
109 {
110         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
111
112         mem->size = ALIGN(size, alignment);
113         mem->va = dma_zalloc_coherent(&pf->pdev->dev, mem->size,
114                                       &mem->pa, GFP_KERNEL);
115         if (!mem->va)
116                 return -ENOMEM;
117
118         return 0;
119 }
120
121 /**
122  * i40e_free_dma_mem_d - OS specific memory free for shared code
123  * @hw:   pointer to the HW structure
124  * @mem:  ptr to mem struct to free
125  **/
126 int i40e_free_dma_mem_d(struct i40e_hw *hw, struct i40e_dma_mem *mem)
127 {
128         struct i40e_pf *pf = (struct i40e_pf *)hw->back;
129
130         dma_free_coherent(&pf->pdev->dev, mem->size, mem->va, mem->pa);
131         mem->va = NULL;
132         mem->pa = 0;
133         mem->size = 0;
134
135         return 0;
136 }
137
138 /**
139  * i40e_allocate_virt_mem_d - OS specific memory alloc for shared code
140  * @hw:   pointer to the HW structure
141  * @mem:  ptr to mem struct to fill out
142  * @size: size of memory requested
143  **/
144 int i40e_allocate_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem,
145                              u32 size)
146 {
147         mem->size = size;
148         mem->va = kzalloc(size, GFP_KERNEL);
149
150         if (!mem->va)
151                 return -ENOMEM;
152
153         return 0;
154 }
155
156 /**
157  * i40e_free_virt_mem_d - OS specific memory free for shared code
158  * @hw:   pointer to the HW structure
159  * @mem:  ptr to mem struct to free
160  **/
161 int i40e_free_virt_mem_d(struct i40e_hw *hw, struct i40e_virt_mem *mem)
162 {
163         /* it's ok to kfree a NULL pointer */
164         kfree(mem->va);
165         mem->va = NULL;
166         mem->size = 0;
167
168         return 0;
169 }
170
/**
 * i40e_get_lump - find a lump of free generic resource
 * @pf: board private structure
 * @pile: the pile of resource to search
 * @needed: the number of items needed
 * @id: an owner id to stick on the items assigned
 *
 * Returns the base item index of the lump, or negative for error
 *
 * The search_hint trick and lack of advanced fit-finding only work
 * because we're highly likely to have all the same size lump requests.
 * Linear search time and any fragmentation should be minimal.
 **/
static int i40e_get_lump(struct i40e_pf *pf, struct i40e_lump_tracking *pile,
			 u16 needed, u16 id)
{
	int ret = -ENOMEM;
	int i, j;

	/* owner ids must fit below the VALID marker bit */
	if (!pile || needed == 0 || id >= I40E_PILE_VALID_BIT) {
		dev_info(&pf->pdev->dev,
			 "param err: pile=%p needed=%d id=0x%04x\n",
			 pile, needed, id);
		return -EINVAL;
	}

	/* start the linear search with an imperfect hint */
	i = pile->search_hint;
	while (i < pile->num_entries) {
		/* skip already allocated entries */
		if (pile->list[i] & I40E_PILE_VALID_BIT) {
			i++;
			continue;
		}

		/* do we have enough in this lump? */
		for (j = 0; (j < needed) && ((i+j) < pile->num_entries); j++) {
			if (pile->list[i+j] & I40E_PILE_VALID_BIT)
				break;
		}

		if (j == needed) {
			/* there was enough, so assign it to the requestor */
			for (j = 0; j < needed; j++)
				pile->list[i+j] = id | I40E_PILE_VALID_BIT;
			ret = i;
			/* next search resumes just past this lump */
			pile->search_hint = i + j;
			break;
		}

		/* not enough, so skip over it and continue looking */
		i += j;
	}

	return ret;
}
227
228 /**
229  * i40e_put_lump - return a lump of generic resource
230  * @pile: the pile of resource to search
231  * @index: the base item index
232  * @id: the owner id of the items assigned
233  *
234  * Returns the count of items in the lump
235  **/
236 static int i40e_put_lump(struct i40e_lump_tracking *pile, u16 index, u16 id)
237 {
238         int valid_id = (id | I40E_PILE_VALID_BIT);
239         int count = 0;
240         int i;
241
242         if (!pile || index >= pile->num_entries)
243                 return -EINVAL;
244
245         for (i = index;
246              i < pile->num_entries && pile->list[i] == valid_id;
247              i++) {
248                 pile->list[i] = 0;
249                 count++;
250         }
251
252         if (count && index < pile->search_hint)
253                 pile->search_hint = index;
254
255         return count;
256 }
257
258 /**
259  * i40e_find_vsi_from_id - searches for the vsi with the given id
260  * @pf - the pf structure to search for the vsi
261  * @id - id of the vsi it is searching for
262  **/
263 struct i40e_vsi *i40e_find_vsi_from_id(struct i40e_pf *pf, u16 id)
264 {
265         int i;
266
267         for (i = 0; i < pf->num_alloc_vsi; i++)
268                 if (pf->vsi[i] && (pf->vsi[i]->id == id))
269                         return pf->vsi[i];
270
271         return NULL;
272 }
273
274 /**
275  * i40e_service_event_schedule - Schedule the service task to wake up
276  * @pf: board private structure
277  *
278  * If not already scheduled, this puts the task into the work queue
279  **/
280 static void i40e_service_event_schedule(struct i40e_pf *pf)
281 {
282         if (!test_bit(__I40E_DOWN, &pf->state) &&
283             !test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) &&
284             !test_and_set_bit(__I40E_SERVICE_SCHED, &pf->state))
285                 schedule_work(&pf->service_task);
286 }
287
/**
 * i40e_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 *
 * If any port has noticed a Tx timeout, it is likely that the whole
 * device is munged, not just the one netdev port, so go for the full
 * reset.
 **/
#ifdef I40E_FCOE
void i40e_tx_timeout(struct net_device *netdev)
#else
static void i40e_tx_timeout(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	struct i40e_ring *tx_ring = NULL;
	unsigned int i, hung_queue = 0;
	u32 head, val;

	pf->tx_timeout_count++;

	/* find the stopped queue the same way the stack does */
	for (i = 0; i < netdev->num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(netdev, i);
		/* per-queue trans_start, falling back to the device-wide one */
		trans_start = q->trans_start ? : netdev->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + netdev->watchdog_timeo))) {
			hung_queue = i;
			break;
		}
	}

	if (i == netdev->num_tx_queues) {
		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
	} else {
		/* now that we have an index, find the tx_ring struct */
		for (i = 0; i < vsi->num_queue_pairs; i++) {
			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
				if (hung_queue ==
				    vsi->tx_rings[i]->queue_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
			}
		}
	}

	/* restart escalation after 20s of quiet; otherwise throttle
	 * repeated timeouts to one action per watchdog period
	 */
	if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
		pf->tx_timeout_recovery_level = 1;  /* reset after some time */
	else if (time_before(jiffies,
		      (pf->tx_timeout_last_recovery + netdev->watchdog_timeo)))
		return;   /* don't do any new action before the next timeout */

	if (tx_ring) {
		head = i40e_get_head(tx_ring);
		/* Read interrupt register */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			val = rd32(&pf->hw,
			     I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
						tx_ring->vsi->base_vector - 1));
		else
			val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

		netdev_info(netdev, "tx_timeout: VSI_seid: %d, Q %d, NTC: 0x%x, HWB: 0x%x, NTU: 0x%x, TAIL: 0x%x, INT: 0x%x\n",
			    vsi->seid, hung_queue, tx_ring->next_to_clean,
			    head, tx_ring->next_to_use,
			    readl(tx_ring->tail), val);
	}

	pf->tx_timeout_last_recovery = jiffies;
	netdev_info(netdev, "tx_timeout recovery level %d, hung_queue %d\n",
		    pf->tx_timeout_recovery_level, hung_queue);

	/* escalating recovery: PF reset -> core reset -> global reset */
	switch (pf->tx_timeout_recovery_level) {
	case 1:
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		break;
	case 2:
		set_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
		break;
	case 3:
		set_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
		break;
	default:
		netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
		break;
	}

	i40e_service_event_schedule(pf);
	pf->tx_timeout_recovery_level++;
}
385
/**
 * i40e_release_rx_desc - Store the new tail and head values
 * @rx_ring: ring to bump
 * @val: new next_to_use value, also written to the ring's tail register
 **/
static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
{
	rx_ring->next_to_use = val;

	/* Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();
	writel(val, rx_ring->tail);
}
403
/**
 * i40e_get_vsi_stats_struct - Get System Network Statistics
 * @vsi: the VSI we care about
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
{
	/* stats live inside the VSI; callers receive a direct pointer */
	return &vsi->net_stats;
}
415
/**
 * i40e_get_netdev_stats_struct - Get statistics for netdev interface
 * @netdev: network interface device structure
 * @stats: caller-provided buffer that receives the aggregated counters
 *
 * Returns the address of the device statistics structure.
 * The statistics are actually updated from the service task.
 **/
#ifdef I40E_FCOE
struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#else
static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
					     struct net_device *netdev,
					     struct rtnl_link_stats64 *stats)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_ring *tx_ring, *rx_ring;
	struct i40e_vsi *vsi = np->vsi;
	struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
	int i;

	/* nothing to gather while the VSI is down or rings are gone */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return stats;

	if (!vsi->tx_rings)
		return stats;

	rcu_read_lock();
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		u64 bytes, packets;
		unsigned int start;

		tx_ring = ACCESS_ONCE(vsi->tx_rings[i]);
		if (!tx_ring)
			continue;

		/* u64_stats loop: retry if the writer updated mid-read */
		do {
			start = u64_stats_fetch_begin_irq(&tx_ring->syncp);
			packets = tx_ring->stats.packets;
			bytes   = tx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&tx_ring->syncp, start));

		stats->tx_packets += packets;
		stats->tx_bytes   += bytes;
		/* assumes the rx ring is allocated immediately after its
		 * paired tx ring -- NOTE(review): confirm against the ring
		 * allocation code
		 */
		rx_ring = &tx_ring[1];

		do {
			start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
			packets = rx_ring->stats.packets;
			bytes   = rx_ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));

		stats->rx_packets += packets;
		stats->rx_bytes   += bytes;
	}
	rcu_read_unlock();

	/* following stats updated by i40e_watchdog_subtask() */
	stats->multicast	= vsi_stats->multicast;
	stats->tx_errors	= vsi_stats->tx_errors;
	stats->tx_dropped	= vsi_stats->tx_dropped;
	stats->rx_errors	= vsi_stats->rx_errors;
	stats->rx_dropped	= vsi_stats->rx_dropped;
	stats->rx_crc_errors	= vsi_stats->rx_crc_errors;
	stats->rx_length_errors = vsi_stats->rx_length_errors;

	return stats;
}
486
487 /**
488  * i40e_vsi_reset_stats - Resets all stats of the given vsi
489  * @vsi: the VSI to have its stats reset
490  **/
491 void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
492 {
493         struct rtnl_link_stats64 *ns;
494         int i;
495
496         if (!vsi)
497                 return;
498
499         ns = i40e_get_vsi_stats_struct(vsi);
500         memset(ns, 0, sizeof(*ns));
501         memset(&vsi->net_stats_offsets, 0, sizeof(vsi->net_stats_offsets));
502         memset(&vsi->eth_stats, 0, sizeof(vsi->eth_stats));
503         memset(&vsi->eth_stats_offsets, 0, sizeof(vsi->eth_stats_offsets));
504         if (vsi->rx_rings && vsi->rx_rings[0]) {
505                 for (i = 0; i < vsi->num_queue_pairs; i++) {
506                         memset(&vsi->rx_rings[i]->stats, 0,
507                                sizeof(vsi->rx_rings[i]->stats));
508                         memset(&vsi->rx_rings[i]->rx_stats, 0,
509                                sizeof(vsi->rx_rings[i]->rx_stats));
510                         memset(&vsi->tx_rings[i]->stats, 0,
511                                sizeof(vsi->tx_rings[i]->stats));
512                         memset(&vsi->tx_rings[i]->tx_stats, 0,
513                                sizeof(vsi->tx_rings[i]->tx_stats));
514                 }
515         }
516         vsi->stat_offsets_loaded = false;
517 }
518
519 /**
520  * i40e_pf_reset_stats - Reset all of the stats for the given PF
521  * @pf: the PF to be reset
522  **/
523 void i40e_pf_reset_stats(struct i40e_pf *pf)
524 {
525         int i;
526
527         memset(&pf->stats, 0, sizeof(pf->stats));
528         memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
529         pf->stat_offsets_loaded = false;
530
531         for (i = 0; i < I40E_MAX_VEB; i++) {
532                 if (pf->veb[i]) {
533                         memset(&pf->veb[i]->stats, 0,
534                                sizeof(pf->veb[i]->stats));
535                         memset(&pf->veb[i]->stats_offsets, 0,
536                                sizeof(pf->veb[i]->stats_offsets));
537                         pf->veb[i]->stat_offsets_loaded = false;
538                 }
539         }
540 }
541
542 /**
543  * i40e_stat_update48 - read and update a 48 bit stat from the chip
544  * @hw: ptr to the hardware info
545  * @hireg: the high 32 bit reg to read
546  * @loreg: the low 32 bit reg to read
547  * @offset_loaded: has the initial offset been loaded yet
548  * @offset: ptr to current offset value
549  * @stat: ptr to the stat
550  *
551  * Since the device stats are not reset at PFReset, they likely will not
552  * be zeroed when the driver starts.  We'll save the first values read
553  * and use them as offsets to be subtracted from the raw values in order
554  * to report stats that count from zero.  In the process, we also manage
555  * the potential roll-over.
556  **/
557 static void i40e_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
558                                bool offset_loaded, u64 *offset, u64 *stat)
559 {
560         u64 new_data;
561
562         if (hw->device_id == I40E_DEV_ID_QEMU) {
563                 new_data = rd32(hw, loreg);
564                 new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
565         } else {
566                 new_data = rd64(hw, loreg);
567         }
568         if (!offset_loaded)
569                 *offset = new_data;
570         if (likely(new_data >= *offset))
571                 *stat = new_data - *offset;
572         else
573                 *stat = (new_data + BIT_ULL(48)) - *offset;
574         *stat &= 0xFFFFFFFFFFFFULL;
575 }
576
577 /**
578  * i40e_stat_update32 - read and update a 32 bit stat from the chip
579  * @hw: ptr to the hardware info
580  * @reg: the hw reg to read
581  * @offset_loaded: has the initial offset been loaded yet
582  * @offset: ptr to current offset value
583  * @stat: ptr to the stat
584  **/
585 static void i40e_stat_update32(struct i40e_hw *hw, u32 reg,
586                                bool offset_loaded, u64 *offset, u64 *stat)
587 {
588         u32 new_data;
589
590         new_data = rd32(hw, reg);
591         if (!offset_loaded)
592                 *offset = new_data;
593         if (likely(new_data >= *offset))
594                 *stat = (u32)(new_data - *offset);
595         else
596                 *stat = (u32)((new_data + BIT_ULL(32)) - *offset);
597 }
598
599 /**
600  * i40e_update_eth_stats - Update VSI-specific ethernet statistics counters.
601  * @vsi: the VSI to be updated
602  **/
603 void i40e_update_eth_stats(struct i40e_vsi *vsi)
604 {
605         int stat_idx = le16_to_cpu(vsi->info.stat_counter_idx);
606         struct i40e_pf *pf = vsi->back;
607         struct i40e_hw *hw = &pf->hw;
608         struct i40e_eth_stats *oes;
609         struct i40e_eth_stats *es;     /* device's eth stats */
610
611         es = &vsi->eth_stats;
612         oes = &vsi->eth_stats_offsets;
613
614         /* Gather up the stats that the hw collects */
615         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
616                            vsi->stat_offsets_loaded,
617                            &oes->tx_errors, &es->tx_errors);
618         i40e_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
619                            vsi->stat_offsets_loaded,
620                            &oes->rx_discards, &es->rx_discards);
621         i40e_stat_update32(hw, I40E_GLV_RUPP(stat_idx),
622                            vsi->stat_offsets_loaded,
623                            &oes->rx_unknown_protocol, &es->rx_unknown_protocol);
624         i40e_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
625                            vsi->stat_offsets_loaded,
626                            &oes->tx_errors, &es->tx_errors);
627
628         i40e_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
629                            I40E_GLV_GORCL(stat_idx),
630                            vsi->stat_offsets_loaded,
631                            &oes->rx_bytes, &es->rx_bytes);
632         i40e_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
633                            I40E_GLV_UPRCL(stat_idx),
634                            vsi->stat_offsets_loaded,
635                            &oes->rx_unicast, &es->rx_unicast);
636         i40e_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
637                            I40E_GLV_MPRCL(stat_idx),
638                            vsi->stat_offsets_loaded,
639                            &oes->rx_multicast, &es->rx_multicast);
640         i40e_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
641                            I40E_GLV_BPRCL(stat_idx),
642                            vsi->stat_offsets_loaded,
643                            &oes->rx_broadcast, &es->rx_broadcast);
644
645         i40e_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
646                            I40E_GLV_GOTCL(stat_idx),
647                            vsi->stat_offsets_loaded,
648                            &oes->tx_bytes, &es->tx_bytes);
649         i40e_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
650                            I40E_GLV_UPTCL(stat_idx),
651                            vsi->stat_offsets_loaded,
652                            &oes->tx_unicast, &es->tx_unicast);
653         i40e_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
654                            I40E_GLV_MPTCL(stat_idx),
655                            vsi->stat_offsets_loaded,
656                            &oes->tx_multicast, &es->tx_multicast);
657         i40e_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
658                            I40E_GLV_BPTCL(stat_idx),
659                            vsi->stat_offsets_loaded,
660                            &oes->tx_broadcast, &es->tx_broadcast);
661         vsi->stat_offsets_loaded = true;
662 }
663
/**
 * i40e_update_veb_stats - Update Switch component statistics
 * @veb: the VEB being updated
 *
 * Reads the VEB's hardware counter registers (overall and per traffic
 * class) and folds them into veb->stats / veb->tc_stats, baselined
 * against the saved offsets.
 **/
static void i40e_update_veb_stats(struct i40e_veb *veb)
{
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	struct i40e_veb_tc_stats *veb_oes;
	struct i40e_veb_tc_stats *veb_es;
	int i, idx = 0;

	idx = veb->stats_idx;
	es = &veb->stats;
	oes = &veb->stats_offsets;
	veb_es = &veb->tc_stats;
	veb_oes = &veb->tc_stats_offsets;

	/* Gather up the stats that the hw collects */
	i40e_stat_update32(hw, I40E_GLSW_TDPC(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_discards, &es->tx_discards);
	/* RUPP is only read on revision_id > 0 parts */
	if (hw->revision_id > 0)
		i40e_stat_update32(hw, I40E_GLSW_RUPP(idx),
				   veb->stat_offsets_loaded,
				   &oes->rx_unknown_protocol,
				   &es->rx_unknown_protocol);
	i40e_stat_update48(hw, I40E_GLSW_GORCH(idx), I40E_GLSW_GORCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_bytes, &es->rx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPRCH(idx), I40E_GLSW_UPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_unicast, &es->rx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPRCH(idx), I40E_GLSW_MPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_multicast, &es->rx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPRCH(idx), I40E_GLSW_BPRCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->rx_broadcast, &es->rx_broadcast);

	i40e_stat_update48(hw, I40E_GLSW_GOTCH(idx), I40E_GLSW_GOTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_bytes, &es->tx_bytes);
	i40e_stat_update48(hw, I40E_GLSW_UPTCH(idx), I40E_GLSW_UPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_unicast, &es->tx_unicast);
	i40e_stat_update48(hw, I40E_GLSW_MPTCH(idx), I40E_GLSW_MPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_multicast, &es->tx_multicast);
	i40e_stat_update48(hw, I40E_GLSW_BPTCH(idx), I40E_GLSW_BPTCL(idx),
			   veb->stat_offsets_loaded,
			   &oes->tx_broadcast, &es->tx_broadcast);
	/* per traffic-class rx/tx packet and byte counters */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		i40e_stat_update48(hw, I40E_GLVEBTC_RPCH(i, idx),
				   I40E_GLVEBTC_RPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_packets[i],
				   &veb_es->tc_rx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_RBCH(i, idx),
				   I40E_GLVEBTC_RBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_rx_bytes[i],
				   &veb_es->tc_rx_bytes[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TPCH(i, idx),
				   I40E_GLVEBTC_TPCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_packets[i],
				   &veb_es->tc_tx_packets[i]);
		i40e_stat_update48(hw, I40E_GLVEBTC_TBCH(i, idx),
				   I40E_GLVEBTC_TBCL(i, idx),
				   veb->stat_offsets_loaded,
				   &veb_oes->tc_tx_bytes[i],
				   &veb_es->tc_tx_bytes[i]);
	}
	veb->stat_offsets_loaded = true;
}
742
#ifdef I40E_FCOE
/**
 * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
 * @vsi: the VSI that is capable of doing FCoE
 *
 * No-op unless @vsi is the FCoE VSI.  Reads the FCoE counter registers
 * and folds them into vsi->fcoe_stats against the saved offsets.
 **/
static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_fcoe_stats *ofs;
	struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
	int idx;

	if (vsi->type != I40E_VSI_FCOE)
		return;

	/* per-PF FCoE counter bank index */
	idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
	fs = &vsi->fcoe_stats;
	ofs = &vsi->fcoe_stats_offsets;

	i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
	i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
	i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
	i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
	i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_last_error, &fs->fcoe_last_error);
	i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
			   vsi->fcoe_stat_offsets_loaded,
			   &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);

	vsi->fcoe_stat_offsets_loaded = true;
}

#endif
792 /**
793  * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
794  * @pf: the corresponding PF
795  *
796  * Update the Rx XOFF counter (PAUSE frames) in link flow control mode
797  **/
798 static void i40e_update_link_xoff_rx(struct i40e_pf *pf)
799 {
800         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
801         struct i40e_hw_port_stats *nsd = &pf->stats;
802         struct i40e_hw *hw = &pf->hw;
803         u64 xoff = 0;
804
805         if ((hw->fc.current_mode != I40E_FC_FULL) &&
806             (hw->fc.current_mode != I40E_FC_RX_PAUSE))
807                 return;
808
809         xoff = nsd->link_xoff_rx;
810         i40e_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
811                            pf->stat_offsets_loaded,
812                            &osd->link_xoff_rx, &nsd->link_xoff_rx);
813
814         /* No new LFC xoff rx */
815         if (!(nsd->link_xoff_rx - xoff))
816                 return;
817
818 }
819
820 /**
821  * i40e_update_prio_xoff_rx - Update XOFF received in PFC mode
822  * @pf: the corresponding PF
823  *
824  * Update the Rx XOFF counter (PAUSE frames) in PFC mode
825  **/
826 static void i40e_update_prio_xoff_rx(struct i40e_pf *pf)
827 {
828         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
829         struct i40e_hw_port_stats *nsd = &pf->stats;
830         bool xoff[I40E_MAX_TRAFFIC_CLASS] = {false};
831         struct i40e_dcbx_config *dcb_cfg;
832         struct i40e_hw *hw = &pf->hw;
833         u16 i;
834         u8 tc;
835
836         dcb_cfg = &hw->local_dcbx_config;
837
838         /* Collect Link XOFF stats when PFC is disabled */
839         if (!dcb_cfg->pfc.pfcenable) {
840                 i40e_update_link_xoff_rx(pf);
841                 return;
842         }
843
844         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
845                 u64 prio_xoff = nsd->priority_xoff_rx[i];
846
847                 i40e_stat_update32(hw, I40E_GLPRT_PXOFFRXC(hw->port, i),
848                                    pf->stat_offsets_loaded,
849                                    &osd->priority_xoff_rx[i],
850                                    &nsd->priority_xoff_rx[i]);
851
852                 /* No new PFC xoff rx */
853                 if (!(nsd->priority_xoff_rx[i] - prio_xoff))
854                         continue;
855                 /* Get the TC for given priority */
856                 tc = dcb_cfg->etscfg.prioritytable[i];
857                 xoff[tc] = true;
858         }
859 }
860
/**
 * i40e_update_vsi_stats - Update the vsi statistics counters.
 * @vsi: the VSI to be updated
 *
 * There are a few instances where we store the same stat in a
 * couple of different structs.  This is partly because we have
 * the netdev stats that need to be filled out, which is slightly
 * different from the "eth_stats" defined by the chip and used in
 * VF communications.  We sort it out here.
 **/
static void i40e_update_vsi_stats(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct rtnl_link_stats64 *ons;
	struct rtnl_link_stats64 *ns;   /* netdev stats */
	struct i40e_eth_stats *oes;
	struct i40e_eth_stats *es;     /* device's eth stats */
	u32 tx_restart, tx_busy;
	struct i40e_ring *p;
	u32 rx_page, rx_buf;
	u64 bytes, packets;
	unsigned int start;
	u64 tx_linearize;
	u64 rx_p, rx_b;
	u64 tx_p, tx_b;
	u16 q;

	/* nothing to gather while the VSI is down or a reconfig is running */
	if (test_bit(__I40E_DOWN, &vsi->state) ||
	    test_bit(__I40E_CONFIG_BUSY, &pf->state))
		return;

	ns = i40e_get_vsi_stats_struct(vsi);
	ons = &vsi->net_stats_offsets;
	es = &vsi->eth_stats;
	oes = &vsi->eth_stats_offsets;

	/* Gather up the netdev and vsi stats that the driver collects
	 * on the fly during packet processing
	 */
	rx_b = rx_p = 0;
	tx_b = tx_p = 0;
	tx_restart = tx_busy = tx_linearize = 0;
	rx_page = 0;
	rx_buf = 0;
	/* rings are RCU-protected against teardown while we read them */
	rcu_read_lock();
	for (q = 0; q < vsi->num_queue_pairs; q++) {
		/* locate Tx ring */
		p = ACCESS_ONCE(vsi->tx_rings[q]);

		/* u64_stats seqcount loop: retry if a writer updated the
		 * counters while we were copying them
		 */
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		tx_b += bytes;
		tx_p += packets;
		tx_restart += p->tx_stats.restart_queue;
		tx_busy += p->tx_stats.tx_busy;
		tx_linearize += p->tx_stats.tx_linearize;

		/* Rx queue is part of the same block as Tx queue */
		p = &p[1];
		do {
			start = u64_stats_fetch_begin_irq(&p->syncp);
			packets = p->stats.packets;
			bytes = p->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&p->syncp, start));
		rx_b += bytes;
		rx_p += packets;
		rx_buf += p->rx_stats.alloc_buff_failed;
		rx_page += p->rx_stats.alloc_page_failed;
	}
	rcu_read_unlock();
	vsi->tx_restart = tx_restart;
	vsi->tx_busy = tx_busy;
	vsi->tx_linearize = tx_linearize;
	vsi->rx_page_failed = rx_page;
	vsi->rx_buf_failed = rx_buf;

	ns->rx_packets = rx_p;
	ns->rx_bytes = rx_b;
	ns->tx_packets = tx_p;
	ns->tx_bytes = tx_b;

	/* update netdev stats from eth stats */
	i40e_update_eth_stats(vsi);
	ons->tx_errors = oes->tx_errors;
	ns->tx_errors = es->tx_errors;
	ons->multicast = oes->rx_multicast;
	ns->multicast = es->rx_multicast;
	ons->rx_dropped = oes->rx_discards;
	ns->rx_dropped = es->rx_discards;
	ons->tx_dropped = oes->tx_discards;
	ns->tx_dropped = es->tx_discards;

	/* pull in a couple PF stats if this is the main vsi */
	if (vsi == pf->vsi[pf->lan_vsi]) {
		ns->rx_crc_errors = pf->stats.crc_errors;
		ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes;
		ns->rx_length_errors = pf->stats.rx_length_errors;
	}
}
963
964 /**
965  * i40e_update_pf_stats - Update the PF statistics counters.
966  * @pf: the PF to be updated
967  **/
968 static void i40e_update_pf_stats(struct i40e_pf *pf)
969 {
970         struct i40e_hw_port_stats *osd = &pf->stats_offsets;
971         struct i40e_hw_port_stats *nsd = &pf->stats;
972         struct i40e_hw *hw = &pf->hw;
973         u32 val;
974         int i;
975
976         i40e_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
977                            I40E_GLPRT_GORCL(hw->port),
978                            pf->stat_offsets_loaded,
979                            &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
980         i40e_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
981                            I40E_GLPRT_GOTCL(hw->port),
982                            pf->stat_offsets_loaded,
983                            &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
984         i40e_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
985                            pf->stat_offsets_loaded,
986                            &osd->eth.rx_discards,
987                            &nsd->eth.rx_discards);
988         i40e_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
989                            I40E_GLPRT_UPRCL(hw->port),
990                            pf->stat_offsets_loaded,
991                            &osd->eth.rx_unicast,
992                            &nsd->eth.rx_unicast);
993         i40e_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
994                            I40E_GLPRT_MPRCL(hw->port),
995                            pf->stat_offsets_loaded,
996                            &osd->eth.rx_multicast,
997                            &nsd->eth.rx_multicast);
998         i40e_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
999                            I40E_GLPRT_BPRCL(hw->port),
1000                            pf->stat_offsets_loaded,
1001                            &osd->eth.rx_broadcast,
1002                            &nsd->eth.rx_broadcast);
1003         i40e_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
1004                            I40E_GLPRT_UPTCL(hw->port),
1005                            pf->stat_offsets_loaded,
1006                            &osd->eth.tx_unicast,
1007                            &nsd->eth.tx_unicast);
1008         i40e_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
1009                            I40E_GLPRT_MPTCL(hw->port),
1010                            pf->stat_offsets_loaded,
1011                            &osd->eth.tx_multicast,
1012                            &nsd->eth.tx_multicast);
1013         i40e_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
1014                            I40E_GLPRT_BPTCL(hw->port),
1015                            pf->stat_offsets_loaded,
1016                            &osd->eth.tx_broadcast,
1017                            &nsd->eth.tx_broadcast);
1018
1019         i40e_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
1020                            pf->stat_offsets_loaded,
1021                            &osd->tx_dropped_link_down,
1022                            &nsd->tx_dropped_link_down);
1023
1024         i40e_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
1025                            pf->stat_offsets_loaded,
1026                            &osd->crc_errors, &nsd->crc_errors);
1027
1028         i40e_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
1029                            pf->stat_offsets_loaded,
1030                            &osd->illegal_bytes, &nsd->illegal_bytes);
1031
1032         i40e_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
1033                            pf->stat_offsets_loaded,
1034                            &osd->mac_local_faults,
1035                            &nsd->mac_local_faults);
1036         i40e_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
1037                            pf->stat_offsets_loaded,
1038                            &osd->mac_remote_faults,
1039                            &nsd->mac_remote_faults);
1040
1041         i40e_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
1042                            pf->stat_offsets_loaded,
1043                            &osd->rx_length_errors,
1044                            &nsd->rx_length_errors);
1045
1046         i40e_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
1047                            pf->stat_offsets_loaded,
1048                            &osd->link_xon_rx, &nsd->link_xon_rx);
1049         i40e_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
1050                            pf->stat_offsets_loaded,
1051                            &osd->link_xon_tx, &nsd->link_xon_tx);
1052         i40e_update_prio_xoff_rx(pf);  /* handles I40E_GLPRT_LXOFFRXC */
1053         i40e_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
1054                            pf->stat_offsets_loaded,
1055                            &osd->link_xoff_tx, &nsd->link_xoff_tx);
1056
1057         for (i = 0; i < 8; i++) {
1058                 i40e_stat_update32(hw, I40E_GLPRT_PXONRXC(hw->port, i),
1059                                    pf->stat_offsets_loaded,
1060                                    &osd->priority_xon_rx[i],
1061                                    &nsd->priority_xon_rx[i]);
1062                 i40e_stat_update32(hw, I40E_GLPRT_PXONTXC(hw->port, i),
1063                                    pf->stat_offsets_loaded,
1064                                    &osd->priority_xon_tx[i],
1065                                    &nsd->priority_xon_tx[i]);
1066                 i40e_stat_update32(hw, I40E_GLPRT_PXOFFTXC(hw->port, i),
1067                                    pf->stat_offsets_loaded,
1068                                    &osd->priority_xoff_tx[i],
1069                                    &nsd->priority_xoff_tx[i]);
1070                 i40e_stat_update32(hw,
1071                                    I40E_GLPRT_RXON2OFFCNT(hw->port, i),
1072                                    pf->stat_offsets_loaded,
1073                                    &osd->priority_xon_2_xoff[i],
1074                                    &nsd->priority_xon_2_xoff[i]);
1075         }
1076
1077         i40e_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
1078                            I40E_GLPRT_PRC64L(hw->port),
1079                            pf->stat_offsets_loaded,
1080                            &osd->rx_size_64, &nsd->rx_size_64);
1081         i40e_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
1082                            I40E_GLPRT_PRC127L(hw->port),
1083                            pf->stat_offsets_loaded,
1084                            &osd->rx_size_127, &nsd->rx_size_127);
1085         i40e_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
1086                            I40E_GLPRT_PRC255L(hw->port),
1087                            pf->stat_offsets_loaded,
1088                            &osd->rx_size_255, &nsd->rx_size_255);
1089         i40e_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
1090                            I40E_GLPRT_PRC511L(hw->port),
1091                            pf->stat_offsets_loaded,
1092                            &osd->rx_size_511, &nsd->rx_size_511);
1093         i40e_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
1094                            I40E_GLPRT_PRC1023L(hw->port),
1095                            pf->stat_offsets_loaded,
1096                            &osd->rx_size_1023, &nsd->rx_size_1023);
1097         i40e_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
1098                            I40E_GLPRT_PRC1522L(hw->port),
1099                            pf->stat_offsets_loaded,
1100                            &osd->rx_size_1522, &nsd->rx_size_1522);
1101         i40e_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
1102                            I40E_GLPRT_PRC9522L(hw->port),
1103                            pf->stat_offsets_loaded,
1104                            &osd->rx_size_big, &nsd->rx_size_big);
1105
1106         i40e_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
1107                            I40E_GLPRT_PTC64L(hw->port),
1108                            pf->stat_offsets_loaded,
1109                            &osd->tx_size_64, &nsd->tx_size_64);
1110         i40e_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
1111                            I40E_GLPRT_PTC127L(hw->port),
1112                            pf->stat_offsets_loaded,
1113                            &osd->tx_size_127, &nsd->tx_size_127);
1114         i40e_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
1115                            I40E_GLPRT_PTC255L(hw->port),
1116                            pf->stat_offsets_loaded,
1117                            &osd->tx_size_255, &nsd->tx_size_255);
1118         i40e_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
1119                            I40E_GLPRT_PTC511L(hw->port),
1120                            pf->stat_offsets_loaded,
1121                            &osd->tx_size_511, &nsd->tx_size_511);
1122         i40e_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
1123                            I40E_GLPRT_PTC1023L(hw->port),
1124                            pf->stat_offsets_loaded,
1125                            &osd->tx_size_1023, &nsd->tx_size_1023);
1126         i40e_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
1127                            I40E_GLPRT_PTC1522L(hw->port),
1128                            pf->stat_offsets_loaded,
1129                            &osd->tx_size_1522, &nsd->tx_size_1522);
1130         i40e_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
1131                            I40E_GLPRT_PTC9522L(hw->port),
1132                            pf->stat_offsets_loaded,
1133                            &osd->tx_size_big, &nsd->tx_size_big);
1134
1135         i40e_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
1136                            pf->stat_offsets_loaded,
1137                            &osd->rx_undersize, &nsd->rx_undersize);
1138         i40e_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
1139                            pf->stat_offsets_loaded,
1140                            &osd->rx_fragments, &nsd->rx_fragments);
1141         i40e_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
1142                            pf->stat_offsets_loaded,
1143                            &osd->rx_oversize, &nsd->rx_oversize);
1144         i40e_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
1145                            pf->stat_offsets_loaded,
1146                            &osd->rx_jabber, &nsd->rx_jabber);
1147
1148         /* FDIR stats */
1149         i40e_stat_update32(hw,
1150                            I40E_GLQF_PCNT(I40E_FD_ATR_STAT_IDX(pf->hw.pf_id)),
1151                            pf->stat_offsets_loaded,
1152                            &osd->fd_atr_match, &nsd->fd_atr_match);
1153         i40e_stat_update32(hw,
1154                            I40E_GLQF_PCNT(I40E_FD_SB_STAT_IDX(pf->hw.pf_id)),
1155                            pf->stat_offsets_loaded,
1156                            &osd->fd_sb_match, &nsd->fd_sb_match);
1157         i40e_stat_update32(hw,
1158                       I40E_GLQF_PCNT(I40E_FD_ATR_TUNNEL_STAT_IDX(pf->hw.pf_id)),
1159                       pf->stat_offsets_loaded,
1160                       &osd->fd_atr_tunnel_match, &nsd->fd_atr_tunnel_match);
1161
1162         val = rd32(hw, I40E_PRTPM_EEE_STAT);
1163         nsd->tx_lpi_status =
1164                        (val & I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK) >>
1165                         I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT;
1166         nsd->rx_lpi_status =
1167                        (val & I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK) >>
1168                         I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT;
1169         i40e_stat_update32(hw, I40E_PRTPM_TLPIC,
1170                            pf->stat_offsets_loaded,
1171                            &osd->tx_lpi_count, &nsd->tx_lpi_count);
1172         i40e_stat_update32(hw, I40E_PRTPM_RLPIC,
1173                            pf->stat_offsets_loaded,
1174                            &osd->rx_lpi_count, &nsd->rx_lpi_count);
1175
1176         if (pf->flags & I40E_FLAG_FD_SB_ENABLED &&
1177             !(pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED))
1178                 nsd->fd_sb_status = true;
1179         else
1180                 nsd->fd_sb_status = false;
1181
1182         if (pf->flags & I40E_FLAG_FD_ATR_ENABLED &&
1183             !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
1184                 nsd->fd_atr_status = true;
1185         else
1186                 nsd->fd_atr_status = false;
1187
1188         pf->stat_offsets_loaded = true;
1189 }
1190
1191 /**
1192  * i40e_update_stats - Update the various statistics counters.
1193  * @vsi: the VSI to be updated
1194  *
1195  * Update the various stats for this VSI and its related entities.
1196  **/
1197 void i40e_update_stats(struct i40e_vsi *vsi)
1198 {
1199         struct i40e_pf *pf = vsi->back;
1200
1201         if (vsi == pf->vsi[pf->lan_vsi])
1202                 i40e_update_pf_stats(pf);
1203
1204         i40e_update_vsi_stats(vsi);
1205 #ifdef I40E_FCOE
1206         i40e_update_fcoe_stats(vsi);
1207 #endif
1208 }
1209
1210 /**
1211  * i40e_find_filter - Search VSI filter list for specific mac/vlan filter
1212  * @vsi: the VSI to be searched
1213  * @macaddr: the MAC address
1214  * @vlan: the vlan
1215  * @is_vf: make sure its a VF filter, else doesn't matter
1216  * @is_netdev: make sure its a netdev filter, else doesn't matter
1217  *
1218  * Returns ptr to the filter object or NULL
1219  **/
1220 static struct i40e_mac_filter *i40e_find_filter(struct i40e_vsi *vsi,
1221                                                 u8 *macaddr, s16 vlan,
1222                                                 bool is_vf, bool is_netdev)
1223 {
1224         struct i40e_mac_filter *f;
1225
1226         if (!vsi || !macaddr)
1227                 return NULL;
1228
1229         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1230                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1231                     (vlan == f->vlan)    &&
1232                     (!is_vf || f->is_vf) &&
1233                     (!is_netdev || f->is_netdev))
1234                         return f;
1235         }
1236         return NULL;
1237 }
1238
1239 /**
1240  * i40e_find_mac - Find a mac addr in the macvlan filters list
1241  * @vsi: the VSI to be searched
1242  * @macaddr: the MAC address we are searching for
1243  * @is_vf: make sure its a VF filter, else doesn't matter
1244  * @is_netdev: make sure its a netdev filter, else doesn't matter
1245  *
1246  * Returns the first filter with the provided MAC address or NULL if
1247  * MAC address was not found
1248  **/
1249 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
1250                                       bool is_vf, bool is_netdev)
1251 {
1252         struct i40e_mac_filter *f;
1253
1254         if (!vsi || !macaddr)
1255                 return NULL;
1256
1257         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1258                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1259                     (!is_vf || f->is_vf) &&
1260                     (!is_netdev || f->is_netdev))
1261                         return f;
1262         }
1263         return NULL;
1264 }
1265
1266 /**
1267  * i40e_is_vsi_in_vlan - Check if VSI is in vlan mode
1268  * @vsi: the VSI to be searched
1269  *
1270  * Returns true if VSI is in vlan mode or false otherwise
1271  **/
1272 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi)
1273 {
1274         struct i40e_mac_filter *f;
1275
1276         /* Only -1 for all the filters denotes not in vlan mode
1277          * so we have to go through all the list in order to make sure
1278          */
1279         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1280                 if (f->vlan >= 0 || vsi->info.pvid)
1281                         return true;
1282         }
1283
1284         return false;
1285 }
1286
/**
 * i40e_put_mac_in_vlan - Make macvlan filters from macaddrs and vlans
 * @vsi: the VSI to be searched
 * @macaddr: the mac address to be filtered
 * @is_vf: true if it is a VF
 * @is_netdev: true if it is a netdev
 *
 * Goes through all the macvlan filters and adds a
 * macvlan filter for each unique vlan that already exists
 *
 * Returns first filter found on success, else NULL
 **/
struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
					     bool is_vf, bool is_netdev)
{
	struct i40e_mac_filter *f;

	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		/* With a port VLAN configured, force the filter onto the
		 * pvid.  NOTE(review): this rewrites f->vlan of EXISTING
		 * entries as a side effect while iterating -- confirm that
		 * is intended and that pvid carries no flag bits here.
		 */
		if (vsi->info.pvid)
			f->vlan = le16_to_cpu(vsi->info.pvid);
		/* add @macaddr on this vlan only if not already present */
		if (!i40e_find_filter(vsi, macaddr, f->vlan,
				      is_vf, is_netdev)) {
			if (!i40e_add_filter(vsi, macaddr, f->vlan,
					     is_vf, is_netdev))
				return NULL;
		}
	}

	return list_first_entry_or_null(&vsi->mac_filter_list,
					struct i40e_mac_filter, list);
}
1318
1319 /**
1320  * i40e_del_mac_all_vlan - Remove a MAC filter from all VLANS
1321  * @vsi: the VSI to be searched
1322  * @macaddr: the mac address to be removed
1323  * @is_vf: true if it is a VF
1324  * @is_netdev: true if it is a netdev
1325  *
1326  * Removes a given MAC address from a VSI, regardless of VLAN
1327  *
1328  * Returns 0 for success, or error
1329  **/
1330 int i40e_del_mac_all_vlan(struct i40e_vsi *vsi, u8 *macaddr,
1331                           bool is_vf, bool is_netdev)
1332 {
1333         struct i40e_mac_filter *f = NULL;
1334         int changed = 0;
1335
1336         WARN(!spin_is_locked(&vsi->mac_filter_list_lock),
1337              "Missing mac_filter_list_lock\n");
1338         list_for_each_entry(f, &vsi->mac_filter_list, list) {
1339                 if ((ether_addr_equal(macaddr, f->macaddr)) &&
1340                     (is_vf == f->is_vf) &&
1341                     (is_netdev == f->is_netdev)) {
1342                         f->counter--;
1343                         f->changed = true;
1344                         changed = 1;
1345                 }
1346         }
1347         if (changed) {
1348                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1349                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1350                 return 0;
1351         }
1352         return -ENOENT;
1353 }
1354
1355 /**
1356  * i40e_rm_default_mac_filter - Remove the default MAC filter set by NVM
1357  * @vsi: the PF Main VSI - inappropriate for any other VSI
1358  * @macaddr: the MAC address
1359  *
1360  * Some older firmware configurations set up a default promiscuous VLAN
1361  * filter that needs to be removed.
1362  **/
1363 static int i40e_rm_default_mac_filter(struct i40e_vsi *vsi, u8 *macaddr)
1364 {
1365         struct i40e_aqc_remove_macvlan_element_data element;
1366         struct i40e_pf *pf = vsi->back;
1367         i40e_status ret;
1368
1369         /* Only appropriate for the PF main VSI */
1370         if (vsi->type != I40E_VSI_MAIN)
1371                 return -EINVAL;
1372
1373         memset(&element, 0, sizeof(element));
1374         ether_addr_copy(element.mac_addr, macaddr);
1375         element.vlan_tag = 0;
1376         element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH |
1377                         I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
1378         ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1379         if (ret)
1380                 return -ENOENT;
1381
1382         return 0;
1383 }
1384
1385 /**
1386  * i40e_add_filter - Add a mac/vlan filter to the VSI
1387  * @vsi: the VSI to be searched
1388  * @macaddr: the MAC address
1389  * @vlan: the vlan
1390  * @is_vf: make sure its a VF filter, else doesn't matter
1391  * @is_netdev: make sure its a netdev filter, else doesn't matter
1392  *
1393  * Returns ptr to the filter object or NULL when no memory available.
1394  *
1395  * NOTE: This function is expected to be called with mac_filter_list_lock
1396  * being held.
1397  **/
1398 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
1399                                         u8 *macaddr, s16 vlan,
1400                                         bool is_vf, bool is_netdev)
1401 {
1402         struct i40e_mac_filter *f;
1403
1404         if (!vsi || !macaddr)
1405                 return NULL;
1406
1407         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1408         if (!f) {
1409                 f = kzalloc(sizeof(*f), GFP_ATOMIC);
1410                 if (!f)
1411                         goto add_filter_out;
1412
1413                 ether_addr_copy(f->macaddr, macaddr);
1414                 f->vlan = vlan;
1415                 f->changed = true;
1416
1417                 INIT_LIST_HEAD(&f->list);
1418                 list_add(&f->list, &vsi->mac_filter_list);
1419         }
1420
1421         /* increment counter and add a new flag if needed */
1422         if (is_vf) {
1423                 if (!f->is_vf) {
1424                         f->is_vf = true;
1425                         f->counter++;
1426                 }
1427         } else if (is_netdev) {
1428                 if (!f->is_netdev) {
1429                         f->is_netdev = true;
1430                         f->counter++;
1431                 }
1432         } else {
1433                 f->counter++;
1434         }
1435
1436         /* changed tells sync_filters_subtask to
1437          * push the filter down to the firmware
1438          */
1439         if (f->changed) {
1440                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1441                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1442         }
1443
1444 add_filter_out:
1445         return f;
1446 }
1447
1448 /**
1449  * i40e_del_filter - Remove a mac/vlan filter from the VSI
1450  * @vsi: the VSI to be searched
1451  * @macaddr: the MAC address
1452  * @vlan: the vlan
1453  * @is_vf: make sure it's a VF filter, else doesn't matter
1454  * @is_netdev: make sure it's a netdev filter, else doesn't matter
1455  *
1456  * NOTE: This function is expected to be called with mac_filter_list_lock
1457  * being held.
1458  **/
1459 void i40e_del_filter(struct i40e_vsi *vsi,
1460                      u8 *macaddr, s16 vlan,
1461                      bool is_vf, bool is_netdev)
1462 {
1463         struct i40e_mac_filter *f;
1464
1465         if (!vsi || !macaddr)
1466                 return;
1467
1468         f = i40e_find_filter(vsi, macaddr, vlan, is_vf, is_netdev);
1469         if (!f || f->counter == 0)
1470                 return;
1471
1472         if (is_vf) {
1473                 if (f->is_vf) {
1474                         f->is_vf = false;
1475                         f->counter--;
1476                 }
1477         } else if (is_netdev) {
1478                 if (f->is_netdev) {
1479                         f->is_netdev = false;
1480                         f->counter--;
1481                 }
1482         } else {
1483                 /* make sure we don't remove a filter in use by VF or netdev */
1484                 int min_f = 0;
1485
1486                 min_f += (f->is_vf ? 1 : 0);
1487                 min_f += (f->is_netdev ? 1 : 0);
1488
1489                 if (f->counter > min_f)
1490                         f->counter--;
1491         }
1492
1493         /* counter == 0 tells sync_filters_subtask to
1494          * remove the filter from the firmware's list
1495          */
1496         if (f->counter == 0) {
1497                 f->changed = true;
1498                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1499                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1500         }
1501 }
1502
1503 /**
1504  * i40e_set_mac - NDO callback to set mac address
1505  * @netdev: network interface device structure
1506  * @p: pointer to an address structure
1507  *
1508  * Returns 0 on success, negative on failure
1509  **/
1510 #ifdef I40E_FCOE
1511 int i40e_set_mac(struct net_device *netdev, void *p)
1512 #else
1513 static int i40e_set_mac(struct net_device *netdev, void *p)
1514 #endif
1515 {
1516         struct i40e_netdev_priv *np = netdev_priv(netdev);
1517         struct i40e_vsi *vsi = np->vsi;
1518         struct i40e_pf *pf = vsi->back;
1519         struct i40e_hw *hw = &pf->hw;
1520         struct sockaddr *addr = p;
1521         struct i40e_mac_filter *f;
1522
1523         if (!is_valid_ether_addr(addr->sa_data))
1524                 return -EADDRNOTAVAIL;
1525
1526         if (ether_addr_equal(netdev->dev_addr, addr->sa_data)) {
1527                 netdev_info(netdev, "already using mac address %pM\n",
1528                             addr->sa_data);
1529                 return 0;
1530         }
1531
1532         if (test_bit(__I40E_DOWN, &vsi->back->state) ||
1533             test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
1534                 return -EADDRNOTAVAIL;
1535
1536         if (ether_addr_equal(hw->mac.addr, addr->sa_data))
1537                 netdev_info(netdev, "returning to hw mac address %pM\n",
1538                             hw->mac.addr);
1539         else
1540                 netdev_info(netdev, "set new mac address %pM\n", addr->sa_data);
1541
1542         if (vsi->type == I40E_VSI_MAIN) {
1543                 i40e_status ret;
1544
1545                 ret = i40e_aq_mac_address_write(&vsi->back->hw,
1546                                                 I40E_AQC_WRITE_TYPE_LAA_WOL,
1547                                                 addr->sa_data, NULL);
1548                 if (ret) {
1549                         netdev_info(netdev,
1550                                     "Addr change for Main VSI failed: %d\n",
1551                                     ret);
1552                         return -EADDRNOTAVAIL;
1553                 }
1554         }
1555
1556         if (ether_addr_equal(netdev->dev_addr, hw->mac.addr)) {
1557                 struct i40e_aqc_remove_macvlan_element_data element;
1558
1559                 memset(&element, 0, sizeof(element));
1560                 ether_addr_copy(element.mac_addr, netdev->dev_addr);
1561                 element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
1562                 i40e_aq_remove_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1563         } else {
1564                 spin_lock_bh(&vsi->mac_filter_list_lock);
1565                 i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
1566                                 false, false);
1567                 spin_unlock_bh(&vsi->mac_filter_list_lock);
1568         }
1569
1570         if (ether_addr_equal(addr->sa_data, hw->mac.addr)) {
1571                 struct i40e_aqc_add_macvlan_element_data element;
1572
1573                 memset(&element, 0, sizeof(element));
1574                 ether_addr_copy(element.mac_addr, hw->mac.addr);
1575                 element.flags = cpu_to_le16(I40E_AQC_MACVLAN_ADD_PERFECT_MATCH);
1576                 i40e_aq_add_macvlan(&pf->hw, vsi->seid, &element, 1, NULL);
1577         } else {
1578                 spin_lock_bh(&vsi->mac_filter_list_lock);
1579                 f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
1580                                     false, false);
1581                 if (f)
1582                         f->is_laa = true;
1583                 spin_unlock_bh(&vsi->mac_filter_list_lock);
1584         }
1585
1586         ether_addr_copy(netdev->dev_addr, addr->sa_data);
1587         /* schedule our worker thread which will take care of
1588          * applying the new filter changes
1589          */
1590         i40e_service_event_schedule(vsi->back);
1591         return 0;
1592 }
1593
1594 /**
1595  * i40e_vsi_setup_queue_map - Setup a VSI queue map based on enabled_tc
1596  * @vsi: the VSI being setup
1597  * @ctxt: VSI context structure
1598  * @enabled_tc: Enabled TCs bitmap
1599  * @is_add: True if called before Add VSI
1600  *
1601  * Setup VSI queue mapping for enabled traffic classes.
1602  **/
1603 #ifdef I40E_FCOE
1604 void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1605                               struct i40e_vsi_context *ctxt,
1606                               u8 enabled_tc,
1607                               bool is_add)
1608 #else
1609 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
1610                                      struct i40e_vsi_context *ctxt,
1611                                      u8 enabled_tc,
1612                                      bool is_add)
1613 #endif
1614 {
1615         struct i40e_pf *pf = vsi->back;
1616         u16 sections = 0;
1617         u8 netdev_tc = 0;
1618         u16 numtc = 0;
1619         u16 qcount;
1620         u8 offset;
1621         u16 qmap;
1622         int i;
1623         u16 num_tc_qps = 0;
1624
1625         sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
1626         offset = 0;
1627
1628         if (enabled_tc && (vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
1629                 /* Find numtc from enabled TC bitmap */
1630                 for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1631                         if (enabled_tc & BIT_ULL(i)) /* TC is enabled */
1632                                 numtc++;
1633                 }
1634                 if (!numtc) {
1635                         dev_warn(&pf->pdev->dev, "DCB is enabled but no TC enabled, forcing TC0\n");
1636                         numtc = 1;
1637                 }
1638         } else {
1639                 /* At least TC0 is enabled in case of non-DCB case */
1640                 numtc = 1;
1641         }
1642
1643         vsi->tc_config.numtc = numtc;
1644         vsi->tc_config.enabled_tc = enabled_tc ? enabled_tc : 1;
1645         /* Number of queues per enabled TC */
1646         /* In MFP case we can have a much lower count of MSIx
1647          * vectors available and so we need to lower the used
1648          * q count.
1649          */
1650         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1651                 qcount = min_t(int, vsi->alloc_queue_pairs, pf->num_lan_msix);
1652         else
1653                 qcount = vsi->alloc_queue_pairs;
1654         num_tc_qps = qcount / numtc;
1655         num_tc_qps = min_t(int, num_tc_qps, i40e_pf_get_max_q_per_tc(pf));
1656
1657         /* Setup queue offset/count for all TCs for given VSI */
1658         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
1659                 /* See if the given TC is enabled for the given VSI */
1660                 if (vsi->tc_config.enabled_tc & BIT_ULL(i)) {
1661                         /* TC is enabled */
1662                         int pow, num_qps;
1663
1664                         switch (vsi->type) {
1665                         case I40E_VSI_MAIN:
1666                                 qcount = min_t(int, pf->rss_size, num_tc_qps);
1667                                 break;
1668 #ifdef I40E_FCOE
1669                         case I40E_VSI_FCOE:
1670                                 qcount = num_tc_qps;
1671                                 break;
1672 #endif
1673                         case I40E_VSI_FDIR:
1674                         case I40E_VSI_SRIOV:
1675                         case I40E_VSI_VMDQ2:
1676                         default:
1677                                 qcount = num_tc_qps;
1678                                 WARN_ON(i != 0);
1679                                 break;
1680                         }
1681                         vsi->tc_config.tc_info[i].qoffset = offset;
1682                         vsi->tc_config.tc_info[i].qcount = qcount;
1683
1684                         /* find the next higher power-of-2 of num queue pairs */
1685                         num_qps = qcount;
1686                         pow = 0;
1687                         while (num_qps && (BIT_ULL(pow) < qcount)) {
1688                                 pow++;
1689                                 num_qps >>= 1;
1690                         }
1691
1692                         vsi->tc_config.tc_info[i].netdev_tc = netdev_tc++;
1693                         qmap =
1694                             (offset << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
1695                             (pow << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT);
1696
1697                         offset += qcount;
1698                 } else {
1699                         /* TC is not enabled so set the offset to
1700                          * default queue and allocate one queue
1701                          * for the given TC.
1702                          */
1703                         vsi->tc_config.tc_info[i].qoffset = 0;
1704                         vsi->tc_config.tc_info[i].qcount = 1;
1705                         vsi->tc_config.tc_info[i].netdev_tc = 0;
1706
1707                         qmap = 0;
1708                 }
1709                 ctxt->info.tc_mapping[i] = cpu_to_le16(qmap);
1710         }
1711
1712         /* Set actual Tx/Rx queue pairs */
1713         vsi->num_queue_pairs = offset;
1714         if ((vsi->type == I40E_VSI_MAIN) && (numtc == 1)) {
1715                 if (vsi->req_queue_pairs > 0)
1716                         vsi->num_queue_pairs = vsi->req_queue_pairs;
1717                 else if (pf->flags & I40E_FLAG_MSIX_ENABLED)
1718                         vsi->num_queue_pairs = pf->num_lan_msix;
1719         }
1720
1721         /* Scheduler section valid can only be set for ADD VSI */
1722         if (is_add) {
1723                 sections |= I40E_AQ_VSI_PROP_SCHED_VALID;
1724
1725                 ctxt->info.up_enable_bits = enabled_tc;
1726         }
1727         if (vsi->type == I40E_VSI_SRIOV) {
1728                 ctxt->info.mapping_flags |=
1729                                      cpu_to_le16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
1730                 for (i = 0; i < vsi->num_queue_pairs; i++)
1731                         ctxt->info.queue_mapping[i] =
1732                                                cpu_to_le16(vsi->base_queue + i);
1733         } else {
1734                 ctxt->info.mapping_flags |=
1735                                         cpu_to_le16(I40E_AQ_VSI_QUE_MAP_CONTIG);
1736                 ctxt->info.queue_mapping[0] = cpu_to_le16(vsi->base_queue);
1737         }
1738         ctxt->info.valid_sections |= cpu_to_le16(sections);
1739 }
1740
1741 /**
1742  * i40e_set_rx_mode - NDO callback to set the netdev filters
1743  * @netdev: network interface device structure
1744  **/
1745 #ifdef I40E_FCOE
1746 void i40e_set_rx_mode(struct net_device *netdev)
1747 #else
1748 static void i40e_set_rx_mode(struct net_device *netdev)
1749 #endif
1750 {
1751         struct i40e_netdev_priv *np = netdev_priv(netdev);
1752         struct i40e_mac_filter *f, *ftmp;
1753         struct i40e_vsi *vsi = np->vsi;
1754         struct netdev_hw_addr *uca;
1755         struct netdev_hw_addr *mca;
1756         struct netdev_hw_addr *ha;
1757
1758         spin_lock_bh(&vsi->mac_filter_list_lock);
1759
1760         /* add addr if not already in the filter list */
1761         netdev_for_each_uc_addr(uca, netdev) {
1762                 if (!i40e_find_mac(vsi, uca->addr, false, true)) {
1763                         if (i40e_is_vsi_in_vlan(vsi))
1764                                 i40e_put_mac_in_vlan(vsi, uca->addr,
1765                                                      false, true);
1766                         else
1767                                 i40e_add_filter(vsi, uca->addr, I40E_VLAN_ANY,
1768                                                 false, true);
1769                 }
1770         }
1771
1772         netdev_for_each_mc_addr(mca, netdev) {
1773                 if (!i40e_find_mac(vsi, mca->addr, false, true)) {
1774                         if (i40e_is_vsi_in_vlan(vsi))
1775                                 i40e_put_mac_in_vlan(vsi, mca->addr,
1776                                                      false, true);
1777                         else
1778                                 i40e_add_filter(vsi, mca->addr, I40E_VLAN_ANY,
1779                                                 false, true);
1780                 }
1781         }
1782
1783         /* remove filter if not in netdev list */
1784         list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1785
1786                 if (!f->is_netdev)
1787                         continue;
1788
1789                 netdev_for_each_mc_addr(mca, netdev)
1790                         if (ether_addr_equal(mca->addr, f->macaddr))
1791                                 goto bottom_of_search_loop;
1792
1793                 netdev_for_each_uc_addr(uca, netdev)
1794                         if (ether_addr_equal(uca->addr, f->macaddr))
1795                                 goto bottom_of_search_loop;
1796
1797                 for_each_dev_addr(netdev, ha)
1798                         if (ether_addr_equal(ha->addr, f->macaddr))
1799                                 goto bottom_of_search_loop;
1800
1801                 /* f->macaddr wasn't found in uc, mc, or ha list so delete it */
1802                 i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY, false, true);
1803
1804 bottom_of_search_loop:
1805                 continue;
1806         }
1807         spin_unlock_bh(&vsi->mac_filter_list_lock);
1808
1809         /* check for other flag changes */
1810         if (vsi->current_netdev_flags != vsi->netdev->flags) {
1811                 vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
1812                 vsi->back->flags |= I40E_FLAG_FILTER_SYNC;
1813         }
1814 }
1815
1816 /**
1817  * i40e_mac_filter_entry_clone - Clones a MAC filter entry
1818  * @src: source MAC filter entry to be clones
1819  *
1820  * Returns the pointer to newly cloned MAC filter entry or NULL
1821  * in case of error
1822  **/
1823 static struct i40e_mac_filter *i40e_mac_filter_entry_clone(
1824                                         struct i40e_mac_filter *src)
1825 {
1826         struct i40e_mac_filter *f;
1827
1828         f = kzalloc(sizeof(*f), GFP_ATOMIC);
1829         if (!f)
1830                 return NULL;
1831         *f = *src;
1832
1833         INIT_LIST_HEAD(&f->list);
1834
1835         return f;
1836 }
1837
1838 /**
1839  * i40e_undo_del_filter_entries - Undo the changes made to MAC filter entries
1840  * @vsi: pointer to vsi struct
1841  * @from: Pointer to list which contains MAC filter entries - changes to
1842  *        those entries needs to be undone.
1843  *
1844  * MAC filter entries from list were slated to be removed from device.
1845  **/
1846 static void i40e_undo_del_filter_entries(struct i40e_vsi *vsi,
1847                                          struct list_head *from)
1848 {
1849         struct i40e_mac_filter *f, *ftmp;
1850
1851         list_for_each_entry_safe(f, ftmp, from, list) {
1852                 f->changed = true;
1853                 /* Move the element back into MAC filter list*/
1854                 list_move_tail(&f->list, &vsi->mac_filter_list);
1855         }
1856 }
1857
1858 /**
1859  * i40e_undo_add_filter_entries - Undo the changes made to MAC filter entries
1860  * @vsi: pointer to vsi struct
1861  *
1862  * MAC filter entries from list were slated to be added from device.
1863  **/
1864 static void i40e_undo_add_filter_entries(struct i40e_vsi *vsi)
1865 {
1866         struct i40e_mac_filter *f, *ftmp;
1867
1868         list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1869                 if (!f->changed && f->counter)
1870                         f->changed = true;
1871         }
1872 }
1873
1874 /**
1875  * i40e_cleanup_add_list - Deletes the element from add list and release
1876  *                      memory
1877  * @add_list: Pointer to list which contains MAC filter entries
1878  **/
1879 static void i40e_cleanup_add_list(struct list_head *add_list)
1880 {
1881         struct i40e_mac_filter *f, *ftmp;
1882
1883         list_for_each_entry_safe(f, ftmp, add_list, list) {
1884                 list_del(&f->list);
1885                 kfree(f);
1886         }
1887 }
1888
1889 /**
1890  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
1891  * @vsi: ptr to the VSI
1892  * @grab_rtnl: whether RTNL needs to be grabbed
1893  *
1894  * Push any outstanding VSI filter changes through the AdminQ.
1895  *
1896  * Returns 0 or error value
1897  **/
1898 int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
1899 {
1900         struct list_head tmp_del_list, tmp_add_list;
1901         struct i40e_mac_filter *f, *ftmp, *fclone;
1902         bool promisc_forced_on = false;
1903         bool add_happened = false;
1904         int filter_list_len = 0;
1905         u32 changed_flags = 0;
1906         bool err_cond = false;
1907         i40e_status ret = 0;
1908         struct i40e_pf *pf;
1909         int num_add = 0;
1910         int num_del = 0;
1911         int aq_err = 0;
1912         u16 cmd_flags;
1913
1914         /* empty array typed pointers, kcalloc later */
1915         struct i40e_aqc_add_macvlan_element_data *add_list;
1916         struct i40e_aqc_remove_macvlan_element_data *del_list;
1917
1918         while (test_and_set_bit(__I40E_CONFIG_BUSY, &vsi->state))
1919                 usleep_range(1000, 2000);
1920         pf = vsi->back;
1921
1922         if (vsi->netdev) {
1923                 changed_flags = vsi->current_netdev_flags ^ vsi->netdev->flags;
1924                 vsi->current_netdev_flags = vsi->netdev->flags;
1925         }
1926
1927         INIT_LIST_HEAD(&tmp_del_list);
1928         INIT_LIST_HEAD(&tmp_add_list);
1929
1930         if (vsi->flags & I40E_VSI_FLAG_FILTER_CHANGED) {
1931                 vsi->flags &= ~I40E_VSI_FLAG_FILTER_CHANGED;
1932
1933                 spin_lock_bh(&vsi->mac_filter_list_lock);
1934                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1935                         if (!f->changed)
1936                                 continue;
1937
1938                         if (f->counter != 0)
1939                                 continue;
1940                         f->changed = false;
1941
1942                         /* Move the element into temporary del_list */
1943                         list_move_tail(&f->list, &tmp_del_list);
1944                 }
1945
1946                 list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
1947                         if (!f->changed)
1948                                 continue;
1949
1950                         if (f->counter == 0)
1951                                 continue;
1952                         f->changed = false;
1953
1954                         /* Clone MAC filter entry and add into temporary list */
1955                         fclone = i40e_mac_filter_entry_clone(f);
1956                         if (!fclone) {
1957                                 err_cond = true;
1958                                 break;
1959                         }
1960                         list_add_tail(&fclone->list, &tmp_add_list);
1961                 }
1962
1963                 /* if failed to clone MAC filter entry - undo */
1964                 if (err_cond) {
1965                         i40e_undo_del_filter_entries(vsi, &tmp_del_list);
1966                         i40e_undo_add_filter_entries(vsi);
1967                 }
1968                 spin_unlock_bh(&vsi->mac_filter_list_lock);
1969
1970                 if (err_cond)
1971                         i40e_cleanup_add_list(&tmp_add_list);
1972         }
1973
1974         /* Now process 'del_list' outside the lock */
1975         if (!list_empty(&tmp_del_list)) {
1976                 int del_list_size;
1977
1978                 filter_list_len = pf->hw.aq.asq_buf_size /
1979                             sizeof(struct i40e_aqc_remove_macvlan_element_data);
1980                 del_list_size = filter_list_len *
1981                             sizeof(struct i40e_aqc_remove_macvlan_element_data);
1982                 del_list = kzalloc(del_list_size, GFP_KERNEL);
1983                 if (!del_list) {
1984                         i40e_cleanup_add_list(&tmp_add_list);
1985
1986                         /* Undo VSI's MAC filter entry element updates */
1987                         spin_lock_bh(&vsi->mac_filter_list_lock);
1988                         i40e_undo_del_filter_entries(vsi, &tmp_del_list);
1989                         i40e_undo_add_filter_entries(vsi);
1990                         spin_unlock_bh(&vsi->mac_filter_list_lock);
1991                         return -ENOMEM;
1992                 }
1993
1994                 list_for_each_entry_safe(f, ftmp, &tmp_del_list, list) {
1995                         cmd_flags = 0;
1996
1997                         /* add to delete list */
1998                         ether_addr_copy(del_list[num_del].mac_addr, f->macaddr);
1999                         del_list[num_del].vlan_tag =
2000                                 cpu_to_le16((u16)(f->vlan ==
2001                                             I40E_VLAN_ANY ? 0 : f->vlan));
2002
2003                         cmd_flags |= I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2004                         del_list[num_del].flags = cmd_flags;
2005                         num_del++;
2006
2007                         /* flush a full buffer */
2008                         if (num_del == filter_list_len) {
2009                                 ret = i40e_aq_remove_macvlan(&pf->hw,
2010                                                   vsi->seid, del_list, num_del,
2011                                                   NULL);
2012                                 aq_err = pf->hw.aq.asq_last_status;
2013                                 num_del = 0;
2014                                 memset(del_list, 0, del_list_size);
2015
2016                                 if (ret && aq_err != I40E_AQ_RC_ENOENT)
2017                                         dev_err(&pf->pdev->dev,
2018                                                 "ignoring delete macvlan error, err %s, aq_err %s while flushing a full buffer\n",
2019                                                 i40e_stat_str(&pf->hw, ret),
2020                                                 i40e_aq_str(&pf->hw, aq_err));
2021                         }
2022                         /* Release memory for MAC filter entries which were
2023                          * synced up with HW.
2024                          */
2025                         list_del(&f->list);
2026                         kfree(f);
2027                 }
2028
2029                 if (num_del) {
2030                         ret = i40e_aq_remove_macvlan(&pf->hw, vsi->seid,
2031                                                      del_list, num_del, NULL);
2032                         aq_err = pf->hw.aq.asq_last_status;
2033                         num_del = 0;
2034
2035                         if (ret && aq_err != I40E_AQ_RC_ENOENT)
2036                                 dev_info(&pf->pdev->dev,
2037                                          "ignoring delete macvlan error, err %s aq_err %s\n",
2038                                          i40e_stat_str(&pf->hw, ret),
2039                                          i40e_aq_str(&pf->hw, aq_err));
2040                 }
2041
2042                 kfree(del_list);
2043                 del_list = NULL;
2044         }
2045
2046         if (!list_empty(&tmp_add_list)) {
2047                 int add_list_size;
2048
2049                 /* do all the adds now */
2050                 filter_list_len = pf->hw.aq.asq_buf_size /
2051                                sizeof(struct i40e_aqc_add_macvlan_element_data),
2052                 add_list_size = filter_list_len *
2053                                sizeof(struct i40e_aqc_add_macvlan_element_data);
2054                 add_list = kzalloc(add_list_size, GFP_KERNEL);
2055                 if (!add_list) {
2056                         /* Purge element from temporary lists */
2057                         i40e_cleanup_add_list(&tmp_add_list);
2058
2059                         /* Undo add filter entries from VSI MAC filter list */
2060                         spin_lock_bh(&vsi->mac_filter_list_lock);
2061                         i40e_undo_add_filter_entries(vsi);
2062                         spin_unlock_bh(&vsi->mac_filter_list_lock);
2063                         return -ENOMEM;
2064                 }
2065
2066                 list_for_each_entry_safe(f, ftmp, &tmp_add_list, list) {
2067
2068                         add_happened = true;
2069                         cmd_flags = 0;
2070
2071                         /* add to add array */
2072                         ether_addr_copy(add_list[num_add].mac_addr, f->macaddr);
2073                         add_list[num_add].vlan_tag =
2074                                 cpu_to_le16(
2075                                  (u16)(f->vlan == I40E_VLAN_ANY ? 0 : f->vlan));
2076                         add_list[num_add].queue_number = 0;
2077
2078                         cmd_flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
2079                         add_list[num_add].flags = cpu_to_le16(cmd_flags);
2080                         num_add++;
2081
2082                         /* flush a full buffer */
2083                         if (num_add == filter_list_len) {
2084                                 ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
2085                                                           add_list, num_add,
2086                                                           NULL);
2087                                 aq_err = pf->hw.aq.asq_last_status;
2088                                 num_add = 0;
2089
2090                                 if (ret)
2091                                         break;
2092                                 memset(add_list, 0, add_list_size);
2093                         }
2094                         /* Entries from tmp_add_list were cloned from MAC
2095                          * filter list, hence clean those cloned entries
2096                          */
2097                         list_del(&f->list);
2098                         kfree(f);
2099                 }
2100
2101                 if (num_add) {
2102                         ret = i40e_aq_add_macvlan(&pf->hw, vsi->seid,
2103                                                   add_list, num_add, NULL);
2104                         aq_err = pf->hw.aq.asq_last_status;
2105                         num_add = 0;
2106                 }
2107                 kfree(add_list);
2108                 add_list = NULL;
2109
2110                 if (add_happened && ret && aq_err != I40E_AQ_RC_EINVAL) {
2111                         dev_info(&pf->pdev->dev,
2112                                  "add filter failed, err %s aq_err %s\n",
2113                                  i40e_stat_str(&pf->hw, ret),
2114                                  i40e_aq_str(&pf->hw, aq_err));
2115                         if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOSPC) &&
2116                             !test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2117                                       &vsi->state)) {
2118                                 promisc_forced_on = true;
2119                                 set_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2120                                         &vsi->state);
2121                                 dev_info(&pf->pdev->dev, "promiscuous mode forced on\n");
2122                         }
2123                 }
2124         }
2125
2126         /* check for changes in promiscuous modes */
2127         if (changed_flags & IFF_ALLMULTI) {
2128                 bool cur_multipromisc;
2129
2130                 cur_multipromisc = !!(vsi->current_netdev_flags & IFF_ALLMULTI);
2131                 ret = i40e_aq_set_vsi_multicast_promiscuous(&vsi->back->hw,
2132                                                             vsi->seid,
2133                                                             cur_multipromisc,
2134                                                             NULL);
2135                 if (ret)
2136                         dev_info(&pf->pdev->dev,
2137                                  "set multi promisc failed, err %s aq_err %s\n",
2138                                  i40e_stat_str(&pf->hw, ret),
2139                                  i40e_aq_str(&pf->hw,
2140                                              pf->hw.aq.asq_last_status));
2141         }
2142         if ((changed_flags & IFF_PROMISC) || promisc_forced_on) {
2143                 bool cur_promisc;
2144
2145                 cur_promisc = (!!(vsi->current_netdev_flags & IFF_PROMISC) ||
2146                                test_bit(__I40E_FILTER_OVERFLOW_PROMISC,
2147                                         &vsi->state));
2148                 if (vsi->type == I40E_VSI_MAIN && pf->lan_veb != I40E_NO_VEB) {
2149                         /* set defport ON for Main VSI instead of true promisc
2150                          * this way we will get all unicast/multicast and VLAN
2151                          * promisc behavior but will not get VF or VMDq traffic
2152                          * replicated on the Main VSI.
2153                          */
2154                         if (pf->cur_promisc != cur_promisc) {
2155                                 pf->cur_promisc = cur_promisc;
2156                                 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
2157                         }
2158                 } else {
2159                         ret = i40e_aq_set_vsi_unicast_promiscuous(
2160                                                           &vsi->back->hw,
2161                                                           vsi->seid,
2162                                                           cur_promisc, NULL);
2163                         if (ret)
2164                                 dev_info(&pf->pdev->dev,
2165                                          "set unicast promisc failed, err %d, aq_err %d\n",
2166                                          ret, pf->hw.aq.asq_last_status);
2167                         ret = i40e_aq_set_vsi_multicast_promiscuous(
2168                                                           &vsi->back->hw,
2169                                                           vsi->seid,
2170                                                           cur_promisc, NULL);
2171                         if (ret)
2172                                 dev_info(&pf->pdev->dev,
2173                                          "set multicast promisc failed, err %d, aq_err %d\n",
2174                                          ret, pf->hw.aq.asq_last_status);
2175                 }
2176                 ret = i40e_aq_set_vsi_broadcast(&vsi->back->hw,
2177                                                 vsi->seid,
2178                                                 cur_promisc, NULL);
2179                 if (ret)
2180                         dev_info(&pf->pdev->dev,
2181                                  "set brdcast promisc failed, err %s, aq_err %s\n",
2182                                  i40e_stat_str(&pf->hw, ret),
2183                                  i40e_aq_str(&pf->hw,
2184                                              pf->hw.aq.asq_last_status));
2185         }
2186
2187         clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
2188         return 0;
2189 }
2190
2191 /**
2192  * i40e_sync_filters_subtask - Sync the VSI filter list with HW
2193  * @pf: board private structure
2194  **/
2195 static void i40e_sync_filters_subtask(struct i40e_pf *pf)
2196 {
2197         int v;
2198
2199         if (!pf || !(pf->flags & I40E_FLAG_FILTER_SYNC))
2200                 return;
2201         pf->flags &= ~I40E_FLAG_FILTER_SYNC;
2202
2203         for (v = 0; v < pf->num_alloc_vsi; v++) {
2204                 if (pf->vsi[v] &&
2205                     (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
2206                         i40e_sync_vsi_filters(pf->vsi[v], true);
2207         }
2208 }
2209
2210 /**
2211  * i40e_change_mtu - NDO callback to change the Maximum Transfer Unit
2212  * @netdev: network interface device structure
2213  * @new_mtu: new value for maximum frame size
2214  *
2215  * Returns 0 on success, negative on failure
2216  **/
2217 static int i40e_change_mtu(struct net_device *netdev, int new_mtu)
2218 {
2219         struct i40e_netdev_priv *np = netdev_priv(netdev);
2220         int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
2221         struct i40e_vsi *vsi = np->vsi;
2222
2223         /* MTU < 68 is an error and causes problems on some kernels */
2224         if ((new_mtu < 68) || (max_frame > I40E_MAX_RXBUFFER))
2225                 return -EINVAL;
2226
2227         netdev_info(netdev, "changing MTU from %d to %d\n",
2228                     netdev->mtu, new_mtu);
2229         netdev->mtu = new_mtu;
2230         if (netif_running(netdev))
2231                 i40e_vsi_reinit_locked(vsi);
2232
2233         return 0;
2234 }
2235
2236 /**
2237  * i40e_ioctl - Access the hwtstamp interface
2238  * @netdev: network interface device structure
2239  * @ifr: interface request data
2240  * @cmd: ioctl command
2241  **/
2242 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2243 {
2244         struct i40e_netdev_priv *np = netdev_priv(netdev);
2245         struct i40e_pf *pf = np->vsi->back;
2246
2247         switch (cmd) {
2248         case SIOCGHWTSTAMP:
2249                 return i40e_ptp_get_ts_config(pf, ifr);
2250         case SIOCSHWTSTAMP:
2251                 return i40e_ptp_set_ts_config(pf, ifr);
2252         default:
2253                 return -EOPNOTSUPP;
2254         }
2255 }
2256
2257 /**
2258  * i40e_vlan_stripping_enable - Turn on vlan stripping for the VSI
2259  * @vsi: the vsi being adjusted
2260  **/
2261 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi)
2262 {
2263         struct i40e_vsi_context ctxt;
2264         i40e_status ret;
2265
2266         /* Don't modify stripping options if a port VLAN is active */
2267         if (vsi->info.pvid)
2268                 return;
2269
2270         if ((vsi->info.valid_sections &
2271              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2272             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_MODE_MASK) == 0))
2273                 return;  /* already enabled */
2274
2275         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2276         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2277                                     I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
2278
2279         ctxt.seid = vsi->seid;
2280         ctxt.info = vsi->info;
2281         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2282         if (ret) {
2283                 dev_info(&vsi->back->pdev->dev,
2284                          "update vlan stripping failed, err %s aq_err %s\n",
2285                          i40e_stat_str(&vsi->back->hw, ret),
2286                          i40e_aq_str(&vsi->back->hw,
2287                                      vsi->back->hw.aq.asq_last_status));
2288         }
2289 }
2290
2291 /**
2292  * i40e_vlan_stripping_disable - Turn off vlan stripping for the VSI
2293  * @vsi: the vsi being adjusted
2294  **/
2295 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi)
2296 {
2297         struct i40e_vsi_context ctxt;
2298         i40e_status ret;
2299
2300         /* Don't modify stripping options if a port VLAN is active */
2301         if (vsi->info.pvid)
2302                 return;
2303
2304         if ((vsi->info.valid_sections &
2305              cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID)) &&
2306             ((vsi->info.port_vlan_flags & I40E_AQ_VSI_PVLAN_EMOD_MASK) ==
2307              I40E_AQ_VSI_PVLAN_EMOD_MASK))
2308                 return;  /* already disabled */
2309
2310         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2311         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2312                                     I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2313
2314         ctxt.seid = vsi->seid;
2315         ctxt.info = vsi->info;
2316         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2317         if (ret) {
2318                 dev_info(&vsi->back->pdev->dev,
2319                          "update vlan stripping failed, err %s aq_err %s\n",
2320                          i40e_stat_str(&vsi->back->hw, ret),
2321                          i40e_aq_str(&vsi->back->hw,
2322                                      vsi->back->hw.aq.asq_last_status));
2323         }
2324 }
2325
2326 /**
2327  * i40e_vlan_rx_register - Setup or shutdown vlan offload
2328  * @netdev: network interface to be adjusted
2329  * @features: netdev features to test if VLAN offload is enabled or not
2330  **/
2331 static void i40e_vlan_rx_register(struct net_device *netdev, u32 features)
2332 {
2333         struct i40e_netdev_priv *np = netdev_priv(netdev);
2334         struct i40e_vsi *vsi = np->vsi;
2335
2336         if (features & NETIF_F_HW_VLAN_CTAG_RX)
2337                 i40e_vlan_stripping_enable(vsi);
2338         else
2339                 i40e_vlan_stripping_disable(vsi);
2340 }
2341
/**
 * i40e_vsi_add_vlan - Add vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be added (0 = untagged only , -1 = any)
 *
 * Adds a filter for @vid on the netdev's own MAC address and on every
 * MAC address already in the VSI filter list.  When the first real
 * (non-zero) VLAN id is added, any existing "any VLAN" (-1) filters
 * are demoted to VLAN 0 so only untagged plus explicitly configured
 * tags are accepted.  The HW is not touched here; the service task
 * applies the list changes later.
 *
 * Returns 0 on success, -ENOMEM if a filter could not be allocated.
 **/
int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct i40e_mac_filter *f, *add_f;
	bool is_netdev, is_vf;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(vsi->netdev);

	/* Locked once because all functions invoked below iterates list*/
	spin_lock_bh(&vsi->mac_filter_list_lock);

	/* pair the new VLAN with the netdev's own MAC address first */
	if (is_netdev) {
		add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, vid,
					is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, vsi->netdev->dev_addr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	/* replicate the new VLAN across every MAC already in the list */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		add_f = i40e_add_filter(vsi, f->macaddr, vid, is_vf, is_netdev);
		if (!add_f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add vlan filter %d for %pM\n",
				 vid, f->macaddr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	/* Now if we add a vlan tag, make sure to check if it is the first
	 * tag (i.e. a "tag" -1 does exist) and if so replace the -1 "tag"
	 * with 0, so we now accept untagged and specified tagged traffic
	 * (and not any taged and untagged)
	 */
	if (vid > 0) {
		if (is_netdev && i40e_find_filter(vsi, vsi->netdev->dev_addr,
						  I40E_VLAN_ANY,
						  is_vf, is_netdev)) {
			i40e_del_filter(vsi, vsi->netdev->dev_addr,
					I40E_VLAN_ANY, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, vsi->netdev->dev_addr, 0,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					 vsi->netdev->dev_addr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	/* Do not assume that I40E_VLAN_ANY should be reset to VLAN 0 */
	if (vid > 0 && !vsi->info.pvid) {
		/* demote any remaining "any VLAN" filters to VLAN 0 */
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			if (!i40e_find_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					      is_vf, is_netdev))
				continue;
			i40e_del_filter(vsi, f->macaddr, I40E_VLAN_ANY,
					is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr,
						0, is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter 0 for %pM\n",
					f->macaddr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
2432
/**
 * i40e_vsi_kill_vlan - Remove vsi membership for given vlan
 * @vsi: the vsi being configured
 * @vid: vlan id to be removed (0 = untagged only , -1 = any)
 *
 * Removes the @vid filter from the netdev's MAC address and from every
 * MAC in the VSI filter list.  If no VLAN-tagged filters remain
 * afterwards, the VLAN-0 filters are promoted back to "any VLAN" (-1)
 * so all traffic (tagged or untagged) is accepted again.  The HW is
 * updated later by the service task.
 *
 * Return: 0 on success or negative otherwise
 **/
int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
{
	struct net_device *netdev = vsi->netdev;
	struct i40e_mac_filter *f, *add_f;
	bool is_vf, is_netdev;
	int filter_count = 0;

	is_vf = (vsi->type == I40E_VSI_SRIOV);
	is_netdev = !!(netdev);

	/* Locked once because all functions invoked below iterates list */
	spin_lock_bh(&vsi->mac_filter_list_lock);

	if (is_netdev)
		i40e_del_filter(vsi, netdev->dev_addr, vid, is_vf, is_netdev);

	list_for_each_entry(f, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, vid, is_vf, is_netdev);

	/* go through all the filters for this VSI and if there is only
	 * vid == 0 it means there are no other filters, so vid 0 must
	 * be replaced with -1. This signifies that we should from now
	 * on accept any traffic (with any tag present, or untagged)
	 */
	list_for_each_entry(f, &vsi->mac_filter_list, list) {
		/* a filter on the netdev address increments the count in
		 * both branches; only zero vs non-zero is tested below
		 */
		if (is_netdev) {
			if (f->vlan &&
			    ether_addr_equal(netdev->dev_addr, f->macaddr))
				filter_count++;
		}

		if (f->vlan)
			filter_count++;
	}

	/* no tagged filters left: widen the netdev MAC back to any VLAN */
	if (!filter_count && is_netdev) {
		i40e_del_filter(vsi, netdev->dev_addr, 0, is_vf, is_netdev);
		f = i40e_add_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
				    is_vf, is_netdev);
		if (!f) {
			dev_info(&vsi->back->pdev->dev,
				 "Could not add filter %d for %pM\n",
				 I40E_VLAN_ANY, netdev->dev_addr);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
			return -ENOMEM;
		}
	}

	/* likewise widen every other MAC in the list back to any VLAN */
	if (!filter_count) {
		list_for_each_entry(f, &vsi->mac_filter_list, list) {
			i40e_del_filter(vsi, f->macaddr, 0, is_vf, is_netdev);
			add_f = i40e_add_filter(vsi, f->macaddr, I40E_VLAN_ANY,
						is_vf, is_netdev);
			if (!add_f) {
				dev_info(&vsi->back->pdev->dev,
					 "Could not add filter %d for %pM\n",
					 I40E_VLAN_ANY, f->macaddr);
				spin_unlock_bh(&vsi->mac_filter_list_lock);
				return -ENOMEM;
			}
		}
	}

	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* schedule our worker thread which will take care of
	 * applying the new filter changes
	 */
	i40e_service_event_schedule(vsi->back);
	return 0;
}
2511
/**
 * i40e_vlan_rx_add_vid - Add a vlan id filter to HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be added
 *
 * net_device_ops implementation for adding vlan ids
 *
 * Returns 0 on success (including vid 0, which needs no extra filter),
 * -EINVAL for an out-of-range vid, or the i40e_vsi_add_vlan() error.
 **/
#ifdef I40E_FCOE
int i40e_vlan_rx_add_vid(struct net_device *netdev,
			 __always_unused __be16 proto, u16 vid)
#else
static int i40e_vlan_rx_add_vid(struct net_device *netdev,
				__always_unused __be16 proto, u16 vid)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	int ret = 0;

	/* valid 802.1Q VLAN ids are 0..4095 */
	if (vid > 4095)
		return -EINVAL;

	netdev_info(netdev, "adding %pM vid=%d\n", netdev->dev_addr, vid);

	/* If the network stack called us with vid = 0 then
	 * it is asking to receive priority tagged packets with
	 * vlan id 0.  Our HW receives them by default when configured
	 * to receive untagged packets so there is no need to add an
	 * extra filter for vlan 0 tagged packets.
	 */
	if (vid)
		ret = i40e_vsi_add_vlan(vsi, vid);

	/* track the vid so i40e_restore_vlan() can replay it */
	if (!ret && (vid < VLAN_N_VID))
		set_bit(vid, vsi->active_vlans);

	return ret;
}
2550
/**
 * i40e_vlan_rx_kill_vid - Remove a vlan id filter from HW offload
 * @netdev: network interface to be adjusted
 * @vid: vlan id to be removed
 *
 * net_device_ops implementation for removing vlan ids
 *
 * Always returns 0; removal failures are only logged.
 **/
#ifdef I40E_FCOE
int i40e_vlan_rx_kill_vid(struct net_device *netdev,
			  __always_unused __be16 proto, u16 vid)
#else
static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
				 __always_unused __be16 proto, u16 vid)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;

	netdev_info(netdev, "removing %pM vid=%d\n", netdev->dev_addr, vid);

	/* return code is ignored as there is nothing a user
	 * can do about failure to remove and a log message was
	 * already printed from the other function
	 */
	i40e_vsi_kill_vlan(vsi, vid);

	/* drop the vid from the restore-on-up bitmap */
	clear_bit(vid, vsi->active_vlans);

	return 0;
}
2581
2582 /**
2583  * i40e_restore_vlan - Reinstate vlans when vsi/netdev comes back up
2584  * @vsi: the vsi being brought back up
2585  **/
2586 static void i40e_restore_vlan(struct i40e_vsi *vsi)
2587 {
2588         u16 vid;
2589
2590         if (!vsi->netdev)
2591                 return;
2592
2593         i40e_vlan_rx_register(vsi->netdev, vsi->netdev->features);
2594
2595         for_each_set_bit(vid, vsi->active_vlans, VLAN_N_VID)
2596                 i40e_vlan_rx_add_vid(vsi->netdev, htons(ETH_P_8021Q),
2597                                      vid);
2598 }
2599
2600 /**
2601  * i40e_vsi_add_pvid - Add pvid for the VSI
2602  * @vsi: the vsi being adjusted
2603  * @vid: the vlan id to set as a PVID
2604  **/
2605 int i40e_vsi_add_pvid(struct i40e_vsi *vsi, u16 vid)
2606 {
2607         struct i40e_vsi_context ctxt;
2608         i40e_status ret;
2609
2610         vsi->info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
2611         vsi->info.pvid = cpu_to_le16(vid);
2612         vsi->info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_TAGGED |
2613                                     I40E_AQ_VSI_PVLAN_INSERT_PVID |
2614                                     I40E_AQ_VSI_PVLAN_EMOD_STR;
2615
2616         ctxt.seid = vsi->seid;
2617         ctxt.info = vsi->info;
2618         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
2619         if (ret) {
2620                 dev_info(&vsi->back->pdev->dev,
2621                          "add pvid failed, err %s aq_err %s\n",
2622                          i40e_stat_str(&vsi->back->hw, ret),
2623                          i40e_aq_str(&vsi->back->hw,
2624                                      vsi->back->hw.aq.asq_last_status));
2625                 return -ENOENT;
2626         }
2627
2628         return 0;
2629 }
2630
2631 /**
2632  * i40e_vsi_remove_pvid - Remove the pvid from the VSI
2633  * @vsi: the vsi being adjusted
2634  *
2635  * Just use the vlan_rx_register() service to put it back to normal
2636  **/
2637 void i40e_vsi_remove_pvid(struct i40e_vsi *vsi)
2638 {
2639         i40e_vlan_stripping_disable(vsi);
2640
2641         vsi->info.pvid = 0;
2642 }
2643
2644 /**
2645  * i40e_vsi_setup_tx_resources - Allocate VSI Tx queue resources
2646  * @vsi: ptr to the VSI
2647  *
2648  * If this function returns with an error, then it's possible one or
2649  * more of the rings is populated (while the rest are not).  It is the
2650  * callers duty to clean those orphaned rings.
2651  *
2652  * Return 0 on success, negative on failure
2653  **/
2654 static int i40e_vsi_setup_tx_resources(struct i40e_vsi *vsi)
2655 {
2656         int i, err = 0;
2657
2658         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2659                 err = i40e_setup_tx_descriptors(vsi->tx_rings[i]);
2660
2661         return err;
2662 }
2663
2664 /**
2665  * i40e_vsi_free_tx_resources - Free Tx resources for VSI queues
2666  * @vsi: ptr to the VSI
2667  *
2668  * Free VSI's transmit software resources
2669  **/
2670 static void i40e_vsi_free_tx_resources(struct i40e_vsi *vsi)
2671 {
2672         int i;
2673
2674         if (!vsi->tx_rings)
2675                 return;
2676
2677         for (i = 0; i < vsi->num_queue_pairs; i++)
2678                 if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
2679                         i40e_free_tx_resources(vsi->tx_rings[i]);
2680 }
2681
2682 /**
2683  * i40e_vsi_setup_rx_resources - Allocate VSI queues Rx resources
2684  * @vsi: ptr to the VSI
2685  *
2686  * If this function returns with an error, then it's possible one or
2687  * more of the rings is populated (while the rest are not).  It is the
2688  * callers duty to clean those orphaned rings.
2689  *
2690  * Return 0 on success, negative on failure
2691  **/
2692 static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
2693 {
2694         int i, err = 0;
2695
2696         for (i = 0; i < vsi->num_queue_pairs && !err; i++)
2697                 err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
2698 #ifdef I40E_FCOE
2699         i40e_fcoe_setup_ddp_resources(vsi);
2700 #endif
2701         return err;
2702 }
2703
2704 /**
2705  * i40e_vsi_free_rx_resources - Free Rx Resources for VSI queues
2706  * @vsi: ptr to the VSI
2707  *
2708  * Free all receive software resources
2709  **/
2710 static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
2711 {
2712         int i;
2713
2714         if (!vsi->rx_rings)
2715                 return;
2716
2717         for (i = 0; i < vsi->num_queue_pairs; i++)
2718                 if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
2719                         i40e_free_rx_resources(vsi->rx_rings[i]);
2720 #ifdef I40E_FCOE
2721         i40e_fcoe_free_ddp_resources(vsi);
2722 #endif
2723 }
2724
2725 /**
2726  * i40e_config_xps_tx_ring - Configure XPS for a Tx ring
2727  * @ring: The Tx ring to configure
2728  *
2729  * This enables/disables XPS for a given Tx descriptor ring
2730  * based on the TCs enabled for the VSI that ring belongs to.
2731  **/
2732 static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
2733 {
2734         struct i40e_vsi *vsi = ring->vsi;
2735         cpumask_var_t mask;
2736
2737         if (!ring->q_vector || !ring->netdev)
2738                 return;
2739
2740         /* Single TC mode enable XPS */
2741         if (vsi->tc_config.numtc <= 1) {
2742                 if (!test_and_set_bit(__I40E_TX_XPS_INIT_DONE, &ring->state))
2743                         netif_set_xps_queue(ring->netdev,
2744                                             &ring->q_vector->affinity_mask,
2745                                             ring->queue_index);
2746         } else if (alloc_cpumask_var(&mask, GFP_KERNEL)) {
2747                 /* Disable XPS to allow selection based on TC */
2748                 bitmap_zero(cpumask_bits(mask), nr_cpumask_bits);
2749                 netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
2750                 free_cpumask_var(mask);
2751         }
2752
2753         /* schedule our worker thread which will take care of
2754          * applying the new filter changes
2755          */
2756         i40e_service_event_schedule(vsi->back);
2757 }
2758
2759 /**
2760  * i40e_configure_tx_ring - Configure a transmit ring context and rest
2761  * @ring: The Tx ring to configure
2762  *
2763  * Configure the Tx descriptor ring in the HMC context.
2764  **/
2765 static int i40e_configure_tx_ring(struct i40e_ring *ring)
2766 {
2767         struct i40e_vsi *vsi = ring->vsi;
2768         u16 pf_q = vsi->base_queue + ring->queue_index;
2769         struct i40e_hw *hw = &vsi->back->hw;
2770         struct i40e_hmc_obj_txq tx_ctx;
2771         i40e_status err = 0;
2772         u32 qtx_ctl = 0;
2773
2774         /* some ATR related tx ring init */
2775         if (vsi->back->flags & I40E_FLAG_FD_ATR_ENABLED) {
2776                 ring->atr_sample_rate = vsi->back->atr_sample_rate;
2777                 ring->atr_count = 0;
2778         } else {
2779                 ring->atr_sample_rate = 0;
2780         }
2781
2782         /* configure XPS */
2783         i40e_config_xps_tx_ring(ring);
2784
2785         /* clear the context structure first */
2786         memset(&tx_ctx, 0, sizeof(tx_ctx));
2787
2788         tx_ctx.new_context = 1;
2789         tx_ctx.base = (ring->dma / 128);
2790         tx_ctx.qlen = ring->count;
2791         tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
2792                                                I40E_FLAG_FD_ATR_ENABLED));
2793 #ifdef I40E_FCOE
2794         tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
2795 #endif
2796         tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
2797         /* FDIR VSI tx ring can still use RS bit and writebacks */
2798         if (vsi->type != I40E_VSI_FDIR)
2799                 tx_ctx.head_wb_ena = 1;
2800         tx_ctx.head_wb_addr = ring->dma +
2801                               (ring->count * sizeof(struct i40e_tx_desc));
2802
2803         /* As part of VSI creation/update, FW allocates certain
2804          * Tx arbitration queue sets for each TC enabled for
2805          * the VSI. The FW returns the handles to these queue
2806          * sets as part of the response buffer to Add VSI,
2807          * Update VSI, etc. AQ commands. It is expected that
2808          * these queue set handles be associated with the Tx
2809          * queues by the driver as part of the TX queue context
2810          * initialization. This has to be done regardless of
2811          * DCB as by default everything is mapped to TC0.
2812          */
2813         tx_ctx.rdylist = le16_to_cpu(vsi->info.qs_handle[ring->dcb_tc]);
2814         tx_ctx.rdylist_act = 0;
2815
2816         /* clear the context in the HMC */
2817         err = i40e_clear_lan_tx_queue_context(hw, pf_q);
2818         if (err) {
2819                 dev_info(&vsi->back->pdev->dev,
2820                          "Failed to clear LAN Tx queue context on Tx ring %d (pf_q %d), error: %d\n",
2821                          ring->queue_index, pf_q, err);
2822                 return -ENOMEM;
2823         }
2824
2825         /* set the context in the HMC */
2826         err = i40e_set_lan_tx_queue_context(hw, pf_q, &tx_ctx);
2827         if (err) {
2828                 dev_info(&vsi->back->pdev->dev,
2829                          "Failed to set LAN Tx queue context on Tx ring %d (pf_q %d, error: %d\n",
2830                          ring->queue_index, pf_q, err);
2831                 return -ENOMEM;
2832         }
2833
2834         /* Now associate this queue with this PCI function */
2835         if (vsi->type == I40E_VSI_VMDQ2) {
2836                 qtx_ctl = I40E_QTX_CTL_VM_QUEUE;
2837                 qtx_ctl |= ((vsi->id) << I40E_QTX_CTL_VFVM_INDX_SHIFT) &
2838                            I40E_QTX_CTL_VFVM_INDX_MASK;
2839         } else {
2840                 qtx_ctl = I40E_QTX_CTL_PF_QUEUE;
2841         }
2842
2843         qtx_ctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2844                     I40E_QTX_CTL_PF_INDX_MASK);
2845         wr32(hw, I40E_QTX_CTL(pf_q), qtx_ctl);
2846         i40e_flush(hw);
2847
2848         /* cache tail off for easier writes later */
2849         ring->tail = hw->hw_addr + I40E_QTX_TAIL(pf_q);
2850
2851         return 0;
2852 }
2853
/**
 * i40e_configure_rx_ring - Configure a receive ring context
 * @ring: The Rx ring to configure
 *
 * Configure the Rx descriptor ring in the HMC context.
 *
 * Returns 0 on success, -ENOMEM if the HMC context could not be
 * cleared or set.
 **/
static int i40e_configure_rx_ring(struct i40e_ring *ring)
{
	struct i40e_vsi *vsi = ring->vsi;
	u32 chain_len = vsi->back->hw.func_caps.rx_buf_chain_len;
	u16 pf_q = vsi->base_queue + ring->queue_index;
	struct i40e_hw *hw = &vsi->back->hw;
	struct i40e_hmc_obj_rxq rx_ctx;
	i40e_status err = 0;

	ring->state = 0;

	/* clear the context structure first */
	memset(&rx_ctx, 0, sizeof(rx_ctx));

	/* pull the VSI-wide buffer sizing into this ring */
	ring->rx_buf_len = vsi->rx_buf_len;
	ring->rx_hdr_len = vsi->rx_hdr_len;

	/* buffer sizes are programmed in shifted (scaled) units */
	rx_ctx.dbuff = ring->rx_buf_len >> I40E_RXQ_CTX_DBUFF_SHIFT;
	rx_ctx.hbuff = ring->rx_hdr_len >> I40E_RXQ_CTX_HBUFF_SHIFT;

	/* base is programmed in 128-byte units */
	rx_ctx.base = (ring->dma / 128);
	rx_ctx.qlen = ring->count;

	/* dsize selects 16-byte vs 32-byte descriptors */
	if (vsi->back->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED) {
		set_ring_16byte_desc_enabled(ring);
		rx_ctx.dsize = 0;
	} else {
		rx_ctx.dsize = 1;
	}

	/* non-zero dtype means packet-split; enable all split types */
	rx_ctx.dtype = vsi->dtype;
	if (vsi->dtype) {
		set_ring_ps_enabled(ring);
		rx_ctx.hsplit_0 = I40E_RX_SPLIT_L2      |
				  I40E_RX_SPLIT_IP      |
				  I40E_RX_SPLIT_TCP_UDP |
				  I40E_RX_SPLIT_SCTP;
	} else {
		rx_ctx.hsplit_0 = 0;
	}

	/* cap rxmax by how much a full buffer chain can hold */
	rx_ctx.rxmax = min_t(u16, vsi->max_frame,
				  (chain_len * ring->rx_buf_len));
	/* revision 0 silicon uses a different low-queue threshold */
	if (hw->revision_id == 0)
		rx_ctx.lrxqthresh = 0;
	else
		rx_ctx.lrxqthresh = 2;
	rx_ctx.crcstrip = 1;
	rx_ctx.l2tsel = 1;
	/* this controls whether VLAN is stripped from inner headers */
	rx_ctx.showiv = 0;
#ifdef I40E_FCOE
	rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
#endif
	/* set the prefena field to 1 because the manual says to */
	rx_ctx.prefena = 1;

	/* clear the context in the HMC */
	err = i40e_clear_lan_rx_queue_context(hw, pf_q);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to clear LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* set the context in the HMC */
	err = i40e_set_lan_rx_queue_context(hw, pf_q, &rx_ctx);
	if (err) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed to set LAN Rx queue context on Rx ring %d (pf_q %d), error: %d\n",
			 ring->queue_index, pf_q, err);
		return -ENOMEM;
	}

	/* cache tail for quicker writes, and clear the reg before use */
	ring->tail = hw->hw_addr + I40E_QRX_TAIL(pf_q);
	writel(0, ring->tail);

	/* prime the ring with buffers (header + packet when split) */
	if (ring_is_ps_enabled(ring)) {
		i40e_alloc_rx_headers(ring);
		i40e_alloc_rx_buffers_ps(ring, I40E_DESC_UNUSED(ring));
	} else {
		i40e_alloc_rx_buffers_1buf(ring, I40E_DESC_UNUSED(ring));
	}

	return 0;
}
2948
2949 /**
2950  * i40e_vsi_configure_tx - Configure the VSI for Tx
2951  * @vsi: VSI structure describing this set of rings and resources
2952  *
2953  * Configure the Tx VSI for operation.
2954  **/
2955 static int i40e_vsi_configure_tx(struct i40e_vsi *vsi)
2956 {
2957         int err = 0;
2958         u16 i;
2959
2960         for (i = 0; (i < vsi->num_queue_pairs) && !err; i++)
2961                 err = i40e_configure_tx_ring(vsi->tx_rings[i]);
2962
2963         return err;
2964 }
2965
/**
 * i40e_vsi_configure_rx - Configure the VSI for Rx
 * @vsi: the VSI being configured
 *
 * Configure the Rx VSI for operation.
 *
 * Picks the receive buffer sizing and descriptor-split mode from the
 * PF flags (1-buffer vs packet-split), then programs every Rx ring.
 *
 * Returns 0 on success, or the first i40e_configure_rx_ring() error.
 **/
static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
{
	int err = 0;
	u16 i;

	/* max_frame follows the netdev MTU plus L2 overhead */
	if (vsi->netdev && (vsi->netdev->mtu > ETH_DATA_LEN))
		vsi->max_frame = vsi->netdev->mtu + ETH_HLEN
			       + ETH_FCS_LEN + VLAN_HLEN;
	else
		vsi->max_frame = I40E_RXBUFFER_2048;

	/* figure out correct receive buffer length */
	switch (vsi->back->flags & (I40E_FLAG_RX_1BUF_ENABLED |
				    I40E_FLAG_RX_PS_ENABLED)) {
	case I40E_FLAG_RX_1BUF_ENABLED:
		/* whole frame in one buffer, no header split */
		vsi->rx_hdr_len = 0;
		vsi->rx_buf_len = vsi->max_frame;
		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
		break;
	case I40E_FLAG_RX_PS_ENABLED:
		/* split headers into their own buffer */
		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
		vsi->dtype = I40E_RX_DTYPE_HEADER_SPLIT;
		break;
	default:
		/* neither (or both) flags set: always split */
		vsi->rx_hdr_len = I40E_RX_HDR_SIZE;
		vsi->rx_buf_len = I40E_RXBUFFER_2048;
		vsi->dtype = I40E_RX_DTYPE_SPLIT_ALWAYS;
		break;
	}

#ifdef I40E_FCOE
	/* setup rx buffer for FCoE */
	if ((vsi->type == I40E_VSI_FCOE) &&
	    (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
		vsi->rx_hdr_len = 0;
		vsi->rx_buf_len = I40E_RXBUFFER_3072;
		vsi->max_frame = I40E_RXBUFFER_3072;
		vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
	}

#endif /* I40E_FCOE */
	/* round up for the chip's needs */
	vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
				BIT_ULL(I40E_RXQ_CTX_HBUFF_SHIFT));
	vsi->rx_buf_len = ALIGN(vsi->rx_buf_len,
				BIT_ULL(I40E_RXQ_CTX_DBUFF_SHIFT));

	/* set up individual rings */
	for (i = 0; i < vsi->num_queue_pairs && !err; i++)
		err = i40e_configure_rx_ring(vsi->rx_rings[i]);

	return err;
}
3026
3027 /**
3028  * i40e_vsi_config_dcb_rings - Update rings to reflect DCB TC
3029  * @vsi: ptr to the VSI
3030  **/
3031 static void i40e_vsi_config_dcb_rings(struct i40e_vsi *vsi)
3032 {
3033         struct i40e_ring *tx_ring, *rx_ring;
3034         u16 qoffset, qcount;
3035         int i, n;
3036
3037         if (!(vsi->back->flags & I40E_FLAG_DCB_ENABLED)) {
3038                 /* Reset the TC information */
3039                 for (i = 0; i < vsi->num_queue_pairs; i++) {
3040                         rx_ring = vsi->rx_rings[i];
3041                         tx_ring = vsi->tx_rings[i];
3042                         rx_ring->dcb_tc = 0;
3043                         tx_ring->dcb_tc = 0;
3044                 }
3045         }
3046
3047         for (n = 0; n < I40E_MAX_TRAFFIC_CLASS; n++) {
3048                 if (!(vsi->tc_config.enabled_tc & BIT_ULL(n)))
3049                         continue;
3050
3051                 qoffset = vsi->tc_config.tc_info[n].qoffset;
3052                 qcount = vsi->tc_config.tc_info[n].qcount;
3053                 for (i = qoffset; i < (qoffset + qcount); i++) {
3054                         rx_ring = vsi->rx_rings[i];
3055                         tx_ring = vsi->tx_rings[i];
3056                         rx_ring->dcb_tc = n;
3057                         tx_ring->dcb_tc = n;
3058                 }
3059         }
3060 }
3061
3062 /**
3063  * i40e_set_vsi_rx_mode - Call set_rx_mode on a VSI
3064  * @vsi: ptr to the VSI
3065  **/
3066 static void i40e_set_vsi_rx_mode(struct i40e_vsi *vsi)
3067 {
3068         if (vsi->netdev)
3069                 i40e_set_rx_mode(vsi->netdev);
3070 }
3071
3072 /**
3073  * i40e_fdir_filter_restore - Restore the Sideband Flow Director filters
3074  * @vsi: Pointer to the targeted VSI
3075  *
3076  * This function replays the hlist on the hw where all the SB Flow Director
3077  * filters were saved.
3078  **/
3079 static void i40e_fdir_filter_restore(struct i40e_vsi *vsi)
3080 {
3081         struct i40e_fdir_filter *filter;
3082         struct i40e_pf *pf = vsi->back;
3083         struct hlist_node *node;
3084
3085         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
3086                 return;
3087
3088         hlist_for_each_entry_safe(filter, node,
3089                                   &pf->fdir_filter_list, fdir_node) {
3090                 i40e_add_del_fdir(vsi, filter, true);
3091         }
3092 }
3093
/**
 * i40e_vsi_configure - Set up the VSI for action
 * @vsi: the VSI being configured
 *
 * Applies rx-mode, VLAN, and DCB ring settings, then configures the
 * Tx and Rx rings.  Returns 0 on success or the first ring error.
 **/
static int i40e_vsi_configure(struct i40e_vsi *vsi)
{
	int ret;

	i40e_set_vsi_rx_mode(vsi);
	i40e_restore_vlan(vsi);
	i40e_vsi_config_dcb_rings(vsi);

	ret = i40e_vsi_configure_tx(vsi);
	if (ret)
		return ret;

	return i40e_vsi_configure_rx(vsi);
}
3111
/**
 * i40e_vsi_configure_msix - MSIX mode Interrupt Config in the HW
 * @vsi: the VSI being configured
 *
 * Programs the per-vector ITR and interrupt rate-limit registers, then
 * builds the hardware linked list of queue-pair interrupt causes for
 * each vector: each Rx cause chains to the same pair's Tx cause, each
 * Tx cause chains to the next pair's Rx cause, and the final Tx entry
 * terminates the list.
 **/
static void i40e_vsi_configure_msix(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u16 vector;
	int i, q;
	u32 qp;

	/* The interrupt indexing is offset by 1 in the PFINT_ITRn
	 * and PFINT_LNKLSTn registers, e.g.:
	 *   PFINT_ITRn[0..n-1] gets msix-1..msix-n  (qpair interrupts)
	 */
	qp = vsi->base_queue;
	vector = vsi->base_vector;
	for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
		struct i40e_q_vector *q_vector = vsi->q_vectors[i];

		/* program this vector's Rx/Tx ITR and rate limit */
		q_vector->itr_countdown = ITR_COUNTDOWN_START;
		q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
		q_vector->rx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_RX_ITR, vector - 1),
		     q_vector->rx.itr);
		q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
		q_vector->tx.latency_range = I40E_LOW_LATENCY;
		wr32(hw, I40E_PFINT_ITRN(I40E_TX_ITR, vector - 1),
		     q_vector->tx.itr);
		wr32(hw, I40E_PFINT_RATEN(vector - 1),
		     INTRL_USEC_TO_REG(vsi->int_rate_limit));

		/* Linked list for the queuepairs assigned to this vector */
		wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), qp);
		for (q = 0; q < q_vector->num_ringpairs; q++) {
			u32 val;

			/* Rx cause: next list entry is this pair's Tx queue */
			val = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
			      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
			      (qp          << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_TX
				      << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);

			wr32(hw, I40E_QINT_RQCTL(qp), val);

			/* Tx cause: next list entry is the next pair's Rx */
			val = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
			      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)  |
			      (vector      << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
			      ((qp+1)      << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)|
			      (I40E_QUEUE_TYPE_RX
				      << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

			/* Terminate the linked list */
			if (q == (q_vector->num_ringpairs - 1))
				val |= (I40E_QUEUE_END_OF_LIST
					   << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

			wr32(hw, I40E_QINT_TQCTL(qp), val);
			qp++;
		}
	}

	i40e_flush(hw);
}
3178
/**
 * i40e_enable_misc_int_causes - enable the non-queue interrupts
 * @pf: board private structure
 *
 * Clears any pending ICR0 causes, then enables the miscellaneous
 * (non-queue) interrupt causes: ECC/HMC/PCI errors, malicious driver
 * detection, resets, GPIO, VFLR and AdminQ, plus iWARP critical errors
 * and PTP timesync when those features are enabled.
 **/
static void i40e_enable_misc_int_causes(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* clear things first */
	wr32(hw, I40E_PFINT_ICR0_ENA, 0);  /* disable all */
	rd32(hw, I40E_PFINT_ICR0);         /* read to clear */

	val = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK    |
	      I40E_PFINT_ICR0_ENA_GRST_MASK          |
	      I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK |
	      I40E_PFINT_ICR0_ENA_GPIO_MASK          |
	      I40E_PFINT_ICR0_ENA_HMC_ERR_MASK       |
	      I40E_PFINT_ICR0_ENA_VFLR_MASK          |
	      I40E_PFINT_ICR0_ENA_ADMINQ_MASK;

	/* PE (protocol engine) critical errors matter only with iWARP on */
	if (pf->flags & I40E_FLAG_IWARP_ENABLED)
		val |= I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;

	if (pf->flags & I40E_FLAG_PTP)
		val |= I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;

	wr32(hw, I40E_PFINT_ICR0_ENA, val);

	/* SW_ITR_IDX = 0, but don't change INTENA */
	wr32(hw, I40E_PFINT_DYN_CTL0, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
					I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);

	/* OTHER_ITR_IDX = 0 */
	wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
3216
/**
 * i40e_configure_msi_and_legacy - Legacy mode interrupt config in the HW
 * @vsi: the VSI being configured
 *
 * In MSI/legacy mode only queue pair 0 and vector 0 are used: program
 * the ITR settings for vector 0, enable the misc (non-queue) causes,
 * and chain Rx queue 0 -> Tx queue 0 -> end-of-list.
 **/
static void i40e_configure_msi_and_legacy(struct i40e_vsi *vsi)
{
	struct i40e_q_vector *q_vector = vsi->q_vectors[0];
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	u32 val;

	/* set the ITR configuration */
	q_vector->itr_countdown = ITR_COUNTDOWN_START;
	q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
	q_vector->rx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), q_vector->rx.itr);
	q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
	q_vector->tx.latency_range = I40E_LOW_LATENCY;
	wr32(hw, I40E_PFINT_ITR0(I40E_TX_ITR), q_vector->tx.itr);

	i40e_enable_misc_int_causes(pf);

	/* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
	wr32(hw, I40E_PFINT_LNKLST0, 0);

	/* Associate the queue pair to the vector and enable the queue int */
	/* NOTE(review): this RQCTL value is built with the TQCTL
	 * NEXTQ_TYPE shift - presumably the RQCTL and TQCTL shift values
	 * are identical; confirm against the register definitions.
	 */
	val = I40E_QINT_RQCTL_CAUSE_ENA_MASK                  |
	      (I40E_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);

	wr32(hw, I40E_QINT_RQCTL(0), val);

	/* Tx queue 0 terminates the cause list */
	val = I40E_QINT_TQCTL_CAUSE_ENA_MASK                  |
	      (I40E_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
	      (I40E_QUEUE_END_OF_LIST << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);

	wr32(hw, I40E_QINT_TQCTL(0), val);
	i40e_flush(hw);
}
3256
3257 /**
3258  * i40e_irq_dynamic_disable_icr0 - Disable default interrupt generation for icr0
3259  * @pf: board private structure
3260  **/
3261 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
3262 {
3263         struct i40e_hw *hw = &pf->hw;
3264
3265         wr32(hw, I40E_PFINT_DYN_CTL0,
3266              I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
3267         i40e_flush(hw);
3268 }
3269
3270 /**
3271  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
3272  * @pf: board private structure
3273  **/
3274 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
3275 {
3276         struct i40e_hw *hw = &pf->hw;
3277         u32 val;
3278
3279         val = I40E_PFINT_DYN_CTL0_INTENA_MASK   |
3280               I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
3281               (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
3282
3283         wr32(hw, I40E_PFINT_DYN_CTL0, val);
3284         i40e_flush(hw);
3285 }
3286
3287 /**
3288  * i40e_irq_dynamic_disable - Disable default interrupt generation settings
3289  * @vsi: pointer to a vsi
3290  * @vector: disable a particular Hw Interrupt vector
3291  **/
3292 void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
3293 {
3294         struct i40e_pf *pf = vsi->back;
3295         struct i40e_hw *hw = &pf->hw;
3296         u32 val;
3297
3298         val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
3299         wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
3300         i40e_flush(hw);
3301 }
3302
3303 /**
3304  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
3305  * @irq: interrupt number
3306  * @data: pointer to a q_vector
3307  **/
3308 static irqreturn_t i40e_msix_clean_rings(int irq, void *data)
3309 {
3310         struct i40e_q_vector *q_vector = data;
3311
3312         if (!q_vector->tx.ring && !q_vector->rx.ring)
3313                 return IRQ_HANDLED;
3314
3315         napi_schedule_irqoff(&q_vector->napi);
3316
3317         return IRQ_HANDLED;
3318 }
3319
3320 /**
3321  * i40e_vsi_request_irq_msix - Initialize MSI-X interrupts
3322  * @vsi: the VSI being configured
3323  * @basename: name for the vector
3324  *
3325  * Allocates MSI-X vectors and requests interrupts from the kernel.
3326  **/
3327 static int i40e_vsi_request_irq_msix(struct i40e_vsi *vsi, char *basename)
3328 {
3329         int q_vectors = vsi->num_q_vectors;
3330         struct i40e_pf *pf = vsi->back;
3331         int base = vsi->base_vector;
3332         int rx_int_idx = 0;
3333         int tx_int_idx = 0;
3334         int vector, err;
3335
3336         for (vector = 0; vector < q_vectors; vector++) {
3337                 struct i40e_q_vector *q_vector = vsi->q_vectors[vector];
3338
3339                 if (q_vector->tx.ring && q_vector->rx.ring) {
3340                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3341                                  "%s-%s-%d", basename, "TxRx", rx_int_idx++);
3342                         tx_int_idx++;
3343                 } else if (q_vector->rx.ring) {
3344                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3345                                  "%s-%s-%d", basename, "rx", rx_int_idx++);
3346                 } else if (q_vector->tx.ring) {
3347                         snprintf(q_vector->name, sizeof(q_vector->name) - 1,
3348                                  "%s-%s-%d", basename, "tx", tx_int_idx++);
3349                 } else {
3350                         /* skip this unused q_vector */
3351                         continue;
3352                 }
3353                 err = request_irq(pf->msix_entries[base + vector].vector,
3354                                   vsi->irq_handler,
3355                                   0,
3356                                   q_vector->name,
3357                                   q_vector);
3358                 if (err) {
3359                         dev_info(&pf->pdev->dev,
3360                                  "MSIX request_irq failed, error: %d\n", err);
3361                         goto free_queue_irqs;
3362                 }
3363                 /* assign the mask for this irq */
3364                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3365                                       &q_vector->affinity_mask);
3366         }
3367
3368         vsi->irqs_ready = true;
3369         return 0;
3370
3371 free_queue_irqs:
3372         while (vector) {
3373                 vector--;
3374                 irq_set_affinity_hint(pf->msix_entries[base + vector].vector,
3375                                       NULL);
3376                 free_irq(pf->msix_entries[base + vector].vector,
3377                          &(vsi->q_vectors[vector]));
3378         }
3379         return err;
3380 }
3381
/**
 * i40e_vsi_disable_irq - Mask off queue interrupt generation on the VSI
 * @vsi: the VSI being un-configured
 *
 * Clears the per-queue interrupt cause registers, zeroes the vector
 * control registers, and waits for any in-flight interrupt handlers
 * to finish before returning.
 **/
static void i40e_vsi_disable_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	int i;

	/* disable each queue pair's Tx and Rx interrupt causes */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		wr32(hw, I40E_QINT_TQCTL(vsi->tx_rings[i]->reg_idx), 0);
		wr32(hw, I40E_QINT_RQCTL(vsi->rx_rings[i]->reg_idx), 0);
	}

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		/* PFINT_DYN_CTLN is indexed by (vector - 1), matching the
		 * off-by-one register indexing used in configure_msix
		 */
		for (i = vsi->base_vector;
		     i < (vsi->num_q_vectors + vsi->base_vector); i++)
			wr32(hw, I40E_PFINT_DYN_CTLN(i - 1), 0);

		i40e_flush(hw);
		/* wait out any handler still running on these vectors */
		for (i = 0; i < vsi->num_q_vectors; i++)
			synchronize_irq(pf->msix_entries[i + base].vector);
	} else {
		/* Legacy and MSI mode - this stops all interrupt handling */
		wr32(hw, I40E_PFINT_ICR0_ENA, 0);
		wr32(hw, I40E_PFINT_DYN_CTL0, 0);
		i40e_flush(hw);
		synchronize_irq(pf->pdev->irq);
	}
}
3414
3415 /**
3416  * i40e_vsi_enable_irq - Enable IRQ for the given VSI
3417  * @vsi: the VSI being configured
3418  **/
3419 static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
3420 {
3421         struct i40e_pf *pf = vsi->back;
3422         int i;
3423
3424         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
3425                 for (i = 0; i < vsi->num_q_vectors; i++)
3426                         i40e_irq_dynamic_enable(vsi, i);
3427         } else {
3428                 i40e_irq_dynamic_enable_icr0(pf);
3429         }
3430
3431         i40e_flush(&pf->hw);
3432         return 0;
3433 }
3434
3435 /**
3436  * i40e_stop_misc_vector - Stop the vector that handles non-queue events
3437  * @pf: board private structure
3438  **/
3439 static void i40e_stop_misc_vector(struct i40e_pf *pf)
3440 {
3441         /* Disable ICR 0 */
3442         wr32(&pf->hw, I40E_PFINT_ICR0_ENA, 0);
3443         i40e_flush(&pf->hw);
3444 }
3445
/**
 * i40e_intr - MSI/Legacy and non-queue interrupt handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 *
 * This is the handler used for all MSI/Legacy interrupts, and deals
 * with both queue and non-queue interrupts.  This is also used in
 * MSIX mode to handle the non-queue interrupts.
 *
 * Causes that need deferred work (AdminQ, MDD, VFLR, resets) are
 * flagged in pf->state for the service task; their bits are cleared
 * from ena_mask so they stay disabled until the service task runs.
 * Returns IRQ_NONE if no interrupt event was pending (shared legacy
 * IRQ), IRQ_HANDLED otherwise.
 **/
static irqreturn_t i40e_intr(int irq, void *data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;
	struct i40e_hw *hw = &pf->hw;
	irqreturn_t ret = IRQ_NONE;
	u32 icr0, icr0_remaining;
	u32 val, ena_mask;

	icr0 = rd32(hw, I40E_PFINT_ICR0);
	ena_mask = rd32(hw, I40E_PFINT_ICR0_ENA);

	/* if sharing a legacy IRQ, we might get called w/o an intr pending */
	if ((icr0 & I40E_PFINT_ICR0_INTEVENT_MASK) == 0)
		goto enable_intr;

	/* if interrupt but no bits showing, must be SWINT */
	if (((icr0 & ~I40E_PFINT_ICR0_INTEVENT_MASK) == 0) ||
	    (icr0 & I40E_PFINT_ICR0_SWINT_MASK))
		pf->sw_int_count++;

	if ((pf->flags & I40E_FLAG_IWARP_ENABLED) &&
	    (ena_mask & I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK)) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		icr0 &= ~I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK;
		dev_info(&pf->pdev->dev, "cleared PE_CRITERR\n");
	}

	/* only q0 is used in MSI/Legacy mode, and none are used in MSIX */
	if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK) {
		struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
		struct i40e_q_vector *q_vector = vsi->q_vectors[0];

		/* temporarily disable queue cause for NAPI processing */
		u32 qval = rd32(hw, I40E_QINT_RQCTL(0));

		qval &= ~I40E_QINT_RQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_RQCTL(0), qval);

		qval = rd32(hw, I40E_QINT_TQCTL(0));
		qval &= ~I40E_QINT_TQCTL_CAUSE_ENA_MASK;
		wr32(hw, I40E_QINT_TQCTL(0), qval);

		if (!test_bit(__I40E_DOWN, &pf->state))
			napi_schedule_irqoff(&q_vector->napi);
	}

	/* hand AdminQ events to the service task */
	if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
		set_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
	}

	/* malicious driver detection - also handled by the service task */
	if (icr0 & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
		set_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
	}

	/* VF reset (function level reset) events */
	if (icr0 & I40E_PFINT_ICR0_VFLR_MASK) {
		ena_mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
		set_bit(__I40E_VFLR_EVENT_PENDING, &pf->state);
	}

	if (icr0 & I40E_PFINT_ICR0_GRST_MASK) {
		if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
			set_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
		ena_mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
		/* classify the reset type for statistics */
		val = rd32(hw, I40E_GLGEN_RSTAT);
		val = (val & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
		       >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
		if (val == I40E_RESET_CORER) {
			pf->corer_count++;
		} else if (val == I40E_RESET_GLOBR) {
			pf->globr_count++;
		} else if (val == I40E_RESET_EMPR) {
			pf->empr_count++;
			set_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state);
		}
	}

	/* HMC errors are only logged, not acted upon here */
	if (icr0 & I40E_PFINT_ICR0_HMC_ERR_MASK) {
		icr0 &= ~I40E_PFINT_ICR0_HMC_ERR_MASK;
		dev_info(&pf->pdev->dev, "HMC error interrupt\n");
		dev_info(&pf->pdev->dev, "HMC error info 0x%x, HMC error data 0x%x\n",
			 rd32(hw, I40E_PFHMC_ERRORINFO),
			 rd32(hw, I40E_PFHMC_ERRORDATA));
	}

	/* PTP Tx timestamp ready */
	if (icr0 & I40E_PFINT_ICR0_TIMESYNC_MASK) {
		u32 prttsyn_stat = rd32(hw, I40E_PRTTSYN_STAT_0);

		if (prttsyn_stat & I40E_PRTTSYN_STAT_0_TXTIME_MASK) {
			icr0 &= ~I40E_PFINT_ICR0_ENA_TIMESYNC_MASK;
			i40e_ptp_tx_hwtstamp(pf);
		}
	}

	/* If a critical error is pending we have no choice but to reset the
	 * device.
	 * Report and mask out any remaining unexpected interrupts.
	 */
	icr0_remaining = icr0 & ena_mask;
	if (icr0_remaining) {
		dev_info(&pf->pdev->dev, "unhandled interrupt icr0=0x%08x\n",
			 icr0_remaining);
		if ((icr0_remaining & I40E_PFINT_ICR0_PE_CRITERR_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) ||
		    (icr0_remaining & I40E_PFINT_ICR0_ECC_ERR_MASK)) {
			dev_info(&pf->pdev->dev, "device will be reset\n");
			set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
			i40e_service_event_schedule(pf);
		}
		ena_mask &= ~icr0_remaining;
	}
	ret = IRQ_HANDLED;

enable_intr:
	/* re-enable interrupt causes */
	wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
	if (!test_bit(__I40E_DOWN, &pf->state)) {
		i40e_service_event_schedule(pf);
		i40e_irq_dynamic_enable_icr0(pf);
	}

	return ret;
}
3579
/**
 * i40e_clean_fdir_tx_irq - Reclaim resources after transmit completes
 * @tx_ring:  tx ring to clean
 * @budget:   how many cleans we're allowed
 *
 * Walks the Flow Director sideband Tx ring reclaiming completed
 * filter-programming descriptors.  Each completed entry is a pair:
 * the filter descriptor followed by a data descriptor whose buffer
 * is unmapped (and freed for raw sideband filters).
 *
 * Returns true if there's any budget left (e.g. the clean is finished)
 **/
static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
{
	struct i40e_vsi *vsi = tx_ring->vsi;
	u16 i = tx_ring->next_to_clean;
	struct i40e_tx_buffer *tx_buf;
	struct i40e_tx_desc *tx_desc;

	tx_buf = &tx_ring->tx_bi[i];
	tx_desc = I40E_TX_DESC(tx_ring, i);
	/* bias the index by -count so ring wrap is detectable as i == 0 */
	i -= tx_ring->count;

	do {
		struct i40e_tx_desc *eop_desc = tx_buf->next_to_watch;

		/* if next_to_watch is not set then there is no work pending */
		if (!eop_desc)
			break;

		/* prevent any other reads prior to eop_desc */
		smp_rmb();

		/* if the descriptor isn't done, no work yet to do */
		if (!(eop_desc->cmd_type_offset_bsz &
		      cpu_to_le64(I40E_TX_DESC_DTYPE_DESC_DONE)))
			break;

		/* clear next_to_watch to prevent false hangs */
		tx_buf->next_to_watch = NULL;

		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;
		/* move past filter desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			/* wrapped: re-bias and restart at ring slot 0 */
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}
		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buf, dma),
				 dma_unmap_len(tx_buf, len),
				 DMA_TO_DEVICE);
		/* raw sideband buffers were kmalloc'd and are ours to free */
		if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
			kfree(tx_buf->raw_buf);

		tx_buf->raw_buf = NULL;
		tx_buf->tx_flags = 0;
		tx_buf->next_to_watch = NULL;
		dma_unmap_len_set(tx_buf, len, 0);
		tx_desc->buffer_addr = 0;
		tx_desc->cmd_type_offset_bsz = 0;

		/* move us past the eop_desc for start of next FD desc */
		tx_buf++;
		tx_desc++;
		i++;
		if (unlikely(!i)) {
			i -= tx_ring->count;
			tx_buf = tx_ring->tx_bi;
			tx_desc = I40E_TX_DESC(tx_ring, 0);
		}

		/* update budget accounting */
		budget--;
	} while (likely(budget));

	/* undo the -count bias before storing the real ring index */
	i += tx_ring->count;
	tx_ring->next_to_clean = i;

	if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_irq_dynamic_enable(vsi, tx_ring->q_vector->v_idx);

	return budget > 0;
}
3664
3665 /**
3666  * i40e_fdir_clean_ring - Interrupt Handler for FDIR SB ring
3667  * @irq: interrupt number
3668  * @data: pointer to a q_vector
3669  **/
3670 static irqreturn_t i40e_fdir_clean_ring(int irq, void *data)
3671 {
3672         struct i40e_q_vector *q_vector = data;
3673         struct i40e_vsi *vsi;
3674
3675         if (!q_vector->tx.ring)
3676                 return IRQ_HANDLED;
3677
3678         vsi = q_vector->tx.ring->vsi;
3679         i40e_clean_fdir_tx_irq(q_vector->tx.ring, vsi->work_limit);
3680
3681         return IRQ_HANDLED;
3682 }
3683
3684 /**
3685  * i40e_map_vector_to_qp - Assigns the queue pair to the vector
3686  * @vsi: the VSI being configured
3687  * @v_idx: vector index
3688  * @qp_idx: queue pair index
3689  **/
3690 static void i40e_map_vector_to_qp(struct i40e_vsi *vsi, int v_idx, int qp_idx)
3691 {
3692         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
3693         struct i40e_ring *tx_ring = vsi->tx_rings[qp_idx];
3694         struct i40e_ring *rx_ring = vsi->rx_rings[qp_idx];
3695
3696         tx_ring->q_vector = q_vector;
3697         tx_ring->next = q_vector->tx.ring;
3698         q_vector->tx.ring = tx_ring;
3699         q_vector->tx.count++;
3700
3701         rx_ring->q_vector = q_vector;
3702         rx_ring->next = q_vector->rx.ring;
3703         q_vector->rx.ring = rx_ring;
3704         q_vector->rx.count++;
3705 }
3706
3707 /**
3708  * i40e_vsi_map_rings_to_vectors - Maps descriptor rings to vectors
3709  * @vsi: the VSI being configured
3710  *
3711  * This function maps descriptor rings to the queue-specific vectors
3712  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
3713  * one vector per queue pair, but on a constrained vector budget, we
3714  * group the queue pairs as "efficiently" as possible.
3715  **/
3716 static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
3717 {
3718         int qp_remaining = vsi->num_queue_pairs;
3719         int q_vectors = vsi->num_q_vectors;
3720         int num_ringpairs;
3721         int v_start = 0;
3722         int qp_idx = 0;
3723
3724         /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
3725          * group them so there are multiple queues per vector.
3726          * It is also important to go through all the vectors available to be
3727          * sure that if we don't use all the vectors, that the remaining vectors
3728          * are cleared. This is especially important when decreasing the
3729          * number of queues in use.
3730          */
3731         for (; v_start < q_vectors; v_start++) {
3732                 struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
3733
3734                 num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
3735
3736                 q_vector->num_ringpairs = num_ringpairs;
3737
3738                 q_vector->rx.count = 0;
3739                 q_vector->tx.count = 0;
3740                 q_vector->rx.ring = NULL;
3741                 q_vector->tx.ring = NULL;
3742
3743                 while (num_ringpairs--) {
3744                         i40e_map_vector_to_qp(vsi, v_start, qp_idx);
3745                         qp_idx++;
3746                         qp_remaining--;
3747                 }
3748         }
3749 }
3750
3751 /**
3752  * i40e_vsi_request_irq - Request IRQ from the OS
3753  * @vsi: the VSI being configured
3754  * @basename: name for the vector
3755  **/
3756 static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
3757 {
3758         struct i40e_pf *pf = vsi->back;
3759         int err;
3760
3761         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
3762                 err = i40e_vsi_request_irq_msix(vsi, basename);
3763         else if (pf->flags & I40E_FLAG_MSI_ENABLED)
3764                 err = request_irq(pf->pdev->irq, i40e_intr, 0,
3765                                   pf->int_name, pf);
3766         else
3767                 err = request_irq(pf->pdev->irq, i40e_intr, IRQF_SHARED,
3768                                   pf->int_name, pf);
3769
3770         if (err)
3771                 dev_info(&pf->pdev->dev, "request_irq failed, Error %d\n", err);
3772
3773         return err;
3774 }
3775
#ifdef CONFIG_NET_POLL_CONTROLLER
/**
 * i40e_netpoll - A Polling 'interrupt' handler
 * @netdev: network interface device structure
 *
 * This is used by netconsole to send skbs without having to re-enable
 * interrupts.  It's not called while the normal interrupt routine is executing.
 **/
#ifdef I40E_FCOE
void i40e_netpoll(struct net_device *netdev)
#else
static void i40e_netpoll(struct net_device *netdev)
#endif
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int v;

	/* if interface is down do nothing */
	if (test_bit(__I40E_DOWN, &vsi->state))
		return;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
		/* single interrupt covers everything */
		i40e_intr(pf->pdev->irq, netdev);
		return;
	}

	/* poll every queue vector as if its interrupt had fired */
	for (v = 0; v < vsi->num_q_vectors; v++)
		i40e_msix_clean_rings(0, vsi->q_vectors[v]);
}
#endif
3807
3808 /**
3809  * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
3810  * @pf: the PF being configured
3811  * @pf_q: the PF queue
3812  * @enable: enable or disable state of the queue
3813  *
3814  * This routine will wait for the given Tx queue of the PF to reach the
3815  * enabled or disabled state.
3816  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3817  * multiple retries; else will return 0 in case of success.
3818  **/
3819 static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3820 {
3821         int i;
3822         u32 tx_reg;
3823
3824         for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3825                 tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
3826                 if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
3827                         break;
3828
3829                 usleep_range(10, 20);
3830         }
3831         if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3832                 return -ETIMEDOUT;
3833
3834         return 0;
3835 }
3836
/**
 * i40e_vsi_control_tx - Start or stop a VSI's Tx rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Walks every queue pair of the VSI, requests the enable/disable via
 * QTX_ENA, and waits for the hardware to confirm.  Returns 0 on success
 * or the first wait error (-ETIMEDOUT) encountered.
 **/
static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 tx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {

		/* warn the TX unit of coming changes */
		i40e_pre_tx_queue_cfg(&pf->hw, pf_q, enable);
		if (!enable)
			usleep_range(10, 20);

		/* wait until any previous request has been acted on,
		 * i.e. the REQ bit agrees with the STAT bit
		 */
		for (j = 0; j < 50; j++) {
			tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
			if (((tx_reg >> I40E_QTX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((tx_reg >> I40E_QTX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}
		/* Skip if the queue is already in the requested state */
		if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable) {
			/* reset the HEAD register before enabling */
			wr32(hw, I40E_QTX_HEAD(pf_q), 0);
			tx_reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		} else {
			tx_reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		}

		wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
		/* No waiting for the Tx queue to disable */
		if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
			continue;

		/* wait for the change to finish */
		ret = i40e_pf_txq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Tx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	/* NOTE(review): extra settle delay on A0 silicon, presumably an
	 * early-hardware erratum workaround — confirm against datasheet
	 */
	if (hw->revision_id == 0)
		mdelay(50);
	return ret;
}
3895
3896 /**
3897  * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
3898  * @pf: the PF being configured
3899  * @pf_q: the PF queue
3900  * @enable: enable or disable state of the queue
3901  *
3902  * This routine will wait for the given Rx queue of the PF to reach the
3903  * enabled or disabled state.
3904  * Returns -ETIMEDOUT in case of failing to reach the requested state after
3905  * multiple retries; else will return 0 in case of success.
3906  **/
3907 static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
3908 {
3909         int i;
3910         u32 rx_reg;
3911
3912         for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
3913                 rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
3914                 if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
3915                         break;
3916
3917                 usleep_range(10, 20);
3918         }
3919         if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
3920                 return -ETIMEDOUT;
3921
3922         return 0;
3923 }
3924
/**
 * i40e_vsi_control_rx - Start or stop a VSI's Rx rings
 * @vsi: the VSI being configured
 * @enable: start or stop the rings
 *
 * Walks every queue pair of the VSI, requests the enable/disable via
 * QRX_ENA, and waits for the hardware to confirm.  Returns 0 on success
 * or the first wait error (-ETIMEDOUT) encountered.
 **/
static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int i, j, pf_q, ret = 0;
	u32 rx_reg;

	pf_q = vsi->base_queue;
	for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
		/* wait until any previous request has been acted on,
		 * i.e. the REQ bit agrees with the STAT bit
		 */
		for (j = 0; j < 50; j++) {
			rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
			if (((rx_reg >> I40E_QRX_ENA_QENA_REQ_SHIFT) & 1) ==
			    ((rx_reg >> I40E_QRX_ENA_QENA_STAT_SHIFT) & 1))
				break;
			usleep_range(1000, 2000);
		}

		/* Skip if the queue is already in the requested state */
		if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
			continue;

		/* turn on/off the queue */
		if (enable)
			rx_reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		else
			rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);

		/* wait for the change to finish */
		ret = i40e_pf_rxq_wait(pf, pf_q, enable);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "VSI seid %d Rx ring %d %sable timeout\n",
				 vsi->seid, pf_q, (enable ? "en" : "dis"));
			break;
		}
	}

	return ret;
}
3970
/**
 * i40e_vsi_control_rings - Start or stop a VSI's rings
 * @vsi: the VSI being configured
 * @request: true to start the rings, false to stop them
 *
 * Enables Rx before Tx so the Rx side is ready when Tx starts; on
 * disable the order is reversed and errors are ignored so that as much
 * as possible is shut down.  Returns 0 or the first enable error.
 **/
int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool request)
{
	int ret = 0;

	/* do rx first for enable and last for disable */
	if (request) {
		ret = i40e_vsi_control_rx(vsi, request);
		if (ret)
			return ret;
		ret = i40e_vsi_control_tx(vsi, request);
	} else {
		/* Ignore return value, we need to shutdown whatever we can */
		i40e_vsi_control_tx(vsi, request);
		i40e_vsi_control_rx(vsi, request);
	}

	return ret;
}
3994
/**
 * i40e_vsi_free_irq - Free the irq association with the OS
 * @vsi: the VSI being configured
 *
 * Releases the OS IRQs requested for this VSI and tears down the
 * hardware interrupt-to-queue link lists, disabling interrupt causes
 * on every queue that was linked to a vector.
 **/
static void i40e_vsi_free_irq(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	int base = vsi->base_vector;
	u32 val, qp;
	int i;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		if (!vsi->q_vectors)
			return;

		/* irqs_ready guards against double-free of the vectors */
		if (!vsi->irqs_ready)
			return;

		vsi->irqs_ready = false;
		for (i = 0; i < vsi->num_q_vectors; i++) {
			u16 vector = i + base;

			/* free only the irqs that were actually requested */
			if (!vsi->q_vectors[i] ||
			    !vsi->q_vectors[i]->num_ringpairs)
				continue;

			/* clear the affinity_mask in the IRQ descriptor */
			irq_set_affinity_hint(pf->msix_entries[vector].vector,
					      NULL);
			free_irq(pf->msix_entries[vector].vector,
				 vsi->q_vectors[i]);

			/* Tear down the interrupt queue link list
			 *
			 * We know that they come in pairs and always
			 * the Rx first, then the Tx.  To clear the
			 * link list, stick the EOL value into the
			 * next_q field of the registers.
			 */
			/* vector - 1 because LNKLSTN is indexed from the
			 * first queue vector, past the misc vector 0
			 */
			val = rd32(hw, I40E_PFINT_LNKLSTN(vector - 1));
			qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
				>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			val |= I40E_QUEUE_END_OF_LIST
				<< I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
			wr32(hw, I40E_PFINT_LNKLSTN(vector - 1), val);

			/* follow the Rx/Tx chain, masking causes as we go */
			while (qp != I40E_QUEUE_END_OF_LIST) {
				u32 next;

				val = rd32(hw, I40E_QINT_RQCTL(qp));

				val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_RQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
					 I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_RQCTL(qp), val);

				val = rd32(hw, I40E_QINT_TQCTL(qp));

				/* the Tx entry carries the link to the next
				 * queue pair in the chain
				 */
				next = (val & I40E_QINT_TQCTL_NEXTQ_INDX_MASK)
					>> I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT;

				val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
					 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
					 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
					 I40E_QINT_TQCTL_INTEVENT_MASK);

				val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
					 I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

				wr32(hw, I40E_QINT_TQCTL(qp), val);
				qp = next;
			}
		}
	} else {
		/* MSI/legacy: single IRQ, single link list at LNKLST0 */
		free_irq(pf->pdev->irq, pf);

		val = rd32(hw, I40E_PFINT_LNKLST0);
		/* NOTE(review): the LNKLSTN FIRSTQ mask is reused on the
		 * LNKLST0 register here — presumably the two field layouts
		 * are identical; confirm against the register definitions
		 */
		qp = (val & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK)
			>> I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT;
		val |= I40E_QUEUE_END_OF_LIST
			<< I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
		wr32(hw, I40E_PFINT_LNKLST0, val);

		val = rd32(hw, I40E_QINT_RQCTL(qp));
		val &= ~(I40E_QINT_RQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_RQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_RQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_RQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_RQCTL_ITR_INDX_MASK |
			I40E_QINT_RQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_RQCTL(qp), val);

		val = rd32(hw, I40E_QINT_TQCTL(qp));

		val &= ~(I40E_QINT_TQCTL_MSIX_INDX_MASK  |
			 I40E_QINT_TQCTL_MSIX0_INDX_MASK |
			 I40E_QINT_TQCTL_CAUSE_ENA_MASK  |
			 I40E_QINT_TQCTL_INTEVENT_MASK);

		val |= (I40E_QINT_TQCTL_ITR_INDX_MASK |
			I40E_QINT_TQCTL_NEXTQ_INDX_MASK);

		wr32(hw, I40E_QINT_TQCTL(qp), val);
	}
}
4109
4110 /**
4111  * i40e_free_q_vector - Free memory allocated for specific interrupt vector
4112  * @vsi: the VSI being configured
4113  * @v_idx: Index of vector to be freed
4114  *
4115  * This function frees the memory allocated to the q_vector.  In addition if
4116  * NAPI is enabled it will delete any references to the NAPI struct prior
4117  * to freeing the q_vector.
4118  **/
4119 static void i40e_free_q_vector(struct i40e_vsi *vsi, int v_idx)
4120 {
4121         struct i40e_q_vector *q_vector = vsi->q_vectors[v_idx];
4122         struct i40e_ring *ring;
4123
4124         if (!q_vector)
4125                 return;
4126
4127         /* disassociate q_vector from rings */
4128         i40e_for_each_ring(ring, q_vector->tx)
4129                 ring->q_vector = NULL;
4130
4131         i40e_for_each_ring(ring, q_vector->rx)
4132                 ring->q_vector = NULL;
4133
4134         /* only VSI w/ an associated netdev is set up w/ NAPI */
4135         if (vsi->netdev)
4136                 netif_napi_del(&q_vector->napi);
4137
4138         vsi->q_vectors[v_idx] = NULL;
4139
4140         kfree_rcu(q_vector, rcu);
4141 }
4142
4143 /**
4144  * i40e_vsi_free_q_vectors - Free memory allocated for interrupt vectors
4145  * @vsi: the VSI being un-configured
4146  *
4147  * This frees the memory allocated to the q_vectors and
4148  * deletes references to the NAPI struct.
4149  **/
4150 static void i40e_vsi_free_q_vectors(struct i40e_vsi *vsi)
4151 {
4152         int v_idx;
4153
4154         for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++)
4155                 i40e_free_q_vector(vsi, v_idx);
4156 }
4157
/**
 * i40e_reset_interrupt_capability - Disable interrupt setup in OS
 * @pf: board private structure
 *
 * Disables MSI-X or MSI with the PCI core, frees the MSI-X bookkeeping
 * (entry table and irq tracking pile), and clears the corresponding
 * capability flags so a later setup starts from a clean slate.
 **/
static void i40e_reset_interrupt_capability(struct i40e_pf *pf)
{
	/* If we're in Legacy mode, the interrupt was cleaned in vsi_close */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		pci_disable_msix(pf->pdev);
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		kfree(pf->irq_pile);
		pf->irq_pile = NULL;
	} else if (pf->flags & I40E_FLAG_MSI_ENABLED) {
		pci_disable_msi(pf->pdev);
	}
	pf->flags &= ~(I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED);
}
4176
4177 /**
4178  * i40e_clear_interrupt_scheme - Clear the current interrupt scheme settings
4179  * @pf: board private structure
4180  *
4181  * We go through and clear interrupt specific resources and reset the structure
4182  * to pre-load conditions
4183  **/
4184 static void i40e_clear_interrupt_scheme(struct i40e_pf *pf)
4185 {
4186         int i;
4187
4188         i40e_stop_misc_vector(pf);
4189         if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
4190                 synchronize_irq(pf->msix_entries[0].vector);
4191                 free_irq(pf->msix_entries[0].vector, pf);
4192         }
4193
4194         i40e_put_lump(pf->irq_pile, 0, I40E_PILE_VALID_BIT-1);
4195         for (i = 0; i < pf->num_alloc_vsi; i++)
4196                 if (pf->vsi[i])
4197                         i40e_vsi_free_q_vectors(pf->vsi[i]);
4198         i40e_reset_interrupt_capability(pf);
4199 }
4200
4201 /**
4202  * i40e_napi_enable_all - Enable NAPI for all q_vectors in the VSI
4203  * @vsi: the VSI being configured
4204  **/
4205 static void i40e_napi_enable_all(struct i40e_vsi *vsi)
4206 {
4207         int q_idx;
4208
4209         if (!vsi->netdev)
4210                 return;
4211
4212         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4213                 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4214
4215                 if (q_vector->rx.ring || q_vector->tx.ring)
4216                         napi_enable(&q_vector->napi);
4217         }
4218 }
4219
4220 /**
4221  * i40e_napi_disable_all - Disable NAPI for all q_vectors in the VSI
4222  * @vsi: the VSI being configured
4223  **/
4224 static void i40e_napi_disable_all(struct i40e_vsi *vsi)
4225 {
4226         int q_idx;
4227
4228         if (!vsi->netdev)
4229                 return;
4230
4231         for (q_idx = 0; q_idx < vsi->num_q_vectors; q_idx++) {
4232                 struct i40e_q_vector *q_vector = vsi->q_vectors[q_idx];
4233
4234                 if (q_vector->rx.ring || q_vector->tx.ring)
4235                         napi_disable(&q_vector->napi);
4236         }
4237 }
4238
/**
 * i40e_vsi_close - Shut down a VSI
 * @vsi: the vsi to be quelled
 *
 * Brings the VSI down (once — the DOWN bit guards re-entry), releases
 * its IRQs, and frees its Tx/Rx ring resources, in that order.
 **/
static void i40e_vsi_close(struct i40e_vsi *vsi)
{
	/* only run i40e_down() on the first transition to DOWN */
	if (!test_and_set_bit(__I40E_DOWN, &vsi->state))
		i40e_down(vsi);
	i40e_vsi_free_irq(vsi);
	i40e_vsi_free_tx_resources(vsi);
	i40e_vsi_free_rx_resources(vsi);
	vsi->current_netdev_flags = 0;
}
4252
4253 /**
4254  * i40e_quiesce_vsi - Pause a given VSI
4255  * @vsi: the VSI being paused
4256  **/
4257 static void i40e_quiesce_vsi(struct i40e_vsi *vsi)
4258 {
4259         if (test_bit(__I40E_DOWN, &vsi->state))
4260                 return;
4261
4262         /* No need to disable FCoE VSI when Tx suspended */
4263         if ((test_bit(__I40E_PORT_TX_SUSPENDED, &vsi->back->state)) &&
4264             vsi->type == I40E_VSI_FCOE) {
4265                 dev_dbg(&vsi->back->pdev->dev,
4266                          "VSI seid %d skipping FCoE VSI disable\n", vsi->seid);
4267                 return;
4268         }
4269
4270         set_bit(__I40E_NEEDS_RESTART, &vsi->state);
4271         if (vsi->netdev && netif_running(vsi->netdev))
4272                 vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
4273         else
4274                 i40e_vsi_close(vsi);
4275 }
4276
/**
 * i40e_unquiesce_vsi - Resume a given VSI
 * @vsi: the VSI being resumed
 *
 * Reopens a VSI previously paused by i40e_quiesce_vsi(); VSIs without
 * the NEEDS_RESTART mark are left untouched.
 **/
static void i40e_unquiesce_vsi(struct i40e_vsi *vsi)
{
	if (!test_bit(__I40E_NEEDS_RESTART, &vsi->state))
		return;

	clear_bit(__I40E_NEEDS_RESTART, &vsi->state);
	if (vsi->netdev && netif_running(vsi->netdev))
		vsi->netdev->netdev_ops->ndo_open(vsi->netdev);
	else
		i40e_vsi_open(vsi);   /* this clears the DOWN bit */
}
4292
4293 /**
4294  * i40e_pf_quiesce_all_vsi - Pause all VSIs on a PF
4295  * @pf: the PF
4296  **/
4297 static void i40e_pf_quiesce_all_vsi(struct i40e_pf *pf)
4298 {
4299         int v;
4300
4301         for (v = 0; v < pf->num_alloc_vsi; v++) {
4302                 if (pf->vsi[v])
4303                         i40e_quiesce_vsi(pf->vsi[v]);
4304         }
4305 }
4306
4307 /**
4308  * i40e_pf_unquiesce_all_vsi - Resume all VSIs on a PF
4309  * @pf: the PF
4310  **/
4311 static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
4312 {
4313         int v;
4314
4315         for (v = 0; v < pf->num_alloc_vsi; v++) {
4316                 if (pf->vsi[v])
4317                         i40e_unquiesce_vsi(pf->vsi[v]);
4318         }
4319 }
4320
4321 #ifdef CONFIG_I40E_DCB
4322 /**
4323  * i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled
4324  * @vsi: the VSI being configured
4325  *
4326  * This function waits for the given VSI's Tx queues to be disabled.
4327  **/
4328 static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
4329 {
4330         struct i40e_pf *pf = vsi->back;
4331         int i, pf_q, ret;
4332
4333         pf_q = vsi->base_queue;
4334         for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
4335                 /* Check and wait for the disable status of the queue */
4336                 ret = i40e_pf_txq_wait(pf, pf_q, false);
4337                 if (ret) {
4338                         dev_info(&pf->pdev->dev,
4339                                  "VSI seid %d Tx ring %d disable timeout\n",
4340                                  vsi->seid, pf_q);
4341                         return ret;
4342                 }
4343         }
4344
4345         return 0;
4346 }
4347
4348 /**
4349  * i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled
4350  * @pf: the PF
4351  *
4352  * This function waits for the Tx queues to be in disabled state for all the
4353  * VSIs that are managed by this PF.
4354  **/
4355 static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf)
4356 {
4357         int v, ret = 0;
4358
4359         for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
4360                 /* No need to wait for FCoE VSI queues */
4361                 if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
4362                         ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]);
4363                         if (ret)
4364                                 break;
4365                 }
4366         }
4367
4368         return ret;
4369 }
4370
4371 #endif
4372
/**
 * i40e_detect_recover_hung_queue - Function to detect and recover hung_queue
 * @q_idx: TX queue number
 * @vsi: Pointer to VSI struct
 *
 * This function checks specified queue for given VSI. Detects hung condition.
 * Sets hung bit since it is two step process. Before next run of service task
 * if napi_poll runs, it reset 'hung' bit for respective q_vector. If not,
 * hung condition remain unchanged and during subsequent run, this function
 * issues SW interrupt to recover from hung condition.
 **/
static void i40e_detect_recover_hung_queue(int q_idx, struct i40e_vsi *vsi)
{
	struct i40e_ring *tx_ring = NULL;
	struct i40e_pf	*pf;
	u32 head, val, tx_pending;
	int i;

	pf = vsi->back;

	/* now that we have an index, find the tx_ring struct */
	for (i = 0; i < vsi->num_queue_pairs; i++) {
		/* only consider rings that have been fully set up (desc) */
		if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
			if (q_idx == vsi->tx_rings[i]->queue_index) {
				tx_ring = vsi->tx_rings[i];
				break;
			}
		}
	}

	if (!tx_ring)
		return;

	/* Read interrupt register */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		/* per-vector dynamic control register for this queue's
		 * vector; -1 skips the misc vector 0
		 */
		val = rd32(&pf->hw,
			   I40E_PFINT_DYN_CTLN(tx_ring->q_vector->v_idx +
					       tx_ring->vsi->base_vector - 1));
	else
		val = rd32(&pf->hw, I40E_PFINT_DYN_CTL0);

	/* NOTE(review): head is read but unused below — presumably kept
	 * for a follow-on hung check; confirm before removing
	 */
	head = i40e_get_head(tx_ring);

	tx_pending = i40e_get_tx_pending(tx_ring);

	/* Interrupts are disabled and TX pending is non-zero,
	 * trigger the SW interrupt (don't wait). Worst case
	 * there will be one extra interrupt which may result
	 * into not cleaning any queues because queues are cleaned.
	 */
	if (tx_pending && (!(val & I40E_PFINT_DYN_CTLN_INTENA_MASK)))
		i40e_force_wb(vsi, tx_ring->q_vector);
}
4426
4427 /**
4428  * i40e_detect_recover_hung - Function to detect and recover hung_queues
4429  * @pf:  pointer to PF struct
4430  *
4431  * LAN VSI has netdev and netdev has TX queues. This function is to check
4432  * each of those TX queues if they are hung, trigger recovery by issuing
4433  * SW interrupt.
4434  **/
4435 static void i40e_detect_recover_hung(struct i40e_pf *pf)
4436 {
4437         struct net_device *netdev;
4438         struct i40e_vsi *vsi;
4439         int i;
4440
4441         /* Only for LAN VSI */
4442         vsi = pf->vsi[pf->lan_vsi];
4443
4444         if (!vsi)
4445                 return;
4446
4447         /* Make sure, VSI state is not DOWN/RECOVERY_PENDING */
4448         if (test_bit(__I40E_DOWN, &vsi->back->state) ||
4449             test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
4450                 return;
4451
4452         /* Make sure type is MAIN VSI */
4453         if (vsi->type != I40E_VSI_MAIN)
4454                 return;
4455
4456         netdev = vsi->netdev;
4457         if (!netdev)
4458                 return;
4459
4460         /* Bail out if netif_carrier is not OK */
4461         if (!netif_carrier_ok(netdev))
4462                 return;
4463
4464         /* Go thru' TX queues for netdev */
4465         for (i = 0; i < netdev->num_tx_queues; i++) {
4466                 struct netdev_queue *q;
4467
4468                 q = netdev_get_tx_queue(netdev, i);
4469                 if (q)
4470                         i40e_detect_recover_hung_queue(i, vsi);
4471         }
4472 }
4473
4474 /**
4475  * i40e_get_iscsi_tc_map - Return TC map for iSCSI APP
4476  * @pf: pointer to PF
4477  *
4478  * Get TC map for ISCSI PF type that will include iSCSI TC
4479  * and LAN TC.
4480  **/
4481 static u8 i40e_get_iscsi_tc_map(struct i40e_pf *pf)
4482 {
4483         struct i40e_dcb_app_priority_table app;
4484         struct i40e_hw *hw = &pf->hw;
4485         u8 enabled_tc = 1; /* TC0 is always enabled */
4486         u8 tc, i;
4487         /* Get the iSCSI APP TLV */
4488         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4489
4490         for (i = 0; i < dcbcfg->numapps; i++) {
4491                 app = dcbcfg->app[i];
4492                 if (app.selector == I40E_APP_SEL_TCPIP &&
4493                     app.protocolid == I40E_APP_PROTOID_ISCSI) {
4494                         tc = dcbcfg->etscfg.prioritytable[app.priority];
4495                         enabled_tc |= BIT_ULL(tc);
4496                         break;
4497                 }
4498         }
4499
4500         return enabled_tc;
4501 }
4502
4503 /**
4504  * i40e_dcb_get_num_tc -  Get the number of TCs from DCBx config
4505  * @dcbcfg: the corresponding DCBx configuration structure
4506  *
4507  * Return the number of TCs from given DCBx configuration
4508  **/
4509 static u8 i40e_dcb_get_num_tc(struct i40e_dcbx_config *dcbcfg)
4510 {
4511         u8 num_tc = 0;
4512         int i;
4513
4514         /* Scan the ETS Config Priority Table to find
4515          * traffic class enabled for a given priority
4516          * and use the traffic class index to get the
4517          * number of traffic classes enabled
4518          */
4519         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4520                 if (dcbcfg->etscfg.prioritytable[i] > num_tc)
4521                         num_tc = dcbcfg->etscfg.prioritytable[i];
4522         }
4523
4524         /* Traffic class index starts from zero so
4525          * increment to return the actual count
4526          */
4527         return num_tc + 1;
4528 }
4529
4530 /**
4531  * i40e_dcb_get_enabled_tc - Get enabled traffic classes
4532  * @dcbcfg: the corresponding DCBx configuration structure
4533  *
4534  * Query the current DCB configuration and return the number of
4535  * traffic classes enabled from the given DCBX config
4536  **/
4537 static u8 i40e_dcb_get_enabled_tc(struct i40e_dcbx_config *dcbcfg)
4538 {
4539         u8 num_tc = i40e_dcb_get_num_tc(dcbcfg);
4540         u8 enabled_tc = 1;
4541         u8 i;
4542
4543         for (i = 0; i < num_tc; i++)
4544                 enabled_tc |= BIT(i);
4545
4546         return enabled_tc;
4547 }
4548
4549 /**
4550  * i40e_pf_get_num_tc - Get enabled traffic classes for PF
4551  * @pf: PF being queried
4552  *
4553  * Return number of traffic classes enabled for the given PF
4554  **/
4555 static u8 i40e_pf_get_num_tc(struct i40e_pf *pf)
4556 {
4557         struct i40e_hw *hw = &pf->hw;
4558         u8 i, enabled_tc;
4559         u8 num_tc = 0;
4560         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4561
4562         /* If DCB is not enabled then always in single TC */
4563         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4564                 return 1;
4565
4566         /* SFP mode will be enabled for all TCs on port */
4567         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4568                 return i40e_dcb_get_num_tc(dcbcfg);
4569
4570         /* MFP mode return count of enabled TCs for this PF */
4571         if (pf->hw.func_caps.iscsi)
4572                 enabled_tc =  i40e_get_iscsi_tc_map(pf);
4573         else
4574                 return 1; /* Only TC0 */
4575
4576         /* At least have TC0 */
4577         enabled_tc = (enabled_tc ? enabled_tc : 0x1);
4578         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4579                 if (enabled_tc & BIT_ULL(i))
4580                         num_tc++;
4581         }
4582         return num_tc;
4583 }
4584
4585 /**
4586  * i40e_pf_get_default_tc - Get bitmap for first enabled TC
4587  * @pf: PF being queried
4588  *
4589  * Return a bitmap for first enabled traffic class for this PF.
4590  **/
4591 static u8 i40e_pf_get_default_tc(struct i40e_pf *pf)
4592 {
4593         u8 enabled_tc = pf->hw.func_caps.enabled_tcmap;
4594         u8 i = 0;
4595
4596         if (!enabled_tc)
4597                 return 0x1; /* TC0 */
4598
4599         /* Find the first enabled TC */
4600         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4601                 if (enabled_tc & BIT_ULL(i))
4602                         break;
4603         }
4604
4605         return BIT(i);
4606 }
4607
4608 /**
4609  * i40e_pf_get_pf_tc_map - Get bitmap for enabled traffic classes
4610  * @pf: PF being queried
4611  *
4612  * Return a bitmap for enabled traffic classes for this PF.
4613  **/
4614 static u8 i40e_pf_get_tc_map(struct i40e_pf *pf)
4615 {
4616         /* If DCB is not enabled for this PF then just return default TC */
4617         if (!(pf->flags & I40E_FLAG_DCB_ENABLED))
4618                 return i40e_pf_get_default_tc(pf);
4619
4620         /* SFP mode we want PF to be enabled for all TCs */
4621         if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
4622                 return i40e_dcb_get_enabled_tc(&pf->hw.local_dcbx_config);
4623
4624         /* MFP enabled and iSCSI PF type */
4625         if (pf->hw.func_caps.iscsi)
4626                 return i40e_get_iscsi_tc_map(pf);
4627         else
4628                 return i40e_pf_get_default_tc(pf);
4629 }
4630
/**
 * i40e_vsi_get_bw_info - Query VSI BW Information
 * @vsi: the VSI being queried
 *
 * Fetches the VSI-level and per-TC bandwidth configuration via two
 * admin-queue queries and caches the results in the VSI struct.
 *
 * Returns 0 on success, -EINVAL when either admin-queue query fails.
 **/
static int i40e_vsi_get_bw_info(struct i40e_vsi *vsi)
{
	struct i40e_aqc_query_vsi_ets_sla_config_resp bw_ets_config = {0};
	struct i40e_aqc_query_vsi_bw_config_resp bw_config = {0};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	u32 tc_bw_max;
	int i;

	/* Get the VSI level BW configuration */
	ret = i40e_aq_query_vsi_bw_config(hw, vsi->seid, &bw_config, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* Get the VSI level BW configuration per TC */
	ret = i40e_aq_query_vsi_ets_sla_config(hw, vsi->seid, &bw_ets_config,
					       NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get PF vsi ets bw config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EINVAL;
	}

	/* the two queries should agree on which TCs are valid; a mismatch
	 * is logged but tolerated
	 */
	if (bw_config.tc_valid_bits != bw_ets_config.tc_valid_bits) {
		dev_info(&pf->pdev->dev,
			 "Enabled TCs mismatch from querying VSI BW info 0x%08x 0x%08x\n",
			 bw_config.tc_valid_bits,
			 bw_ets_config.tc_valid_bits);
		/* Still continuing */
	}

	vsi->bw_limit = le16_to_cpu(bw_config.port_bw_limit);
	vsi->bw_max_quanta = bw_config.max_bw;
	/* assemble the 32-bit max-quanta word from two LE16 halves */
	tc_bw_max = le16_to_cpu(bw_ets_config.tc_bw_max[0]) |
		    (le16_to_cpu(bw_ets_config.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		vsi->bw_ets_share_credits[i] = bw_ets_config.share_credits[i];
		vsi->bw_ets_limit_credits[i] =
					le16_to_cpu(bw_ets_config.credits[i]);
		/* 3 bits out of 4 for each TC */
		vsi->bw_ets_max_quanta[i] = (u8)((tc_bw_max >> (i*4)) & 0x7);
	}

	return 0;
}
4690
4691 /**
4692  * i40e_vsi_configure_bw_alloc - Configure VSI BW allocation per TC
4693  * @vsi: the VSI being configured
4694  * @enabled_tc: TC bitmap
4695  * @bw_credits: BW shared credits per TC
4696  *
4697  * Returns 0 on success, negative value on failure
4698  **/
4699 static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc,
4700                                        u8 *bw_share)
4701 {
4702         struct i40e_aqc_configure_vsi_tc_bw_data bw_data;
4703         i40e_status ret;
4704         int i;
4705
4706         bw_data.tc_valid_bits = enabled_tc;
4707         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4708                 bw_data.tc_bw_credits[i] = bw_share[i];
4709
4710         ret = i40e_aq_config_vsi_tc_bw(&vsi->back->hw, vsi->seid, &bw_data,
4711                                        NULL);
4712         if (ret) {
4713                 dev_info(&vsi->back->pdev->dev,
4714                          "AQ command Config VSI BW allocation per TC failed = %d\n",
4715                          vsi->back->hw.aq.asq_last_status);
4716                 return -EINVAL;
4717         }
4718
4719         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++)
4720                 vsi->info.qs_handle[i] = bw_data.qs_handles[i];
4721
4722         return 0;
4723 }
4724
4725 /**
4726  * i40e_vsi_config_netdev_tc - Setup the netdev TC configuration
4727  * @vsi: the VSI being configured
4728  * @enabled_tc: TC map to be enabled
4729  *
4730  **/
4731 static void i40e_vsi_config_netdev_tc(struct i40e_vsi *vsi, u8 enabled_tc)
4732 {
4733         struct net_device *netdev = vsi->netdev;
4734         struct i40e_pf *pf = vsi->back;
4735         struct i40e_hw *hw = &pf->hw;
4736         u8 netdev_tc = 0;
4737         int i;
4738         struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
4739
4740         if (!netdev)
4741                 return;
4742
4743         if (!enabled_tc) {
4744                 netdev_reset_tc(netdev);
4745                 return;
4746         }
4747
4748         /* Set up actual enabled TCs on the VSI */
4749         if (netdev_set_num_tc(netdev, vsi->tc_config.numtc))
4750                 return;
4751
4752         /* set per TC queues for the VSI */
4753         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4754                 /* Only set TC queues for enabled tcs
4755                  *
4756                  * e.g. For a VSI that has TC0 and TC3 enabled the
4757                  * enabled_tc bitmap would be 0x00001001; the driver
4758                  * will set the numtc for netdev as 2 that will be
4759                  * referenced by the netdev layer as TC 0 and 1.
4760                  */
4761                 if (vsi->tc_config.enabled_tc & BIT_ULL(i))
4762                         netdev_set_tc_queue(netdev,
4763                                         vsi->tc_config.tc_info[i].netdev_tc,
4764                                         vsi->tc_config.tc_info[i].qcount,
4765                                         vsi->tc_config.tc_info[i].qoffset);
4766         }
4767
4768         /* Assign UP2TC map for the VSI */
4769         for (i = 0; i < I40E_MAX_USER_PRIORITY; i++) {
4770                 /* Get the actual TC# for the UP */
4771                 u8 ets_tc = dcbcfg->etscfg.prioritytable[i];
4772                 /* Get the mapped netdev TC# for the UP */
4773                 netdev_tc =  vsi->tc_config.tc_info[ets_tc].netdev_tc;
4774                 netdev_set_prio_tc_map(netdev, i, netdev_tc);
4775         }
4776 }
4777
4778 /**
4779  * i40e_vsi_update_queue_map - Update our copy of VSi info with new queue map
4780  * @vsi: the VSI being configured
4781  * @ctxt: the ctxt buffer returned from AQ VSI update param command
4782  **/
4783 static void i40e_vsi_update_queue_map(struct i40e_vsi *vsi,
4784                                       struct i40e_vsi_context *ctxt)
4785 {
4786         /* copy just the sections touched not the entire info
4787          * since not all sections are valid as returned by
4788          * update vsi params
4789          */
4790         vsi->info.mapping_flags = ctxt->info.mapping_flags;
4791         memcpy(&vsi->info.queue_mapping,
4792                &ctxt->info.queue_mapping, sizeof(vsi->info.queue_mapping));
4793         memcpy(&vsi->info.tc_mapping, ctxt->info.tc_mapping,
4794                sizeof(vsi->info.tc_mapping));
4795 }
4796
/**
 * i40e_vsi_config_tc - Configure VSI Tx Scheduler for given TC map
 * @vsi: VSI to be configured
 * @enabled_tc: TC bitmap
 *
 * This configures a particular VSI for TCs that are mapped to the
 * given TC bitmap. It uses default bandwidth share for TCs across
 * VSIs to configure TC for a particular VSI.
 *
 * NOTE:
 * It is expected that the VSI queues have been quiesced before calling
 * this function.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_vsi_config_tc(struct i40e_vsi *vsi, u8 enabled_tc)
{
	u8 bw_share[I40E_MAX_TRAFFIC_CLASS] = {0};
	struct i40e_vsi_context ctxt;
	int ret = 0;
	int i;

	/* Nothing to do if the requested map matches what is enabled */
	if (vsi->tc_config.enabled_tc == enabled_tc)
		return ret;

	/* Enable ETS TCs with equal BW Share for now across all VSIs */
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		if (enabled_tc & BIT_ULL(i))
			bw_share[i] = 1;
	}

	/* Push the per-TC BW shares first; the queue map below relies
	 * on the TC BW allocation being in place.
	 */
	ret = i40e_vsi_configure_bw_alloc(vsi, enabled_tc, bw_share);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed configuring TC map %d for VSI %d\n",
			 enabled_tc, vsi->seid);
		goto out;
	}

	/* Update Queue Pairs Mapping for currently enabled UPs */
	ctxt.seid = vsi->seid;
	ctxt.pf_num = vsi->back->hw.pf_id;
	ctxt.vf_num = 0;
	ctxt.uplink_seid = vsi->uplink_seid;
	ctxt.info = vsi->info;
	i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);

	/* Update the VSI after updating the VSI queue-mapping information */
	ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Update vsi tc config failed, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}
	/* update the local VSI info with updated queue map */
	i40e_vsi_update_queue_map(vsi, &ctxt);
	vsi->info.valid_sections = 0;

	/* Re-query so the cached BW info matches what firmware applied */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&vsi->back->pdev->dev,
			 "Failed updating vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&vsi->back->hw, ret),
			 i40e_aq_str(&vsi->back->hw,
				     vsi->back->hw.aq.asq_last_status));
		goto out;
	}

	/* Update the netdev TC setup */
	i40e_vsi_config_netdev_tc(vsi, enabled_tc);
out:
	return ret;
}
4873
4874 /**
4875  * i40e_veb_config_tc - Configure TCs for given VEB
4876  * @veb: given VEB
4877  * @enabled_tc: TC bitmap
4878  *
4879  * Configures given TC bitmap for VEB (switching) element
4880  **/
4881 int i40e_veb_config_tc(struct i40e_veb *veb, u8 enabled_tc)
4882 {
4883         struct i40e_aqc_configure_switching_comp_bw_config_data bw_data = {0};
4884         struct i40e_pf *pf = veb->pf;
4885         int ret = 0;
4886         int i;
4887
4888         /* No TCs or already enabled TCs just return */
4889         if (!enabled_tc || veb->enabled_tc == enabled_tc)
4890                 return ret;
4891
4892         bw_data.tc_valid_bits = enabled_tc;
4893         /* bw_data.absolute_credits is not set (relative) */
4894
4895         /* Enable ETS TCs with equal BW Share for now */
4896         for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
4897                 if (enabled_tc & BIT_ULL(i))
4898                         bw_data.tc_bw_share_credits[i] = 1;
4899         }
4900
4901         ret = i40e_aq_config_switch_comp_bw_config(&pf->hw, veb->seid,
4902                                                    &bw_data, NULL);
4903         if (ret) {
4904                 dev_info(&pf->pdev->dev,
4905                          "VEB bw config failed, err %s aq_err %s\n",
4906                          i40e_stat_str(&pf->hw, ret),
4907                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4908                 goto out;
4909         }
4910
4911         /* Update the BW information */
4912         ret = i40e_veb_get_bw_info(veb);
4913         if (ret) {
4914                 dev_info(&pf->pdev->dev,
4915                          "Failed getting veb bw config, err %s aq_err %s\n",
4916                          i40e_stat_str(&pf->hw, ret),
4917                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
4918         }
4919
4920 out:
4921         return ret;
4922 }
4923
4924 #ifdef CONFIG_I40E_DCB
/**
 * i40e_dcb_reconfigure - Reconfigure all VEBs and VSIs
 * @pf: PF struct
 *
 * Reconfigure VEB/VSIs on a given PF; it is assumed that
 * the caller would've quiesced all the VSIs before calling
 * this function
 **/
static void i40e_dcb_reconfigure(struct i40e_pf *pf)
{
	u8 tc_map = 0;
	int ret;
	u8 v;

	/* Enable the TCs available on PF to all VEBs */
	tc_map = i40e_pf_get_tc_map(pf);
	for (v = 0; v < I40E_MAX_VEB; v++) {
		if (!pf->veb[v])
			continue;
		ret = i40e_veb_config_tc(pf->veb[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VEB seid=%d\n",
				 pf->veb[v]->seid);
			/* Will try to configure as many components */
		}
	}

	/* Update each VSI */
	for (v = 0; v < pf->num_alloc_vsi; v++) {
		if (!pf->vsi[v])
			continue;

		/* TC map selection:
		 * - the LAN VSI gets all TCs enabled on the PF
		 * - (FCoE builds) the FCoE VSI gets only the TC
		 *   configured via the APP TLV
		 * - everything else stays on the default TC (TC0)
		 */
		if (v == pf->lan_vsi)
			tc_map = i40e_pf_get_tc_map(pf);
		else
			tc_map = i40e_pf_get_default_tc(pf);
#ifdef I40E_FCOE
		if (pf->vsi[v]->type == I40E_VSI_FCOE)
			tc_map = i40e_get_fcoe_tc_map(pf);
#endif /* #ifdef I40E_FCOE */

		ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "Failed configuring TC for VSI seid=%d\n",
				 pf->vsi[v]->seid);
			/* Will try to configure as many components */
		} else {
			/* Re-configure VSI vectors based on updated TC map */
			i40e_vsi_map_rings_to_vectors(pf->vsi[v]);
			if (pf->vsi[v]->netdev)
				i40e_dcbnl_set_all(pf->vsi[v]);
		}
	}
}
4988
4989 /**
4990  * i40e_resume_port_tx - Resume port Tx
4991  * @pf: PF struct
4992  *
4993  * Resume a port's Tx and issue a PF reset in case of failure to
4994  * resume.
4995  **/
4996 static int i40e_resume_port_tx(struct i40e_pf *pf)
4997 {
4998         struct i40e_hw *hw = &pf->hw;
4999         int ret;
5000
5001         ret = i40e_aq_resume_port_tx(hw, NULL);
5002         if (ret) {
5003                 dev_info(&pf->pdev->dev,
5004                          "Resume Port Tx failed, err %s aq_err %s\n",
5005                           i40e_stat_str(&pf->hw, ret),
5006                           i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
5007                 /* Schedule PF reset to recover */
5008                 set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
5009                 i40e_service_event_schedule(pf);
5010         }
5011
5012         return ret;
5013 }
5014
/**
 * i40e_init_pf_dcb - Initialize DCB configuration
 * @pf: PF being configured
 *
 * Query the current DCB configuration and cache it
 * in the hardware structure
 *
 * Returns 0 on success (including when DCB is skipped), or the
 * i40e_init_dcb() error code on failure.
 **/
static int i40e_init_pf_dcb(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Do not enable DCB for SW1 and SW2 images even if the FW is capable
	 * (identified here by FW version older than 4.33).
	 */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4))
		goto out;

	/* Get the initial DCB configuration */
	err = i40e_init_dcb(hw);
	if (!err) {
		/* Device/Function is not DCBX capable */
		if ((!hw->func_caps.dcb) ||
		    (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED)) {
			dev_info(&pf->pdev->dev,
				 "DCBX offload is not supported or is disabled for this PF.\n");

			if (pf->flags & I40E_FLAG_MFP_ENABLED)
				goto out;

		} else {
			/* When status is not DISABLED then DCBX in FW */
			pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED |
				       DCB_CAP_DCBX_VER_IEEE;

			pf->flags |= I40E_FLAG_DCB_CAPABLE;
			/* Enable DCB tagging only when more than one TC */
			if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
				pf->flags |= I40E_FLAG_DCB_ENABLED;
			dev_dbg(&pf->pdev->dev,
				"DCBX offload is supported for this PF.\n");
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "Query for DCB configuration failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	}

out:
	return err;
}
5066 #endif /* CONFIG_I40E_DCB */
5067 #define SPEED_SIZE 14
5068 #define FC_SIZE 8
5069 /**
5070  * i40e_print_link_message - print link up or down
5071  * @vsi: the VSI for which link needs a message
5072  */
5073 void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
5074 {
5075         char *speed = "Unknown";
5076         char *fc = "Unknown";
5077
5078         if (vsi->current_isup == isup)
5079                 return;
5080         vsi->current_isup = isup;
5081         if (!isup) {
5082                 netdev_info(vsi->netdev, "NIC Link is Down\n");
5083                 return;
5084         }
5085
5086         /* Warn user if link speed on NPAR enabled partition is not at
5087          * least 10GB
5088          */
5089         if (vsi->back->hw.func_caps.npar_enable &&
5090             (vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_1GB ||
5091              vsi->back->hw.phy.link_info.link_speed == I40E_LINK_SPEED_100MB))
5092                 netdev_warn(vsi->netdev,
5093                             "The partition detected link speed that is less than 10Gbps\n");
5094
5095         switch (vsi->back->hw.phy.link_info.link_speed) {
5096         case I40E_LINK_SPEED_40GB:
5097                 speed = "40 G";
5098                 break;
5099         case I40E_LINK_SPEED_20GB:
5100                 speed = "20 G";
5101                 break;
5102         case I40E_LINK_SPEED_10GB:
5103                 speed = "10 G";
5104                 break;
5105         case I40E_LINK_SPEED_1GB:
5106                 speed = "1000 M";
5107                 break;
5108         case I40E_LINK_SPEED_100MB:
5109                 speed = "100 M";
5110                 break;
5111         default:
5112                 break;
5113         }
5114
5115         switch (vsi->back->hw.fc.current_mode) {
5116         case I40E_FC_FULL:
5117                 fc = "RX/TX";
5118                 break;
5119         case I40E_FC_TX_PAUSE:
5120                 fc = "TX";
5121                 break;
5122         case I40E_FC_RX_PAUSE:
5123                 fc = "RX";
5124                 break;
5125         default:
5126                 fc = "None";
5127                 break;
5128         }
5129
5130         netdev_info(vsi->netdev, "NIC Link is Up %sbps Full Duplex, Flow Control: %s\n",
5131                     speed, fc);
5132 }
5133
/**
 * i40e_up_complete - Finish the last steps of bringing up a connection
 * @vsi: the VSI being configured
 *
 * Configures interrupts, starts the rings, enables NAPI and IRQs, and
 * reports link state to the stack.  For the FDIR VSI it also replays
 * the sideband Flow Director filters.
 *
 * Returns 0 on success, negative value if starting the rings failed.
 **/
static int i40e_up_complete(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	int err;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED)
		i40e_vsi_configure_msix(vsi);
	else
		i40e_configure_msi_and_legacy(vsi);

	/* start rings */
	err = i40e_vsi_control_rings(vsi, true);
	if (err)
		return err;

	/* clear DOWN before enabling NAPI/IRQs so handlers see an
	 * active VSI
	 */
	clear_bit(__I40E_DOWN, &vsi->state);
	i40e_napi_enable_all(vsi);
	i40e_vsi_enable_irq(vsi);

	if ((pf->hw.phy.link_info.link_info & I40E_AQ_LINK_UP) &&
	    (vsi->netdev)) {
		i40e_print_link_message(vsi, true);
		netif_tx_start_all_queues(vsi->netdev);
		netif_carrier_on(vsi->netdev);
	} else if (vsi->netdev) {
		i40e_print_link_message(vsi, false);
		/* need to check for qualified module here*/
		if ((pf->hw.phy.link_info.link_info &
			I40E_AQ_MEDIA_AVAILABLE) &&
		    (!(pf->hw.phy.link_info.an_info &
			I40E_AQ_QUALIFIED_MODULE)))
			netdev_err(vsi->netdev,
				   "the driver failed to link because an unqualified module was detected.");
	}

	/* replay FDIR SB filters */
	if (vsi->type == I40E_VSI_FDIR) {
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = 0;
		/* ATR is mutually exclusive with sideband TCP/IPv4 rules */
		if (pf->fd_tcp_rule > 0) {
			pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
			if (I40E_DEBUG_FD & pf->hw.debug_mask)
				dev_info(&pf->pdev->dev, "Forcing ATR off, sideband rules for TCP/IPv4 exist\n");
			pf->fd_tcp_rule = 0;
		}
		i40e_fdir_filter_restore(vsi);
	}
	i40e_service_event_schedule(pf);

	return 0;
}
5189
/**
 * i40e_vsi_reinit_locked - Reset the VSI
 * @vsi: the VSI being configured
 *
 * Rebuild the ring structs after some configuration
 * has changed, e.g. MTU size.  Serializes against other
 * reconfiguration via the PF's __I40E_CONFIG_BUSY bit.
 **/
static void i40e_vsi_reinit_locked(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	/* may sleep below; must not be called from interrupt context */
	WARN_ON(in_interrupt());
	/* spin (sleeping) until we own the CONFIG_BUSY bit */
	while (test_and_set_bit(__I40E_CONFIG_BUSY, &pf->state))
		usleep_range(1000, 2000);
	i40e_down(vsi);

	/* Give a VF some time to respond to the reset.  The
	 * two second wait is based upon the watchdog cycle in
	 * the VF driver.
	 */
	if (vsi->type == I40E_VSI_SRIOV)
		msleep(2000);
	i40e_up(vsi);
	clear_bit(__I40E_CONFIG_BUSY, &pf->state);
}
5215
/**
 * i40e_up - Bring the connection back up after being down
 * @vsi: the VSI being configured
 *
 * Returns 0 on success, negative value on failure.
 **/
int i40e_up(struct i40e_vsi *vsi)
{
	int err = i40e_vsi_configure(vsi);

	if (err)
		return err;

	return i40e_up_complete(vsi);
}
5230
5231 /**
5232  * i40e_down - Shutdown the connection processing
5233  * @vsi: the VSI being stopped
5234  **/
5235 void i40e_down(struct i40e_vsi *vsi)
5236 {
5237         int i;
5238
5239         /* It is assumed that the caller of this function
5240          * sets the vsi->state __I40E_DOWN bit.
5241          */
5242         if (vsi->netdev) {
5243                 netif_carrier_off(vsi->netdev);
5244                 netif_tx_disable(vsi->netdev);
5245         }
5246         i40e_vsi_disable_irq(vsi);
5247         i40e_vsi_control_rings(vsi, false);
5248         i40e_napi_disable_all(vsi);
5249
5250         for (i = 0; i < vsi->num_queue_pairs; i++) {
5251                 i40e_clean_tx_ring(vsi->tx_rings[i]);
5252                 i40e_clean_rx_ring(vsi->rx_rings[i]);
5253         }
5254 }
5255
5256 /**
5257  * i40e_setup_tc - configure multiple traffic classes
5258  * @netdev: net device to configure
5259  * @tc: number of traffic classes to enable
5260  **/
5261 #ifdef I40E_FCOE
5262 int i40e_setup_tc(struct net_device *netdev, u8 tc)
5263 #else
5264 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
5265 #endif
5266 {
5267         struct i40e_netdev_priv *np = netdev_priv(netdev);
5268         struct i40e_vsi *vsi = np->vsi;
5269         struct i40e_pf *pf = vsi->back;
5270         u8 enabled_tc = 0;
5271         int ret = -EINVAL;
5272         int i;
5273
5274         /* Check if DCB enabled to continue */
5275         if (!(pf->flags & I40E_FLAG_DCB_ENABLED)) {
5276                 netdev_info(netdev, "DCB is not enabled for adapter\n");
5277                 goto exit;
5278         }
5279
5280         /* Check if MFP enabled */
5281         if (pf->flags & I40E_FLAG_MFP_ENABLED) {
5282                 netdev_info(netdev, "Configuring TC not supported in MFP mode\n");
5283                 goto exit;
5284         }
5285
5286         /* Check whether tc count is within enabled limit */
5287         if (tc > i40e_pf_get_num_tc(pf)) {
5288                 netdev_info(netdev, "TC count greater than enabled on link for adapter\n");
5289                 goto exit;
5290         }
5291
5292         /* Generate TC map for number of tc requested */
5293         for (i = 0; i < tc; i++)
5294                 enabled_tc |= BIT_ULL(i);
5295
5296         /* Requesting same TC configuration as already enabled */
5297         if (enabled_tc == vsi->tc_config.enabled_tc)
5298                 return 0;
5299
5300         /* Quiesce VSI queues */
5301         i40e_quiesce_vsi(vsi);
5302
5303         /* Configure VSI for enabled TCs */
5304         ret = i40e_vsi_config_tc(vsi, enabled_tc);
5305         if (ret) {
5306                 netdev_info(netdev, "Failed configuring TC for VSI seid=%d\n",
5307                             vsi->seid);
5308                 goto exit;
5309         }
5310
5311         /* Unquiesce VSI */
5312         i40e_unquiesce_vsi(vsi);
5313
5314 exit:
5315         return ret;
5316 }
5317
/**
 * i40e_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the netdev watchdog subtask is
 * enabled, and the stack is notified that the interface is ready.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_open(struct net_device *netdev)
{
	struct i40e_netdev_priv *np = netdev_priv(netdev);
	struct i40e_vsi *vsi = np->vsi;
	struct i40e_pf *pf = vsi->back;
	int err;

	/* disallow open during test or if eeprom is broken */
	if (test_bit(__I40E_TESTING, &pf->state) ||
	    test_bit(__I40E_BAD_EEPROM, &pf->state))
		return -EBUSY;

	netif_carrier_off(netdev);

	err = i40e_vsi_open(vsi);
	if (err)
		return err;

	/* configure global TSO hardware offload settings.  The TCP flag
	 * constants are big-endian bitmasks over the 16-bit flags word,
	 * hence the be32_to_cpu() and >> 16 before writing the masks.
	 */
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_F, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_M, be32_to_cpu(TCP_FLAG_PSH |
						       TCP_FLAG_FIN |
						       TCP_FLAG_CWR) >> 16);
	wr32(&pf->hw, I40E_GLLAN_TSOMSK_L, be32_to_cpu(TCP_FLAG_CWR) >> 16);

#ifdef CONFIG_I40E_VXLAN
	/* ask the stack to replay known VXLAN ports to us */
	vxlan_get_rx_port(netdev);
#endif

	return 0;
}
5362
/**
 * i40e_vsi_open -
 * @vsi: the VSI to open
 *
 * Finish initialization of the VSI.
 *
 * Allocates Tx/Rx descriptor resources, configures the VSI, requests
 * the interrupt, publishes the real queue counts to the stack (for
 * netdev-backed VSIs), and completes bring-up.  On failure, resources
 * are released in reverse order via the goto chain below, and if this
 * is the LAN VSI a PF reset is requested.
 *
 * Returns 0 on success, negative value on failure
 **/
int i40e_vsi_open(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	char int_name[I40E_INT_NAME_STR_LEN];
	int err;

	/* allocate descriptors */
	err = i40e_vsi_setup_tx_resources(vsi);
	if (err)
		goto err_setup_tx;
	err = i40e_vsi_setup_rx_resources(vsi);
	if (err)
		goto err_setup_rx;

	err = i40e_vsi_configure(vsi);
	if (err)
		goto err_setup_rx;

	if (vsi->netdev) {
		/* NOTE(review): sizeof - 1 is needlessly conservative here,
		 * snprintf already reserves room for the NUL terminator.
		 */
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s",
			 dev_driver_string(&pf->pdev->dev), vsi->netdev->name);
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

		/* Notify the stack of the actual queue counts. */
		err = netif_set_real_num_tx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

		err = netif_set_real_num_rx_queues(vsi->netdev,
						   vsi->num_queue_pairs);
		if (err)
			goto err_set_queues;

	} else if (vsi->type == I40E_VSI_FDIR) {
		/* FDIR VSI has no netdev; name the IRQ after the device */
		snprintf(int_name, sizeof(int_name) - 1, "%s-%s:fdir",
			 dev_driver_string(&pf->pdev->dev),
			 dev_name(&pf->pdev->dev));
		err = i40e_vsi_request_irq(vsi, int_name);
		if (err)
			goto err_setup_rx;

	} else {
		/* no other VSI types are expected to be opened this way */
		err = -EINVAL;
		goto err_setup_rx;
	}

	err = i40e_up_complete(vsi);
	if (err)
		goto err_up_complete;

	return 0;

err_up_complete:
	i40e_down(vsi);
err_set_queues:
	i40e_vsi_free_irq(vsi);
err_setup_rx:
	i40e_vsi_free_rx_resources(vsi);
err_setup_tx:
	i40e_vsi_free_tx_resources(vsi);
	/* failing to open the LAN VSI is fatal enough to reset the PF */
	if (vsi == pf->vsi[pf->lan_vsi])
		i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));

	return err;
}
5439
5440 /**
5441  * i40e_fdir_filter_exit - Cleans up the Flow Director accounting
5442  * @pf: Pointer to PF
5443  *
5444  * This function destroys the hlist where all the Flow Director
5445  * filters were saved.
5446  **/
5447 static void i40e_fdir_filter_exit(struct i40e_pf *pf)
5448 {
5449         struct i40e_fdir_filter *filter;
5450         struct hlist_node *node2;
5451
5452         hlist_for_each_entry_safe(filter, node2,
5453                                   &pf->fdir_filter_list, fdir_node) {
5454                 hlist_del(&filter->fdir_node);
5455                 kfree(filter);
5456         }
5457         pf->fdir_pf_active_filters = 0;
5458 }
5459
5460 /**
5461  * i40e_close - Disables a network interface
5462  * @netdev: network interface device structure
5463  *
5464  * The close entry point is called when an interface is de-activated
5465  * by the OS.  The hardware is still under the driver's control, but
5466  * this netdev interface is disabled.
5467  *
5468  * Returns 0, this is not allowed to fail
5469  **/
5470 #ifdef I40E_FCOE
5471 int i40e_close(struct net_device *netdev)
5472 #else
5473 static int i40e_close(struct net_device *netdev)
5474 #endif
5475 {
5476         struct i40e_netdev_priv *np = netdev_priv(netdev);
5477         struct i40e_vsi *vsi = np->vsi;
5478
5479         i40e_vsi_close(vsi);
5480
5481         return 0;
5482 }
5483
/**
 * i40e_do_reset - Start a PF or Core Reset sequence
 * @pf: board private structure
 * @reset_flags: bitmask of __I40E_*_REQUESTED reset types
 *
 * The essential difference in resets is that the PF Reset
 * doesn't clear the packet buffers, doesn't reset the PE
 * firmware, and doesn't bother the other PFs on the chip.
 *
 * Only the single largest reset present in @reset_flags is performed
 * (Global > Core > PF > VSI reinit > VSI down).  May sleep; must not
 * be called from interrupt context (see WARN_ON below).
 **/
void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
{
	u32 val;

	WARN_ON(in_interrupt());

	/* Tell the VFs a reset is coming while the admin queue is usable */
	if (i40e_check_asq_alive(&pf->hw))
		i40e_vc_notify_reset(pf);

	/* do the biggest reset indicated */
	if (reset_flags & BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED)) {

		/* Request a Global Reset
		 *
		 * This will start the chip's countdown to the actual full
		 * chip reset event, and a warning interrupt to be sent
		 * to all PFs, including the requestor.  Our handler
		 * for the warning interrupt will deal with the shutdown
		 * and recovery of the switch setup.
		 */
		dev_dbg(&pf->pdev->dev, "GlobalR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_GLOBR_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);

	} else if (reset_flags & BIT_ULL(__I40E_CORE_RESET_REQUESTED)) {

		/* Request a Core Reset
		 *
		 * Same as Global Reset, except does *not* include the MAC/PHY
		 */
		dev_dbg(&pf->pdev->dev, "CoreR requested\n");
		val = rd32(&pf->hw, I40E_GLGEN_RTRIG);
		val |= I40E_GLGEN_RTRIG_CORER_MASK;
		wr32(&pf->hw, I40E_GLGEN_RTRIG, val);
		i40e_flush(&pf->hw);

	} else if (reset_flags & BIT_ULL(__I40E_PF_RESET_REQUESTED)) {

		/* Request a PF Reset
		 *
		 * Resets only the PF-specific registers
		 *
		 * This goes directly to the tear-down and rebuild of
		 * the switch, since we need to do all the recovery as
		 * for the Core Reset.
		 */
		dev_dbg(&pf->pdev->dev, "PFR requested\n");
		i40e_handle_reset_warning(pf);

	} else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
		int v;

		/* Find the VSI(s) that requested a re-init */
		dev_info(&pf->pdev->dev,
			 "VSI reinit requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_bit(__I40E_REINIT_REQUESTED, &vsi->state)) {
				i40e_vsi_reinit_locked(pf->vsi[v]);
				clear_bit(__I40E_REINIT_REQUESTED, &vsi->state);
			}
		}
	} else if (reset_flags & BIT_ULL(__I40E_DOWN_REQUESTED)) {
		int v;

		/* Find the VSI(s) that needs to be brought down */
		dev_info(&pf->pdev->dev, "VSI down requested\n");
		for (v = 0; v < pf->num_alloc_vsi; v++) {
			struct i40e_vsi *vsi = pf->vsi[v];

			if (vsi != NULL &&
			    test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
				set_bit(__I40E_DOWN, &vsi->state);
				i40e_down(vsi);
				clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
			}
		}
	} else {
		dev_info(&pf->pdev->dev,
			 "bad reset request 0x%08x\n", reset_flags);
	}
}
5578
5579 #ifdef CONFIG_I40E_DCB
5580 /**
5581  * i40e_dcb_need_reconfig - Check if DCB needs reconfig
5582  * @pf: board private structure
5583  * @old_cfg: current DCB config
5584  * @new_cfg: new DCB config
5585  **/
5586 bool i40e_dcb_need_reconfig(struct i40e_pf *pf,
5587                             struct i40e_dcbx_config *old_cfg,
5588                             struct i40e_dcbx_config *new_cfg)
5589 {
5590         bool need_reconfig = false;
5591
5592         /* Check if ETS configuration has changed */
5593         if (memcmp(&new_cfg->etscfg,
5594                    &old_cfg->etscfg,
5595                    sizeof(new_cfg->etscfg))) {
5596                 /* If Priority Table has changed reconfig is needed */
5597                 if (memcmp(&new_cfg->etscfg.prioritytable,
5598                            &old_cfg->etscfg.prioritytable,
5599                            sizeof(new_cfg->etscfg.prioritytable))) {
5600                         need_reconfig = true;
5601                         dev_dbg(&pf->pdev->dev, "ETS UP2TC changed.\n");
5602                 }
5603
5604                 if (memcmp(&new_cfg->etscfg.tcbwtable,
5605                            &old_cfg->etscfg.tcbwtable,
5606                            sizeof(new_cfg->etscfg.tcbwtable)))
5607                         dev_dbg(&pf->pdev->dev, "ETS TC BW Table changed.\n");
5608
5609                 if (memcmp(&new_cfg->etscfg.tsatable,
5610                            &old_cfg->etscfg.tsatable,
5611                            sizeof(new_cfg->etscfg.tsatable)))
5612                         dev_dbg(&pf->pdev->dev, "ETS TSA Table changed.\n");
5613         }
5614
5615         /* Check if PFC configuration has changed */
5616         if (memcmp(&new_cfg->pfc,
5617                    &old_cfg->pfc,
5618                    sizeof(new_cfg->pfc))) {
5619                 need_reconfig = true;
5620                 dev_dbg(&pf->pdev->dev, "PFC config change detected.\n");
5621         }
5622
5623         /* Check if APP Table has changed */
5624         if (memcmp(&new_cfg->app,
5625                    &old_cfg->app,
5626                    sizeof(new_cfg->app))) {
5627                 need_reconfig = true;
5628                 dev_dbg(&pf->pdev->dev, "APP Table change detected.\n");
5629         }
5630
5631         dev_dbg(&pf->pdev->dev, "dcb need_reconfig=%d\n", need_reconfig);
5632         return need_reconfig;
5633 }
5634
/**
 * i40e_handle_lldp_event - Handle LLDP Change MIB event
 * @pf: board private structure
 * @e: event info posted on ARQ
 *
 * Processes an LLDP "MIB changed" admin-queue event.  Remote-MIB
 * updates only refresh the cached remote DCBX config.  For a local
 * MIB change, the DCBX config is re-read from firmware and, if the
 * differences matter (see i40e_dcb_need_reconfig()), all VSIs are
 * quiesced, the DCB configuration is re-applied to VEB/VSIs, and
 * port Tx is resumed.  On failure to disable the PF's Tx queues a
 * PF reset is scheduled to recover.
 *
 * Returns 0 when no action is needed or on success, otherwise the
 * status of the failing firmware call.
 **/
static int i40e_handle_lldp_event(struct i40e_pf *pf,
				  struct i40e_arq_event_info *e)
{
	struct i40e_aqc_lldp_get_mib *mib =
		(struct i40e_aqc_lldp_get_mib *)&e->desc.params.raw;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_dcbx_config tmp_dcbx_cfg;
	bool need_reconfig = false;
	int ret = 0;
	u8 type;

	/* Not DCB capable or capability disabled */
	if (!(pf->flags & I40E_FLAG_DCB_CAPABLE))
		return ret;

	/* Ignore if event is not for Nearest Bridge */
	type = ((mib->type >> I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT)
		& I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
	dev_dbg(&pf->pdev->dev, "LLDP event mib bridge type 0x%x\n", type);
	if (type != I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE)
		return ret;

	/* Check MIB Type and return if event for Remote MIB update */
	type = mib->type & I40E_AQ_LLDP_MIB_TYPE_MASK;
	dev_dbg(&pf->pdev->dev,
		"LLDP event mib type %s\n", type ? "remote" : "local");
	if (type == I40E_AQ_LLDP_MIB_REMOTE) {
		/* Update the remote cached instance and return */
		ret = i40e_aq_get_dcb_config(hw, I40E_AQ_LLDP_MIB_REMOTE,
				I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
				&hw->remote_dcbx_config);
		goto exit;
	}

	/* Store the old configuration */
	tmp_dcbx_cfg = hw->local_dcbx_config;

	/* Reset the old DCBx configuration data */
	memset(&hw->local_dcbx_config, 0, sizeof(hw->local_dcbx_config));
	/* Get updated DCBX data from firmware */
	ret = i40e_get_dcb_config(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Failed querying DCB configuration data from firmware, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		goto exit;
	}

	/* No change detected in DCBX configs */
	if (!memcmp(&tmp_dcbx_cfg, &hw->local_dcbx_config,
		    sizeof(tmp_dcbx_cfg))) {
		dev_dbg(&pf->pdev->dev, "No change detected in DCBX configuration.\n");
		goto exit;
	}

	need_reconfig = i40e_dcb_need_reconfig(pf, &tmp_dcbx_cfg,
					       &hw->local_dcbx_config);

	/* drop dcbnl app entries that disappeared from the new config */
	i40e_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &hw->local_dcbx_config);

	if (!need_reconfig)
		goto exit;

	/* Enable DCB tagging only when more than one TC */
	if (i40e_dcb_get_num_tc(&hw->local_dcbx_config) > 1)
		pf->flags |= I40E_FLAG_DCB_ENABLED;
	else
		pf->flags &= ~I40E_FLAG_DCB_ENABLED;

	set_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* Reconfiguration needed quiesce all VSIs */
	i40e_pf_quiesce_all_vsi(pf);

	/* Changes in configuration update VEB/VSI */
	i40e_dcb_reconfigure(pf);

	ret = i40e_resume_port_tx(pf);

	clear_bit(__I40E_PORT_TX_SUSPENDED, &pf->state);
	/* In case of error no point in resuming VSIs */
	if (ret)
		goto exit;

	/* Wait for the PF's Tx queues to be disabled */
	ret = i40e_pf_wait_txq_disabled(pf);
	if (ret) {
		/* Schedule PF reset to recover */
		set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
		i40e_service_event_schedule(pf);
	} else {
		i40e_pf_unquiesce_all_vsi(pf);
	}

exit:
	return ret;
}
5737 #endif /* CONFIG_I40E_DCB */
5738
/**
 * i40e_do_reset_safe - Protected reset path for userland calls.
 * @pf: board private structure
 * @reset_flags: which reset is requested
 *
 * Wraps i40e_do_reset() in rtnl_lock()/rtnl_unlock() so that
 * userland-initiated resets are serialized against other
 * rtnl-protected netdev operations.
 **/
void i40e_do_reset_safe(struct i40e_pf *pf, u32 reset_flags)
{
	rtnl_lock();
	i40e_do_reset(pf, reset_flags);
	rtnl_unlock();
}
5751
5752 /**
5753  * i40e_handle_lan_overflow_event - Handler for LAN queue overflow event
5754  * @pf: board private structure
5755  * @e: event info posted on ARQ
5756  *
5757  * Handler for LAN Queue Overflow Event generated by the firmware for PF
5758  * and VF queues
5759  **/
5760 static void i40e_handle_lan_overflow_event(struct i40e_pf *pf,
5761                                            struct i40e_arq_event_info *e)
5762 {
5763         struct i40e_aqc_lan_overflow *data =
5764                 (struct i40e_aqc_lan_overflow *)&e->desc.params.raw;
5765         u32 queue = le32_to_cpu(data->prtdcb_rupto);
5766         u32 qtx_ctl = le32_to_cpu(data->otx_ctl);
5767         struct i40e_hw *hw = &pf->hw;
5768         struct i40e_vf *vf;
5769         u16 vf_id;
5770
5771         dev_dbg(&pf->pdev->dev, "overflow Rx Queue Number = %d QTX_CTL=0x%08x\n",
5772                 queue, qtx_ctl);
5773
5774         /* Queue belongs to VF, find the VF and issue VF reset */
5775         if (((qtx_ctl & I40E_QTX_CTL_PFVF_Q_MASK)
5776             >> I40E_QTX_CTL_PFVF_Q_SHIFT) == I40E_QTX_CTL_VF_QUEUE) {
5777                 vf_id = (u16)((qtx_ctl & I40E_QTX_CTL_VFVM_INDX_MASK)
5778                          >> I40E_QTX_CTL_VFVM_INDX_SHIFT);
5779                 vf_id -= hw->func_caps.vf_base_id;
5780                 vf = &pf->vf[vf_id];
5781                 i40e_vc_notify_vf_reset(vf);
5782                 /* Allow VF to process pending reset notification */
5783                 msleep(20);
5784                 i40e_reset_vf(vf, false);
5785         }
5786 }
5787
/**
 * i40e_service_event_complete - Finish up the service event
 * @pf: board private structure
 *
 * Clears the SERVICE_SCHED bit so the service task can be scheduled
 * again.  BUG_ON guards against completing an event that was never
 * scheduled.
 **/
static void i40e_service_event_complete(struct i40e_pf *pf)
{
	BUG_ON(!test_bit(__I40E_SERVICE_SCHED, &pf->state));

	/* flush memory to make sure state is correct before next watchdog */
	smp_mb__before_atomic();
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
}
5800
5801 /**
5802  * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
5803  * @pf: board private structure
5804  **/
5805 u32 i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
5806 {
5807         u32 val, fcnt_prog;
5808
5809         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5810         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
5811         return fcnt_prog;
5812 }
5813
5814 /**
5815  * i40e_get_current_fd_count - Get total FD filters programmed for this PF
5816  * @pf: board private structure
5817  **/
5818 u32 i40e_get_current_fd_count(struct i40e_pf *pf)
5819 {
5820         u32 val, fcnt_prog;
5821
5822         val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
5823         fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK) +
5824                     ((val & I40E_PFQF_FDSTAT_BEST_CNT_MASK) >>
5825                       I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
5826         return fcnt_prog;
5827 }
5828
5829 /**
5830  * i40e_get_global_fd_count - Get total FD filters programmed on device
5831  * @pf: board private structure
5832  **/
5833 u32 i40e_get_global_fd_count(struct i40e_pf *pf)
5834 {
5835         u32 val, fcnt_prog;
5836
5837         val = rd32(&pf->hw, I40E_GLQF_FDCNT_0);
5838         fcnt_prog = (val & I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK) +
5839                     ((val & I40E_GLQF_FDCNT_0_BESTCNT_MASK) >>
5840                      I40E_GLQF_FDCNT_0_BESTCNT_SHIFT);
5841         return fcnt_prog;
5842 }
5843
5844 /**
5845  * i40e_fdir_check_and_reenable - Function to reenabe FD ATR or SB if disabled
5846  * @pf: board private structure
5847  **/
5848 void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
5849 {
5850         struct i40e_fdir_filter *filter;
5851         u32 fcnt_prog, fcnt_avail;
5852         struct hlist_node *node;
5853
5854         if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5855                 return;
5856
5857         /* Check if, FD SB or ATR was auto disabled and if there is enough room
5858          * to re-enable
5859          */
5860         fcnt_prog = i40e_get_global_fd_count(pf);
5861         fcnt_avail = pf->fdir_pf_filter_count;
5862         if ((fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) ||
5863             (pf->fd_add_err == 0) ||
5864             (i40e_get_current_atr_cnt(pf) < pf->fd_atr_cnt)) {
5865                 if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
5866                     (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
5867                         pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
5868                         if (I40E_DEBUG_FD & pf->hw.debug_mask)
5869                                 dev_info(&pf->pdev->dev, "FD Sideband/ntuple is being enabled since we have space in the table now\n");
5870                 }
5871         }
5872         /* Wait for some more space to be available to turn on ATR */
5873         if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM * 2)) {
5874                 if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
5875                     (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
5876                         pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
5877                         if (I40E_DEBUG_FD & pf->hw.debug_mask)
5878                                 dev_info(&pf->pdev->dev, "ATR is being enabled since we have space in the table now\n");
5879                 }
5880         }
5881
5882         /* if hw had a problem adding a filter, delete it */
5883         if (pf->fd_inv > 0) {
5884                 hlist_for_each_entry_safe(filter, node,
5885                                           &pf->fdir_filter_list, fdir_node) {
5886                         if (filter->fd_id == pf->fd_inv) {
5887                                 hlist_del(&filter->fdir_node);
5888                                 kfree(filter);
5889                                 pf->fdir_pf_active_filters--;
5890                         }
5891                 }
5892         }
5893 }
5894
#define I40E_MIN_FD_FLUSH_INTERVAL 10
#define I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE 30
/**
 * i40e_fdir_flush_and_replay - Function to flush all FD filters and replay SB
 * @pf: board private structure
 *
 * Clears the entire Flow Director table in hardware (rate limited to
 * once per I40E_MIN_FD_FLUSH_INTERVAL seconds) and then re-programs
 * the sideband filters from the driver's list.  ATR is re-enabled
 * afterwards unless the flush came within the
 * I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE window with too little table
 * room left for ATR.
 **/
static void i40e_fdir_flush_and_replay(struct i40e_pf *pf)
{
	unsigned long min_flush_time;
	int flush_wait_retry = 50;
	bool disable_atr = false;
	int fd_room;
	int reg;

	if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
		return;

	/* rate limit: at most one flush per I40E_MIN_FD_FLUSH_INTERVAL sec */
	if (!time_after(jiffies, pf->fd_flush_timestamp +
				 (I40E_MIN_FD_FLUSH_INTERVAL * HZ)))
		return;

	/* If the flush is happening too quick and we have mostly SB rules we
	 * should not re-enable ATR for some time.
	 */
	min_flush_time = pf->fd_flush_timestamp +
			 (I40E_MIN_FD_FLUSH_SB_ATR_UNSTABLE * HZ);
	fd_room = pf->fdir_pf_filter_count - pf->fdir_pf_active_filters;

	if (!(time_after(jiffies, min_flush_time)) &&
	    (fd_room < I40E_FDIR_BUFFER_HEAD_ROOM_FOR_ATR)) {
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR disabled, not enough FD filter space.\n");
		disable_atr = true;
	}

	pf->fd_flush_timestamp = jiffies;
	pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
	/* flush all filters */
	wr32(&pf->hw, I40E_PFQF_CTL_1,
	     I40E_PFQF_CTL_1_CLEARFDTABLE_MASK);
	i40e_flush(&pf->hw);
	pf->fd_flush_cnt++;
	pf->fd_add_err = 0;
	do {
		/* Check FD flush status every 5-6msec */
		usleep_range(5000, 6000);
		reg = rd32(&pf->hw, I40E_PFQF_CTL_1);
		/* hardware clears CLEARFDTABLE when the flush completes */
		if (!(reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK))
			break;
	} while (flush_wait_retry--);
	if (reg & I40E_PFQF_CTL_1_CLEARFDTABLE_MASK) {
		dev_warn(&pf->pdev->dev, "FD table did not flush, needs more time\n");
	} else {
		/* replay sideband filters */
		i40e_fdir_filter_restore(pf->vsi[pf->lan_vsi]);
		if (!disable_atr)
			pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		clear_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state);
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "FD Filter table flushed and FD-SB replayed.\n");
	}

}
5958
5959 /**
5960  * i40e_get_current_atr_count - Get the count of total FD ATR filters programmed
5961  * @pf: board private structure
5962  **/
5963 u32 i40e_get_current_atr_cnt(struct i40e_pf *pf)
5964 {
5965         return i40e_get_current_fd_count(pf) - pf->fdir_pf_active_filters;
5966 }
5967
5968 /* We can see up to 256 filter programming desc in transit if the filters are
5969  * being applied really fast; before we see the first
5970  * filter miss error on Rx queue 0. Accumulating enough error messages before
5971  * reacting will make sure we don't cause flush too often.
5972  */
5973 #define I40E_MAX_FD_PROGRAM_ERROR 256
5974
5975 /**
5976  * i40e_fdir_reinit_subtask - Worker thread to reinit FDIR filter table
5977  * @pf: board private structure
5978  **/
5979 static void i40e_fdir_reinit_subtask(struct i40e_pf *pf)
5980 {
5981
5982         /* if interface is down do nothing */
5983         if (test_bit(__I40E_DOWN, &pf->state))
5984                 return;
5985
5986         if (!(pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED)))
5987                 return;
5988
5989         if (test_bit(__I40E_FD_FLUSH_REQUESTED, &pf->state))
5990                 i40e_fdir_flush_and_replay(pf);
5991
5992         i40e_fdir_check_and_reenable(pf);
5993
5994 }
5995
5996 /**
5997  * i40e_vsi_link_event - notify VSI of a link event
5998  * @vsi: vsi to be notified
5999  * @link_up: link up or down
6000  **/
6001 static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
6002 {
6003         if (!vsi || test_bit(__I40E_DOWN, &vsi->state))
6004                 return;
6005
6006         switch (vsi->type) {
6007         case I40E_VSI_MAIN:
6008 #ifdef I40E_FCOE
6009         case I40E_VSI_FCOE:
6010 #endif
6011                 if (!vsi->netdev || !vsi->netdev_registered)
6012                         break;
6013
6014                 if (link_up) {
6015                         netif_carrier_on(vsi->netdev);
6016                         netif_tx_wake_all_queues(vsi->netdev);
6017                 } else {
6018                         netif_carrier_off(vsi->netdev);
6019                         netif_tx_stop_all_queues(vsi->netdev);
6020                 }
6021                 break;
6022
6023         case I40E_VSI_SRIOV:
6024         case I40E_VSI_VMDQ2:
6025         case I40E_VSI_CTRL:
6026         case I40E_VSI_MIRROR:
6027         default:
6028                 /* there is no notification for other VSIs */
6029                 break;
6030         }
6031 }
6032
6033 /**
6034  * i40e_veb_link_event - notify elements on the veb of a link event
6035  * @veb: veb to be notified
6036  * @link_up: link up or down
6037  **/
6038 static void i40e_veb_link_event(struct i40e_veb *veb, bool link_up)
6039 {
6040         struct i40e_pf *pf;
6041         int i;
6042
6043         if (!veb || !veb->pf)
6044                 return;
6045         pf = veb->pf;
6046
6047         /* depth first... */
6048         for (i = 0; i < I40E_MAX_VEB; i++)
6049                 if (pf->veb[i] && (pf->veb[i]->uplink_seid == veb->seid))
6050                         i40e_veb_link_event(pf->veb[i], link_up);
6051
6052         /* ... now the local VSIs */
6053         for (i = 0; i < pf->num_alloc_vsi; i++)
6054                 if (pf->vsi[i] && (pf->vsi[i]->uplink_seid == veb->seid))
6055                         i40e_vsi_link_event(pf->vsi[i], link_up);
6056 }
6057
/**
 * i40e_link_event - Update netif_carrier status
 * @pf: board private structure
 *
 * Refreshes the PHY link state from firmware.  If the link state or
 * speed changed, prints the link message, propagates the event down
 * the switch tree (LAN VEB if present, otherwise the LAN VSI),
 * notifies the VFs, and calls i40e_ptp_set_increment() when PTP is
 * enabled.
 **/
static void i40e_link_event(struct i40e_pf *pf)
{
	struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
	u8 new_link_speed, old_link_speed;
	i40e_status status;
	bool new_link, old_link;

	/* set this to force the get_link_status call to refresh state */
	pf->hw.phy.get_link_info = true;

	old_link = (pf->hw.phy.link_info_old.link_info & I40E_AQ_LINK_UP);

	status = i40e_get_link_status(&pf->hw, &new_link);
	if (status) {
		dev_dbg(&pf->pdev->dev, "couldn't get link state, status: %d\n",
			status);
		return;
	}

	old_link_speed = pf->hw.phy.link_info_old.link_speed;
	new_link_speed = pf->hw.phy.link_info.link_speed;

	/* Nothing to do when neither state nor speed changed and the carrier
	 * already agrees (or the VSI is down anyway).
	 * NOTE(review): netif_carrier_ok() dereferences vsi->netdev — this
	 * assumes the LAN VSI always has a netdev; confirm with callers.
	 */
	if (new_link == old_link &&
	    new_link_speed == old_link_speed &&
	    (test_bit(__I40E_DOWN, &vsi->state) ||
	     new_link == netif_carrier_ok(vsi->netdev)))
		return;

	if (!test_bit(__I40E_DOWN, &vsi->state))
		i40e_print_link_message(vsi, new_link);

	/* Notify the base of the switch tree connected to
	 * the link.  Floating VEBs are not notified.
	 */
	if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
		i40e_veb_link_event(pf->veb[pf->lan_veb], new_link);
	else
		i40e_vsi_link_event(vsi, new_link);

	if (pf->vf)
		i40e_vc_notify_link_state(pf);

	if (pf->flags & I40E_FLAG_PTP)
		i40e_ptp_set_increment(pf);
}
6107
6108 /**
6109  * i40e_watchdog_subtask - periodic checks not using event driven response
6110  * @pf: board private structure
6111  **/
6112 static void i40e_watchdog_subtask(struct i40e_pf *pf)
6113 {
6114         int i;
6115
6116         /* if interface is down do nothing */
6117         if (test_bit(__I40E_DOWN, &pf->state) ||
6118             test_bit(__I40E_CONFIG_BUSY, &pf->state))
6119                 return;
6120
6121         /* make sure we don't do these things too often */
6122         if (time_before(jiffies, (pf->service_timer_previous +
6123                                   pf->service_timer_period)))
6124                 return;
6125         pf->service_timer_previous = jiffies;
6126
6127         if (pf->flags & I40E_FLAG_LINK_POLLING_ENABLED)
6128                 i40e_link_event(pf);
6129
6130         /* Update the stats for active netdevs so the network stack
6131          * can look at updated numbers whenever it cares to
6132          */
6133         for (i = 0; i < pf->num_alloc_vsi; i++)
6134                 if (pf->vsi[i] && pf->vsi[i]->netdev)
6135                         i40e_update_stats(pf->vsi[i]);
6136
6137         if (pf->flags & I40E_FLAG_VEB_STATS_ENABLED) {
6138                 /* Update the stats for the active switching components */
6139                 for (i = 0; i < I40E_MAX_VEB; i++)
6140                         if (pf->veb[i])
6141                                 i40e_update_veb_stats(pf->veb[i]);
6142         }
6143
6144         i40e_ptp_rx_hang(pf->vsi[pf->lan_vsi]);
6145 }
6146
/**
 * i40e_reset_subtask - Set up for resetting the device and driver
 * @pf: board private structure
 *
 * Collects every reset request latched in pf->state into a flags
 * mask (clearing each request bit), then either finishes a pending
 * firmware-signalled reset or starts the requested reset via
 * i40e_do_reset().  Everything runs under rtnl_lock.
 **/
static void i40e_reset_subtask(struct i40e_pf *pf)
{
	u32 reset_flags = 0;

	rtnl_lock();
	/* gather and clear all pending reset requests */
	if (test_bit(__I40E_REINIT_REQUESTED, &pf->state)) {
		reset_flags |= BIT_ULL(__I40E_REINIT_REQUESTED);
		clear_bit(__I40E_REINIT_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_PF_RESET_REQUESTED, &pf->state)) {
		reset_flags |= BIT_ULL(__I40E_PF_RESET_REQUESTED);
		clear_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_CORE_RESET_REQUESTED, &pf->state)) {
		reset_flags |= BIT_ULL(__I40E_CORE_RESET_REQUESTED);
		clear_bit(__I40E_CORE_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state)) {
		reset_flags |= BIT_ULL(__I40E_GLOBAL_RESET_REQUESTED);
		clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
	}
	if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
		reset_flags |= BIT_ULL(__I40E_DOWN_REQUESTED);
		clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
	}

	/* If there's a recovery already waiting, it takes
	 * precedence before starting a new reset sequence.
	 */
	if (test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state)) {
		i40e_handle_reset_warning(pf);
		goto unlock;
	}

	/* Start the requested reset(s) only when we are neither already
	 * down nor in the middle of another reconfiguration.
	 */
	if (reset_flags &&
	    !test_bit(__I40E_DOWN, &pf->state) &&
	    !test_bit(__I40E_CONFIG_BUSY, &pf->state))
		i40e_do_reset(pf, reset_flags);

unlock:
	rtnl_unlock();
}
6194
6195 /**
6196  * i40e_handle_link_event - Handle link event
6197  * @pf: board private structure
6198  * @e: event info posted on ARQ
6199  **/
6200 static void i40e_handle_link_event(struct i40e_pf *pf,
6201                                    struct i40e_arq_event_info *e)
6202 {
6203         struct i40e_hw *hw = &pf->hw;
6204         struct i40e_aqc_get_link_status *status =
6205                 (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
6206
6207         /* save off old link status information */
6208         hw->phy.link_info_old = hw->phy.link_info;
6209
6210         /* Do a new status request to re-enable LSE reporting
6211          * and load new status information into the hw struct
6212          * This completely ignores any state information
6213          * in the ARQ event info, instead choosing to always
6214          * issue the AQ update link status command.
6215          */
6216         i40e_link_event(pf);
6217
6218         /* check for unqualified module, if link is down */
6219         if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
6220             (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
6221             (!(status->link_info & I40E_AQ_LINK_UP)))
6222                 dev_err(&pf->pdev->dev,
6223                         "The driver failed to link because an unqualified module was detected.\n");
6224 }
6225
6226 /**
6227  * i40e_clean_adminq_subtask - Clean the AdminQ rings
6228  * @pf: board private structure
6229  **/
6230 static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
6231 {
6232         struct i40e_arq_event_info event;
6233         struct i40e_hw *hw = &pf->hw;
6234         u16 pending, i = 0;
6235         i40e_status ret;
6236         u16 opcode;
6237         u32 oldval;
6238         u32 val;
6239
6240         /* Do not run clean AQ when PF reset fails */
6241         if (test_bit(__I40E_RESET_FAILED, &pf->state))
6242                 return;
6243
6244         /* check for error indications */
6245         val = rd32(&pf->hw, pf->hw.aq.arq.len);
6246         oldval = val;
6247         if (val & I40E_PF_ARQLEN_ARQVFE_MASK) {
6248                 dev_info(&pf->pdev->dev, "ARQ VF Error detected\n");
6249                 val &= ~I40E_PF_ARQLEN_ARQVFE_MASK;
6250         }
6251         if (val & I40E_PF_ARQLEN_ARQOVFL_MASK) {
6252                 dev_info(&pf->pdev->dev, "ARQ Overflow Error detected\n");
6253                 val &= ~I40E_PF_ARQLEN_ARQOVFL_MASK;
6254         }
6255         if (val & I40E_PF_ARQLEN_ARQCRIT_MASK) {
6256                 dev_info(&pf->pdev->dev, "ARQ Critical Error detected\n");
6257                 val &= ~I40E_PF_ARQLEN_ARQCRIT_MASK;
6258         }
6259         if (oldval != val)
6260                 wr32(&pf->hw, pf->hw.aq.arq.len, val);
6261
6262         val = rd32(&pf->hw, pf->hw.aq.asq.len);
6263         oldval = val;
6264         if (val & I40E_PF_ATQLEN_ATQVFE_MASK) {
6265                 dev_info(&pf->pdev->dev, "ASQ VF Error detected\n");
6266                 val &= ~I40E_PF_ATQLEN_ATQVFE_MASK;
6267         }
6268         if (val & I40E_PF_ATQLEN_ATQOVFL_MASK) {
6269                 dev_info(&pf->pdev->dev, "ASQ Overflow Error detected\n");
6270                 val &= ~I40E_PF_ATQLEN_ATQOVFL_MASK;
6271         }
6272         if (val & I40E_PF_ATQLEN_ATQCRIT_MASK) {
6273                 dev_info(&pf->pdev->dev, "ASQ Critical Error detected\n");
6274                 val &= ~I40E_PF_ATQLEN_ATQCRIT_MASK;
6275         }
6276         if (oldval != val)
6277                 wr32(&pf->hw, pf->hw.aq.asq.len, val);
6278
6279         event.buf_len = I40E_MAX_AQ_BUF_SIZE;
6280         event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
6281         if (!event.msg_buf)
6282                 return;
6283
6284         do {
6285                 ret = i40e_clean_arq_element(hw, &event, &pending);
6286                 if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
6287                         break;
6288                 else if (ret) {
6289                         dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
6290                         break;
6291                 }
6292
6293                 opcode = le16_to_cpu(event.desc.opcode);
6294                 switch (opcode) {
6295
6296                 case i40e_aqc_opc_get_link_status:
6297                         i40e_handle_link_event(pf, &event);
6298                         break;
6299                 case i40e_aqc_opc_send_msg_to_pf:
6300                         ret = i40e_vc_process_vf_msg(pf,
6301                                         le16_to_cpu(event.desc.retval),
6302                                         le32_to_cpu(event.desc.cookie_high),
6303                                         le32_to_cpu(event.desc.cookie_low),
6304                                         event.msg_buf,
6305                                         event.msg_len);
6306                         break;
6307                 case i40e_aqc_opc_lldp_update_mib:
6308                         dev_dbg(&pf->pdev->dev, "ARQ: Update LLDP MIB event received\n");
6309 #ifdef CONFIG_I40E_DCB
6310                         rtnl_lock();
6311                         ret = i40e_handle_lldp_event(pf, &event);
6312                         rtnl_unlock();
6313 #endif /* CONFIG_I40E_DCB */
6314                         break;
6315                 case i40e_aqc_opc_event_lan_overflow:
6316                         dev_dbg(&pf->pdev->dev, "ARQ LAN queue overflow event received\n");
6317                         i40e_handle_lan_overflow_event(pf, &event);
6318                         break;
6319                 case i40e_aqc_opc_send_msg_to_peer:
6320                         dev_info(&pf->pdev->dev, "ARQ: Msg from other pf\n");
6321                         break;
6322                 case i40e_aqc_opc_nvm_erase:
6323                 case i40e_aqc_opc_nvm_update:
6324                         i40e_debug(&pf->hw, I40E_DEBUG_NVM, "ARQ NVM operation completed\n");
6325                         break;
6326                 default:
6327                         dev_info(&pf->pdev->dev,
6328                                  "ARQ Error: Unknown event 0x%04x received\n",
6329                                  opcode);
6330                         break;
6331                 }
6332         } while (pending && (i++ < pf->adminq_work_limit));
6333
6334         clear_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state);
6335         /* re-enable Admin queue interrupt cause */
6336         val = rd32(hw, I40E_PFINT_ICR0_ENA);
6337         val |=  I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
6338         wr32(hw, I40E_PFINT_ICR0_ENA, val);
6339         i40e_flush(hw);
6340
6341         kfree(event.msg_buf);
6342 }
6343
6344 /**
6345  * i40e_verify_eeprom - make sure eeprom is good to use
6346  * @pf: board private structure
6347  **/
6348 static void i40e_verify_eeprom(struct i40e_pf *pf)
6349 {
6350         int err;
6351
6352         err = i40e_diag_eeprom_test(&pf->hw);
6353         if (err) {
6354                 /* retry in case of garbage read */
6355                 err = i40e_diag_eeprom_test(&pf->hw);
6356                 if (err) {
6357                         dev_info(&pf->pdev->dev, "eeprom check failed (%d), Tx/Rx traffic disabled\n",
6358                                  err);
6359                         set_bit(__I40E_BAD_EEPROM, &pf->state);
6360                 }
6361         }
6362
6363         if (!err && test_bit(__I40E_BAD_EEPROM, &pf->state)) {
6364                 dev_info(&pf->pdev->dev, "eeprom check passed, Tx/Rx traffic enabled\n");
6365                 clear_bit(__I40E_BAD_EEPROM, &pf->state);
6366         }
6367 }
6368
6369 /**
6370  * i40e_enable_pf_switch_lb
6371  * @pf: pointer to the PF structure
6372  *
6373  * enable switch loop back or die - no point in a return value
6374  **/
6375 static void i40e_enable_pf_switch_lb(struct i40e_pf *pf)
6376 {
6377         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6378         struct i40e_vsi_context ctxt;
6379         int ret;
6380
6381         ctxt.seid = pf->main_vsi_seid;
6382         ctxt.pf_num = pf->hw.pf_id;
6383         ctxt.vf_num = 0;
6384         ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6385         if (ret) {
6386                 dev_info(&pf->pdev->dev,
6387                          "couldn't get PF vsi config, err %s aq_err %s\n",
6388                          i40e_stat_str(&pf->hw, ret),
6389                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6390                 return;
6391         }
6392         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6393         ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6394         ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6395
6396         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6397         if (ret) {
6398                 dev_info(&pf->pdev->dev,
6399                          "update vsi switch failed, err %s aq_err %s\n",
6400                          i40e_stat_str(&pf->hw, ret),
6401                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6402         }
6403 }
6404
6405 /**
6406  * i40e_disable_pf_switch_lb
6407  * @pf: pointer to the PF structure
6408  *
6409  * disable switch loop back or die - no point in a return value
6410  **/
6411 static void i40e_disable_pf_switch_lb(struct i40e_pf *pf)
6412 {
6413         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
6414         struct i40e_vsi_context ctxt;
6415         int ret;
6416
6417         ctxt.seid = pf->main_vsi_seid;
6418         ctxt.pf_num = pf->hw.pf_id;
6419         ctxt.vf_num = 0;
6420         ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
6421         if (ret) {
6422                 dev_info(&pf->pdev->dev,
6423                          "couldn't get PF vsi config, err %s aq_err %s\n",
6424                          i40e_stat_str(&pf->hw, ret),
6425                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6426                 return;
6427         }
6428         ctxt.flags = I40E_AQ_VSI_TYPE_PF;
6429         ctxt.info.valid_sections = cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
6430         ctxt.info.switch_id &= ~cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
6431
6432         ret = i40e_aq_update_vsi_params(&vsi->back->hw, &ctxt, NULL);
6433         if (ret) {
6434                 dev_info(&pf->pdev->dev,
6435                          "update vsi switch failed, err %s aq_err %s\n",
6436                          i40e_stat_str(&pf->hw, ret),
6437                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6438         }
6439 }
6440
6441 /**
6442  * i40e_config_bridge_mode - Configure the HW bridge mode
6443  * @veb: pointer to the bridge instance
6444  *
6445  * Configure the loop back mode for the LAN VSI that is downlink to the
6446  * specified HW bridge instance. It is expected this function is called
6447  * when a new HW bridge is instantiated.
6448  **/
6449 static void i40e_config_bridge_mode(struct i40e_veb *veb)
6450 {
6451         struct i40e_pf *pf = veb->pf;
6452
6453         if (pf->hw.debug_mask & I40E_DEBUG_LAN)
6454                 dev_info(&pf->pdev->dev, "enabling bridge mode: %s\n",
6455                          veb->bridge_mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB");
6456         if (veb->bridge_mode & BRIDGE_MODE_VEPA)
6457                 i40e_disable_pf_switch_lb(pf);
6458         else
6459                 i40e_enable_pf_switch_lb(pf);
6460 }
6461
6462 /**
6463  * i40e_reconstitute_veb - rebuild the VEB and anything connected to it
6464  * @veb: pointer to the VEB instance
6465  *
6466  * This is a recursive function that first builds the attached VSIs then
6467  * recurses in to build the next layer of VEB.  We track the connections
6468  * through our own index numbers because the seid's from the HW could
6469  * change across the reset.
6470  **/
6471 static int i40e_reconstitute_veb(struct i40e_veb *veb)
6472 {
6473         struct i40e_vsi *ctl_vsi = NULL;
6474         struct i40e_pf *pf = veb->pf;
6475         int v, veb_idx;
6476         int ret;
6477
6478         /* build VSI that owns this VEB, temporarily attached to base VEB */
6479         for (v = 0; v < pf->num_alloc_vsi && !ctl_vsi; v++) {
6480                 if (pf->vsi[v] &&
6481                     pf->vsi[v]->veb_idx == veb->idx &&
6482                     pf->vsi[v]->flags & I40E_VSI_FLAG_VEB_OWNER) {
6483                         ctl_vsi = pf->vsi[v];
6484                         break;
6485                 }
6486         }
6487         if (!ctl_vsi) {
6488                 dev_info(&pf->pdev->dev,
6489                          "missing owner VSI for veb_idx %d\n", veb->idx);
6490                 ret = -ENOENT;
6491                 goto end_reconstitute;
6492         }
6493         if (ctl_vsi != pf->vsi[pf->lan_vsi])
6494                 ctl_vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
6495         ret = i40e_add_vsi(ctl_vsi);
6496         if (ret) {
6497                 dev_info(&pf->pdev->dev,
6498                          "rebuild of veb_idx %d owner VSI failed: %d\n",
6499                          veb->idx, ret);
6500                 goto end_reconstitute;
6501         }
6502         i40e_vsi_reset_stats(ctl_vsi);
6503
6504         /* create the VEB in the switch and move the VSI onto the VEB */
6505         ret = i40e_add_veb(veb, ctl_vsi);
6506         if (ret)
6507                 goto end_reconstitute;
6508
6509         if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
6510                 veb->bridge_mode = BRIDGE_MODE_VEB;
6511         else
6512                 veb->bridge_mode = BRIDGE_MODE_VEPA;
6513         i40e_config_bridge_mode(veb);
6514
6515         /* create the remaining VSIs attached to this VEB */
6516         for (v = 0; v < pf->num_alloc_vsi; v++) {
6517                 if (!pf->vsi[v] || pf->vsi[v] == ctl_vsi)
6518                         continue;
6519
6520                 if (pf->vsi[v]->veb_idx == veb->idx) {
6521                         struct i40e_vsi *vsi = pf->vsi[v];
6522
6523                         vsi->uplink_seid = veb->seid;
6524                         ret = i40e_add_vsi(vsi);
6525                         if (ret) {
6526                                 dev_info(&pf->pdev->dev,
6527                                          "rebuild of vsi_idx %d failed: %d\n",
6528                                          v, ret);
6529                                 goto end_reconstitute;
6530                         }
6531                         i40e_vsi_reset_stats(vsi);
6532                 }
6533         }
6534
6535         /* create any VEBs attached to this VEB - RECURSION */
6536         for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
6537                 if (pf->veb[veb_idx] && pf->veb[veb_idx]->veb_idx == veb->idx) {
6538                         pf->veb[veb_idx]->uplink_seid = veb->seid;
6539                         ret = i40e_reconstitute_veb(pf->veb[veb_idx]);
6540                         if (ret)
6541                                 break;
6542                 }
6543         }
6544
6545 end_reconstitute:
6546         return ret;
6547 }
6548
6549 /**
6550  * i40e_get_capabilities - get info about the HW
6551  * @pf: the PF struct
6552  **/
6553 static int i40e_get_capabilities(struct i40e_pf *pf)
6554 {
6555         struct i40e_aqc_list_capabilities_element_resp *cap_buf;
6556         u16 data_size;
6557         int buf_len;
6558         int err;
6559
6560         buf_len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
6561         do {
6562                 cap_buf = kzalloc(buf_len, GFP_KERNEL);
6563                 if (!cap_buf)
6564                         return -ENOMEM;
6565
6566                 /* this loads the data into the hw struct for us */
6567                 err = i40e_aq_discover_capabilities(&pf->hw, cap_buf, buf_len,
6568                                             &data_size,
6569                                             i40e_aqc_opc_list_func_capabilities,
6570                                             NULL);
6571                 /* data loaded, buffer no longer needed */
6572                 kfree(cap_buf);
6573
6574                 if (pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) {
6575                         /* retry with a larger buffer */
6576                         buf_len = data_size;
6577                 } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
6578                         dev_info(&pf->pdev->dev,
6579                                  "capability discovery failed, err %s aq_err %s\n",
6580                                  i40e_stat_str(&pf->hw, err),
6581                                  i40e_aq_str(&pf->hw,
6582                                              pf->hw.aq.asq_last_status));
6583                         return -ENODEV;
6584                 }
6585         } while (err);
6586
6587         if (pf->hw.debug_mask & I40E_DEBUG_USER)
6588                 dev_info(&pf->pdev->dev,
6589                          "pf=%d, num_vfs=%d, msix_pf=%d, msix_vf=%d, fd_g=%d, fd_b=%d, pf_max_q=%d num_vsi=%d\n",
6590                          pf->hw.pf_id, pf->hw.func_caps.num_vfs,
6591                          pf->hw.func_caps.num_msix_vectors,
6592                          pf->hw.func_caps.num_msix_vectors_vf,
6593                          pf->hw.func_caps.fd_filters_guaranteed,
6594                          pf->hw.func_caps.fd_filters_best_effort,
6595                          pf->hw.func_caps.num_tx_qp,
6596                          pf->hw.func_caps.num_vsis);
6597
6598 #define DEF_NUM_VSI (1 + (pf->hw.func_caps.fcoe ? 1 : 0) \
6599                        + pf->hw.func_caps.num_vfs)
6600         if (pf->hw.revision_id == 0 && (DEF_NUM_VSI > pf->hw.func_caps.num_vsis)) {
6601                 dev_info(&pf->pdev->dev,
6602                          "got num_vsis %d, setting num_vsis to %d\n",
6603                          pf->hw.func_caps.num_vsis, DEF_NUM_VSI);
6604                 pf->hw.func_caps.num_vsis = DEF_NUM_VSI;
6605         }
6606
6607         return 0;
6608 }
6609
6610 static int i40e_vsi_clear(struct i40e_vsi *vsi);
6611
6612 /**
6613  * i40e_fdir_sb_setup - initialize the Flow Director resources for Sideband
6614  * @pf: board private structure
6615  **/
6616 static void i40e_fdir_sb_setup(struct i40e_pf *pf)
6617 {
6618         struct i40e_vsi *vsi;
6619         int i;
6620
6621         /* quick workaround for an NVM issue that leaves a critical register
6622          * uninitialized
6623          */
6624         if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
6625                 static const u32 hkey[] = {
6626                         0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
6627                         0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
6628                         0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
6629                         0x95b3a76d};
6630
6631                 for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
6632                         wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
6633         }
6634
6635         if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
6636                 return;
6637
6638         /* find existing VSI and see if it needs configuring */
6639         vsi = NULL;
6640         for (i = 0; i < pf->num_alloc_vsi; i++) {
6641                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6642                         vsi = pf->vsi[i];
6643                         break;
6644                 }
6645         }
6646
6647         /* create a new VSI if none exists */
6648         if (!vsi) {
6649                 vsi = i40e_vsi_setup(pf, I40E_VSI_FDIR,
6650                                      pf->vsi[pf->lan_vsi]->seid, 0);
6651                 if (!vsi) {
6652                         dev_info(&pf->pdev->dev, "Couldn't create FDir VSI\n");
6653                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
6654                         return;
6655                 }
6656         }
6657
6658         i40e_vsi_setup_irqhandler(vsi, i40e_fdir_clean_ring);
6659 }
6660
6661 /**
6662  * i40e_fdir_teardown - release the Flow Director resources
6663  * @pf: board private structure
6664  **/
6665 static void i40e_fdir_teardown(struct i40e_pf *pf)
6666 {
6667         int i;
6668
6669         i40e_fdir_filter_exit(pf);
6670         for (i = 0; i < pf->num_alloc_vsi; i++) {
6671                 if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
6672                         i40e_vsi_release(pf->vsi[i]);
6673                         break;
6674                 }
6675         }
6676 }
6677
6678 /**
6679  * i40e_prep_for_reset - prep for the core to reset
6680  * @pf: board private structure
6681  *
6682  * Close up the VFs and other things in prep for PF Reset.
6683   **/
6684 static void i40e_prep_for_reset(struct i40e_pf *pf)
6685 {
6686         struct i40e_hw *hw = &pf->hw;
6687         i40e_status ret = 0;
6688         u32 v;
6689
6690         clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
6691         if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
6692                 return;
6693
6694         dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
6695
6696         /* quiesce the VSIs and their queues that are not already DOWN */
6697         i40e_pf_quiesce_all_vsi(pf);
6698
6699         for (v = 0; v < pf->num_alloc_vsi; v++) {
6700                 if (pf->vsi[v])
6701                         pf->vsi[v]->seid = 0;
6702         }
6703
6704         i40e_shutdown_adminq(&pf->hw);
6705
6706         /* call shutdown HMC */
6707         if (hw->hmc.hmc_obj) {
6708                 ret = i40e_shutdown_lan_hmc(hw);
6709                 if (ret)
6710                         dev_warn(&pf->pdev->dev,
6711                                  "shutdown_lan_hmc failed: %d\n", ret);
6712         }
6713 }
6714
6715 /**
6716  * i40e_send_version - update firmware with driver version
6717  * @pf: PF struct
6718  */
6719 static void i40e_send_version(struct i40e_pf *pf)
6720 {
6721         struct i40e_driver_version dv;
6722
6723         dv.major_version = DRV_VERSION_MAJOR;
6724         dv.minor_version = DRV_VERSION_MINOR;
6725         dv.build_version = DRV_VERSION_BUILD;
6726         dv.subbuild_version = 0;
6727         strlcpy(dv.driver_string, DRV_VERSION, sizeof(dv.driver_string));
6728         i40e_aq_send_driver_version(&pf->hw, &dv, NULL);
6729 }
6730
6731 /**
6732  * i40e_reset_and_rebuild - reset and rebuild using a saved config
6733  * @pf: board private structure
6734  * @reinit: if the Main VSI needs to re-initialized.
6735  **/
6736 static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
6737 {
6738         struct i40e_hw *hw = &pf->hw;
6739         u8 set_fc_aq_fail = 0;
6740         i40e_status ret;
6741         u32 val;
6742         u32 v;
6743
6744         /* Now we wait for GRST to settle out.
6745          * We don't have to delete the VEBs or VSIs from the hw switch
6746          * because the reset will make them disappear.
6747          */
6748         ret = i40e_pf_reset(hw);
6749         if (ret) {
6750                 dev_info(&pf->pdev->dev, "PF reset failed, %d\n", ret);
6751                 set_bit(__I40E_RESET_FAILED, &pf->state);
6752                 goto clear_recovery;
6753         }
6754         pf->pfr_count++;
6755
6756         if (test_bit(__I40E_DOWN, &pf->state))
6757                 goto clear_recovery;
6758         dev_dbg(&pf->pdev->dev, "Rebuilding internal switch\n");
6759
6760         /* rebuild the basics for the AdminQ, HMC, and initial HW switch */
6761         ret = i40e_init_adminq(&pf->hw);
6762         if (ret) {
6763                 dev_info(&pf->pdev->dev, "Rebuild AdminQ failed, err %s aq_err %s\n",
6764                          i40e_stat_str(&pf->hw, ret),
6765                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6766                 goto clear_recovery;
6767         }
6768
6769         /* re-verify the eeprom if we just had an EMP reset */
6770         if (test_and_clear_bit(__I40E_EMP_RESET_INTR_RECEIVED, &pf->state))
6771                 i40e_verify_eeprom(pf);
6772
6773         i40e_clear_pxe_mode(hw);
6774         ret = i40e_get_capabilities(pf);
6775         if (ret)
6776                 goto end_core_reset;
6777
6778         ret = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
6779                                 hw->func_caps.num_rx_qp,
6780                                 pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
6781         if (ret) {
6782                 dev_info(&pf->pdev->dev, "init_lan_hmc failed: %d\n", ret);
6783                 goto end_core_reset;
6784         }
6785         ret = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
6786         if (ret) {
6787                 dev_info(&pf->pdev->dev, "configure_lan_hmc failed: %d\n", ret);
6788                 goto end_core_reset;
6789         }
6790
6791 #ifdef CONFIG_I40E_DCB
6792         ret = i40e_init_pf_dcb(pf);
6793         if (ret) {
6794                 dev_info(&pf->pdev->dev, "DCB init failed %d, disabled\n", ret);
6795                 pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
6796                 /* Continue without DCB enabled */
6797         }
6798 #endif /* CONFIG_I40E_DCB */
6799 #ifdef I40E_FCOE
6800         i40e_init_pf_fcoe(pf);
6801
6802 #endif
6803         /* do basic switch setup */
6804         ret = i40e_setup_pf_switch(pf, reinit);
6805         if (ret)
6806                 goto end_core_reset;
6807
6808         /* driver is only interested in link up/down and module qualification
6809          * reports from firmware
6810          */
6811         ret = i40e_aq_set_phy_int_mask(&pf->hw,
6812                                        I40E_AQ_EVENT_LINK_UPDOWN |
6813                                        I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
6814         if (ret)
6815                 dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
6816                          i40e_stat_str(&pf->hw, ret),
6817                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6818
6819         /* make sure our flow control settings are restored */
6820         ret = i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
6821         if (ret)
6822                 dev_dbg(&pf->pdev->dev, "setting flow control: ret = %s last_status = %s\n",
6823                         i40e_stat_str(&pf->hw, ret),
6824                         i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
6825
6826         /* Rebuild the VSIs and VEBs that existed before reset.
6827          * They are still in our local switch element arrays, so only
6828          * need to rebuild the switch model in the HW.
6829          *
6830          * If there were VEBs but the reconstitution failed, we'll try
6831          * try to recover minimal use by getting the basic PF VSI working.
6832          */
6833         if (pf->vsi[pf->lan_vsi]->uplink_seid != pf->mac_seid) {
6834                 dev_dbg(&pf->pdev->dev, "attempting to rebuild switch\n");
6835                 /* find the one VEB connected to the MAC, and find orphans */
6836                 for (v = 0; v < I40E_MAX_VEB; v++) {
6837                         if (!pf->veb[v])
6838                                 continue;
6839
6840                         if (pf->veb[v]->uplink_seid == pf->mac_seid ||
6841                             pf->veb[v]->uplink_seid == 0) {
6842                                 ret = i40e_reconstitute_veb(pf->veb[v]);
6843
6844                                 if (!ret)
6845                                         continue;
6846
6847                                 /* If Main VEB failed, we're in deep doodoo,
6848                                  * so give up rebuilding the switch and set up
6849                                  * for minimal rebuild of PF VSI.
6850                                  * If orphan failed, we'll report the error
6851                                  * but try to keep going.
6852                                  */
6853                                 if (pf->veb[v]->uplink_seid == pf->mac_seid) {
6854                                         dev_info(&pf->pdev->dev,
6855                                                  "rebuild of switch failed: %d, will try to set up simple PF connection\n",
6856                                                  ret);
6857                                         pf->vsi[pf->lan_vsi]->uplink_seid
6858                                                                 = pf->mac_seid;
6859                                         break;
6860                                 } else if (pf->veb[v]->uplink_seid == 0) {
6861                                         dev_info(&pf->pdev->dev,
6862                                                  "rebuild of orphan VEB failed: %d\n",
6863                                                  ret);
6864                                 }
6865                         }
6866                 }
6867         }
6868
6869         if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
6870                 dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
6871                 /* no VEB, so rebuild only the Main VSI */
6872                 ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
6873                 if (ret) {
6874                         dev_info(&pf->pdev->dev,
6875                                  "rebuild of Main VSI failed: %d\n", ret);
6876                         goto end_core_reset;
6877                 }
6878         }
6879
6880         /* Reconfigure hardware for allowing smaller MSS in the case
6881          * of TSO, so that we avoid the MDD being fired and causing
6882          * a reset in the case of small MSS+TSO.
6883          */
6884 #define I40E_REG_MSS          0x000E64DC
6885 #define I40E_REG_MSS_MIN_MASK 0x3FF0000
6886 #define I40E_64BYTE_MSS       0x400000
6887         val = rd32(hw, I40E_REG_MSS);
6888         if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
6889                 val &= ~I40E_REG_MSS_MIN_MASK;
6890                 val |= I40E_64BYTE_MSS;
6891                 wr32(hw, I40E_REG_MSS, val);
6892         }
6893
6894         if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
6895             (pf->hw.aq.fw_maj_ver < 4)) {
6896                 msleep(75);
6897                 ret = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
6898                 if (ret)
6899                         dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
6900                                  i40e_stat_str(&pf->hw, ret),
6901                                  i40e_aq_str(&pf->hw,
6902                                              pf->hw.aq.asq_last_status));
6903         }
6904         /* reinit the misc interrupt */
6905         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
6906                 ret = i40e_setup_misc_vector(pf);
6907
6908         /* Add a filter to drop all Flow control frames from any VSI from being
6909          * transmitted. By doing so we stop a malicious VF from sending out
6910          * PAUSE or PFC frames and potentially controlling traffic for other
6911          * PF/VF VSIs.
6912          * The FW can still send Flow control frames if enabled.
6913          */
6914         i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
6915                                                        pf->main_vsi_seid);
6916
6917         /* restart the VSIs that were rebuilt and running before the reset */
6918         i40e_pf_unquiesce_all_vsi(pf);
6919
6920         if (pf->num_alloc_vfs) {
6921                 for (v = 0; v < pf->num_alloc_vfs; v++)
6922                         i40e_reset_vf(&pf->vf[v], true);
6923         }
6924
6925         /* tell the firmware that we're starting */
6926         i40e_send_version(pf);
6927
6928 end_core_reset:
6929         clear_bit(__I40E_RESET_FAILED, &pf->state);
6930 clear_recovery:
6931         clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
6932 }
6933
6934 /**
6935  * i40e_handle_reset_warning - prep for the PF to reset, reset and rebuild
6936  * @pf: board private structure
6937  *
6938  * Close up the VFs and other things in prep for a Core Reset,
6939  * then get ready to rebuild the world.
6940  **/
6941 static void i40e_handle_reset_warning(struct i40e_pf *pf)
6942 {
6943         i40e_prep_for_reset(pf);
6944         i40e_reset_and_rebuild(pf, false);
6945 }
6946
6947 /**
6948  * i40e_handle_mdd_event
6949  * @pf: pointer to the PF structure
6950  *
6951  * Called from the MDD irq handler to identify possibly malicious vfs
6952  **/
6953 static void i40e_handle_mdd_event(struct i40e_pf *pf)
6954 {
6955         struct i40e_hw *hw = &pf->hw;
6956         bool mdd_detected = false;
6957         bool pf_mdd_detected = false;
6958         struct i40e_vf *vf;
6959         u32 reg;
6960         int i;
6961
6962         if (!test_bit(__I40E_MDD_EVENT_PENDING, &pf->state))
6963                 return;
6964
6965         /* find what triggered the MDD event */
6966         reg = rd32(hw, I40E_GL_MDET_TX);
6967         if (reg & I40E_GL_MDET_TX_VALID_MASK) {
6968                 u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
6969                                 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6970                 u16 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6971                                 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6972                 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6973                                 I40E_GL_MDET_TX_EVENT_SHIFT;
6974                 u16 queue = ((reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6975                                 I40E_GL_MDET_TX_QUEUE_SHIFT) -
6976                                 pf->hw.func_caps.base_queue;
6977                 if (netif_msg_tx_err(pf))
6978                         dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on TX queue %d PF number 0x%02x VF number 0x%02x\n",
6979                                  event, queue, pf_num, vf_num);
6980                 wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
6981                 mdd_detected = true;
6982         }
6983         reg = rd32(hw, I40E_GL_MDET_RX);
6984         if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6985                 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6986                                 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6987                 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6988                                 I40E_GL_MDET_RX_EVENT_SHIFT;
6989                 u16 queue = ((reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6990                                 I40E_GL_MDET_RX_QUEUE_SHIFT) -
6991                                 pf->hw.func_caps.base_queue;
6992                 if (netif_msg_rx_err(pf))
6993                         dev_info(&pf->pdev->dev, "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
6994                                  event, queue, func);
6995                 wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
6996                 mdd_detected = true;
6997         }
6998
6999         if (mdd_detected) {
7000                 reg = rd32(hw, I40E_PF_MDET_TX);
7001                 if (reg & I40E_PF_MDET_TX_VALID_MASK) {
7002                         wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
7003                         dev_info(&pf->pdev->dev, "TX driver issue detected, PF reset issued\n");
7004                         pf_mdd_detected = true;
7005                 }
7006                 reg = rd32(hw, I40E_PF_MDET_RX);
7007                 if (reg & I40E_PF_MDET_RX_VALID_MASK) {
7008                         wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
7009                         dev_info(&pf->pdev->dev, "RX driver issue detected, PF reset issued\n");
7010                         pf_mdd_detected = true;
7011                 }
7012                 /* Queue belongs to the PF, initiate a reset */
7013                 if (pf_mdd_detected) {
7014                         set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
7015                         i40e_service_event_schedule(pf);
7016                 }
7017         }
7018
7019         /* see if one of the VFs needs its hand slapped */
7020         for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
7021                 vf = &(pf->vf[i]);
7022                 reg = rd32(hw, I40E_VP_MDET_TX(i));
7023                 if (reg & I40E_VP_MDET_TX_VALID_MASK) {
7024                         wr32(hw, I40E_VP_MDET_TX(i), 0xFFFF);
7025                         vf->num_mdd_events++;
7026                         dev_info(&pf->pdev->dev, "TX driver issue detected on VF %d\n",
7027                                  i);
7028                 }
7029
7030                 reg = rd32(hw, I40E_VP_MDET_RX(i));
7031                 if (reg & I40E_VP_MDET_RX_VALID_MASK) {
7032                         wr32(hw, I40E_VP_MDET_RX(i), 0xFFFF);
7033                         vf->num_mdd_events++;
7034                         dev_info(&pf->pdev->dev, "RX driver issue detected on VF %d\n",
7035                                  i);
7036                 }
7037
7038                 if (vf->num_mdd_events > I40E_DEFAULT_NUM_MDD_EVENTS_ALLOWED) {
7039                         dev_info(&pf->pdev->dev,
7040                                  "Too many MDD events on VF %d, disabled\n", i);
7041                         dev_info(&pf->pdev->dev,
7042                                  "Use PF Control I/F to re-enable the VF\n");
7043                         set_bit(I40E_VF_STAT_DISABLED, &vf->vf_states);
7044                 }
7045         }
7046
7047         /* re-enable mdd interrupt cause */
7048         clear_bit(__I40E_MDD_EVENT_PENDING, &pf->state);
7049         reg = rd32(hw, I40E_PFINT_ICR0_ENA);
7050         reg |=  I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
7051         wr32(hw, I40E_PFINT_ICR0_ENA, reg);
7052         i40e_flush(hw);
7053 }
7054
7055 #ifdef CONFIG_I40E_VXLAN
/**
 * i40e_sync_vxlan_filters_subtask - Sync the VSI filter list with HW
 * @pf: board private structure
 *
 * Walks pending_vxlan_bitmap and pushes each queued VXLAN UDP port
 * change to the firmware via the admin queue.  A non-zero entry in
 * vxlan_ports[] means "add this port"; zero means "delete the tunnel
 * filter at index i".
 **/
static void i40e_sync_vxlan_filters_subtask(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret;
	__be16 port;
	int i;

	/* nothing to do unless a port change has been queued */
	if (!(pf->flags & I40E_FLAG_VXLAN_FILTER_SYNC))
		return;

	pf->flags &= ~I40E_FLAG_VXLAN_FILTER_SYNC;

	for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
		if (pf->pending_vxlan_bitmap & BIT_ULL(i)) {
			/* clear the pending bit before issuing the AQ call */
			pf->pending_vxlan_bitmap &= ~BIT_ULL(i);
			port = pf->vxlan_ports[i];
			if (port)
				ret = i40e_aq_add_udp_tunnel(hw, ntohs(port),
						     I40E_AQC_TUNNEL_TYPE_VXLAN,
						     NULL, NULL);
			else
				ret = i40e_aq_del_udp_tunnel(hw, i, NULL);

			if (ret) {
				dev_info(&pf->pdev->dev,
					 "%s vxlan port %d, index %d failed, err %s aq_err %s\n",
					 port ? "add" : "delete",
					 ntohs(port), i,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
				/* drop the port so we don't retry forever */
				pf->vxlan_ports[i] = 0;
			}
		}
	}
}
7096
7097 #endif
/**
 * i40e_service_task - Run the driver's async subtasks
 * @work: pointer to work_struct containing our data
 *
 * Workqueue handler for the service task: runs each maintenance
 * subtask in turn (reset handling, MDD/VFLR events, watchdog, filter
 * syncing, admin queue processing), then reschedules itself
 * immediately if the pass ran long or more events are already pending.
 **/
static void i40e_service_task(struct work_struct *work)
{
	struct i40e_pf *pf = container_of(work,
					  struct i40e_pf,
					  service_task);
	unsigned long start_time = jiffies;

	/* don't bother with service tasks if a reset is in progress */
	if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		i40e_service_event_complete(pf);
		return;
	}

	i40e_detect_recover_hung(pf);
	i40e_reset_subtask(pf);
	i40e_handle_mdd_event(pf);
	i40e_vc_process_vflr_event(pf);
	i40e_watchdog_subtask(pf);
	i40e_fdir_reinit_subtask(pf);
	i40e_sync_filters_subtask(pf);
#ifdef CONFIG_I40E_VXLAN
	i40e_sync_vxlan_filters_subtask(pf);
#endif
	i40e_clean_adminq_subtask(pf);

	/* signal that this pass of the service task is complete */
	i40e_service_event_complete(pf);

	/* If the tasks have taken longer than one timer cycle or there
	 * is more work to be done, reschedule the service task now
	 * rather than wait for the timer to tick again.
	 */
	if (time_after(jiffies, (start_time + pf->service_timer_period)) ||
	    test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state)            ||
	    test_bit(__I40E_MDD_EVENT_PENDING, &pf->state)               ||
	    test_bit(__I40E_VFLR_EVENT_PENDING, &pf->state))
		i40e_service_event_schedule(pf);
}
7139
/**
 * i40e_service_timer - timer callback
 * @data: pointer to PF struct
 *
 * Re-arms the service timer for the next period and schedules the
 * service task so the periodic subtasks run in process context.
 **/
static void i40e_service_timer(unsigned long data)
{
	struct i40e_pf *pf = (struct i40e_pf *)data;

	/* round_jiffies aligns the expiry to a whole jiffy boundary so
	 * timer wakeups across the system can be batched
	 */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));
	i40e_service_event_schedule(pf);
}
7152
/**
 * i40e_set_num_rings_in_vsi - Determine number of rings in the VSI
 * @vsi: the VSI being configured
 *
 * Fills in alloc_queue_pairs, num_desc and num_q_vectors based on the
 * VSI type and the per-feature counts already computed in the PF.
 *
 * Returns 0 on success, -ENODATA (with a WARN) for an unknown type.
 **/
static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;

	switch (vsi->type) {
	case I40E_VSI_MAIN:
		vsi->alloc_queue_pairs = pf->num_lan_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		/* without MSI-X there is only the one shared vector */
		if (pf->flags & I40E_FLAG_MSIX_ENABLED)
			vsi->num_q_vectors = pf->num_lan_msix;
		else
			vsi->num_q_vectors = 1;

		break;

	case I40E_VSI_FDIR:
		/* flow director sideband uses a single queue pair/vector */
		vsi->alloc_queue_pairs = 1;
		vsi->num_desc = ALIGN(I40E_FDIR_RING_COUNT,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = 1;
		break;

	case I40E_VSI_VMDQ2:
		vsi->alloc_queue_pairs = pf->num_vmdq_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_vmdq_msix;
		break;

	case I40E_VSI_SRIOV:
		/* NOTE(review): num_q_vectors is intentionally not set for
		 * SRIOV VSIs here — presumably handled on the VF side
		 */
		vsi->alloc_queue_pairs = pf->num_vf_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		vsi->alloc_queue_pairs = pf->num_fcoe_qps;
		vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
				      I40E_REQ_DESCRIPTOR_MULTIPLE);
		vsi->num_q_vectors = pf->num_fcoe_msix;
		break;

#endif /* I40E_FCOE */
	default:
		WARN_ON(1);
		return -ENODATA;
	}

	return 0;
}
7209
7210 /**
7211  * i40e_vsi_alloc_arrays - Allocate queue and vector pointer arrays for the vsi
7212  * @type: VSI pointer
7213  * @alloc_qvectors: a bool to specify if q_vectors need to be allocated.
7214  *
7215  * On error: returns error code (negative)
7216  * On success: returns 0
7217  **/
7218 static int i40e_vsi_alloc_arrays(struct i40e_vsi *vsi, bool alloc_qvectors)
7219 {
7220         int size;
7221         int ret = 0;
7222
7223         /* allocate memory for both Tx and Rx ring pointers */
7224         size = sizeof(struct i40e_ring *) * vsi->alloc_queue_pairs * 2;
7225         vsi->tx_rings = kzalloc(size, GFP_KERNEL);
7226         if (!vsi->tx_rings)
7227                 return -ENOMEM;
7228         vsi->rx_rings = &vsi->tx_rings[vsi->alloc_queue_pairs];
7229
7230         if (alloc_qvectors) {
7231                 /* allocate memory for q_vector pointers */
7232                 size = sizeof(struct i40e_q_vector *) * vsi->num_q_vectors;
7233                 vsi->q_vectors = kzalloc(size, GFP_KERNEL);
7234                 if (!vsi->q_vectors) {
7235                         ret = -ENOMEM;
7236                         goto err_vectors;
7237                 }
7238         }
7239         return ret;
7240
7241 err_vectors:
7242         kfree(vsi->tx_rings);
7243         return ret;
7244 }
7245
/**
 * i40e_vsi_mem_alloc - Allocates the next available struct vsi in the PF
 * @pf: board private structure
 * @type: type of VSI
 *
 * Finds a free slot in pf->vsi[], allocates and initializes a new VSI
 * for it, and records it in the table.  Protected by pf->switch_mutex.
 *
 * On error: returns error code (negative)
 * On success: returns vsi index in PF (positive)
 **/
static int i40e_vsi_mem_alloc(struct i40e_pf *pf, enum i40e_vsi_type type)
{
	int ret = -ENODEV;
	struct i40e_vsi *vsi;
	int vsi_idx;
	int i;

	/* Need to protect the allocation of the VSIs at the PF level */
	mutex_lock(&pf->switch_mutex);

	/* VSI list may be fragmented if VSI creation/destruction has
	 * been happening.  We can afford to do a quick scan to look
	 * for any free VSIs in the list.
	 *
	 * find next empty vsi slot, looping back around if necessary
	 */
	i = pf->next_vsi;
	while (i < pf->num_alloc_vsi && pf->vsi[i])
		i++;
	if (i >= pf->num_alloc_vsi) {
		/* wrap and rescan the slots before the starting hint */
		i = 0;
		while (i < pf->next_vsi && pf->vsi[i])
			i++;
	}

	if (i < pf->num_alloc_vsi && !pf->vsi[i]) {
		vsi_idx = i;             /* Found one! */
	} else {
		ret = -ENODEV;
		goto unlock_pf;  /* out of VSI slots! */
	}
	/* remember where the next search should start */
	pf->next_vsi = ++i;

	vsi = kzalloc(sizeof(*vsi), GFP_KERNEL);
	if (!vsi) {
		ret = -ENOMEM;
		goto unlock_pf;
	}
	/* default-initialize the new VSI */
	vsi->type = type;
	vsi->back = pf;
	set_bit(__I40E_DOWN, &vsi->state);
	vsi->flags = 0;
	vsi->idx = vsi_idx;
	vsi->rx_itr_setting = pf->rx_itr_default;
	vsi->tx_itr_setting = pf->tx_itr_default;
	vsi->int_rate_limit = 0;
	/* only the main VSI gets the full PF RSS table size */
	vsi->rss_table_size = (vsi->type == I40E_VSI_MAIN) ?
				pf->rss_table_size : 64;
	vsi->netdev_registered = false;
	vsi->work_limit = I40E_DEFAULT_IRQ_WORK;
	INIT_LIST_HEAD(&vsi->mac_filter_list);
	vsi->irqs_ready = false;

	ret = i40e_set_num_rings_in_vsi(vsi);
	if (ret)
		goto err_rings;

	ret = i40e_vsi_alloc_arrays(vsi, true);
	if (ret)
		goto err_rings;

	/* Setup default MSIX irq handler for VSI */
	i40e_vsi_setup_irqhandler(vsi, i40e_msix_clean_rings);

	/* Initialize VSI lock */
	spin_lock_init(&vsi->mac_filter_list_lock);
	pf->vsi[vsi_idx] = vsi;
	ret = vsi_idx;
	goto unlock_pf;

err_rings:
	/* roll the search hint back to the slot we failed to fill */
	pf->next_vsi = i - 1;
	kfree(vsi);
unlock_pf:
	mutex_unlock(&pf->switch_mutex);
	return ret;
}
7331
/**
 * i40e_vsi_free_arrays - Free queue and vector pointer arrays for the VSI
 * @vsi: the VSI whose arrays are being freed
 * @free_qvectors: a bool to specify if q_vectors need to be freed.
 *
 * rx_rings points into the tx_rings allocation, so only tx_rings is
 * kfree'd; both pointers are NULLed afterwards.
 **/
static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
{
	/* free the ring and vector containers */
	if (free_qvectors) {
		kfree(vsi->q_vectors);
		vsi->q_vectors = NULL;
	}
	kfree(vsi->tx_rings);
	vsi->tx_rings = NULL;
	vsi->rx_rings = NULL;
}
7351
/**
 * i40e_vsi_clear - Deallocate the VSI provided
 * @vsi: the VSI being un-configured
 *
 * Returns the VSI's queue and interrupt resources to the PF piles,
 * clears its slot in pf->vsi[] and frees the structure.  Tolerates a
 * NULL or partially initialized VSI.  Always returns 0.
 **/
static int i40e_vsi_clear(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf;

	if (!vsi)
		return 0;

	/* no backreference: the VSI was never tied to a PF table */
	if (!vsi->back)
		goto free_vsi;
	pf = vsi->back;

	mutex_lock(&pf->switch_mutex);
	if (!pf->vsi[vsi->idx]) {
		dev_err(&pf->pdev->dev, "pf->vsi[%d] is NULL, just free vsi[%d](%p,type %d)\n",
			vsi->idx, vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* sanity check that the table slot really holds this VSI */
	if (pf->vsi[vsi->idx] != vsi) {
		dev_err(&pf->pdev->dev,
			"pf->vsi[%d](%p, type %d) != vsi[%d](%p,type %d): no free!\n",
			pf->vsi[vsi->idx]->idx,
			pf->vsi[vsi->idx],
			pf->vsi[vsi->idx]->type,
			vsi->idx, vsi, vsi->type);
		goto unlock_vsi;
	}

	/* updates the PF for this cleared vsi */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);

	i40e_vsi_free_arrays(vsi, true);

	pf->vsi[vsi->idx] = NULL;
	/* let the freed slot become the next search hint if it's earlier */
	if (vsi->idx < pf->next_vsi)
		pf->next_vsi = vsi->idx;

unlock_vsi:
	mutex_unlock(&pf->switch_mutex);
free_vsi:
	kfree(vsi);

	return 0;
}
7401
7402 /**
7403  * i40e_vsi_clear_rings - Deallocates the Rx and Tx rings for the provided VSI
7404  * @vsi: the VSI being cleaned
7405  **/
7406 static void i40e_vsi_clear_rings(struct i40e_vsi *vsi)
7407 {
7408         int i;
7409
7410         if (vsi->tx_rings && vsi->tx_rings[0]) {
7411                 for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7412                         kfree_rcu(vsi->tx_rings[i], rcu);
7413                         vsi->tx_rings[i] = NULL;
7414                         vsi->rx_rings[i] = NULL;
7415                 }
7416         }
7417 }
7418
7419 /**
7420  * i40e_alloc_rings - Allocates the Rx and Tx rings for the provided VSI
7421  * @vsi: the VSI being configured
7422  **/
7423 static int i40e_alloc_rings(struct i40e_vsi *vsi)
7424 {
7425         struct i40e_ring *tx_ring, *rx_ring;
7426         struct i40e_pf *pf = vsi->back;
7427         int i;
7428
7429         /* Set basic values in the rings to be used later during open() */
7430         for (i = 0; i < vsi->alloc_queue_pairs; i++) {
7431                 /* allocate space for both Tx and Rx in one shot */
7432                 tx_ring = kzalloc(sizeof(struct i40e_ring) * 2, GFP_KERNEL);
7433                 if (!tx_ring)
7434                         goto err_out;
7435
7436                 tx_ring->queue_index = i;
7437                 tx_ring->reg_idx = vsi->base_queue + i;
7438                 tx_ring->ring_active = false;
7439                 tx_ring->vsi = vsi;
7440                 tx_ring->netdev = vsi->netdev;
7441                 tx_ring->dev = &pf->pdev->dev;
7442                 tx_ring->count = vsi->num_desc;
7443                 tx_ring->size = 0;
7444                 tx_ring->dcb_tc = 0;
7445                 if (vsi->back->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
7446                         tx_ring->flags = I40E_TXR_FLAGS_WB_ON_ITR;
7447                 if (vsi->back->flags & I40E_FLAG_OUTER_UDP_CSUM_CAPABLE)
7448                         tx_ring->flags |= I40E_TXR_FLAGS_OUTER_UDP_CSUM;
7449                 vsi->tx_rings[i] = tx_ring;
7450
7451                 rx_ring = &tx_ring[1];
7452                 rx_ring->queue_index = i;
7453                 rx_ring->reg_idx = vsi->base_queue + i;
7454                 rx_ring->ring_active = false;
7455                 rx_ring->vsi = vsi;
7456                 rx_ring->netdev = vsi->netdev;
7457                 rx_ring->dev = &pf->pdev->dev;
7458                 rx_ring->count = vsi->num_desc;
7459                 rx_ring->size = 0;
7460                 rx_ring->dcb_tc = 0;
7461                 if (pf->flags & I40E_FLAG_16BYTE_RX_DESC_ENABLED)
7462                         set_ring_16byte_desc_enabled(rx_ring);
7463                 else
7464                         clear_ring_16byte_desc_enabled(rx_ring);
7465                 vsi->rx_rings[i] = rx_ring;
7466         }
7467
7468         return 0;
7469
7470 err_out:
7471         i40e_vsi_clear_rings(vsi);
7472         return -ENOMEM;
7473 }
7474
7475 /**
7476  * i40e_reserve_msix_vectors - Reserve MSI-X vectors in the kernel
7477  * @pf: board private structure
7478  * @vectors: the number of MSI-X vectors to request
7479  *
7480  * Returns the number of vectors reserved, or error
7481  **/
7482 static int i40e_reserve_msix_vectors(struct i40e_pf *pf, int vectors)
7483 {
7484         vectors = pci_enable_msix_range(pf->pdev, pf->msix_entries,
7485                                         I40E_MIN_MSIX, vectors);
7486         if (vectors < 0) {
7487                 dev_info(&pf->pdev->dev,
7488                          "MSI-X vector reservation failed: %d\n", vectors);
7489                 vectors = 0;
7490         }
7491
7492         return vectors;
7493 }
7494
/**
 * i40e_init_msix - Setup the MSIX capability
 * @pf: board private structure
 *
 * Work with the OS to set up the MSIX vectors needed.
 *
 * Builds a vector budget (misc + LAN + FD-SB + FCoE + VMDq), requests
 * it from the PCI layer, and if fewer vectors are granted, scales the
 * per-feature vector counts down (disabling features that end up with
 * none).
 *
 * Returns the number of vectors reserved or negative on failure
 **/
static int i40e_init_msix(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int vectors_left;
	int v_budget, i;
	int v_actual;

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return -ENODEV;

	/* The number of vectors we'll request will be comprised of:
	 *   - Add 1 for "other" cause for Admin Queue events, etc.
	 *   - The number of LAN queue pairs
	 *      - Queues being used for RSS.
	 *              We don't need as many as max_rss_size vectors.
	 *              use rss_size instead in the calculation since that
	 *              is governed by number of cpus in the system.
	 *      - assumes symmetric Tx/Rx pairing
	 *   - The number of VMDq pairs
#ifdef I40E_FCOE
	 *   - The number of FCOE qps.
#endif
	 * Once we count this up, try the request.
	 *
	 * If we can't get what we want, we'll simplify to nearly nothing
	 * and try again.  If that still fails, we punt.
	 */
	vectors_left = hw->func_caps.num_msix_vectors;
	v_budget = 0;

	/* reserve one vector for miscellaneous handler */
	if (vectors_left) {
		v_budget++;
		vectors_left--;
	}

	/* reserve vectors for the main PF traffic queues, capped at the
	 * number of online CPUs
	 */
	pf->num_lan_msix = min_t(int, num_online_cpus(), vectors_left);
	vectors_left -= pf->num_lan_msix;
	v_budget += pf->num_lan_msix;

	/* reserve one vector for sideband flow director; if none is
	 * left, disable the feature up front
	 */
	if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
		if (vectors_left) {
			v_budget++;
			vectors_left--;
		} else {
			pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		}
	}

#ifdef I40E_FCOE
	/* can we reserve enough for FCoE? */
	if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
		if (!vectors_left)
			pf->num_fcoe_msix = 0;
		else if (vectors_left >= pf->num_fcoe_qps)
			pf->num_fcoe_msix = pf->num_fcoe_qps;
		else
			pf->num_fcoe_msix = 1;
		v_budget += pf->num_fcoe_msix;
		vectors_left -= pf->num_fcoe_msix;
	}

#endif
	/* any vectors left over go for VMDq support */
	if (pf->flags & I40E_FLAG_VMDQ_ENABLED) {
		int vmdq_vecs_wanted = pf->num_vmdq_vsis * pf->num_vmdq_qps;
		int vmdq_vecs = min_t(int, vectors_left, vmdq_vecs_wanted);

		/* if we're short on vectors for what's desired, we limit
		 * the queues per vmdq.  If this is still more than are
		 * available, the user will need to change the number of
		 * queues/vectors used by the PF later with the ethtool
		 * channels command
		 */
		if (vmdq_vecs < vmdq_vecs_wanted)
			pf->num_vmdq_qps = 1;
		pf->num_vmdq_msix = pf->num_vmdq_qps;

		v_budget += vmdq_vecs;
		vectors_left -= vmdq_vecs;
	}

	pf->msix_entries = kcalloc(v_budget, sizeof(struct msix_entry),
				   GFP_KERNEL);
	if (!pf->msix_entries)
		return -ENOMEM;

	for (i = 0; i < v_budget; i++)
		pf->msix_entries[i].entry = i;
	v_actual = i40e_reserve_msix_vectors(pf, v_budget);

	if (v_actual != v_budget) {
		/* If we have limited resources, we will start with no vectors
		 * for the special features and then allocate vectors to some
		 * of these features based on the policy and at the end disable
		 * the features that did not get any vectors.
		 */
#ifdef I40E_FCOE
		pf->num_fcoe_qps = 0;
		pf->num_fcoe_msix = 0;
#endif
		pf->num_vmdq_msix = 0;
	}

	if (v_actual < I40E_MIN_MSIX) {
		/* couldn't even get the minimum; give up on MSI-X entirely */
		pf->flags &= ~I40E_FLAG_MSIX_ENABLED;
		kfree(pf->msix_entries);
		pf->msix_entries = NULL;
		return -ENODEV;

	} else if (v_actual == I40E_MIN_MSIX) {
		/* Adjust for minimal MSIX use */
		pf->num_vmdq_vsis = 0;
		pf->num_vmdq_qps = 0;
		pf->num_lan_qps = 1;
		pf->num_lan_msix = 1;

	} else if (v_actual != v_budget) {
		int vec;

		/* reserve the misc vector */
		vec = v_actual - 1;

		/* Scale vector usage down */
		pf->num_vmdq_msix = 1;    /* force VMDqs to only one vector */
		pf->num_vmdq_vsis = 1;
		pf->num_vmdq_qps = 1;
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;

		/* partition out the remaining vectors */
		switch (vec) {
		case 2:
			pf->num_lan_msix = 1;
			break;
		case 3:
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_lan_msix = 1;
				pf->num_fcoe_msix = 1;
			}
#else
			pf->num_lan_msix = 2;
#endif
			break;
		default:
#ifdef I40E_FCOE
			/* give one vector to FCoE */
			if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
				pf->num_fcoe_msix = 1;
				vec--;
			}
#endif
			/* give the rest to the PF */
			pf->num_lan_msix = min_t(int, vec, pf->num_lan_qps);
			break;
		}
	}

	/* disable any feature that ended up with no vectors at all */
	if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
	    (pf->num_vmdq_msix == 0)) {
		dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
	}
#ifdef I40E_FCOE

	if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
		dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
		pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
	}
#endif
	return v_actual;
}
7678
7679 /**
7680  * i40e_vsi_alloc_q_vector - Allocate memory for a single interrupt vector
7681  * @vsi: the VSI being configured
7682  * @v_idx: index of the vector in the vsi struct
7683  *
7684  * We allocate one q_vector.  If allocation fails we return -ENOMEM.
7685  **/
7686 static int i40e_vsi_alloc_q_vector(struct i40e_vsi *vsi, int v_idx)
7687 {
7688         struct i40e_q_vector *q_vector;
7689
7690         /* allocate q_vector */
7691         q_vector = kzalloc(sizeof(struct i40e_q_vector), GFP_KERNEL);
7692         if (!q_vector)
7693                 return -ENOMEM;
7694
7695         q_vector->vsi = vsi;
7696         q_vector->v_idx = v_idx;
7697         cpumask_set_cpu(v_idx, &q_vector->affinity_mask);
7698         if (vsi->netdev)
7699                 netif_napi_add(vsi->netdev, &q_vector->napi,
7700                                i40e_napi_poll, NAPI_POLL_WEIGHT);
7701
7702         q_vector->rx.latency_range = I40E_LOW_LATENCY;
7703         q_vector->tx.latency_range = I40E_LOW_LATENCY;
7704
7705         /* tie q_vector and vsi together */
7706         vsi->q_vectors[v_idx] = q_vector;
7707
7708         return 0;
7709 }
7710
7711 /**
7712  * i40e_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
7713  * @vsi: the VSI being configured
7714  *
7715  * We allocate one q_vector per queue interrupt.  If allocation fails we
7716  * return -ENOMEM.
7717  **/
7718 static int i40e_vsi_alloc_q_vectors(struct i40e_vsi *vsi)
7719 {
7720         struct i40e_pf *pf = vsi->back;
7721         int v_idx, num_q_vectors;
7722         int err;
7723
7724         /* if not MSIX, give the one vector only to the LAN VSI */
7725         if (pf->flags & I40E_FLAG_MSIX_ENABLED)
7726                 num_q_vectors = vsi->num_q_vectors;
7727         else if (vsi == pf->vsi[pf->lan_vsi])
7728                 num_q_vectors = 1;
7729         else
7730                 return -EINVAL;
7731
7732         for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
7733                 err = i40e_vsi_alloc_q_vector(vsi, v_idx);
7734                 if (err)
7735                         goto err_out;
7736         }
7737
7738         return 0;
7739
7740 err_out:
7741         while (v_idx--)
7742                 i40e_free_q_vector(vsi, v_idx);
7743
7744         return err;
7745 }
7746
/**
 * i40e_init_interrupt_scheme - Determine proper interrupt scheme
 * @pf: board private structure to initialize
 *
 * Tries MSI-X first, then falls back to MSI and finally legacy IRQ,
 * clearing the feature flags that depend on MSI-X when it is not
 * available.  Also allocates the irq_pile used to track vector
 * assignments.
 *
 * Returns 0 on success, -ENOMEM if the tracking structure cannot be
 * allocated.
 **/
static int i40e_init_interrupt_scheme(struct i40e_pf *pf)
{
	int vectors = 0;
	ssize_t size;

	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		vectors = i40e_init_msix(pf);
		if (vectors < 0) {
			/* MSI-X failed: turn off every feature that needs it */
			pf->flags &= ~(I40E_FLAG_MSIX_ENABLED	|
#ifdef I40E_FCOE
				       I40E_FLAG_FCOE_ENABLED	|
#endif
				       I40E_FLAG_RSS_ENABLED	|
				       I40E_FLAG_DCB_CAPABLE	|
				       I40E_FLAG_SRIOV_ENABLED	|
				       I40E_FLAG_FD_SB_ENABLED	|
				       I40E_FLAG_FD_ATR_ENABLED |
				       I40E_FLAG_VMDQ_ENABLED);

			/* rework the queue expectations without MSIX */
			i40e_determine_queue_usage(pf);
		}
	}

	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSI_ENABLED)) {
		dev_info(&pf->pdev->dev, "MSI-X not available, trying MSI\n");
		vectors = pci_enable_msi(pf->pdev);
		if (vectors < 0) {
			dev_info(&pf->pdev->dev, "MSI init failed - %d\n",
				 vectors);
			pf->flags &= ~I40E_FLAG_MSI_ENABLED;
		}
		/* whether MSI or legacy, there is exactly one vector */
		vectors = 1;  /* one MSI or Legacy vector */
	}

	if (!(pf->flags & (I40E_FLAG_MSIX_ENABLED | I40E_FLAG_MSI_ENABLED)))
		dev_info(&pf->pdev->dev, "MSI-X and MSI not available, falling back to Legacy IRQ\n");

	/* set up vector assignment tracking */
	size = sizeof(struct i40e_lump_tracking) + (sizeof(u16) * vectors);
	pf->irq_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->irq_pile) {
		dev_err(&pf->pdev->dev, "error allocating irq_pile memory\n");
		return -ENOMEM;
	}
	pf->irq_pile->num_entries = vectors;
	pf->irq_pile->search_hint = 0;

	/* track first vector for misc interrupts, ignore return */
	(void)i40e_get_lump(pf, pf->irq_pile, 1, I40E_PILE_VALID_BIT - 1);

	return 0;
}
7805
/**
 * i40e_setup_misc_vector - Setup the misc vector to handle non queue events
 * @pf: board private structure
 *
 * This sets up the handler for MSIX 0, which is used to manage the
 * non-queue interrupts, e.g. AdminQ and errors.  This is not used
 * when in MSI or Legacy interrupt mode.
 *
 * Returns 0 on success, -EFAULT if the IRQ cannot be requested.
 **/
static int i40e_setup_misc_vector(struct i40e_pf *pf)
{
	struct i40e_hw *hw = &pf->hw;
	int err = 0;

	/* Only request the irq if this is the first time through, and
	 * not when we're rebuilding after a Reset
	 */
	if (!test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
		err = request_irq(pf->msix_entries[0].vector,
				  i40e_intr, 0, pf->int_name, pf);
		if (err) {
			dev_info(&pf->pdev->dev,
				 "request_irq for %s failed: %d\n",
				 pf->int_name, err);
			/* NOTE(review): the real errno from request_irq is
			 * only logged; -EFAULT is returned in its place —
			 * confirm callers don't need the original code
			 */
			return -EFAULT;
		}
	}

	i40e_enable_misc_int_causes(pf);

	/* associate no queues to the misc vector */
	wr32(hw, I40E_PFINT_LNKLST0, I40E_QUEUE_END_OF_LIST);
	wr32(hw, I40E_PFINT_ITR0(I40E_RX_ITR), I40E_ITR_8K);

	i40e_flush(hw);

	i40e_irq_dynamic_enable_icr0(pf);

	return err;
}
7845
7846 /**
7847  * i40e_config_rss_aq - Prepare for RSS using AQ commands
7848  * @vsi: vsi structure
7849  * @seed: RSS hash seed
7850  **/
7851 static int i40e_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed)
7852 {
7853         struct i40e_aqc_get_set_rss_key_data rss_key;
7854         struct i40e_pf *pf = vsi->back;
7855         struct i40e_hw *hw = &pf->hw;
7856         bool pf_lut = false;
7857         u8 *rss_lut;
7858         int ret, i;
7859
7860         memset(&rss_key, 0, sizeof(rss_key));
7861         memcpy(&rss_key, seed, sizeof(rss_key));
7862
7863         rss_lut = kzalloc(pf->rss_table_size, GFP_KERNEL);
7864         if (!rss_lut)
7865                 return -ENOMEM;
7866
7867         /* Populate the LUT with max no. of queues in round robin fashion */
7868         for (i = 0; i < vsi->rss_table_size; i++)
7869                 rss_lut[i] = i % vsi->rss_size;
7870
7871         ret = i40e_aq_set_rss_key(hw, vsi->id, &rss_key);
7872         if (ret) {
7873                 dev_info(&pf->pdev->dev,
7874                          "Cannot set RSS key, err %s aq_err %s\n",
7875                          i40e_stat_str(&pf->hw, ret),
7876                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7877                 goto config_rss_aq_out;
7878         }
7879
7880         if (vsi->type == I40E_VSI_MAIN)
7881                 pf_lut = true;
7882
7883         ret = i40e_aq_set_rss_lut(hw, vsi->id, pf_lut, rss_lut,
7884                                   vsi->rss_table_size);
7885         if (ret)
7886                 dev_info(&pf->pdev->dev,
7887                          "Cannot set RSS lut, err %s aq_err %s\n",
7888                          i40e_stat_str(&pf->hw, ret),
7889                          i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
7890
7891 config_rss_aq_out:
7892         kfree(rss_lut);
7893         return ret;
7894 }
7895
7896 /**
7897  * i40e_vsi_config_rss - Prepare for VSI(VMDq) RSS if used
7898  * @vsi: VSI structure
7899  **/
7900 static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
7901 {
7902         u8 seed[I40E_HKEY_ARRAY_SIZE];
7903         struct i40e_pf *pf = vsi->back;
7904
7905         netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
7906         vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
7907
7908         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
7909                 return i40e_config_rss_aq(vsi, seed);
7910
7911         return 0;
7912 }
7913
7914 /**
7915  * i40e_config_rss_reg - Prepare for RSS if used
7916  * @pf: board private structure
7917  * @seed: RSS hash seed
7918  **/
7919 static int i40e_config_rss_reg(struct i40e_pf *pf, const u8 *seed)
7920 {
7921         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7922         struct i40e_hw *hw = &pf->hw;
7923         u32 *seed_dw = (u32 *)seed;
7924         u32 current_queue = 0;
7925         u32 lut = 0;
7926         int i, j;
7927
7928         /* Fill out hash function seed */
7929         for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
7930                 wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
7931
7932         for (i = 0; i <= I40E_PFQF_HLUT_MAX_INDEX; i++) {
7933                 lut = 0;
7934                 for (j = 0; j < 4; j++) {
7935                         if (current_queue == vsi->rss_size)
7936                                 current_queue = 0;
7937                         lut |= ((current_queue) << (8 * j));
7938                         current_queue++;
7939                 }
7940                 wr32(&pf->hw, I40E_PFQF_HLUT(i), lut);
7941         }
7942         i40e_flush(hw);
7943
7944         return 0;
7945 }
7946
7947 /**
7948  * i40e_config_rss - Prepare for RSS if used
7949  * @pf: board private structure
7950  **/
7951 static int i40e_config_rss(struct i40e_pf *pf)
7952 {
7953         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7954         u8 seed[I40E_HKEY_ARRAY_SIZE];
7955         struct i40e_hw *hw = &pf->hw;
7956         u32 reg_val;
7957         u64 hena;
7958
7959         netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
7960
7961         /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
7962         hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) |
7963                 ((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32);
7964         hena |= i40e_pf_get_default_rss_hena(pf);
7965
7966         wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
7967         wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
7968
7969         vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
7970
7971         /* Determine the RSS table size based on the hardware capabilities */
7972         reg_val = rd32(hw, I40E_PFQF_CTL_0);
7973         reg_val = (pf->rss_table_size == 512) ?
7974                         (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
7975                         (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
7976         wr32(hw, I40E_PFQF_CTL_0, reg_val);
7977
7978         if (pf->flags & I40E_FLAG_RSS_AQ_CAPABLE)
7979                 return i40e_config_rss_aq(pf->vsi[pf->lan_vsi], seed);
7980         else
7981                 return i40e_config_rss_reg(pf, seed);
7982 }
7983
7984 /**
7985  * i40e_reconfig_rss_queues - change number of queues for rss and rebuild
7986  * @pf: board private structure
7987  * @queue_count: the requested queue count for rss.
7988  *
7989  * returns 0 if rss is not enabled, if enabled returns the final rss queue
7990  * count which may be different from the requested queue count.
7991  **/
7992 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
7993 {
7994         struct i40e_vsi *vsi = pf->vsi[pf->lan_vsi];
7995         int new_rss_size;
7996
7997         if (!(pf->flags & I40E_FLAG_RSS_ENABLED))
7998                 return 0;
7999
8000         new_rss_size = min_t(int, queue_count, pf->rss_size_max);
8001
8002         if (queue_count != vsi->num_queue_pairs) {
8003                 vsi->req_queue_pairs = queue_count;
8004                 i40e_prep_for_reset(pf);
8005
8006                 pf->rss_size = new_rss_size;
8007
8008                 i40e_reset_and_rebuild(pf, true);
8009                 i40e_config_rss(pf);
8010         }
8011         dev_info(&pf->pdev->dev, "RSS count:  %d\n", pf->rss_size);
8012         return pf->rss_size;
8013 }
8014
8015 /**
8016  * i40e_get_npar_bw_setting - Retrieve BW settings for this PF partition
8017  * @pf: board private structure
8018  **/
8019 i40e_status i40e_get_npar_bw_setting(struct i40e_pf *pf)
8020 {
8021         i40e_status status;
8022         bool min_valid, max_valid;
8023         u32 max_bw, min_bw;
8024
8025         status = i40e_read_bw_from_alt_ram(&pf->hw, &max_bw, &min_bw,
8026                                            &min_valid, &max_valid);
8027
8028         if (!status) {
8029                 if (min_valid)
8030                         pf->npar_min_bw = min_bw;
8031                 if (max_valid)
8032                         pf->npar_max_bw = max_bw;
8033         }
8034
8035         return status;
8036 }
8037
8038 /**
8039  * i40e_set_npar_bw_setting - Set BW settings for this PF partition
8040  * @pf: board private structure
8041  **/
8042 i40e_status i40e_set_npar_bw_setting(struct i40e_pf *pf)
8043 {
8044         struct i40e_aqc_configure_partition_bw_data bw_data;
8045         i40e_status status;
8046
8047         /* Set the valid bit for this PF */
8048         bw_data.pf_valid_bits = cpu_to_le16(BIT(pf->hw.pf_id));
8049         bw_data.max_bw[pf->hw.pf_id] = pf->npar_max_bw & I40E_ALT_BW_VALUE_MASK;
8050         bw_data.min_bw[pf->hw.pf_id] = pf->npar_min_bw & I40E_ALT_BW_VALUE_MASK;
8051
8052         /* Set the new bandwidths */
8053         status = i40e_aq_configure_partition_bw(&pf->hw, &bw_data, NULL);
8054
8055         return status;
8056 }
8057
/**
 * i40e_commit_npar_bw_setting - Commit BW settings for this PF partition
 * @pf: board private structure
 *
 * Persists the temporary (alt-RAM) bandwidth settings into the NVM by
 * reading SW compatibility word 1 and writing it back unchanged, which
 * triggers an NVM update that flushes the shadow RAM.  Only allowed on
 * partition 1.  Returns 0 on success or an i40e_status error code.
 **/
i40e_status i40e_commit_npar_bw_setting(struct i40e_pf *pf)
{
	/* Commit temporary BW setting to permanent NVM image */
	enum i40e_admin_queue_err last_aq_status;
	i40e_status ret;
	u16 nvm_word;

	if (pf->hw.partition_id != 1) {
		dev_info(&pf->pdev->dev,
			 "Commit BW only works on partition 1! This is partition %d",
			 pf->hw.partition_id);
		ret = I40E_NOT_SUPPORTED;
		goto bw_commit_out;
	}

	/* Acquire NVM for read access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_READ);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for read access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Read word 0x10 of NVM - SW compatibility word 1 */
	ret = i40e_aq_read_nvm(&pf->hw,
			       I40E_SR_NVM_CONTROL_WORD,
			       0x10, sizeof(nvm_word), &nvm_word,
			       false, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret) {
		dev_info(&pf->pdev->dev, "NVM read error, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}

	/* Wait a bit for NVM release to complete */
	msleep(50);

	/* Acquire NVM for write access */
	ret = i40e_acquire_nvm(&pf->hw, I40E_RESOURCE_WRITE);
	last_aq_status = pf->hw.aq.asq_last_status;
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "Cannot acquire NVM for write access, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
		goto bw_commit_out;
	}
	/* Write it back out unchanged to initiate update NVM,
	 * which will force a write of the shadow (alt) RAM to
	 * the NVM - thus storing the bandwidth values permanently.
	 */
	ret = i40e_aq_update_nvm(&pf->hw,
				 I40E_SR_NVM_CONTROL_WORD,
				 0x10, sizeof(nvm_word),
				 &nvm_word, true, NULL);
	/* Save off last admin queue command status before releasing
	 * the NVM
	 */
	last_aq_status = pf->hw.aq.asq_last_status;
	i40e_release_nvm(&pf->hw);
	if (ret)
		dev_info(&pf->pdev->dev,
			 "BW settings NOT SAVED, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, last_aq_status));
bw_commit_out:

	return ret;
}
8140
/**
 * i40e_sw_init - Initialize general software structures (struct i40e_pf)
 * @pf: board private structure to initialize
 *
 * i40e_sw_init initializes the Adapter private data structure.
 * Fields are initialized based on PCI device information and
 * OS network device settings (MTU size).
 *
 * Returns 0 on success, -ENOMEM if the queue-pile allocation fails.
 **/
static int i40e_sw_init(struct i40e_pf *pf)
{
	int err = 0;
	int size;
	u16 pow;

	/* Derive message level from the module "debug" parameter; the
	 * I40E_DEBUG_USER bit selects whether "debug" also drives the
	 * hardware-layer debug_mask.
	 */
	pf->msg_enable = netif_msg_init(I40E_DEFAULT_MSG_ENABLE,
				(NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK));
	pf->hw.debug_mask = pf->msg_enable | I40E_DEBUG_DIAG;
	if (debug != -1 && debug != I40E_DEFAULT_MSG_ENABLE) {
		if (I40E_DEBUG_USER & debug)
			pf->hw.debug_mask = debug;
		pf->msg_enable = netif_msg_init((debug & ~I40E_DEBUG_USER),
						I40E_DEFAULT_MSG_ENABLE);
	}

	/* Set default capability flags */
	pf->flags = I40E_FLAG_RX_CSUM_ENABLED |
		    I40E_FLAG_MSI_ENABLED     |
		    I40E_FLAG_LINK_POLLING_ENABLED |
		    I40E_FLAG_MSIX_ENABLED;

	/* packet-split Rx is only enabled when an IOMMU is present;
	 * otherwise fall back to single-buffer Rx
	 */
	if (iommu_present(&pci_bus_type))
		pf->flags |= I40E_FLAG_RX_PS_ENABLED;
	else
		pf->flags |= I40E_FLAG_RX_1BUF_ENABLED;

	/* Set default ITR */
	pf->rx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_RX_DEF;
	pf->tx_itr_default = I40E_ITR_DYNAMIC | I40E_ITR_TX_DEF;

	/* Depending on PF configurations, it is possible that the RSS
	 * maximum might end up larger than the available queues
	 */
	pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
	pf->rss_size = 1;
	pf->rss_table_size = pf->hw.func_caps.rss_table_size;
	pf->rss_size_max = min_t(int, pf->rss_size_max,
				 pf->hw.func_caps.num_tx_qp);

	/* find the next higher power-of-2 of num cpus */
	pow = roundup_pow_of_two(num_online_cpus());
	pf->rss_size_max = min_t(int, pf->rss_size_max, pow);

	if (pf->hw.func_caps.rss) {
		pf->flags |= I40E_FLAG_RSS_ENABLED;
		pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
	}

	/* MFP mode enabled */
	if (pf->hw.func_caps.npar_enable || pf->hw.func_caps.flex10_enable) {
		pf->flags |= I40E_FLAG_MFP_ENABLED;
		dev_info(&pf->pdev->dev, "MFP mode Enabled\n");
		if (i40e_get_npar_bw_setting(pf))
			dev_warn(&pf->pdev->dev,
				 "Could not get NPAR bw settings\n");
		else
			dev_info(&pf->pdev->dev,
				 "Min BW = %8.8x, Max BW = %8.8x\n",
				 pf->npar_min_bw, pf->npar_max_bw);
	}

	/* FW/NVM is not yet fixed in this regard */
	if ((pf->hw.func_caps.fd_filters_guaranteed > 0) ||
	    (pf->hw.func_caps.fd_filters_best_effort > 0)) {
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		pf->atr_sample_rate = I40E_DEFAULT_ATR_SAMPLE_RATE;
		/* sideband flow director is not enabled on multi-partition
		 * devices; only ATR stays on in that case
		 */
		if (pf->flags & I40E_FLAG_MFP_ENABLED &&
		    pf->hw.num_partitions > 1)
			dev_info(&pf->pdev->dev,
				 "Flow Director Sideband mode Disabled in MFP mode\n");
		else
			pf->flags |= I40E_FLAG_FD_SB_ENABLED;
		pf->fdir_pf_filter_count =
				 pf->hw.func_caps.fd_filters_guaranteed;
		pf->hw.fdir_shared_filter_count =
				 pf->hw.func_caps.fd_filters_best_effort;
	}

	if (pf->hw.func_caps.vmdq) {
		pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
		pf->flags |= I40E_FLAG_VMDQ_ENABLED;
		pf->num_vmdq_qps = i40e_default_queues_per_vmdq(pf);
	}

#ifdef I40E_FCOE
	i40e_init_pf_fcoe(pf);

#endif /* I40E_FCOE */
#ifdef CONFIG_PCI_IOV
	/* SR-IOV is only offered from partition 1 of an MFP device */
	if (pf->hw.func_caps.num_vfs && pf->hw.partition_id == 1) {
		pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
		pf->flags |= I40E_FLAG_SRIOV_ENABLED;
		pf->num_req_vfs = min_t(int,
					pf->hw.func_caps.num_vfs,
					I40E_MAX_VF_COUNT);
	}
#endif /* CONFIG_PCI_IOV */
	/* X722 parts advertise extra RSS/ATR/checksum capabilities */
	if (pf->hw.mac.type == I40E_MAC_X722) {
		pf->flags |= I40E_FLAG_RSS_AQ_CAPABLE |
			     I40E_FLAG_128_QP_RSS_CAPABLE |
			     I40E_FLAG_HW_ATR_EVICT_CAPABLE |
			     I40E_FLAG_OUTER_UDP_CSUM_CAPABLE |
			     I40E_FLAG_WB_ON_ITR_CAPABLE |
			     I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE;
	}
	/* placeholder version until the real one is read from the NVM */
	pf->eeprom_version = 0xDEAD;
	pf->lan_veb = I40E_NO_VEB;
	pf->lan_vsi = I40E_NO_VSI;

	/* By default FW has this off for performance reasons */
	pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;

	/* set up queue assignment tracking */
	size = sizeof(struct i40e_lump_tracking)
		+ (sizeof(u16) * pf->hw.func_caps.num_tx_qp);
	pf->qp_pile = kzalloc(size, GFP_KERNEL);
	if (!pf->qp_pile) {
		err = -ENOMEM;
		goto sw_init_done;
	}
	pf->qp_pile->num_entries = pf->hw.func_caps.num_tx_qp;
	pf->qp_pile->search_hint = 0;

	pf->tx_timeout_recovery_level = 1;

	mutex_init(&pf->switch_mutex);

	/* If NPAR is enabled nudge the Tx scheduler */
	if (pf->hw.func_caps.npar_enable && (!i40e_get_npar_bw_setting(pf)))
		i40e_set_npar_bw_setting(pf);

sw_init_done:
	return err;
}
8284
/**
 * i40e_set_ntuple - set the ntuple feature flag and take action
 * @pf: board private structure to initialize
 * @features: the feature set that the stack is suggesting
 *
 * returns a bool to indicate if reset needs to happen
 **/
bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
{
	bool need_reset = false;

	/* Check if Flow Director n-tuple support was enabled or disabled.  If
	 * the state changed, we need to reset.
	 */
	if (features & NETIF_F_NTUPLE) {
		/* Enable filters and mark for reset */
		if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
			need_reset = true;
		pf->flags |= I40E_FLAG_FD_SB_ENABLED;
	} else {
		/* turn off filters, mark for reset and clear SW filter list */
		if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
			need_reset = true;
			i40e_fdir_filter_exit(pf);
		}
		pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
		pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
		/* reset fd counters */
		pf->fd_add_err = pf->fd_atr_cnt = pf->fd_tcp_rule = 0;
		pf->fdir_pf_active_filters = 0;
		/* with sideband off, ATR becomes the active FD mode again */
		pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
		if (I40E_DEBUG_FD & pf->hw.debug_mask)
			dev_info(&pf->pdev->dev, "ATR re-enabled.\n");
		/* if ATR was auto disabled it can be re-enabled. */
		if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
		    (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
			pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
	}
	return need_reset;
}
8325
8326 /**
8327  * i40e_set_features - set the netdev feature flags
8328  * @netdev: ptr to the netdev being adjusted
8329  * @features: the feature set that the stack is suggesting
8330  **/
8331 static int i40e_set_features(struct net_device *netdev,
8332                              netdev_features_t features)
8333 {
8334         struct i40e_netdev_priv *np = netdev_priv(netdev);
8335         struct i40e_vsi *vsi = np->vsi;
8336         struct i40e_pf *pf = vsi->back;
8337         bool need_reset;
8338
8339         if (features & NETIF_F_HW_VLAN_CTAG_RX)
8340                 i40e_vlan_stripping_enable(vsi);
8341         else
8342                 i40e_vlan_stripping_disable(vsi);
8343
8344         need_reset = i40e_set_ntuple(pf, features);
8345
8346         if (need_reset)
8347                 i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8348
8349         return 0;
8350 }
8351
8352 #ifdef CONFIG_I40E_VXLAN
8353 /**
8354  * i40e_get_vxlan_port_idx - Lookup a possibly offloaded for Rx UDP port
8355  * @pf: board private structure
8356  * @port: The UDP port to look up
8357  *
8358  * Returns the index number or I40E_MAX_PF_UDP_OFFLOAD_PORTS if port not found
8359  **/
8360 static u8 i40e_get_vxlan_port_idx(struct i40e_pf *pf, __be16 port)
8361 {
8362         u8 i;
8363
8364         for (i = 0; i < I40E_MAX_PF_UDP_OFFLOAD_PORTS; i++) {
8365                 if (pf->vxlan_ports[i] == port)
8366                         return i;
8367         }
8368
8369         return i;
8370 }
8371
8372 /**
8373  * i40e_add_vxlan_port - Get notifications about VXLAN ports that come up
8374  * @netdev: This physical port's netdev
8375  * @sa_family: Socket Family that VXLAN is notifying us about
8376  * @port: New UDP port number that VXLAN started listening to
8377  **/
8378 static void i40e_add_vxlan_port(struct net_device *netdev,
8379                                 sa_family_t sa_family, __be16 port)
8380 {
8381         struct i40e_netdev_priv *np = netdev_priv(netdev);
8382         struct i40e_vsi *vsi = np->vsi;
8383         struct i40e_pf *pf = vsi->back;
8384         u8 next_idx;
8385         u8 idx;
8386
8387         if (sa_family == AF_INET6)
8388                 return;
8389
8390         idx = i40e_get_vxlan_port_idx(pf, port);
8391
8392         /* Check if port already exists */
8393         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8394                 netdev_info(netdev, "vxlan port %d already offloaded\n",
8395                             ntohs(port));
8396                 return;
8397         }
8398
8399         /* Now check if there is space to add the new port */
8400         next_idx = i40e_get_vxlan_port_idx(pf, 0);
8401
8402         if (next_idx == I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8403                 netdev_info(netdev, "maximum number of vxlan UDP ports reached, not adding port %d\n",
8404                             ntohs(port));
8405                 return;
8406         }
8407
8408         /* New port: add it and mark its index in the bitmap */
8409         pf->vxlan_ports[next_idx] = port;
8410         pf->pending_vxlan_bitmap |= BIT_ULL(next_idx);
8411         pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
8412 }
8413
8414 /**
8415  * i40e_del_vxlan_port - Get notifications about VXLAN ports that go away
8416  * @netdev: This physical port's netdev
8417  * @sa_family: Socket Family that VXLAN is notifying us about
8418  * @port: UDP port number that VXLAN stopped listening to
8419  **/
8420 static void i40e_del_vxlan_port(struct net_device *netdev,
8421                                 sa_family_t sa_family, __be16 port)
8422 {
8423         struct i40e_netdev_priv *np = netdev_priv(netdev);
8424         struct i40e_vsi *vsi = np->vsi;
8425         struct i40e_pf *pf = vsi->back;
8426         u8 idx;
8427
8428         if (sa_family == AF_INET6)
8429                 return;
8430
8431         idx = i40e_get_vxlan_port_idx(pf, port);
8432
8433         /* Check if port already exists */
8434         if (idx < I40E_MAX_PF_UDP_OFFLOAD_PORTS) {
8435                 /* if port exists, set it to 0 (mark for deletion)
8436                  * and make it pending
8437                  */
8438                 pf->vxlan_ports[idx] = 0;
8439                 pf->pending_vxlan_bitmap |= BIT_ULL(idx);
8440                 pf->flags |= I40E_FLAG_VXLAN_FILTER_SYNC;
8441         } else {
8442                 netdev_warn(netdev, "vxlan port %d was not found, not deleting\n",
8443                             ntohs(port));
8444         }
8445 }
8446
8447 #endif
8448 static int i40e_get_phys_port_id(struct net_device *netdev,
8449                                  struct netdev_phys_item_id *ppid)
8450 {
8451         struct i40e_netdev_priv *np = netdev_priv(netdev);
8452         struct i40e_pf *pf = np->vsi->back;
8453         struct i40e_hw *hw = &pf->hw;
8454
8455         if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
8456                 return -EOPNOTSUPP;
8457
8458         ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
8459         memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
8460
8461         return 0;
8462 }
8463
/**
 * i40e_ndo_fdb_add - add an entry to the hardware database
 * @ndm: the input from the stack
 * @tb: pointer to array of nladdr (unused)
 * @dev: the net device pointer
 * @addr: the MAC address entry being added
 * @vid: VLAN id (must be 0; VLANs are not supported here)
 * @flags: instructions from stack about fdb operation
 *
 * Returns 0 on success or a negative errno; only usable when SR-IOV
 * is enabled on this PF.
 */
static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
			    struct net_device *dev,
			    const unsigned char *addr, u16 vid,
			    u16 flags)
{
	struct i40e_netdev_priv *np = netdev_priv(dev);
	struct i40e_pf *pf = np->vsi->back;
	int err = 0;

	if (!(pf->flags & I40E_FLAG_SRIOV_ENABLED))
		return -EOPNOTSUPP;

	if (vid) {
		pr_info("%s: vlans aren't supported yet for dev_uc|mc_add()\n", dev->name);
		return -EINVAL;
	}

	/* Hardware does not support aging addresses so if a
	 * ndm_state is given only allow permanent addresses
	 */
	if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) {
		netdev_info(dev, "FDB only supports static addresses\n");
		return -EINVAL;
	}

	/* dispatch to the unicast or multicast list by address class */
	if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
		err = dev_uc_add_excl(dev, addr);
	else if (is_multicast_ether_addr(addr))
		err = dev_mc_add_excl(dev, addr);
	else
		err = -EINVAL;

	/* Only return duplicate errors if NLM_F_EXCL is set */
	if (err == -EEXIST && !(flags & NLM_F_EXCL))
		err = 0;

	return err;
}
8510
8511 /**
8512  * i40e_ndo_bridge_setlink - Set the hardware bridge mode
8513  * @dev: the netdev being configured
8514  * @nlh: RTNL message
8515  *
8516  * Inserts a new hardware bridge if not already created and
8517  * enables the bridging mode requested (VEB or VEPA). If the
8518  * hardware bridge has already been inserted and the request
8519  * is to change the mode then that requires a PF reset to
8520  * allow rebuild of the components with required hardware
8521  * bridge mode enabled.
8522  **/
8523 static int i40e_ndo_bridge_setlink(struct net_device *dev,
8524                                    struct nlmsghdr *nlh,
8525                                    u16 flags)
8526 {
8527         struct i40e_netdev_priv *np = netdev_priv(dev);
8528         struct i40e_vsi *vsi = np->vsi;
8529         struct i40e_pf *pf = vsi->back;
8530         struct i40e_veb *veb = NULL;
8531         struct nlattr *attr, *br_spec;
8532         int i, rem;
8533
8534         /* Only for PF VSI for now */
8535         if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8536                 return -EOPNOTSUPP;
8537
8538         /* Find the HW bridge for PF VSI */
8539         for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8540                 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8541                         veb = pf->veb[i];
8542         }
8543
8544         br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
8545
8546         nla_for_each_nested(attr, br_spec, rem) {
8547                 __u16 mode;
8548
8549                 if (nla_type(attr) != IFLA_BRIDGE_MODE)
8550                         continue;
8551
8552                 mode = nla_get_u16(attr);
8553                 if ((mode != BRIDGE_MODE_VEPA) &&
8554                     (mode != BRIDGE_MODE_VEB))
8555                         return -EINVAL;
8556
8557                 /* Insert a new HW bridge */
8558                 if (!veb) {
8559                         veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
8560                                              vsi->tc_config.enabled_tc);
8561                         if (veb) {
8562                                 veb->bridge_mode = mode;
8563                                 i40e_config_bridge_mode(veb);
8564                         } else {
8565                                 /* No Bridge HW offload available */
8566                                 return -ENOENT;
8567                         }
8568                         break;
8569                 } else if (mode != veb->bridge_mode) {
8570                         /* Existing HW bridge but different mode needs reset */
8571                         veb->bridge_mode = mode;
8572                         /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
8573                         if (mode == BRIDGE_MODE_VEB)
8574                                 pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
8575                         else
8576                                 pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
8577                         i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
8578                         break;
8579                 }
8580         }
8581
8582         return 0;
8583 }
8584
8585 /**
8586  * i40e_ndo_bridge_getlink - Get the hardware bridge mode
8587  * @skb: skb buff
8588  * @pid: process id
8589  * @seq: RTNL message seq #
8590  * @dev: the netdev being configured
8591  * @filter_mask: unused
8592  * @nlflags: netlink flags passed in
8593  *
8594  * Return the mode in which the hardware bridge is operating in
8595  * i.e VEB or VEPA.
8596  **/
8597 static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
8598                                    struct net_device *dev,
8599                                    u32 __always_unused filter_mask,
8600                                    int nlflags)
8601 {
8602         struct i40e_netdev_priv *np = netdev_priv(dev);
8603         struct i40e_vsi *vsi = np->vsi;
8604         struct i40e_pf *pf = vsi->back;
8605         struct i40e_veb *veb = NULL;
8606         int i;
8607
8608         /* Only for PF VSI for now */
8609         if (vsi->seid != pf->vsi[pf->lan_vsi]->seid)
8610                 return -EOPNOTSUPP;
8611
8612         /* Find the HW bridge for the PF VSI */
8613         for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
8614                 if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
8615                         veb = pf->veb[i];
8616         }
8617
8618         if (!veb)
8619                 return 0;
8620
8621         return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
8622                                        0, 0, nlflags, filter_mask, NULL);
8623 }
8624
8625 #define I40E_MAX_TUNNEL_HDR_LEN 80
8626 /**
8627  * i40e_features_check - Validate encapsulated packet conforms to limits
8628  * @skb: skb buff
8629  * @dev: This physical port's netdev
8630  * @features: Offload features that the stack believes apply
8631  **/
8632 static netdev_features_t i40e_features_check(struct sk_buff *skb,
8633                                              struct net_device *dev,
8634                                              netdev_features_t features)
8635 {
8636         if (skb->encapsulation &&
8637             (skb_inner_mac_header(skb) - skb_transport_header(skb) >
8638              I40E_MAX_TUNNEL_HDR_LEN))
8639                 return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
8640
8641         return features;
8642 }
8643
/* netdev callbacks wired onto every net_device created by this driver */
static const struct net_device_ops i40e_netdev_ops = {
	.ndo_open		= i40e_open,
	.ndo_stop		= i40e_close,
	.ndo_start_xmit		= i40e_lan_xmit_frame,
	.ndo_get_stats64	= i40e_get_netdev_stats_struct,
	.ndo_set_rx_mode	= i40e_set_rx_mode,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= i40e_set_mac,
	.ndo_change_mtu		= i40e_change_mtu,
	.ndo_do_ioctl		= i40e_ioctl,
	.ndo_tx_timeout		= i40e_tx_timeout,
	.ndo_vlan_rx_add_vid	= i40e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= i40e_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= i40e_netpoll,
#endif
	.ndo_setup_tc		= i40e_setup_tc,
#ifdef I40E_FCOE
	.ndo_fcoe_enable	= i40e_fcoe_enable,
	.ndo_fcoe_disable	= i40e_fcoe_disable,
#endif
	.ndo_set_features	= i40e_set_features,
	/* SR-IOV VF management entry points */
	.ndo_set_vf_mac		= i40e_ndo_set_vf_mac,
	.ndo_set_vf_vlan	= i40e_ndo_set_vf_port_vlan,
	.ndo_set_vf_rate	= i40e_ndo_set_vf_bw,
	.ndo_get_vf_config	= i40e_ndo_get_vf_config,
	.ndo_set_vf_link_state	= i40e_ndo_set_vf_link_state,
	.ndo_set_vf_spoofchk	= i40e_ndo_set_vf_spoofchk,
#ifdef CONFIG_I40E_VXLAN
	.ndo_add_vxlan_port	= i40e_add_vxlan_port,
	.ndo_del_vxlan_port	= i40e_del_vxlan_port,
#endif
	.ndo_get_phys_port_id	= i40e_get_phys_port_id,
	.ndo_fdb_add		= i40e_ndo_fdb_add,
	.ndo_features_check	= i40e_features_check,
	/* HW bridge (VEB/VEPA) mode query/configure */
	.ndo_bridge_getlink	= i40e_ndo_bridge_getlink,
	.ndo_bridge_setlink	= i40e_ndo_bridge_setlink,
};
8682
/**
 * i40e_config_netdev - Setup the netdev flags
 * @vsi: the VSI being configured
 *
 * Allocates a multi-queue net_device for @vsi, configures its offload
 * feature flags and MAC address, seeds the VSI MAC filter list
 * (unicast + broadcast) and attaches the i40e netdev/ethtool ops.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_config_netdev(struct i40e_vsi *vsi)
{
	u8 brdcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_netdev_priv *np;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int etherdev_size;

	etherdev_size = sizeof(struct i40e_netdev_priv);
	netdev = alloc_etherdev_mq(etherdev_size, vsi->alloc_queue_pairs);
	if (!netdev)
		return -ENOMEM;

	/* cross-link the VSI and the netdev's private area */
	vsi->netdev = netdev;
	np = netdev_priv(netdev);
	np->vsi = vsi;

	/* offloads advertised for encapsulated (tunnelled) packets */
	netdev->hw_enc_features |= NETIF_F_IP_CSUM	 |
				  NETIF_F_GSO_UDP_TUNNEL |
				  NETIF_F_GSO_GRE	 |
				  NETIF_F_TSO;

	netdev->features = NETIF_F_SG		       |
			   NETIF_F_IP_CSUM	       |
			   NETIF_F_SCTP_CSUM	       |
			   NETIF_F_HIGHDMA	       |
			   NETIF_F_GSO_UDP_TUNNEL      |
			   NETIF_F_GSO_GRE	       |
			   NETIF_F_HW_VLAN_CTAG_TX     |
			   NETIF_F_HW_VLAN_CTAG_RX     |
			   NETIF_F_HW_VLAN_CTAG_FILTER |
			   NETIF_F_IPV6_CSUM	       |
			   NETIF_F_TSO		       |
			   NETIF_F_TSO_ECN	       |
			   NETIF_F_TSO6		       |
			   NETIF_F_RXCSUM	       |
			   NETIF_F_RXHASH	       |
			   0;

	/* n-tuple (Flow Director) filters are only offered outside MFP mode */
	if (!(pf->flags & I40E_FLAG_MFP_ENABLED))
		netdev->features |= NETIF_F_NTUPLE;

	/* copy netdev features into list of user selectable features */
	netdev->hw_features |= netdev->features;

	if (vsi->type == I40E_VSI_MAIN) {
		SET_NETDEV_DEV(netdev, &pf->pdev->dev);
		ether_addr_copy(mac_addr, hw->mac.perm_addr);
		/* The following steps are necessary to prevent reception
		 * of tagged packets - some older NVM configurations load a
		 * default MAC-VLAN filter that accepts any tagged packet
		 * which must be replaced by a normal filter.
		 */
		if (!i40e_rm_default_mac_filter(vsi, mac_addr)) {
			spin_lock_bh(&vsi->mac_filter_list_lock);
			i40e_add_filter(vsi, mac_addr,
					I40E_VLAN_ANY, false, true);
			spin_unlock_bh(&vsi->mac_filter_list_lock);
		}
	} else {
		/* relate the VSI_VMDQ name to the VSI_MAIN name */
		snprintf(netdev->name, IFNAMSIZ, "%sv%%d",
			 pf->vsi[pf->lan_vsi]->netdev->name);
		/* non-main VSIs get a randomly generated MAC */
		random_ether_addr(mac_addr);

		spin_lock_bh(&vsi->mac_filter_list_lock);
		i40e_add_filter(vsi, mac_addr, I40E_VLAN_ANY, false, false);
		spin_unlock_bh(&vsi->mac_filter_list_lock);
	}

	/* always receive broadcast frames */
	spin_lock_bh(&vsi->mac_filter_list_lock);
	i40e_add_filter(vsi, brdcast, I40E_VLAN_ANY, false, false);
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	ether_addr_copy(netdev->dev_addr, mac_addr);
	ether_addr_copy(netdev->perm_addr, mac_addr);
	/* vlan gets same features (except vlan offload)
	 * after any tweaks for specific VSI types
	 */
	netdev->vlan_features = netdev->features & ~(NETIF_F_HW_VLAN_CTAG_TX |
						     NETIF_F_HW_VLAN_CTAG_RX |
						   NETIF_F_HW_VLAN_CTAG_FILTER);
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->priv_flags |= IFF_SUPP_NOFCS;
	/* Setup netdev TC information */
	i40e_vsi_config_netdev_tc(vsi, vsi->tc_config.enabled_tc);

	netdev->netdev_ops = &i40e_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	i40e_set_ethtool_ops(netdev);
#ifdef I40E_FCOE
	i40e_fcoe_config_netdev(netdev, vsi);
#endif

	return 0;
}
8787
8788 /**
8789  * i40e_vsi_delete - Delete a VSI from the switch
8790  * @vsi: the VSI being removed
8791  *
8792  * Returns 0 on success, negative value on failure
8793  **/
8794 static void i40e_vsi_delete(struct i40e_vsi *vsi)
8795 {
8796         /* remove default VSI is not allowed */
8797         if (vsi == vsi->back->vsi[vsi->back->lan_vsi])
8798                 return;
8799
8800         i40e_aq_delete_element(&vsi->back->hw, vsi->seid, NULL);
8801 }
8802
8803 /**
8804  * i40e_is_vsi_uplink_mode_veb - Check if the VSI's uplink bridge mode is VEB
8805  * @vsi: the VSI being queried
8806  *
8807  * Returns 1 if HW bridge mode is VEB and return 0 in case of VEPA mode
8808  **/
8809 int i40e_is_vsi_uplink_mode_veb(struct i40e_vsi *vsi)
8810 {
8811         struct i40e_veb *veb;
8812         struct i40e_pf *pf = vsi->back;
8813
8814         /* Uplink is not a bridge so default to VEB */
8815         if (vsi->veb_idx == I40E_NO_VEB)
8816                 return 1;
8817
8818         veb = pf->veb[vsi->veb_idx];
8819         if (!veb) {
8820                 dev_info(&pf->pdev->dev,
8821                          "There is no veb associated with the bridge\n");
8822                 return -ENOENT;
8823         }
8824
8825         /* Uplink is a bridge in VEPA mode */
8826         if (veb->bridge_mode & BRIDGE_MODE_VEPA) {
8827                 return 0;
8828         } else {
8829                 /* Uplink is a bridge in VEB mode */
8830                 return 1;
8831         }
8832
8833         /* VEPA is now default bridge, so return 0 */
8834         return 0;
8835 }
8836
/**
 * i40e_add_vsi - Add a VSI to the switch
 * @vsi: the VSI being configured
 *
 * This initializes a VSI context depending on the VSI type to be added and
 * passes it down to the add_vsi aq command.  For the main VSI (already
 * created by firmware) only the current context is fetched and updated;
 * all other types are created via the AQ.  Afterwards, any pre-existing
 * MAC filters are marked for re-sync and the VSI's BW info is refreshed.
 *
 * Returns 0 on success, negative value on failure.
 **/
static int i40e_add_vsi(struct i40e_vsi *vsi)
{
	int ret = -ENODEV;
	u8 laa_macaddr[ETH_ALEN];
	bool found_laa_mac_filter = false;
	struct i40e_pf *pf = vsi->back;
	struct i40e_hw *hw = &pf->hw;
	struct i40e_vsi_context ctxt;
	struct i40e_mac_filter *f, *ftmp;

	u8 enabled_tc = 0x1; /* TC0 enabled */
	int f_count = 0;

	memset(&ctxt, 0, sizeof(ctxt));
	switch (vsi->type) {
	case I40E_VSI_MAIN:
		/* The PF's main VSI is already setup as part of the
		 * device initialization, so we'll not bother with
		 * the add_vsi call, but we will retrieve the current
		 * VSI context.
		 */
		ctxt.seid = pf->main_vsi_seid;
		ctxt.pf_num = pf->hw.pf_id;
		ctxt.vf_num = 0;
		ret = i40e_aq_get_vsi_params(&pf->hw, &ctxt, NULL);
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "couldn't get PF vsi config, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			return -ENOENT;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;

		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;

		enabled_tc = i40e_pf_get_tc_map(pf);

		/* MFP mode setup queue map and update VSI */
		if ((pf->flags & I40E_FLAG_MFP_ENABLED) &&
		    !(pf->hw.func_caps.iscsi)) { /* NIC type PF */
			memset(&ctxt, 0, sizeof(ctxt));
			ctxt.seid = pf->main_vsi_seid;
			ctxt.pf_num = pf->hw.pf_id;
			ctxt.vf_num = 0;
			i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, false);
			ret = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "update vsi failed, err %s aq_err %s\n",
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
				ret = -ENOENT;
				goto err;
			}
			/* update the local VSI info queue map */
			i40e_vsi_update_queue_map(vsi, &ctxt);
			vsi->info.valid_sections = 0;
		} else {
			/* Default/Main VSI is only enabled for TC0
			 * reconfigure it to enable all TCs that are
			 * available on the port in SFP mode.
			 * For MFP case the iSCSI PF would use this
			 * flow to enable LAN+iSCSI TC.
			 */
			ret = i40e_vsi_config_tc(vsi, enabled_tc);
			if (ret) {
				dev_info(&pf->pdev->dev,
					 "failed to configure TCs for main VSI tc_map 0x%08x, err %s aq_err %s\n",
					 enabled_tc,
					 i40e_stat_str(&pf->hw, ret),
					 i40e_aq_str(&pf->hw,
						    pf->hw.aq.asq_last_status));
				ret = -ENOENT;
			}
		}
		break;

	case I40E_VSI_FDIR:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_PF;
		/* allow loopback when the uplink bridge is in VEB mode */
		if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
		    (i40e_is_vsi_uplink_mode_veb(vsi))) {
			ctxt.info.valid_sections |=
			     cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
			   cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_VMDQ2:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = 0;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VMDQ2;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

	case I40E_VSI_SRIOV:
		ctxt.pf_num = hw->pf_id;
		ctxt.vf_num = vsi->vf_id + hw->func_caps.vf_base_id;
		ctxt.uplink_seid = vsi->uplink_seid;
		ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
		ctxt.flags = I40E_AQ_VSI_TYPE_VF;

		/* This VSI is connected to VEB so the switch_id
		 * should be set to zero by default.
		 */
		if (i40e_is_vsi_uplink_mode_veb(vsi)) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
			ctxt.info.switch_id =
				cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
		}

		/* force the VF's port VLAN mode on, and enable anti-spoof
		 * checks if the VF was configured with spoofchk
		 */
		ctxt.info.valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_VLAN_VALID);
		ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_MODE_ALL;
		if (pf->vf[vsi->vf_id].spoofchk) {
			ctxt.info.valid_sections |=
				cpu_to_le16(I40E_AQ_VSI_PROP_SECURITY_VALID);
			ctxt.info.sec_flags |=
				(I40E_AQ_VSI_SEC_FLAG_ENABLE_VLAN_CHK |
				 I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK);
		}
		/* Setup the VSI tx/rx queue map for TC0 only for now */
		i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
		break;

#ifdef I40E_FCOE
	case I40E_VSI_FCOE:
		ret = i40e_fcoe_vsi_init(vsi, &ctxt);
		if (ret) {
			dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
			return ret;
		}
		break;

#endif /* I40E_FCOE */
	default:
		return -ENODEV;
	}

	/* main VSI already exists in FW; all others must be created here */
	if (vsi->type != I40E_VSI_MAIN) {
		ret = i40e_aq_add_vsi(hw, &ctxt, NULL);
		if (ret) {
			dev_info(&vsi->back->pdev->dev,
				 "add vsi failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			ret = -ENOENT;
			goto err;
		}
		vsi->info = ctxt.info;
		vsi->info.valid_sections = 0;
		vsi->seid = ctxt.seid;
		vsi->id = ctxt.vsi_number;
	}

	spin_lock_bh(&vsi->mac_filter_list_lock);
	/* If macvlan filters already exist, force them to get loaded */
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
		f->changed = true;
		f_count++;

		/* Expected to have only one MAC filter entry for LAA in list */
		if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
			ether_addr_copy(laa_macaddr, f->macaddr);
			found_laa_mac_filter = true;
		}
	}
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	/* a locally administered address survives the reset: remove the old
	 * HW filter and re-program the address into the HW LAA/WOL register
	 */
	if (found_laa_mac_filter) {
		struct i40e_aqc_remove_macvlan_element_data element;

		memset(&element, 0, sizeof(element));
		ether_addr_copy(element.mac_addr, laa_macaddr);
		element.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
		ret = i40e_aq_remove_macvlan(hw, vsi->seid,
					     &element, 1, NULL);
		if (ret) {
			/* some older FW has a different default */
			element.flags |=
				       I40E_AQC_MACVLAN_DEL_IGNORE_VLAN;
			i40e_aq_remove_macvlan(hw, vsi->seid,
					       &element, 1, NULL);
		}

		i40e_aq_mac_address_write(hw,
					  I40E_AQC_WRITE_TYPE_LAA_WOL,
					  laa_macaddr, NULL);
	}

	/* request a filter re-sync from the service task */
	if (f_count) {
		vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
		pf->flags |= I40E_FLAG_FILTER_SYNC;
	}

	/* Update VSI BW information */
	ret = i40e_vsi_get_bw_info(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get vsi bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* VSI is already added so not tearing that up */
		ret = 0;
	}

err:
	return ret;
}
9079
/**
 * i40e_vsi_release - Delete a VSI and free its resources
 * @vsi: the VSI being removed
 *
 * Unregisters/closes the netdev, removes all MAC filters, deletes the VSI
 * from the HW switch, frees its SW resources, and finally releases the
 * parent VEB if this VSI was the last one hanging off it (orphan VEBs,
 * i.e. uplink_seid == 0, are left for an explicit remove request).
 *
 * Returns 0 on success or < 0 on error
 **/
int i40e_vsi_release(struct i40e_vsi *vsi)
{
	struct i40e_mac_filter *f, *ftmp;
	struct i40e_veb *veb = NULL;
	struct i40e_pf *pf;
	u16 uplink_seid;
	int i, n;

	pf = vsi->back;

	/* release of a VEB-owner or last VSI is not allowed */
	if (vsi->flags & I40E_VSI_FLAG_VEB_OWNER) {
		dev_info(&pf->pdev->dev, "VSI %d has existing VEB %d\n",
			 vsi->seid, vsi->uplink_seid);
		return -ENODEV;
	}
	if (vsi == pf->vsi[pf->lan_vsi] &&
	    !test_bit(__I40E_DOWN, &pf->state)) {
		dev_info(&pf->pdev->dev, "Can't remove PF VSI\n");
		return -ENODEV;
	}

	/* remember the uplink before the VSI struct is torn down */
	uplink_seid = vsi->uplink_seid;
	if (vsi->type != I40E_VSI_SRIOV) {
		if (vsi->netdev_registered) {
			vsi->netdev_registered = false;
			if (vsi->netdev) {
				/* results in a call to i40e_close() */
				unregister_netdev(vsi->netdev);
			}
		} else {
			i40e_vsi_close(vsi);
		}
		i40e_vsi_disable_irq(vsi);
	}

	/* drop all MAC filters, then push the deletions to the HW */
	spin_lock_bh(&vsi->mac_filter_list_lock);
	list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list)
		i40e_del_filter(vsi, f->macaddr, f->vlan,
				f->is_vf, f->is_netdev);
	spin_unlock_bh(&vsi->mac_filter_list_lock);

	i40e_sync_vsi_filters(vsi, false);

	i40e_vsi_delete(vsi);
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev) {
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_vsi_clear_rings(vsi);
	i40e_vsi_clear(vsi);

	/* If this was the last thing on the VEB, except for the
	 * controlling VSI, remove the VEB, which puts the controlling
	 * VSI onto the next level down in the switch.
	 *
	 * Well, okay, there's one more exception here: don't remove
	 * the orphan VEBs yet.  We'll wait for an explicit remove request
	 * from up the network stack.
	 */
	for (n = 0, i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] &&
		    pf->vsi[i]->uplink_seid == uplink_seid &&
		    (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			n++;      /* count the VSIs */
		}
	}
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == uplink_seid)
			n++;     /* count the VEBs */
		if (pf->veb[i]->seid == uplink_seid)
			veb = pf->veb[i];
	}
	/* only release the VEB when nothing else remains on it and it is
	 * not an orphan (uplink_seid == 0)
	 */
	if (n == 0 && veb && veb->uplink_seid != 0)
		i40e_veb_release(veb);

	return 0;
}
9167
/**
 * i40e_vsi_setup_vectors - Set up the q_vectors for the given VSI
 * @vsi: ptr to the VSI
 *
 * This should only be called after i40e_vsi_mem_alloc() which allocates the
 * corresponding SW VSI structure and initializes num_queue_pairs for the
 * newly allocated VSI.
 *
 * Returns 0 on success or negative on failure
 **/
static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
{
	int ret = -ENOENT;
	struct i40e_pf *pf = vsi->back;

	/* refuse to double-allocate */
	if (vsi->q_vectors[0]) {
		dev_info(&pf->pdev->dev, "VSI %d has existing q_vectors\n",
			 vsi->seid);
		return -EEXIST;
	}

	if (vsi->base_vector) {
		dev_info(&pf->pdev->dev, "VSI %d has non-zero base vector %d\n",
			 vsi->seid, vsi->base_vector);
		return -EEXIST;
	}

	ret = i40e_vsi_alloc_q_vectors(vsi);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "failed to allocate %d q_vector for VSI %d, ret=%d\n",
			 vsi->num_q_vectors, vsi->seid, ret);
		vsi->num_q_vectors = 0;
		goto vector_setup_out;
	}

	/* In Legacy mode, we do not have to get any other vector since we
	 * piggyback on the misc/ICR0 for queue interrupts.
	 */
	if (!(pf->flags & I40E_FLAG_MSIX_ENABLED))
		return ret;
	/* reserve a contiguous range of IRQ vectors from the PF's pile */
	if (vsi->num_q_vectors)
		vsi->base_vector = i40e_get_lump(pf, pf->irq_pile,
						 vsi->num_q_vectors, vsi->idx);
	/* NOTE(review): this < 0 check only fires if base_vector is a
	 * signed field wide enough to hold i40e_get_lump()'s error return
	 * — confirm against struct i40e_vsi's declaration.
	 */
	if (vsi->base_vector < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d vectors for VSI %d, err=%d\n",
			 vsi->num_q_vectors, vsi->seid, vsi->base_vector);
		i40e_vsi_free_q_vectors(vsi);
		ret = -ENOENT;
		goto vector_setup_out;
	}

vector_setup_out:
	return ret;
}
9224
/**
 * i40e_vsi_reinit_setup - return and reallocate resources for a VSI
 * @vsi: pointer to the vsi.
 *
 * This re-allocates a vsi's queue resources: the old queue lump and rings
 * are released, ring/queue arrays are re-sized and re-acquired, the FW view
 * of the LAN VSI's TC/queue layout is forced to be reconfigured, and rings
 * are re-mapped onto the q_vectors.
 *
 * Returns pointer to the successfully allocated and configured VSI sw struct
 * on success, otherwise returns NULL on failure.
 **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = vsi->back;
	u8 enabled_tc;
	int ret;

	/* give the old queue allocation back to the pile */
	i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
	i40e_vsi_clear_rings(vsi);

	/* resize the ring/queue bookkeeping arrays for the new layout */
	i40e_vsi_free_arrays(vsi, false);
	i40e_set_num_rings_in_vsi(vsi);
	ret = i40e_vsi_alloc_arrays(vsi, false);
	if (ret)
		goto err_vsi;

	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs, vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err %d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* Update the FW view of the VSI. Force a reset of TC and queue
	 * layout configurations.
	 */
	enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;
	pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
	pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
	i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);

	/* assign it some queues */
	ret = i40e_alloc_rings(vsi);
	if (ret)
		goto err_rings;

	/* map all of the rings to the q_vectors */
	i40e_vsi_map_rings_to_vectors(vsi);
	return vsi;

err_rings:
	/* tear down partially-built state, including the HW element */
	i40e_vsi_free_q_vectors(vsi);
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
	return NULL;
}
9288
/**
 * i40e_vsi_setup - Set up a VSI by a given type
 * @pf: board private structure
 * @type: VSI type (I40E_VSI_MAIN, _VMDQ2, _FCOE, _FDIR, _SRIOV, ...)
 * @uplink_seid: the switch element to link to
 * @param1: usage depends upon VSI type. For VF types, indicates VF id
 *
 * This allocates the sw VSI structure and its queue resources, then add a VSI
 * to the identified VEB.  A new VEB is implicitly created when the uplink
 * is a VSI that does not yet own one.
 *
 * Returns pointer to the successfully allocated and configure VSI sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
				u16 uplink_seid, u32 param1)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_veb *veb = NULL;
	int ret, i;
	int v_idx;

	/* The requested uplink_seid must be either
	 *     - the PF's port seid
	 *              no VEB is needed because this is the PF
	 *              or this is a Flow Director special case VSI
	 *     - seid of an existing VEB
	 *     - seid of a VSI that owns an existing VEB
	 *     - seid of a VSI that doesn't own a VEB
	 *              a new VEB is created and the VSI becomes the owner
	 *     - seid of the PF VSI, which is what creates the first VEB
	 *              this is a special case of the previous
	 *
	 * Find which uplink_seid we were given and create a new VEB if needed
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (pf->veb[i] && pf->veb[i]->seid == uplink_seid) {
			veb = pf->veb[i];
			break;
		}
	}

	if (!veb && uplink_seid != pf->mac_seid) {
		/* uplink is not a known VEB or the port: it must be an
		 * existing VSI, which may need a VEB created under it
		 */
		for (i = 0; i < pf->num_alloc_vsi; i++) {
			if (pf->vsi[i] && pf->vsi[i]->seid == uplink_seid) {
				vsi = pf->vsi[i];
				break;
			}
		}
		if (!vsi) {
			dev_info(&pf->pdev->dev, "no such uplink_seid %d\n",
				 uplink_seid);
			return NULL;
		}

		if (vsi->uplink_seid == pf->mac_seid)
			veb = i40e_veb_setup(pf, 0, pf->mac_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		else if ((vsi->flags & I40E_VSI_FLAG_VEB_OWNER) == 0)
			veb = i40e_veb_setup(pf, 0, vsi->uplink_seid, vsi->seid,
					     vsi->tc_config.enabled_tc);
		if (veb) {
			/* a freshly created VEB must hang off the LAN VSI */
			if (vsi->seid != pf->vsi[pf->lan_vsi]->seid) {
				dev_info(&vsi->back->pdev->dev,
					 "New VSI creation error, uplink seid of LAN VSI expected.\n");
				return NULL;
			}
			/* We come up by default in VEPA mode if SRIOV is not
			 * already enabled, in which case we can't force VEPA
			 * mode.
			 * (the &= below is a no-op here since the branch is
			 * only taken when the flag is already clear)
			 */
			if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
				veb->bridge_mode = BRIDGE_MODE_VEPA;
				pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
			}
			i40e_config_bridge_mode(veb);
		}
		/* re-find the VEB in case i40e_veb_setup() gave us an
		 * already-existing one instead of the local pointer above
		 */
		for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
			if (pf->veb[i] && pf->veb[i]->seid == vsi->uplink_seid)
				veb = pf->veb[i];
		}
		if (!veb) {
			dev_info(&pf->pdev->dev, "couldn't add VEB\n");
			return NULL;
		}

		vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;
		uplink_seid = veb->seid;
	}

	/* get vsi sw struct */
	v_idx = i40e_vsi_mem_alloc(pf, type);
	if (v_idx < 0)
		goto err_alloc;
	vsi = pf->vsi[v_idx];
	if (!vsi)
		goto err_alloc;
	vsi->type = type;
	vsi->veb_idx = (veb ? veb->idx : I40E_NO_VEB);

	if (type == I40E_VSI_MAIN)
		pf->lan_vsi = v_idx;
	else if (type == I40E_VSI_SRIOV)
		vsi->vf_id = param1;
	/* assign it some queues */
	ret = i40e_get_lump(pf, pf->qp_pile, vsi->alloc_queue_pairs,
				vsi->idx);
	if (ret < 0) {
		dev_info(&pf->pdev->dev,
			 "failed to get tracking for %d queues for VSI %d err=%d\n",
			 vsi->alloc_queue_pairs, vsi->seid, ret);
		goto err_vsi;
	}
	vsi->base_queue = ret;

	/* get a VSI from the hardware */
	vsi->uplink_seid = uplink_seid;
	ret = i40e_add_vsi(vsi);
	if (ret)
		goto err_vsi;

	switch (vsi->type) {
	/* setup the netdev if needed */
	case I40E_VSI_MAIN:
	case I40E_VSI_VMDQ2:
	case I40E_VSI_FCOE:
		ret = i40e_config_netdev(vsi);
		if (ret)
			goto err_netdev;
		ret = register_netdev(vsi->netdev);
		if (ret)
			goto err_netdev;
		vsi->netdev_registered = true;
		netif_carrier_off(vsi->netdev);
#ifdef CONFIG_I40E_DCB
		/* Setup DCB netlink interface */
		i40e_dcbnl_setup(vsi);
#endif /* CONFIG_I40E_DCB */
		/* fall through */

	case I40E_VSI_FDIR:
		/* set up vectors and rings if needed */
		ret = i40e_vsi_setup_vectors(vsi);
		if (ret)
			goto err_msix;

		ret = i40e_alloc_rings(vsi);
		if (ret)
			goto err_rings;

		/* map all of the rings to the q_vectors */
		i40e_vsi_map_rings_to_vectors(vsi);

		i40e_vsi_reset_stats(vsi);
		break;

	default:
		/* no netdev or rings for the other VSI types */
		break;
	}

	if ((pf->flags & I40E_FLAG_RSS_AQ_CAPABLE) &&
	    (vsi->type == I40E_VSI_VMDQ2)) {
		ret = i40e_vsi_config_rss(vsi);
	}
	return vsi;

	/* unwind in reverse order of the setup steps above */
err_rings:
	i40e_vsi_free_q_vectors(vsi);
err_msix:
	if (vsi->netdev_registered) {
		vsi->netdev_registered = false;
		unregister_netdev(vsi->netdev);
		free_netdev(vsi->netdev);
		vsi->netdev = NULL;
	}
err_netdev:
	i40e_aq_delete_element(&pf->hw, vsi->seid, NULL);
err_vsi:
	i40e_vsi_clear(vsi);
err_alloc:
	return NULL;
}
9472
/**
 * i40e_veb_get_bw_info - Query VEB BW information
 * @veb: the veb to query
 *
 * Query the Tx scheduler BW configuration data for given VEB and cache
 * the port limit plus per-TC share credits, limit credits and max
 * quanta in the sw VEB struct.
 *
 * Returns 0 on success, nonzero AQ status on failure.
 **/
static int i40e_veb_get_bw_info(struct i40e_veb *veb)
{
	struct i40e_aqc_query_switching_comp_ets_config_resp ets_data;
	struct i40e_aqc_query_switching_comp_bw_config_resp bw_data;
	struct i40e_pf *pf = veb->pf;
	struct i40e_hw *hw = &pf->hw;
	u32 tc_bw_max;
	int ret = 0;
	int i;

	/* per-TC BW credits/limits for this switching component */
	ret = i40e_aq_query_switch_comp_bw_config(hw, veb->seid,
						  &bw_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	/* ETS configuration (port limit, valid TC bits) */
	ret = i40e_aq_query_switch_comp_ets_config(hw, veb->seid,
						   &ets_data, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "query veb bw ets config failed, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, hw->aq.asq_last_status));
		goto out;
	}

	veb->bw_limit = le16_to_cpu(ets_data.port_bw_limit);
	veb->bw_max_quanta = ets_data.tc_bw_max;
	veb->is_abs_credits = bw_data.absolute_credits_enable;
	veb->enabled_tc = ets_data.tc_valid_bits;
	/* tc_bw_max is two LE 16-bit words packing a nibble per TC */
	tc_bw_max = le16_to_cpu(bw_data.tc_bw_max[0]) |
		    (le16_to_cpu(bw_data.tc_bw_max[1]) << 16);
	for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) {
		veb->bw_tc_share_credits[i] = bw_data.tc_bw_share_credits[i];
		veb->bw_tc_limit_credits[i] =
					le16_to_cpu(bw_data.tc_bw_limits[i]);
		veb->bw_tc_max_quanta[i] = ((tc_bw_max >> (i*4)) & 0x7);
	}

out:
	return ret;
}
9525
9526 /**
9527  * i40e_veb_mem_alloc - Allocates the next available struct veb in the PF
9528  * @pf: board private structure
9529  *
9530  * On error: returns error code (negative)
9531  * On success: returns vsi index in PF (positive)
9532  **/
9533 static int i40e_veb_mem_alloc(struct i40e_pf *pf)
9534 {
9535         int ret = -ENOENT;
9536         struct i40e_veb *veb;
9537         int i;
9538
9539         /* Need to protect the allocation of switch elements at the PF level */
9540         mutex_lock(&pf->switch_mutex);
9541
9542         /* VEB list may be fragmented if VEB creation/destruction has
9543          * been happening.  We can afford to do a quick scan to look
9544          * for any free slots in the list.
9545          *
9546          * find next empty veb slot, looping back around if necessary
9547          */
9548         i = 0;
9549         while ((i < I40E_MAX_VEB) && (pf->veb[i] != NULL))
9550                 i++;
9551         if (i >= I40E_MAX_VEB) {
9552                 ret = -ENOMEM;
9553                 goto err_alloc_veb;  /* out of VEB slots! */
9554         }
9555
9556         veb = kzalloc(sizeof(*veb), GFP_KERNEL);
9557         if (!veb) {
9558                 ret = -ENOMEM;
9559                 goto err_alloc_veb;
9560         }
9561         veb->pf = pf;
9562         veb->idx = i;
9563         veb->enabled_tc = 1;
9564
9565         pf->veb[i] = veb;
9566         ret = i;
9567 err_alloc_veb:
9568         mutex_unlock(&pf->switch_mutex);
9569         return ret;
9570 }
9571
/**
 * i40e_switch_branch_release - Delete a branch of the switch tree
 * @branch: where to start deleting
 *
 * This uses recursion to find the tips of the branch to be
 * removed, deleting until we get back to and can delete this VEB.
 **/
static void i40e_switch_branch_release(struct i40e_veb *branch)
{
	struct i40e_pf *pf = branch->pf;
	/* cache seid/idx locally: *branch may be freed mid-function */
	u16 branch_seid = branch->seid;
	u16 veb_idx = branch->idx;
	int i;

	/* release any VEBs on this VEB - RECURSION */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;
		if (pf->veb[i]->uplink_seid == branch->seid)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Release the VSIs on this VEB, but not the owner VSI.
	 *
	 * NOTE: Removing the last VSI on a VEB has the SIDE EFFECT of removing
	 *       the VEB itself, so don't use (*branch) after this loop.
	 */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (!pf->vsi[i])
			continue;
		if (pf->vsi[i]->uplink_seid == branch_seid &&
		   (pf->vsi[i]->flags & I40E_VSI_FLAG_VEB_OWNER) == 0) {
			i40e_vsi_release(pf->vsi[i]);
		}
	}

	/* There's one corner case where the VEB might not have been
	 * removed, so double check it here and remove it if needed.
	 * This case happens if the veb was created from the debugfs
	 * commands and no VSIs were added to it.
	 */
	if (pf->veb[veb_idx])
		i40e_veb_release(pf->veb[veb_idx]);
}
9616
9617 /**
9618  * i40e_veb_clear - remove veb struct
9619  * @veb: the veb to remove
9620  **/
9621 static void i40e_veb_clear(struct i40e_veb *veb)
9622 {
9623         if (!veb)
9624                 return;
9625
9626         if (veb->pf) {
9627                 struct i40e_pf *pf = veb->pf;
9628
9629                 mutex_lock(&pf->switch_mutex);
9630                 if (pf->veb[veb->idx] == veb)
9631                         pf->veb[veb->idx] = NULL;
9632                 mutex_unlock(&pf->switch_mutex);
9633         }
9634
9635         kfree(veb);
9636 }
9637
/**
 * i40e_veb_release - Delete a VEB and free its resources
 * @veb: the VEB being removed
 *
 * Only legal when exactly one VSI remains attached; that VSI is
 * re-pointed at the VEB's own uplink before the switch element is
 * deleted from HW and the sw struct is freed.
 **/
void i40e_veb_release(struct i40e_veb *veb)
{
	struct i40e_vsi *vsi = NULL;
	struct i40e_pf *pf;
	int i, n = 0;

	pf = veb->pf;

	/* find the remaining VSI and check for extras */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->uplink_seid == veb->seid) {
			n++;
			vsi = pf->vsi[i];
		}
	}
	if (n != 1) {
		/* refuse to remove a VEB with 0 or >1 VSIs attached */
		dev_info(&pf->pdev->dev,
			 "can't remove VEB %d with %d VSIs left\n",
			 veb->seid, n);
		return;
	}

	/* move the remaining VSI to uplink veb */
	vsi->flags &= ~I40E_VSI_FLAG_VEB_OWNER;
	if (veb->uplink_seid) {
		vsi->uplink_seid = veb->uplink_seid;
		if (veb->uplink_seid == pf->mac_seid)
			vsi->veb_idx = I40E_NO_VEB;
		else
			vsi->veb_idx = veb->veb_idx;
	} else {
		/* floating VEB: re-home the VSI to the LAN VSI's uplink */
		vsi->uplink_seid = pf->vsi[pf->lan_vsi]->uplink_seid;
		vsi->veb_idx = pf->vsi[pf->lan_vsi]->veb_idx;
	}

	/* delete the HW element, then drop the sw bookkeeping */
	i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
	i40e_veb_clear(veb);
}
9681
/**
 * i40e_add_veb - create the VEB in the switch
 * @veb: the VEB to be instantiated
 * @vsi: the controlling VSI
 *
 * Adds the VEB element via the admin queue, fetches its statistics
 * index and BW info, then records the new relationship in @vsi.
 *
 * Returns 0 on success, -EPERM/-ENOENT on AQ failure.
 **/
static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
{
	struct i40e_pf *pf = veb->pf;
	bool is_default = veb->pf->cur_promisc;
	bool is_cloud = false;
	int ret;

	/* get a VEB from the hardware */
	ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
			      veb->enabled_tc, is_default,
			      is_cloud, &veb->seid, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't add VEB, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}

	/* get statistics counter */
	ret = i40e_aq_get_veb_parameters(&pf->hw, veb->seid, NULL, NULL,
					 &veb->stats_idx, NULL, NULL, NULL);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB statistics idx, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return -EPERM;
	}
	ret = i40e_veb_get_bw_info(veb);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't get VEB bw info, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		/* roll back the HW element we just created */
		i40e_aq_delete_element(&pf->hw, veb->seid, NULL);
		return -ENOENT;
	}

	/* the controlling VSI now hangs off (and owns) this VEB */
	vsi->uplink_seid = veb->seid;
	vsi->veb_idx = veb->idx;
	vsi->flags |= I40E_VSI_FLAG_VEB_OWNER;

	return 0;
}
9732
/**
 * i40e_veb_setup - Set up a VEB
 * @pf: board private structure
 * @flags: VEB setup flags
 * @uplink_seid: the switch element to link to
 * @vsi_seid: the initial VSI seid
 * @enabled_tc: Enabled TC bit-map
 *
 * This allocates the sw VEB structure and links it into the switch
 * It is possible and legal for this to be a duplicate of an already
 * existing VEB.  It is also possible for both uplink and vsi seids
 * to be zero, in order to create a floating VEB.
 *
 * Returns pointer to the successfully allocated VEB sw struct on
 * success, otherwise returns NULL on failure.
 **/
struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags,
				u16 uplink_seid, u16 vsi_seid,
				u8 enabled_tc)
{
	struct i40e_veb *veb, *uplink_veb = NULL;
	int vsi_idx, veb_idx;
	int ret;

	/* if one seid is 0, the other must be 0 to create a floating relay */
	if ((uplink_seid == 0 || vsi_seid == 0) &&
	    (uplink_seid + vsi_seid != 0)) {
		dev_info(&pf->pdev->dev,
			 "one, not both seid's are 0: uplink=%d vsi=%d\n",
			 uplink_seid, vsi_seid);
		return NULL;
	}

	/* make sure there is such a vsi and uplink */
	for (vsi_idx = 0; vsi_idx < pf->num_alloc_vsi; vsi_idx++)
		if (pf->vsi[vsi_idx] && pf->vsi[vsi_idx]->seid == vsi_seid)
			break;
	if (vsi_idx >= pf->num_alloc_vsi && vsi_seid != 0) {
		dev_info(&pf->pdev->dev, "vsi seid %d not found\n",
			 vsi_seid);
		return NULL;
	}

	/* a non-port uplink must resolve to an existing sw VEB */
	if (uplink_seid && uplink_seid != pf->mac_seid) {
		for (veb_idx = 0; veb_idx < I40E_MAX_VEB; veb_idx++) {
			if (pf->veb[veb_idx] &&
			    pf->veb[veb_idx]->seid == uplink_seid) {
				uplink_veb = pf->veb[veb_idx];
				break;
			}
		}
		if (!uplink_veb) {
			dev_info(&pf->pdev->dev,
				 "uplink seid %d not found\n", uplink_seid);
			return NULL;
		}
	}

	/* get veb sw struct */
	veb_idx = i40e_veb_mem_alloc(pf);
	if (veb_idx < 0)
		goto err_alloc;
	veb = pf->veb[veb_idx];
	veb->flags = flags;
	veb->uplink_seid = uplink_seid;
	veb->veb_idx = (uplink_veb ? uplink_veb->idx : I40E_NO_VEB);
	veb->enabled_tc = (enabled_tc ? enabled_tc : 0x1);	/* TC0 minimum */

	/* create the VEB in the switch */
	ret = i40e_add_veb(veb, pf->vsi[vsi_idx]);
	if (ret)
		goto err_veb;
	if (vsi_idx == pf->lan_vsi)
		pf->lan_veb = veb->idx;

	return veb;

err_veb:
	i40e_veb_clear(veb);
err_alloc:
	return NULL;
}
9815
/**
 * i40e_setup_pf_switch_element - set PF vars based on switch type
 * @pf: board private structure
 * @ele: element we are building info from
 * @num_reported: total number of elements
 * @printconfig: should we print the contents
 *
 * helper function to assist in extracting a few useful SEID values.
 **/
static void i40e_setup_pf_switch_element(struct i40e_pf *pf,
				struct i40e_aqc_switch_config_element_resp *ele,
				u16 num_reported, bool printconfig)
{
	/* element fields arrive little-endian from the AQ */
	u16 downlink_seid = le16_to_cpu(ele->downlink_seid);
	u16 uplink_seid = le16_to_cpu(ele->uplink_seid);
	u8 element_type = ele->element_type;
	u16 seid = le16_to_cpu(ele->seid);

	if (printconfig)
		dev_info(&pf->pdev->dev,
			 "type=%d seid=%d uplink=%d downlink=%d\n",
			 element_type, seid, uplink_seid, downlink_seid);

	switch (element_type) {
	case I40E_SWITCH_ELEMENT_TYPE_MAC:
		pf->mac_seid = seid;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VEB:
		/* Main VEB? */
		if (uplink_seid != pf->mac_seid)
			break;
		if (pf->lan_veb == I40E_NO_VEB) {
			int v;

			/* find existing or else empty VEB */
			for (v = 0; v < I40E_MAX_VEB; v++) {
				if (pf->veb[v] && (pf->veb[v]->seid == seid)) {
					pf->lan_veb = v;
					break;
				}
			}
			if (pf->lan_veb == I40E_NO_VEB) {
				v = i40e_veb_mem_alloc(pf);
				if (v < 0)
					break;
				pf->lan_veb = v;
			}
		}

		/* record/refresh the LAN VEB's identity from the element */
		pf->veb[pf->lan_veb]->seid = seid;
		pf->veb[pf->lan_veb]->uplink_seid = pf->mac_seid;
		pf->veb[pf->lan_veb]->pf = pf;
		pf->veb[pf->lan_veb]->veb_idx = I40E_NO_VEB;
		break;
	case I40E_SWITCH_ELEMENT_TYPE_VSI:
		if (num_reported != 1)
			break;
		/* This is immediately after a reset so we can assume this is
		 * the PF's VSI
		 */
		pf->mac_seid = uplink_seid;
		pf->pf_seid = downlink_seid;
		pf->main_vsi_seid = seid;
		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "pf_seid=%d main_vsi_seid=%d\n",
				 pf->pf_seid, pf->main_vsi_seid);
		break;
	case I40E_SWITCH_ELEMENT_TYPE_PF:
	case I40E_SWITCH_ELEMENT_TYPE_VF:
	case I40E_SWITCH_ELEMENT_TYPE_EMP:
	case I40E_SWITCH_ELEMENT_TYPE_BMC:
	case I40E_SWITCH_ELEMENT_TYPE_PE:
	case I40E_SWITCH_ELEMENT_TYPE_PA:
		/* ignore these for now */
		break;
	default:
		dev_info(&pf->pdev->dev, "unknown element type=%d seid=%d\n",
			 element_type, seid);
		break;
	}
}
9898
/**
 * i40e_fetch_switch_configuration - Get switch config from firmware
 * @pf: board private structure
 * @printconfig: should we print the contents
 *
 * Get the current switch configuration from the device and
 * extract a few useful SEID values.  The AQ response is paged; we
 * loop until the firmware reports no further elements (next_seid == 0).
 *
 * Returns 0 on success, -ENOMEM/-ENOENT on failure.
 **/
int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
{
	struct i40e_aqc_get_switch_config_resp *sw_config;
	u16 next_seid = 0;
	int ret = 0;
	u8 *aq_buf;
	int i;

	aq_buf = kzalloc(I40E_AQ_LARGE_BUF, GFP_KERNEL);
	if (!aq_buf)
		return -ENOMEM;

	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	do {
		u16 num_reported, num_total;

		/* next_seid is updated by the AQ call for the next page */
		ret = i40e_aq_get_switch_config(&pf->hw, sw_config,
						I40E_AQ_LARGE_BUF,
						&next_seid, NULL);
		if (ret) {
			dev_info(&pf->pdev->dev,
				 "get switch config failed err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, ret),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
			kfree(aq_buf);
			return -ENOENT;
		}

		num_reported = le16_to_cpu(sw_config->header.num_reported);
		num_total = le16_to_cpu(sw_config->header.num_total);

		if (printconfig)
			dev_info(&pf->pdev->dev,
				 "header: %d reported %d total\n",
				 num_reported, num_total);

		for (i = 0; i < num_reported; i++) {
			struct i40e_aqc_switch_config_element_resp *ele =
				&sw_config->element[i];

			i40e_setup_pf_switch_element(pf, ele, num_reported,
						     printconfig);
		}
	} while (next_seid != 0);

	kfree(aq_buf);
	return ret;
}
9956
/**
 * i40e_setup_pf_switch - Setup the HW switch on startup or after reset
 * @pf: board private structure
 * @reinit: if the Main VSI needs to re-initialized.
 *
 * Fetches the current switch layout, (re)creates the main LAN VSI,
 * then sets up FDIR, filter control, RSS, link state and PTP.
 *
 * Returns 0 on success, negative value on failure
 **/
static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
{
	int ret;

	/* find out what's out there already */
	ret = i40e_fetch_switch_configuration(pf, false);
	if (ret) {
		dev_info(&pf->pdev->dev,
			 "couldn't fetch switch config, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, ret),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
		return ret;
	}
	i40e_pf_reset_stats(pf);

	/* first time setup */
	if (pf->lan_vsi == I40E_NO_VSI || reinit) {
		struct i40e_vsi *vsi = NULL;
		u16 uplink_seid;

		/* Set up the PF VSI associated with the PF's main VSI
		 * that is already in the HW switch
		 */
		if (pf->lan_veb != I40E_NO_VEB && pf->veb[pf->lan_veb])
			uplink_seid = pf->veb[pf->lan_veb]->seid;
		else
			uplink_seid = pf->mac_seid;
		if (pf->lan_vsi == I40E_NO_VSI)
			vsi = i40e_vsi_setup(pf, I40E_VSI_MAIN, uplink_seid, 0);
		else if (reinit)
			vsi = i40e_vsi_reinit_setup(pf->vsi[pf->lan_vsi]);
		if (!vsi) {
			dev_info(&pf->pdev->dev, "setup of MAIN VSI failed\n");
			i40e_fdir_teardown(pf);
			return -EAGAIN;
		}
	} else {
		/* force a reset of TC and queue layout configurations */
		u8 enabled_tc = pf->vsi[pf->lan_vsi]->tc_config.enabled_tc;

		pf->vsi[pf->lan_vsi]->tc_config.enabled_tc = 0;
		pf->vsi[pf->lan_vsi]->seid = pf->main_vsi_seid;
		i40e_vsi_config_tc(pf->vsi[pf->lan_vsi], enabled_tc);
	}
	i40e_vlan_stripping_disable(pf->vsi[pf->lan_vsi]);

	/* Flow Director sideband VSI, if enabled */
	i40e_fdir_sb_setup(pf);

	/* Setup static PF queue filter control settings */
	ret = i40e_setup_pf_filter_control(pf);
	if (ret) {
		dev_info(&pf->pdev->dev, "setup_pf_filter_control failed: %d\n",
			 ret);
		/* Failure here should not stop continuing other steps */
	}

	/* enable RSS in the HW, even for only one queue, as the stack can use
	 * the hash
	 */
	if ((pf->flags & I40E_FLAG_RSS_ENABLED))
		i40e_config_rss(pf);

	/* fill in link information and enable LSE reporting */
	i40e_update_link_info(&pf->hw);
	i40e_link_event(pf);

	/* Initialize user-specific link properties */
	pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
				  I40E_AQ_AN_COMPLETED) ? true : false);

	i40e_ptp_init(pf);

	return ret;
}
10038
10039 /**
10040  * i40e_determine_queue_usage - Work out queue distribution
10041  * @pf: board private structure
10042  **/
10043 static void i40e_determine_queue_usage(struct i40e_pf *pf)
10044 {
10045         int queues_left;
10046
10047         pf->num_lan_qps = 0;
10048 #ifdef I40E_FCOE
10049         pf->num_fcoe_qps = 0;
10050 #endif
10051
10052         /* Find the max queues to be put into basic use.  We'll always be
10053          * using TC0, whether or not DCB is running, and TC0 will get the
10054          * big RSS set.
10055          */
10056         queues_left = pf->hw.func_caps.num_tx_qp;
10057
10058         if ((queues_left == 1) ||
10059             !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
10060                 /* one qp for PF, no queues for anything else */
10061                 queues_left = 0;
10062                 pf->rss_size = pf->num_lan_qps = 1;
10063
10064                 /* make sure all the fancies are disabled */
10065                 pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
10066 #ifdef I40E_FCOE
10067                                I40E_FLAG_FCOE_ENABLED   |
10068 #endif
10069                                I40E_FLAG_FD_SB_ENABLED  |
10070                                I40E_FLAG_FD_ATR_ENABLED |
10071                                I40E_FLAG_DCB_CAPABLE    |
10072                                I40E_FLAG_SRIOV_ENABLED  |
10073                                I40E_FLAG_VMDQ_ENABLED);
10074         } else if (!(pf->flags & (I40E_FLAG_RSS_ENABLED |
10075                                   I40E_FLAG_FD_SB_ENABLED |
10076                                   I40E_FLAG_FD_ATR_ENABLED |
10077                                   I40E_FLAG_DCB_CAPABLE))) {
10078                 /* one qp for PF */
10079                 pf->rss_size = pf->num_lan_qps = 1;
10080                 queues_left -= pf->num_lan_qps;
10081
10082                 pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
10083 #ifdef I40E_FCOE
10084                                I40E_FLAG_FCOE_ENABLED   |
10085 #endif
10086                                I40E_FLAG_FD_SB_ENABLED  |
10087                                I40E_FLAG_FD_ATR_ENABLED |
10088                                I40E_FLAG_DCB_ENABLED    |
10089                                I40E_FLAG_VMDQ_ENABLED);
10090         } else {
10091                 /* Not enough queues for all TCs */
10092                 if ((pf->flags & I40E_FLAG_DCB_CAPABLE) &&
10093                     (queues_left < I40E_MAX_TRAFFIC_CLASS)) {
10094                         pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
10095                         dev_info(&pf->pdev->dev, "not enough queues for DCB. DCB is disabled.\n");
10096                 }
10097                 pf->num_lan_qps = max_t(int, pf->rss_size_max,
10098                                         num_online_cpus());
10099                 pf->num_lan_qps = min_t(int, pf->num_lan_qps,
10100                                         pf->hw.func_caps.num_tx_qp);
10101
10102                 queues_left -= pf->num_lan_qps;
10103         }
10104
10105 #ifdef I40E_FCOE
10106         if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
10107                 if (I40E_DEFAULT_FCOE <= queues_left) {
10108                         pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
10109                 } else if (I40E_MINIMUM_FCOE <= queues_left) {
10110                         pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
10111                 } else {
10112                         pf->num_fcoe_qps = 0;
10113                         pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
10114                         dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
10115                 }
10116
10117                 queues_left -= pf->num_fcoe_qps;
10118         }
10119
10120 #endif
10121         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10122                 if (queues_left > 1) {
10123                         queues_left -= 1; /* save 1 queue for FD */
10124                 } else {
10125                         pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
10126                         dev_info(&pf->pdev->dev, "not enough queues for Flow Director. Flow Director feature is disabled\n");
10127                 }
10128         }
10129
10130         if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
10131             pf->num_vf_qps && pf->num_req_vfs && queues_left) {
10132                 pf->num_req_vfs = min_t(int, pf->num_req_vfs,
10133                                         (queues_left / pf->num_vf_qps));
10134                 queues_left -= (pf->num_req_vfs * pf->num_vf_qps);
10135         }
10136
10137         if ((pf->flags & I40E_FLAG_VMDQ_ENABLED) &&
10138             pf->num_vmdq_vsis && pf->num_vmdq_qps && queues_left) {
10139                 pf->num_vmdq_vsis = min_t(int, pf->num_vmdq_vsis,
10140                                           (queues_left / pf->num_vmdq_qps));
10141                 queues_left -= (pf->num_vmdq_vsis * pf->num_vmdq_qps);
10142         }
10143
10144         pf->queues_left = queues_left;
10145         dev_dbg(&pf->pdev->dev,
10146                 "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
10147                 pf->hw.func_caps.num_tx_qp,
10148                 !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
10149                 pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
10150                 pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
10151 #ifdef I40E_FCOE
10152         dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
10153 #endif
10154 }
10155
10156 /**
10157  * i40e_setup_pf_filter_control - Setup PF static filter control
10158  * @pf: PF to be setup
10159  *
10160  * i40e_setup_pf_filter_control sets up a PF's initial filter control
10161  * settings. If PE/FCoE are enabled then it will also set the per PF
10162  * based filter sizes required for them. It also enables Flow director,
10163  * ethertype and macvlan type filter settings for the pf.
10164  *
10165  * Returns 0 on success, negative on failure
10166  **/
10167 static int i40e_setup_pf_filter_control(struct i40e_pf *pf)
10168 {
10169         struct i40e_filter_control_settings *settings = &pf->filter_settings;
10170
10171         settings->hash_lut_size = I40E_HASH_LUT_SIZE_128;
10172
10173         /* Flow Director is enabled */
10174         if (pf->flags & (I40E_FLAG_FD_SB_ENABLED | I40E_FLAG_FD_ATR_ENABLED))
10175                 settings->enable_fdir = true;
10176
10177         /* Ethtype and MACVLAN filters enabled for PF */
10178         settings->enable_ethtype = true;
10179         settings->enable_macvlan = true;
10180
10181         if (i40e_set_filter_control(&pf->hw, settings))
10182                 return -ENOENT;
10183
10184         return 0;
10185 }
10186
10187 #define INFO_STRING_LEN 255
10188 static void i40e_print_features(struct i40e_pf *pf)
10189 {
10190         struct i40e_hw *hw = &pf->hw;
10191         char *buf, *string;
10192
10193         string = kzalloc(INFO_STRING_LEN, GFP_KERNEL);
10194         if (!string) {
10195                 dev_err(&pf->pdev->dev, "Features string allocation failed\n");
10196                 return;
10197         }
10198
10199         buf = string;
10200
10201         buf += sprintf(string, "Features: PF-id[%d] ", hw->pf_id);
10202 #ifdef CONFIG_PCI_IOV
10203         buf += sprintf(buf, "VFs: %d ", pf->num_req_vfs);
10204 #endif
10205         buf += sprintf(buf, "VSIs: %d QP: %d RX: %s ",
10206                        pf->hw.func_caps.num_vsis,
10207                        pf->vsi[pf->lan_vsi]->num_queue_pairs,
10208                        pf->flags & I40E_FLAG_RX_PS_ENABLED ? "PS" : "1BUF");
10209
10210         if (pf->flags & I40E_FLAG_RSS_ENABLED)
10211                 buf += sprintf(buf, "RSS ");
10212         if (pf->flags & I40E_FLAG_FD_ATR_ENABLED)
10213                 buf += sprintf(buf, "FD_ATR ");
10214         if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
10215                 buf += sprintf(buf, "FD_SB ");
10216                 buf += sprintf(buf, "NTUPLE ");
10217         }
10218         if (pf->flags & I40E_FLAG_DCB_CAPABLE)
10219                 buf += sprintf(buf, "DCB ");
10220 #if IS_ENABLED(CONFIG_VXLAN)
10221         buf += sprintf(buf, "VxLAN ");
10222 #endif
10223         if (pf->flags & I40E_FLAG_PTP)
10224                 buf += sprintf(buf, "PTP ");
10225 #ifdef I40E_FCOE
10226         if (pf->flags & I40E_FLAG_FCOE_ENABLED)
10227                 buf += sprintf(buf, "FCOE ");
10228 #endif
10229         if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
10230                 buf += sprintf(buf, "VEB ");
10231         else
10232                 buf += sprintf(buf, "VEPA ");
10233
10234         BUG_ON(buf > (string + INFO_STRING_LEN));
10235         dev_info(&pf->pdev->dev, "%s\n", string);
10236         kfree(string);
10237 }
10238
/**
 * i40e_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in i40e_pci_tbl
 *
 * i40e_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 **/
static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct i40e_aq_get_phy_abilities_resp abilities;
	struct i40e_pf *pf;
	struct i40e_hw *hw;
	/* static: persists across probe calls, counts PFs this driver
	 * instance has claimed so far (used for pf->instance below)
	 */
	static u16 pfs_found;
	u16 wol_nvm_bits;
	u16 link_status;
	int err;
	u32 len;
	u32 val;
	u32 i;
	u8 set_fc_aq_fail;

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* set up for high or low dma */
	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (err) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev,
				"DMA configuration failed: 0x%x\n", err);
			goto err_dma;
		}
	}

	/* set up pci connections */
	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
					   IORESOURCE_MEM), i40e_driver_name);
	if (err) {
		dev_info(&pdev->dev,
			 "pci_request_selected_regions failed %d\n", err);
		goto err_pci_reg;
	}

	pci_enable_pcie_error_reporting(pdev);
	pci_set_master(pdev);

	/* Now that we have a PCI connection, we need to do the
	 * low level device setup.  This is primarily setting up
	 * the Admin Queue structures and then querying for the
	 * device's current profile information.
	 */
	pf = kzalloc(sizeof(*pf), GFP_KERNEL);
	if (!pf) {
		err = -ENOMEM;
		goto err_pf_alloc;
	}
	pf->next_vsi = 0;
	pf->pdev = pdev;
	set_bit(__I40E_DOWN, &pf->state);

	hw = &pf->hw;
	hw->back = pf;

	/* map only as much CSR space as the device exposes, capped */
	pf->ioremap_len = min_t(int, pci_resource_len(pdev, 0),
				I40E_MAX_CSR_SPACE);

	hw->hw_addr = ioremap(pci_resource_start(pdev, 0), pf->ioremap_len);
	if (!hw->hw_addr) {
		err = -EIO;
		dev_info(&pdev->dev, "ioremap(0x%04x, 0x%04x) failed: 0x%x\n",
			 (unsigned int)pci_resource_start(pdev, 0),
			 pf->ioremap_len, err);
		goto err_ioremap;
	}
	hw->vendor_id = pdev->vendor;
	hw->device_id = pdev->device;
	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
	hw->subsystem_vendor_id = pdev->subsystem_vendor;
	hw->subsystem_device_id = pdev->subsystem_device;
	hw->bus.device = PCI_SLOT(pdev->devfn);
	hw->bus.func = PCI_FUNC(pdev->devfn);
	pf->instance = pfs_found;

	if (debug != -1) {
		/* NOTE(review): the first assignment is immediately
		 * overwritten by 'debug' on the next line — looks
		 * redundant; confirm intended precedence of the two
		 * debug-mask sources.
		 */
		pf->msg_enable = pf->hw.debug_mask;
		pf->msg_enable = debug;
	}

	/* do a special CORER for clearing PXE mode once at init */
	if (hw->revision_id == 0 &&
	    (rd32(hw, I40E_GLLAN_RCTL_0) & I40E_GLLAN_RCTL_0_PXE_MODE_MASK)) {
		wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_CORER_MASK);
		i40e_flush(hw);
		msleep(200);
		pf->corer_count++;

		i40e_clear_pxe_mode(hw);
	}

	/* Reset here to make sure all is clean and to define PF 'n' */
	i40e_clear_hw(hw);
	err = i40e_pf_reset(hw);
	if (err) {
		dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
		goto err_pf_reset;
	}
	pf->pfr_count++;

	hw->aq.num_arq_entries = I40E_AQ_LEN;
	hw->aq.num_asq_entries = I40E_AQ_LEN;
	hw->aq.arq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	hw->aq.asq_buf_size = I40E_MAX_AQ_BUF_SIZE;
	pf->adminq_work_limit = I40E_AQ_WORK_LIMIT;

	snprintf(pf->int_name, sizeof(pf->int_name) - 1,
		 "%s-%s:misc",
		 dev_driver_string(&pf->pdev->dev), dev_name(&pdev->dev));

	err = i40e_init_shared_code(hw);
	if (err) {
		dev_warn(&pdev->dev, "unidentified MAC or BLANK NVM: %d\n",
			 err);
		goto err_pf_reset;
	}

	/* set up a default setting for link flow control */
	pf->hw.fc.requested_mode = I40E_FC_NONE;

	/* set up the locks for the AQ, do this only once in probe
	 * and destroy them only once in remove
	 */
	mutex_init(&hw->aq.asq_mutex);
	mutex_init(&hw->aq.arq_mutex);

	err = i40e_init_adminq(hw);

	/* provide nvm, fw, api versions */
	/* deliberately printed before err is checked so the version
	 * banner lands in the log even when AQ init failed
	 */
	dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
		 hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
		 hw->aq.api_maj_ver, hw->aq.api_min_ver,
		 i40e_nvm_version_str(hw));

	if (err) {
		dev_info(&pdev->dev,
			 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
		goto err_pf_reset;
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
		dev_info(&pdev->dev,
			 "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
	else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
		 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
		dev_info(&pdev->dev,
			 "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");

	i40e_verify_eeprom(pf);

	/* Rev 0 hardware was never productized */
	if (hw->revision_id < 1)
		dev_warn(&pdev->dev, "This device is a pre-production adapter/LOM. Please be aware there may be issues with your hardware. If you are experiencing problems please contact your Intel or hardware representative who provided you with this hardware.\n");

	i40e_clear_pxe_mode(hw);
	err = i40e_get_capabilities(pf);
	if (err)
		goto err_adminq_setup;

	err = i40e_sw_init(pf);
	if (err) {
		dev_info(&pdev->dev, "sw_init failed: %d\n", err);
		goto err_sw_init;
	}

	err = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
				hw->func_caps.num_rx_qp,
				pf->fcoe_hmc_cntx_num, pf->fcoe_hmc_filt_num);
	if (err) {
		dev_info(&pdev->dev, "init_lan_hmc failed: %d\n", err);
		goto err_init_lan_hmc;
	}

	err = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (err) {
		dev_info(&pdev->dev, "configure_lan_hmc failed: %d\n", err);
		err = -ENOENT;
		goto err_configure_lan_hmc;
	}

	/* Disable LLDP for NICs that have firmware versions lower than v4.3.
	 * Ignore error return codes because if it was already disabled via
	 * hardware settings this will fail
	 */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
		i40e_aq_stop_lldp(hw, true, NULL);
	}

	i40e_get_mac_addr(hw, hw->mac.addr);
	if (!is_valid_ether_addr(hw->mac.addr)) {
		dev_info(&pdev->dev, "invalid MAC address %pM\n", hw->mac.addr);
		err = -EIO;
		goto err_mac_addr;
	}
	dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
	ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
	i40e_get_port_mac_addr(hw, hw->mac.port_addr);
	if (is_valid_ether_addr(hw->mac.port_addr))
		pf->flags |= I40E_FLAG_PORT_ID_VALID;
#ifdef I40E_FCOE
	err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
	if (err)
		dev_info(&pdev->dev,
			 "(non-fatal) SAN MAC retrieval failed: %d\n", err);
	/* fall back to the LAN MAC when the SAN MAC is unusable */
	if (!is_valid_ether_addr(hw->mac.san_addr)) {
		dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
			 hw->mac.san_addr);
		ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
	}
	dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
#endif /* I40E_FCOE */

	pci_set_drvdata(pdev, pf);
	pci_save_state(pdev);
#ifdef CONFIG_I40E_DCB
	err = i40e_init_pf_dcb(pf);
	if (err) {
		dev_info(&pdev->dev, "DCB init failed %d, disabled\n", err);
		pf->flags &= ~I40E_FLAG_DCB_CAPABLE;
		/* Continue without DCB enabled */
	}
#endif /* CONFIG_I40E_DCB */

	/* set up periodic task facility */
	setup_timer(&pf->service_timer, i40e_service_timer, (unsigned long)pf);
	pf->service_timer_period = HZ;

	INIT_WORK(&pf->service_task, i40e_service_task);
	clear_bit(__I40E_SERVICE_SCHED, &pf->state);
	pf->flags |= I40E_FLAG_NEED_LINK_UPDATE;

	/* NVM bit on means WoL disabled for the port */
	i40e_read_nvm_word(hw, I40E_SR_NVM_WAKE_ON_LAN, &wol_nvm_bits);
	if ((1 << hw->port) & wol_nvm_bits || hw->partition_id != 1)
		pf->wol_en = false;
	else
		pf->wol_en = true;
	device_set_wakeup_enable(&pf->pdev->dev, pf->wol_en);

	/* set up the main switch operations */
	i40e_determine_queue_usage(pf);
	err = i40e_init_interrupt_scheme(pf);
	if (err)
		goto err_switch_setup;

	/* The number of VSIs reported by the FW is the minimum guaranteed
	 * to us; HW supports far more and we share the remaining pool with
	 * the other PFs. We allocate space for more than the guarantee with
	 * the understanding that we might not get them all later.
	 */
	if (pf->hw.func_caps.num_vsis < I40E_MIN_VSI_ALLOC)
		pf->num_alloc_vsi = I40E_MIN_VSI_ALLOC;
	else
		pf->num_alloc_vsi = pf->hw.func_caps.num_vsis;

	/* Set up the *vsi struct and our local tracking of the MAIN PF vsi. */
	len = sizeof(struct i40e_vsi *) * pf->num_alloc_vsi;
	pf->vsi = kzalloc(len, GFP_KERNEL);
	if (!pf->vsi) {
		err = -ENOMEM;
		goto err_switch_setup;
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		if (pci_num_vf(pdev))
			pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
	}
#endif
	err = i40e_setup_pf_switch(pf, false);
	if (err) {
		dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
		goto err_vsis;
	}

	/* Make sure flow control is set according to current settings */
	err = i40e_set_fc(hw, &set_fc_aq_fail, true);
	/* set_fc_aq_fail is a bitmask of which AQ step failed; each
	 * failure is only logged at debug level, not treated as fatal
	 */
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_GET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_phy_cap\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_SET)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on set_phy_config\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));
	if (set_fc_aq_fail & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_dbg(&pf->pdev->dev,
			"Set fc with err %s aq_err %s on get_link_info\n",
			i40e_stat_str(hw, err),
			i40e_aq_str(hw, hw->aq.asq_last_status));

	/* if FDIR VSI was set up, start it now */
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i] && pf->vsi[i]->type == I40E_VSI_FDIR) {
			i40e_vsi_open(pf->vsi[i]);
			break;
		}
	}

	/* driver is only interested in link up/down and module qualification
	 * reports from firmware
	 */
	err = i40e_aq_set_phy_int_mask(&pf->hw,
				       I40E_AQ_EVENT_LINK_UPDOWN |
				       I40E_AQ_EVENT_MODULE_QUAL_FAIL, NULL);
	if (err)
		dev_info(&pf->pdev->dev, "set phy mask fail, err %s aq_err %s\n",
			 i40e_stat_str(&pf->hw, err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Reconfigure hardware for allowing smaller MSS in the case
	 * of TSO, so that we avoid the MDD being fired and causing
	 * a reset in the case of small MSS+TSO.
	 */
	val = rd32(hw, I40E_REG_MSS);
	if ((val & I40E_REG_MSS_MIN_MASK) > I40E_64BYTE_MSS) {
		val &= ~I40E_REG_MSS_MIN_MASK;
		val |= I40E_64BYTE_MSS;
		wr32(hw, I40E_REG_MSS, val);
	}

	/* older firmware needs an explicit autoneg restart after setup */
	if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
	    (pf->hw.aq.fw_maj_ver < 4)) {
		msleep(75);
		err = i40e_aq_set_link_restart_an(&pf->hw, true, NULL);
		if (err)
			dev_info(&pf->pdev->dev, "link restart failed, err %s aq_err %s\n",
				 i40e_stat_str(&pf->hw, err),
				 i40e_aq_str(&pf->hw,
					     pf->hw.aq.asq_last_status));
	}
	/* The main driver is (mostly) up and happy. We need to set this state
	 * before setting up the misc vector or we get a race and the vector
	 * ends up disabled forever.
	 */
	clear_bit(__I40E_DOWN, &pf->state);

	/* In case of MSIX we are going to setup the misc vector right here
	 * to handle admin queue events etc. In case of legacy and MSI
	 * the misc functionality and queue processing is combined in
	 * the same vector and that gets setup at open.
	 */
	if (pf->flags & I40E_FLAG_MSIX_ENABLED) {
		err = i40e_setup_misc_vector(pf);
		if (err) {
			dev_info(&pdev->dev,
				 "setup of misc vector failed: %d\n", err);
			goto err_vsis;
		}
	}

#ifdef CONFIG_PCI_IOV
	/* prep for VF support */
	if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
	    (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
	    !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
		u32 val;

		/* disable link interrupts for VFs */
		val = rd32(hw, I40E_PFGEN_PORTMDIO_NUM);
		val &= ~I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK;
		wr32(hw, I40E_PFGEN_PORTMDIO_NUM, val);
		i40e_flush(hw);

		if (pci_num_vf(pdev)) {
			dev_info(&pdev->dev,
				 "Active VFs found, allocating resources.\n");
			err = i40e_alloc_vfs(pf, pci_num_vf(pdev));
			if (err)
				dev_info(&pdev->dev,
					 "Error %d allocating resources for existing VFs\n",
					 err);
		}
	}
#endif /* CONFIG_PCI_IOV */

	pfs_found++;

	i40e_dbg_pf_init(pf);

	/* tell the firmware that we're starting */
	i40e_send_version(pf);

	/* since everything's happy, start the service_task timer */
	mod_timer(&pf->service_timer,
		  round_jiffies(jiffies + pf->service_timer_period));

#ifdef I40E_FCOE
	/* create FCoE interface */
	i40e_fcoe_vsi_setup(pf);

#endif
#define PCI_SPEED_SIZE 8
#define PCI_WIDTH_SIZE 8
	/* Devices on the IOSF bus do not have this information
	 * and will report PCI Gen 1 x 1 by default so don't bother
	 * checking them.
	 */
	if (!(pf->flags & I40E_FLAG_NO_PCI_LINK_CHECK)) {
		char speed[PCI_SPEED_SIZE] = "Unknown";
		char width[PCI_WIDTH_SIZE] = "Unknown";

		/* Get the negotiated link width and speed from PCI config
		 * space
		 */
		pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA,
					  &link_status);

		i40e_set_pci_config_data(hw, link_status);

		switch (hw->bus.speed) {
		case i40e_bus_speed_8000:
			strncpy(speed, "8.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_5000:
			strncpy(speed, "5.0", PCI_SPEED_SIZE); break;
		case i40e_bus_speed_2500:
			strncpy(speed, "2.5", PCI_SPEED_SIZE); break;
		default:
			break;
		}
		switch (hw->bus.width) {
		case i40e_bus_width_pcie_x8:
			strncpy(width, "8", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x4:
			strncpy(width, "4", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x2:
			strncpy(width, "2", PCI_WIDTH_SIZE); break;
		case i40e_bus_width_pcie_x1:
			strncpy(width, "1", PCI_WIDTH_SIZE); break;
		default:
			break;
		}

		dev_info(&pdev->dev, "PCI-Express: Speed %sGT/s Width x%s\n",
			 speed, width);

		if (hw->bus.width < i40e_bus_width_pcie_x8 ||
		    hw->bus.speed < i40e_bus_speed_8000) {
			dev_warn(&pdev->dev, "PCI-Express bandwidth available for this device may be insufficient for optimal performance.\n");
			dev_warn(&pdev->dev, "Please move the device to a different PCI-e link with more lanes and/or higher transfer rate.\n");
		}
	}

	/* get the requested speeds from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, false, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get requested speeds ret =  %s last_status =  %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.link_info.requested_speeds = abilities.link_speed;

	/* get the supported phy types from the fw */
	err = i40e_aq_get_phy_capabilities(hw, false, true, &abilities, NULL);
	if (err)
		dev_dbg(&pf->pdev->dev, "get supported phy types ret =  %s last_status =  %s\n",
			i40e_stat_str(&pf->hw, err),
			i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));
	pf->hw.phy.phy_types = le32_to_cpu(abilities.phy_type);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
	 * PAUSE or PFC frames and potentially controlling traffic for other
	 * PF/VF VSIs.
	 * The FW can still send Flow control frames if enabled.
	 */
	i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
						       pf->main_vsi_seid);

	/* print a string summarizing features */
	i40e_print_features(pf);

	return 0;

	/* Unwind what we've done if something failed in the setup */
err_vsis:
	set_bit(__I40E_DOWN, &pf->state);
	i40e_clear_interrupt_scheme(pf);
	kfree(pf->vsi);
err_switch_setup:
	i40e_reset_interrupt_capability(pf);
	del_timer_sync(&pf->service_timer);
err_mac_addr:
err_configure_lan_hmc:
	(void)i40e_shutdown_lan_hmc(hw);
err_init_lan_hmc:
	kfree(pf->qp_pile);
err_sw_init:
err_adminq_setup:
	(void)i40e_shutdown_adminq(hw);
	/* NOTE(review): the AQ mutexes initialized earlier are only
	 * destroyed in i40e_remove(), not on these error paths —
	 * verify whether mutex_destroy is needed here as well.
	 */
err_pf_reset:
	iounmap(hw->hw_addr);
err_ioremap:
	kfree(pf);
err_pf_alloc:
	pci_disable_pcie_error_reporting(pdev);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
err_pci_reg:
err_dma:
	pci_disable_device(pdev);
	return err;
}
10763
/**
 * i40e_remove - Device removal routine
 * @pdev: PCI device information struct
 *
 * i40e_remove is called by the PCI subsystem to alert the driver
 * that is should release a PCI device.  This could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 *
 * Teardown order matters here: stop timers/work and VFs first, then
 * release VSIs and switch elements, then HMC, then the admin queue,
 * and only then free software state and unmap the BAR.
 **/
static void i40e_remove(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;
	i40e_status ret_code;
	int i;

	i40e_dbg_pf_exit(pf);

	i40e_ptp_stop(pf);

	/* Disable RSS in hw */
	wr32(hw, I40E_PFQF_HENA(0), 0);
	wr32(hw, I40E_PFQF_HENA(1), 0);

	/* no more scheduling of any task */
	set_bit(__I40E_DOWN, &pf->state);
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);

	if (pf->flags & I40E_FLAG_SRIOV_ENABLED) {
		i40e_free_vfs(pf);
		pf->flags &= ~I40E_FLAG_SRIOV_ENABLED;
	}

	i40e_fdir_teardown(pf);

	/* If there is a switch structure or any orphans, remove them.
	 * This will leave only the PF's VSI remaining.
	 */
	for (i = 0; i < I40E_MAX_VEB; i++) {
		if (!pf->veb[i])
			continue;

		/* release only top-level branches (uplinked to the MAC
		 * or orphaned); their children go with them
		 */
		if (pf->veb[i]->uplink_seid == pf->mac_seid ||
		    pf->veb[i]->uplink_seid == 0)
			i40e_switch_branch_release(pf->veb[i]);
	}

	/* Now we can shutdown the PF's VSI, just before we kill
	 * adminq and hmc.
	 */
	if (pf->vsi[pf->lan_vsi])
		i40e_vsi_release(pf->vsi[pf->lan_vsi]);

	/* shutdown and destroy the HMC */
	/* failures below are logged only; remove cannot back out */
	if (pf->hw.hmc.hmc_obj) {
		ret_code = i40e_shutdown_lan_hmc(&pf->hw);
		if (ret_code)
			dev_warn(&pdev->dev,
				 "Failed to destroy the HMC resources: %d\n",
				 ret_code);
	}

	/* shutdown the adminq */
	ret_code = i40e_shutdown_adminq(&pf->hw);
	if (ret_code)
		dev_warn(&pdev->dev,
			 "Failed to destroy the Admin Queue resources: %d\n",
			 ret_code);

	/* destroy the locks only once, here */
	mutex_destroy(&hw->aq.arq_mutex);
	mutex_destroy(&hw->aq.asq_mutex);

	/* Clear all dynamic memory lists of rings, q_vectors, and VSIs */
	/* rtnl lock guards against netdev operations racing the clears */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	for (i = 0; i < pf->num_alloc_vsi; i++) {
		if (pf->vsi[i]) {
			i40e_vsi_clear_rings(pf->vsi[i]);
			i40e_vsi_clear(pf->vsi[i]);
			pf->vsi[i] = NULL;
		}
	}
	rtnl_unlock();

	for (i = 0; i < I40E_MAX_VEB; i++) {
		kfree(pf->veb[i]);
		pf->veb[i] = NULL;
	}

	kfree(pf->qp_pile);
	kfree(pf->vsi);

	iounmap(pf->hw.hw_addr);
	kfree(pf);
	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));

	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
}
10866
10867 /**
10868  * i40e_pci_error_detected - warning that something funky happened in PCI land
10869  * @pdev: PCI device information struct
10870  *
10871  * Called to warn that something happened and the error handling steps
10872  * are in progress.  Allows the driver to quiesce things, be ready for
10873  * remediation.
10874  **/
10875 static pci_ers_result_t i40e_pci_error_detected(struct pci_dev *pdev,
10876                                                 enum pci_channel_state error)
10877 {
10878         struct i40e_pf *pf = pci_get_drvdata(pdev);
10879
10880         dev_info(&pdev->dev, "%s: error %d\n", __func__, error);
10881
10882         if (!pf) {
10883                 dev_info(&pdev->dev,
10884                          "Cannot recover - error happened during device probe\n");
10885                 return PCI_ERS_RESULT_DISCONNECT;
10886         }
10887
10888         /* shutdown all operations */
10889         if (!test_bit(__I40E_SUSPENDED, &pf->state)) {
10890                 rtnl_lock();
10891                 i40e_prep_for_reset(pf);
10892                 rtnl_unlock();
10893         }
10894
10895         /* Request a slot reset */
10896         return PCI_ERS_RESULT_NEED_RESET;
10897 }
10898
10899 /**
10900  * i40e_pci_error_slot_reset - a PCI slot reset just happened
10901  * @pdev: PCI device information struct
10902  *
10903  * Called to find if the driver can work with the device now that
10904  * the pci slot has been reset.  If a basic connection seems good
10905  * (registers are readable and have sane content) then return a
10906  * happy little PCI_ERS_RESULT_xxx.
10907  **/
10908 static pci_ers_result_t i40e_pci_error_slot_reset(struct pci_dev *pdev)
10909 {
10910         struct i40e_pf *pf = pci_get_drvdata(pdev);
10911         pci_ers_result_t result;
10912         int err;
10913         u32 reg;
10914
10915         dev_dbg(&pdev->dev, "%s\n", __func__);
10916         if (pci_enable_device_mem(pdev)) {
10917                 dev_info(&pdev->dev,
10918                          "Cannot re-enable PCI device after reset.\n");
10919                 result = PCI_ERS_RESULT_DISCONNECT;
10920         } else {
10921                 pci_set_master(pdev);
10922                 pci_restore_state(pdev);
10923                 pci_save_state(pdev);
10924                 pci_wake_from_d3(pdev, false);
10925
10926                 reg = rd32(&pf->hw, I40E_GLGEN_RTRIG);
10927                 if (reg == 0)
10928                         result = PCI_ERS_RESULT_RECOVERED;
10929                 else
10930                         result = PCI_ERS_RESULT_DISCONNECT;
10931         }
10932
10933         err = pci_cleanup_aer_uncorrect_error_status(pdev);
10934         if (err) {
10935                 dev_info(&pdev->dev,
10936                          "pci_cleanup_aer_uncorrect_error_status failed 0x%0x\n",
10937                          err);
10938                 /* non-fatal, continue */
10939         }
10940
10941         return result;
10942 }
10943
10944 /**
10945  * i40e_pci_error_resume - restart operations after PCI error recovery
10946  * @pdev: PCI device information struct
10947  *
10948  * Called to allow the driver to bring things back up after PCI error
10949  * and/or reset recovery has finished.
10950  **/
10951 static void i40e_pci_error_resume(struct pci_dev *pdev)
10952 {
10953         struct i40e_pf *pf = pci_get_drvdata(pdev);
10954
10955         dev_dbg(&pdev->dev, "%s\n", __func__);
10956         if (test_bit(__I40E_SUSPENDED, &pf->state))
10957                 return;
10958
10959         rtnl_lock();
10960         i40e_handle_reset_warning(pf);
10961         rtnl_unlock();
10962 }
10963
/**
 * i40e_shutdown - PCI callback for shutting down
 * @pdev: PCI device information struct
 *
 * Quiesces the device and programs Wake-on-LAN state before the
 * system reboots or powers off.
 **/
static void i40e_shutdown(struct pci_dev *pdev)
{
	struct i40e_pf *pf = pci_get_drvdata(pdev);
	struct i40e_hw *hw = &pf->hw;

	/* mark suspended/down so no new work gets scheduled */
	set_bit(__I40E_SUSPENDED, &pf->state);
	set_bit(__I40E_DOWN, &pf->state);
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	/* WoL registers follow wol_en: APM wake enable + magic-packet filter */
	wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* stop the service task before the final quiesce pass */
	del_timer_sync(&pf->service_timer);
	cancel_work_sync(&pf->service_task);
	i40e_fdir_teardown(pf);

	/* NOTE(review): prep_for_reset and the WoL writes are repeated here,
	 * after the service task has been cancelled, presumably to re-quiesce
	 * anything a late service-task run may have touched — confirm against
	 * upstream commit history before deduplicating.
	 */
	rtnl_lock();
	i40e_prep_for_reset(pf);
	rtnl_unlock();

	wr32(hw, I40E_PFPM_APM,
	     (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
	wr32(hw, I40E_PFPM_WUFC,
	     (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));

	/* Since we're going to destroy queues during the
	 * i40e_clear_interrupt_scheme() we should hold the RTNL lock for this
	 * whole section
	 */
	rtnl_lock();
	i40e_clear_interrupt_scheme(pf);
	rtnl_unlock();

	/* only drop to D3 (honoring WoL) on a real power-off, not a reboot */
	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, pf->wol_en);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
11008
11009 #ifdef CONFIG_PM
11010 /**
11011  * i40e_suspend - PCI callback for moving to D3
11012  * @pdev: PCI device information struct
11013  **/
11014 static int i40e_suspend(struct pci_dev *pdev, pm_message_t state)
11015 {
11016         struct i40e_pf *pf = pci_get_drvdata(pdev);
11017         struct i40e_hw *hw = &pf->hw;
11018
11019         set_bit(__I40E_SUSPENDED, &pf->state);
11020         set_bit(__I40E_DOWN, &pf->state);
11021
11022         rtnl_lock();
11023         i40e_prep_for_reset(pf);
11024         rtnl_unlock();
11025
11026         wr32(hw, I40E_PFPM_APM, (pf->wol_en ? I40E_PFPM_APM_APME_MASK : 0));
11027         wr32(hw, I40E_PFPM_WUFC, (pf->wol_en ? I40E_PFPM_WUFC_MAG_MASK : 0));
11028
11029         pci_wake_from_d3(pdev, pf->wol_en);
11030         pci_set_power_state(pdev, PCI_D3hot);
11031
11032         return 0;
11033 }
11034
11035 /**
11036  * i40e_resume - PCI callback for waking up from D3
11037  * @pdev: PCI device information struct
11038  **/
11039 static int i40e_resume(struct pci_dev *pdev)
11040 {
11041         struct i40e_pf *pf = pci_get_drvdata(pdev);
11042         u32 err;
11043
11044         pci_set_power_state(pdev, PCI_D0);
11045         pci_restore_state(pdev);
11046         /* pci_restore_state() clears dev->state_saves, so
11047          * call pci_save_state() again to restore it.
11048          */
11049         pci_save_state(pdev);
11050
11051         err = pci_enable_device_mem(pdev);
11052         if (err) {
11053                 dev_err(&pdev->dev, "Cannot enable PCI device from suspend\n");
11054                 return err;
11055         }
11056         pci_set_master(pdev);
11057
11058         /* no wakeup events while running */
11059         pci_wake_from_d3(pdev, false);
11060
11061         /* handling the reset will rebuild the device state */
11062         if (test_and_clear_bit(__I40E_SUSPENDED, &pf->state)) {
11063                 clear_bit(__I40E_DOWN, &pf->state);
11064                 rtnl_lock();
11065                 i40e_reset_and_rebuild(pf, false);
11066                 rtnl_unlock();
11067         }
11068
11069         return 0;
11070 }
11071
11072 #endif
/* PCI AER recovery callbacks: quiesce on detected error, validate the
 * device after a slot reset, then rebuild state on resume.
 */
static const struct pci_error_handlers i40e_err_handler = {
	.error_detected = i40e_pci_error_detected,
	.slot_reset = i40e_pci_error_slot_reset,
	.resume = i40e_pci_error_resume,
};
11078
/* PCI driver registration: probe/remove, legacy PM suspend/resume,
 * shutdown, AER error handlers, and sysfs-driven SR-IOV configuration.
 */
static struct pci_driver i40e_driver = {
	.name     = i40e_driver_name,
	.id_table = i40e_pci_tbl,
	.probe    = i40e_probe,
	.remove   = i40e_remove,
#ifdef CONFIG_PM
	.suspend  = i40e_suspend,
	.resume   = i40e_resume,
#endif
	.shutdown = i40e_shutdown,
	.err_handler = &i40e_err_handler,
	.sriov_configure = i40e_pci_sriov_configure,
};
11092
11093 /**
11094  * i40e_init_module - Driver registration routine
11095  *
11096  * i40e_init_module is the first routine called when the driver is
11097  * loaded. All it does is register with the PCI subsystem.
11098  **/
11099 static int __init i40e_init_module(void)
11100 {
11101         pr_info("%s: %s - version %s\n", i40e_driver_name,
11102                 i40e_driver_string, i40e_driver_version_str);
11103         pr_info("%s: %s\n", i40e_driver_name, i40e_copyright);
11104
11105         i40e_dbg_init();
11106         return pci_register_driver(&i40e_driver);
11107 }
11108 module_init(i40e_init_module);
11109
/**
 * i40e_exit_module - Driver exit cleanup routine
 *
 * i40e_exit_module is called just before the driver is removed
 * from memory.
 **/
static void __exit i40e_exit_module(void)
{
	/* unregister from the PCI subsystem before tearing down debugfs */
	pci_unregister_driver(&i40e_driver);
	i40e_dbg_exit();
}
11121 module_exit(i40e_exit_module);