// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2013 - 2018 Intel Corporation. */

#include "iavf.h"
#include "iavf_prototype.h"
#include "iavf_client.h"
/* All iavf tracepoints are defined by the include below, which must
 * be included exactly once across the whole kernel with
 * CREATE_TRACE_POINTS defined
 */
#define CREATE_TRACE_POINTS
#include "iavf_trace.h"

static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter);
static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter);
static int iavf_close(struct net_device *netdev);
static int iavf_init_get_resources(struct iavf_adapter *adapter);
static int iavf_check_reset_complete(struct iavf_hw *hw);

char iavf_driver_name[] = "iavf";
static const char iavf_driver_string[] =
        "Intel(R) Ethernet Adaptive Virtual Function Network Driver";

static const char iavf_copyright[] =
        "Copyright (c) 2013 - 2018 Intel Corporation.";

/* iavf_pci_tbl - PCI Device ID Table
 *
 * Wildcard entries (PCI_ANY_ID) should come last
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id iavf_pci_tbl[] = {
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_VF_HV), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_X722_VF), 0},
        {PCI_VDEVICE(INTEL, IAVF_DEV_ID_ADAPTIVE_VF), 0},
        /* required last entry */
        {0, }
};

MODULE_DEVICE_TABLE(pci, iavf_pci_tbl);

MODULE_ALIAS("i40evf");
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION("Intel(R) Ethernet Adaptive Virtual Function Network Driver");
MODULE_LICENSE("GPL v2");

static const struct net_device_ops iavf_netdev_ops;
struct workqueue_struct *iavf_wq;

/**
 * iavf_allocate_dma_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 * @alignment: what to align the allocation to
 **/
enum iavf_status iavf_allocate_dma_mem_d(struct iavf_hw *hw,
                                         struct iavf_dma_mem *mem,
                                         u64 size, u32 alignment)
{
        struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

        if (!mem)
                return IAVF_ERR_PARAM;

        mem->size = ALIGN(size, alignment);
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size,
                                     (dma_addr_t *)&mem->pa, GFP_KERNEL);
        if (mem->va)
                return 0;
        else
                return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_dma_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_dma_mem_d(struct iavf_hw *hw,
                                     struct iavf_dma_mem *mem)
{
        struct iavf_adapter *adapter = (struct iavf_adapter *)hw->back;

        if (!mem || !mem->va)
                return IAVF_ERR_PARAM;
        dma_free_coherent(&adapter->pdev->dev, mem->size,
                          mem->va, (dma_addr_t)mem->pa);
        return 0;
}

/**
 * iavf_allocate_virt_mem_d - OS specific memory alloc for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to fill out
 * @size: size of memory requested
 **/
enum iavf_status iavf_allocate_virt_mem_d(struct iavf_hw *hw,
                                          struct iavf_virt_mem *mem, u32 size)
{
        if (!mem)
                return IAVF_ERR_PARAM;

        mem->size = size;
        mem->va = kzalloc(size, GFP_KERNEL);

        if (mem->va)
                return 0;
        else
                return IAVF_ERR_NO_MEMORY;
}

/**
 * iavf_free_virt_mem_d - OS specific memory free for shared code
 * @hw:   pointer to the HW structure
 * @mem:  ptr to mem struct to free
 **/
enum iavf_status iavf_free_virt_mem_d(struct iavf_hw *hw,
                                      struct iavf_virt_mem *mem)
{
        if (!mem)
                return IAVF_ERR_PARAM;

        /* it's ok to kfree a NULL pointer */
        kfree(mem->va);

        return 0;
}

/**
 * iavf_lock_timeout - try to set bit but give up after timeout
 * @adapter: board private structure
 * @bit: bit to set
 * @msecs: timeout in msecs
 *
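 * The bit is polled every 10 msecs until it is acquired or the timeout
 * expires.
 *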
 * Returns 0 on success, negative on failure
 **/
static int iavf_lock_timeout(struct iavf_adapter *adapter,
                             enum iavf_critical_section_t bit,
                             unsigned int msecs)
{
        unsigned int wait, delay = 10;

        for (wait = 0; wait < msecs; wait += delay) {
                if (!test_and_set_bit(bit, &adapter->crit_section))
                        return 0;

                msleep(delay);
        }

        return -1;
}

/**
 * iavf_schedule_reset - Set the flags and schedule a reset event
 * @adapter: board private structure
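 *
 * Does nothing if a reset is already pending or needed.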
 **/
void iavf_schedule_reset(struct iavf_adapter *adapter)
{
        if (!(adapter->flags &
              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED))) {
                adapter->flags |= IAVF_FLAG_RESET_NEEDED;
                queue_work(iavf_wq, &adapter->reset_task);
        }
}

/**
 * iavf_tx_timeout - Respond to a Tx Hang
 * @netdev: network interface device structure
 * @txqueue: queue number that is timing out
 **/
static void iavf_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        adapter->tx_timeout_count++;
        iavf_schedule_reset(adapter);
}

/**
 * iavf_misc_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_misc_irq_disable(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;

        if (!adapter->msix_entries)
                return;

        wr32(hw, IAVF_VFINT_DYN_CTL01, 0);

        iavf_flush(hw);

        synchronize_irq(adapter->msix_entries[0].vector);
}

/**
 * iavf_misc_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 **/
static void iavf_misc_irq_enable(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;

        wr32(hw, IAVF_VFINT_DYN_CTL01, IAVF_VFINT_DYN_CTL01_INTENA_MASK |
                                       IAVF_VFINT_DYN_CTL01_ITR_INDX_MASK);
        wr32(hw, IAVF_VFINT_ICR0_ENA1, IAVF_VFINT_ICR0_ENA1_ADMINQ_MASK);

        iavf_flush(hw);
}

/**
 * iavf_irq_disable - Mask off interrupt generation on the NIC
 * @adapter: board private structure
 **/
static void iavf_irq_disable(struct iavf_adapter *adapter)
{
        int i;
        struct iavf_hw *hw = &adapter->hw;

        if (!adapter->msix_entries)
                return;

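        /* skip vector 0 (misc/mailbox); it is masked separately by
         * iavf_misc_irq_disable()
         */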
        for (i = 1; i < adapter->num_msix_vectors; i++) {
                wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1), 0);
                synchronize_irq(adapter->msix_entries[i].vector);
        }
        iavf_flush(hw);
}

/**
 * iavf_irq_enable_queues - Enable interrupt for all queues
 * @adapter: board private structure
 **/
void iavf_irq_enable_queues(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        int i;

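        /* queue vectors start at 1; vector 0 carries only the admin queue
         * interrupt and is enabled by iavf_misc_irq_enable()
         */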
        for (i = 1; i < adapter->num_msix_vectors; i++) {
                wr32(hw, IAVF_VFINT_DYN_CTLN1(i - 1),
                     IAVF_VFINT_DYN_CTLN1_INTENA_MASK |
                     IAVF_VFINT_DYN_CTLN1_ITR_INDX_MASK);
        }
}

/**
 * iavf_irq_enable - Enable default interrupt generation settings
 * @adapter: board private structure
 * @flush: boolean value whether to run rd32()
 **/
void iavf_irq_enable(struct iavf_adapter *adapter, bool flush)
{
        struct iavf_hw *hw = &adapter->hw;

        iavf_misc_irq_enable(adapter);
        iavf_irq_enable_queues(adapter);

        if (flush)
                iavf_flush(hw);
}

/**
 * iavf_msix_aq - Interrupt handler for vector 0
 * @irq: interrupt number
 * @data: pointer to netdev
 **/
static irqreturn_t iavf_msix_aq(int irq, void *data)
{
        struct net_device *netdev = data;
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_hw *hw = &adapter->hw;

        /* handle non-queue interrupts, these reads clear the registers */
        rd32(hw, IAVF_VFINT_ICR01);
        rd32(hw, IAVF_VFINT_ICR0_ENA1);

        /* schedule work on the private workqueue */
        queue_work(iavf_wq, &adapter->adminq_task);

        return IRQ_HANDLED;
}

/**
 * iavf_msix_clean_rings - MSIX mode Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a q_vector
 **/
static irqreturn_t iavf_msix_clean_rings(int irq, void *data)
{
        struct iavf_q_vector *q_vector = data;

        if (!q_vector->tx.ring && !q_vector->rx.ring)
                return IRQ_HANDLED;

        napi_schedule_irqoff(&q_vector->napi);

        return IRQ_HANDLED;
}

/**
 * iavf_map_vector_to_rxq - associate irqs with rx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @r_idx: queue number
 **/
static void
iavf_map_vector_to_rxq(struct iavf_adapter *adapter, int v_idx, int r_idx)
{
        struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
        struct iavf_ring *rx_ring = &adapter->rx_rings[r_idx];
        struct iavf_hw *hw = &adapter->hw;

        rx_ring->q_vector = q_vector;
        rx_ring->next = q_vector->rx.ring;
        rx_ring->vsi = &adapter->vsi;
        q_vector->rx.ring = rx_ring;
        q_vector->rx.count++;
        q_vector->rx.next_update = jiffies + 1;
        q_vector->rx.target_itr = ITR_TO_REG(rx_ring->itr_setting);
        q_vector->ring_mask |= BIT(r_idx);
        wr32(hw, IAVF_VFINT_ITRN1(IAVF_RX_ITR, q_vector->reg_idx),
             q_vector->rx.current_itr >> 1);
        q_vector->rx.current_itr = q_vector->rx.target_itr;
}

/**
 * iavf_map_vector_to_txq - associate irqs with tx queues
 * @adapter: board private structure
 * @v_idx: interrupt number
 * @t_idx: queue number
 **/
static void
iavf_map_vector_to_txq(struct iavf_adapter *adapter, int v_idx, int t_idx)
{
        struct iavf_q_vector *q_vector = &adapter->q_vectors[v_idx];
        struct iavf_ring *tx_ring = &adapter->tx_rings[t_idx];
        struct iavf_hw *hw = &adapter->hw;

        tx_ring->q_vector = q_vector;
        tx_ring->next = q_vector->tx.ring;
        tx_ring->vsi = &adapter->vsi;
        q_vector->tx.ring = tx_ring;
        q_vector->tx.count++;
        q_vector->tx.next_update = jiffies + 1;
        q_vector->tx.target_itr = ITR_TO_REG(tx_ring->itr_setting);
        q_vector->num_ringpairs++;
        wr32(hw, IAVF_VFINT_ITRN1(IAVF_TX_ITR, q_vector->reg_idx),
             q_vector->tx.target_itr >> 1);
        q_vector->tx.current_itr = q_vector->tx.target_itr;
}

/**
 * iavf_map_rings_to_vectors - Maps descriptor rings to vectors
 * @adapter: board private structure to initialize
 *
 * This function maps descriptor rings to the queue-specific vectors
 * we were allotted through the MSI-X enabling code.  Ideally, we'd have
 * one vector per ring/queue, but on a constrained vector budget, we
 * group the rings as "efficiently" as possible.  You would add new
 * mapping configurations in here.
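 *
 * For example, with 8 queues and 4 vectors, vector 0 services queues 0
 * and 4, vector 1 services queues 1 and 5, and so on.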
 **/
static void iavf_map_rings_to_vectors(struct iavf_adapter *adapter)
{
        int rings_remaining = adapter->num_active_queues;
        int ridx = 0, vidx = 0;
        int q_vectors;

        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (; ridx < rings_remaining; ridx++) {
                iavf_map_vector_to_rxq(adapter, vidx, ridx);
                iavf_map_vector_to_txq(adapter, vidx, ridx);

                /* In the case where we have more queues than vectors, continue
                 * round-robin on vectors until all queues are mapped.
                 */
                if (++vidx >= q_vectors)
                        vidx = 0;
        }

        adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
}

/**
 * iavf_irq_affinity_notify - Callback for affinity changes
 * @notify: context as to what irq was changed
 * @mask: the new affinity mask
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * so that we may register to receive changes to the irq affinity masks.
 **/
static void iavf_irq_affinity_notify(struct irq_affinity_notify *notify,
                                     const cpumask_t *mask)
{
        struct iavf_q_vector *q_vector =
                container_of(notify, struct iavf_q_vector, affinity_notify);

        cpumask_copy(&q_vector->affinity_mask, mask);
}

/**
 * iavf_irq_affinity_release - Callback for affinity notifier release
 * @ref: internal core kernel usage
 *
 * This is a callback function used by the irq_set_affinity_notifier function
 * to inform the current notification subscriber that they will no longer
 * receive notifications.
 **/
static void iavf_irq_affinity_release(struct kref *ref) {}

/**
 * iavf_request_traffic_irqs - Initialize MSI-X interrupts
 * @adapter: board private structure
 * @basename: device basename
 *
 * Allocates MSI-X vectors for tx and rx handling, and requests
 * interrupts from the kernel.
 **/
static int
iavf_request_traffic_irqs(struct iavf_adapter *adapter, char *basename)
{
        unsigned int vector, q_vectors;
        unsigned int rx_int_idx = 0, tx_int_idx = 0;
        int irq_num, err;
        int cpu;

        iavf_irq_disable(adapter);
        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (vector = 0; vector < q_vectors; vector++) {
                struct iavf_q_vector *q_vector = &adapter->q_vectors[vector];

                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;

                if (q_vector->tx.ring && q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-TxRx-%d", basename, rx_int_idx++);
                        tx_int_idx++;
                } else if (q_vector->rx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-rx-%d", basename, rx_int_idx++);
                } else if (q_vector->tx.ring) {
                        snprintf(q_vector->name, sizeof(q_vector->name),
                                 "iavf-%s-tx-%d", basename, tx_int_idx++);
                } else {
                        /* skip this unused q_vector */
                        continue;
                }
                err = request_irq(irq_num,
                                  iavf_msix_clean_rings,
                                  0,
                                  q_vector->name,
                                  q_vector);
                if (err) {
                        dev_info(&adapter->pdev->dev,
                                 "Request_irq failed, error: %d\n", err);
                        goto free_queue_irqs;
                }
                /* register for affinity change notifications */
                q_vector->affinity_notify.notify = iavf_irq_affinity_notify;
                q_vector->affinity_notify.release =
                                                   iavf_irq_affinity_release;
                irq_set_affinity_notifier(irq_num, &q_vector->affinity_notify);
                /* Spread the IRQ affinity hints across online CPUs. Note that
                 * get_cpu_mask returns a mask with a permanent lifetime so
                 * it's safe to use as a hint for irq_set_affinity_hint.
                 */
                cpu = cpumask_local_spread(q_vector->v_idx, -1);
                irq_set_affinity_hint(irq_num, get_cpu_mask(cpu));
        }

        return 0;

free_queue_irqs:
        while (vector) {
                vector--;
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
                irq_set_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
        return err;
}

/**
 * iavf_request_misc_irq - Initialize MSI-X interrupts
 * @adapter: board private structure
 *
 * Allocates MSI-X vector 0 and requests interrupts from the kernel. This
 * vector is only for the admin queue, and stays active even when the netdev
 * is closed.
 **/
static int iavf_request_misc_irq(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        snprintf(adapter->misc_vector_name,
                 sizeof(adapter->misc_vector_name) - 1, "iavf-%s:mbx",
                 dev_name(&adapter->pdev->dev));
        err = request_irq(adapter->msix_entries[0].vector,
                          &iavf_msix_aq, 0,
                          adapter->misc_vector_name, netdev);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "request_irq for %s failed: %d\n",
                        adapter->misc_vector_name, err);
                free_irq(adapter->msix_entries[0].vector, netdev);
        }
        return err;
}

/**
 * iavf_free_traffic_irqs - Free MSI-X interrupts
 * @adapter: board private structure
 *
 * Frees all MSI-X vectors other than 0.
 **/
static void iavf_free_traffic_irqs(struct iavf_adapter *adapter)
{
        int vector, irq_num, q_vectors;

        if (!adapter->msix_entries)
                return;

        q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (vector = 0; vector < q_vectors; vector++) {
                irq_num = adapter->msix_entries[vector + NONQ_VECS].vector;
                irq_set_affinity_notifier(irq_num, NULL);
                irq_set_affinity_hint(irq_num, NULL);
                free_irq(irq_num, &adapter->q_vectors[vector]);
        }
}

/**
 * iavf_free_misc_irq - Free MSI-X miscellaneous vector
 * @adapter: board private structure
 *
 * Frees MSI-X vector 0.
 **/
static void iavf_free_misc_irq(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;

        if (!adapter->msix_entries)
                return;

        free_irq(adapter->msix_entries[0].vector, netdev);
}

/**
 * iavf_configure_tx - Configure Transmit Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void iavf_configure_tx(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        int i;

        for (i = 0; i < adapter->num_active_queues; i++)
                adapter->tx_rings[i].tail = hw->hw_addr + IAVF_QTX_TAIL1(i);
}

/**
 * iavf_configure_rx - Configure Receive Unit after Reset
 * @adapter: board private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
static void iavf_configure_rx(struct iavf_adapter *adapter)
{
        unsigned int rx_buf_len = IAVF_RXBUFFER_2048;
        struct iavf_hw *hw = &adapter->hw;
        int i;

        /* Legacy Rx will always default to a 2048 buffer size. */
#if (PAGE_SIZE < 8192)
        if (!(adapter->flags & IAVF_FLAG_LEGACY_RX)) {
                struct net_device *netdev = adapter->netdev;

                /* For jumbo frames on systems with 4K pages we have to use
                 * an order 1 page, so we might as well increase the size
                 * of our Rx buffer to make better use of the available space
                 */
                rx_buf_len = IAVF_RXBUFFER_3072;

                /* We use a 1536 buffer size for configurations with
                 * standard Ethernet mtu.  On x86 this gives us enough room
                 * for shared info and 192 bytes of padding.
                 */
                if (!IAVF_2K_TOO_SMALL_WITH_PADDING &&
                    (netdev->mtu <= ETH_DATA_LEN))
                        rx_buf_len = IAVF_RXBUFFER_1536 - NET_IP_ALIGN;
        }
#endif

        for (i = 0; i < adapter->num_active_queues; i++) {
                adapter->rx_rings[i].tail = hw->hw_addr + IAVF_QRX_TAIL1(i);
                adapter->rx_rings[i].rx_buf_len = rx_buf_len;

                if (adapter->flags & IAVF_FLAG_LEGACY_RX)
                        clear_ring_build_skb_enabled(&adapter->rx_rings[i]);
                else
                        set_ring_build_skb_enabled(&adapter->rx_rings[i]);
        }
}

/**
 * iavf_find_vlan - Search filter list for specific vlan filter
 * @adapter: board private structure
 * @vlan: vlan tag
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_vlan_filter *iavf_find_vlan(struct iavf_adapter *adapter, u16 vlan)
{
        struct iavf_vlan_filter *f;

        list_for_each_entry(f, &adapter->vlan_filter_list, list) {
                if (vlan == f->vlan)
                        return f;
        }
        return NULL;
}

/**
 * iavf_add_vlan - Add a vlan filter to the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 *
 * Returns ptr to the filter object or NULL when no memory available.
 **/
static struct
iavf_vlan_filter *iavf_add_vlan(struct iavf_adapter *adapter, u16 vlan)
{
        struct iavf_vlan_filter *f = NULL;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_vlan(adapter, vlan);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        goto clearout;

                f->vlan = vlan;

                list_add_tail(&f->list, &adapter->vlan_filter_list);
                f->add = true;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
        }

clearout:
        spin_unlock_bh(&adapter->mac_vlan_list_lock);
        return f;
}

/**
 * iavf_del_vlan - Remove a vlan filter from the list
 * @adapter: board private structure
 * @vlan: VLAN tag
 **/
static void iavf_del_vlan(struct iavf_adapter *adapter, u16 vlan)
{
        struct iavf_vlan_filter *f;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_vlan(adapter, vlan);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);
}

/**
 * iavf_vlan_rx_add_vid - Add a VLAN filter to a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_add_vid(struct net_device *netdev,
                                __always_unused __be16 proto, u16 vid)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (!VLAN_ALLOWED(adapter))
                return -EIO;
        if (iavf_add_vlan(adapter, vid) == NULL)
                return -ENOMEM;
        return 0;
}

/**
 * iavf_vlan_rx_kill_vid - Remove a VLAN filter from a device
 * @netdev: network device struct
 * @proto: unused protocol data
 * @vid: VLAN tag
 **/
static int iavf_vlan_rx_kill_vid(struct net_device *netdev,
                                 __always_unused __be16 proto, u16 vid)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (VLAN_ALLOWED(adapter)) {
                iavf_del_vlan(adapter, vid);
                return 0;
        }
        return -EIO;
}

/**
 * iavf_find_filter - Search filter list for specific mac filter
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL. Must be called while holding the
 * mac_vlan_list_lock.
 **/
static struct
iavf_mac_filter *iavf_find_filter(struct iavf_adapter *adapter,
                                  const u8 *macaddr)
{
        struct iavf_mac_filter *f;

        if (!macaddr)
                return NULL;

        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                if (ether_addr_equal(macaddr, f->macaddr))
                        return f;
        }
        return NULL;
}

/**
 * iavf_add_filter - Add a mac filter to the filter list
 * @adapter: board private structure
 * @macaddr: the MAC address
 *
 * Returns ptr to the filter object or NULL when no memory available.
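 * Must be called while holding the mac_vlan_list_lock.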
 **/
struct iavf_mac_filter *iavf_add_filter(struct iavf_adapter *adapter,
                                        const u8 *macaddr)
{
        struct iavf_mac_filter *f;

        if (!macaddr)
                return NULL;

        f = iavf_find_filter(adapter, macaddr);
        if (!f) {
                f = kzalloc(sizeof(*f), GFP_ATOMIC);
                if (!f)
                        return f;

                ether_addr_copy(f->macaddr, macaddr);

                list_add_tail(&f->list, &adapter->mac_filter_list);
                f->add = true;
                f->is_new_mac = true;
                adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
        } else {
                f->remove = false;
        }

        return f;
}

/**
 * iavf_set_mac - NDO callback to set port mac address
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_set_mac(struct net_device *netdev, void *p)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_hw *hw = &adapter->hw;
        struct iavf_mac_filter *f;
        struct sockaddr *addr = p;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
                return 0;

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        f = iavf_find_filter(adapter, hw->mac.addr);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
        }

        f = iavf_add_filter(adapter, addr->sa_data);

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        if (f)
                ether_addr_copy(hw->mac.addr, addr->sa_data);

        return f ? 0 : -ENOMEM;
}

/**
 * iavf_addr_sync - Callback for dev_(mc|uc)_sync to add address
 * @netdev: the netdevice
 * @addr: address to add
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be added. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_sync(struct net_device *netdev, const u8 *addr)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        if (iavf_add_filter(adapter, addr))
                return 0;
        else
                return -ENOMEM;
}

/**
 * iavf_addr_unsync - Callback for dev_(mc|uc)_sync to remove address
 * @netdev: the netdevice
 * @addr: address to remove
 *
 * Called by __dev_(mc|uc)_sync when an address needs to be removed. We call
 * __dev_(uc|mc)_sync from .set_rx_mode and guarantee to hold the hash lock.
 */
static int iavf_addr_unsync(struct net_device *netdev, const u8 *addr)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);
        struct iavf_mac_filter *f;

        /* Under some circumstances, we might receive a request to delete
         * our own device address from our uc list. Because we store the
         * device address in the VSI's MAC/VLAN filter list, we need to ignore
         * such requests and not delete our device address from this list.
         */
        if (ether_addr_equal(addr, netdev->dev_addr))
                return 0;

        f = iavf_find_filter(adapter, addr);
        if (f) {
                f->remove = true;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_MAC_FILTER;
        }
        return 0;
}

/**
 * iavf_set_rx_mode - NDO callback to set the netdev filters
 * @netdev: network interface device structure
 **/
static void iavf_set_rx_mode(struct net_device *netdev)
{
        struct iavf_adapter *adapter = netdev_priv(netdev);

        spin_lock_bh(&adapter->mac_vlan_list_lock);
        __dev_uc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
        __dev_mc_sync(netdev, iavf_addr_sync, iavf_addr_unsync);
        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        if (netdev->flags & IFF_PROMISC &&
            !(adapter->flags & IAVF_FLAG_PROMISC_ON))
                adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_PROMISC;
        else if (!(netdev->flags & IFF_PROMISC) &&
                 adapter->flags & IAVF_FLAG_PROMISC_ON)
                adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_PROMISC;

        if (netdev->flags & IFF_ALLMULTI &&
            !(adapter->flags & IAVF_FLAG_ALLMULTI_ON))
                adapter->aq_required |= IAVF_FLAG_AQ_REQUEST_ALLMULTI;
        else if (!(netdev->flags & IFF_ALLMULTI) &&
                 adapter->flags & IAVF_FLAG_ALLMULTI_ON)
                adapter->aq_required |= IAVF_FLAG_AQ_RELEASE_ALLMULTI;
}

/**
 * iavf_napi_enable_all - enable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_enable_all(struct iavf_adapter *adapter)
{
        int q_idx;
        struct iavf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                struct napi_struct *napi;

                q_vector = &adapter->q_vectors[q_idx];
                napi = &q_vector->napi;
                napi_enable(napi);
        }
}

/**
 * iavf_napi_disable_all - disable NAPI on all queue vectors
 * @adapter: board private structure
 **/
static void iavf_napi_disable_all(struct iavf_adapter *adapter)
{
        int q_idx;
        struct iavf_q_vector *q_vector;
        int q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (q_idx = 0; q_idx < q_vectors; q_idx++) {
                q_vector = &adapter->q_vectors[q_idx];
                napi_disable(&q_vector->napi);
        }
}

/**
 * iavf_configure - set up transmit and receive data structures
 * @adapter: board private structure
 **/
static void iavf_configure(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int i;

        iavf_set_rx_mode(netdev);

        iavf_configure_tx(adapter);
        iavf_configure_rx(adapter);
        adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_QUEUES;

        for (i = 0; i < adapter->num_active_queues; i++) {
                struct iavf_ring *ring = &adapter->rx_rings[i];

                iavf_alloc_rx_buffers(ring, IAVF_DESC_UNUSED(ring));
        }
}

/**
 * iavf_up_complete - Finish the last steps of bringing up a connection
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
static void iavf_up_complete(struct iavf_adapter *adapter)
{
        iavf_change_state(adapter, __IAVF_RUNNING);
        clear_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

        iavf_napi_enable_all(adapter);

        adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_QUEUES;
        if (CLIENT_ENABLED(adapter))
                adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_OPEN;
        mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_down - Shutdown the connection processing
 * @adapter: board private structure
 *
 * Expects to be called while holding the __IAVF_IN_CRITICAL_TASK bit lock.
 **/
void iavf_down(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        struct iavf_vlan_filter *vlf;
        struct iavf_mac_filter *f;
        struct iavf_cloud_filter *cf;

        if (adapter->state <= __IAVF_DOWN_PENDING)
                return;

        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
        adapter->link_up = false;
        iavf_napi_disable_all(adapter);
        iavf_irq_disable(adapter);

        spin_lock_bh(&adapter->mac_vlan_list_lock);

        /* clear the sync flag on all filters */
        __dev_uc_unsync(adapter->netdev, NULL);
        __dev_mc_unsync(adapter->netdev, NULL);

        /* remove all MAC filters */
        list_for_each_entry(f, &adapter->mac_filter_list, list) {
                f->remove = true;
        }

        /* remove all VLAN filters */
        list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
                vlf->remove = true;
        }

        spin_unlock_bh(&adapter->mac_vlan_list_lock);

        /* remove all cloud filters */
        spin_lock_bh(&adapter->cloud_filter_list_lock);
        list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
                cf->del = true;
        }
        spin_unlock_bh(&adapter->cloud_filter_list_lock);

        if (!(adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) &&
            adapter->state != __IAVF_RESETTING) {
                /* cancel any current operation */
                adapter->current_op = VIRTCHNL_OP_UNKNOWN;
                /* Schedule operations to close down the HW. Don't wait
                 * here for this to complete. The watchdog is still running
                 * and it will take care of this.
                 */
                adapter->aq_required = IAVF_FLAG_AQ_DEL_MAC_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_VLAN_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
                adapter->aq_required |= IAVF_FLAG_AQ_DISABLE_QUEUES;
        }

        mod_delayed_work(iavf_wq, &adapter->watchdog_task, 0);
}

/**
 * iavf_acquire_msix_vectors - Setup the MSIX capability
 * @adapter: board private structure
 * @vectors: number of vectors to request
 *
 * Work with the OS to set up the MSIX vectors needed.
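 * pci_enable_msix_range() may grant fewer vectors than requested, but
 * never fewer than MIN_MSIX_COUNT; the count actually obtained is saved
 * in adapter->num_msix_vectors.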
 *
 * Returns 0 on success, negative on failure
 **/
static int
iavf_acquire_msix_vectors(struct iavf_adapter *adapter, int vectors)
{
        int err, vector_threshold;

        /* We'll want at least 3 (vector_threshold):
         * 0) Other (Admin Queue and link, mostly)
         * 1) TxQ[0] Cleanup
         * 2) RxQ[0] Cleanup
         */
        vector_threshold = MIN_MSIX_COUNT;

        /* The more we get, the more we will assign to Tx/Rx Cleanup
         * for the separate queues...where Rx Cleanup >= Tx Cleanup.
         * Right now, we simply care about how many we'll get; we'll
         * set them up later while requesting irq's.
         */
        err = pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
                                    vector_threshold, vectors);
        if (err < 0) {
                dev_err(&adapter->pdev->dev, "Unable to allocate MSI-X interrupts\n");
                kfree(adapter->msix_entries);
                adapter->msix_entries = NULL;
                return err;
        }

        /* Adjust for only the vectors we'll use, which is minimum
         * of max_msix_q_vectors + NONQ_VECS, or the number of
         * vectors we were allocated.
         */
        adapter->num_msix_vectors = err;
        return 0;
}

/**
 * iavf_free_queues - Free memory for all rings
 * @adapter: board private structure to initialize
 *
 * Free all of the memory associated with queue pairs.
 **/
static void iavf_free_queues(struct iavf_adapter *adapter)
{
        if (!adapter->vsi_res)
                return;
        adapter->num_active_queues = 0;
        kfree(adapter->tx_rings);
        adapter->tx_rings = NULL;
        kfree(adapter->rx_rings);
        adapter->rx_rings = NULL;
}

/**
 * iavf_alloc_queues - Allocate memory for all rings
 * @adapter: board private structure to initialize
 *
 * We allocate one ring per queue at run-time since we don't know the
 * number of queues at compile-time.
 **/
static int iavf_alloc_queues(struct iavf_adapter *adapter)
{
        int i, num_active_queues;

        /* If we're in reset reallocating queues, we don't yet know for
         * certain that the PF gave us the number of queues we asked for,
         * but we'll assume it did. Once basic reset is finished we'll
         * confirm this when we start negotiating the config with the PF.
         */
        if (adapter->num_req_queues)
                num_active_queues = adapter->num_req_queues;
        else if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
                 adapter->num_tc)
                num_active_queues = adapter->ch_config.total_qps;
        else
                num_active_queues = min_t(int,
                                          adapter->vsi_res->num_queue_pairs,
                                          (int)(num_online_cpus()));

        adapter->tx_rings = kcalloc(num_active_queues,
                                    sizeof(struct iavf_ring), GFP_KERNEL);
        if (!adapter->tx_rings)
                goto err_out;
        adapter->rx_rings = kcalloc(num_active_queues,
                                    sizeof(struct iavf_ring), GFP_KERNEL);
        if (!adapter->rx_rings)
                goto err_out;

        for (i = 0; i < num_active_queues; i++) {
                struct iavf_ring *tx_ring;
                struct iavf_ring *rx_ring;

                tx_ring = &adapter->tx_rings[i];

                tx_ring->queue_index = i;
                tx_ring->netdev = adapter->netdev;
                tx_ring->dev = &adapter->pdev->dev;
                tx_ring->count = adapter->tx_desc_count;
                tx_ring->itr_setting = IAVF_ITR_TX_DEF;
                if (adapter->flags & IAVF_FLAG_WB_ON_ITR_CAPABLE)
                        tx_ring->flags |= IAVF_TXR_FLAGS_WB_ON_ITR;

                rx_ring = &adapter->rx_rings[i];
                rx_ring->queue_index = i;
                rx_ring->netdev = adapter->netdev;
                rx_ring->dev = &adapter->pdev->dev;
                rx_ring->count = adapter->rx_desc_count;
                rx_ring->itr_setting = IAVF_ITR_RX_DEF;
        }

        adapter->num_active_queues = num_active_queues;

        return 0;

err_out:
        iavf_free_queues(adapter);
        return -ENOMEM;
}

/**
 * iavf_set_interrupt_capability - set MSI-X or FAIL if not supported
 * @adapter: board private structure to initialize
 *
 * Attempt to configure the interrupts using the best available
 * capabilities of the hardware and the kernel.
 **/
static int iavf_set_interrupt_capability(struct iavf_adapter *adapter)
{
        int vector, v_budget;
        int pairs = 0;
        int err = 0;

        if (!adapter->vsi_res) {
                err = -EIO;
                goto out;
        }
        pairs = adapter->num_active_queues;

        /* It's easy to be greedy for MSI-X vectors, but it really doesn't do
         * us much good if we have more vectors than CPUs. However, we already
         * limit the total number of queues by the number of CPUs so we do not
         * need any further limiting here.
         */
        v_budget = min_t(int, pairs + NONQ_VECS,
                         (int)adapter->vf_res->max_vectors);

        adapter->msix_entries = kcalloc(v_budget,
                                        sizeof(struct msix_entry), GFP_KERNEL);
        if (!adapter->msix_entries) {
                err = -ENOMEM;
                goto out;
        }

        for (vector = 0; vector < v_budget; vector++)
                adapter->msix_entries[vector].entry = vector;

        err = iavf_acquire_msix_vectors(adapter, v_budget);

out:
        netif_set_real_num_rx_queues(adapter->netdev, pairs);
        netif_set_real_num_tx_queues(adapter->netdev, pairs);
        return err;
}

/**
 * iavf_config_rss_aq - Configure RSS keys and lut by using AQ commands
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_config_rss_aq(struct iavf_adapter *adapter)
{
        struct iavf_aqc_get_set_rss_key_data *rss_key =
                (struct iavf_aqc_get_set_rss_key_data *)adapter->rss_key;
        struct iavf_hw *hw = &adapter->hw;
        int ret = 0;

        if (adapter->current_op != VIRTCHNL_OP_UNKNOWN) {
                /* bail because we already have a command pending */
                dev_err(&adapter->pdev->dev, "Cannot configure RSS, command %d pending\n",
                        adapter->current_op);
                return -EBUSY;
        }

        ret = iavf_aq_set_rss_key(hw, adapter->vsi.id, rss_key);
        if (ret) {
                dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
                        iavf_stat_str(hw, ret),
                        iavf_aq_str(hw, hw->aq.asq_last_status));
                return ret;
        }

        ret = iavf_aq_set_rss_lut(hw, adapter->vsi.id, false,
                                  adapter->rss_lut, adapter->rss_lut_size);
        if (ret) {
                dev_err(&adapter->pdev->dev, "Cannot set RSS lut, err %s aq_err %s\n",
                        iavf_stat_str(hw, ret),
                        iavf_aq_str(hw, hw->aq.asq_last_status));
        }

        return ret;
}

/**
 * iavf_config_rss_reg - Configure RSS keys and lut by writing registers
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_config_rss_reg(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;
        u32 *dw;
        u16 i;

        /* the key and lut buffers each hold size / 4 dwords */
        dw = (u32 *)adapter->rss_key;
        for (i = 0; i < adapter->rss_key_size / 4; i++)
                wr32(hw, IAVF_VFQF_HKEY(i), dw[i]);

        dw = (u32 *)adapter->rss_lut;
        for (i = 0; i < adapter->rss_lut_size / 4; i++)
                wr32(hw, IAVF_VFQF_HLUT(i), dw[i]);

        iavf_flush(hw);

        return 0;
}

/**
 * iavf_config_rss - Configure RSS keys and lut
 * @adapter: board private structure
 *
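 * RSS is programmed through the PF, via admin queue commands, or directly
 * through VF registers, depending on the capabilities negotiated with the
 * PF.
 *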
 * Returns 0 on success, negative on failure
 **/
int iavf_config_rss(struct iavf_adapter *adapter)
{
        if (RSS_PF(adapter)) {
                adapter->aq_required |= IAVF_FLAG_AQ_SET_RSS_LUT |
                                        IAVF_FLAG_AQ_SET_RSS_KEY;
                return 0;
        } else if (RSS_AQ(adapter)) {
                return iavf_config_rss_aq(adapter);
        } else {
                return iavf_config_rss_reg(adapter);
        }
}

/**
 * iavf_fill_rss_lut - Fill the lut with default values
 * @adapter: board private structure
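 *
 * Each entry is assigned round-robin, so with e.g. 4 active queues the
 * table repeats 0, 1, 2, 3, 0, 1, ...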
 **/
static void iavf_fill_rss_lut(struct iavf_adapter *adapter)
{
        u16 i;

        for (i = 0; i < adapter->rss_lut_size; i++)
                adapter->rss_lut[i] = i % adapter->num_active_queues;
}

/**
 * iavf_init_rss - Prepare for RSS
 * @adapter: board private structure
 *
 * Return 0 on success, negative on failure
 **/
static int iavf_init_rss(struct iavf_adapter *adapter)
{
        struct iavf_hw *hw = &adapter->hw;

        if (!RSS_PF(adapter)) {
                /* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
                if (adapter->vf_res->vf_cap_flags &
                    VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2)
                        adapter->hena = IAVF_DEFAULT_RSS_HENA_EXPANDED;
                else
                        adapter->hena = IAVF_DEFAULT_RSS_HENA;

                wr32(hw, IAVF_VFQF_HENA(0), (u32)adapter->hena);
                wr32(hw, IAVF_VFQF_HENA(1), (u32)(adapter->hena >> 32));
        }

        iavf_fill_rss_lut(adapter);
        netdev_rss_key_fill((void *)adapter->rss_key, adapter->rss_key_size);

        return iavf_config_rss(adapter);
}

/**
 * iavf_alloc_q_vectors - Allocate memory for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * We allocate one q_vector per queue interrupt.  If allocation fails we
 * return -ENOMEM.
 **/
static int iavf_alloc_q_vectors(struct iavf_adapter *adapter)
{
        int q_idx = 0, num_q_vectors;
        struct iavf_q_vector *q_vector;

        num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
        adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
                                     GFP_KERNEL);
        if (!adapter->q_vectors)
                return -ENOMEM;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                q_vector = &adapter->q_vectors[q_idx];
                q_vector->adapter = adapter;
                q_vector->vsi = &adapter->vsi;
                q_vector->v_idx = q_idx;
                q_vector->reg_idx = q_idx;
                cpumask_copy(&q_vector->affinity_mask, cpu_possible_mask);
                netif_napi_add(adapter->netdev, &q_vector->napi,
                               iavf_napi_poll, NAPI_POLL_WEIGHT);
        }

        return 0;
}

/**
 * iavf_free_q_vectors - Free memory allocated for interrupt vectors
 * @adapter: board private structure to initialize
 *
 * This function frees the memory allocated to the q_vectors.  In addition if
 * NAPI is enabled it will delete any references to the NAPI struct prior
 * to freeing the q_vector.
 **/
static void iavf_free_q_vectors(struct iavf_adapter *adapter)
{
        int q_idx, num_q_vectors;

        if (!adapter->q_vectors)
                return;

        num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;

        for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
                struct iavf_q_vector *q_vector = &adapter->q_vectors[q_idx];

                netif_napi_del(&q_vector->napi);
        }
        kfree(adapter->q_vectors);
        adapter->q_vectors = NULL;
}

/**
 * iavf_reset_interrupt_capability - Reset MSIX setup
 * @adapter: board private structure
 *
 **/
void iavf_reset_interrupt_capability(struct iavf_adapter *adapter)
{
        if (!adapter->msix_entries)
                return;

        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
}

/**
 * iavf_init_interrupt_scheme - Determine if MSIX is supported and init
 * @adapter: board private structure to initialize
 *
 **/
int iavf_init_interrupt_scheme(struct iavf_adapter *adapter)
{
        int err;

        err = iavf_alloc_queues(adapter);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "Unable to allocate memory for queues\n");
                goto err_alloc_queues;
        }

        rtnl_lock();
        err = iavf_set_interrupt_capability(adapter);
        rtnl_unlock();
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "Unable to setup interrupt capabilities\n");
                goto err_set_interrupt;
        }

        err = iavf_alloc_q_vectors(adapter);
        if (err) {
                dev_err(&adapter->pdev->dev,
                        "Unable to allocate memory for queue vectors\n");
                goto err_alloc_q_vectors;
        }

        /* If we've made it this far with the ADq flag set, then we haven't
         * bailed out anywhere in the middle. ADq isn't just enabled; the
         * actual resources have been allocated in the reset path.
         * Now we can truly claim that ADq is enabled.
         */
        if ((adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
            adapter->num_tc)
                dev_info(&adapter->pdev->dev, "ADq Enabled, %u TCs created",
                         adapter->num_tc);

        dev_info(&adapter->pdev->dev, "Multiqueue %s: Queue pair count = %u",
                 (adapter->num_active_queues > 1) ? "Enabled" : "Disabled",
                 adapter->num_active_queues);

        return 0;
err_alloc_q_vectors:
        iavf_reset_interrupt_capability(adapter);
err_set_interrupt:
        iavf_free_queues(adapter);
err_alloc_queues:
        return err;
}

/**
 * iavf_free_rss - Free memory used by RSS structs
 * @adapter: board private structure
 **/
static void iavf_free_rss(struct iavf_adapter *adapter)
{
        kfree(adapter->rss_key);
        adapter->rss_key = NULL;

        kfree(adapter->rss_lut);
        adapter->rss_lut = NULL;
}

/**
 * iavf_reinit_interrupt_scheme - Reallocate queues and vectors
 * @adapter: board private structure
 *
 * Returns 0 on success, negative on failure
 **/
static int iavf_reinit_interrupt_scheme(struct iavf_adapter *adapter)
{
        struct net_device *netdev = adapter->netdev;
        int err;

        if (netif_running(netdev))
                iavf_free_traffic_irqs(adapter);
        iavf_free_misc_irq(adapter);
        iavf_reset_interrupt_capability(adapter);
        iavf_free_q_vectors(adapter);
        iavf_free_queues(adapter);

        err = iavf_init_interrupt_scheme(adapter);
        if (err)
                goto err;

        netif_tx_stop_all_queues(netdev);

        err = iavf_request_misc_irq(adapter);
        if (err)
                goto err;

        set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);

        iavf_map_rings_to_vectors(adapter);
err:
        return err;
}

/**
 * iavf_process_aq_command - process aq_required flags and send an AQ command
 * @adapter: pointer to iavf adapter structure
 *
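 * Only one pending command is sent per invocation; remaining aq_required
 * flags are handled on subsequent calls.
 *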
 * Returns 0 if a command was sent, -EAGAIN if no command was pending,
 * or an error code if sending the command failed.
1521  **/
1522 static int iavf_process_aq_command(struct iavf_adapter *adapter)
1523 {
1524         if (adapter->aq_required & IAVF_FLAG_AQ_GET_CONFIG)
1525                 return iavf_send_vf_config_msg(adapter);
1526         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_QUEUES) {
1527                 iavf_disable_queues(adapter);
1528                 return 0;
1529         }
1530
1531         if (adapter->aq_required & IAVF_FLAG_AQ_MAP_VECTORS) {
1532                 iavf_map_queues(adapter);
1533                 return 0;
1534         }
1535
1536         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_MAC_FILTER) {
1537                 iavf_add_ether_addrs(adapter);
1538                 return 0;
1539         }
1540
1541         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_VLAN_FILTER) {
1542                 iavf_add_vlans(adapter);
1543                 return 0;
1544         }
1545
1546         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_MAC_FILTER) {
1547                 iavf_del_ether_addrs(adapter);
1548                 return 0;
1549         }
1550
1551         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_VLAN_FILTER) {
1552                 iavf_del_vlans(adapter);
1553                 return 0;
1554         }
1555
1556         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING) {
1557                 iavf_enable_vlan_stripping(adapter);
1558                 return 0;
1559         }
1560
1561         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING) {
1562                 iavf_disable_vlan_stripping(adapter);
1563                 return 0;
1564         }
1565
1566         if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_QUEUES) {
1567                 iavf_configure_queues(adapter);
1568                 return 0;
1569         }
1570
1571         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_QUEUES) {
1572                 iavf_enable_queues(adapter);
1573                 return 0;
1574         }
1575
1576         if (adapter->aq_required & IAVF_FLAG_AQ_CONFIGURE_RSS) {
                /* This message goes straight to the firmware, not the
                 * PF, so we don't have to set current_op as we will
                 * not get a response through the ARQ.
                 */
                iavf_init_rss(adapter);
                adapter->aq_required &= ~IAVF_FLAG_AQ_CONFIGURE_RSS;
1582                 return 0;
1583         }
1584         if (adapter->aq_required & IAVF_FLAG_AQ_GET_HENA) {
1585                 iavf_get_hena(adapter);
1586                 return 0;
1587         }
1588         if (adapter->aq_required & IAVF_FLAG_AQ_SET_HENA) {
1589                 iavf_set_hena(adapter);
1590                 return 0;
1591         }
1592         if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_KEY) {
1593                 iavf_set_rss_key(adapter);
1594                 return 0;
1595         }
1596         if (adapter->aq_required & IAVF_FLAG_AQ_SET_RSS_LUT) {
1597                 iavf_set_rss_lut(adapter);
1598                 return 0;
1599         }
1600
1601         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_PROMISC) {
1602                 iavf_set_promiscuous(adapter, FLAG_VF_UNICAST_PROMISC |
1603                                        FLAG_VF_MULTICAST_PROMISC);
1604                 return 0;
1605         }
1606
1607         if (adapter->aq_required & IAVF_FLAG_AQ_REQUEST_ALLMULTI) {
1608                 iavf_set_promiscuous(adapter, FLAG_VF_MULTICAST_PROMISC);
1609                 return 0;
1610         }
1611         if ((adapter->aq_required & IAVF_FLAG_AQ_RELEASE_PROMISC) ||
1612             (adapter->aq_required & IAVF_FLAG_AQ_RELEASE_ALLMULTI)) {
1613                 iavf_set_promiscuous(adapter, 0);
1614                 return 0;
1615         }
1616
1617         if (adapter->aq_required & IAVF_FLAG_AQ_ENABLE_CHANNELS) {
1618                 iavf_enable_channels(adapter);
1619                 return 0;
1620         }
1621
1622         if (adapter->aq_required & IAVF_FLAG_AQ_DISABLE_CHANNELS) {
1623                 iavf_disable_channels(adapter);
1624                 return 0;
1625         }
1626         if (adapter->aq_required & IAVF_FLAG_AQ_ADD_CLOUD_FILTER) {
1627                 iavf_add_cloud_filter(adapter);
1628                 return 0;
1629         }
1630
1631         if (adapter->aq_required & IAVF_FLAG_AQ_DEL_CLOUD_FILTER) {
1632                 iavf_del_cloud_filter(adapter);
1633                 return 0;
1634         }
1643         return -EAGAIN;
1644 }
1645
1646 /**
1647  * iavf_startup - first step of driver startup
1648  * @adapter: board private structure
1649  *
 * Function processes the __IAVF_STARTUP driver state.
 * On success the state is changed to __IAVF_INIT_VERSION_CHECK;
 * on failure it returns -EAGAIN.
1653  **/
1654 static int iavf_startup(struct iavf_adapter *adapter)
1655 {
1656         struct pci_dev *pdev = adapter->pdev;
1657         struct iavf_hw *hw = &adapter->hw;
1658         int err;
1659
1660         WARN_ON(adapter->state != __IAVF_STARTUP);
1661
1662         /* driver loaded, probe complete */
1663         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
1664         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
1665         err = iavf_set_mac_type(hw);
1666         if (err) {
1667                 dev_err(&pdev->dev, "Failed to set MAC type (%d)\n", err);
1668                 goto err;
1669         }
1670
1671         err = iavf_check_reset_complete(hw);
1672         if (err) {
1673                 dev_info(&pdev->dev, "Device is still in reset (%d), retrying\n",
1674                          err);
1675                 goto err;
1676         }
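        /* size the admin send queue (requests to the PF) and admin receive
         * queue (responses and events from the PF) before bringing it up
         */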
1677         hw->aq.num_arq_entries = IAVF_AQ_LEN;
1678         hw->aq.num_asq_entries = IAVF_AQ_LEN;
1679         hw->aq.arq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
1680         hw->aq.asq_buf_size = IAVF_MAX_AQ_BUF_SIZE;
1681
1682         err = iavf_init_adminq(hw);
1683         if (err) {
1684                 dev_err(&pdev->dev, "Failed to init Admin Queue (%d)\n", err);
1685                 goto err;
1686         }
1687         err = iavf_send_api_ver(adapter);
1688         if (err) {
1689                 dev_err(&pdev->dev, "Unable to send to PF (%d)\n", err);
1690                 iavf_shutdown_adminq(hw);
1691                 goto err;
1692         }
1693         iavf_change_state(adapter, __IAVF_INIT_VERSION_CHECK);
1694 err:
1695         return err;
1696 }
1697
1698 /**
1699  * iavf_init_version_check - second step of driver startup
1700  * @adapter: board private structure
1701  *
 * Function processes the __IAVF_INIT_VERSION_CHECK driver state.
 * On success the state is changed to __IAVF_INIT_GET_RESOURCES;
 * on failure it returns -EAGAIN.
1705  **/
1706 static int iavf_init_version_check(struct iavf_adapter *adapter)
1707 {
1708         struct pci_dev *pdev = adapter->pdev;
1709         struct iavf_hw *hw = &adapter->hw;
1710         int err = -EAGAIN;
1711
1712         WARN_ON(adapter->state != __IAVF_INIT_VERSION_CHECK);
1713
1714         if (!iavf_asq_done(hw)) {
1715                 dev_err(&pdev->dev, "Admin queue command never completed\n");
1716                 iavf_shutdown_adminq(hw);
1717                 iavf_change_state(adapter, __IAVF_STARTUP);
1718                 goto err;
1719         }
1720
1721         /* aq msg sent, awaiting reply */
1722         err = iavf_verify_api_ver(adapter);
1723         if (err) {
1724                 if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK)
1725                         err = iavf_send_api_ver(adapter);
1726                 else
1727                         dev_err(&pdev->dev, "Unsupported PF API version %d.%d, expected %d.%d\n",
1728                                 adapter->pf_version.major,
1729                                 adapter->pf_version.minor,
1730                                 VIRTCHNL_VERSION_MAJOR,
1731                                 VIRTCHNL_VERSION_MINOR);
1732                 goto err;
1733         }
1734         err = iavf_send_vf_config_msg(adapter);
1735         if (err) {
1736                 dev_err(&pdev->dev, "Unable to send config request (%d)\n",
1737                         err);
1738                 goto err;
1739         }
1740         iavf_change_state(adapter, __IAVF_INIT_GET_RESOURCES);
1741 err:
1742         return err;
1743 }
1744
1745 /**
1746  * iavf_init_get_resources - third step of driver startup
1747  * @adapter: board private structure
1748  *
 * Function processes the __IAVF_INIT_GET_RESOURCES driver state and
 * finishes the driver initialization procedure.
 * On success the state is changed to __IAVF_DOWN;
 * on failure it returns -EAGAIN.
1753  **/
1754 static int iavf_init_get_resources(struct iavf_adapter *adapter)
1755 {
1756         struct net_device *netdev = adapter->netdev;
1757         struct pci_dev *pdev = adapter->pdev;
1758         struct iavf_hw *hw = &adapter->hw;
1759         int err;
1760
1761         WARN_ON(adapter->state != __IAVF_INIT_GET_RESOURCES);
1762         /* aq msg sent, awaiting reply */
1763         if (!adapter->vf_res) {
1764                 adapter->vf_res = kzalloc(IAVF_VIRTCHNL_VF_RESOURCE_SIZE,
1765                                           GFP_KERNEL);
1766                 if (!adapter->vf_res) {
1767                         err = -ENOMEM;
1768                         goto err;
1769                 }
1770         }
1771         err = iavf_get_vf_config(adapter);
1772         if (err == IAVF_ERR_ADMIN_QUEUE_NO_WORK) {
1773                 err = iavf_send_vf_config_msg(adapter);
1774                 goto err;
1775         } else if (err == IAVF_ERR_PARAM) {
1776                 /* We only get ERR_PARAM if the device is in a very bad
1777                  * state or if we've been disabled for previous bad
1778                  * behavior. Either way, we're done now.
1779                  */
1780                 iavf_shutdown_adminq(hw);
1781                 dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
1782                 return 0;
1783         }
1784         if (err) {
1785                 dev_err(&pdev->dev, "Unable to get VF config (%d)\n", err);
1786                 goto err_alloc;
1787         }
1788
1789         err = iavf_process_config(adapter);
1790         if (err)
1791                 goto err_alloc;
1792         adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1793
1794         adapter->flags |= IAVF_FLAG_RX_CSUM_ENABLED;
1795
1796         netdev->netdev_ops = &iavf_netdev_ops;
1797         iavf_set_ethtool_ops(netdev);
1798         netdev->watchdog_timeo = 5 * HZ;
1799
1800         /* MTU range: 68 - 9710 */
1801         netdev->min_mtu = ETH_MIN_MTU;
1802         netdev->max_mtu = IAVF_MAX_RXBUFFER - IAVF_PACKET_HDR_PAD;
1803
1804         if (!is_valid_ether_addr(adapter->hw.mac.addr)) {
1805                 dev_info(&pdev->dev, "Invalid MAC address %pM, using random\n",
1806                          adapter->hw.mac.addr);
1807                 eth_hw_addr_random(netdev);
1808                 ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
1809         } else {
1810                 ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
1811                 ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
1812         }
1813
1814         adapter->tx_desc_count = IAVF_DEFAULT_TXD;
1815         adapter->rx_desc_count = IAVF_DEFAULT_RXD;
1816         err = iavf_init_interrupt_scheme(adapter);
1817         if (err)
1818                 goto err_sw_init;
1819         iavf_map_rings_to_vectors(adapter);
1820         if (adapter->vf_res->vf_cap_flags &
1821                 VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
1822                 adapter->flags |= IAVF_FLAG_WB_ON_ITR_CAPABLE;
1823
1824         err = iavf_request_misc_irq(adapter);
1825         if (err)
1826                 goto err_sw_init;
1827
1828         netif_carrier_off(netdev);
1829         adapter->link_up = false;
1830
        /* hold the RTNL lock to prevent any callbacks between device
         * registration and the moment the driver state is set to __IAVF_DOWN
         */
1834         rtnl_lock();
1835         if (!adapter->netdev_registered) {
1836                 err = register_netdevice(netdev);
1837                 if (err) {
1838                         rtnl_unlock();
1839                         goto err_register;
1840                 }
1841         }
1842
1843         adapter->netdev_registered = true;
1844
1845         netif_tx_stop_all_queues(netdev);
1846         if (CLIENT_ALLOWED(adapter)) {
1847                 err = iavf_lan_add_device(adapter);
1848                 if (err)
1849                         dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
1850                                  err);
1851         }
1852         dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
1853         if (netdev->features & NETIF_F_GRO)
1854                 dev_info(&pdev->dev, "GRO is enabled\n");
1855
1856         iavf_change_state(adapter, __IAVF_DOWN);
1857         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
1858         rtnl_unlock();
1859
1860         iavf_misc_irq_enable(adapter);
1861         wake_up(&adapter->down_waitqueue);
1862
1863         adapter->rss_key = kzalloc(adapter->rss_key_size, GFP_KERNEL);
1864         adapter->rss_lut = kzalloc(adapter->rss_lut_size, GFP_KERNEL);
1865         if (!adapter->rss_key || !adapter->rss_lut) {
1866                 err = -ENOMEM;
1867                 goto err_mem;
1868         }
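        /* RSS_AQ() is true when the PF advertised VIRTCHNL_VF_OFFLOAD_RSS_AQ;
         * in that case RSS is configured later from the AQ command path in
         * iavf_process_aq_command(), otherwise it is programmed right away.
         */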
1869         if (RSS_AQ(adapter))
1870                 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
1871         else
1872                 iavf_init_rss(adapter);
1873
1874         return err;
1875 err_mem:
1876         iavf_free_rss(adapter);
1877 err_register:
1878         iavf_free_misc_irq(adapter);
1879 err_sw_init:
1880         iavf_reset_interrupt_capability(adapter);
1881 err_alloc:
1882         kfree(adapter->vf_res);
1883         adapter->vf_res = NULL;
1884 err:
1885         return err;
1886 }
1887
1888 /**
1889  * iavf_watchdog_task - Periodic call-back task
1890  * @work: pointer to work_struct
1891  **/
1892 static void iavf_watchdog_task(struct work_struct *work)
1893 {
1894         struct iavf_adapter *adapter = container_of(work,
1895                                                     struct iavf_adapter,
1896                                                     watchdog_task.work);
1897         struct iavf_hw *hw = &adapter->hw;
1898         u32 reg_val;
1899
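        /* the critical-task bit serializes this task with the reset and
         * adminq tasks; if another task holds it, just reschedule
         */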
1900         if (test_and_set_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section))
1901                 goto restart_watchdog;
1902
1903         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
1904                 iavf_change_state(adapter, __IAVF_COMM_FAILED);
1905
1906         switch (adapter->state) {
1907         case __IAVF_COMM_FAILED:
1908                 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
1909                           IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
1910                 if (reg_val == VIRTCHNL_VFR_VFACTIVE ||
1911                     reg_val == VIRTCHNL_VFR_COMPLETED) {
1912                         /* A chance for redemption! */
1913                         dev_err(&adapter->pdev->dev,
1914                                 "Hardware came out of reset. Attempting reinit.\n");
1915                         iavf_change_state(adapter, __IAVF_STARTUP);
1916                         adapter->flags &= ~IAVF_FLAG_PF_COMMS_FAILED;
1917                         queue_delayed_work(iavf_wq, &adapter->init_task, 10);
1918                         clear_bit(__IAVF_IN_CRITICAL_TASK,
1919                                   &adapter->crit_section);
1920                         /* Don't reschedule the watchdog, since we've restarted
1921                          * the init task. When init_task contacts the PF and
1922                          * gets everything set up again, it'll restart the
1923                          * watchdog for us. Down, boy. Sit. Stay. Woof.
1924                          */
1925                         return;
1926                 }
1927                 adapter->aq_required = 0;
1928                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1929                 clear_bit(__IAVF_IN_CRITICAL_TASK,
1930                           &adapter->crit_section);
1931                 queue_delayed_work(iavf_wq,
1932                                    &adapter->watchdog_task,
1933                                    msecs_to_jiffies(10));
1934                 goto watchdog_done;
1935         case __IAVF_RESETTING:
1936                 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
1937                 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
1938                 return;
1939         case __IAVF_DOWN:
1940         case __IAVF_DOWN_PENDING:
1941         case __IAVF_TESTING:
1942         case __IAVF_RUNNING:
1943                 if (adapter->current_op) {
1944                         if (!iavf_asq_done(hw)) {
1945                                 dev_dbg(&adapter->pdev->dev,
1946                                         "Admin queue timeout\n");
1947                                 iavf_send_api_ver(adapter);
1948                         }
1949                 } else {
1950                         /* An error will be returned if no commands were
1951                          * processed; use this opportunity to update stats
1952                          */
1953                         if (iavf_process_aq_command(adapter) &&
1954                             adapter->state == __IAVF_RUNNING)
1955                                 iavf_request_stats(adapter);
1956                 }
1957                 break;
1958         case __IAVF_REMOVE:
1959                 clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
1960                 return;
1961         default:
1962                 goto restart_watchdog;
1963         }
1964
        /* check for hw reset; the ARQ enable bit is cleared while a VF
         * reset is in progress
         */
1966         reg_val = rd32(hw, IAVF_VF_ARQLEN1) & IAVF_VF_ARQLEN1_ARQENABLE_MASK;
1967         if (!reg_val) {
1968                 iavf_change_state(adapter, __IAVF_RESETTING);
1969                 adapter->flags |= IAVF_FLAG_RESET_PENDING;
1970                 adapter->aq_required = 0;
1971                 adapter->current_op = VIRTCHNL_OP_UNKNOWN;
1972                 dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
1973                 queue_work(iavf_wq, &adapter->reset_task);
1974                 goto watchdog_done;
1975         }
1976
1977         schedule_delayed_work(&adapter->client_task, msecs_to_jiffies(5));
1978 watchdog_done:
1979         if (adapter->state == __IAVF_RUNNING ||
1980             adapter->state == __IAVF_COMM_FAILED)
1981                 iavf_detect_recover_hung(&adapter->vsi);
1982         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
1983 restart_watchdog:
1984         if (adapter->aq_required)
1985                 queue_delayed_work(iavf_wq, &adapter->watchdog_task,
1986                                    msecs_to_jiffies(20));
1987         else
1988                 queue_delayed_work(iavf_wq, &adapter->watchdog_task, HZ * 2);
1989         queue_work(iavf_wq, &adapter->adminq_task);
1990 }
1991
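/**
 * iavf_disable_vf - disable VF after a failed reset
 * @adapter: board private structure
 *
 * Called when a reset never completes. Frees all traffic resources, drops
 * the MAC/VLAN/cloud filter lists and leaves the adapter in __IAVF_DOWN.
 **/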
1992 static void iavf_disable_vf(struct iavf_adapter *adapter)
1993 {
1994         struct iavf_mac_filter *f, *ftmp;
1995         struct iavf_vlan_filter *fv, *fvtmp;
1996         struct iavf_cloud_filter *cf, *cftmp;
1997
1998         adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
1999
2000         /* We don't use netif_running() because it may be true prior to
2001          * ndo_open() returning, so we can't assume it means all our open
2002          * tasks have finished, since we're not holding the rtnl_lock here.
2003          */
2004         if (adapter->state == __IAVF_RUNNING) {
2005                 set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
2006                 netif_carrier_off(adapter->netdev);
2007                 netif_tx_disable(adapter->netdev);
2008                 adapter->link_up = false;
2009                 iavf_napi_disable_all(adapter);
2010                 iavf_irq_disable(adapter);
2011                 iavf_free_traffic_irqs(adapter);
2012                 iavf_free_all_tx_resources(adapter);
2013                 iavf_free_all_rx_resources(adapter);
2014         }
2015
2016         spin_lock_bh(&adapter->mac_vlan_list_lock);
2017
2018         /* Delete all of the filters */
2019         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2020                 list_del(&f->list);
2021                 kfree(f);
2022         }
2023
2024         list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list, list) {
2025                 list_del(&fv->list);
2026                 kfree(fv);
2027         }
2028
2029         spin_unlock_bh(&adapter->mac_vlan_list_lock);
2030
2031         spin_lock_bh(&adapter->cloud_filter_list_lock);
2032         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
2033                 list_del(&cf->list);
2034                 kfree(cf);
2035                 adapter->num_cloud_filters--;
2036         }
2037         spin_unlock_bh(&adapter->cloud_filter_list_lock);
2038
2039         iavf_free_misc_irq(adapter);
2040         iavf_reset_interrupt_capability(adapter);
2041         iavf_free_q_vectors(adapter);
2042         iavf_free_queues(adapter);
2043         memset(adapter->vf_res, 0, IAVF_VIRTCHNL_VF_RESOURCE_SIZE);
2044         iavf_shutdown_adminq(&adapter->hw);
2045         adapter->netdev->flags &= ~IFF_UP;
2046         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2047         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2048         iavf_change_state(adapter, __IAVF_DOWN);
2049         wake_up(&adapter->down_waitqueue);
2050         dev_info(&adapter->pdev->dev, "Reset task did not complete, VF disabled\n");
2051 }
2052
2053 /**
2054  * iavf_reset_task - Call-back task to handle hardware reset
2055  * @work: pointer to work_struct
2056  *
2057  * During reset we need to shut down and reinitialize the admin queue
2058  * before we can use it to communicate with the PF again. We also clear
2059  * and reinit the rings because that context is lost as well.
2060  **/
2061 static void iavf_reset_task(struct work_struct *work)
2062 {
2063         struct iavf_adapter *adapter = container_of(work,
2064                                                       struct iavf_adapter,
2065                                                       reset_task);
2066         struct virtchnl_vf_resource *vfres = adapter->vf_res;
2067         struct net_device *netdev = adapter->netdev;
2068         struct iavf_hw *hw = &adapter->hw;
2069         struct iavf_mac_filter *f, *ftmp;
2070         struct iavf_vlan_filter *vlf;
2071         struct iavf_cloud_filter *cf;
2072         u32 reg_val;
2073         int i = 0, err;
2074         bool running;
2075
        /* When the device is being removed it doesn't make sense to run the
         * reset task; just return in that case.
         */
2079         if (test_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section))
2080                 return;
2081
2082         if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200)) {
2083                 schedule_work(&adapter->reset_task);
2084                 return;
2085         }
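        /* wait for the client task to finish; the reset must not race with
         * the client open/close notifications issued below
         */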
2086         while (test_and_set_bit(__IAVF_IN_CLIENT_TASK,
2087                                 &adapter->crit_section))
2088                 usleep_range(500, 1000);
2089         if (CLIENT_ENABLED(adapter)) {
2090                 adapter->flags &= ~(IAVF_FLAG_CLIENT_NEEDS_OPEN |
2091                                     IAVF_FLAG_CLIENT_NEEDS_CLOSE |
2092                                     IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS |
2093                                     IAVF_FLAG_SERVICE_CLIENT_REQUESTED);
2094                 cancel_delayed_work_sync(&adapter->client_task);
2095                 iavf_notify_client_close(&adapter->vsi, true);
2096         }
2097         iavf_misc_irq_disable(adapter);
2098         if (adapter->flags & IAVF_FLAG_RESET_NEEDED) {
2099                 adapter->flags &= ~IAVF_FLAG_RESET_NEEDED;
2100                 /* Restart the AQ here. If we have been reset but didn't
2101                  * detect it, or if the PF had to reinit, our AQ will be hosed.
2102                  */
2103                 iavf_shutdown_adminq(hw);
2104                 iavf_init_adminq(hw);
2105                 iavf_request_reset(adapter);
2106         }
2107         adapter->flags |= IAVF_FLAG_RESET_PENDING;
2108
2109         /* poll until we see the reset actually happen */
2110         for (i = 0; i < IAVF_RESET_WAIT_DETECTED_COUNT; i++) {
2111                 reg_val = rd32(hw, IAVF_VF_ARQLEN1) &
2112                           IAVF_VF_ARQLEN1_ARQENABLE_MASK;
2113                 if (!reg_val)
2114                         break;
2115                 usleep_range(5000, 10000);
2116         }
2117         if (i == IAVF_RESET_WAIT_DETECTED_COUNT) {
2118                 dev_info(&adapter->pdev->dev, "Never saw reset\n");
2119                 goto continue_reset; /* act like the reset happened */
2120         }
2121
2122         /* wait until the reset is complete and the PF is responding to us */
2123         for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
2124                 /* sleep first to make sure a minimum wait time is met */
2125                 msleep(IAVF_RESET_WAIT_MS);
2126
2127                 reg_val = rd32(hw, IAVF_VFGEN_RSTAT) &
2128                           IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
2129                 if (reg_val == VIRTCHNL_VFR_VFACTIVE)
2130                         break;
2131         }
2132
2133         pci_set_master(adapter->pdev);
2134         pci_restore_msi_state(adapter->pdev);
2135
2136         if (i == IAVF_RESET_WAIT_COMPLETE_COUNT) {
2137                 dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
2138                         reg_val);
2139                 iavf_disable_vf(adapter);
2140                 clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2141                 return; /* Do not attempt to reinit. It's dead, Jim. */
2142         }
2143
2144 continue_reset:
2145         /* We don't use netif_running() because it may be true prior to
2146          * ndo_open() returning, so we can't assume it means all our open
2147          * tasks have finished, since we're not holding the rtnl_lock here.
2148          */
2149         running = ((adapter->state == __IAVF_RUNNING) ||
2150                    (adapter->state == __IAVF_RESETTING));
2151
2152         if (running) {
2153                 netif_carrier_off(netdev);
2154                 netif_tx_stop_all_queues(netdev);
2155                 adapter->link_up = false;
2156                 iavf_napi_disable_all(adapter);
2157         }
2158         iavf_irq_disable(adapter);
2159
2160         iavf_change_state(adapter, __IAVF_RESETTING);
2161         adapter->flags &= ~IAVF_FLAG_RESET_PENDING;
2162
2163         /* free the Tx/Rx rings and descriptors, might be better to just
2164          * re-use them sometime in the future
2165          */
2166         iavf_free_all_rx_resources(adapter);
2167         iavf_free_all_tx_resources(adapter);
2168
2169         adapter->flags |= IAVF_FLAG_QUEUES_DISABLED;
2170         /* kill and reinit the admin queue */
2171         iavf_shutdown_adminq(hw);
2172         adapter->current_op = VIRTCHNL_OP_UNKNOWN;
2173         err = iavf_init_adminq(hw);
2174         if (err)
2175                 dev_info(&adapter->pdev->dev, "Failed to init adminq: %d\n",
2176                          err);
2177         adapter->aq_required = 0;
2178
2179         if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
2180                 err = iavf_reinit_interrupt_scheme(adapter);
2181                 if (err)
2182                         goto reset_err;
2183         }
2184
2185         if (RSS_AQ(adapter)) {
2186                 adapter->aq_required |= IAVF_FLAG_AQ_CONFIGURE_RSS;
2187         } else {
2188                 err = iavf_init_rss(adapter);
2189                 if (err)
2190                         goto reset_err;
2191         }
2192
2193         adapter->aq_required |= IAVF_FLAG_AQ_GET_CONFIG;
2194         adapter->aq_required |= IAVF_FLAG_AQ_MAP_VECTORS;
2195
2196         spin_lock_bh(&adapter->mac_vlan_list_lock);
2197
2198         /* Delete filter for the current MAC address, it could have
2199          * been changed by the PF via administratively set MAC.
2200          * Will be re-added via VIRTCHNL_OP_GET_VF_RESOURCES.
2201          */
2202         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
2203                 if (ether_addr_equal(f->macaddr, adapter->hw.mac.addr)) {
2204                         list_del(&f->list);
2205                         kfree(f);
2206                 }
2207         }
2208         /* re-add all MAC filters */
2209         list_for_each_entry(f, &adapter->mac_filter_list, list) {
2210                 f->add = true;
2211         }
2212         /* re-add all VLAN filters */
2213         list_for_each_entry(vlf, &adapter->vlan_filter_list, list) {
2214                 vlf->add = true;
2215         }
2216
2217         spin_unlock_bh(&adapter->mac_vlan_list_lock);
2218
2219         /* check if TCs are running and re-add all cloud filters */
2220         spin_lock_bh(&adapter->cloud_filter_list_lock);
2221         if ((vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ) &&
2222             adapter->num_tc) {
2223                 list_for_each_entry(cf, &adapter->cloud_filter_list, list) {
2224                         cf->add = true;
2225                 }
2226         }
2227         spin_unlock_bh(&adapter->cloud_filter_list_lock);
2228
2229         adapter->aq_required |= IAVF_FLAG_AQ_ADD_MAC_FILTER;
2230         adapter->aq_required |= IAVF_FLAG_AQ_ADD_VLAN_FILTER;
2231         adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
2232         iavf_misc_irq_enable(adapter);
2233
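        /* kick the watchdog so the aq_required work flagged above (get
         * config, map vectors, re-add filters) is sent to the PF promptly
         */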
2234         mod_delayed_work(iavf_wq, &adapter->watchdog_task, 2);
2235
2236         /* We were running when the reset started, so we need to restore some
2237          * state here.
2238          */
2239         if (running) {
2240                 /* allocate transmit descriptors */
2241                 err = iavf_setup_all_tx_resources(adapter);
2242                 if (err)
2243                         goto reset_err;
2244
2245                 /* allocate receive descriptors */
2246                 err = iavf_setup_all_rx_resources(adapter);
2247                 if (err)
2248                         goto reset_err;
2249
2250                 if (adapter->flags & IAVF_FLAG_REINIT_ITR_NEEDED) {
2251                         err = iavf_request_traffic_irqs(adapter, netdev->name);
2252                         if (err)
2253                                 goto reset_err;
2254
2255                         adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
2256                 }
2257
2258                 iavf_configure(adapter);
2259
2260                 /* iavf_up_complete() will switch device back
2261                  * to __IAVF_RUNNING
2262                  */
2263                 iavf_up_complete(adapter);
2264
2265                 iavf_irq_enable(adapter, true);
2266         } else {
2267                 iavf_change_state(adapter, __IAVF_DOWN);
2268                 wake_up(&adapter->down_waitqueue);
2269         }
2270         clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2271         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2272
2273         return;
2274 reset_err:
2275         clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2276         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2277         dev_err(&adapter->pdev->dev, "failed to allocate resources during reinit\n");
2278         iavf_close(netdev);
2279 }
2280
2281 /**
2282  * iavf_adminq_task - worker thread to clean the admin queue
2283  * @work: pointer to work_struct containing our data
2284  **/
2285 static void iavf_adminq_task(struct work_struct *work)
2286 {
2287         struct iavf_adapter *adapter =
2288                 container_of(work, struct iavf_adapter, adminq_task);
2289         struct iavf_hw *hw = &adapter->hw;
2290         struct iavf_arq_event_info event;
2291         enum virtchnl_ops v_op;
2292         enum iavf_status ret, v_ret;
2293         u32 val, oldval;
2294         u16 pending;
2295
2296         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED)
2297                 goto out;
2298
2299         event.buf_len = IAVF_MAX_AQ_BUF_SIZE;
2300         event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL);
2301         if (!event.msg_buf)
2302                 goto out;
2303
2304         if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 200))
2305                 goto freedom;
2306         do {
2307                 ret = iavf_clean_arq_element(hw, &event, &pending);
2308                 v_op = (enum virtchnl_ops)le32_to_cpu(event.desc.cookie_high);
2309                 v_ret = (enum iavf_status)le32_to_cpu(event.desc.cookie_low);
2310
2311                 if (ret || !v_op)
2312                         break; /* No event to process or error cleaning ARQ */
2313
2314                 iavf_virtchnl_completion(adapter, v_op, v_ret, event.msg_buf,
2315                                          event.msg_len);
2316                 if (pending != 0)
2317                         memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE);
2318         } while (pending);
2319         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
2320
2321         if ((adapter->flags &
2322              (IAVF_FLAG_RESET_PENDING | IAVF_FLAG_RESET_NEEDED)) ||
2323             adapter->state == __IAVF_RESETTING)
2324                 goto freedom;
2325
2326         /* check for error indications */
2327         val = rd32(hw, hw->aq.arq.len);
2328         if (val == 0xdeadbeef || val == 0xffffffff) /* device in reset */
2329                 goto freedom;
2330         oldval = val;
2331         if (val & IAVF_VF_ARQLEN1_ARQVFE_MASK) {
2332                 dev_info(&adapter->pdev->dev, "ARQ VF Error detected\n");
2333                 val &= ~IAVF_VF_ARQLEN1_ARQVFE_MASK;
2334         }
2335         if (val & IAVF_VF_ARQLEN1_ARQOVFL_MASK) {
2336                 dev_info(&adapter->pdev->dev, "ARQ Overflow Error detected\n");
2337                 val &= ~IAVF_VF_ARQLEN1_ARQOVFL_MASK;
2338         }
2339         if (val & IAVF_VF_ARQLEN1_ARQCRIT_MASK) {
2340                 dev_info(&adapter->pdev->dev, "ARQ Critical Error detected\n");
2341                 val &= ~IAVF_VF_ARQLEN1_ARQCRIT_MASK;
2342         }
2343         if (oldval != val)
2344                 wr32(hw, hw->aq.arq.len, val);
2345
2346         val = rd32(hw, hw->aq.asq.len);
2347         oldval = val;
2348         if (val & IAVF_VF_ATQLEN1_ATQVFE_MASK) {
2349                 dev_info(&adapter->pdev->dev, "ASQ VF Error detected\n");
2350                 val &= ~IAVF_VF_ATQLEN1_ATQVFE_MASK;
2351         }
2352         if (val & IAVF_VF_ATQLEN1_ATQOVFL_MASK) {
2353                 dev_info(&adapter->pdev->dev, "ASQ Overflow Error detected\n");
2354                 val &= ~IAVF_VF_ATQLEN1_ATQOVFL_MASK;
2355         }
2356         if (val & IAVF_VF_ATQLEN1_ATQCRIT_MASK) {
2357                 dev_info(&adapter->pdev->dev, "ASQ Critical Error detected\n");
2358                 val &= ~IAVF_VF_ATQLEN1_ATQCRIT_MASK;
2359         }
2360         if (oldval != val)
2361                 wr32(hw, hw->aq.asq.len, val);
2362
2363 freedom:
2364         kfree(event.msg_buf);
2365 out:
2366         /* re-enable Admin queue interrupt cause */
2367         iavf_misc_irq_enable(adapter);
2368 }
2369
2370 /**
2371  * iavf_client_task - worker thread to perform client work
2372  * @work: pointer to work_struct containing our data
2373  *
2374  * This task handles client interactions. Because client calls can be
2375  * reentrant, we can't handle them in the watchdog.
2376  **/
2377 static void iavf_client_task(struct work_struct *work)
2378 {
2379         struct iavf_adapter *adapter =
2380                 container_of(work, struct iavf_adapter, client_task.work);
2381
2382         /* If we can't get the client bit, just give up. We'll be rescheduled
2383          * later.
2384          */
2385
2386         if (test_and_set_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section))
2387                 return;
2388
2389         if (adapter->flags & IAVF_FLAG_SERVICE_CLIENT_REQUESTED) {
2390                 iavf_client_subtask(adapter);
2391                 adapter->flags &= ~IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
2392                 goto out;
2393         }
2394         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS) {
2395                 iavf_notify_client_l2_params(&adapter->vsi);
2396                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_L2_PARAMS;
2397                 goto out;
2398         }
2399         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_CLOSE) {
2400                 iavf_notify_client_close(&adapter->vsi, false);
2401                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_CLOSE;
2402                 goto out;
2403         }
2404         if (adapter->flags & IAVF_FLAG_CLIENT_NEEDS_OPEN) {
2405                 iavf_notify_client_open(&adapter->vsi);
2406                 adapter->flags &= ~IAVF_FLAG_CLIENT_NEEDS_OPEN;
2407         }
2408 out:
2409         clear_bit(__IAVF_IN_CLIENT_TASK, &adapter->crit_section);
2410 }
2411
2412 /**
2413  * iavf_free_all_tx_resources - Free Tx Resources for All Queues
2414  * @adapter: board private structure
2415  *
2416  * Free all transmit software resources
2417  **/
2418 void iavf_free_all_tx_resources(struct iavf_adapter *adapter)
2419 {
2420         int i;
2421
2422         if (!adapter->tx_rings)
2423                 return;
2424
2425         for (i = 0; i < adapter->num_active_queues; i++)
2426                 if (adapter->tx_rings[i].desc)
2427                         iavf_free_tx_resources(&adapter->tx_rings[i]);
2428 }
2429
2430 /**
2431  * iavf_setup_all_tx_resources - allocate all queues Tx resources
2432  * @adapter: board private structure
2433  *
2434  * If this function returns with an error, then it's possible one or
2435  * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
2437  *
2438  * Return 0 on success, negative on failure
2439  **/
2440 static int iavf_setup_all_tx_resources(struct iavf_adapter *adapter)
2441 {
2442         int i, err = 0;
2443
2444         for (i = 0; i < adapter->num_active_queues; i++) {
2445                 adapter->tx_rings[i].count = adapter->tx_desc_count;
2446                 err = iavf_setup_tx_descriptors(&adapter->tx_rings[i]);
2447                 if (!err)
2448                         continue;
2449                 dev_err(&adapter->pdev->dev,
2450                         "Allocation for Tx Queue %u failed\n", i);
2451                 break;
2452         }
2453
2454         return err;
2455 }
2456
2457 /**
2458  * iavf_setup_all_rx_resources - allocate all queues Rx resources
2459  * @adapter: board private structure
2460  *
2461  * If this function returns with an error, then it's possible one or
2462  * more of the rings is populated (while the rest are not).  It is the
 * caller's duty to clean those orphaned rings.
2464  *
2465  * Return 0 on success, negative on failure
2466  **/
2467 static int iavf_setup_all_rx_resources(struct iavf_adapter *adapter)
2468 {
2469         int i, err = 0;
2470
2471         for (i = 0; i < adapter->num_active_queues; i++) {
2472                 adapter->rx_rings[i].count = adapter->rx_desc_count;
2473                 err = iavf_setup_rx_descriptors(&adapter->rx_rings[i]);
2474                 if (!err)
2475                         continue;
2476                 dev_err(&adapter->pdev->dev,
2477                         "Allocation for Rx Queue %u failed\n", i);
2478                 break;
2479         }
2480         return err;
2481 }
2482
2483 /**
2484  * iavf_free_all_rx_resources - Free Rx Resources for All Queues
2485  * @adapter: board private structure
2486  *
2487  * Free all receive software resources
2488  **/
2489 void iavf_free_all_rx_resources(struct iavf_adapter *adapter)
2490 {
2491         int i;
2492
2493         if (!adapter->rx_rings)
2494                 return;
2495
2496         for (i = 0; i < adapter->num_active_queues; i++)
2497                 if (adapter->rx_rings[i].desc)
2498                         iavf_free_rx_resources(&adapter->rx_rings[i]);
2499 }
2500
2501 /**
2502  * iavf_validate_tx_bandwidth - validate the max Tx bandwidth
2503  * @adapter: board private structure
 * @max_tx_rate: requested max Tx bandwidth, in Mbps
2505  **/
2506 static int iavf_validate_tx_bandwidth(struct iavf_adapter *adapter,
2507                                       u64 max_tx_rate)
2508 {
2509         int speed = 0, ret = 0;
2510
2511         if (ADV_LINK_SUPPORT(adapter)) {
2512                 if (adapter->link_speed_mbps < U32_MAX) {
2513                         speed = adapter->link_speed_mbps;
2514                         goto validate_bw;
2515                 } else {
2516                         dev_err(&adapter->pdev->dev, "Unknown link speed\n");
2517                         return -EINVAL;
2518                 }
2519         }
2520
2521         switch (adapter->link_speed) {
2522         case VIRTCHNL_LINK_SPEED_40GB:
2523                 speed = SPEED_40000;
2524                 break;
2525         case VIRTCHNL_LINK_SPEED_25GB:
2526                 speed = SPEED_25000;
2527                 break;
2528         case VIRTCHNL_LINK_SPEED_20GB:
2529                 speed = SPEED_20000;
2530                 break;
2531         case VIRTCHNL_LINK_SPEED_10GB:
2532                 speed = SPEED_10000;
2533                 break;
2534         case VIRTCHNL_LINK_SPEED_5GB:
2535                 speed = SPEED_5000;
2536                 break;
2537         case VIRTCHNL_LINK_SPEED_2_5GB:
2538                 speed = SPEED_2500;
2539                 break;
2540         case VIRTCHNL_LINK_SPEED_1GB:
2541                 speed = SPEED_1000;
2542                 break;
2543         case VIRTCHNL_LINK_SPEED_100MB:
2544                 speed = SPEED_100;
2545                 break;
2546         default:
2547                 break;
2548         }
2549
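        /* Both speed and max_tx_rate are in Mbps here, so the comparison
         * below is unit-consistent; e.g. a request totalling 12000 Mbps is
         * rejected on a 10G link (speed == SPEED_10000).
         */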
2550 validate_bw:
2551         if (max_tx_rate > speed) {
2552                 dev_err(&adapter->pdev->dev,
2553                         "Invalid tx rate specified\n");
2554                 ret = -EINVAL;
2555         }
2556
2557         return ret;
2558 }
2559
2560 /**
 * iavf_validate_ch_config - validate queue mapping info
2562  * @adapter: board private structure
2563  * @mqprio_qopt: queue parameters
2564  *
2565  * This function validates if the config provided by the user to
2566  * configure queue channels is valid or not. Returns 0 on a valid
2567  * config.
2568  **/
2569 static int iavf_validate_ch_config(struct iavf_adapter *adapter,
2570                                    struct tc_mqprio_qopt_offload *mqprio_qopt)
2571 {
2572         u64 total_max_rate = 0;
2573         u32 tx_rate_rem = 0;
2574         int i, num_qps = 0;
2575         u64 tx_rate = 0;
2576         int ret = 0;
2577
2578         if (mqprio_qopt->qopt.num_tc > IAVF_MAX_TRAFFIC_CLASS ||
2579             mqprio_qopt->qopt.num_tc < 1)
2580                 return -EINVAL;
2581
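        /* TCs must partition the queues contiguously: each TC's queue offset
         * has to equal the number of queues consumed by the TCs before it.
         */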
        for (i = 0; i < mqprio_qopt->qopt.num_tc; i++) {
2583                 if (!mqprio_qopt->qopt.count[i] ||
2584                     mqprio_qopt->qopt.offset[i] != num_qps)
2585                         return -EINVAL;
2586                 if (mqprio_qopt->min_rate[i]) {
2587                         dev_err(&adapter->pdev->dev,
2588                                 "Invalid min tx rate (greater than 0) specified for TC%d\n",
2589                                 i);
2590                         return -EINVAL;
2591                 }
2592
                /* mqprio max_rate is given in bytes/s; convert to Mbps */
2594                 tx_rate = div_u64(mqprio_qopt->max_rate[i],
2595                                   IAVF_MBPS_DIVISOR);
2596
2597                 if (mqprio_qopt->max_rate[i] &&
2598                     tx_rate < IAVF_MBPS_QUANTA) {
2599                         dev_err(&adapter->pdev->dev,
2600                                 "Invalid max tx rate for TC%d, minimum %dMbps\n",
2601                                 i, IAVF_MBPS_QUANTA);
2602                         return -EINVAL;
2603                 }
2604
2605                 (void)div_u64_rem(tx_rate, IAVF_MBPS_QUANTA, &tx_rate_rem);
2606
2607                 if (tx_rate_rem != 0) {
2608                         dev_err(&adapter->pdev->dev,
2609                                 "Invalid max tx rate for TC%d, not divisible by %d\n",
2610                                 i, IAVF_MBPS_QUANTA);
2611                         return -EINVAL;
2612                 }
2613
2614                 total_max_rate += tx_rate;
2615                 num_qps += mqprio_qopt->qopt.count[i];
2616         }
2617         if (num_qps > adapter->num_active_queues) {
2618                 dev_err(&adapter->pdev->dev,
2619                         "Cannot support requested number of queues\n");
2620                 return -EINVAL;
2621         }
2622
2623         ret = iavf_validate_tx_bandwidth(adapter, total_max_rate);
2624         return ret;
2625 }
2626
2627 /**
2628  * iavf_del_all_cloud_filters - delete all cloud filters on the traffic classes
2629  * @adapter: board private structure
2630  **/
2631 static void iavf_del_all_cloud_filters(struct iavf_adapter *adapter)
2632 {
2633         struct iavf_cloud_filter *cf, *cftmp;
2634
2635         spin_lock_bh(&adapter->cloud_filter_list_lock);
2636         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list,
2637                                  list) {
2638                 list_del(&cf->list);
2639                 kfree(cf);
2640                 adapter->num_cloud_filters--;
2641         }
2642         spin_unlock_bh(&adapter->cloud_filter_list_lock);
2643 }
2644
2645 /**
2646  * __iavf_setup_tc - configure multiple traffic classes
2647  * @netdev: network interface device structure
2648  * @type_data: tc offload data
2649  *
2650  * This function processes the config information provided by the
2651  * user to configure traffic classes/queue channels and packages the
2652  * information to request the PF to setup traffic classes.
2653  *
2654  * Returns 0 on success.
2655  **/
2656 static int __iavf_setup_tc(struct net_device *netdev, void *type_data)
2657 {
2658         struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
2659         struct iavf_adapter *adapter = netdev_priv(netdev);
2660         struct virtchnl_vf_resource *vfres = adapter->vf_res;
2661         u8 num_tc = 0, total_qps = 0;
2662         int ret = 0, netdev_tc = 0;
2663         u64 max_tx_rate;
2664         u16 mode;
2665         int i;
2666
2667         num_tc = mqprio_qopt->qopt.num_tc;
2668         mode = mqprio_qopt->mode;
2669
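        /* Illustrative example: a request such as
         *   tc qdisc add dev <iface> root mqprio num_tc 2 \
         *      map 0 0 0 0 1 1 1 1 queues 4@0 4@4 hw 1 mode channel
         * reaches this function via ndo_setup_tc(TC_SETUP_QDISC_MQPRIO).
         */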
2670         /* delete queue_channel */
2671         if (!mqprio_qopt->qopt.hw) {
2672                 if (adapter->ch_config.state == __IAVF_TC_RUNNING) {
2673                         /* reset the tc configuration */
2674                         netdev_reset_tc(netdev);
2675                         adapter->num_tc = 0;
2676                         netif_tx_stop_all_queues(netdev);
2677                         netif_tx_disable(netdev);
2678                         iavf_del_all_cloud_filters(adapter);
2679                         adapter->aq_required = IAVF_FLAG_AQ_DISABLE_CHANNELS;
2680                         goto exit;
2681                 } else {
2682                         return -EINVAL;
2683                 }
2684         }
2685
2686         /* add queue channel */
2687         if (mode == TC_MQPRIO_MODE_CHANNEL) {
2688                 if (!(vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)) {
2689                         dev_err(&adapter->pdev->dev, "ADq not supported\n");
2690                         return -EOPNOTSUPP;
2691                 }
2692                 if (adapter->ch_config.state != __IAVF_TC_INVALID) {
2693                         dev_err(&adapter->pdev->dev, "TC configuration already exists\n");
2694                         return -EINVAL;
2695                 }
2696
2697                 ret = iavf_validate_ch_config(adapter, mqprio_qopt);
2698                 if (ret)
2699                         return ret;
2700                 /* Return if same TC config is requested */
2701                 if (adapter->num_tc == num_tc)
2702                         return 0;
2703                 adapter->num_tc = num_tc;
2704
2705                 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2706                         if (i < num_tc) {
2707                                 adapter->ch_config.ch_info[i].count =
2708                                         mqprio_qopt->qopt.count[i];
2709                                 adapter->ch_config.ch_info[i].offset =
2710                                         mqprio_qopt->qopt.offset[i];
2711                                 total_qps += mqprio_qopt->qopt.count[i];
2712                                 max_tx_rate = mqprio_qopt->max_rate[i];
2713                                 /* convert to Mbps */
2714                                 max_tx_rate = div_u64(max_tx_rate,
2715                                                       IAVF_MBPS_DIVISOR);
2716                                 adapter->ch_config.ch_info[i].max_tx_rate =
2717                                         max_tx_rate;
2718                         } else {
2719                                 adapter->ch_config.ch_info[i].count = 1;
2720                                 adapter->ch_config.ch_info[i].offset = 0;
2721                         }
2722                 }
2723                 adapter->ch_config.total_qps = total_qps;
2724                 netif_tx_stop_all_queues(netdev);
2725                 netif_tx_disable(netdev);
2726                 adapter->aq_required |= IAVF_FLAG_AQ_ENABLE_CHANNELS;
2727                 netdev_reset_tc(netdev);
2728                 /* Report the tc mapping up the stack */
2729                 netdev_set_num_tc(adapter->netdev, num_tc);
2730                 for (i = 0; i < IAVF_MAX_TRAFFIC_CLASS; i++) {
2731                         u16 qcount = mqprio_qopt->qopt.count[i];
2732                         u16 qoffset = mqprio_qopt->qopt.offset[i];
2733
2734                         if (i < num_tc)
2735                                 netdev_set_tc_queue(netdev, netdev_tc++, qcount,
2736                                                     qoffset);
2737                 }
2738         }
2739 exit:
2740         return ret;
2741 }
2742
2743 /**
2744  * iavf_parse_cls_flower - Parse tc flower filters provided by kernel
2745  * @adapter: board private structure
2746  * @f: pointer to struct flow_cls_offload
2747  * @filter: pointer to cloud filter structure
2748  */
2749 static int iavf_parse_cls_flower(struct iavf_adapter *adapter,
2750                                  struct flow_cls_offload *f,
2751                                  struct iavf_cloud_filter *filter)
2752 {
2753         struct flow_rule *rule = flow_cls_offload_flow_rule(f);
2754         struct flow_dissector *dissector = rule->match.dissector;
2755         u16 n_proto_mask = 0;
2756         u16 n_proto_key = 0;
2757         u8 field_flags = 0;
2758         u16 addr_type = 0;
2759         u16 n_proto = 0;
2760         int i = 0;
2761         struct virtchnl_filter *vf = &filter->f;
2762
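        /* Walk each dissector key the classifier used and translate it into
         * the virtchnl_filter key/mask representation the PF understands;
         * unsupported keys or partial masks are rejected outright.
         */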
2763         if (dissector->used_keys &
2764             ~(BIT(FLOW_DISSECTOR_KEY_CONTROL) |
2765               BIT(FLOW_DISSECTOR_KEY_BASIC) |
2766               BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
2767               BIT(FLOW_DISSECTOR_KEY_VLAN) |
2768               BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
2769               BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
2770               BIT(FLOW_DISSECTOR_KEY_PORTS) |
2771               BIT(FLOW_DISSECTOR_KEY_ENC_KEYID))) {
2772                 dev_err(&adapter->pdev->dev, "Unsupported key used: 0x%x\n",
2773                         dissector->used_keys);
2774                 return -EOPNOTSUPP;
2775         }
2776
2777         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
2778                 struct flow_match_enc_keyid match;
2779
2780                 flow_rule_match_enc_keyid(rule, &match);
2781                 if (match.mask->keyid != 0)
2782                         field_flags |= IAVF_CLOUD_FIELD_TEN_ID;
2783         }
2784
2785         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
2786                 struct flow_match_basic match;
2787
2788                 flow_rule_match_basic(rule, &match);
2789                 n_proto_key = ntohs(match.key->n_proto);
2790                 n_proto_mask = ntohs(match.mask->n_proto);
2791
2792                 if (n_proto_key == ETH_P_ALL) {
2793                         n_proto_key = 0;
2794                         n_proto_mask = 0;
2795                 }
2796                 n_proto = n_proto_key & n_proto_mask;
2797                 if (n_proto != ETH_P_IP && n_proto != ETH_P_IPV6)
2798                         return -EINVAL;
2799                 if (n_proto == ETH_P_IPV6) {
2800                         /* specify flow type as TCP IPv6 */
2801                         vf->flow_type = VIRTCHNL_TCP_V6_FLOW;
2802                 }
2803
2804                 if (match.key->ip_proto != IPPROTO_TCP) {
2805                         dev_info(&adapter->pdev->dev, "Only TCP transport is supported\n");
2806                         return -EINVAL;
2807                 }
2808         }
2809
2810         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
2811                 struct flow_match_eth_addrs match;
2812
2813                 flow_rule_match_eth_addrs(rule, &match);
2814
                /* use is_broadcast and is_zero to check for all-0xff or all-0 masks */
2816                 if (!is_zero_ether_addr(match.mask->dst)) {
2817                         if (is_broadcast_ether_addr(match.mask->dst)) {
2818                                 field_flags |= IAVF_CLOUD_FIELD_OMAC;
2819                         } else {
2820                                 dev_err(&adapter->pdev->dev, "Bad ether dest mask %pM\n",
2821                                         match.mask->dst);
2822                                 return IAVF_ERR_CONFIG;
2823                         }
2824                 }
2825
2826                 if (!is_zero_ether_addr(match.mask->src)) {
2827                         if (is_broadcast_ether_addr(match.mask->src)) {
2828                                 field_flags |= IAVF_CLOUD_FIELD_IMAC;
2829                         } else {
2830                                 dev_err(&adapter->pdev->dev, "Bad ether src mask %pM\n",
2831                                         match.mask->src);
2832                                 return IAVF_ERR_CONFIG;
2833                         }
2834                 }
2835
2836                 if (!is_zero_ether_addr(match.key->dst))
2837                         if (is_valid_ether_addr(match.key->dst) ||
2838                             is_multicast_ether_addr(match.key->dst)) {
2839                                 /* set the mask if a valid dst_mac address */
2840                                 for (i = 0; i < ETH_ALEN; i++)
2841                                         vf->mask.tcp_spec.dst_mac[i] |= 0xff;
2842                                 ether_addr_copy(vf->data.tcp_spec.dst_mac,
2843                                                 match.key->dst);
2844                         }
2845
                if (!is_zero_ether_addr(match.key->src))
                        if (is_valid_ether_addr(match.key->src) ||
                            is_multicast_ether_addr(match.key->src)) {
                                /* set the mask if a valid src_mac address */
                                for (i = 0; i < ETH_ALEN; i++)
                                        vf->mask.tcp_spec.src_mac[i] |= 0xff;
                                ether_addr_copy(vf->data.tcp_spec.src_mac,
                                                match.key->src);
                        }
2855         }
2856
2857         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
2858                 struct flow_match_vlan match;
2859
2860                 flow_rule_match_vlan(rule, &match);
2861                 if (match.mask->vlan_id) {
2862                         if (match.mask->vlan_id == VLAN_VID_MASK) {
2863                                 field_flags |= IAVF_CLOUD_FIELD_IVLAN;
2864                         } else {
2865                                 dev_err(&adapter->pdev->dev, "Bad vlan mask %u\n",
2866                                         match.mask->vlan_id);
2867                                 return IAVF_ERR_CONFIG;
2868                         }
2869                 }
2870                 vf->mask.tcp_spec.vlan_id |= cpu_to_be16(0xffff);
2871                 vf->data.tcp_spec.vlan_id = cpu_to_be16(match.key->vlan_id);
2872         }
2873
2874         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
2875                 struct flow_match_control match;
2876
2877                 flow_rule_match_control(rule, &match);
2878                 addr_type = match.key->addr_type;
2879         }
2880
2881         if (addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
2882                 struct flow_match_ipv4_addrs match;
2883
2884                 flow_rule_match_ipv4_addrs(rule, &match);
2885                 if (match.mask->dst) {
2886                         if (match.mask->dst == cpu_to_be32(0xffffffff)) {
2887                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
2888                         } else {
2889                                 dev_err(&adapter->pdev->dev, "Bad ip dst mask 0x%08x\n",
2890                                         be32_to_cpu(match.mask->dst));
2891                                 return IAVF_ERR_CONFIG;
2892                         }
2893                 }
2894
2895                 if (match.mask->src) {
2896                         if (match.mask->src == cpu_to_be32(0xffffffff)) {
2897                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
2898                         } else {
2899                                 dev_err(&adapter->pdev->dev, "Bad ip src mask 0x%08x\n",
2900                                         be32_to_cpu(match.mask->src));
2901                                 return IAVF_ERR_CONFIG;
2902                         }
2903                 }
2904
2905                 if (field_flags & IAVF_CLOUD_FIELD_TEN_ID) {
2906                         dev_info(&adapter->pdev->dev, "Tenant id not allowed for ip filter\n");
2907                         return IAVF_ERR_CONFIG;
2908                 }
2909                 if (match.key->dst) {
2910                         vf->mask.tcp_spec.dst_ip[0] |= cpu_to_be32(0xffffffff);
2911                         vf->data.tcp_spec.dst_ip[0] = match.key->dst;
2912                 }
2913                 if (match.key->src) {
2914                         vf->mask.tcp_spec.src_ip[0] |= cpu_to_be32(0xffffffff);
2915                         vf->data.tcp_spec.src_ip[0] = match.key->src;
2916                 }
2917         }
2918
2919         if (addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
2920                 struct flow_match_ipv6_addrs match;
2921
2922                 flow_rule_match_ipv6_addrs(rule, &match);
2923
2924                 /* validate mask, make sure it is not IPV6_ADDR_ANY */
2925                 if (ipv6_addr_any(&match.mask->dst)) {
2926                         dev_err(&adapter->pdev->dev, "Bad ipv6 dst mask 0x%02x\n",
2927                                 IPV6_ADDR_ANY);
2928                         return IAVF_ERR_CONFIG;
2929                 }
2930
2931                 /* src and dest IPv6 address should not be LOOPBACK
2932                  * (0:0:0:0:0:0:0:1) which can be represented as ::1
2933                  */
2934                 if (ipv6_addr_loopback(&match.key->dst) ||
2935                     ipv6_addr_loopback(&match.key->src)) {
2936                         dev_err(&adapter->pdev->dev,
2937                                 "ipv6 addr should not be loopback\n");
2938                         return IAVF_ERR_CONFIG;
2939                 }
2940                 if (!ipv6_addr_any(&match.mask->dst) ||
2941                     !ipv6_addr_any(&match.mask->src))
2942                         field_flags |= IAVF_CLOUD_FIELD_IIP;
2943
2944                 for (i = 0; i < 4; i++)
2945                         vf->mask.tcp_spec.dst_ip[i] |= cpu_to_be32(0xffffffff);
2946                 memcpy(&vf->data.tcp_spec.dst_ip, &match.key->dst.s6_addr32,
2947                        sizeof(vf->data.tcp_spec.dst_ip));
2948                 for (i = 0; i < 4; i++)
2949                         vf->mask.tcp_spec.src_ip[i] |= cpu_to_be32(0xffffffff);
2950                 memcpy(&vf->data.tcp_spec.src_ip, &match.key->src.s6_addr32,
2951                        sizeof(vf->data.tcp_spec.src_ip));
2952         }
2953         if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
2954                 struct flow_match_ports match;
2955
2956                 flow_rule_match_ports(rule, &match);
2957                 if (match.mask->src) {
2958                         if (match.mask->src == cpu_to_be16(0xffff)) {
2959                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
2960                         } else {
2961                                 dev_err(&adapter->pdev->dev, "Bad src port mask %u\n",
2962                                         be16_to_cpu(match.mask->src));
2963                                 return IAVF_ERR_CONFIG;
2964                         }
2965                 }
2966
2967                 if (match.mask->dst) {
2968                         if (match.mask->dst == cpu_to_be16(0xffff)) {
2969                                 field_flags |= IAVF_CLOUD_FIELD_IIP;
2970                         } else {
2971                                 dev_err(&adapter->pdev->dev, "Bad dst port mask %u\n",
2972                                         be16_to_cpu(match.mask->dst));
2973                                 return IAVF_ERR_CONFIG;
2974                         }
2975                 }
2976                 if (match.key->dst) {
2977                         vf->mask.tcp_spec.dst_port |= cpu_to_be16(0xffff);
2978                         vf->data.tcp_spec.dst_port = match.key->dst;
2979                 }
2980
2981                 if (match.key->src) {
2982                         vf->mask.tcp_spec.src_port |= cpu_to_be16(0xffff);
2983                         vf->data.tcp_spec.src_port = match.key->src;
2984                 }
2985         }
2986         vf->field_flags = field_flags;
2987
2988         return 0;
2989 }
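
/* Illustrative example (not from the source): a flower rule such as
 * "ip_proto tcp dst_ip 192.168.10.2 dst_port 80" leaves this function
 * with mask.tcp_spec.dst_ip[0] = 0xffffffff and data.tcp_spec.dst_ip[0]
 * holding the address, mask.tcp_spec.dst_port = 0xffff and
 * data.tcp_spec.dst_port = htons(80), and IAVF_CLOUD_FIELD_IIP set in
 * field_flags.
 */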
2990
2991 /**
2992  * iavf_handle_tclass - Forward to a traffic class on the device
2993  * @adapter: board private structure
2994  * @tc: traffic class index on the device
2995  * @filter: pointer to cloud filter structure
2996  */
2997 static int iavf_handle_tclass(struct iavf_adapter *adapter, u32 tc,
2998                               struct iavf_cloud_filter *filter)
2999 {
3000         if (tc == 0)
3001                 return 0;
3002         if (tc < adapter->num_tc) {
3003                 if (!filter->f.data.tcp_spec.dst_port) {
3004                         dev_err(&adapter->pdev->dev,
3005                                 "Specify destination port to redirect to traffic class other than TC0\n");
3006                         return -EINVAL;
3007                 }
3008         }
3009         /* redirect to a traffic class on the same device */
3010         filter->f.action = VIRTCHNL_ACTION_TC_REDIRECT;
3011         filter->f.action_meta = tc;
3012         return 0;
3013 }
3014
3015 /**
3016  * iavf_configure_clsflower - Add tc flower filters
3017  * @adapter: board private structure
3018  * @cls_flower: Pointer to struct flow_cls_offload
3019  */
3020 static int iavf_configure_clsflower(struct iavf_adapter *adapter,
3021                                     struct flow_cls_offload *cls_flower)
3022 {
3023         int tc = tc_classid_to_hwtc(adapter->netdev, cls_flower->classid);
3024         struct iavf_cloud_filter *filter = NULL;
3025         int err = -EINVAL, count = 50;
3026
3027         if (tc < 0) {
3028                 dev_err(&adapter->pdev->dev, "Invalid traffic class\n");
3029                 return -EINVAL;
3030         }
3031
3032         filter = kzalloc(sizeof(*filter), GFP_KERNEL);
3033         if (!filter)
3034                 return -ENOMEM;
3035
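        /* poll for the critical-section bit instead of sleeping; give up
         * after ~50us (50 iterations of udelay(1)) rather than stalling
         */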
3036         while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3037                                 &adapter->crit_section)) {
3038                 if (--count == 0)
3039                         goto err;
3040                 udelay(1);
3041         }
3042
3043         filter->cookie = cls_flower->cookie;
3044
3045         /* set the mask to all zeroes to begin with */
3046         memset(&filter->f.mask.tcp_spec, 0, sizeof(struct virtchnl_l4_spec));
3047         /* start out with flow type and eth type IPv4 */
3048         filter->f.flow_type = VIRTCHNL_TCP_V4_FLOW;
3049         err = iavf_parse_cls_flower(adapter, cls_flower, filter);
3050         if (err)
3051                 goto err;
3052
3053         err = iavf_handle_tclass(adapter, tc, filter);
3054         if (err)
3055                 goto err;
3056
3057         /* add filter to the list */
3058         spin_lock_bh(&adapter->cloud_filter_list_lock);
3059         list_add_tail(&filter->list, &adapter->cloud_filter_list);
3060         adapter->num_cloud_filters++;
3061         filter->add = true;
3062         adapter->aq_required |= IAVF_FLAG_AQ_ADD_CLOUD_FILTER;
3063         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3064 err:
3065         if (err)
3066                 kfree(filter);
3067
3068         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3069         return err;
3070 }
3071
3072 /**
 * iavf_find_cf - Find the cloud filter in the list
3073  * @adapter: Board private structure
3074  * @cookie: filter specific cookie
3075  *
3076  * Returns ptr to the filter object or NULL. Must be called while holding the
3077  * cloud_filter_list_lock.
3078  */
3079 static struct iavf_cloud_filter *iavf_find_cf(struct iavf_adapter *adapter,
3080                                               unsigned long *cookie)
3081 {
3082         struct iavf_cloud_filter *filter = NULL;
3083
3084         if (!cookie)
3085                 return NULL;
3086
3087         list_for_each_entry(filter, &adapter->cloud_filter_list, list) {
3088                 if (!memcmp(cookie, &filter->cookie, sizeof(filter->cookie)))
3089                         return filter;
3090         }
3091         return NULL;
3092 }
3093
3094 /**
3095  * iavf_delete_clsflower - Remove tc flower filters
3096  * @adapter: board private structure
3097  * @cls_flower: Pointer to struct flow_cls_offload
3098  */
3099 static int iavf_delete_clsflower(struct iavf_adapter *adapter,
3100                                  struct flow_cls_offload *cls_flower)
3101 {
3102         struct iavf_cloud_filter *filter = NULL;
3103         int err = 0;
3104
3105         spin_lock_bh(&adapter->cloud_filter_list_lock);
3106         filter = iavf_find_cf(adapter, &cls_flower->cookie);
3107         if (filter) {
3108                 filter->del = true;
3109                 adapter->aq_required |= IAVF_FLAG_AQ_DEL_CLOUD_FILTER;
3110         } else {
3111                 err = -EINVAL;
3112         }
3113         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3114
3115         return err;
3116 }
3117
3118 /**
3119  * iavf_setup_tc_cls_flower - flower classifier offloads
3120  * @adapter: board private structure
3121  * @cls_flower: pointer to flow_cls_offload struct with flow info
3122  */
3123 static int iavf_setup_tc_cls_flower(struct iavf_adapter *adapter,
3124                                     struct flow_cls_offload *cls_flower)
3125 {
3126         switch (cls_flower->command) {
3127         case FLOW_CLS_REPLACE:
3128                 return iavf_configure_clsflower(adapter, cls_flower);
3129         case FLOW_CLS_DESTROY:
3130                 return iavf_delete_clsflower(adapter, cls_flower);
3131         case FLOW_CLS_STATS:
3132                 return -EOPNOTSUPP;
3133         default:
3134                 return -EOPNOTSUPP;
3135         }
3136 }
3137
3138 /**
3139  * iavf_setup_tc_block_cb - block callback for tc
3140  * @type: type of offload
3141  * @type_data: offload data
3142  * @cb_priv: board private structure (struct iavf_adapter)
3143  *
3144  * This function is the block callback for traffic classes
3145  **/
3146 static int iavf_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
3147                                   void *cb_priv)
3148 {
3149         struct iavf_adapter *adapter = cb_priv;
3150
3151         if (!tc_cls_can_offload_and_chain0(adapter->netdev, type_data))
3152                 return -EOPNOTSUPP;
3153
3154         switch (type) {
3155         case TC_SETUP_CLSFLOWER:
3156                 return iavf_setup_tc_cls_flower(cb_priv, type_data);
3157         default:
3158                 return -EOPNOTSUPP;
3159         }
3160 }
3161
3162 static LIST_HEAD(iavf_block_cb_list);
3163
3164 /**
3165  * iavf_setup_tc - configure multiple traffic classes
3166  * @netdev: network interface device structure
3167  * @type: type of offload
3168  * @type_data: tc offload data
3169  *
3170  * This function is the callback to ndo_setup_tc in the
3171  * netdev_ops.
3172  *
3173  * Returns 0 on success
3174  **/
3175 static int iavf_setup_tc(struct net_device *netdev, enum tc_setup_type type,
3176                          void *type_data)
3177 {
3178         struct iavf_adapter *adapter = netdev_priv(netdev);
3179
3180         switch (type) {
3181         case TC_SETUP_QDISC_MQPRIO:
3182                 return __iavf_setup_tc(netdev, type_data);
3183         case TC_SETUP_BLOCK:
3184                 return flow_block_cb_setup_simple(type_data,
3185                                                   &iavf_block_cb_list,
3186                                                   iavf_setup_tc_block_cb,
3187                                                   adapter, adapter, true);
3188         default:
3189                 return -EOPNOTSUPP;
3190         }
3191 }
3192
3193 /**
3194  * iavf_open - Called when a network interface is made active
3195  * @netdev: network interface device structure
3196  *
3197  * Returns 0 on success, negative value on failure
3198  *
3199  * The open entry point is called when a network interface is made
3200  * active by the system (IFF_UP).  At this point all resources needed
3201  * for transmit and receive operations are allocated, the interrupt
3202  * handler is registered with the OS, the watchdog is started,
3203  * and the stack is notified that the interface is ready.
3204  **/
3205 static int iavf_open(struct net_device *netdev)
3206 {
3207         struct iavf_adapter *adapter = netdev_priv(netdev);
3208         int err;
3209
3210         if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) {
3211                 dev_err(&adapter->pdev->dev, "Unable to open device due to PF driver failure.\n");
3212                 return -EIO;
3213         }
3214
3215         while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3216                                 &adapter->crit_section))
3217                 usleep_range(500, 1000);
3218
3219         if (adapter->state != __IAVF_DOWN) {
3220                 err = -EBUSY;
3221                 goto err_unlock;
3222         }
3223
3224         /* allocate transmit descriptors */
3225         err = iavf_setup_all_tx_resources(adapter);
3226         if (err)
3227                 goto err_setup_tx;
3228
3229         /* allocate receive descriptors */
3230         err = iavf_setup_all_rx_resources(adapter);
3231         if (err)
3232                 goto err_setup_rx;
3233
3234         /* clear any pending interrupts, may auto mask */
3235         err = iavf_request_traffic_irqs(adapter, netdev->name);
3236         if (err)
3237                 goto err_req_irq;
3238
3239         spin_lock_bh(&adapter->mac_vlan_list_lock);
3240
3241         iavf_add_filter(adapter, adapter->hw.mac.addr);
3242
3243         spin_unlock_bh(&adapter->mac_vlan_list_lock);
3244
3245         iavf_configure(adapter);
3246
3247         iavf_up_complete(adapter);
3248
3249         iavf_irq_enable(adapter, true);
3250
3251         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3252
3253         return 0;
3254
3255 err_req_irq:
3256         iavf_down(adapter);
3257         iavf_free_traffic_irqs(adapter);
3258 err_setup_rx:
3259         iavf_free_all_rx_resources(adapter);
3260 err_setup_tx:
3261         iavf_free_all_tx_resources(adapter);
3262 err_unlock:
3263         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3264
3265         return err;
3266 }
3267
3268 /**
3269  * iavf_close - Disables a network interface
3270  * @netdev: network interface device structure
3271  *
3272  * Returns 0, this is not allowed to fail
3273  *
3274  * The close entry point is called when an interface is de-activated
3275  * by the OS.  The hardware is still under the drivers control, but
3276  * needs to be disabled. All IRQs except vector 0 (reserved for admin queue)
3277  * are freed, along with all transmit and receive resources.
3278  **/
3279 static int iavf_close(struct net_device *netdev)
3280 {
3281         struct iavf_adapter *adapter = netdev_priv(netdev);
3282         int status;
3283
3284         if (adapter->state <= __IAVF_DOWN_PENDING)
3285                 return 0;
3286
3287         while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3288                                 &adapter->crit_section))
3289                 usleep_range(500, 1000);
3290
3291         set_bit(__IAVF_VSI_DOWN, adapter->vsi.state);
3292         if (CLIENT_ENABLED(adapter))
3293                 adapter->flags |= IAVF_FLAG_CLIENT_NEEDS_CLOSE;
3294
3295         iavf_down(adapter);
3296         iavf_change_state(adapter, __IAVF_DOWN_PENDING);
3297         iavf_free_traffic_irqs(adapter);
3298
3299         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3300
3301         /* We explicitly don't free resources here because the hardware is
3302          * still active and can DMA into memory. Resources are cleared in
3303          * iavf_virtchnl_completion() after we get confirmation from the PF
3304          * driver that the rings have been stopped.
3305          *
3306          * Also, we wait for state to transition to __IAVF_DOWN before
3307          * returning. State change occurs in iavf_virtchnl_completion() after
3308          * VF resources are released (which occurs after PF driver processes and
3309          * responds to admin queue commands).
3310          */
3311
3312         status = wait_event_timeout(adapter->down_waitqueue,
3313                                     adapter->state == __IAVF_DOWN,
3314                                     msecs_to_jiffies(500));
3315         if (!status)
3316                 netdev_warn(netdev, "Device resources not yet released\n");
3317         return 0;
3318 }
3319
3320 /**
3321  * iavf_change_mtu - Change the Maximum Transfer Unit
3322  * @netdev: network interface device structure
3323  * @new_mtu: new value for maximum frame size
3324  *
3325  * Returns 0 on success, negative on failure
3326  **/
3327 static int iavf_change_mtu(struct net_device *netdev, int new_mtu)
3328 {
3329         struct iavf_adapter *adapter = netdev_priv(netdev);
3330
3331         netdev->mtu = new_mtu;
3332         if (CLIENT_ENABLED(adapter)) {
3333                 iavf_notify_client_l2_params(&adapter->vsi);
3334                 adapter->flags |= IAVF_FLAG_SERVICE_CLIENT_REQUESTED;
3335         }
3336
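        /* if the interface is up, schedule a reset so the rings are
         * rebuilt to account for the new maximum frame size
         */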
3337         if (netif_running(netdev)) {
3338                 adapter->flags |= IAVF_FLAG_RESET_NEEDED;
3339                 queue_work(iavf_wq, &adapter->reset_task);
3340         }
3341
3342         return 0;
3343 }
3344
3345 /**
3346  * iavf_set_features - set the netdev feature flags
3347  * @netdev: ptr to the netdev being adjusted
3348  * @features: the feature set that the stack is suggesting
3349  * Note: expects to be called while under rtnl_lock()
3350  **/
3351 static int iavf_set_features(struct net_device *netdev,
3352                              netdev_features_t features)
3353 {
3354         struct iavf_adapter *adapter = netdev_priv(netdev);
3355
3356         /* Don't allow changing VLAN_RX flag when adapter is not capable
3357          * of VLAN offload
3358          */
3359         if (!VLAN_ALLOWED(adapter)) {
3360                 if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX)
3361                         return -EINVAL;
3362         } else if ((netdev->features ^ features) & NETIF_F_HW_VLAN_CTAG_RX) {
3363                 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3364                         adapter->aq_required |=
3365                                 IAVF_FLAG_AQ_ENABLE_VLAN_STRIPPING;
3366                 else
3367                         adapter->aq_required |=
3368                                 IAVF_FLAG_AQ_DISABLE_VLAN_STRIPPING;
3369         }
3370
3371         return 0;
3372 }
3373
3374 /**
3375  * iavf_features_check - Validate encapsulated packet conforms to limits
3376  * @skb: skb buff
3377  * @dev: This physical port's netdev
3378  * @features: Offload features that the stack believes apply
3379  **/
3380 static netdev_features_t iavf_features_check(struct sk_buff *skb,
3381                                              struct net_device *dev,
3382                                              netdev_features_t features)
3383 {
3384         size_t len;
3385
3386         /* No point in doing any of this if neither checksum nor GSO are
3387          * being requested for this frame.  We can rule out both by just
3388          * checking for CHECKSUM_PARTIAL
3389          */
3390         if (skb->ip_summed != CHECKSUM_PARTIAL)
3391                 return features;
3392
3393         /* We cannot support GSO if the MSS is going to be less than
3394          * 64 bytes.  If it is then we need to drop support for GSO.
3395          */
3396         if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
3397                 features &= ~NETIF_F_GSO_MASK;
3398
3399         /* MACLEN can support at most 63 words */
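        /* (len & ~(63 * 2)) is non-zero when len is odd or exceeds
         * 126 bytes, i.e. whenever the MAC header cannot be expressed
         * in at most 63 2-byte words
         */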
3400         len = skb_network_header(skb) - skb->data;
3401         if (len & ~(63 * 2))
3402                 goto out_err;
3403
3404         /* IPLEN and EIPLEN can support at most 127 dwords */
3405         len = skb_transport_header(skb) - skb_network_header(skb);
3406         if (len & ~(127 * 4))
3407                 goto out_err;
3408
3409         if (skb->encapsulation) {
3410                 /* L4TUNLEN can support 127 words */
3411                 len = skb_inner_network_header(skb) - skb_transport_header(skb);
3412                 if (len & ~(127 * 2))
3413                         goto out_err;
3414
3415                 /* IPLEN can support at most 127 dwords */
3416                 len = skb_inner_transport_header(skb) -
3417                       skb_inner_network_header(skb);
3418                 if (len & ~(127 * 4))
3419                         goto out_err;
3420         }
3421
3422         /* No need to validate L4LEN as TCP is the only protocol with a
3423          * flexible value and we support all possible values supported
3424          * by TCP, which is at most 15 dwords
3425          */
3426
3427         return features;
3428 out_err:
3429         return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
3430 }
3431
3432 /**
3433  * iavf_fix_features - fix up the netdev feature bits
3434  * @netdev: our net device
3435  * @features: desired feature bits
3436  *
3437  * Returns fixed-up features bits
3438  **/
3439 static netdev_features_t iavf_fix_features(struct net_device *netdev,
3440                                            netdev_features_t features)
3441 {
3442         struct iavf_adapter *adapter = netdev_priv(netdev);
3443
3444         if (adapter->vf_res &&
3445             !(adapter->vf_res->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN))
3446                 features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
3447                               NETIF_F_HW_VLAN_CTAG_RX |
3448                               NETIF_F_HW_VLAN_CTAG_FILTER);
3449
3450         return features;
3451 }
3452
3453 static const struct net_device_ops iavf_netdev_ops = {
3454         .ndo_open               = iavf_open,
3455         .ndo_stop               = iavf_close,
3456         .ndo_start_xmit         = iavf_xmit_frame,
3457         .ndo_set_rx_mode        = iavf_set_rx_mode,
3458         .ndo_validate_addr      = eth_validate_addr,
3459         .ndo_set_mac_address    = iavf_set_mac,
3460         .ndo_change_mtu         = iavf_change_mtu,
3461         .ndo_tx_timeout         = iavf_tx_timeout,
3462         .ndo_vlan_rx_add_vid    = iavf_vlan_rx_add_vid,
3463         .ndo_vlan_rx_kill_vid   = iavf_vlan_rx_kill_vid,
3464         .ndo_features_check     = iavf_features_check,
3465         .ndo_fix_features       = iavf_fix_features,
3466         .ndo_set_features       = iavf_set_features,
3467         .ndo_setup_tc           = iavf_setup_tc,
3468 };
3469
3470 /**
3471  * iavf_check_reset_complete - check that VF reset is complete
3472  * @hw: pointer to hw struct
3473  *
3474  * Returns 0 if device is ready to use, or -EBUSY if it's in reset.
3475  **/
3476 static int iavf_check_reset_complete(struct iavf_hw *hw)
3477 {
3478         u32 rstat;
3479         int i;
3480
3481         for (i = 0; i < IAVF_RESET_WAIT_COMPLETE_COUNT; i++) {
3482                 rstat = rd32(hw, IAVF_VFGEN_RSTAT) &
3483                              IAVF_VFGEN_RSTAT_VFR_STATE_MASK;
3484                 if ((rstat == VIRTCHNL_VFR_VFACTIVE) ||
3485                     (rstat == VIRTCHNL_VFR_COMPLETED))
3486                         return 0;
3487                 usleep_range(10, 20);
3488         }
3489         return -EBUSY;
3490 }
3491
3492 /**
3493  * iavf_process_config - Process the config information we got from the PF
3494  * @adapter: board private structure
3495  *
3496  * Verify that we have a valid config struct, and set up our netdev features
3497  * and our VSI struct.
3498  **/
3499 int iavf_process_config(struct iavf_adapter *adapter)
3500 {
3501         struct virtchnl_vf_resource *vfres = adapter->vf_res;
3502         int i, num_req_queues = adapter->num_req_queues;
3503         struct net_device *netdev = adapter->netdev;
3504         struct iavf_vsi *vsi = &adapter->vsi;
3505         netdev_features_t hw_enc_features;
3506         netdev_features_t hw_features;
3507
3508         /* got VF config message back from PF, now we can parse it */
3509         for (i = 0; i < vfres->num_vsis; i++) {
3510                 if (vfres->vsi_res[i].vsi_type == VIRTCHNL_VSI_SRIOV)
3511                         adapter->vsi_res = &vfres->vsi_res[i];
3512         }
3513         if (!adapter->vsi_res) {
3514                 dev_err(&adapter->pdev->dev, "No LAN VSI found\n");
3515                 return -ENODEV;
3516         }
3517
3518         if (num_req_queues &&
3519             num_req_queues > adapter->vsi_res->num_queue_pairs) {
3520                 /* Problem.  The PF gave us fewer queues than what we had
3521                  * negotiated in our request.  Need a reset to see if we can't
3522                  * get back to a working state.
3523                  */
3524                 dev_err(&adapter->pdev->dev,
3525                         "Requested %d queues, but PF only gave us %d.\n",
3526                         num_req_queues,
3527                         adapter->vsi_res->num_queue_pairs);
3528                 adapter->flags |= IAVF_FLAG_REINIT_ITR_NEEDED;
3529                 adapter->num_req_queues = adapter->vsi_res->num_queue_pairs;
3530                 iavf_schedule_reset(adapter);
3531                 return -ENODEV;
3532         }
3533         adapter->num_req_queues = 0;
3534
3535         hw_enc_features = NETIF_F_SG                    |
3536                           NETIF_F_IP_CSUM               |
3537                           NETIF_F_IPV6_CSUM             |
3538                           NETIF_F_HIGHDMA               |
3539                           NETIF_F_SOFT_FEATURES         |
3540                           NETIF_F_TSO                   |
3541                           NETIF_F_TSO_ECN               |
3542                           NETIF_F_TSO6                  |
3543                           NETIF_F_SCTP_CRC              |
3544                           NETIF_F_RXHASH                |
3545                           NETIF_F_RXCSUM                |
3546                           0;
3547
3548         /* advertise to stack only if offloads for encapsulated packets is
3549          * supported
3550          */
3551         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ENCAP) {
3552                 hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL       |
3553                                    NETIF_F_GSO_GRE              |
3554                                    NETIF_F_GSO_GRE_CSUM         |
3555                                    NETIF_F_GSO_IPXIP4           |
3556                                    NETIF_F_GSO_IPXIP6           |
3557                                    NETIF_F_GSO_UDP_TUNNEL_CSUM  |
3558                                    NETIF_F_GSO_PARTIAL          |
3559                                    0;
3560
3561                 if (!(vfres->vf_cap_flags &
3562                       VIRTCHNL_VF_OFFLOAD_ENCAP_CSUM))
3563                         netdev->gso_partial_features |=
3564                                 NETIF_F_GSO_UDP_TUNNEL_CSUM;
3565
3566                 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
3567                 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
3568                 netdev->hw_enc_features |= hw_enc_features;
3569         }
3570         /* record features VLANs can make use of */
3571         netdev->vlan_features |= hw_enc_features | NETIF_F_TSO_MANGLEID;
3572
3573         /* Write features and hw_features separately to avoid polluting
3574          * with, or dropping, features that are set when we registered.
3575          */
3576         hw_features = hw_enc_features;
3577
3578         /* Enable VLAN features if supported */
3579         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3580                 hw_features |= (NETIF_F_HW_VLAN_CTAG_TX |
3581                                 NETIF_F_HW_VLAN_CTAG_RX);
3582         /* Enable cloud filter if ADQ is supported */
3583         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_ADQ)
3584                 hw_features |= NETIF_F_HW_TC;
3585
3586         netdev->hw_features |= hw_features;
3587
3588         netdev->features |= hw_features;
3589
3590         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_VLAN)
3591                 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3592
3593         netdev->priv_flags |= IFF_UNICAST_FLT;
3594
3595         /* Do not turn on offloads when they are requested to be turned off.
3596          * TSO needs minimum 576 bytes to work correctly.
3597          */
3598         if (netdev->wanted_features) {
3599                 if (!(netdev->wanted_features & NETIF_F_TSO) ||
3600                     netdev->mtu < 576)
3601                         netdev->features &= ~NETIF_F_TSO;
3602                 if (!(netdev->wanted_features & NETIF_F_TSO6) ||
3603                     netdev->mtu < 576)
3604                         netdev->features &= ~NETIF_F_TSO6;
3605                 if (!(netdev->wanted_features & NETIF_F_TSO_ECN))
3606                         netdev->features &= ~NETIF_F_TSO_ECN;
3607                 if (!(netdev->wanted_features & NETIF_F_GRO))
3608                         netdev->features &= ~NETIF_F_GRO;
3609                 if (!(netdev->wanted_features & NETIF_F_GSO))
3610                         netdev->features &= ~NETIF_F_GSO;
3611         }
3612
3613         adapter->vsi.id = adapter->vsi_res->vsi_id;
3614
3615         adapter->vsi.back = adapter;
3616         adapter->vsi.base_vector = 1;
3617         adapter->vsi.work_limit = IAVF_DEFAULT_IRQ_WORK;
3618         vsi->netdev = adapter->netdev;
3619         vsi->qs_handle = adapter->vsi_res->qset_handle;
3620         if (vfres->vf_cap_flags & VIRTCHNL_VF_OFFLOAD_RSS_PF) {
3621                 adapter->rss_key_size = vfres->rss_key_size;
3622                 adapter->rss_lut_size = vfres->rss_lut_size;
3623         } else {
3624                 adapter->rss_key_size = IAVF_HKEY_ARRAY_SIZE;
3625                 adapter->rss_lut_size = IAVF_HLUT_ARRAY_SIZE;
3626         }
3627
3628         return 0;
3629 }
3630
3631 /**
3632  * iavf_init_task - worker thread to perform delayed initialization
3633  * @work: pointer to work_struct containing our data
3634  *
3635  * This task completes the work that was begun in probe. Due to the nature
3636  * of VF-PF communications, we may need to wait tens of milliseconds to get
3637  * responses back from the PF. Rather than busy-wait in probe and bog down the
3638  * whole system, we'll do it in a task so we can sleep.
3639  * This task only runs during driver init. Once we've established
3640  * communications with the PF driver and set up our netdev, the watchdog
3641  * takes over.
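 *
 * The switch below steps through __IAVF_STARTUP, __IAVF_INIT_VERSION_CHECK
 * and __IAVF_INIT_GET_RESOURCES in turn; after more than IAVF_AQ_MAX_ERR
 * consecutive failures it falls back to __IAVF_STARTUP and retries on a
 * longer (5 second) delay.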
3642  **/
3643 static void iavf_init_task(struct work_struct *work)
3644 {
3645         struct iavf_adapter *adapter = container_of(work,
3646                                                     struct iavf_adapter,
3647                                                     init_task.work);
3648         struct iavf_hw *hw = &adapter->hw;
3649
3650         if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000)) {
3651                 dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __func__);
3652                 return;
3653         }
3654         switch (adapter->state) {
3655         case __IAVF_STARTUP:
3656                 if (iavf_startup(adapter) < 0)
3657                         goto init_failed;
3658                 break;
3659         case __IAVF_INIT_VERSION_CHECK:
3660                 if (iavf_init_version_check(adapter) < 0)
3661                         goto init_failed;
3662                 break;
3663         case __IAVF_INIT_GET_RESOURCES:
3664                 if (iavf_init_get_resources(adapter) < 0)
3665                         goto init_failed;
3666                 goto out;
3667         default:
3668                 goto init_failed;
3669         }
3670
3671         queue_delayed_work(iavf_wq, &adapter->init_task,
3672                            msecs_to_jiffies(30));
3673         goto out;
3674 init_failed:
3675         if (++adapter->aq_wait_count > IAVF_AQ_MAX_ERR) {
3676                 dev_err(&adapter->pdev->dev,
3677                         "Failed to communicate with PF; waiting before retry\n");
3678                 adapter->flags |= IAVF_FLAG_PF_COMMS_FAILED;
3679                 iavf_shutdown_adminq(hw);
3680                 iavf_change_state(adapter, __IAVF_STARTUP);
3681                 queue_delayed_work(iavf_wq, &adapter->init_task, HZ * 5);
3682                 goto out;
3683         }
3684         queue_delayed_work(iavf_wq, &adapter->init_task, HZ);
3685 out:
3686         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3687 }
3688
3689 /**
3690  * iavf_shutdown - Shutdown the device in preparation for a reboot
3691  * @pdev: pci device structure
3692  **/
3693 static void iavf_shutdown(struct pci_dev *pdev)
3694 {
3695         struct net_device *netdev = pci_get_drvdata(pdev);
3696         struct iavf_adapter *adapter = netdev_priv(netdev);
3697
3698         netif_device_detach(netdev);
3699
3700         if (netif_running(netdev))
3701                 iavf_close(netdev);
3702
3703         if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
3704                 dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __func__);
3705         /* Prevent the watchdog from running. */
3706         iavf_change_state(adapter, __IAVF_REMOVE);
3707         adapter->aq_required = 0;
3708         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3709
3710 #ifdef CONFIG_PM
3711         pci_save_state(pdev);
3712
3713 #endif
3714         pci_disable_device(pdev);
3715 }
3716
3717 /**
3718  * iavf_probe - Device Initialization Routine
3719  * @pdev: PCI device information struct
3720  * @ent: entry in iavf_pci_tbl
3721  *
3722  * Returns 0 on success, negative on failure
3723  *
3724  * iavf_probe initializes an adapter identified by a pci_dev structure.
3725  * The OS initialization, configuring of the adapter private structure,
3726  * and a hardware reset occur.
3727  **/
3728 static int iavf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
3729 {
3730         struct net_device *netdev;
3731         struct iavf_adapter *adapter = NULL;
3732         struct iavf_hw *hw = NULL;
3733         int err;
3734
3735         err = pci_enable_device(pdev);
3736         if (err)
3737                 return err;
3738
3739         err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
3740         if (err) {
3741                 err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
3742                 if (err) {
3743                         dev_err(&pdev->dev,
3744                                 "DMA configuration failed: 0x%x\n", err);
3745                         goto err_dma;
3746                 }
3747         }
3748
3749         err = pci_request_regions(pdev, iavf_driver_name);
3750         if (err) {
3751                 dev_err(&pdev->dev,
3752                         "pci_request_regions failed 0x%x\n", err);
3753                 goto err_pci_reg;
3754         }
3755
3756         pci_enable_pcie_error_reporting(pdev);
3757
3758         pci_set_master(pdev);
3759
3760         netdev = alloc_etherdev_mq(sizeof(struct iavf_adapter),
3761                                    IAVF_MAX_REQ_QUEUES);
3762         if (!netdev) {
3763                 err = -ENOMEM;
3764                 goto err_alloc_etherdev;
3765         }
3766
3767         SET_NETDEV_DEV(netdev, &pdev->dev);
3768
3769         pci_set_drvdata(pdev, netdev);
3770         adapter = netdev_priv(netdev);
3771
3772         adapter->netdev = netdev;
3773         adapter->pdev = pdev;
3774
3775         hw = &adapter->hw;
3776         hw->back = adapter;
3777
3778         adapter->msg_enable = BIT(DEFAULT_DEBUG_LEVEL_SHIFT) - 1;
3779         iavf_change_state(adapter, __IAVF_STARTUP);
3780
3781         /* Call save state here because it relies on the adapter struct. */
3782         pci_save_state(pdev);
3783
3784         hw->hw_addr = ioremap(pci_resource_start(pdev, 0),
3785                               pci_resource_len(pdev, 0));
3786         if (!hw->hw_addr) {
3787                 err = -EIO;
3788                 goto err_ioremap;
3789         }
3790         hw->vendor_id = pdev->vendor;
3791         hw->device_id = pdev->device;
3792         pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
3793         hw->subsystem_vendor_id = pdev->subsystem_vendor;
3794         hw->subsystem_device_id = pdev->subsystem_device;
3795         hw->bus.device = PCI_SLOT(pdev->devfn);
3796         hw->bus.func = PCI_FUNC(pdev->devfn);
3797         hw->bus.bus_id = pdev->bus->number;
3798
3799         /* set up the locks for the AQ, do this only once in probe
3800          * and destroy them only once in remove
3801          */
3802         mutex_init(&hw->aq.asq_mutex);
3803         mutex_init(&hw->aq.arq_mutex);
3804
3805         spin_lock_init(&adapter->mac_vlan_list_lock);
3806         spin_lock_init(&adapter->cloud_filter_list_lock);
3807
3808         INIT_LIST_HEAD(&adapter->mac_filter_list);
3809         INIT_LIST_HEAD(&adapter->vlan_filter_list);
3810         INIT_LIST_HEAD(&adapter->cloud_filter_list);
3811
3812         INIT_WORK(&adapter->reset_task, iavf_reset_task);
3813         INIT_WORK(&adapter->adminq_task, iavf_adminq_task);
3814         INIT_DELAYED_WORK(&adapter->watchdog_task, iavf_watchdog_task);
3815         INIT_DELAYED_WORK(&adapter->client_task, iavf_client_task);
3816         INIT_DELAYED_WORK(&adapter->init_task, iavf_init_task);
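        /* stagger the first init_task run by up to 35ms based on the PCI
         * function number, presumably so that many VFs probed together do
         * not hit the PF mailbox at the same instant
         */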
3817         queue_delayed_work(iavf_wq, &adapter->init_task,
3818                            msecs_to_jiffies(5 * (pdev->devfn & 0x07)));
3819
3820         /* Setup the wait queue for indicating transition to down status */
3821         init_waitqueue_head(&adapter->down_waitqueue);
3822
3823         return 0;
3824
3825 err_ioremap:
3826         free_netdev(netdev);
3827 err_alloc_etherdev:
3828         pci_disable_pcie_error_reporting(pdev);
3829         pci_release_regions(pdev);
3830 err_pci_reg:
3831 err_dma:
3832         pci_disable_device(pdev);
3833         return err;
3834 }
3835
3836 /**
3837  * iavf_suspend - Power management suspend routine
3838  * @dev_d: device info pointer
3839  *
3840  * Called when the system (VM) is entering sleep/suspend.
3841  **/
3842 static int __maybe_unused iavf_suspend(struct device *dev_d)
3843 {
3844         struct net_device *netdev = dev_get_drvdata(dev_d);
3845         struct iavf_adapter *adapter = netdev_priv(netdev);
3846
3847         netif_device_detach(netdev);
3848
3849         while (test_and_set_bit(__IAVF_IN_CRITICAL_TASK,
3850                                 &adapter->crit_section))
3851                 usleep_range(500, 1000);
3852
3853         if (netif_running(netdev)) {
3854                 rtnl_lock();
3855                 iavf_down(adapter);
3856                 rtnl_unlock();
3857         }
3858         iavf_free_misc_irq(adapter);
3859         iavf_reset_interrupt_capability(adapter);
3860
3861         clear_bit(__IAVF_IN_CRITICAL_TASK, &adapter->crit_section);
3862
3863         return 0;
3864 }
3865
3866 /**
3867  * iavf_resume - Power management resume routine
3868  * @dev_d: device info pointer
3869  *
3870  * Called when the system (VM) is resumed from sleep/suspend.
3871  **/
3872 static int __maybe_unused iavf_resume(struct device *dev_d)
3873 {
3874         struct pci_dev *pdev = to_pci_dev(dev_d);
3875         struct net_device *netdev = pci_get_drvdata(pdev);
3876         struct iavf_adapter *adapter = netdev_priv(netdev);
3877         int err;
3878
3879         pci_set_master(pdev);
3880
3881         rtnl_lock();
3882         err = iavf_set_interrupt_capability(adapter);
3883         if (err) {
3884                 rtnl_unlock();
3885                 dev_err(&pdev->dev, "Cannot enable MSI-X interrupts.\n");
3886                 return err;
3887         }
3888         err = iavf_request_misc_irq(adapter);
3889         rtnl_unlock();
3890         if (err) {
3891                 dev_err(&pdev->dev, "Cannot get interrupt vector.\n");
3892                 return err;
3893         }
3894
3895         queue_work(iavf_wq, &adapter->reset_task);
3896
3897         netif_device_attach(netdev);
3898
3899         return err;
3900 }
3901
3902 /**
3903  * iavf_remove - Device Removal Routine
3904  * @pdev: PCI device information struct
3905  *
3906  * iavf_remove is called by the PCI subsystem to alert the driver
3907  * that it should release a PCI device.  This could be caused by a
3908  * Hot-Plug event, or because the driver is going to be removed from
3909  * memory.
3910  **/
3911 static void iavf_remove(struct pci_dev *pdev)
3912 {
3913         struct net_device *netdev = pci_get_drvdata(pdev);
3914         struct iavf_adapter *adapter = netdev_priv(netdev);
3915         struct iavf_vlan_filter *vlf, *vlftmp;
3916         struct iavf_mac_filter *f, *ftmp;
3917         struct iavf_cloud_filter *cf, *cftmp;
3918         struct iavf_hw *hw = &adapter->hw;
3919         int err;
3920         /* Indicate we are in remove and not to run reset_task */
3921         set_bit(__IAVF_IN_REMOVE_TASK, &adapter->crit_section);
3922         cancel_delayed_work_sync(&adapter->init_task);
3923         cancel_work_sync(&adapter->reset_task);
3924         cancel_delayed_work_sync(&adapter->client_task);
3925         if (adapter->netdev_registered) {
3926                 unregister_netdev(netdev);
3927                 adapter->netdev_registered = false;
3928         }
3929         if (CLIENT_ALLOWED(adapter)) {
3930                 err = iavf_lan_del_device(adapter);
3931                 if (err)
3932                         dev_warn(&pdev->dev, "Failed to delete client device: %d\n",
3933                                  err);
3934         }
3935
3936         iavf_request_reset(adapter);
3937         msleep(50);
3938         /* If the FW isn't responding, kick it once, but only once. */
3939         if (!iavf_asq_done(hw)) {
3940                 iavf_request_reset(adapter);
3941                 msleep(50);
3942         }
3943         if (iavf_lock_timeout(adapter, __IAVF_IN_CRITICAL_TASK, 5000))
3944                 dev_warn(&adapter->pdev->dev, "failed to set __IAVF_IN_CRITICAL_TASK in %s\n", __func__);
3945
3946         /* Shut down all the garbage mashers on the detention level */
3947         iavf_change_state(adapter, __IAVF_REMOVE);
3948         adapter->aq_required = 0;
3949         adapter->flags &= ~IAVF_FLAG_REINIT_ITR_NEEDED;
3950         iavf_free_all_tx_resources(adapter);
3951         iavf_free_all_rx_resources(adapter);
3952         iavf_misc_irq_disable(adapter);
3953         iavf_free_misc_irq(adapter);
3954         iavf_reset_interrupt_capability(adapter);
3955         iavf_free_q_vectors(adapter);
3956
3957         cancel_delayed_work_sync(&adapter->watchdog_task);
3958
3959         cancel_work_sync(&adapter->adminq_task);
3960
3961         iavf_free_rss(adapter);
3962
3963         if (hw->aq.asq.count)
3964                 iavf_shutdown_adminq(hw);
3965
3966         /* destroy the locks only once, here */
3967         mutex_destroy(&hw->aq.arq_mutex);
3968         mutex_destroy(&hw->aq.asq_mutex);
3969
3970         iounmap(hw->hw_addr);
3971         pci_release_regions(pdev);
3972         iavf_free_queues(adapter);
3973         kfree(adapter->vf_res);
3974         spin_lock_bh(&adapter->mac_vlan_list_lock);
3975         /* If we got removed before an up/down sequence, we've got a filter
3976          * hanging out there that we need to get rid of.
3977          */
3978         list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
3979                 list_del(&f->list);
3980                 kfree(f);
3981         }
3982         list_for_each_entry_safe(vlf, vlftmp, &adapter->vlan_filter_list,
3983                                  list) {
3984                 list_del(&vlf->list);
3985                 kfree(vlf);
3986         }
3987
3988         spin_unlock_bh(&adapter->mac_vlan_list_lock);
3989
3990         spin_lock_bh(&adapter->cloud_filter_list_lock);
3991         list_for_each_entry_safe(cf, cftmp, &adapter->cloud_filter_list, list) {
3992                 list_del(&cf->list);
3993                 kfree(cf);
3994         }
3995         spin_unlock_bh(&adapter->cloud_filter_list_lock);
3996
3997         free_netdev(netdev);
3998
3999         pci_disable_pcie_error_reporting(pdev);
4000
4001         pci_disable_device(pdev);
4002 }
4003
4004 static SIMPLE_DEV_PM_OPS(iavf_pm_ops, iavf_suspend, iavf_resume);
4005
4006 static struct pci_driver iavf_driver = {
4007         .name      = iavf_driver_name,
4008         .id_table  = iavf_pci_tbl,
4009         .probe     = iavf_probe,
4010         .remove    = iavf_remove,
4011         .driver.pm = &iavf_pm_ops,
4012         .shutdown  = iavf_shutdown,
4013 };
4014
4015 /**
4016  * iavf_init_module - Driver Registration Routine
4017  *
4018  * iavf_init_module is the first routine called when the driver is
4019  * loaded. All it does is register with the PCI subsystem.
4020  **/
4021 static int __init iavf_init_module(void)
4022 {
4023         int ret;
4024
4025         pr_info("iavf: %s\n", iavf_driver_string);
4026
4027         pr_info("%s\n", iavf_copyright);
4028
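        /* one unbound workqueue with max_active = 1 is shared by every
         * adapter for init, reset, watchdog and adminq work, so at most
         * one such work item runs at a time
         */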
4029         iavf_wq = alloc_workqueue("%s", WQ_UNBOUND | WQ_MEM_RECLAIM, 1,
4030                                   iavf_driver_name);
4031         if (!iavf_wq) {
4032                 pr_err("%s: Failed to create workqueue\n", iavf_driver_name);
4033                 return -ENOMEM;
4034         }
4035
4036         ret = pci_register_driver(&iavf_driver);
4037         if (ret)
4038                 destroy_workqueue(iavf_wq);
4039
4040         return ret;
4041 }
4042
4043 module_init(iavf_init_module);
4044
4045 /**
4046  * iavf_exit_module - Driver Exit Cleanup Routine
4047  *
4048  * iavf_exit_module is called just before the driver is removed
4049  * from memory.
4050  **/
4051 static void __exit iavf_exit_module(void)
4052 {
4053         pci_unregister_driver(&iavf_driver);
4054         destroy_workqueue(iavf_wq);
4055 }
4056
4057 module_exit(iavf_exit_module);
4058
4059 /* iavf_main.c */