GNU Linux-libre 4.19.245-gnu1
drivers/net/ethernet/intel/ixgbevf/ethtool.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbevf */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>

#include "ixgbevf.h"

#define IXGBE_ALL_RAR_ENTRIES 16

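/* Each stats table entry is read either from the generic net_device
 * counters or from a field embedded in struct ixgbevf_adapter.
 */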
enum {NETDEV_STATS, IXGBEVF_STATS};

struct ixgbe_stats {
        char stat_string[ETH_GSTRING_LEN];
        int type;
        int sizeof_stat;
        int stat_offset;
};

#define IXGBEVF_STAT(_name, _stat) { \
        .stat_string = _name, \
        .type = IXGBEVF_STATS, \
        .sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
        .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}

#define IXGBEVF_NETDEV_STAT(_net_stat) { \
        .stat_string = #_net_stat, \
        .type = NETDEV_STATS, \
        .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
        .stat_offset = offsetof(struct net_device_stats, _net_stat) \
}

static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
        IXGBEVF_NETDEV_STAT(rx_packets),
        IXGBEVF_NETDEV_STAT(tx_packets),
        IXGBEVF_NETDEV_STAT(rx_bytes),
        IXGBEVF_NETDEV_STAT(tx_bytes),
        IXGBEVF_STAT("tx_busy", tx_busy),
        IXGBEVF_STAT("tx_restart_queue", restart_queue),
        IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
        IXGBEVF_NETDEV_STAT(multicast),
        IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
        IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
        IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
        IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
};

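/* Every Tx, XDP and Rx ring contributes one u64 per field of struct
 * ixgbevf_stats (packets and bytes, matching the per-queue strings emitted
 * below).  The macro relies on a local 'netdev' variable being in scope.
 */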
#define IXGBEVF_QUEUE_STATS_LEN ( \
        (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
         ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \
         ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
         (sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)

#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
        "Register test  (offline)",
        "Link test   (on/offline)"
};

#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBEVF_PRIV_FLAGS_LEGACY_RX    BIT(0)
        "legacy-rx",
};

#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)

static int ixgbevf_get_link_ksettings(struct net_device *netdev,
                                      struct ethtool_link_ksettings *cmd)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 link_speed = 0;
        bool link_up;

        ethtool_link_ksettings_zero_link_mode(cmd, supported);
        ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
        cmd->base.autoneg = AUTONEG_DISABLE;
        cmd->base.port = -1;

        hw->mac.get_link_status = 1;
        hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

        if (link_up) {
                __u32 speed = SPEED_10000;

                switch (link_speed) {
                case IXGBE_LINK_SPEED_10GB_FULL:
                        speed = SPEED_10000;
                        break;
                case IXGBE_LINK_SPEED_1GB_FULL:
                        speed = SPEED_1000;
                        break;
                case IXGBE_LINK_SPEED_100_FULL:
                        speed = SPEED_100;
                        break;
                }

                cmd->base.speed = speed;
                cmd->base.duplex = DUPLEX_FULL;
        } else {
                cmd->base.speed = SPEED_UNKNOWN;
                cmd->base.duplex = DUPLEX_UNKNOWN;
        }

        return 0;
}

static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        return adapter->msg_enable;
}

static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        adapter->msg_enable = data;
}

#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)

static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
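/* must cover every regs_buff[] slot filled in ixgbevf_get_regs() below */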
        return IXGBE_REGS_LEN * sizeof(u32);
}

static void ixgbevf_get_regs(struct net_device *netdev,
                             struct ethtool_regs *regs,
                             void *p)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 *regs_buff = p;
        u32 regs_len = ixgbevf_get_regs_len(netdev);
        u8 i;

        memset(p, 0, regs_len);

        /* generate a number suitable for ethtool's register version */
        regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;

        /* General Registers */
        regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
        regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
        regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
        regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
        regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

        /* Interrupt */
        /* don't read EICR because it can clear interrupt causes, instead
         * read EICS which is a shadow but doesn't clear EICR
         */
        regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
        regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
        regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
        regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
        regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
        regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
        regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
        regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
        regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

        /* Receive DMA */
        for (i = 0; i < 2; i++)
                regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
        for (i = 0; i < 2; i++)
                regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
        for (i = 0; i < 2; i++)
                regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
        for (i = 0; i < 2; i++)
                regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
        for (i = 0; i < 2; i++)
                regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
        for (i = 0; i < 2; i++)
                regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
        for (i = 0; i < 2; i++)
                regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

        /* Receive */
        regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);

        /* Transmit */
        for (i = 0; i < 2; i++)
                regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
        for (i = 0; i < 2; i++)
                regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
        for (i = 0; i < 2; i++)
                regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
        for (i = 0; i < 2; i++)
                regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
        for (i = 0; i < 2; i++)
                regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
        for (i = 0; i < 2; i++)
                regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
        for (i = 0; i < 2; i++)
                regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
        for (i = 0; i < 2; i++)
                regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
}

static void ixgbevf_get_drvinfo(struct net_device *netdev,
                                struct ethtool_drvinfo *drvinfo)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, ixgbevf_driver_version,
                sizeof(drvinfo->version));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));

        drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
}

static void ixgbevf_get_ringparam(struct net_device *netdev,
                                  struct ethtool_ringparam *ring)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        ring->rx_max_pending = IXGBEVF_MAX_RXD;
        ring->tx_max_pending = IXGBEVF_MAX_TXD;
        ring->rx_pending = adapter->rx_ring_count;
        ring->tx_pending = adapter->tx_ring_count;
}

static int ixgbevf_set_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
        u32 new_rx_count, new_tx_count;
        int i, j, err = 0;

        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;

        new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
        new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
        new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

        new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
        new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
        new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

        /* if nothing to do return success */
        if ((new_tx_count == adapter->tx_ring_count) &&
            (new_rx_count == adapter->rx_ring_count))
                return 0;

        while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
                usleep_range(1000, 2000);

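        /* If the interface is not running, no ring resources are currently
         * allocated, so just record the new counts; the rings are sized at
         * the next open.
         */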
        if (!netif_running(adapter->netdev)) {
                for (i = 0; i < adapter->num_tx_queues; i++)
                        adapter->tx_ring[i]->count = new_tx_count;
                for (i = 0; i < adapter->num_xdp_queues; i++)
                        adapter->xdp_ring[i]->count = new_tx_count;
                for (i = 0; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->count = new_rx_count;
                adapter->tx_ring_count = new_tx_count;
                adapter->xdp_ring_count = new_tx_count;
                adapter->rx_ring_count = new_rx_count;
                goto clear_reset;
        }

        if (new_tx_count != adapter->tx_ring_count) {
                tx_ring = vmalloc(array_size(sizeof(*tx_ring),
                                             adapter->num_tx_queues +
                                                adapter->num_xdp_queues));
                if (!tx_ring) {
                        err = -ENOMEM;
                        goto clear_reset;
                }

                for (i = 0; i < adapter->num_tx_queues; i++) {
                        /* clone ring and setup updated count */
                        tx_ring[i] = *adapter->tx_ring[i];
                        tx_ring[i].count = new_tx_count;
                        err = ixgbevf_setup_tx_resources(&tx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
                                        ixgbevf_free_tx_resources(&tx_ring[i]);
                                }

                                vfree(tx_ring);
                                tx_ring = NULL;

                                goto clear_reset;
                        }
                }

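                /* XDP rings are cloned into the same temporary array, so
                 * 'i' keeps counting on from the last regular Tx queue.
                 */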
                for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
                        /* clone ring and setup updated count */
                        tx_ring[i] = *adapter->xdp_ring[j];
                        tx_ring[i].count = new_tx_count;
                        err = ixgbevf_setup_tx_resources(&tx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
                                        ixgbevf_free_tx_resources(&tx_ring[i]);
                                }

                                vfree(tx_ring);
                                tx_ring = NULL;

                                goto clear_reset;
                        }
                }
        }

        if (new_rx_count != adapter->rx_ring_count) {
                rx_ring = vmalloc(array_size(sizeof(*rx_ring),
                                             adapter->num_rx_queues));
                if (!rx_ring) {
                        err = -ENOMEM;
                        goto clear_reset;
                }

                for (i = 0; i < adapter->num_rx_queues; i++) {
                        /* clone ring and setup updated count */
                        rx_ring[i] = *adapter->rx_ring[i];

                        /* Clear copied XDP RX-queue info */
                        memset(&rx_ring[i].xdp_rxq, 0,
                               sizeof(rx_ring[i].xdp_rxq));

                        rx_ring[i].count = new_rx_count;
                        err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
                                        ixgbevf_free_rx_resources(&rx_ring[i]);
                                }

                                vfree(rx_ring);
                                rx_ring = NULL;

                                goto clear_reset;
                        }
                }
        }

        /* bring interface down to prepare for update */
        ixgbevf_down(adapter);

        /* Tx */
        if (tx_ring) {
                for (i = 0; i < adapter->num_tx_queues; i++) {
                        ixgbevf_free_tx_resources(adapter->tx_ring[i]);
                        *adapter->tx_ring[i] = tx_ring[i];
                }
                adapter->tx_ring_count = new_tx_count;

                for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
                        ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
                        *adapter->xdp_ring[j] = tx_ring[i];
                }
                adapter->xdp_ring_count = new_tx_count;

                vfree(tx_ring);
                tx_ring = NULL;
        }

        /* Rx */
        if (rx_ring) {
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        ixgbevf_free_rx_resources(adapter->rx_ring[i]);
                        *adapter->rx_ring[i] = rx_ring[i];
                }
                adapter->rx_ring_count = new_rx_count;

                vfree(rx_ring);
                rx_ring = NULL;
        }

        /* restore interface using new values */
        ixgbevf_up(adapter);

clear_reset:
        /* free Tx resources if Rx error is encountered */
        if (tx_ring) {
                for (i = 0;
                     i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)
                        ixgbevf_free_tx_resources(&tx_ring[i]);
                vfree(tx_ring);
        }

        clear_bit(__IXGBEVF_RESETTING, &adapter->state);
        return err;
}

static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
        switch (stringset) {
        case ETH_SS_TEST:
                return IXGBEVF_TEST_LEN;
        case ETH_SS_STATS:
                return IXGBEVF_STATS_LEN;
        case ETH_SS_PRIV_FLAGS:
                return IXGBEVF_PRIV_FLAGS_STR_LEN;
        default:
                return -EINVAL;
        }
}

static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                                      struct ethtool_stats *stats, u64 *data)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *net_stats;
        unsigned int start;
        struct ixgbevf_ring *ring;
        int i, j;
        char *p;

        ixgbevf_update_stats(adapter);
        net_stats = dev_get_stats(netdev, &temp);
        for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
                switch (ixgbevf_gstrings_stats[i].type) {
                case NETDEV_STATS:
                        p = (char *)net_stats +
                                        ixgbevf_gstrings_stats[i].stat_offset;
                        break;
                case IXGBEVF_STATS:
                        p = (char *)adapter +
                                        ixgbevf_gstrings_stats[i].stat_offset;
                        break;
                default:
                        data[i] = 0;
                        continue;
                }

                data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
                           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }

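        /* Per-ring counters are sampled under u64_stats seqcount protection
         * so a concurrent writer on another CPU cannot tear the 64-bit values.
         */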
        /* populate Tx queue data */
        for (j = 0; j < adapter->num_tx_queues; j++) {
                ring = adapter->tx_ring[j];
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
                        continue;
                }

                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        data[i]   = ring->stats.packets;
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
        }

        /* populate XDP queue data */
        for (j = 0; j < adapter->num_xdp_queues; j++) {
                ring = adapter->xdp_ring[j];
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
                        continue;
                }

                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        data[i] = ring->stats.packets;
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
        }

        /* populate Rx queue data */
        for (j = 0; j < adapter->num_rx_queues; j++) {
                ring = adapter->rx_ring[j];
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
                        continue;
                }

                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        data[i]   = ring->stats.packets;
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
        }
}

static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
                                u8 *data)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        char *p = (char *)data;
        int i;

        switch (stringset) {
        case ETH_SS_TEST:
                memcpy(data, *ixgbe_gstrings_test,
                       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
                break;
        case ETH_SS_STATS:
                for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
                        memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }

                for (i = 0; i < adapter->num_tx_queues; i++) {
                        sprintf(p, "tx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
                for (i = 0; i < adapter->num_xdp_queues; i++) {
                        sprintf(p, "xdp_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "xdp_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
                break;
        case ETH_SS_PRIV_FLAGS:
                memcpy(data, ixgbevf_priv_flags_strings,
                       IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
                break;
        }
}

static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
        struct ixgbe_hw *hw = &adapter->hw;
        bool link_up;
        u32 link_speed = 0;
        *data = 0;

        hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
        if (!link_up)
                *data = 1;

        return *data;
}

/* ethtool register test data */
struct ixgbevf_reg_test {
        u16 reg;
        u8  array_len;
        u8  test_type;
        u32 mask;
        u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST    1
#define SET_READ_TEST   2
#define WRITE_NO_TEST   3
#define TABLE32_TEST    4
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6

/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
        { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
        { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
        { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
        { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
        { .reg = 0 }
};

static const u32 register_test_patterns[] = {
        0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
                             int reg, u32 mask, u32 write)
{
        u32 pat, val, before;

        if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
                *data = 1;
                return true;
        }
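        /* Write each pattern through the mask, verify the read-back and
         * restore the original value so the register is left untouched.
         */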
        for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
                before = ixgbevf_read_reg(&adapter->hw, reg);
                ixgbe_write_reg(&adapter->hw, reg,
                                register_test_patterns[pat] & write);
                val = ixgbevf_read_reg(&adapter->hw, reg);
                if (val != (register_test_patterns[pat] & write & mask)) {
                        hw_dbg(&adapter->hw,
                               "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
                               reg, val,
                               register_test_patterns[pat] & write & mask);
                        *data = reg;
                        ixgbe_write_reg(&adapter->hw, reg, before);
                        return true;
                }
                ixgbe_write_reg(&adapter->hw, reg, before);
        }
        return false;
}

static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
                              int reg, u32 mask, u32 write)
{
        u32 val, before;

        if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
                *data = 1;
                return true;
        }
        before = ixgbevf_read_reg(&adapter->hw, reg);
        ixgbe_write_reg(&adapter->hw, reg, write & mask);
        val = ixgbevf_read_reg(&adapter->hw, reg);
        if ((write & mask) != (val & mask)) {
                pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
                       reg, (val & mask), write & mask);
                *data = reg;
                ixgbe_write_reg(&adapter->hw, reg, before);
                return true;
        }
        ixgbe_write_reg(&adapter->hw, reg, before);
        return false;
}

static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
        const struct ixgbevf_reg_test *test;
        u32 i;

        if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
                dev_err(&adapter->pdev->dev,
                        "Adapter removed - register test blocked\n");
                *data = 1;
                return 1;
        }
        test = reg_test_vf;

        /* Perform the register test, looping through the test table
         * until we either fail or reach the null entry.
         */
        while (test->reg) {
                for (i = 0; i < test->array_len; i++) {
                        bool b = false;

                        switch (test->test_type) {
                        case PATTERN_TEST:
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + (i * 0x40),
                                                     test->mask,
                                                     test->write);
                                break;
                        case SET_READ_TEST:
                                b = reg_set_and_check(adapter, data,
                                                      test->reg + (i * 0x40),
                                                      test->mask,
                                                      test->write);
                                break;
                        case WRITE_NO_TEST:
                                ixgbe_write_reg(&adapter->hw,
                                                test->reg + (i * 0x40),
                                                test->write);
                                break;
                        case TABLE32_TEST:
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + (i * 4),
                                                     test->mask,
                                                     test->write);
                                break;
                        case TABLE64_TEST_LO:
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + (i * 8),
                                                     test->mask,
                                                     test->write);
                                break;
                        case TABLE64_TEST_HI:
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + 4 + (i * 8),
                                                     test->mask,
                                                     test->write);
                                break;
                        }
                        if (b)
                                return 1;
                }
                test++;
        }

        *data = 0;
        return *data;
}

static void ixgbevf_diag_test(struct net_device *netdev,
                              struct ethtool_test *eth_test, u64 *data)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        bool if_running = netif_running(netdev);

        if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
                dev_err(&adapter->pdev->dev,
                        "Adapter removed - test blocked\n");
                data[0] = 1;
                data[1] = 1;
                eth_test->flags |= ETH_TEST_FL_FAILED;
                return;
        }
        set_bit(__IXGBEVF_TESTING, &adapter->state);
        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
                /* Offline tests */

                hw_dbg(&adapter->hw, "offline testing starting\n");

                /* Link test performed before hardware reset so autoneg doesn't
                 * interfere with test result
                 */
                if (ixgbevf_link_test(adapter, &data[1]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                if (if_running)
                        /* indicate we're in test mode */
                        ixgbevf_close(netdev);
                else
                        ixgbevf_reset(adapter);

                hw_dbg(&adapter->hw, "register testing starting\n");
                if (ixgbevf_reg_test(adapter, &data[0]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                ixgbevf_reset(adapter);

                clear_bit(__IXGBEVF_TESTING, &adapter->state);
                if (if_running)
                        ixgbevf_open(netdev);
        } else {
                hw_dbg(&adapter->hw, "online testing starting\n");
                /* Online tests */
                if (ixgbevf_link_test(adapter, &data[1]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                /* Online tests aren't run; pass by default */
                data[0] = 0;

                clear_bit(__IXGBEVF_TESTING, &adapter->state);
        }
        msleep_interruptible(4 * 1000);
}

static int ixgbevf_nway_reset(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev))
                ixgbevf_reinit_locked(adapter);

        return 0;
}

static int ixgbevf_get_coalesce(struct net_device *netdev,
                                struct ethtool_coalesce *ec)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

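        /* itr_setting values above 1 hold the EITR interval left-shifted by
         * two bits; shift back to report microseconds.  Values of 0 and 1
         * are special modes and are returned unchanged.
         */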
        /* only valid if in constant ITR mode */
        if (adapter->rx_itr_setting <= 1)
                ec->rx_coalesce_usecs = adapter->rx_itr_setting;
        else
                ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

        /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
        if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
                return 0;

        /* only valid if in constant ITR mode */
        if (adapter->tx_itr_setting <= 1)
                ec->tx_coalesce_usecs = adapter->tx_itr_setting;
        else
                ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

        return 0;
}

static int ixgbevf_set_coalesce(struct net_device *netdev,
                                struct ethtool_coalesce *ec)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbevf_q_vector *q_vector;
        int num_vectors, i;
        u16 tx_itr_param, rx_itr_param;

        /* don't accept Tx specific changes if we've got mixed RxTx vectors */
        if (adapter->q_vector[0]->tx.count &&
            adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
                return -EINVAL;

        if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
            (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
                return -EINVAL;

        if (ec->rx_coalesce_usecs > 1)
                adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
        else
                adapter->rx_itr_setting = ec->rx_coalesce_usecs;

        if (adapter->rx_itr_setting == 1)
                rx_itr_param = IXGBE_20K_ITR;
        else
                rx_itr_param = adapter->rx_itr_setting;

        if (ec->tx_coalesce_usecs > 1)
                adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
        else
                adapter->tx_itr_setting = ec->tx_coalesce_usecs;

        if (adapter->tx_itr_setting == 1)
                tx_itr_param = IXGBE_12K_ITR;
        else
                tx_itr_param = adapter->tx_itr_setting;

        num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < num_vectors; i++) {
                q_vector = adapter->q_vector[i];
                if (q_vector->tx.count && !q_vector->rx.count)
                        /* Tx only */
                        q_vector->itr = tx_itr_param;
                else
                        /* Rx only or mixed */
                        q_vector->itr = rx_itr_param;
                ixgbevf_write_eitr(q_vector);
        }

        return 0;
}

static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                             u32 *rules __always_unused)
{
        struct ixgbevf_adapter *adapter = netdev_priv(dev);

        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                info->data = adapter->num_rx_queues;
                return 0;
        default:
                hw_dbg(&adapter->hw, "Command parameters not supported\n");
                return -EOPNOTSUPP;
        }
}

static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
                return IXGBEVF_X550_VFRETA_SIZE;

        return IXGBEVF_82599_RETA_SIZE;
}

static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
        return IXGBEVF_RSS_HASH_KEY_SIZE;
}

static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                            u8 *hfunc)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        int err = 0;

        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;

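        /* X550-class VFs keep a private RSS key and indirection table in the
         * adapter structure; older VFs have to query the PF over the mailbox.
         */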
        if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
                if (key)
                        memcpy(key, adapter->rss_key,
                               ixgbevf_get_rxfh_key_size(netdev));

                if (indir) {
                        int i;

                        for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
                                indir[i] = adapter->rss_indir_tbl[i];
                }
        } else {
                /* If neither indirection table nor hash key was requested
                 *  - just return a success avoiding taking any locks.
                 */
                if (!indir && !key)
                        return 0;

                spin_lock_bh(&adapter->mbx_lock);
                if (indir)
                        err = ixgbevf_get_reta_locked(&adapter->hw, indir,
                                                      adapter->num_rx_queues);

                if (!err && key)
                        err = ixgbevf_get_rss_key_locked(&adapter->hw, key);

                spin_unlock_bh(&adapter->mbx_lock);
        }

        return err;
}

static u32 ixgbevf_get_priv_flags(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        u32 priv_flags = 0;

        if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
                priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX;

        return priv_flags;
}

static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        unsigned int flags = adapter->flags;

        flags &= ~IXGBEVF_FLAGS_LEGACY_RX;
        if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX)
                flags |= IXGBEVF_FLAGS_LEGACY_RX;

        if (flags != adapter->flags) {
                adapter->flags = flags;

                /* reset interface to repopulate queues */
                if (netif_running(netdev))
                        ixgbevf_reinit_locked(adapter);
        }

        return 0;
}

static const struct ethtool_ops ixgbevf_ethtool_ops = {
        .get_drvinfo            = ixgbevf_get_drvinfo,
        .get_regs_len           = ixgbevf_get_regs_len,
        .get_regs               = ixgbevf_get_regs,
        .nway_reset             = ixgbevf_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_ringparam          = ixgbevf_get_ringparam,
        .set_ringparam          = ixgbevf_set_ringparam,
        .get_msglevel           = ixgbevf_get_msglevel,
        .set_msglevel           = ixgbevf_set_msglevel,
        .self_test              = ixgbevf_diag_test,
        .get_sset_count         = ixgbevf_get_sset_count,
        .get_strings            = ixgbevf_get_strings,
        .get_ethtool_stats      = ixgbevf_get_ethtool_stats,
        .get_coalesce           = ixgbevf_get_coalesce,
        .set_coalesce           = ixgbevf_set_coalesce,
        .get_rxnfc              = ixgbevf_get_rxnfc,
        .get_rxfh_indir_size    = ixgbevf_get_rxfh_indir_size,
        .get_rxfh_key_size      = ixgbevf_get_rxfh_key_size,
        .get_rxfh               = ixgbevf_get_rxfh,
        .get_link_ksettings     = ixgbevf_get_link_ksettings,
        .get_priv_flags         = ixgbevf_get_priv_flags,
        .set_priv_flags         = ixgbevf_set_priv_flags,
};

void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}