GNU Linux-libre 4.19.295-gnu1
[releases.git] / drivers / net / ethernet / intel / ice / ice_ethtool.c
1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright (c) 2018, Intel Corporation. */
3
4 /* ethtool support for ice */
5
6 #include "ice.h"
7
/* one ethtool statistic: maps a display name to a struct member */
struct ice_stats {
	char stat_string[ETH_GSTRING_LEN];	/* name shown by "ethtool -S" */
	int sizeof_stat;			/* member size: sizeof(u64) or sizeof(u32) */
	int stat_offset;			/* byte offset of the member in its struct */
};
13
/* build one ice_stats entry from a struct type and member name */
#define ICE_STAT(_type, _name, _stat) { \
	.stat_string = _name, \
	.sizeof_stat = FIELD_SIZEOF(_type, _stat), \
	.stat_offset = offsetof(_type, _stat) \
}

/* statistic resolved against struct ice_vsi */
#define ICE_VSI_STAT(_name, _stat) \
		ICE_STAT(struct ice_vsi, _name, _stat)
/* statistic resolved against struct ice_pf */
#define ICE_PF_STAT(_name, _stat) \
		ICE_STAT(struct ice_pf, _name, _stat)
24
25 static int ice_q_stats_len(struct net_device *netdev)
26 {
27         struct ice_netdev_priv *np = netdev_priv(netdev);
28
29         return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
30                 (sizeof(struct ice_q_stats) / sizeof(u64)));
31 }
32
/* entry counts of the stat tables defined below */
#define ICE_PF_STATS_LEN        ARRAY_SIZE(ice_gstrings_pf_stats)
#define ICE_VSI_STATS_LEN       ARRAY_SIZE(ice_gstrings_vsi_stats)

/* total ethtool stat count for netdev n: VSI + port + per-queue stats */
#define ICE_ALL_STATS_LEN(n)    (ICE_PF_STATS_LEN + ICE_VSI_STATS_LEN + \
                                 ice_q_stats_len(n))
38
/* per-VSI statistics, resolved via offsets into struct ice_vsi */
static const struct ice_stats ice_gstrings_vsi_stats[] = {
	ICE_VSI_STAT("tx_unicast", eth_stats.tx_unicast),
	ICE_VSI_STAT("rx_unicast", eth_stats.rx_unicast),
	ICE_VSI_STAT("tx_multicast", eth_stats.tx_multicast),
	ICE_VSI_STAT("rx_multicast", eth_stats.rx_multicast),
	ICE_VSI_STAT("tx_broadcast", eth_stats.tx_broadcast),
	ICE_VSI_STAT("rx_broadcast", eth_stats.rx_broadcast),
	ICE_VSI_STAT("tx_bytes", eth_stats.tx_bytes),
	ICE_VSI_STAT("rx_bytes", eth_stats.rx_bytes),
	ICE_VSI_STAT("rx_discards", eth_stats.rx_discards),
	ICE_VSI_STAT("tx_errors", eth_stats.tx_errors),
	ICE_VSI_STAT("tx_linearize", tx_linearize),
	ICE_VSI_STAT("rx_unknown_protocol", eth_stats.rx_unknown_protocol),
	ICE_VSI_STAT("rx_alloc_fail", rx_buf_failed),
	ICE_VSI_STAT("rx_pg_alloc_fail", rx_page_failed),
};
55
56 /* These PF_STATs might look like duplicates of some NETDEV_STATs,
57  * but they aren't. This device is capable of supporting multiple
58  * VSIs/netdevs on a single PF. The NETDEV_STATs are for individual
59  * netdevs whereas the PF_STATs are for the physical function that's
60  * hosting these netdevs.
61  *
62  * The PF_STATs are appended to the netdev stats only when ethtool -S
63  * is queried on the base PF netdev.
64  */
65 static struct ice_stats ice_gstrings_pf_stats[] = {
66         ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
67         ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
68         ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
69         ICE_PF_STAT("rx_unicast", stats.eth.rx_unicast),
70         ICE_PF_STAT("tx_multicast", stats.eth.tx_multicast),
71         ICE_PF_STAT("rx_multicast", stats.eth.rx_multicast),
72         ICE_PF_STAT("tx_broadcast", stats.eth.tx_broadcast),
73         ICE_PF_STAT("rx_broadcast", stats.eth.rx_broadcast),
74         ICE_PF_STAT("tx_errors", stats.eth.tx_errors),
75         ICE_PF_STAT("tx_size_64", stats.tx_size_64),
76         ICE_PF_STAT("rx_size_64", stats.rx_size_64),
77         ICE_PF_STAT("tx_size_127", stats.tx_size_127),
78         ICE_PF_STAT("rx_size_127", stats.rx_size_127),
79         ICE_PF_STAT("tx_size_255", stats.tx_size_255),
80         ICE_PF_STAT("rx_size_255", stats.rx_size_255),
81         ICE_PF_STAT("tx_size_511", stats.tx_size_511),
82         ICE_PF_STAT("rx_size_511", stats.rx_size_511),
83         ICE_PF_STAT("tx_size_1023", stats.tx_size_1023),
84         ICE_PF_STAT("rx_size_1023", stats.rx_size_1023),
85         ICE_PF_STAT("tx_size_1522", stats.tx_size_1522),
86         ICE_PF_STAT("rx_size_1522", stats.rx_size_1522),
87         ICE_PF_STAT("tx_size_big", stats.tx_size_big),
88         ICE_PF_STAT("rx_size_big", stats.rx_size_big),
89         ICE_PF_STAT("link_xon_tx", stats.link_xon_tx),
90         ICE_PF_STAT("link_xon_rx", stats.link_xon_rx),
91         ICE_PF_STAT("link_xoff_tx", stats.link_xoff_tx),
92         ICE_PF_STAT("link_xoff_rx", stats.link_xoff_rx),
93         ICE_PF_STAT("tx_dropped_link_down", stats.tx_dropped_link_down),
94         ICE_PF_STAT("rx_undersize", stats.rx_undersize),
95         ICE_PF_STAT("rx_fragments", stats.rx_fragments),
96         ICE_PF_STAT("rx_oversize", stats.rx_oversize),
97         ICE_PF_STAT("rx_jabber", stats.rx_jabber),
98         ICE_PF_STAT("rx_csum_bad", hw_csum_rx_error),
99         ICE_PF_STAT("rx_length_errors", stats.rx_len_errors),
100         ICE_PF_STAT("rx_dropped", stats.eth.rx_discards),
101         ICE_PF_STAT("rx_crc_errors", stats.crc_errors),
102         ICE_PF_STAT("illegal_bytes", stats.illegal_bytes),
103         ICE_PF_STAT("mac_local_faults", stats.mac_local_faults),
104         ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
105 };
106
107 static u32 ice_regs_dump_list[] = {
108         PFGEN_STATE,
109         PRTGEN_STATUS,
110         QRX_CTRL(0),
111         QINT_TQCTL(0),
112         QINT_RQCTL(0),
113         PFINT_OICR_ENA,
114         QRX_ITR(0),
115 };
116
117 /**
118  * ice_nvm_version_str - format the NVM version strings
119  * @hw: ptr to the hardware info
120  */
121 static char *ice_nvm_version_str(struct ice_hw *hw)
122 {
123         static char buf[ICE_ETHTOOL_FWVER_LEN];
124         u8 ver, patch;
125         u32 full_ver;
126         u16 build;
127
128         full_ver = hw->nvm.oem_ver;
129         ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
130         build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>
131                       ICE_OEM_VER_BUILD_SHIFT);
132         patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);
133
134         snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d",
135                  (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,
136                  (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,
137                  hw->nvm.eetrack, ver, build, patch);
138
139         return buf;
140 }
141
142 static void
143 ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
144 {
145         struct ice_netdev_priv *np = netdev_priv(netdev);
146         struct ice_vsi *vsi = np->vsi;
147         struct ice_pf *pf = vsi->back;
148
149         strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
150         strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
151         strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
152                 sizeof(drvinfo->fw_version));
153         strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
154                 sizeof(drvinfo->bus_info));
155 }
156
157 static int ice_get_regs_len(struct net_device __always_unused *netdev)
158 {
159         return sizeof(ice_regs_dump_list);
160 }
161
162 static void
163 ice_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
164 {
165         struct ice_netdev_priv *np = netdev_priv(netdev);
166         struct ice_pf *pf = np->vsi->back;
167         struct ice_hw *hw = &pf->hw;
168         u32 *regs_buf = (u32 *)p;
169         int i;
170
171         regs->version = 1;
172
173         for (i = 0; i < ARRAY_SIZE(ice_regs_dump_list); ++i)
174                 regs_buf[i] = rd32(hw, ice_regs_dump_list[i]);
175 }
176
177 static u32 ice_get_msglevel(struct net_device *netdev)
178 {
179         struct ice_netdev_priv *np = netdev_priv(netdev);
180         struct ice_pf *pf = np->vsi->back;
181
182 #ifndef CONFIG_DYNAMIC_DEBUG
183         if (pf->hw.debug_mask)
184                 netdev_info(netdev, "hw debug_mask: 0x%llX\n",
185                             pf->hw.debug_mask);
186 #endif /* !CONFIG_DYNAMIC_DEBUG */
187
188         return pf->msg_enable;
189 }
190
191 static void ice_set_msglevel(struct net_device *netdev, u32 data)
192 {
193         struct ice_netdev_priv *np = netdev_priv(netdev);
194         struct ice_pf *pf = np->vsi->back;
195
196 #ifndef CONFIG_DYNAMIC_DEBUG
197         if (ICE_DBG_USER & data)
198                 pf->hw.debug_mask = data;
199         else
200                 pf->msg_enable = data;
201 #else
202         pf->msg_enable = data;
203 #endif /* !CONFIG_DYNAMIC_DEBUG */
204 }
205
/**
 * ice_get_strings - collect the strings for a given stringset
 * @netdev: network interface device structure
 * @stringset: stringset id, e.g. ETH_SS_STATS
 * @data: output buffer, one ETH_GSTRING_LEN slot per string
 *
 * The order written here must exactly match the value order produced by
 * ice_get_ethtool_stats() and the count from ice_get_sset_count().
 */
static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_STATS:
		/* VSI-level stat names first */
		for (i = 0; i < ICE_VSI_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "%s",
				 ice_gstrings_vsi_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}

		/* per-queue packet/byte counter names, Tx then Rx */
		ice_for_each_alloc_txq(vsi, i) {
			snprintf(p, ETH_GSTRING_LEN,
				 "tx-queue-%u.tx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "tx-queue-%u.tx_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		ice_for_each_alloc_rxq(vsi, i) {
			snprintf(p, ETH_GSTRING_LEN,
				 "rx-queue-%u.rx_packets", i);
			p += ETH_GSTRING_LEN;
			snprintf(p, ETH_GSTRING_LEN, "rx-queue-%u.rx_bytes", i);
			p += ETH_GSTRING_LEN;
		}

		/* port-level stats are appended only on the PF VSI's netdev */
		if (vsi->type != ICE_VSI_PF)
			return;

		for (i = 0; i < ICE_PF_STATS_LEN; i++) {
			snprintf(p, ETH_GSTRING_LEN, "port.%s",
				 ice_gstrings_pf_stats[i].stat_string);
			p += ETH_GSTRING_LEN;
		}

		break;
	default:
		break;
	}
}
251
252 static int ice_get_sset_count(struct net_device *netdev, int sset)
253 {
254         switch (sset) {
255         case ETH_SS_STATS:
256                 /* The number (and order) of strings reported *must* remain
257                  * constant for a given netdevice. This function must not
258                  * report a different number based on run time parameters
259                  * (such as the number of queues in use, or the setting of
260                  * a private ethtool flag). This is due to the nature of the
261                  * ethtool stats API.
262                  *
263                  * User space programs such as ethtool must make 3 separate
264                  * ioctl requests, one for size, one for the strings, and
265                  * finally one for the stats. Since these cross into
266                  * user space, changes to the number or size could result in
267                  * undefined memory access or incorrect string<->value
268                  * correlations for statistics.
269                  *
270                  * Even if it appears to be safe, changes to the size or
271                  * order of strings will suffer from race conditions and are
272                  * not safe.
273                  */
274                 return ICE_ALL_STATS_LEN(netdev);
275         default:
276                 return -EOPNOTSUPP;
277         }
278 }
279
/**
 * ice_get_ethtool_stats - copy stat values into the supplied buffer
 * @netdev: network interface device structure
 * @stats: ethtool stats command structure (unused)
 * @data: output buffer, one u64 per statistic
 *
 * Values are written in exactly the order that ice_get_strings() emits
 * the corresponding names: VSI stats, per-queue Tx then Rx counters,
 * and (on the PF VSI only) port-level stats.
 */
static void
ice_get_ethtool_stats(struct net_device *netdev,
		      struct ethtool_stats __always_unused *stats, u64 *data)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	struct ice_ring *ring;
	unsigned int j = 0;
	int i = 0;
	char *p;

	/* resolve each VSI stat via its byte offset; members narrower
	 * than u64 are read as u32
	 */
	for (j = 0; j < ICE_VSI_STATS_LEN; j++) {
		p = (char *)vsi + ice_gstrings_vsi_stats[j].stat_offset;
		data[i++] = (ice_gstrings_vsi_stats[j].sizeof_stat ==
			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate per queue stats; RCU protects against the rings being
	 * freed while we read them
	 */
	rcu_read_lock();

	ice_for_each_alloc_txq(vsi, j) {
		ring = READ_ONCE(vsi->tx_rings[j]);
		if (ring) {
			data[i++] = ring->stats.pkts;
			data[i++] = ring->stats.bytes;
		} else {
			/* ring not allocated: emit zeros so the value
			 * count stays in sync with the string count
			 */
			data[i++] = 0;
			data[i++] = 0;
		}
	}

	ice_for_each_alloc_rxq(vsi, j) {
		ring = READ_ONCE(vsi->rx_rings[j]);
		if (ring) {
			data[i++] = ring->stats.pkts;
			data[i++] = ring->stats.bytes;
		} else {
			data[i++] = 0;
			data[i++] = 0;
		}
	}

	rcu_read_unlock();

	/* port-level stats are appended only on the PF VSI's netdev */
	if (vsi->type != ICE_VSI_PF)
		return;

	for (j = 0; j < ICE_PF_STATS_LEN; j++) {
		p = (char *)pf + ice_gstrings_pf_stats[j].stat_offset;
		data[i++] = (ice_gstrings_pf_stats[j].sizeof_stat ==
			     sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
}
334
/**
 * ice_get_link_ksettings - Get Link Speed and Duplex settings
 * @netdev: network interface device structure
 * @ks: ethtool ksettings structure to fill in
 *
 * Reports speed, duplex, autoneg, media type and pause advertisement
 * from the port's last known link status.
 *
 * NOTE(review): only 10000baseT_Full is ever reported as supported and
 * advertised, regardless of the actual PHY type — confirm whether more
 * link modes should be derived from the PHY capabilities.
 */
static int
ice_get_link_ksettings(struct net_device *netdev,
		       struct ethtool_link_ksettings *ks)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_link_status *hw_link_info;
	struct ice_vsi *vsi = np->vsi;
	bool link_up;

	hw_link_info = &vsi->port_info->phy.link_info;
	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;

	ethtool_link_ksettings_add_link_mode(ks, supported,
					     10000baseT_Full);
	ethtool_link_ksettings_add_link_mode(ks, advertising,
					     10000baseT_Full);

	/* set speed and duplex */
	if (link_up) {
		switch (hw_link_info->link_speed) {
		case ICE_AQ_LINK_SPEED_100MB:
			ks->base.speed = SPEED_100;
			break;
		case ICE_AQ_LINK_SPEED_2500MB:
			ks->base.speed = SPEED_2500;
			break;
		case ICE_AQ_LINK_SPEED_5GB:
			ks->base.speed = SPEED_5000;
			break;
		case ICE_AQ_LINK_SPEED_10GB:
			ks->base.speed = SPEED_10000;
			break;
		case ICE_AQ_LINK_SPEED_25GB:
			ks->base.speed = SPEED_25000;
			break;
		case ICE_AQ_LINK_SPEED_40GB:
			ks->base.speed = SPEED_40000;
			break;
		default:
			/* speed value not recognized by this driver */
			ks->base.speed = SPEED_UNKNOWN;
			break;
		}

		/* this hardware only runs full duplex */
		ks->base.duplex = DUPLEX_FULL;
	} else {
		ks->base.speed = SPEED_UNKNOWN;
		ks->base.duplex = DUPLEX_UNKNOWN;
	}

	/* set autoneg settings */
	ks->base.autoneg = ((hw_link_info->an_info & ICE_AQ_AN_COMPLETED) ?
			    AUTONEG_ENABLE : AUTONEG_DISABLE);

	/* set media type settings */
	switch (vsi->port_info->phy.media_type) {
	case ICE_MEDIA_FIBER:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ks->base.port = PORT_FIBRE;
		break;
	case ICE_MEDIA_BASET:
		ethtool_link_ksettings_add_link_mode(ks, supported, TP);
		ethtool_link_ksettings_add_link_mode(ks, advertising, TP);
		ks->base.port = PORT_TP;
		break;
	case ICE_MEDIA_BACKPLANE:
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, supported, Backplane);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Backplane);
		ks->base.port = PORT_NONE;
		break;
	case ICE_MEDIA_DA:
		ethtool_link_ksettings_add_link_mode(ks, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(ks, advertising, FIBRE);
		ks->base.port = PORT_DA;
		break;
	default:
		ks->base.port = PORT_OTHER;
		break;
	}

	/* flow control is symmetric and always supported */
	ethtool_link_ksettings_add_link_mode(ks, supported, Pause);

	/* translate the requested FC mode into Pause/Asym_Pause bits */
	switch (vsi->port_info->fc.req_mode) {
	case ICE_FC_FULL:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		break;
	case ICE_FC_TX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case ICE_FC_RX_PAUSE:
		ethtool_link_ksettings_add_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	case ICE_FC_PFC:
	default:
		ethtool_link_ksettings_del_link_mode(ks, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(ks, advertising,
						     Asym_Pause);
		break;
	}

	return 0;
}
443
/**
 * ice_get_rxnfc - command to get RX flow classification rules
 * @netdev: network interface device structure
 * @cmd: ethtool rxnfc command
 * @rule_locs: buffer to return Rx flow classification rules
 *
 * Returns Success if the command is supported.
 */
static int ice_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
			 u32 __always_unused *rule_locs)
{
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		/* report the RSS queue count so user space can size RSS
		 * requests correctly
		 */
		cmd->data = vsi->rss_size;
		ret = 0;
		break;
	default:
		break;
	}

	return ret;
}
470
471 static void
472 ice_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
473 {
474         struct ice_netdev_priv *np = netdev_priv(netdev);
475         struct ice_vsi *vsi = np->vsi;
476
477         ring->rx_max_pending = ICE_MAX_NUM_DESC;
478         ring->tx_max_pending = ICE_MAX_NUM_DESC;
479         ring->rx_pending = vsi->rx_rings[0]->count;
480         ring->tx_pending = vsi->tx_rings[0]->count;
481
482         /* Rx mini and jumbo rings are not supported */
483         ring->rx_mini_max_pending = 0;
484         ring->rx_jumbo_max_pending = 0;
485         ring->rx_mini_pending = 0;
486         ring->rx_jumbo_pending = 0;
487 }
488
/**
 * ice_set_ringparam - change the Tx/Rx descriptor ring sizes
 * @netdev: network interface device structure
 * @ring: requested ring parameters
 *
 * Rounds the requested counts up to ICE_REQ_DESC_MULTIPLE, allocates
 * replacement rings at the new size, and swaps them in across a brief
 * interface down/up. If the interface is not running, only the stored
 * counts are updated. Returns 0 on success or a negative errno.
 */
static int
ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
{
	struct ice_ring *tx_rings = NULL, *rx_rings = NULL;
	struct ice_netdev_priv *np = netdev_priv(netdev);
	struct ice_vsi *vsi = np->vsi;
	struct ice_pf *pf = vsi->back;
	int i, timeout = 50, err = 0;
	u32 new_rx_cnt, new_tx_cnt;

	if (ring->tx_pending > ICE_MAX_NUM_DESC ||
	    ring->tx_pending < ICE_MIN_NUM_DESC ||
	    ring->rx_pending > ICE_MAX_NUM_DESC ||
	    ring->rx_pending < ICE_MIN_NUM_DESC) {
		netdev_err(netdev, "Descriptors requested (Tx: %d / Rx: %d) out of range [%d-%d] (increment %d)\n",
			   ring->tx_pending, ring->rx_pending,
			   ICE_MIN_NUM_DESC, ICE_MAX_NUM_DESC,
			   ICE_REQ_DESC_MULTIPLE);
		return -EINVAL;
	}

	/* round up to the hardware's descriptor-count granularity */
	new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
	if (new_tx_cnt != ring->tx_pending)
		netdev_info(netdev,
			    "Requested Tx descriptor count rounded up to %d\n",
			    new_tx_cnt);
	new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
	if (new_rx_cnt != ring->rx_pending)
		netdev_info(netdev,
			    "Requested Rx descriptor count rounded up to %d\n",
			    new_rx_cnt);

	/* if nothing to do return success */
	if (new_tx_cnt == vsi->tx_rings[0]->count &&
	    new_rx_cnt == vsi->rx_rings[0]->count) {
		netdev_dbg(netdev, "Nothing to change, descriptor count is same as requested\n");
		return 0;
	}

	/* serialize against other configuration changes; give up after
	 * ~50-100ms of waiting
	 */
	while (test_and_set_bit(__ICE_CFG_BUSY, pf->state)) {
		timeout--;
		if (!timeout)
			return -EBUSY;
		usleep_range(1000, 2000);
	}

	/* set for the next time the netdev is started */
	if (!netif_running(vsi->netdev)) {
		for (i = 0; i < vsi->alloc_txq; i++)
			vsi->tx_rings[i]->count = new_tx_cnt;
		for (i = 0; i < vsi->alloc_rxq; i++)
			vsi->rx_rings[i]->count = new_rx_cnt;
		netdev_dbg(netdev, "Link is down, descriptor count change happens when link is brought up\n");
		goto done;
	}

	if (new_tx_cnt == vsi->tx_rings[0]->count)
		goto process_rx;

	/* alloc updated Tx resources */
	netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
		    vsi->tx_rings[0]->count, new_tx_cnt);

	tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
				sizeof(struct ice_ring), GFP_KERNEL);
	if (!tx_rings) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vsi->alloc_txq; i++) {
		/* clone ring and setup updated count */
		tx_rings[i] = *vsi->tx_rings[i];
		tx_rings[i].count = new_tx_cnt;
		tx_rings[i].desc = NULL;
		tx_rings[i].tx_buf = NULL;
		err = ice_setup_tx_ring(&tx_rings[i]);
		if (err) {
			/* unwind the rings set up so far */
			while (i) {
				i--;
				ice_clean_tx_ring(&tx_rings[i]);
			}
			devm_kfree(&pf->pdev->dev, tx_rings);
			goto done;
		}
	}

process_rx:
	if (new_rx_cnt == vsi->rx_rings[0]->count)
		goto process_link;

	/* alloc updated Rx resources */
	netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
		    vsi->rx_rings[0]->count, new_rx_cnt);

	rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
				sizeof(struct ice_ring), GFP_KERNEL);
	if (!rx_rings) {
		err = -ENOMEM;
		goto done;
	}

	for (i = 0; i < vsi->alloc_rxq; i++) {
		/* clone ring and setup updated count */
		rx_rings[i] = *vsi->rx_rings[i];
		rx_rings[i].count = new_rx_cnt;
		rx_rings[i].desc = NULL;
		rx_rings[i].rx_buf = NULL;
		/* this is to allow wr32 to have something to write to
		 * during early allocation of Rx buffers
		 */
		rx_rings[i].tail = vsi->back->hw.hw_addr + PRTGEN_STATUS;

		err = ice_setup_rx_ring(&rx_rings[i]);
		if (err)
			goto rx_unwind;

		/* allocate Rx buffers */
		err = ice_alloc_rx_bufs(&rx_rings[i],
					ICE_DESC_UNUSED(&rx_rings[i]));
rx_unwind:
		/* reached both via the goto on setup failure and by falling
		 * through after the buffer allocation, so one err check
		 * covers both failure paths.
		 * NOTE(review): err is forced to -ENOMEM below even when the
		 * underlying failure was something else — confirm intended.
		 */
		if (err) {
			while (i) {
				i--;
				ice_free_rx_ring(&rx_rings[i]);
			}
			devm_kfree(&pf->pdev->dev, rx_rings);
			err = -ENOMEM;
			goto free_tx;
		}
	}

process_link:
	/* Bring interface down, copy in the new ring info, then restore the
	 * interface. if VSI is up, bring it down and then back up
	 */
	if (!test_and_set_bit(__ICE_DOWN, vsi->state)) {
		ice_down(vsi);

		if (tx_rings) {
			for (i = 0; i < vsi->alloc_txq; i++) {
				ice_free_tx_ring(vsi->tx_rings[i]);
				*vsi->tx_rings[i] = tx_rings[i];
			}
			devm_kfree(&pf->pdev->dev, tx_rings);
		}

		if (rx_rings) {
			for (i = 0; i < vsi->alloc_rxq; i++) {
				ice_free_rx_ring(vsi->rx_rings[i]);
				/* copy the real tail offset */
				rx_rings[i].tail = vsi->rx_rings[i]->tail;
				/* this is to fake out the allocation routine
				 * into thinking it has to realloc everything
				 * but the recycling logic will let us re-use
				 * the buffers allocated above
				 */
				rx_rings[i].next_to_use = 0;
				rx_rings[i].next_to_clean = 0;
				rx_rings[i].next_to_alloc = 0;
				*vsi->rx_rings[i] = rx_rings[i];
			}
			devm_kfree(&pf->pdev->dev, rx_rings);
		}

		ice_up(vsi);
	}
	goto done;

free_tx:
	/* error cleanup if the Rx allocations failed after getting Tx */
	if (tx_rings) {
		for (i = 0; i < vsi->alloc_txq; i++)
			ice_free_tx_ring(&tx_rings[i]);
		devm_kfree(&pf->pdev->dev, tx_rings);
	}

done:
	clear_bit(__ICE_CFG_BUSY, pf->state);
	return err;
}
670
671 static int ice_nway_reset(struct net_device *netdev)
672 {
673         /* restart autonegotiation */
674         struct ice_netdev_priv *np = netdev_priv(netdev);
675         struct ice_link_status *hw_link_info;
676         struct ice_vsi *vsi = np->vsi;
677         struct ice_port_info *pi;
678         enum ice_status status;
679         bool link_up;
680
681         pi = vsi->port_info;
682         hw_link_info = &pi->phy.link_info;
683         link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
684
685         status = ice_aq_set_link_restart_an(pi, link_up, NULL);
686         if (status) {
687                 netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
688                             status, pi->hw->adminq.sq_last_status);
689                 return -EIO;
690         }
691
692         return 0;
693 }
694
695 /**
696  * ice_get_pauseparam - Get Flow Control status
697  * @netdev: network interface device structure
698  * @pause: ethernet pause (flow control) parameters
699  */
700 static void
701 ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
702 {
703         struct ice_netdev_priv *np = netdev_priv(netdev);
704         struct ice_port_info *pi;
705
706         pi = np->vsi->port_info;
707         pause->autoneg =
708                 ((pi->phy.link_info.an_info & ICE_AQ_AN_COMPLETED) ?
709                  AUTONEG_ENABLE : AUTONEG_DISABLE);
710
711         if (pi->fc.current_mode == ICE_FC_RX_PAUSE) {
712                 pause->rx_pause = 1;
713         } else if (pi->fc.current_mode == ICE_FC_TX_PAUSE) {
714                 pause->tx_pause = 1;
715         } else if (pi->fc.current_mode == ICE_FC_FULL) {
716                 pause->rx_pause = 1;
717                 pause->tx_pause = 1;
718         }
719 }
720
721 /**
722  * ice_set_pauseparam - Set Flow Control parameter
723  * @netdev: network interface device structure
724  * @pause: return tx/rx flow control status
725  */
726 static int
727 ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
728 {
729         struct ice_netdev_priv *np = netdev_priv(netdev);
730         struct ice_link_status *hw_link_info;
731         struct ice_pf *pf = np->vsi->back;
732         struct ice_vsi *vsi = np->vsi;
733         struct ice_hw *hw = &pf->hw;
734         struct ice_port_info *pi;
735         enum ice_status status;
736         u8 aq_failures;
737         bool link_up;
738         int err = 0;
739
740         pi = vsi->port_info;
741         hw_link_info = &pi->phy.link_info;
742         link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
743
744         /* Changing the port's flow control is not supported if this isn't the
745          * PF VSI
746          */
747         if (vsi->type != ICE_VSI_PF) {
748                 netdev_info(netdev, "Changing flow control parameters only supported for PF VSI\n");
749                 return -EOPNOTSUPP;
750         }
751
752         if (pause->autoneg != (hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
753                 netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
754                 return -EOPNOTSUPP;
755         }
756
757         /* If we have link and don't have autoneg */
758         if (!test_bit(__ICE_DOWN, pf->state) &&
759             !(hw_link_info->an_info & ICE_AQ_AN_COMPLETED)) {
760                 /* Send message that it might not necessarily work*/
761                 netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
762         }
763
764         if (pause->rx_pause && pause->tx_pause)
765                 pi->fc.req_mode = ICE_FC_FULL;
766         else if (pause->rx_pause && !pause->tx_pause)
767                 pi->fc.req_mode = ICE_FC_RX_PAUSE;
768         else if (!pause->rx_pause && pause->tx_pause)
769                 pi->fc.req_mode = ICE_FC_TX_PAUSE;
770         else if (!pause->rx_pause && !pause->tx_pause)
771                 pi->fc.req_mode = ICE_FC_NONE;
772         else
773                 return -EINVAL;
774
775         /* Set the FC mode and only restart AN if link is up */
776         status = ice_set_fc(pi, &aq_failures, link_up);
777
778         if (aq_failures & ICE_SET_FC_AQ_FAIL_GET) {
779                 netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with err %d aq_err %d\n",
780                             status, hw->adminq.sq_last_status);
781                 err = -EAGAIN;
782         } else if (aq_failures & ICE_SET_FC_AQ_FAIL_SET) {
783                 netdev_info(netdev, "Set fc failed on the set_phy_config call with err %d aq_err %d\n",
784                             status, hw->adminq.sq_last_status);
785                 err = -EAGAIN;
786         } else if (aq_failures & ICE_SET_FC_AQ_FAIL_UPDATE) {
787                 netdev_info(netdev, "Set fc failed on the get_link_info call with err %d aq_err %d\n",
788                             status, hw->adminq.sq_last_status);
789                 err = -EAGAIN;
790         }
791
792         if (!test_bit(__ICE_DOWN, pf->state)) {
793                 /* Give it a little more time to try to come back. If still
794                  * down, restart autoneg link or reinitialize the interface.
795                  */
796                 msleep(75);
797                 if (!test_bit(__ICE_DOWN, pf->state))
798                         return ice_nway_reset(netdev);
799
800                 ice_down(vsi);
801                 ice_up(vsi);
802         }
803
804         return err;
805 }
806
/**
 * ice_get_rxfh_key_size - get the RSS hash key size
 * @netdev: network interface device structure
 *
 * Returns the size, in bytes, of the RSS hash key.
 */
static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
{
	return ICE_VSIQF_HKEY_ARRAY_SIZE;
}
817
818 /**
819  * ice_get_rxfh_indir_size - get the rx flow hash indirection table size
820  * @netdev: network interface device structure
821  *
822  * Returns the table size.
823  */
824 static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
825 {
826         struct ice_netdev_priv *np = netdev_priv(netdev);
827
828         return np->vsi->rss_table_size;
829 }
830
831 /**
832  * ice_get_rxfh - get the rx flow hash indirection table
833  * @netdev: network interface device structure
834  * @indir: indirection table
835  * @key: hash key
836  * @hfunc: hash function
837  *
838  * Reads the indirection table directly from the hardware.
839  */
840 static int
841 ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
842 {
843         struct ice_netdev_priv *np = netdev_priv(netdev);
844         struct ice_vsi *vsi = np->vsi;
845         struct ice_pf *pf = vsi->back;
846         int ret = 0, i;
847         u8 *lut;
848
849         if (hfunc)
850                 *hfunc = ETH_RSS_HASH_TOP;
851
852         if (!indir)
853                 return 0;
854
855         if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
856                 /* RSS not supported return error here */
857                 netdev_warn(netdev, "RSS is not configured on this VSI!\n");
858                 return -EIO;
859         }
860
861         lut = devm_kzalloc(&pf->pdev->dev, vsi->rss_table_size, GFP_KERNEL);
862         if (!lut)
863                 return -ENOMEM;
864
865         if (ice_get_rss(vsi, key, lut, vsi->rss_table_size)) {
866                 ret = -EIO;
867                 goto out;
868         }
869
870         for (i = 0; i < vsi->rss_table_size; i++)
871                 indir[i] = (u32)(lut[i]);
872
873 out:
874         devm_kfree(&pf->pdev->dev, lut);
875         return ret;
876 }
877
878 /**
879  * ice_set_rxfh - set the rx flow hash indirection table
880  * @netdev: network interface device structure
881  * @indir: indirection table
882  * @key: hash key
883  * @hfunc: hash function
884  *
885  * Returns -EINVAL if the table specifies an invalid queue id, otherwise
886  * returns 0 after programming the table.
887  */
888 static int ice_set_rxfh(struct net_device *netdev, const u32 *indir,
889                         const u8 *key, const u8 hfunc)
890 {
891         struct ice_netdev_priv *np = netdev_priv(netdev);
892         struct ice_vsi *vsi = np->vsi;
893         struct ice_pf *pf = vsi->back;
894         u8 *seed = NULL;
895
896         if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
897                 return -EOPNOTSUPP;
898
899         if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags)) {
900                 /* RSS not supported return error here */
901                 netdev_warn(netdev, "RSS is not configured on this VSI!\n");
902                 return -EIO;
903         }
904
905         if (key) {
906                 if (!vsi->rss_hkey_user) {
907                         vsi->rss_hkey_user =
908                                 devm_kzalloc(&pf->pdev->dev,
909                                              ICE_VSIQF_HKEY_ARRAY_SIZE,
910                                              GFP_KERNEL);
911                         if (!vsi->rss_hkey_user)
912                                 return -ENOMEM;
913                 }
914                 memcpy(vsi->rss_hkey_user, key, ICE_VSIQF_HKEY_ARRAY_SIZE);
915                 seed = vsi->rss_hkey_user;
916         }
917
918         if (!vsi->rss_lut_user) {
919                 vsi->rss_lut_user = devm_kzalloc(&pf->pdev->dev,
920                                                  vsi->rss_table_size,
921                                                  GFP_KERNEL);
922                 if (!vsi->rss_lut_user)
923                         return -ENOMEM;
924         }
925
926         /* Each 32 bits pointed by 'indir' is stored with a lut entry */
927         if (indir) {
928                 int i;
929
930                 for (i = 0; i < vsi->rss_table_size; i++)
931                         vsi->rss_lut_user[i] = (u8)(indir[i]);
932         } else {
933                 ice_fill_rss_lut(vsi->rss_lut_user, vsi->rss_table_size,
934                                  vsi->rss_size);
935         }
936
937         if (ice_set_rss(vsi, seed, vsi->rss_lut_user, vsi->rss_table_size))
938                 return -EIO;
939
940         return 0;
941 }
942
/* ethtool operations supported by the ice driver */
static const struct ethtool_ops ice_ethtool_ops = {
	.get_link_ksettings     = ice_get_link_ksettings,
	.get_drvinfo            = ice_get_drvinfo,
	.get_regs_len           = ice_get_regs_len,
	.get_regs               = ice_get_regs,
	.get_msglevel           = ice_get_msglevel,
	.set_msglevel           = ice_set_msglevel,
	.get_link               = ethtool_op_get_link,
	.get_strings            = ice_get_strings,
	.get_ethtool_stats      = ice_get_ethtool_stats,
	.get_sset_count         = ice_get_sset_count,
	.get_rxnfc              = ice_get_rxnfc,
	.get_ringparam          = ice_get_ringparam,
	.set_ringparam          = ice_set_ringparam,
	.nway_reset             = ice_nway_reset,
	.get_pauseparam         = ice_get_pauseparam,
	.set_pauseparam         = ice_set_pauseparam,
	.get_rxfh_key_size      = ice_get_rxfh_key_size,
	.get_rxfh_indir_size    = ice_get_rxfh_indir_size,
	.get_rxfh               = ice_get_rxfh,
	.set_rxfh               = ice_set_rxfh,
};
965
966 /**
967  * ice_set_ethtool_ops - setup netdev ethtool ops
968  * @netdev: network interface device structure
969  *
970  * setup netdev ethtool ops with ice specific ops
971  */
972 void ice_set_ethtool_ops(struct net_device *netdev)
973 {
974         netdev->ethtool_ops = &ice_ethtool_ops;
975 }