GNU Linux-libre 6.8.9-gnu
[releases.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/kernel.h>
5 #include <linux/module.h>
6 #include <linux/types.h>
7 #include <linux/pci.h>
8 #include <linux/netdevice.h>
9 #include <linux/etherdevice.h>
10 #include <linux/ethtool.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/skbuff.h>
14 #include <linux/if_vlan.h>
15 #include <linux/if_bridge.h>
16 #include <linux/workqueue.h>
17 #include <linux/jiffies.h>
18 #include <linux/bitops.h>
19 #include <linux/list.h>
20 #include <linux/notifier.h>
21 #include <linux/dcbnl.h>
22 #include <linux/inetdevice.h>
23 #include <linux/netlink.h>
24 #include <linux/jhash.h>
25 #include <linux/log2.h>
26 #include <linux/refcount.h>
27 #include <linux/rhashtable.h>
28 #include <net/switchdev.h>
29 #include <net/pkt_cls.h>
30 #include <net/netevent.h>
31 #include <net/addrconf.h>
32 #include <linux/ptp_classify.h>
33
34 #include "spectrum.h"
35 #include "pci.h"
36 #include "core.h"
37 #include "core_env.h"
38 #include "reg.h"
39 #include "port.h"
40 #include "trap.h"
41 #include "txheader.h"
42 #include "spectrum_cnt.h"
43 #include "spectrum_dpipe.h"
44 #include "spectrum_acl_flex_actions.h"
45 #include "spectrum_span.h"
46 #include "spectrum_ptp.h"
47 #include "spectrum_trap.h"
48
/* Lowest supported firmware version (minor.subminor), shared by all
 * Spectrum generations; the major number is generation-specific below.
 */
#define MLXSW_SP_FWREV_MINOR 2010
#define MLXSW_SP_FWREV_SUBMINOR 1006

#define MLXSW_SP1_FWREV_MAJOR 13
/* Oldest Spectrum-1 firmware minor from which an in-place reset works. */
#define MLXSW_SP1_FWREV_CAN_RESET_MINOR 1702

/* Required firmware revision for Spectrum-1. */
static const struct mlxsw_fw_rev mlxsw_sp1_fw_rev = {
	.major = MLXSW_SP1_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
	.can_reset_minor = MLXSW_SP1_FWREV_CAN_RESET_MINOR,
};

/* Firmware file name. The blob reference was removed ("DEBLOBBED") in
 * this GNU Linux-libre tree.
 */
#define MLXSW_SP1_FW_FILENAME \
	"/*(DEBLOBBED)*/"

#define MLXSW_SP2_FWREV_MAJOR 29

/* Required firmware revision for Spectrum-2. */
static const struct mlxsw_fw_rev mlxsw_sp2_fw_rev = {
	.major = MLXSW_SP2_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

/* Deblobbed Spectrum-2 firmware file name. */
#define MLXSW_SP2_FW_FILENAME \
	"/*(DEBLOBBED)*/"

#define MLXSW_SP3_FWREV_MAJOR 30

/* Required firmware revision for Spectrum-3. */
static const struct mlxsw_fw_rev mlxsw_sp3_fw_rev = {
	.major = MLXSW_SP3_FWREV_MAJOR,
	.minor = MLXSW_SP_FWREV_MINOR,
	.subminor = MLXSW_SP_FWREV_SUBMINOR,
};

/* Deblobbed Spectrum-3 firmware file name. */
#define MLXSW_SP3_FW_FILENAME \
	"/*(DEBLOBBED)*/"

/* Deblobbed line-card INI bundle file name. */
#define MLXSW_SP_LINECARDS_INI_BUNDLE_FILENAME \
	"/*(DEBLOBBED)*/"

static const char mlxsw_sp1_driver_name[] = "mlxsw_spectrum";
static const char mlxsw_sp2_driver_name[] = "mlxsw_spectrum2";
static const char mlxsw_sp3_driver_name[] = "mlxsw_spectrum3";
static const char mlxsw_sp4_driver_name[] = "mlxsw_spectrum4";

/* Per-generation masks of the base-MAC bits that are fixed for the whole
 * switch; NOTE(review): presumably the unmasked low bits hold the per-port
 * offset used by eth_hw_addr_gen() — confirm against core usage.
 */
static const unsigned char mlxsw_sp1_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xfc, 0x00
};
static const unsigned char mlxsw_sp2_mac_mask[ETH_ALEN] = {
	0xff, 0xff, 0xff, 0xff, 0xf0, 0x00
};
/* Tx header field definitions. The Tx header is pushed in front of every
 * transmitted packet (see mlxsw_sp_txhdr_construct() below);
 * MLXSW_ITEM32() generates the bit-field setters used by the constructors.
 */

/* tx_hdr_version
 * Tx header version.
 * Must be set to 1.
 */
MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);

/* tx_hdr_ctl
 * Packet control type.
 * 0 - Ethernet control (e.g. EMADs, LACP)
 * 1 - Ethernet data
 */
MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);

/* tx_hdr_proto
 * Packet protocol type. Must be set to 1 (Ethernet).
 */
MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);

/* tx_hdr_rx_is_router
 * Packet is sent from the router. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);

/* tx_hdr_fid_valid
 * Indicates if the 'fid' field is valid and should be used for
 * forwarding lookup. Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);

/* tx_hdr_swid
 * Switch partition ID. Must be set to 0.
 */
MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);

/* tx_hdr_control_tclass
 * Indicates if the packet should use the control TClass and not one
 * of the data TClasses.
 */
MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);

/* tx_hdr_etclass
 * Egress TClass to be used on the egress device on the egress port.
 */
MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);

/* tx_hdr_port_mid
 * Destination local port for unicast packets.
 * Destination multicast ID for multicast packets.
 *
 * Control packets are directed to a specific egress port, while data
 * packets are transmitted through the CPU port (0) into the switch partition,
 * where forwarding rules are applied.
 */
MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);

/* tx_hdr_fid
 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
 * Valid for data packets only.
 */
MLXSW_ITEM32(tx, hdr, fid, 0x08, 16, 16);

/* tx_hdr_type
 * 0 - Data packets
 * 6 - Control packets
 */
MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
169
170 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
171                               unsigned int counter_index, u64 *packets,
172                               u64 *bytes)
173 {
174         char mgpc_pl[MLXSW_REG_MGPC_LEN];
175         int err;
176
177         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
178                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
179         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
180         if (err)
181                 return err;
182         if (packets)
183                 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
184         if (bytes)
185                 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
186         return 0;
187 }
188
189 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
190                                        unsigned int counter_index)
191 {
192         char mgpc_pl[MLXSW_REG_MGPC_LEN];
193
194         mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
195                             MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
196         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
197 }
198
199 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
200                                 unsigned int *p_counter_index)
201 {
202         int err;
203
204         err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
205                                      p_counter_index);
206         if (err)
207                 return err;
208         err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
209         if (err)
210                 goto err_counter_clear;
211         return 0;
212
213 err_counter_clear:
214         mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
215                               *p_counter_index);
216         return err;
217 }
218
219 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
220                                 unsigned int counter_index)
221 {
222          mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
223                                counter_index);
224 }
225
226 void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
227                               const struct mlxsw_tx_info *tx_info)
228 {
229         char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
230
231         memset(txhdr, 0, MLXSW_TXHDR_LEN);
232
233         mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
234         mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
235         mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
236         mlxsw_tx_hdr_swid_set(txhdr, 0);
237         mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
238         mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
239         mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
240 }
241
/* mlxsw_sp_txhdr_ptp_data_construct - build a data-packet Tx header for a
 * PTP packet that needs a hardware time stamp.
 *
 * Such packets are sent as data packets rather than control packets, so
 * the header carries a FID (derived from the egress local port; presumably
 * one past the regular FID range so it is unique per port — TODO confirm)
 * instead of a destination port. On failure the skb is accounted as a Tx
 * drop and freed. Returns 0 on success, -ENOMEM or -EIO otherwise.
 */
int
mlxsw_sp_txhdr_ptp_data_construct(struct mlxsw_core *mlxsw_core,
				  struct mlxsw_sp_port *mlxsw_sp_port,
				  struct sk_buff *skb,
				  const struct mlxsw_tx_info *tx_info)
{
	char *txhdr;
	u16 max_fid;
	int err;

	/* Ensure there is writable headroom for the Tx header. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		err = -ENOMEM;
		goto err_skb_cow_head;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_core, FID)) {
		err = -EIO;
		goto err_res_valid;
	}
	max_fid = MLXSW_CORE_RES_GET(mlxsw_core, FID);

	txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
	memset(txhdr, 0, MLXSW_TXHDR_LEN);

	mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
	mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
	mlxsw_tx_hdr_rx_is_router_set(txhdr, true);
	mlxsw_tx_hdr_fid_valid_set(txhdr, true);
	mlxsw_tx_hdr_fid_set(txhdr, max_fid + tx_info->local_port - 1);
	mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_DATA);
	return 0;

err_res_valid:
err_skb_cow_head:
	this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
	dev_kfree_skb_any(skb);
	return err;
}
280
281 static bool mlxsw_sp_skb_requires_ts(struct sk_buff *skb)
282 {
283         unsigned int type;
284
285         if (!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
286                 return false;
287
288         type = ptp_classify_raw(skb);
289         return !!ptp_parse_header(skb, type);
290 }
291
/* Prepend the appropriate Tx header for the skb. On any error the skb is
 * consumed: counted as a Tx drop and freed.
 */
static int mlxsw_sp_txhdr_handle(struct mlxsw_core *mlxsw_core,
				 struct mlxsw_sp_port *mlxsw_sp_port,
				 struct sk_buff *skb,
				 const struct mlxsw_tx_info *tx_info)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	/* In Spectrum-2 and Spectrum-3, PTP events that require a time stamp
	 * need special handling and cannot be transmitted as regular control
	 * packets. Delegate those to the per-ASIC PTP header constructor.
	 */
	if (unlikely(mlxsw_sp_skb_requires_ts(skb)))
		return mlxsw_sp->ptp_ops->txhdr_construct(mlxsw_core,
							  mlxsw_sp_port, skb,
							  tx_info);

	/* Regular path: make room for and build a control-packet header. */
	if (skb_cow_head(skb, MLXSW_TXHDR_LEN)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	mlxsw_sp_txhdr_construct(skb, tx_info);
	return 0;
}
317
318 enum mlxsw_reg_spms_state mlxsw_sp_stp_spms_state(u8 state)
319 {
320         switch (state) {
321         case BR_STATE_FORWARDING:
322                 return MLXSW_REG_SPMS_STATE_FORWARDING;
323         case BR_STATE_LEARNING:
324                 return MLXSW_REG_SPMS_STATE_LEARNING;
325         case BR_STATE_LISTENING:
326         case BR_STATE_DISABLED:
327         case BR_STATE_BLOCKING:
328                 return MLXSW_REG_SPMS_STATE_DISCARDING;
329         default:
330                 BUG();
331         }
332 }
333
334 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
335                               u8 state)
336 {
337         enum mlxsw_reg_spms_state spms_state = mlxsw_sp_stp_spms_state(state);
338         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
339         char *spms_pl;
340         int err;
341
342         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
343         if (!spms_pl)
344                 return -ENOMEM;
345         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
346         mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
347
348         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
349         kfree(spms_pl);
350         return err;
351 }
352
353 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
354 {
355         char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
356         int err;
357
358         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
359         if (err)
360                 return err;
361         mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
362         return 0;
363 }
364
365 int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
366                                    bool is_up)
367 {
368         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
369         char paos_pl[MLXSW_REG_PAOS_LEN];
370
371         mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
372                             is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
373                             MLXSW_PORT_ADMIN_STATUS_DOWN);
374         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
375 }
376
377 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
378                                       const unsigned char *addr)
379 {
380         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
381         char ppad_pl[MLXSW_REG_PPAD_LEN];
382
383         mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
384         mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
385         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
386 }
387
388 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
389 {
390         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
391
392         eth_hw_addr_gen(mlxsw_sp_port->dev, mlxsw_sp->base_mac,
393                         mlxsw_sp_port->local_port);
394         return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port,
395                                           mlxsw_sp_port->dev->dev_addr);
396 }
397
398 static int mlxsw_sp_port_max_mtu_get(struct mlxsw_sp_port *mlxsw_sp_port, int *p_max_mtu)
399 {
400         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
401         char pmtu_pl[MLXSW_REG_PMTU_LEN];
402         int err;
403
404         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
405         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
406         if (err)
407                 return err;
408
409         *p_max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
410         return 0;
411 }
412
413 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
414 {
415         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
416         char pmtu_pl[MLXSW_REG_PMTU_LEN];
417
418         mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
419         if (mtu > mlxsw_sp_port->max_mtu)
420                 return -EINVAL;
421
422         mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
423         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
424 }
425
426 static int mlxsw_sp_port_swid_set(struct mlxsw_sp *mlxsw_sp,
427                                   u16 local_port, u8 swid)
428 {
429         char pspa_pl[MLXSW_REG_PSPA_LEN];
430
431         mlxsw_reg_pspa_pack(pspa_pl, swid, local_port);
432         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
433 }
434
435 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
436 {
437         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
438         char svpe_pl[MLXSW_REG_SVPE_LEN];
439
440         mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
441         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
442 }
443
444 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
445                                    bool learn_enable)
446 {
447         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
448         char *spvmlr_pl;
449         int err;
450
451         spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
452         if (!spvmlr_pl)
453                 return -ENOMEM;
454         mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
455                               learn_enable);
456         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
457         kfree(spvmlr_pl);
458         return err;
459 }
460
461 int mlxsw_sp_port_security_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
462 {
463         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
464         char spfsr_pl[MLXSW_REG_SPFSR_LEN];
465         int err;
466
467         if (mlxsw_sp_port->security == enable)
468                 return 0;
469
470         mlxsw_reg_spfsr_pack(spfsr_pl, mlxsw_sp_port->local_port, enable);
471         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spfsr), spfsr_pl);
472         if (err)
473                 return err;
474
475         mlxsw_sp_port->security = enable;
476         return 0;
477 }
478
479 int mlxsw_sp_ethtype_to_sver_type(u16 ethtype, u8 *p_sver_type)
480 {
481         switch (ethtype) {
482         case ETH_P_8021Q:
483                 *p_sver_type = 0;
484                 break;
485         case ETH_P_8021AD:
486                 *p_sver_type = 1;
487                 break;
488         default:
489                 return -EINVAL;
490         }
491
492         return 0;
493 }
494
495 int mlxsw_sp_port_egress_ethtype_set(struct mlxsw_sp_port *mlxsw_sp_port,
496                                      u16 ethtype)
497 {
498         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
499         char spevet_pl[MLXSW_REG_SPEVET_LEN];
500         u8 sver_type;
501         int err;
502
503         err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
504         if (err)
505                 return err;
506
507         mlxsw_reg_spevet_pack(spevet_pl, mlxsw_sp_port->local_port, sver_type);
508         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spevet), spevet_pl);
509 }
510
511 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
512                                     u16 vid, u16 ethtype)
513 {
514         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
515         char spvid_pl[MLXSW_REG_SPVID_LEN];
516         u8 sver_type;
517         int err;
518
519         err = mlxsw_sp_ethtype_to_sver_type(ethtype, &sver_type);
520         if (err)
521                 return err;
522
523         mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid,
524                              sver_type);
525
526         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
527 }
528
529 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
530                                             bool allow)
531 {
532         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
533         char spaft_pl[MLXSW_REG_SPAFT_LEN];
534
535         mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
536         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
537 }
538
/* mlxsw_sp_port_pvid_set - update the port's PVID.
 * @vid: new PVID. Zero means "no PVID", in which case untagged frames
 *	 are disallowed on the port rather than classified to a VLAN.
 * @ethtype: VLAN ethertype the PVID applies to (802.1Q or 802.1AD).
 *
 * If re-allowing untagged traffic fails after the PVID was changed, the
 * previously cached PVID is restored.
 */
int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
			   u16 ethtype)
{
	int err;

	if (!vid) {
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
		if (err)
			return err;
	} else {
		err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid, ethtype);
		if (err)
			return err;
		err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
		if (err)
			goto err_port_allow_untagged_set;
	}

	/* Cache the PVID only once hardware is fully updated. */
	mlxsw_sp_port->pvid = vid;
	return 0;

err_port_allow_untagged_set:
	/* Roll back to the previously cached PVID. */
	__mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid, ethtype);
	return err;
}
564
565 static int
566 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
567 {
568         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
569         char sspr_pl[MLXSW_REG_SSPR_LEN];
570
571         mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
572         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
573 }
574
/* mlxsw_sp_port_module_info_parse - extract and validate a port's module
 * mapping from a queried PMLP payload.
 *
 * All lanes of the port must use the same module and slot index, have
 * identical RX and TX lane numbers when separate RX/TX is indicated, and
 * be numbered sequentially starting from the first lane. Any violation
 * is rejected with -EINVAL.
 */
static int
mlxsw_sp_port_module_info_parse(struct mlxsw_sp *mlxsw_sp,
				u16 local_port, char *pmlp_pl,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	bool separate_rxtx;
	u8 first_lane;
	u8 slot_index;
	u8 module;
	u8 width;
	int i;

	/* Lane 0 provides the reference values every lane must match. */
	module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
	slot_index = mlxsw_reg_pmlp_slot_index_get(pmlp_pl, 0);
	width = mlxsw_reg_pmlp_width_get(pmlp_pl);
	separate_rxtx = mlxsw_reg_pmlp_rxtx_get(pmlp_pl);
	first_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);

	/* Width 0 (unmapped port) is acceptable; otherwise only powers of 2. */
	if (width && !is_power_of_2(width)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: width value is not power of 2\n",
			local_port);
		return -EINVAL;
	}

	for (i = 0; i < width; i++) {
		if (mlxsw_reg_pmlp_module_get(pmlp_pl, i) != module) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple modules\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_slot_index_get(pmlp_pl, i) != slot_index) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: contains multiple slot indexes\n",
				local_port);
			return -EINVAL;
		}
		if (separate_rxtx &&
		    mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) !=
		    mlxsw_reg_pmlp_rx_lane_get(pmlp_pl, i)) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are different\n",
				local_port);
			return -EINVAL;
		}
		if (mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, i) != i + first_lane) {
			dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unsupported module config: TX and RX lane numbers are not sequential\n",
				local_port);
			return -EINVAL;
		}
	}

	port_mapping->module = module;
	port_mapping->slot_index = slot_index;
	port_mapping->width = width;
	port_mapping->module_width = width;
	port_mapping->lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
	return 0;
}
631
632 static int
633 mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp, u16 local_port,
634                               struct mlxsw_sp_port_mapping *port_mapping)
635 {
636         char pmlp_pl[MLXSW_REG_PMLP_LEN];
637         int err;
638
639         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
640         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
641         if (err)
642                 return err;
643         return mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
644                                                pmlp_pl, port_mapping);
645 }
646
/* mlxsw_sp_port_module_map - program the port-to-module mapping via PMLP.
 *
 * The module is first registered with the environment layer; if the PMLP
 * write then fails, that registration is undone before returning.
 */
static int
mlxsw_sp_port_module_map(struct mlxsw_sp *mlxsw_sp, u16 local_port,
			 const struct mlxsw_sp_port_mapping *port_mapping)
{
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
	int i, err;

	mlxsw_env_module_port_map(mlxsw_sp->core, port_mapping->slot_index,
				  port_mapping->module);

	mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
	mlxsw_reg_pmlp_width_set(pmlp_pl, port_mapping->width);
	/* Every lane uses the same slot/module, with sequential lane numbers. */
	for (i = 0; i < port_mapping->width; i++) {
		mlxsw_reg_pmlp_slot_index_set(pmlp_pl, i,
					      port_mapping->slot_index);
		mlxsw_reg_pmlp_module_set(pmlp_pl, i, port_mapping->module);
		mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, port_mapping->lane + i); /* Rx & Tx */
	}

	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
	if (err)
		goto err_pmlp_write;
	return 0;

err_pmlp_write:
	mlxsw_env_module_port_unmap(mlxsw_sp->core, port_mapping->slot_index,
				    port_mapping->module);
	return err;
}
676
677 static void mlxsw_sp_port_module_unmap(struct mlxsw_sp *mlxsw_sp, u16 local_port,
678                                        u8 slot_index, u8 module)
679 {
680         char pmlp_pl[MLXSW_REG_PMLP_LEN];
681
682         mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
683         mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
684         mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
685         mlxsw_env_module_port_unmap(mlxsw_sp->core, slot_index, module);
686 }
687
/* mlxsw_sp_port_open - .ndo_open handler.
 *
 * Takes a module "port up" reference before administratively enabling
 * the port, and releases it again if enabling fails.
 */
static int mlxsw_sp_port_open(struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	int err;

	err = mlxsw_env_module_port_up(mlxsw_sp->core,
				       mlxsw_sp_port->mapping.slot_index,
				       mlxsw_sp_port->mapping.module);
	if (err)
		return err;
	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_admin_status_set;
	netif_start_queue(dev);
	return 0;

err_port_admin_status_set:
	/* Drop the module reference taken above. */
	mlxsw_env_module_port_down(mlxsw_sp->core,
				   mlxsw_sp_port->mapping.slot_index,
				   mlxsw_sp_port->mapping.module);
	return err;
}
711
712 static int mlxsw_sp_port_stop(struct net_device *dev)
713 {
714         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
715         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
716
717         netif_stop_queue(dev);
718         mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
719         mlxsw_env_module_port_down(mlxsw_sp->core,
720                                    mlxsw_sp_port->mapping.slot_index,
721                                    mlxsw_sp_port->mapping.module);
722         return 0;
723 }
724
/* mlxsw_sp_port_xmit - .ndo_start_xmit handler.
 *
 * Prepends the device Tx header and hands the skb to the core for
 * transmission. NETDEV_TX_BUSY is returned only for a busy core queue
 * before the header is built; afterwards every outcome is NETDEV_TX_OK,
 * with failures accounted as drops (the skb is freed by whichever
 * helper failed, or here on a failed transmit).
 */
static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
	const struct mlxsw_tx_info tx_info = {
		.local_port = mlxsw_sp_port->local_port,
		.is_emad = false,
	};
	u64 len;
	int err;

	memset(skb->cb, 0, sizeof(struct mlxsw_skb_cb));

	if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
		return NETDEV_TX_BUSY;

	/* Pad runt frames; eth_skb_pad() frees the skb on failure. */
	if (eth_skb_pad(skb)) {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		return NETDEV_TX_OK;
	}

	/* On error the skb was dropped and freed by the handler. */
	err = mlxsw_sp_txhdr_handle(mlxsw_sp->core, mlxsw_sp_port, skb,
				    &tx_info);
	if (err)
		return NETDEV_TX_OK;

	/* TX header is consumed by HW on the way so we shouldn't count its
	 * bytes as being sent.
	 */
	len = skb->len - MLXSW_TXHDR_LEN;

	/* Due to a race we might fail here because of a full queue. In that
	 * unlikely case we simply drop the packet.
	 */
	err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);

	if (!err) {
		pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
		u64_stats_update_begin(&pcpu_stats->syncp);
		pcpu_stats->tx_packets++;
		pcpu_stats->tx_bytes += len;
		u64_stats_update_end(&pcpu_stats->syncp);
	} else {
		this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
		dev_kfree_skb_any(skb);
	}
	return NETDEV_TX_OK;
}
775
/* mlxsw_sp_set_rx_mode - .ndo_set_rx_mode stub.
 *
 * Intentionally empty. NOTE(review): presumably no per-port RX filter
 * programming is needed on address-list changes for this device —
 * confirm against the switch's forwarding model.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
779
780 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
781 {
782         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
783         struct sockaddr *addr = p;
784         int err;
785
786         if (!is_valid_ether_addr(addr->sa_data))
787                 return -EADDRNOTAVAIL;
788
789         err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
790         if (err)
791                 return err;
792         eth_hw_addr_set(dev, addr->sa_data);
793         return 0;
794 }
795
/* mlxsw_sp_port_change_mtu - .ndo_change_mtu handler.
 *
 * Headroom buffers are resized for the new MTU before the MTU itself is
 * programmed; if the MTU write fails, the original headroom
 * configuration is restored.
 */
static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_hdroom orig_hdroom;
	struct mlxsw_sp_hdroom hdroom;
	int err;

	/* Keep a copy of the current headroom for rollback. */
	orig_hdroom = *mlxsw_sp_port->hdroom;

	hdroom = orig_hdroom;
	hdroom.mtu = mtu;
	mlxsw_sp_hdroom_bufs_reset_sizes(mlxsw_sp_port, &hdroom);

	err = mlxsw_sp_hdroom_configure(mlxsw_sp_port, &hdroom);
	if (err) {
		netdev_err(dev, "Failed to configure port's headroom\n");
		return err;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
	if (err)
		goto err_port_mtu_set;
	dev->mtu = mtu;
	return 0;

err_port_mtu_set:
	mlxsw_sp_hdroom_configure(mlxsw_sp_port, &orig_hdroom);
	return err;
}
825
/* mlxsw_sp_port_get_sw_stats64 - aggregate the per-CPU software counters
 * into an rtnl_link_stats64.
 *
 * The 64-bit counters are read inside the u64_stats seqcount retry loop
 * so a consistent snapshot is obtained even where 64-bit reads are not
 * atomic. Always returns 0.
 */
static int
mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
			     struct rtnl_link_stats64 *stats)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp_port_pcpu_stats *p;
	u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
	u32 tx_dropped = 0;
	unsigned int start;
	int i;

	for_each_possible_cpu(i) {
		p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
		do {
			start = u64_stats_fetch_begin(&p->syncp);
			rx_packets	= p->rx_packets;
			rx_bytes	= p->rx_bytes;
			tx_packets	= p->tx_packets;
			tx_bytes	= p->tx_bytes;
		} while (u64_stats_fetch_retry(&p->syncp, start));

		stats->rx_packets	+= rx_packets;
		stats->rx_bytes		+= rx_bytes;
		stats->tx_packets	+= tx_packets;
		stats->tx_bytes		+= tx_bytes;
		/* tx_dropped is u32, updated without syncp protection. */
		tx_dropped	+= p->tx_dropped;
	}
	stats->tx_dropped	= tx_dropped;
	return 0;
}
857
858 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
859 {
860         switch (attr_id) {
861         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
862                 return true;
863         }
864
865         return false;
866 }
867
868 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
869                                            void *sp)
870 {
871         switch (attr_id) {
872         case IFLA_OFFLOAD_XSTATS_CPU_HIT:
873                 return mlxsw_sp_port_get_sw_stats64(dev, sp);
874         }
875
876         return -EINVAL;
877 }
878
/* Query one counter group of the PPCNT register for the port backing @dev.
 * @grp selects the counter group and @prio the priority/TC where relevant;
 * the raw payload is returned in @ppcnt_pl for the caller to unpack.
 * Returns 0 on success or a negative errno from the register query.
 */
int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
				int prio, char *ppcnt_pl)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;

	mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
	return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
}
888
889 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
890                                       struct rtnl_link_stats64 *stats)
891 {
892         char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
893         int err;
894
895         err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
896                                           0, ppcnt_pl);
897         if (err)
898                 goto out;
899
900         stats->tx_packets =
901                 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
902         stats->rx_packets =
903                 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
904         stats->tx_bytes =
905                 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
906         stats->rx_bytes =
907                 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
908         stats->multicast =
909                 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
910
911         stats->rx_crc_errors =
912                 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
913         stats->rx_frame_errors =
914                 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
915
916         stats->rx_length_errors = (
917                 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
918                 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
919                 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
920
921         stats->rx_errors = (stats->rx_crc_errors +
922                 stats->rx_frame_errors + stats->rx_length_errors);
923
924 out:
925         return err;
926 }
927
/* Fill @xstats with extended hardware counters: port-wide ECN marks,
 * per-TC congestion and queue counters and per-priority TX counters.
 * Individual query failures are ignored, leaving the previously cached
 * value for that counter in place.
 */
static void
mlxsw_sp_port_get_hw_xstats(struct net_device *dev,
			    struct mlxsw_sp_port_xstats *xstats)
{
	char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
	int err, i;

	err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_EXT_CNT, 0,
					  ppcnt_pl);
	if (!err)
		xstats->ecn = mlxsw_reg_ppcnt_ecn_marked_get(ppcnt_pl);

	for (i = 0; i < TC_MAX_QUEUE; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev,
						  MLXSW_REG_PPCNT_TC_CONG_CNT,
						  i, ppcnt_pl);
		/* A failed congestion-group query must not skip the TC
		 * counter group query below, hence the goto.
		 */
		if (err)
			goto tc_cnt;

		xstats->wred_drop[i] =
			mlxsw_reg_ppcnt_wred_discard_get(ppcnt_pl);
		xstats->tc_ecn[i] = mlxsw_reg_ppcnt_ecn_marked_tc_get(ppcnt_pl);

tc_cnt:
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_TC_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->backlog[i] =
			mlxsw_reg_ppcnt_tc_transmit_queue_get(ppcnt_pl);
		xstats->tail_drop[i] =
			mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get(ppcnt_pl);
	}

	/* Per-priority TX packet and byte counters. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_PRIO_CNT,
						  i, ppcnt_pl);
		if (err)
			continue;

		xstats->tx_packets[i] = mlxsw_reg_ppcnt_tx_frames_get(ppcnt_pl);
		xstats->tx_bytes[i] = mlxsw_reg_ppcnt_tx_octets_get(ppcnt_pl);
	}
}
973
974 static void update_stats_cache(struct work_struct *work)
975 {
976         struct mlxsw_sp_port *mlxsw_sp_port =
977                 container_of(work, struct mlxsw_sp_port,
978                              periodic_hw_stats.update_dw.work);
979
980         if (!netif_carrier_ok(mlxsw_sp_port->dev))
981                 /* Note: mlxsw_sp_port_down_wipe_counters() clears the cache as
982                  * necessary when port goes down.
983                  */
984                 goto out;
985
986         mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
987                                    &mlxsw_sp_port->periodic_hw_stats.stats);
988         mlxsw_sp_port_get_hw_xstats(mlxsw_sp_port->dev,
989                                     &mlxsw_sp_port->periodic_hw_stats.xstats);
990
991 out:
992         mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw,
993                                MLXSW_HW_STATS_UPDATE_TIME);
994 }
995
996 /* Return the stats from a cache that is updated periodically,
997  * as this function might get called in an atomic context.
998  */
999 static void
1000 mlxsw_sp_port_get_stats64(struct net_device *dev,
1001                           struct rtnl_link_stats64 *stats)
1002 {
1003         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1004
1005         memcpy(stats, &mlxsw_sp_port->periodic_hw_stats.stats, sizeof(*stats));
1006 }
1007
1008 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1009                                     u16 vid_begin, u16 vid_end,
1010                                     bool is_member, bool untagged)
1011 {
1012         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1013         char *spvm_pl;
1014         int err;
1015
1016         spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1017         if (!spvm_pl)
1018                 return -ENOMEM;
1019
1020         mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1021                             vid_end, is_member, untagged);
1022         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
1023         kfree(spvm_pl);
1024         return err;
1025 }
1026
1027 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1028                            u16 vid_end, bool is_member, bool untagged)
1029 {
1030         u16 vid, vid_e;
1031         int err;
1032
1033         for (vid = vid_begin; vid <= vid_end;
1034              vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
1035                 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1036                             vid_end);
1037
1038                 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1039                                                is_member, untagged);
1040                 if (err)
1041                         return err;
1042         }
1043
1044         return 0;
1045 }
1046
1047 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port,
1048                                      bool flush_default)
1049 {
1050         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1051
1052         list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1053                                  &mlxsw_sp_port->vlans_list, list) {
1054                 if (!flush_default &&
1055                     mlxsw_sp_port_vlan->vid == MLXSW_SP_DEFAULT_VID)
1056                         continue;
1057                 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1058         }
1059 }
1060
1061 static void
1062 mlxsw_sp_port_vlan_cleanup(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1063 {
1064         if (mlxsw_sp_port_vlan->bridge_port)
1065                 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1066         else if (mlxsw_sp_port_vlan->fid)
1067                 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1068 }
1069
1070 struct mlxsw_sp_port_vlan *
1071 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1072 {
1073         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1074         bool untagged = vid == MLXSW_SP_DEFAULT_VID;
1075         int err;
1076
1077         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1078         if (mlxsw_sp_port_vlan)
1079                 return ERR_PTR(-EEXIST);
1080
1081         err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1082         if (err)
1083                 return ERR_PTR(err);
1084
1085         mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
1086         if (!mlxsw_sp_port_vlan) {
1087                 err = -ENOMEM;
1088                 goto err_port_vlan_alloc;
1089         }
1090
1091         mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1092         mlxsw_sp_port_vlan->vid = vid;
1093         list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1094
1095         return mlxsw_sp_port_vlan;
1096
1097 err_port_vlan_alloc:
1098         mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1099         return ERR_PTR(err);
1100 }
1101
/* Tear down a {port, VID} entry: detach it from its upper, unlink and
 * free it, then remove the VID from the hardware filter.
 */
void mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
{
	/* Cache port and VID, as the entry is freed before the HW update. */
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
	u16 vid = mlxsw_sp_port_vlan->vid;

	mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port_vlan);
	list_del(&mlxsw_sp_port_vlan->list);
	kfree(mlxsw_sp_port_vlan);
	mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
}
1112
1113 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1114                                  __be16 __always_unused proto, u16 vid)
1115 {
1116         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1117
1118         /* VLAN 0 is added to HW filter when device goes up, but it is
1119          * reserved in our case, so simply return.
1120          */
1121         if (!vid)
1122                 return 0;
1123
1124         return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid));
1125 }
1126
1127 int mlxsw_sp_port_kill_vid(struct net_device *dev,
1128                            __be16 __always_unused proto, u16 vid)
1129 {
1130         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1131         struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1132
1133         /* VLAN 0 is removed from HW filter when device goes down, but
1134          * it is reserved in our case, so simply return.
1135          */
1136         if (!vid)
1137                 return 0;
1138
1139         mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1140         if (!mlxsw_sp_port_vlan)
1141                 return 0;
1142         mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
1143
1144         return 0;
1145 }
1146
1147 static int mlxsw_sp_setup_tc_block(struct mlxsw_sp_port *mlxsw_sp_port,
1148                                    struct flow_block_offload *f)
1149 {
1150         switch (f->binder_type) {
1151         case FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS:
1152                 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, true);
1153         case FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS:
1154                 return mlxsw_sp_setup_tc_block_clsact(mlxsw_sp_port, f, false);
1155         case FLOW_BLOCK_BINDER_TYPE_RED_EARLY_DROP:
1156                 return mlxsw_sp_setup_tc_block_qevent_early_drop(mlxsw_sp_port, f);
1157         case FLOW_BLOCK_BINDER_TYPE_RED_MARK:
1158                 return mlxsw_sp_setup_tc_block_qevent_mark(mlxsw_sp_port, f);
1159         default:
1160                 return -EOPNOTSUPP;
1161         }
1162 }
1163
1164 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1165                              void *type_data)
1166 {
1167         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1168
1169         switch (type) {
1170         case TC_SETUP_BLOCK:
1171                 return mlxsw_sp_setup_tc_block(mlxsw_sp_port, type_data);
1172         case TC_SETUP_QDISC_RED:
1173                 return mlxsw_sp_setup_tc_red(mlxsw_sp_port, type_data);
1174         case TC_SETUP_QDISC_PRIO:
1175                 return mlxsw_sp_setup_tc_prio(mlxsw_sp_port, type_data);
1176         case TC_SETUP_QDISC_ETS:
1177                 return mlxsw_sp_setup_tc_ets(mlxsw_sp_port, type_data);
1178         case TC_SETUP_QDISC_TBF:
1179                 return mlxsw_sp_setup_tc_tbf(mlxsw_sp_port, type_data);
1180         case TC_SETUP_QDISC_FIFO:
1181                 return mlxsw_sp_setup_tc_fifo(mlxsw_sp_port, type_data);
1182         default:
1183                 return -EOPNOTSUPP;
1184         }
1185 }
1186
1187 static int mlxsw_sp_feature_hw_tc(struct net_device *dev, bool enable)
1188 {
1189         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1190
1191         if (!enable) {
1192                 if (mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->ing_flow_block) ||
1193                     mlxsw_sp_flow_block_rule_count(mlxsw_sp_port->eg_flow_block)) {
1194                         netdev_err(dev, "Active offloaded tc filters, can't turn hw_tc_offload off\n");
1195                         return -EINVAL;
1196                 }
1197                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->ing_flow_block);
1198                 mlxsw_sp_flow_block_disable_inc(mlxsw_sp_port->eg_flow_block);
1199         } else {
1200                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->ing_flow_block);
1201                 mlxsw_sp_flow_block_disable_dec(mlxsw_sp_port->eg_flow_block);
1202         }
1203         return 0;
1204 }
1205
/* Toggle port loopback via the PPLR register. If the netdev is running,
 * the port is administratively brought down around the register write and
 * brought back up afterwards, regardless of whether the write succeeded.
 */
static int mlxsw_sp_feature_loopback(struct net_device *dev, bool enable)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	char pplr_pl[MLXSW_REG_PPLR_LEN];
	int err;

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);

	mlxsw_reg_pplr_pack(pplr_pl, mlxsw_sp_port->local_port, enable);
	err = mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pplr),
			      pplr_pl);

	if (netif_running(dev))
		mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);

	return err;
}
1224
1225 typedef int (*mlxsw_sp_feature_handler)(struct net_device *dev, bool enable);
1226
1227 static int mlxsw_sp_handle_feature(struct net_device *dev,
1228                                    netdev_features_t wanted_features,
1229                                    netdev_features_t feature,
1230                                    mlxsw_sp_feature_handler feature_handler)
1231 {
1232         netdev_features_t changes = wanted_features ^ dev->features;
1233         bool enable = !!(wanted_features & feature);
1234         int err;
1235
1236         if (!(changes & feature))
1237                 return 0;
1238
1239         err = feature_handler(dev, enable);
1240         if (err) {
1241                 netdev_err(dev, "%s feature %pNF failed, err %d\n",
1242                            enable ? "Enable" : "Disable", &feature, err);
1243                 return err;
1244         }
1245
1246         if (enable)
1247                 dev->features |= feature;
1248         else
1249                 dev->features &= ~feature;
1250
1251         return 0;
1252 }
1253 static int mlxsw_sp_set_features(struct net_device *dev,
1254                                  netdev_features_t features)
1255 {
1256         netdev_features_t oper_features = dev->features;
1257         int err = 0;
1258
1259         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_HW_TC,
1260                                        mlxsw_sp_feature_hw_tc);
1261         err |= mlxsw_sp_handle_feature(dev, features, NETIF_F_LOOPBACK,
1262                                        mlxsw_sp_feature_loopback);
1263
1264         if (err) {
1265                 dev->features = oper_features;
1266                 return -EINVAL;
1267         }
1268
1269         return 0;
1270 }
1271
/* SIOCSHWTSTAMP handler: copy the requested hwtstamp configuration from
 * user space, apply it through the per-ASIC PTP ops and copy the
 * (possibly adjusted) configuration back to user space.
 */
static int mlxsw_sp_port_hwtstamp_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	/* The PTP ops may have modified the config; report it back. */
	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
1291
/* SIOCGHWTSTAMP handler: read the current hwtstamp configuration via the
 * per-ASIC PTP ops and copy it to user space.
 */
static int mlxsw_sp_port_hwtstamp_get(struct mlxsw_sp_port *mlxsw_sp_port,
				      struct ifreq *ifr)
{
	struct hwtstamp_config config;
	int err;

	err = mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_get(mlxsw_sp_port,
							     &config);
	if (err)
		return err;

	if (copy_to_user(ifr->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}
1308
/* Reset the port's hardware timestamping to the all-zero (disabled)
 * configuration.
 */
static inline void mlxsw_sp_port_ptp_clear(struct mlxsw_sp_port *mlxsw_sp_port)
{
	struct hwtstamp_config config = {0};

	mlxsw_sp_port->mlxsw_sp->ptp_ops->hwtstamp_set(mlxsw_sp_port, &config);
}
1315
1316 static int
1317 mlxsw_sp_port_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1318 {
1319         struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1320
1321         switch (cmd) {
1322         case SIOCSHWTSTAMP:
1323                 return mlxsw_sp_port_hwtstamp_set(mlxsw_sp_port, ifr);
1324         case SIOCGHWTSTAMP:
1325                 return mlxsw_sp_port_hwtstamp_get(mlxsw_sp_port, ifr);
1326         default:
1327                 return -EOPNOTSUPP;
1328         }
1329 }
1330
/* Netdev callbacks for Spectrum port netdevices. */
static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
	.ndo_open		= mlxsw_sp_port_open,
	.ndo_stop		= mlxsw_sp_port_stop,
	.ndo_start_xmit		= mlxsw_sp_port_xmit,
	.ndo_setup_tc		= mlxsw_sp_setup_tc,
	.ndo_set_rx_mode	= mlxsw_sp_set_rx_mode,
	.ndo_set_mac_address	= mlxsw_sp_port_set_mac_address,
	.ndo_change_mtu		= mlxsw_sp_port_change_mtu,
	.ndo_get_stats64	= mlxsw_sp_port_get_stats64,
	.ndo_has_offload_stats	= mlxsw_sp_port_has_offload_stats,
	.ndo_get_offload_stats	= mlxsw_sp_port_get_offload_stats,
	.ndo_vlan_rx_add_vid	= mlxsw_sp_port_add_vid,
	.ndo_vlan_rx_kill_vid	= mlxsw_sp_port_kill_vid,
	.ndo_set_features	= mlxsw_sp_set_features,
	.ndo_eth_ioctl		= mlxsw_sp_port_ioctl,
};
1347
1348 static int
1349 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port)
1350 {
1351         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1352         u32 eth_proto_cap, eth_proto_admin, eth_proto_oper;
1353         const struct mlxsw_sp_port_type_speed_ops *ops;
1354         char ptys_pl[MLXSW_REG_PTYS_LEN];
1355         u32 eth_proto_cap_masked;
1356         int err;
1357
1358         ops = mlxsw_sp->port_type_speed_ops;
1359
1360         /* Set advertised speeds to speeds supported by both the driver
1361          * and the device.
1362          */
1363         ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1364                                0, false);
1365         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1366         if (err)
1367                 return err;
1368
1369         ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, &eth_proto_cap,
1370                                  &eth_proto_admin, &eth_proto_oper);
1371         eth_proto_cap_masked = ops->ptys_proto_cap_masked_get(eth_proto_cap);
1372         ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl, mlxsw_sp_port->local_port,
1373                                eth_proto_cap_masked,
1374                                mlxsw_sp_port->link.autoneg);
1375         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1376 }
1377
1378 int mlxsw_sp_port_speed_get(struct mlxsw_sp_port *mlxsw_sp_port, u32 *speed)
1379 {
1380         const struct mlxsw_sp_port_type_speed_ops *port_type_speed_ops;
1381         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1382         char ptys_pl[MLXSW_REG_PTYS_LEN];
1383         u32 eth_proto_oper;
1384         int err;
1385
1386         port_type_speed_ops = mlxsw_sp->port_type_speed_ops;
1387         port_type_speed_ops->reg_ptys_eth_pack(mlxsw_sp, ptys_pl,
1388                                                mlxsw_sp_port->local_port, 0,
1389                                                false);
1390         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
1391         if (err)
1392                 return err;
1393         port_type_speed_ops->reg_ptys_eth_unpack(mlxsw_sp, ptys_pl, NULL, NULL,
1394                                                  &eth_proto_oper);
1395         *speed = port_type_speed_ops->from_ptys_speed(mlxsw_sp, eth_proto_oper);
1396         return 0;
1397 }
1398
/* Configure scheduling element @index at hierarchy level @hr under
 * @next_index via the QEEC register, with the given DWRR settings.
 */
int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
			  enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
			  bool dwrr, u8 dwrr_weight)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_de_set(qeec_pl, true);
	mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
	mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1413
/* Set the max shaper rate and burst size of a scheduling element via the
 * QEEC register (mase enables the max-shaper fields).
 */
int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
				  enum mlxsw_reg_qeec_hr hr, u8 index,
				  u8 next_index, u32 maxrate, u8 burst_size)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mase_set(qeec_pl, true);
	mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
	mlxsw_reg_qeec_max_shaper_bs_set(qeec_pl, burst_size);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1428
/* Set the min shaper rate of a scheduling element via the QEEC register
 * (mise enables the min-shaper field).
 */
static int mlxsw_sp_port_min_bw_set(struct mlxsw_sp_port *mlxsw_sp_port,
				    enum mlxsw_reg_qeec_hr hr, u8 index,
				    u8 next_index, u32 minrate)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qeec_pl[MLXSW_REG_QEEC_LEN];

	mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
			    next_index);
	mlxsw_reg_qeec_mise_set(qeec_pl, true);
	mlxsw_reg_qeec_min_shaper_rate_set(qeec_pl, minrate);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
}
1443
/* Map switch priority @switch_prio to traffic class @tclass on the port
 * via the QTCT register.
 */
int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
			      u8 switch_prio, u8 tclass)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtct_pl[MLXSW_REG_QTCT_LEN];

	mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
			    tclass);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
}
1454
/* Bring the port's egress scheduling hierarchy and priority-to-TC mapping
 * to a known default state. Returns 0 on success or the first error.
 */
static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
{
	int err, i;

	/* Setup the elements hierarchy, so that each TC is linked to
	 * one subgroup, which are all member in the same group.
	 */
	err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
				    MLXSW_REG_QEEC_HR_GROUP, 0, 0, false, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_SUBGROUP, i,
					    0, false, 0);
		if (err)
			return err;
	}
	/* TC i and TC i + 8 are both linked under subgroup i; the latter
	 * (multicast, see min shaper below) uses DWRR with weight 100.
	 */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC, i, i,
					    false, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_TC,
					    i + 8, i,
					    true, 100);
		if (err)
			return err;
	}

	/* Make sure the max shaper is disabled in all hierarchies that support
	 * it. Note that this disables ptps (PTP shaper), but that is intended
	 * for the initial configuration.
	 */
	err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
					    MLXSW_REG_QEEC_HR_PORT, 0, 0,
					    MLXSW_REG_QEEC_MAS_DIS, 0);
	if (err)
		return err;
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_SUBGROUP,
						    i, 0,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;

		err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
						    MLXSW_REG_QEEC_HR_TC,
						    i + 8, i,
						    MLXSW_REG_QEEC_MAS_DIS, 0);
		if (err)
			return err;
	}

	/* Configure the min shaper for multicast TCs. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_min_bw_set(mlxsw_sp_port,
					       MLXSW_REG_QEEC_HR_TC,
					       i + 8, i,
					       MLXSW_REG_QEEC_MIS_MIN);
		if (err)
			return err;
	}

	/* Map all priorities to traffic class 0. */
	for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
		err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
		if (err)
			return err;
	}

	return 0;
}
1540
/* Enable or disable multicast-aware TC mapping on the port via QTCTM. */
static int mlxsw_sp_port_tc_mc_mode_set(struct mlxsw_sp_port *mlxsw_sp_port,
					bool enable)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char qtctm_pl[MLXSW_REG_QTCTM_LEN];

	mlxsw_reg_qtctm_pack(qtctm_pl, mlxsw_sp_port->local_port, enable);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtctm), qtctm_pl);
}
1550
1551 static int mlxsw_sp_port_overheat_init_val_set(struct mlxsw_sp_port *mlxsw_sp_port)
1552 {
1553         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1554         u8 slot_index = mlxsw_sp_port->mapping.slot_index;
1555         u8 module = mlxsw_sp_port->mapping.module;
1556         u64 overheat_counter;
1557         int err;
1558
1559         err = mlxsw_env_module_overheat_counter_get(mlxsw_sp->core, slot_index,
1560                                                     module, &overheat_counter);
1561         if (err)
1562                 return err;
1563
1564         mlxsw_sp_port->module_overheat_initial_val = overheat_counter;
1565         return 0;
1566 }
1567
/* Configure, via the SPVC register, whether 802.1ad and/or 802.1q tagged
 * packets are classified as tagged on ingress to the port.
 */
int
mlxsw_sp_port_vlan_classification_set(struct mlxsw_sp_port *mlxsw_sp_port,
				      bool is_8021ad_tagged,
				      bool is_8021q_tagged)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	char spvc_pl[MLXSW_REG_SPVC_LEN];

	mlxsw_reg_spvc_pack(spvc_pl, mlxsw_sp_port->local_port,
			    is_8021ad_tagged, is_8021q_tagged);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvc), spvc_pl);
}
1580
1581 static int mlxsw_sp_port_label_info_get(struct mlxsw_sp *mlxsw_sp,
1582                                         u16 local_port, u8 *port_number,
1583                                         u8 *split_port_subnumber,
1584                                         u8 *slot_index)
1585 {
1586         char pllp_pl[MLXSW_REG_PLLP_LEN];
1587         int err;
1588
1589         mlxsw_reg_pllp_pack(pllp_pl, local_port);
1590         err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pllp), pllp_pl);
1591         if (err)
1592                 return err;
1593         mlxsw_reg_pllp_unpack(pllp_pl, port_number,
1594                               split_port_subnumber, slot_index);
1595         return 0;
1596 }
1597
/* Create a front-panel port: map its module, allocate and configure the
 * netdev, initialize all per-port subsystems (buffers, ETS, DCB, FIDs,
 * qdiscs, NVE, VLANs) and finally register the netdev.
 *
 * The error unwind labels below mirror the init sequence in exact reverse
 * order; any reordering of the init steps must reorder the unwind too.
 */
static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u16 local_port,
				bool split,
				struct mlxsw_sp_port_mapping *port_mapping)
{
	struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
	struct mlxsw_sp_port *mlxsw_sp_port;
	u32 lanes = port_mapping->width;
	u8 split_port_subnumber;
	struct net_device *dev;
	u8 port_number;
	u8 slot_index;
	bool splittable;
	int err;

	err = mlxsw_sp_port_module_map(mlxsw_sp, local_port, port_mapping);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
			local_port);
		return err;
	}

	err = mlxsw_sp_port_swid_set(mlxsw_sp, local_port, 0);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
			local_port);
		goto err_port_swid_set;
	}

	err = mlxsw_sp_port_label_info_get(mlxsw_sp, local_port, &port_number,
					   &split_port_subnumber, &slot_index);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get port label information\n",
			local_port);
		goto err_port_label_info_get;
	}

	/* A port is only advertised as splittable when it has multiple lanes
	 * and is not itself the product of an earlier split.
	 */
	splittable = lanes > 1 && !split;
	err = mlxsw_core_port_init(mlxsw_sp->core, local_port, slot_index,
				   port_number, split, split_port_subnumber,
				   splittable, lanes, mlxsw_sp->base_mac,
				   sizeof(mlxsw_sp->base_mac));
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
			local_port);
		goto err_core_port_init;
	}

	dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
	if (!dev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
	dev_net_set(dev, mlxsw_sp_net(mlxsw_sp));
	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_core_port_netdev_link(mlxsw_sp->core, local_port,
				    mlxsw_sp_port, dev);
	mlxsw_sp_port->dev = dev;
	mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
	mlxsw_sp_port->local_port = local_port;
	mlxsw_sp_port->pvid = MLXSW_SP_DEFAULT_VID;
	mlxsw_sp_port->split = split;
	mlxsw_sp_port->mapping = *port_mapping;
	mlxsw_sp_port->link.autoneg = 1;
	INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);

	mlxsw_sp_port->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
	if (!mlxsw_sp_port->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	/* Periodic HW stats refresh; only scheduled at the end of this
	 * function, once the port is fully set up.
	 */
	INIT_DELAYED_WORK(&mlxsw_sp_port->periodic_hw_stats.update_dw,
			  &update_stats_cache);

	dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
	dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;

	err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
			mlxsw_sp_port->local_port);
		goto err_dev_addr_init;
	}

	netif_carrier_off(dev);

	dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
			 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
	dev->hw_features |= NETIF_F_HW_TC | NETIF_F_LOOPBACK;

	dev->min_mtu = 0;
	dev->max_mtu = ETH_MAX_MTU;

	/* Each packet needs to have a Tx header (metadata) on top all other
	 * headers.
	 */
	dev->needed_headroom = MLXSW_TXHDR_LEN;

	err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
			mlxsw_sp_port->local_port);
		goto err_port_system_port_mapping_set;
	}

	err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
			mlxsw_sp_port->local_port);
		goto err_port_speed_by_width_set;
	}

	err = mlxsw_sp->port_type_speed_ops->ptys_max_speed(mlxsw_sp_port,
							    &mlxsw_sp_port->max_speed);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum speed\n",
			mlxsw_sp_port->local_port);
		goto err_max_speed_get;
	}

	err = mlxsw_sp_port_max_mtu_get(mlxsw_sp_port, &mlxsw_sp_port->max_mtu);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to get maximum MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_max_mtu_get;
	}

	err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
			mlxsw_sp_port->local_port);
		goto err_port_mtu_set;
	}

	err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
	if (err)
		goto err_port_admin_status_set;

	err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
			mlxsw_sp_port->local_port);
		goto err_port_buffers_init;
	}

	err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
			mlxsw_sp_port->local_port);
		goto err_port_ets_init;
	}

	err = mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC MC mode\n",
			mlxsw_sp_port->local_port);
		goto err_port_tc_mc_mode;
	}

	/* ETS and buffers must be initialized before DCB. */
	err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
			mlxsw_sp_port->local_port);
		goto err_port_dcb_init;
	}

	err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
			mlxsw_sp_port->local_port);
		goto err_port_fids_init;
	}

	err = mlxsw_sp_tc_qdisc_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize TC qdiscs\n",
			mlxsw_sp_port->local_port);
		goto err_port_qdiscs_init;
	}

	/* Disable VLAN filtering for all VIDs before installing the default
	 * VLAN configuration below.
	 */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 0, VLAN_N_VID - 1, false,
				     false);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to clear VLAN filter\n",
			mlxsw_sp_port->local_port);
		goto err_port_vlan_clear;
	}

	err = mlxsw_sp_port_nve_init(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize NVE\n",
			mlxsw_sp_port->local_port);
		goto err_port_nve_init;
	}

	err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
				     ETH_P_8021Q);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set PVID\n",
			mlxsw_sp_port->local_port);
		goto err_port_pvid_set;
	}

	/* NOTE(review): the error message below hard-codes "VID 1" while the
	 * code uses MLXSW_SP_DEFAULT_VID - presumably 1; confirm they agree.
	 */
	mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_create(mlxsw_sp_port,
						       MLXSW_SP_DEFAULT_VID);
	if (IS_ERR(mlxsw_sp_port_vlan)) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
			mlxsw_sp_port->local_port);
		err = PTR_ERR(mlxsw_sp_port_vlan);
		goto err_port_vlan_create;
	}
	mlxsw_sp_port->default_vlan = mlxsw_sp_port_vlan;

	/* Set SPVC.et0=true and SPVC.et1=false to make the local port to treat
	 * only packets with 802.1q header as tagged packets.
	 */
	err = mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, false, true);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set default VLAN classification\n",
			local_port);
		goto err_port_vlan_classification_set;
	}

	INIT_DELAYED_WORK(&mlxsw_sp_port->ptp.shaper_dw,
			  mlxsw_sp->ptp_ops->shaper_work);

	/* Publish the port; from here on event handlers may look it up. */
	mlxsw_sp->ports[local_port] = mlxsw_sp_port;

	err = mlxsw_sp_port_overheat_init_val_set(mlxsw_sp_port);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set overheat initial value\n",
			mlxsw_sp_port->local_port);
		goto err_port_overheat_init_val_set;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
			mlxsw_sp_port->local_port);
		goto err_register_netdev;
	}

	mlxsw_core_schedule_dw(&mlxsw_sp_port->periodic_hw_stats.update_dw, 0);
	return 0;

err_register_netdev:
err_port_overheat_init_val_set:
	/* Restore default classification (both EtherTypes tagged). */
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
err_port_vlan_classification_set:
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
err_port_vlan_create:
err_port_pvid_set:
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
err_port_nve_init:
err_port_vlan_clear:
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
err_port_qdiscs_init:
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
err_port_fids_init:
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
err_port_dcb_init:
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
err_port_tc_mc_mode:
err_port_ets_init:
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
err_port_buffers_init:
err_port_admin_status_set:
err_port_mtu_set:
err_port_max_mtu_get:
err_max_speed_get:
err_port_speed_by_width_set:
err_port_system_port_mapping_set:
err_dev_addr_init:
	free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
	free_netdev(dev);
err_alloc_etherdev:
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
err_core_port_init:
err_port_label_info_get:
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
err_port_swid_set:
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port,
				   port_mapping->slot_index,
				   port_mapping->module);
	return err;
}
1890
/* Tear down a port created by mlxsw_sp_port_create(). The sequence mirrors
 * creation in reverse; the mapping fields are cached up front because the
 * port struct is freed (via free_netdev()) before the module is unmapped.
 */
static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u16 local_port)
{
	struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
	u8 slot_index = mlxsw_sp_port->mapping.slot_index;
	u8 module = mlxsw_sp_port->mapping.module;

	/* Stop deferred work before dismantling the structures it touches. */
	cancel_delayed_work_sync(&mlxsw_sp_port->periodic_hw_stats.update_dw);
	cancel_delayed_work_sync(&mlxsw_sp_port->ptp.shaper_dw);
	unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
	mlxsw_sp_port_ptp_clear(mlxsw_sp_port);
	mlxsw_sp_port_vlan_classification_set(mlxsw_sp_port, true, true);
	mlxsw_sp->ports[local_port] = NULL;
	mlxsw_sp_port_vlan_flush(mlxsw_sp_port, true);
	mlxsw_sp_port_nve_fini(mlxsw_sp_port);
	mlxsw_sp_tc_qdisc_fini(mlxsw_sp_port);
	mlxsw_sp_port_fids_fini(mlxsw_sp_port);
	mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
	mlxsw_sp_port_tc_mc_mode_set(mlxsw_sp_port, false);
	mlxsw_sp_port_buffers_fini(mlxsw_sp_port);
	free_percpu(mlxsw_sp_port->pcpu_stats);
	/* All VLANs should have been destroyed by the flush above. */
	WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
	free_netdev(mlxsw_sp_port->dev);
	mlxsw_core_port_fini(mlxsw_sp->core, local_port);
	mlxsw_sp_port_swid_set(mlxsw_sp, local_port,
			       MLXSW_PORT_SWID_DISABLED_PORT);
	mlxsw_sp_port_module_unmap(mlxsw_sp, local_port, slot_index, module);
}
1918
1919 static int mlxsw_sp_cpu_port_create(struct mlxsw_sp *mlxsw_sp)
1920 {
1921         struct mlxsw_sp_port *mlxsw_sp_port;
1922         int err;
1923
1924         mlxsw_sp_port = kzalloc(sizeof(*mlxsw_sp_port), GFP_KERNEL);
1925         if (!mlxsw_sp_port)
1926                 return -ENOMEM;
1927
1928         mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
1929         mlxsw_sp_port->local_port = MLXSW_PORT_CPU_PORT;
1930
1931         err = mlxsw_core_cpu_port_init(mlxsw_sp->core,
1932                                        mlxsw_sp_port,
1933                                        mlxsw_sp->base_mac,
1934                                        sizeof(mlxsw_sp->base_mac));
1935         if (err) {
1936                 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize core CPU port\n");
1937                 goto err_core_cpu_port_init;
1938         }
1939
1940         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = mlxsw_sp_port;
1941         return 0;
1942
1943 err_core_cpu_port_init:
1944         kfree(mlxsw_sp_port);
1945         return err;
1946 }
1947
1948 static void mlxsw_sp_cpu_port_remove(struct mlxsw_sp *mlxsw_sp)
1949 {
1950         struct mlxsw_sp_port *mlxsw_sp_port =
1951                                 mlxsw_sp->ports[MLXSW_PORT_CPU_PORT];
1952
1953         mlxsw_core_cpu_port_fini(mlxsw_sp->core);
1954         mlxsw_sp->ports[MLXSW_PORT_CPU_PORT] = NULL;
1955         kfree(mlxsw_sp_port);
1956 }
1957
1958 static bool mlxsw_sp_local_port_valid(u16 local_port)
1959 {
1960         return local_port != MLXSW_PORT_CPU_PORT;
1961 }
1962
1963 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u16 local_port)
1964 {
1965         if (!mlxsw_sp_local_port_valid(local_port))
1966                 return false;
1967         return mlxsw_sp->ports[local_port] != NULL;
1968 }
1969
1970 static int mlxsw_sp_port_mapping_event_set(struct mlxsw_sp *mlxsw_sp,
1971                                            u16 local_port, bool enable)
1972 {
1973         char pmecr_pl[MLXSW_REG_PMECR_LEN];
1974
1975         mlxsw_reg_pmecr_pack(pmecr_pl, local_port,
1976                              enable ? MLXSW_REG_PMECR_E_GENERATE_EVENT :
1977                                       MLXSW_REG_PMECR_E_DO_NOT_GENERATE_EVENT);
1978         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmecr), pmecr_pl);
1979 }
1980
/* A single queued port-mapping change: a copy of the PMLP register payload,
 * linked on the events queue until the work item parses it in process
 * context.
 */
struct mlxsw_sp_port_mapping_event {
	struct list_head list;
	char pmlp_pl[MLXSW_REG_PMLP_LEN];
};
1985
/* Process-context handler for queued PMLP events: splice the queue out under
 * the lock, then parse each payload and create the corresponding port under
 * the devlink instance lock.
 */
static void mlxsw_sp_port_mapping_events_work(struct work_struct *work)
{
	struct mlxsw_sp_port_mapping_event *event, *next_event;
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp *mlxsw_sp;
	struct devlink *devlink;
	LIST_HEAD(event_queue);
	u16 local_port;
	int err;

	events = container_of(work, struct mlxsw_sp_port_mapping_events, work);
	mlxsw_sp = container_of(events, struct mlxsw_sp, port_mapping_events);
	devlink = priv_to_devlink(mlxsw_sp->core);

	/* Detach the whole pending queue at once so the listener can keep
	 * appending without holding up this work item.
	 */
	spin_lock_bh(&events->queue_lock);
	list_splice_init(&events->queue, &event_queue);
	spin_unlock_bh(&events->queue_lock);

	list_for_each_entry_safe(event, next_event, &event_queue, list) {
		local_port = mlxsw_reg_pmlp_local_port_get(event->pmlp_pl);
		err = mlxsw_sp_port_module_info_parse(mlxsw_sp, local_port,
						      event->pmlp_pl, &port_mapping);
		if (err)
			goto out;

		if (WARN_ON_ONCE(!port_mapping.width))
			goto out;

		devl_lock(devlink);

		/* A mapping event for an already-created port is unexpected. */
		if (!mlxsw_sp_port_created(mlxsw_sp, local_port))
			mlxsw_sp_port_create(mlxsw_sp, local_port,
					     false, &port_mapping);
		else
			WARN_ON_ONCE(1);

		devl_unlock(devlink);

		mlxsw_sp->port_mapping[local_port] = port_mapping;

		/* "out" sits inside the loop: free this event and move on to
		 * the next one rather than aborting the whole batch.
		 */
out:
		kfree(event);
	}
}
2031
2032 static void
2033 mlxsw_sp_port_mapping_listener_func(const struct mlxsw_reg_info *reg,
2034                                     char *pmlp_pl, void *priv)
2035 {
2036         struct mlxsw_sp_port_mapping_events *events;
2037         struct mlxsw_sp_port_mapping_event *event;
2038         struct mlxsw_sp *mlxsw_sp = priv;
2039         u16 local_port;
2040
2041         local_port = mlxsw_reg_pmlp_local_port_get(pmlp_pl);
2042         if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2043                 return;
2044
2045         events = &mlxsw_sp->port_mapping_events;
2046         event = kmalloc(sizeof(*event), GFP_ATOMIC);
2047         if (!event)
2048                 return;
2049         memcpy(event->pmlp_pl, pmlp_pl, sizeof(event->pmlp_pl));
2050         spin_lock(&events->queue_lock);
2051         list_add_tail(&event->list, &events->queue);
2052         spin_unlock(&events->queue_lock);
2053         mlxsw_core_schedule_work(&events->work);
2054 }
2055
2056 static void
2057 __mlxsw_sp_port_mapping_events_cancel(struct mlxsw_sp *mlxsw_sp)
2058 {
2059         struct mlxsw_sp_port_mapping_event *event, *next_event;
2060         struct mlxsw_sp_port_mapping_events *events;
2061
2062         events = &mlxsw_sp->port_mapping_events;
2063
2064         /* Caller needs to make sure that no new event is going to appear. */
2065         cancel_work_sync(&events->work);
2066         list_for_each_entry_safe(event, next_event, &events->queue, list) {
2067                 list_del(&event->list);
2068                 kfree(event);
2069         }
2070 }
2071
2072 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
2073 {
2074         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2075         int i;
2076
2077         for (i = 1; i < max_ports; i++)
2078                 mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
2079         /* Make sure all scheduled events are processed */
2080         __mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
2081
2082         for (i = 1; i < max_ports; i++)
2083                 if (mlxsw_sp_port_created(mlxsw_sp, i))
2084                         mlxsw_sp_port_remove(mlxsw_sp, i);
2085         mlxsw_sp_cpu_port_remove(mlxsw_sp);
2086         kfree(mlxsw_sp->ports);
2087         mlxsw_sp->ports = NULL;
2088 }
2089
2090 static void
2091 mlxsw_sp_ports_remove_selected(struct mlxsw_core *mlxsw_core,
2092                                bool (*selector)(void *priv, u16 local_port),
2093                                void *priv)
2094 {
2095         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2096         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_core);
2097         int i;
2098
2099         for (i = 1; i < max_ports; i++)
2100                 if (mlxsw_sp_port_created(mlxsw_sp, i) && selector(priv, i))
2101                         mlxsw_sp_port_remove(mlxsw_sp, i);
2102 }
2103
/* Allocate the ports array, enable mapping events, create the CPU port and
 * then every front-panel port with a known module mapping.
 */
static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
{
	unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
	struct mlxsw_sp_port_mapping_events *events;
	struct mlxsw_sp_port_mapping *port_mapping;
	size_t alloc_size;
	int i;
	int err;

	alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
	mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
	if (!mlxsw_sp->ports)
		return -ENOMEM;

	events = &mlxsw_sp->port_mapping_events;
	INIT_LIST_HEAD(&events->queue);
	spin_lock_init(&events->queue_lock);
	INIT_WORK(&events->work, mlxsw_sp_port_mapping_events_work);

	for (i = 1; i < max_ports; i++) {
		err = mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, true);
		if (err)
			goto err_event_enable;
	}

	err = mlxsw_sp_cpu_port_create(mlxsw_sp);
	if (err)
		goto err_cpu_port_create;

	/* Ports without a module mapping (width == 0) are skipped; they may
	 * be created later from a mapping event.
	 */
	for (i = 1; i < max_ports; i++) {
		port_mapping = &mlxsw_sp->port_mapping[i];
		if (!port_mapping->width)
			continue;
		err = mlxsw_sp_port_create(mlxsw_sp, i, false, port_mapping);
		if (err)
			goto err_port_create;
	}
	return 0;

err_port_create:
	for (i--; i >= 1; i--)
		if (mlxsw_sp_port_created(mlxsw_sp, i))
			mlxsw_sp_port_remove(mlxsw_sp, i);
	/* Reset "i" so the shared unwind below disables mapping events on
	 * every port, not just up to the failed one.
	 */
	i = max_ports;
	mlxsw_sp_cpu_port_remove(mlxsw_sp);
err_cpu_port_create:
err_event_enable:
	for (i--; i >= 1; i--)
		mlxsw_sp_port_mapping_event_set(mlxsw_sp, i, false);
	/* Make sure all scheduled events are processed */
	__mlxsw_sp_port_mapping_events_cancel(mlxsw_sp);
	kfree(mlxsw_sp->ports);
	mlxsw_sp->ports = NULL;
	return err;
}
2159
2160 static int mlxsw_sp_port_module_info_init(struct mlxsw_sp *mlxsw_sp)
2161 {
2162         unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
2163         struct mlxsw_sp_port_mapping *port_mapping;
2164         int i;
2165         int err;
2166
2167         mlxsw_sp->port_mapping = kcalloc(max_ports,
2168                                          sizeof(struct mlxsw_sp_port_mapping),
2169                                          GFP_KERNEL);
2170         if (!mlxsw_sp->port_mapping)
2171                 return -ENOMEM;
2172
2173         for (i = 1; i < max_ports; i++) {
2174                 port_mapping = &mlxsw_sp->port_mapping[i];
2175                 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, port_mapping);
2176                 if (err)
2177                         goto err_port_module_info_get;
2178         }
2179         return 0;
2180
2181 err_port_module_info_get:
2182         kfree(mlxsw_sp->port_mapping);
2183         return err;
2184 }
2185
/* Free the module mapping cache allocated by
 * mlxsw_sp_port_module_info_init().
 */
static void mlxsw_sp_port_module_info_fini(struct mlxsw_sp *mlxsw_sp)
{
	kfree(mlxsw_sp->port_mapping);
}
2190
/* Create "count" split ports from a base port mapping. Each split port gets
 * an equal share of the base port's lanes; the lane offset advances by that
 * share for each successive port.
 */
static int
mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_port_mapping *port_mapping,
			   unsigned int count, const char *pmtdb_pl)
{
	struct mlxsw_sp_port_mapping split_port_mapping;
	int err, i;

	split_port_mapping = *port_mapping;
	split_port_mapping.width /= count;
	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (!mlxsw_sp_local_port_valid(s_local_port))
			continue;

		err = mlxsw_sp_port_create(mlxsw_sp, s_local_port,
					   true, &split_port_mapping);
		if (err)
			goto err_port_create;
		split_port_mapping.lane += split_port_mapping.width;
	}

	return 0;

err_port_create:
	/* Roll back only the ports created so far (indices below "i"). */
	for (i--; i >= 0; i--) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}
	return err;
}
2225
2226 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
2227                                          unsigned int count,
2228                                          const char *pmtdb_pl)
2229 {
2230         struct mlxsw_sp_port_mapping *port_mapping;
2231         int i;
2232
2233         /* Go over original unsplit ports in the gap and recreate them. */
2234         for (i = 0; i < count; i++) {
2235                 u16 local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);
2236
2237                 port_mapping = &mlxsw_sp->port_mapping[local_port];
2238                 if (!port_mapping->width || !mlxsw_sp_local_port_valid(local_port))
2239                         continue;
2240                 mlxsw_sp_port_create(mlxsw_sp, local_port,
2241                                      false, port_mapping);
2242         }
2243 }
2244
2245 static struct mlxsw_sp_port *
2246 mlxsw_sp_port_get_by_local_port(struct mlxsw_sp *mlxsw_sp, u16 local_port)
2247 {
2248         if (mlxsw_sp->ports && mlxsw_sp->ports[local_port])
2249                 return mlxsw_sp->ports[local_port];
2250         return NULL;
2251 }
2252
/* devlink port-split operation: validate the request against the PMTDB
 * register, remove the ports occupying the target lanes and create the
 * split ports in their place. On failure the removed unsplit ports are
 * recreated.
 */
static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u16 local_port,
			       unsigned int count,
			       struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port_mapping port_mapping;
	struct mlxsw_sp_port *mlxsw_sp_port;
	enum mlxsw_reg_pmtdb_status status;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port is already split");
		return -EINVAL;
	}

	/* Ask firmware which local ports the split would occupy and whether
	 * the configuration is supported.
	 */
	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	status = mlxsw_reg_pmtdb_status_get(pmtdb_pl);
	if (status != MLXSW_REG_PMTDB_STATUS_SUCCESS) {
		NL_SET_ERR_MSG_MOD(extack, "Unsupported split configuration");
		return -EINVAL;
	}

	/* Copy the mapping before the port (and its struct) is removed. */
	port_mapping = mlxsw_sp_port->mapping;

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	err = mlxsw_sp_port_split_create(mlxsw_sp, &port_mapping,
					 count, pmtdb_pl);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
		goto err_port_split_create;
	}

	return 0;

err_port_split_create:
	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return err;
}
2317
/* devlink port-unsplit operation: derive the split count from the module
 * width, remove the split ports and recreate the original unsplit ports.
 */
static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u16 local_port,
				 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	struct mlxsw_sp_port *mlxsw_sp_port;
	char pmtdb_pl[MLXSW_REG_PMTDB_LEN];
	unsigned int count;
	int i;
	int err;

	mlxsw_sp_port = mlxsw_sp_port_get_by_local_port(mlxsw_sp, local_port);
	if (!mlxsw_sp_port) {
		dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
			local_port);
		NL_SET_ERR_MSG_MOD(extack, "Port number does not exist");
		return -EINVAL;
	}

	if (!mlxsw_sp_port->split) {
		NL_SET_ERR_MSG_MOD(extack, "Port was not split");
		return -EINVAL;
	}

	/* The split count is implied by how many width-sized shares fit in
	 * the module width.
	 */
	count = mlxsw_sp_port->mapping.module_width /
		mlxsw_sp_port->mapping.width;

	mlxsw_reg_pmtdb_pack(pmtdb_pl, mlxsw_sp_port->mapping.slot_index,
			     mlxsw_sp_port->mapping.module,
			     mlxsw_sp_port->mapping.module_width / count,
			     count);
	err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmtdb), pmtdb_pl);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to query split info");
		return err;
	}

	for (i = 0; i < count; i++) {
		u16 s_local_port = mlxsw_reg_pmtdb_port_num_get(pmtdb_pl, i);

		if (mlxsw_sp_port_created(mlxsw_sp, s_local_port))
			mlxsw_sp_port_remove(mlxsw_sp, s_local_port);
	}

	mlxsw_sp_port_unsplit_create(mlxsw_sp, count, pmtdb_pl);

	return 0;
}
2365
2366 static void
2367 mlxsw_sp_port_down_wipe_counters(struct mlxsw_sp_port *mlxsw_sp_port)
2368 {
2369         int i;
2370
2371         for (i = 0; i < TC_MAX_QUEUE; i++)
2372                 mlxsw_sp_port->periodic_hw_stats.xstats.backlog[i] = 0;
2373 }
2374
2375 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
2376                                      char *pude_pl, void *priv)
2377 {
2378         struct mlxsw_sp *mlxsw_sp = priv;
2379         struct mlxsw_sp_port *mlxsw_sp_port;
2380         enum mlxsw_reg_pude_oper_status status;
2381         u16 local_port;
2382
2383         local_port = mlxsw_reg_pude_local_port_get(pude_pl);
2384
2385         if (WARN_ON_ONCE(!mlxsw_sp_local_port_is_valid(mlxsw_sp, local_port)))
2386                 return;
2387         mlxsw_sp_port = mlxsw_sp->ports[local_port];
2388         if (!mlxsw_sp_port)
2389                 return;
2390
2391         status = mlxsw_reg_pude_oper_status_get(pude_pl);
2392         if (status == MLXSW_PORT_OPER_STATUS_UP) {
2393                 netdev_info(mlxsw_sp_port->dev, "link up\n");
2394                 netif_carrier_on(mlxsw_sp_port->dev);
2395                 mlxsw_core_schedule_dw(&mlxsw_sp_port->ptp.shaper_dw, 0);
2396         } else {
2397                 netdev_info(mlxsw_sp_port->dev, "link down\n");
2398                 netif_carrier_off(mlxsw_sp_port->dev);
2399                 mlxsw_sp_port_down_wipe_counters(mlxsw_sp_port);
2400         }
2401 }
2402
2403 static void mlxsw_sp1_ptp_fifo_event_func(struct mlxsw_sp *mlxsw_sp,
2404                                           char *mtpptr_pl, bool ingress)
2405 {
2406         u16 local_port;
2407         u8 num_rec;
2408         int i;
2409
2410         local_port = mlxsw_reg_mtpptr_local_port_get(mtpptr_pl);
2411         num_rec = mlxsw_reg_mtpptr_num_rec_get(mtpptr_pl);
2412         for (i = 0; i < num_rec; i++) {
2413                 u8 domain_number;
2414                 u8 message_type;
2415                 u16 sequence_id;
2416                 u64 timestamp;
2417
2418                 mlxsw_reg_mtpptr_unpack(mtpptr_pl, i, &message_type,
2419                                         &domain_number, &sequence_id,
2420                                         &timestamp);
2421                 mlxsw_sp1_ptp_got_timestamp(mlxsw_sp, ingress, local_port,
2422                                             message_type, domain_number,
2423                                             sequence_id, timestamp);
2424         }
2425 }
2426
2427 static void mlxsw_sp1_ptp_ing_fifo_event_func(const struct mlxsw_reg_info *reg,
2428                                               char *mtpptr_pl, void *priv)
2429 {
2430         struct mlxsw_sp *mlxsw_sp = priv;
2431
2432         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, true);
2433 }
2434
2435 static void mlxsw_sp1_ptp_egr_fifo_event_func(const struct mlxsw_reg_info *reg,
2436                                               char *mtpptr_pl, void *priv)
2437 {
2438         struct mlxsw_sp *mlxsw_sp = priv;
2439
2440         mlxsw_sp1_ptp_fifo_event_func(mlxsw_sp, mtpptr_pl, false);
2441 }
2442
2443 void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
2444                                        u16 local_port, void *priv)
2445 {
2446         struct mlxsw_sp *mlxsw_sp = priv;
2447         struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
2448         struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
2449
2450         if (unlikely(!mlxsw_sp_port)) {
2451                 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
2452                                      local_port);
2453                 return;
2454         }
2455
2456         skb->dev = mlxsw_sp_port->dev;
2457
2458         pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
2459         u64_stats_update_begin(&pcpu_stats->syncp);
2460         pcpu_stats->rx_packets++;
2461         pcpu_stats->rx_bytes += skb->len;
2462         u64_stats_update_end(&pcpu_stats->syncp);
2463
2464         skb->protocol = eth_type_trans(skb, skb->dev);
2465         netif_receive_skb(skb);
2466 }
2467
2468 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u16 local_port,
2469                                            void *priv)
2470 {
2471         skb->offload_fwd_mark = 1;
2472         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2473 }
2474
2475 static void mlxsw_sp_rx_listener_l3_mark_func(struct sk_buff *skb,
2476                                               u16 local_port, void *priv)
2477 {
2478         skb->offload_l3_fwd_mark = 1;
2479         skb->offload_fwd_mark = 1;
2480         return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
2481 }
2482
2483 void mlxsw_sp_ptp_receive(struct mlxsw_sp *mlxsw_sp, struct sk_buff *skb,
2484                           u16 local_port)
2485 {
2486         mlxsw_sp->ptp_ops->receive(mlxsw_sp, skb, local_port);
2487 }
2488
/* Listener that traps to the CPU without setting skb->offload_fwd_mark. */
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
		  _is_ctrl, SP_##_trap_group, DISCARD)

/* Listener whose handler marks the skb as forwarded by the ASIC. */
#define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl)     \
	MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action,    \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* Listener whose handler sets both the L3 and L2 forwarding marks. */
#define MLXSW_SP_RXL_L3_MARK(_trap_id, _action, _trap_group, _is_ctrl)  \
	MLXSW_RXL(mlxsw_sp_rx_listener_l3_mark_func, _trap_id, _action, \
		_is_ctrl, SP_##_trap_group, DISCARD)

/* Event listener registered in the SP_EVENT trap group. */
#define MLXSW_SP_EVENTL(_func, _trap_id)                \
	MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
2503
/* Traps and events registered for all Spectrum generations. */
static const struct mlxsw_listener mlxsw_sp_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
	/* L2 traps */
	MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, FID_MISS, false),
	/* L3 traps */
	MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
	MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
			  false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_CLASS_E, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_MC_DMAC, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_SIP_DIP, FORWARD,
			     ROUTER_EXP, false),
	MLXSW_SP_RXL_NO_MARK(DISCARD_ING_ROUTER_DIP_LINK_LOCAL, FORWARD,
			     ROUTER_EXP, false),
	/* Multicast Router Traps */
	MLXSW_SP_RXL_MARK(ACL1, TRAP_TO_CPU, MULTICAST, false),
	MLXSW_SP_RXL_L3_MARK(ACL2, TRAP_TO_CPU, MULTICAST, false),
	/* NVE traps */
	MLXSW_SP_RXL_MARK(NVE_ENCAP_ARP, TRAP_TO_CPU, NEIGH_DISCOVERY, false),
};
2529
/* Spectrum-1 only: PTP timestamp FIFO events. */
static const struct mlxsw_listener mlxsw_sp1_listener[] = {
	/* Events */
	MLXSW_EVENTL(mlxsw_sp1_ptp_egr_fifo_event_func, PTP_EGR_FIFO, SP_PTP0),
	MLXSW_EVENTL(mlxsw_sp1_ptp_ing_fifo_event_func, PTP_ING_FIFO, SP_PTP0),
};
2535
/* Spectrum-2 and later: port mapping change (PMLPE) events. */
static const struct mlxsw_listener mlxsw_sp2_listener[] = {
	/* Events */
	MLXSW_SP_EVENTL(mlxsw_sp_port_mapping_listener_func, PMLPE),
};
2540
2541 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
2542 {
2543         struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
2544         char qpcr_pl[MLXSW_REG_QPCR_LEN];
2545         enum mlxsw_reg_qpcr_ir_units ir_units;
2546         int max_cpu_policers;
2547         bool is_bytes;
2548         u8 burst_size;
2549         u32 rate;
2550         int i, err;
2551
2552         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
2553                 return -EIO;
2554
2555         max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2556
2557         ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
2558         for (i = 0; i < max_cpu_policers; i++) {
2559                 is_bytes = false;
2560                 switch (i) {
2561                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2562                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2563                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2564                         rate = 1024;
2565                         burst_size = 7;
2566                         break;
2567                 default:
2568                         continue;
2569                 }
2570
2571                 __set_bit(i, mlxsw_sp->trap->policers_usage);
2572                 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
2573                                     burst_size);
2574                 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
2575                 if (err)
2576                         return err;
2577         }
2578
2579         return 0;
2580 }
2581
2582 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
2583 {
2584         char htgt_pl[MLXSW_REG_HTGT_LEN];
2585         enum mlxsw_reg_htgt_trap_group i;
2586         int max_cpu_policers;
2587         int max_trap_groups;
2588         u8 priority, tc;
2589         u16 policer_id;
2590         int err;
2591
2592         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
2593                 return -EIO;
2594
2595         max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
2596         max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
2597
2598         for (i = 0; i < max_trap_groups; i++) {
2599                 policer_id = i;
2600                 switch (i) {
2601                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
2602                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_MULTICAST:
2603                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_FID_MISS:
2604                         priority = 1;
2605                         tc = 1;
2606                         break;
2607                 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
2608                         priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
2609                         tc = MLXSW_REG_HTGT_DEFAULT_TC;
2610                         policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
2611                         break;
2612                 default:
2613                         continue;
2614                 }
2615
2616                 if (max_cpu_policers <= policer_id &&
2617                     policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
2618                         return -EIO;
2619
2620                 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
2621                 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
2622                 if (err)
2623                         return err;
2624         }
2625
2626         return 0;
2627 }
2628
2629 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
2630 {
2631         struct mlxsw_sp_trap *trap;
2632         u64 max_policers;
2633         int err;
2634
2635         if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_CPU_POLICERS))
2636                 return -EIO;
2637         max_policers = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_CPU_POLICERS);
2638         trap = kzalloc(struct_size(trap, policers_usage,
2639                                    BITS_TO_LONGS(max_policers)), GFP_KERNEL);
2640         if (!trap)
2641                 return -ENOMEM;
2642         trap->max_policers = max_policers;
2643         mlxsw_sp->trap = trap;
2644
2645         err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
2646         if (err)
2647                 goto err_cpu_policers_set;
2648
2649         err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
2650         if (err)
2651                 goto err_trap_groups_set;
2652
2653         err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp_listener,
2654                                         ARRAY_SIZE(mlxsw_sp_listener),
2655                                         mlxsw_sp);
2656         if (err)
2657                 goto err_traps_register;
2658
2659         err = mlxsw_core_traps_register(mlxsw_sp->core, mlxsw_sp->listeners,
2660                                         mlxsw_sp->listeners_count, mlxsw_sp);
2661         if (err)
2662                 goto err_extra_traps_init;
2663
2664         return 0;
2665
2666 err_extra_traps_init:
2667         mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
2668                                     ARRAY_SIZE(mlxsw_sp_listener),
2669                                     mlxsw_sp);
2670 err_traps_register:
2671 err_trap_groups_set:
2672 err_cpu_policers_set:
2673         kfree(trap);
2674         return err;
2675 }
2676
2677 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
2678 {
2679         mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp->listeners,
2680                                     mlxsw_sp->listeners_count,
2681                                     mlxsw_sp);
2682         mlxsw_core_traps_unregister(mlxsw_sp->core, mlxsw_sp_listener,
2683                                     ARRAY_SIZE(mlxsw_sp_listener), mlxsw_sp);
2684         kfree(mlxsw_sp->trap);
2685 }
2686
2687 static int mlxsw_sp_lag_pgt_init(struct mlxsw_sp *mlxsw_sp)
2688 {
2689         char sgcr_pl[MLXSW_REG_SGCR_LEN];
2690         u16 max_lag;
2691         int err;
2692
2693         if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
2694             MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
2695                 return 0;
2696
2697         err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
2698         if (err)
2699                 return err;
2700
2701         /* In DDD mode, which we by default use, each LAG entry is 8 PGT
2702          * entries. The LAG table address needs to be 8-aligned, but that ought
2703          * to be the case, since the LAG table is allocated first.
2704          */
2705         err = mlxsw_sp_pgt_mid_alloc_range(mlxsw_sp, &mlxsw_sp->lag_pgt_base,
2706                                            max_lag * 8);
2707         if (err)
2708                 return err;
2709         if (WARN_ON_ONCE(mlxsw_sp->lag_pgt_base % 8)) {
2710                 err = -EINVAL;
2711                 goto err_mid_alloc_range;
2712         }
2713
2714         mlxsw_reg_sgcr_pack(sgcr_pl, mlxsw_sp->lag_pgt_base);
2715         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sgcr), sgcr_pl);
2716         if (err)
2717                 goto err_mid_alloc_range;
2718
2719         return 0;
2720
2721 err_mid_alloc_range:
2722         mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
2723                                     max_lag * 8);
2724         return err;
2725 }
2726
2727 static void mlxsw_sp_lag_pgt_fini(struct mlxsw_sp *mlxsw_sp)
2728 {
2729         u16 max_lag;
2730         int err;
2731
2732         if (mlxsw_core_lag_mode(mlxsw_sp->core) !=
2733             MLXSW_CMD_MBOX_CONFIG_PROFILE_LAG_MODE_SW)
2734                 return;
2735
2736         err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
2737         if (err)
2738                 return;
2739
2740         mlxsw_sp_pgt_mid_free_range(mlxsw_sp, mlxsw_sp->lag_pgt_base,
2741                                     max_lag * 8);
2742 }
2743
2744 #define MLXSW_SP_LAG_SEED_INIT 0xcafecafe
2745
2746 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
2747 {
2748         char slcr_pl[MLXSW_REG_SLCR_LEN];
2749         u16 max_lag;
2750         u32 seed;
2751         int err;
2752
2753         seed = jhash(mlxsw_sp->base_mac, sizeof(mlxsw_sp->base_mac),
2754                      MLXSW_SP_LAG_SEED_INIT);
2755         mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
2756                                      MLXSW_REG_SLCR_LAG_HASH_DMAC |
2757                                      MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
2758                                      MLXSW_REG_SLCR_LAG_HASH_VLANID |
2759                                      MLXSW_REG_SLCR_LAG_HASH_SIP |
2760                                      MLXSW_REG_SLCR_LAG_HASH_DIP |
2761                                      MLXSW_REG_SLCR_LAG_HASH_SPORT |
2762                                      MLXSW_REG_SLCR_LAG_HASH_DPORT |
2763                                      MLXSW_REG_SLCR_LAG_HASH_IPPROTO, seed);
2764         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
2765         if (err)
2766                 return err;
2767
2768         err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
2769         if (err)
2770                 return err;
2771
2772         if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
2773                 return -EIO;
2774
2775         err = mlxsw_sp_lag_pgt_init(mlxsw_sp);
2776         if (err)
2777                 return err;
2778
2779         mlxsw_sp->lags = kcalloc(max_lag, sizeof(struct mlxsw_sp_upper),
2780                                  GFP_KERNEL);
2781         if (!mlxsw_sp->lags) {
2782                 err = -ENOMEM;
2783                 goto err_kcalloc;
2784         }
2785
2786         return 0;
2787
2788 err_kcalloc:
2789         mlxsw_sp_lag_pgt_fini(mlxsw_sp);
2790         return err;
2791 }
2792
/* Tear down in reverse order of mlxsw_sp_lag_init(). */
static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
{
	mlxsw_sp_lag_pgt_fini(mlxsw_sp);
	kfree(mlxsw_sp->lags);
}
2798
/* PTP operations for Spectrum-1 ASICs. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp1_ptp_ops = {
	.clock_init	= mlxsw_sp1_ptp_clock_init,
	.clock_fini	= mlxsw_sp1_ptp_clock_fini,
	.init		= mlxsw_sp1_ptp_init,
	.fini		= mlxsw_sp1_ptp_fini,
	.receive	= mlxsw_sp1_ptp_receive,
	.transmitted	= mlxsw_sp1_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp1_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp1_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp1_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp1_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp1_get_stats_count,
	.get_stats_strings = mlxsw_sp1_get_stats_strings,
	.get_stats	= mlxsw_sp1_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
2815
/* PTP operations for Spectrum-2/3 ASICs. */
static const struct mlxsw_sp_ptp_ops mlxsw_sp2_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp2_ptp_txhdr_construct,
};
2832
/* PTP operations for Spectrum-4 ASICs: reuses the Spectrum-2 callbacks,
 * except for TX header construction.
 */
static const struct mlxsw_sp_ptp_ops mlxsw_sp4_ptp_ops = {
	.clock_init	= mlxsw_sp2_ptp_clock_init,
	.clock_fini	= mlxsw_sp2_ptp_clock_fini,
	.init		= mlxsw_sp2_ptp_init,
	.fini		= mlxsw_sp2_ptp_fini,
	.receive	= mlxsw_sp2_ptp_receive,
	.transmitted	= mlxsw_sp2_ptp_transmitted,
	.hwtstamp_get	= mlxsw_sp2_ptp_hwtstamp_get,
	.hwtstamp_set	= mlxsw_sp2_ptp_hwtstamp_set,
	.shaper_work	= mlxsw_sp2_ptp_shaper_work,
	.get_ts_info	= mlxsw_sp2_ptp_get_ts_info,
	.get_stats_count = mlxsw_sp2_get_stats_count,
	.get_stats_strings = mlxsw_sp2_get_stats_strings,
	.get_stats	= mlxsw_sp2_get_stats,
	.txhdr_construct = mlxsw_sp_ptp_txhdr_construct,
};
2849
/* A refcounted sampling trigger and its parameters, stored in
 * sample_trigger_ht and looked up by readers under RCU.
 */
struct mlxsw_sp_sample_trigger_node {
	struct mlxsw_sp_sample_trigger trigger;	/* hash table key */
	struct mlxsw_sp_sample_params params;
	struct rhash_head ht_node;
	struct rcu_head rcu;	/* for kfree_rcu() on removal */
	refcount_t refcount;
};
2857
/* Hash table keyed by the whole mlxsw_sp_sample_trigger struct. */
static const struct rhashtable_params mlxsw_sp_sample_trigger_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_sample_trigger_node, trigger),
	.head_offset = offsetof(struct mlxsw_sp_sample_trigger_node, ht_node),
	.key_len = sizeof(struct mlxsw_sp_sample_trigger),
	.automatic_shrinking = true,
};
2864
static void
mlxsw_sp_sample_trigger_key_init(struct mlxsw_sp_sample_trigger *key,
				 const struct mlxsw_sp_sample_trigger *trigger)
{
	/* The whole struct serves as the rhashtable key (key_len is
	 * sizeof(*key)), so memset() first to zero any padding and any
	 * members that are not part of the lookup key.
	 */
	memset(key, 0, sizeof(*key));
	key->type = trigger->type;
	key->local_port = trigger->local_port;
}
2873
2874 /* RCU read lock must be held */
2875 struct mlxsw_sp_sample_params *
2876 mlxsw_sp_sample_trigger_params_lookup(struct mlxsw_sp *mlxsw_sp,
2877                                       const struct mlxsw_sp_sample_trigger *trigger)
2878 {
2879         struct mlxsw_sp_sample_trigger_node *trigger_node;
2880         struct mlxsw_sp_sample_trigger key;
2881
2882         mlxsw_sp_sample_trigger_key_init(&key, trigger);
2883         trigger_node = rhashtable_lookup(&mlxsw_sp->sample_trigger_ht, &key,
2884                                          mlxsw_sp_sample_trigger_ht_params);
2885         if (!trigger_node)
2886                 return NULL;
2887
2888         return &trigger_node->params;
2889 }
2890
2891 static int
2892 mlxsw_sp_sample_trigger_node_init(struct mlxsw_sp *mlxsw_sp,
2893                                   const struct mlxsw_sp_sample_trigger *trigger,
2894                                   const struct mlxsw_sp_sample_params *params)
2895 {
2896         struct mlxsw_sp_sample_trigger_node *trigger_node;
2897         int err;
2898
2899         trigger_node = kzalloc(sizeof(*trigger_node), GFP_KERNEL);
2900         if (!trigger_node)
2901                 return -ENOMEM;
2902
2903         trigger_node->trigger = *trigger;
2904         trigger_node->params = *params;
2905         refcount_set(&trigger_node->refcount, 1);
2906
2907         err = rhashtable_insert_fast(&mlxsw_sp->sample_trigger_ht,
2908                                      &trigger_node->ht_node,
2909                                      mlxsw_sp_sample_trigger_ht_params);
2910         if (err)
2911                 goto err_rhashtable_insert;
2912
2913         return 0;
2914
2915 err_rhashtable_insert:
2916         kfree(trigger_node);
2917         return err;
2918 }
2919
/* Remove a trigger node from the hash table and schedule its release. */
static void
mlxsw_sp_sample_trigger_node_fini(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_sample_trigger_node *trigger_node)
{
	rhashtable_remove_fast(&mlxsw_sp->sample_trigger_ht,
			       &trigger_node->ht_node,
			       mlxsw_sp_sample_trigger_ht_params);
	/* Defer the free past an RCU grace period; readers look the node
	 * up under RCU (see mlxsw_sp_sample_trigger_params_lookup()).
	 */
	kfree_rcu(trigger_node, rcu);
}
2929
/* Bind sampling parameters to a trigger. If an equivalent trigger already
 * exists, its parameters must match exactly and its reference count is
 * incremented instead of creating a new node. Returns 0 or -errno.
 */
int
mlxsw_sp_sample_trigger_params_set(struct mlxsw_sp *mlxsw_sp,
				   const struct mlxsw_sp_sample_trigger *trigger,
				   const struct mlxsw_sp_sample_params *params,
				   struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_sample_trigger_node *trigger_node;
	struct mlxsw_sp_sample_trigger key;

	ASSERT_RTNL();

	mlxsw_sp_sample_trigger_key_init(&key, trigger);

	trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
					      &key,
					      mlxsw_sp_sample_trigger_ht_params);
	if (!trigger_node)
		return mlxsw_sp_sample_trigger_node_init(mlxsw_sp, &key,
							 params);

	/* An existing node with a non-zero local port means sampling is
	 * already enabled on that port; sharing is not allowed then.
	 */
	if (trigger_node->trigger.local_port) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling already enabled on port");
		return -EINVAL;
	}

	if (trigger_node->params.psample_group != params->psample_group ||
	    trigger_node->params.truncate != params->truncate ||
	    trigger_node->params.rate != params->rate ||
	    trigger_node->params.trunc_size != params->trunc_size) {
		NL_SET_ERR_MSG_MOD(extack, "Sampling parameters do not match for an existing sampling trigger");
		return -EINVAL;
	}

	refcount_inc(&trigger_node->refcount);

	return 0;
}
2967
2968 void
2969 mlxsw_sp_sample_trigger_params_unset(struct mlxsw_sp *mlxsw_sp,
2970                                      const struct mlxsw_sp_sample_trigger *trigger)
2971 {
2972         struct mlxsw_sp_sample_trigger_node *trigger_node;
2973         struct mlxsw_sp_sample_trigger key;
2974
2975         ASSERT_RTNL();
2976
2977         mlxsw_sp_sample_trigger_key_init(&key, trigger);
2978
2979         trigger_node = rhashtable_lookup_fast(&mlxsw_sp->sample_trigger_ht,
2980                                               &key,
2981                                               mlxsw_sp_sample_trigger_ht_params);
2982         if (!trigger_node)
2983                 return;
2984
2985         if (!refcount_dec_and_test(&trigger_node->refcount))
2986                 return;
2987
2988         mlxsw_sp_sample_trigger_node_fini(mlxsw_sp, trigger_node);
2989 }
2990
2991 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
2992                                     unsigned long event, void *ptr);
2993
2994 #define MLXSW_SP_DEFAULT_PARSING_DEPTH 96
2995 #define MLXSW_SP_INCREASED_PARSING_DEPTH 128
2996 #define MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT 4789
2997
2998 static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp)
2999 {
3000         refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0);
3001         mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;
3002         mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT;
3003         mutex_init(&mlxsw_sp->parsing.lock);
3004 }
3005
/* Tear down parser configuration tracking. */
static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->parsing.lock);
	/* All users should have released their parsing depth references. */
	WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref));
}
3011
/* A refcounted IPv6 address mapped to a KVDL entry. */
struct mlxsw_sp_ipv6_addr_node {
	struct in6_addr key;	/* hash table key */
	struct rhash_head ht_node;
	u32 kvdl_index;		/* KVDL entry holding the address */
	refcount_t refcount;
};
3018
/* Hash table keyed by the IPv6 address itself. */
static const struct rhashtable_params mlxsw_sp_ipv6_addr_ht_params = {
	.key_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, key),
	.head_offset = offsetof(struct mlxsw_sp_ipv6_addr_node, ht_node),
	.key_len = sizeof(struct in6_addr),
	.automatic_shrinking = true,
};
3025
3026 static int
3027 mlxsw_sp_ipv6_addr_init(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6,
3028                         u32 *p_kvdl_index)
3029 {
3030         struct mlxsw_sp_ipv6_addr_node *node;
3031         char rips_pl[MLXSW_REG_RIPS_LEN];
3032         int err;
3033
3034         err = mlxsw_sp_kvdl_alloc(mlxsw_sp,
3035                                   MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
3036                                   p_kvdl_index);
3037         if (err)
3038                 return err;
3039
3040         mlxsw_reg_rips_pack(rips_pl, *p_kvdl_index, addr6);
3041         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(rips), rips_pl);
3042         if (err)
3043                 goto err_rips_write;
3044
3045         node = kzalloc(sizeof(*node), GFP_KERNEL);
3046         if (!node) {
3047                 err = -ENOMEM;
3048                 goto err_node_alloc;
3049         }
3050
3051         node->key = *addr6;
3052         node->kvdl_index = *p_kvdl_index;
3053         refcount_set(&node->refcount, 1);
3054
3055         err = rhashtable_insert_fast(&mlxsw_sp->ipv6_addr_ht,
3056                                      &node->ht_node,
3057                                      mlxsw_sp_ipv6_addr_ht_params);
3058         if (err)
3059                 goto err_rhashtable_insert;
3060
3061         return 0;
3062
3063 err_rhashtable_insert:
3064         kfree(node);
3065 err_node_alloc:
3066 err_rips_write:
3067         mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
3068                            *p_kvdl_index);
3069         return err;
3070 }
3071
/* Untrack an IPv6 address node and release its KVDL entry. */
static void mlxsw_sp_ipv6_addr_fini(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_ipv6_addr_node *node)
{
	/* Save the index before the node is freed below. */
	u32 kvdl_index = node->kvdl_index;

	rhashtable_remove_fast(&mlxsw_sp->ipv6_addr_ht, &node->ht_node,
			       mlxsw_sp_ipv6_addr_ht_params);
	kfree(node);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_IPV6_ADDRESS, 1,
			   kvdl_index);
}
3083
3084 int mlxsw_sp_ipv6_addr_kvdl_index_get(struct mlxsw_sp *mlxsw_sp,
3085                                       const struct in6_addr *addr6,
3086                                       u32 *p_kvdl_index)
3087 {
3088         struct mlxsw_sp_ipv6_addr_node *node;
3089         int err = 0;
3090
3091         mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
3092         node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
3093                                       mlxsw_sp_ipv6_addr_ht_params);
3094         if (node) {
3095                 refcount_inc(&node->refcount);
3096                 *p_kvdl_index = node->kvdl_index;
3097                 goto out_unlock;
3098         }
3099
3100         err = mlxsw_sp_ipv6_addr_init(mlxsw_sp, addr6, p_kvdl_index);
3101
3102 out_unlock:
3103         mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
3104         return err;
3105 }
3106
3107 void
3108 mlxsw_sp_ipv6_addr_put(struct mlxsw_sp *mlxsw_sp, const struct in6_addr *addr6)
3109 {
3110         struct mlxsw_sp_ipv6_addr_node *node;
3111
3112         mutex_lock(&mlxsw_sp->ipv6_addr_ht_lock);
3113         node = rhashtable_lookup_fast(&mlxsw_sp->ipv6_addr_ht, addr6,
3114                                       mlxsw_sp_ipv6_addr_ht_params);
3115         if (WARN_ON(!node))
3116                 goto out_unlock;
3117
3118         if (!refcount_dec_and_test(&node->refcount))
3119                 goto out_unlock;
3120
3121         mlxsw_sp_ipv6_addr_fini(mlxsw_sp, node);
3122
3123 out_unlock:
3124         mutex_unlock(&mlxsw_sp->ipv6_addr_ht_lock);
3125 }
3126
3127 static int mlxsw_sp_ipv6_addr_ht_init(struct mlxsw_sp *mlxsw_sp)
3128 {
3129         int err;
3130
3131         err = rhashtable_init(&mlxsw_sp->ipv6_addr_ht,
3132                               &mlxsw_sp_ipv6_addr_ht_params);
3133         if (err)
3134                 return err;
3135
3136         mutex_init(&mlxsw_sp->ipv6_addr_ht_lock);
3137         return 0;
3138 }
3139
/* Tear down in reverse order of mlxsw_sp_ipv6_addr_ht_init(). */
static void mlxsw_sp_ipv6_addr_ht_fini(struct mlxsw_sp *mlxsw_sp)
{
	mutex_destroy(&mlxsw_sp->ipv6_addr_ht_lock);
	rhashtable_destroy(&mlxsw_sp->ipv6_addr_ht);
}
3145
/* Common initialization path shared by all Spectrum generations. Runs
 * after the per-ASIC entry point (mlxsw_spN_init()) has bound the
 * generation-specific ops pointers in @mlxsw_sp. Sub-systems are brought
 * up in dependency order (see the inline comments for the ordering
 * constraints that are not obvious); on failure the goto chain tears
 * them down in exact reverse order. Returns 0 or a negative errno.
 */
static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
			 const struct mlxsw_bus_info *mlxsw_bus_info,
			 struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
	int err;

	mlxsw_sp->core = mlxsw_core;
	mlxsw_sp->bus_info = mlxsw_bus_info;

	mlxsw_sp_parsing_init(mlxsw_sp);

	err = mlxsw_sp_base_mac_get(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
		return err;
	}

	err = mlxsw_sp_kvdl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize KVDL\n");
		return err;
	}

	err = mlxsw_sp_pgt_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PGT\n");
		goto err_pgt_init;
	}

	/* Initialize before FIDs so that the LAG table is at the start of PGT
	 * and 8-aligned without overallocation.
	 */
	err = mlxsw_sp_lag_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
		goto err_lag_init;
	}

	err = mlxsw_sp->fid_core_ops->init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
		goto err_fid_core_init;
	}

	err = mlxsw_sp_policers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize policers\n");
		goto err_policers_init;
	}

	err = mlxsw_sp_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
		goto err_traps_init;
	}

	err = mlxsw_sp_devlink_traps_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize devlink traps\n");
		goto err_devlink_traps_init;
	}

	err = mlxsw_sp_buffers_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
		goto err_buffers_init;
	}

	/* Initialize SPAN before router and switchdev, so that those components
	 * can call mlxsw_sp_span_respin().
	 */
	err = mlxsw_sp_span_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
		goto err_span_init;
	}

	err = mlxsw_sp_switchdev_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
		goto err_switchdev_init;
	}

	err = mlxsw_sp_counter_pool_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
		goto err_counter_pool_init;
	}

	err = mlxsw_sp_afa_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL actions\n");
		goto err_afa_init;
	}

	err = mlxsw_sp_ipv6_addr_ht_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize hash table for IPv6 addresses\n");
		goto err_ipv6_addr_ht_init;
	}

	err = mlxsw_sp_nve_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize NVE\n");
		goto err_nve_init;
	}

	err = mlxsw_sp_port_range_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize port ranges\n");
		goto err_port_range_init;
	}

	err = mlxsw_sp_acl_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
		goto err_acl_init;
	}

	err = mlxsw_sp_router_init(mlxsw_sp, extack);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
		goto err_router_init;
	}

	/* PTP support is only attempted when the bus can read the HW clock. */
	if (mlxsw_sp->bus_info->read_clock_capable) {
		/* NULL is a valid return value from clock_init */
		mlxsw_sp->clock =
			mlxsw_sp->ptp_ops->clock_init(mlxsw_sp,
						      mlxsw_sp->bus_info->dev);
		if (IS_ERR(mlxsw_sp->clock)) {
			err = PTR_ERR(mlxsw_sp->clock);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to init ptp clock\n");
			goto err_ptp_clock_init;
		}
	}

	if (mlxsw_sp->clock) {
		/* NULL is a valid return value from ptp_ops->init */
		mlxsw_sp->ptp_state = mlxsw_sp->ptp_ops->init(mlxsw_sp);
		if (IS_ERR(mlxsw_sp->ptp_state)) {
			err = PTR_ERR(mlxsw_sp->ptp_state);
			dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize PTP\n");
			goto err_ptp_init;
		}
	}

	/* Initialize netdevice notifier after SPAN is initialized, so that the
	 * event handler can call SPAN respin.
	 */
	mlxsw_sp->netdevice_nb.notifier_call = mlxsw_sp_netdevice_event;
	err = register_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					      &mlxsw_sp->netdevice_nb);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to register netdev notifier\n");
		goto err_netdev_notifier;
	}

	err = mlxsw_sp_dpipe_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
		goto err_dpipe_init;
	}

	err = mlxsw_sp_port_module_info_init(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init port module info\n");
		goto err_port_module_info_init;
	}

	err = rhashtable_init(&mlxsw_sp->sample_trigger_ht,
			      &mlxsw_sp_sample_trigger_ht_params);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to init sampling trigger hashtable\n");
		goto err_sample_trigger_init;
	}

	err = mlxsw_sp_ports_create(mlxsw_sp);
	if (err) {
		dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
		goto err_ports_create;
	}

	return 0;

	/* Error path: undo everything above in exact reverse order. */
err_ports_create:
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
err_sample_trigger_init:
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
err_port_module_info_init:
	mlxsw_sp_dpipe_fini(mlxsw_sp);
err_dpipe_init:
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
err_netdev_notifier:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
err_ptp_init:
	if (mlxsw_sp->clock)
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
err_ptp_clock_init:
	mlxsw_sp_router_fini(mlxsw_sp);
err_router_init:
	mlxsw_sp_acl_fini(mlxsw_sp);
err_acl_init:
	mlxsw_sp_port_range_fini(mlxsw_sp);
err_port_range_init:
	mlxsw_sp_nve_fini(mlxsw_sp);
err_nve_init:
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
err_ipv6_addr_ht_init:
	mlxsw_sp_afa_fini(mlxsw_sp);
err_afa_init:
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
err_counter_pool_init:
	mlxsw_sp_switchdev_fini(mlxsw_sp);
err_switchdev_init:
	mlxsw_sp_span_fini(mlxsw_sp);
err_span_init:
	mlxsw_sp_buffers_fini(mlxsw_sp);
err_buffers_init:
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
err_devlink_traps_init:
	mlxsw_sp_traps_fini(mlxsw_sp);
err_traps_init:
	mlxsw_sp_policers_fini(mlxsw_sp);
err_policers_init:
	mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
err_fid_core_init:
	mlxsw_sp_lag_fini(mlxsw_sp);
err_lag_init:
	mlxsw_sp_pgt_fini(mlxsw_sp);
err_pgt_init:
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
	return err;
}
3384
/* Spectrum-1 probe entry point: bind the SP1 flavors of the various ops
 * structures and constants, then run the common mlxsw_sp_init().
 */
static int mlxsw_sp1_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp1_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp1_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp1_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp1_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp1_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp1_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp1_acl_tcam_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp1_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp1_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp1_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp1_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp1_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp1_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp1_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp1_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp1_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp1_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp1_router_ops;
	mlxsw_sp->listeners = mlxsw_sp1_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp1_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp1_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP1;
	mlxsw_sp->pgt_smpe_index_valid = true;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3417
/* Spectrum-2 probe entry point: bind the SP2 ops structures (note the
 * additional acl_bf_ops compared to Spectrum-1), then run the common
 * mlxsw_sp_init().
 */
static int mlxsw_sp2_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp2_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp2_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP2;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3451
/* Spectrum-3 probe entry point. Reuses most Spectrum-2 ops, overriding
 * only the shared-buffer ops, SPAN ops and lowest shaper bucket size
 * with SP3-specific variants, then runs the common mlxsw_sp_init().
 */
static int mlxsw_sp3_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp2_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp2_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp2_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP3;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3485
/* Spectrum-4 probe entry point. Mostly Spectrum-2/3 ops, with
 * SP4-specific flex-key ops, ACL Bloom filter ops, PTP ops and lowest
 * shaper bucket size, then runs the common mlxsw_sp_init().
 */
static int mlxsw_sp4_init(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_bus_info *mlxsw_bus_info,
			  struct netlink_ext_ack *extack)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp->switchdev_ops = &mlxsw_sp2_switchdev_ops;
	mlxsw_sp->kvdl_ops = &mlxsw_sp2_kvdl_ops;
	mlxsw_sp->afa_ops = &mlxsw_sp2_act_afa_ops;
	mlxsw_sp->afk_ops = &mlxsw_sp4_afk_ops;
	mlxsw_sp->mr_tcam_ops = &mlxsw_sp2_mr_tcam_ops;
	mlxsw_sp->acl_rulei_ops = &mlxsw_sp2_acl_rulei_ops;
	mlxsw_sp->acl_tcam_ops = &mlxsw_sp2_acl_tcam_ops;
	mlxsw_sp->acl_bf_ops = &mlxsw_sp4_acl_bf_ops;
	mlxsw_sp->nve_ops_arr = mlxsw_sp2_nve_ops_arr;
	mlxsw_sp->mac_mask = mlxsw_sp2_mac_mask;
	mlxsw_sp->sb_vals = &mlxsw_sp2_sb_vals;
	mlxsw_sp->sb_ops = &mlxsw_sp3_sb_ops;
	mlxsw_sp->port_type_speed_ops = &mlxsw_sp2_port_type_speed_ops;
	mlxsw_sp->ptp_ops = &mlxsw_sp4_ptp_ops;
	mlxsw_sp->span_ops = &mlxsw_sp3_span_ops;
	mlxsw_sp->policer_core_ops = &mlxsw_sp2_policer_core_ops;
	mlxsw_sp->trap_ops = &mlxsw_sp2_trap_ops;
	mlxsw_sp->mall_ops = &mlxsw_sp2_mall_ops;
	mlxsw_sp->router_ops = &mlxsw_sp2_router_ops;
	mlxsw_sp->listeners = mlxsw_sp2_listener;
	mlxsw_sp->listeners_count = ARRAY_SIZE(mlxsw_sp2_listener);
	mlxsw_sp->fid_core_ops = &mlxsw_sp2_fid_core_ops;
	mlxsw_sp->lowest_shaper_bs = MLXSW_REG_QEEC_LOWEST_SHAPER_BS_SP4;
	mlxsw_sp->pgt_smpe_index_valid = false;

	return mlxsw_sp_init(mlxsw_core, mlxsw_bus_info, extack);
}
3519
/* Driver removal path. Tears down every sub-system brought up by
 * mlxsw_sp_init() in exact reverse order of initialization.
 */
static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
{
	struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

	mlxsw_sp_ports_remove(mlxsw_sp);
	rhashtable_destroy(&mlxsw_sp->sample_trigger_ht);
	mlxsw_sp_port_module_info_fini(mlxsw_sp);
	mlxsw_sp_dpipe_fini(mlxsw_sp);
	unregister_netdevice_notifier_net(mlxsw_sp_net(mlxsw_sp),
					  &mlxsw_sp->netdevice_nb);
	/* PTP was only initialized when a clock was successfully created. */
	if (mlxsw_sp->clock) {
		mlxsw_sp->ptp_ops->fini(mlxsw_sp->ptp_state);
		mlxsw_sp->ptp_ops->clock_fini(mlxsw_sp->clock);
	}
	mlxsw_sp_router_fini(mlxsw_sp);
	mlxsw_sp_acl_fini(mlxsw_sp);
	mlxsw_sp_port_range_fini(mlxsw_sp);
	mlxsw_sp_nve_fini(mlxsw_sp);
	mlxsw_sp_ipv6_addr_ht_fini(mlxsw_sp);
	mlxsw_sp_afa_fini(mlxsw_sp);
	mlxsw_sp_counter_pool_fini(mlxsw_sp);
	mlxsw_sp_switchdev_fini(mlxsw_sp);
	mlxsw_sp_span_fini(mlxsw_sp);
	mlxsw_sp_buffers_fini(mlxsw_sp);
	mlxsw_sp_devlink_traps_fini(mlxsw_sp);
	mlxsw_sp_traps_fini(mlxsw_sp);
	mlxsw_sp_policers_fini(mlxsw_sp);
	mlxsw_sp->fid_core_ops->fini(mlxsw_sp);
	mlxsw_sp_lag_fini(mlxsw_sp);
	mlxsw_sp_pgt_fini(mlxsw_sp);
	mlxsw_sp_kvdl_fini(mlxsw_sp);
	mlxsw_sp_parsing_fini(mlxsw_sp);
}
3553
/* Device configuration profile for Spectrum-1. Of the profiles in this
 * file, only this one sets explicit KVD partition sizes (used_kvd_sizes
 * and the kvd_* fields below).
 */
static const struct mlxsw_config_profile mlxsw_sp1_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.used_kvd_sizes			= 1,
	.kvd_hash_single_parts		= 59,
	.kvd_hash_double_parts		= 41,
	.kvd_linear_size		= MLXSW_SP_KVD_LINEAR_SIZE,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
};
3574
/* Device configuration profile for the Spectrum-2 family. Compared to
 * Spectrum-1 it drops the explicit KVD sizing and adds UTC CQE time
 * stamping plus software LAG-mode and CFF flood-mode preferences.
 */
static const struct mlxsw_config_profile mlxsw_sp2_config_profile = {
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw		= true,
	.flood_mode_prefer_cff		= true,
};
3595
/* Reduce number of LAGs from full capacity (256) to the maximum supported LAGs
 * in Spectrum-2/3, to avoid regression in number of free entries in the PGT
 * table.
 */
#define MLXSW_SP4_CONFIG_PROFILE_MAX_LAG 128

/* Device configuration profile for Spectrum-4. Same as the Spectrum-2
 * profile, plus the reduced max_lag limit explained above.
 */
static const struct mlxsw_config_profile mlxsw_sp4_config_profile = {
	.used_max_lag			= 1,
	.max_lag			= MLXSW_SP4_CONFIG_PROFILE_MAX_LAG,
	.used_flood_mode		= 1,
	.flood_mode			= MLXSW_CMD_MBOX_CONFIG_PROFILE_FLOOD_MODE_CONTROLLED,
	.used_max_ib_mc			= 1,
	.max_ib_mc			= 0,
	.used_max_pkey			= 1,
	.max_pkey			= 0,
	.used_ubridge			= 1,
	.ubridge			= 1,
	.swid_config			= {
		{
			.used_type	= 1,
			.type		= MLXSW_PORT_SWID_TYPE_ETH,
		}
	},
	.used_cqe_time_stamp_type	= 1,
	.cqe_time_stamp_type		= MLXSW_CMD_MBOX_CONFIG_PROFILE_CQE_TIME_STAMP_TYPE_UTC,
	.lag_mode_prefer_sw		= true,
	.flood_mode_prefer_cff		= true,
};
3624
/* Compute devlink size-parameter limits for the KVD resource and its
 * three partitions (linear, hash-double, hash-single). The KVD itself
 * is fixed at the device-reported size; each partition may range from
 * its minimum up to the total KVD size minus the minimums of the other
 * two partitions, in MLXSW_SP_KVD_GRANULARITY steps.
 */
static void
mlxsw_sp_resource_size_params_prepare(struct mlxsw_core *mlxsw_core,
				      struct devlink_resource_size_params *kvd_size_params,
				      struct devlink_resource_size_params *linear_size_params,
				      struct devlink_resource_size_params *hash_double_size_params,
				      struct devlink_resource_size_params *hash_single_size_params)
{
	u32 single_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_SINGLE_MIN_SIZE);
	u32 double_size_min = MLXSW_CORE_RES_GET(mlxsw_core,
						 KVD_DOUBLE_MIN_SIZE);
	u32 kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
	u32 linear_size_min = 0;

	devlink_resource_size_params_init(kvd_size_params, kvd_size, kvd_size,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(linear_size_params, linear_size_min,
					  kvd_size - single_size_min -
					  double_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_double_size_params,
					  double_size_min,
					  kvd_size - single_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
	devlink_resource_size_params_init(hash_single_size_params,
					  single_size_min,
					  kvd_size - double_size_min -
					  linear_size_min,
					  MLXSW_SP_KVD_GRANULARITY,
					  DEVLINK_RESOURCE_UNIT_ENTRY);
}
3660
3661 static int mlxsw_sp1_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3662 {
3663         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3664         struct devlink_resource_size_params hash_single_size_params;
3665         struct devlink_resource_size_params hash_double_size_params;
3666         struct devlink_resource_size_params linear_size_params;
3667         struct devlink_resource_size_params kvd_size_params;
3668         u32 kvd_size, single_size, double_size, linear_size;
3669         const struct mlxsw_config_profile *profile;
3670         int err;
3671
3672         profile = &mlxsw_sp1_config_profile;
3673         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3674                 return -EIO;
3675
3676         mlxsw_sp_resource_size_params_prepare(mlxsw_core, &kvd_size_params,
3677                                               &linear_size_params,
3678                                               &hash_double_size_params,
3679                                               &hash_single_size_params);
3680
3681         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3682         err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3683                                      kvd_size, MLXSW_SP_RESOURCE_KVD,
3684                                      DEVLINK_RESOURCE_ID_PARENT_TOP,
3685                                      &kvd_size_params);
3686         if (err)
3687                 return err;
3688
3689         linear_size = profile->kvd_linear_size;
3690         err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_LINEAR,
3691                                      linear_size,
3692                                      MLXSW_SP_RESOURCE_KVD_LINEAR,
3693                                      MLXSW_SP_RESOURCE_KVD,
3694                                      &linear_size_params);
3695         if (err)
3696                 return err;
3697
3698         err = mlxsw_sp1_kvdl_resources_register(mlxsw_core);
3699         if  (err)
3700                 return err;
3701
3702         double_size = kvd_size - linear_size;
3703         double_size *= profile->kvd_hash_double_parts;
3704         double_size /= profile->kvd_hash_double_parts +
3705                        profile->kvd_hash_single_parts;
3706         double_size = rounddown(double_size, MLXSW_SP_KVD_GRANULARITY);
3707         err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_DOUBLE,
3708                                      double_size,
3709                                      MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
3710                                      MLXSW_SP_RESOURCE_KVD,
3711                                      &hash_double_size_params);
3712         if (err)
3713                 return err;
3714
3715         single_size = kvd_size - double_size - linear_size;
3716         err = devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD_HASH_SINGLE,
3717                                      single_size,
3718                                      MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
3719                                      MLXSW_SP_RESOURCE_KVD,
3720                                      &hash_single_size_params);
3721         if (err)
3722                 return err;
3723
3724         return 0;
3725 }
3726
3727 static int mlxsw_sp2_resources_kvd_register(struct mlxsw_core *mlxsw_core)
3728 {
3729         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3730         struct devlink_resource_size_params kvd_size_params;
3731         u32 kvd_size;
3732
3733         if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SIZE))
3734                 return -EIO;
3735
3736         kvd_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE);
3737         devlink_resource_size_params_init(&kvd_size_params, kvd_size, kvd_size,
3738                                           MLXSW_SP_KVD_GRANULARITY,
3739                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3740
3741         return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_KVD,
3742                                       kvd_size, MLXSW_SP_RESOURCE_KVD,
3743                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3744                                       &kvd_size_params);
3745 }
3746
3747 static int mlxsw_sp_resources_span_register(struct mlxsw_core *mlxsw_core)
3748 {
3749         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3750         struct devlink_resource_size_params span_size_params;
3751         u32 max_span;
3752
3753         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SPAN))
3754                 return -EIO;
3755
3756         max_span = MLXSW_CORE_RES_GET(mlxsw_core, MAX_SPAN);
3757         devlink_resource_size_params_init(&span_size_params, max_span, max_span,
3758                                           1, DEVLINK_RESOURCE_UNIT_ENTRY);
3759
3760         return devl_resource_register(devlink, MLXSW_SP_RESOURCE_NAME_SPAN,
3761                                       max_span, MLXSW_SP_RESOURCE_SPAN,
3762                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3763                                       &span_size_params);
3764 }
3765
3766 static int
3767 mlxsw_sp_resources_rif_mac_profile_register(struct mlxsw_core *mlxsw_core)
3768 {
3769         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3770         struct devlink_resource_size_params size_params;
3771         u8 max_rif_mac_profiles;
3772
3773         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIF_MAC_PROFILES))
3774                 max_rif_mac_profiles = 1;
3775         else
3776                 max_rif_mac_profiles = MLXSW_CORE_RES_GET(mlxsw_core,
3777                                                           MAX_RIF_MAC_PROFILES);
3778         devlink_resource_size_params_init(&size_params, max_rif_mac_profiles,
3779                                           max_rif_mac_profiles, 1,
3780                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3781
3782         return devl_resource_register(devlink,
3783                                       "rif_mac_profiles",
3784                                       max_rif_mac_profiles,
3785                                       MLXSW_SP_RESOURCE_RIF_MAC_PROFILES,
3786                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3787                                       &size_params);
3788 }
3789
3790 static int mlxsw_sp_resources_rifs_register(struct mlxsw_core *mlxsw_core)
3791 {
3792         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3793         struct devlink_resource_size_params size_params;
3794         u64 max_rifs;
3795
3796         if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_RIFS))
3797                 return -EIO;
3798
3799         max_rifs = MLXSW_CORE_RES_GET(mlxsw_core, MAX_RIFS);
3800         devlink_resource_size_params_init(&size_params, max_rifs, max_rifs,
3801                                           1, DEVLINK_RESOURCE_UNIT_ENTRY);
3802
3803         return devl_resource_register(devlink, "rifs", max_rifs,
3804                                       MLXSW_SP_RESOURCE_RIFS,
3805                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3806                                       &size_params);
3807 }
3808
3809 static int
3810 mlxsw_sp_resources_port_range_register(struct mlxsw_core *mlxsw_core)
3811 {
3812         struct devlink *devlink = priv_to_devlink(mlxsw_core);
3813         struct devlink_resource_size_params size_params;
3814         u64 max;
3815
3816         if (!MLXSW_CORE_RES_VALID(mlxsw_core, ACL_MAX_L4_PORT_RANGE))
3817                 return -EIO;
3818
3819         max = MLXSW_CORE_RES_GET(mlxsw_core, ACL_MAX_L4_PORT_RANGE);
3820         devlink_resource_size_params_init(&size_params, max, max, 1,
3821                                           DEVLINK_RESOURCE_UNIT_ENTRY);
3822
3823         return devl_resource_register(devlink, "port_range_registers", max,
3824                                       MLXSW_SP_RESOURCE_PORT_RANGE_REGISTERS,
3825                                       DEVLINK_RESOURCE_ID_PARENT_TOP,
3826                                       &size_params);
3827 }
3828
/* Register all Spectrum-1 devlink resources. Any failure after the
 * first registration is handled by unregistering every resource already
 * registered on this devlink instance, hence the shared label chain.
 */
static int mlxsw_sp1_resources_register(struct mlxsw_core *mlxsw_core)
{
	int err;

	err = mlxsw_sp1_resources_kvd_register(mlxsw_core);
	if (err)
		return err;

	err = mlxsw_sp_resources_span_register(mlxsw_core);
	if (err)
		goto err_resources_span_register;

	err = mlxsw_sp_counter_resources_register(mlxsw_core);
	if (err)
		goto err_resources_counter_register;

	err = mlxsw_sp_policer_resources_register(mlxsw_core);
	if (err)
		goto err_policer_resources_register;

	err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
	if (err)
		goto err_resources_rif_mac_profile_register;

	err = mlxsw_sp_resources_rifs_register(mlxsw_core);
	if (err)
		goto err_resources_rifs_register;

	err = mlxsw_sp_resources_port_range_register(mlxsw_core);
	if (err)
		goto err_resources_port_range_register;

	return 0;

err_resources_port_range_register:
err_resources_rifs_register:
err_resources_rif_mac_profile_register:
err_policer_resources_register:
err_resources_counter_register:
err_resources_span_register:
	devl_resources_unregister(priv_to_devlink(mlxsw_core));
	return err;
}
3872
/* Register all devlink resources exposed by Spectrum-2 and later devices. */
static int mlxsw_sp2_resources_register(struct mlxsw_core *mlxsw_core)
{
        int err;

        /* A KVD registration failure needs no unwinding. Every later
         * failure is cleaned up by one devl_resources_unregister() call,
         * which removes all resources registered so far.
         */
        err = mlxsw_sp2_resources_kvd_register(mlxsw_core);
        if (err)
                return err;

        err = mlxsw_sp_resources_span_register(mlxsw_core);
        if (err)
                goto err_unregister;

        err = mlxsw_sp_counter_resources_register(mlxsw_core);
        if (err)
                goto err_unregister;

        err = mlxsw_sp_policer_resources_register(mlxsw_core);
        if (err)
                goto err_unregister;

        err = mlxsw_sp_resources_rif_mac_profile_register(mlxsw_core);
        if (err)
                goto err_unregister;

        err = mlxsw_sp_resources_rifs_register(mlxsw_core);
        if (err)
                goto err_unregister;

        err = mlxsw_sp_resources_port_range_register(mlxsw_core);
        if (err)
                goto err_unregister;

        return 0;

err_unregister:
        devl_resources_unregister(priv_to_devlink(mlxsw_core));
        return err;
}
3916
/* Compute the sizes of the three KVD partitions (linear, hash-single,
 * hash-double). User-provided sizes are fetched from devlink; for any
 * partition devlink cannot provide, a default is derived from @profile.
 * Returns -EIO if the required device resources are absent or the
 * resulting split is illegal.
 */
static int mlxsw_sp_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
                                  const struct mlxsw_config_profile *profile,
                                  u64 *p_single_size, u64 *p_double_size,
                                  u64 *p_linear_size)
{
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
        u32 double_size;
        int err;

        if (!MLXSW_CORE_RES_VALID(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
            !MLXSW_CORE_RES_VALID(mlxsw_core, KVD_DOUBLE_MIN_SIZE))
                return -EIO;

        /* The hash part is what is left of the KVD without the
         * linear part. It is split into the single size and
         * double size by the parts ratio from the profile.
         * Both sizes must be a multiple of the granularity
         * from the profile. In case the user provided the sizes,
         * they are obtained via devlink.
         */
        err = devl_resource_size_get(devlink,
                                     MLXSW_SP_RESOURCE_KVD_LINEAR,
                                     p_linear_size);
        if (err)
                /* Not user-configured - fall back to the profile default. */
                *p_linear_size = profile->kvd_linear_size;

        err = devl_resource_size_get(devlink,
                                     MLXSW_SP_RESOURCE_KVD_HASH_DOUBLE,
                                     p_double_size);
        if (err) {
                /* Split the remaining (non-linear) KVD between double and
                 * single hash by the parts ratio, rounded down to the
                 * granularity.
                 */
                double_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
                              *p_linear_size;
                double_size *= profile->kvd_hash_double_parts;
                double_size /= profile->kvd_hash_double_parts +
                               profile->kvd_hash_single_parts;
                *p_double_size = rounddown(double_size,
                                           MLXSW_SP_KVD_GRANULARITY);
        }

        err = devl_resource_size_get(devlink,
                                     MLXSW_SP_RESOURCE_KVD_HASH_SINGLE,
                                     p_single_size);
        if (err)
                /* Single hash gets whatever remains of the KVD. */
                *p_single_size = MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) -
                                 *p_double_size - *p_linear_size;

        /* Check results are legal. */
        if (*p_single_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_SINGLE_MIN_SIZE) ||
            *p_double_size < MLXSW_CORE_RES_GET(mlxsw_core, KVD_DOUBLE_MIN_SIZE) ||
            MLXSW_CORE_RES_GET(mlxsw_core, KVD_SIZE) < *p_linear_size)
                return -EIO;

        return 0;
}
3971
/* Called by core when a Tx completion for @skb arrives. Strips the Tx
 * header and forwards the SKB to the per-ASIC PTP transmitted handler.
 */
static void mlxsw_sp_ptp_transmitted(struct mlxsw_core *mlxsw_core,
                                     struct sk_buff *skb, u16 local_port)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);

        skb_pull(skb, MLXSW_TXHDR_LEN);
        mlxsw_sp->ptp_ops->transmitted(mlxsw_sp, skb, local_port);
}
3980
/* Driver registration for Spectrum-1 ASICs. Mostly shares the common
 * Spectrum callbacks; SP1 is the only generation with a kvd_sizes_get
 * callback (KVD split is configurable) and does not support CQE v2 on
 * SDQs.
 */
static struct mlxsw_driver mlxsw_sp1_driver = {
        .kind                           = mlxsw_sp1_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp1_fw_rev,
        .fw_filename                    = MLXSW_SP1_FW_FILENAME,
        .init                           = mlxsw_sp1_init,
        .fini                           = mlxsw_sp_fini,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp1_resources_register,
        .kvd_sizes_get                  = mlxsw_sp_kvd_sizes_get,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp1_config_profile,
        .sdq_supports_cqe_v2            = false,
};
4017
/* Driver registration for Spectrum-2 ASICs. Compared to SP1 it adds
 * ports_remove_selected, uses the SP2 resource set / config profile and
 * supports CQE v2 on SDQs.
 */
static struct mlxsw_driver mlxsw_sp2_driver = {
        .kind                           = mlxsw_sp2_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp2_fw_rev,
        .fw_filename                    = MLXSW_SP2_FW_FILENAME,
        .init                           = mlxsw_sp2_init,
        .fini                           = mlxsw_sp_fini,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .ports_remove_selected          = mlxsw_sp_ports_remove_selected,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
        .sdq_supports_cqe_v2            = true,
};
4054
/* Driver registration for Spectrum-3 ASICs. Reuses the SP2 resource set
 * and config profile; only init and the firmware requirement/filename are
 * SP3-specific.
 */
static struct mlxsw_driver mlxsw_sp3_driver = {
        .kind                           = mlxsw_sp3_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .fw_req_rev                     = &mlxsw_sp3_fw_rev,
        .fw_filename                    = MLXSW_SP3_FW_FILENAME,
        .init                           = mlxsw_sp3_init,
        .fini                           = mlxsw_sp_fini,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .ports_remove_selected          = mlxsw_sp_ports_remove_selected,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp2_config_profile,
        .sdq_supports_cqe_v2            = true,
};
4091
/* Driver registration for Spectrum-4 ASICs. Reuses the SP2 resource set
 * with an SP4-specific init and config profile.
 *
 * NOTE(review): unlike SP1-SP3, no .fw_req_rev / .fw_filename is set here,
 * so the core performs no firmware version enforcement for SP4 -
 * presumably firmware is managed out-of-band; confirm against mlxsw core.
 */
static struct mlxsw_driver mlxsw_sp4_driver = {
        .kind                           = mlxsw_sp4_driver_name,
        .priv_size                      = sizeof(struct mlxsw_sp),
        .init                           = mlxsw_sp4_init,
        .fini                           = mlxsw_sp_fini,
        .port_split                     = mlxsw_sp_port_split,
        .port_unsplit                   = mlxsw_sp_port_unsplit,
        .ports_remove_selected          = mlxsw_sp_ports_remove_selected,
        .sb_pool_get                    = mlxsw_sp_sb_pool_get,
        .sb_pool_set                    = mlxsw_sp_sb_pool_set,
        .sb_port_pool_get               = mlxsw_sp_sb_port_pool_get,
        .sb_port_pool_set               = mlxsw_sp_sb_port_pool_set,
        .sb_tc_pool_bind_get            = mlxsw_sp_sb_tc_pool_bind_get,
        .sb_tc_pool_bind_set            = mlxsw_sp_sb_tc_pool_bind_set,
        .sb_occ_snapshot                = mlxsw_sp_sb_occ_snapshot,
        .sb_occ_max_clear               = mlxsw_sp_sb_occ_max_clear,
        .sb_occ_port_pool_get           = mlxsw_sp_sb_occ_port_pool_get,
        .sb_occ_tc_port_bind_get        = mlxsw_sp_sb_occ_tc_port_bind_get,
        .trap_init                      = mlxsw_sp_trap_init,
        .trap_fini                      = mlxsw_sp_trap_fini,
        .trap_action_set                = mlxsw_sp_trap_action_set,
        .trap_group_init                = mlxsw_sp_trap_group_init,
        .trap_group_set                 = mlxsw_sp_trap_group_set,
        .trap_policer_init              = mlxsw_sp_trap_policer_init,
        .trap_policer_fini              = mlxsw_sp_trap_policer_fini,
        .trap_policer_set               = mlxsw_sp_trap_policer_set,
        .trap_policer_counter_get       = mlxsw_sp_trap_policer_counter_get,
        .txhdr_construct                = mlxsw_sp_txhdr_construct,
        .resources_register             = mlxsw_sp2_resources_register,
        .ptp_transmitted                = mlxsw_sp_ptp_transmitted,
        .txhdr_len                      = MLXSW_TXHDR_LEN,
        .profile                        = &mlxsw_sp4_config_profile,
        .sdq_supports_cqe_v2            = true,
};
4126
/* Return true if @dev is a mlxsw_sp port netdevice, identified by its
 * netdev_ops pointer.
 */
bool mlxsw_sp_port_dev_check(const struct net_device *dev)
{
        return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
}
4131
4132 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev,
4133                                    struct netdev_nested_priv *priv)
4134 {
4135         int ret = 0;
4136
4137         if (mlxsw_sp_port_dev_check(lower_dev)) {
4138                 priv->data = (void *)netdev_priv(lower_dev);
4139                 ret = 1;
4140         }
4141
4142         return ret;
4143 }
4144
4145 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
4146 {
4147         struct netdev_nested_priv priv = {
4148                 .data = NULL,
4149         };
4150
4151         if (mlxsw_sp_port_dev_check(dev))
4152                 return netdev_priv(dev);
4153
4154         netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &priv);
4155
4156         return (struct mlxsw_sp_port *)priv.data;
4157 }
4158
4159 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
4160 {
4161         struct mlxsw_sp_port *mlxsw_sp_port;
4162
4163         mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
4164         return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
4165 }
4166
4167 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
4168 {
4169         struct netdev_nested_priv priv = {
4170                 .data = NULL,
4171         };
4172
4173         if (mlxsw_sp_port_dev_check(dev))
4174                 return netdev_priv(dev);
4175
4176         netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
4177                                       &priv);
4178
4179         return (struct mlxsw_sp_port *)priv.data;
4180 }
4181
/* Take a reference on the increased parsing depth. The first caller writes
 * the MPRS register to actually raise the device's parsing depth; later
 * callers only bump the reference count. Serialized by parsing.lock.
 * Returns 0 on success or a register-write error.
 */
int mlxsw_sp_parsing_depth_inc(struct mlxsw_sp *mlxsw_sp)
{
        char mprs_pl[MLXSW_REG_MPRS_LEN];
        int err = 0;

        mutex_lock(&mlxsw_sp->parsing.lock);

        /* Depth was already increased by a previous user - just reference. */
        if (refcount_inc_not_zero(&mlxsw_sp->parsing.parsing_depth_ref))
                goto out_unlock;

        /* Keep the currently configured VxLAN UDP port while changing depth. */
        mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_INCREASED_PARSING_DEPTH,
                            mlxsw_sp->parsing.vxlan_udp_dport);
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
        if (err)
                goto out_unlock;

        /* Cache the new depth only after the device accepted it. */
        mlxsw_sp->parsing.parsing_depth = MLXSW_SP_INCREASED_PARSING_DEPTH;
        refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 1);

out_unlock:
        mutex_unlock(&mlxsw_sp->parsing.lock);
        return err;
}
4205
/* Drop a reference taken by mlxsw_sp_parsing_depth_inc(). The last user
 * restores the default parsing depth. The register write is best-effort -
 * its return value is deliberately ignored since there is no way to report
 * failure to the callers on this teardown path.
 */
void mlxsw_sp_parsing_depth_dec(struct mlxsw_sp *mlxsw_sp)
{
        char mprs_pl[MLXSW_REG_MPRS_LEN];

        mutex_lock(&mlxsw_sp->parsing.lock);

        /* Other users still need the increased depth. */
        if (!refcount_dec_and_test(&mlxsw_sp->parsing.parsing_depth_ref))
                goto out_unlock;

        mlxsw_reg_mprs_pack(mprs_pl, MLXSW_SP_DEFAULT_PARSING_DEPTH,
                            mlxsw_sp->parsing.vxlan_udp_dport);
        mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
        mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH;

out_unlock:
        mutex_unlock(&mlxsw_sp->parsing.lock);
}
4223
/* Configure the UDP destination port the device treats as VxLAN, keeping
 * the currently configured parsing depth. The cached port is updated only
 * after the MPRS register write succeeds. Serialized by parsing.lock.
 */
int mlxsw_sp_parsing_vxlan_udp_dport_set(struct mlxsw_sp *mlxsw_sp,
                                         __be16 udp_dport)
{
        char mprs_pl[MLXSW_REG_MPRS_LEN];
        int err;

        mutex_lock(&mlxsw_sp->parsing.lock);

        mlxsw_reg_mprs_pack(mprs_pl, mlxsw_sp->parsing.parsing_depth,
                            be16_to_cpu(udp_dport));
        err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mprs), mprs_pl);
        if (err)
                goto out_unlock;

        /* Cache in host byte order, as mlxsw_reg_mprs_pack() expects. */
        mlxsw_sp->parsing.vxlan_udp_dport = be16_to_cpu(udp_dport);

out_unlock:
        mutex_unlock(&mlxsw_sp->parsing.lock);
        return err;
}
4244
/* Make @mlxsw_sp_port leave the bridge @lag_dev is enslaved to (if any),
 * as well as any bridge an upper device of @lag_dev is enslaved to.
 */
static void
mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
                                 struct net_device *lag_dev)
{
        struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
        struct net_device *upper_dev;
        struct list_head *iter;

        /* The LAG itself may be a bridge port. */
        if (netif_is_bridge_port(lag_dev))
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);

        /* Uppers of the LAG (e.g. VLAN devices) may be bridge ports too. */
        netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
                if (!netif_is_bridge_port(upper_dev))
                        continue;
                br_dev = netdev_master_upper_dev_get(upper_dev);
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
        }
}
4263
/* Create the LAG record with ID @lag_id in the device via SLDR. */
static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
        char sldr_pl[MLXSW_REG_SLDR_LEN];

        mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
4271
/* Destroy the LAG record with ID @lag_id in the device via SLDR. */
static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
{
        char sldr_pl[MLXSW_REG_SLDR_LEN];

        mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
}
4279
/* Add the port to LAG @lag_id at member slot @port_index as a collector
 * port via SLCOR.
 */
static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
                                     u16 lag_id, u8 port_index)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char slcor_pl[MLXSW_REG_SLCOR_LEN];

        mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
                                      lag_id, port_index);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
4290
/* Remove the port from LAG @lag_id's collector list via SLCOR. */
static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
                                        u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char slcor_pl[MLXSW_REG_SLCOR_LEN];

        mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
                                         lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
4301
/* Enable collection on the port for LAG @lag_id via SLCOR. */
static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
                                        u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char slcor_pl[MLXSW_REG_SLCOR_LEN];

        mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
                                        lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
4312
/* Disable collection on the port for LAG @lag_id via SLCOR. */
static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
                                         u16 lag_id)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        char slcor_pl[MLXSW_REG_SLCOR_LEN];

        mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
                                         lag_id);
        return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
}
4323
4324 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4325                                   struct net_device *lag_dev,
4326                                   u16 *p_lag_id)
4327 {
4328         struct mlxsw_sp_upper *lag;
4329         int free_lag_id = -1;
4330         u16 max_lag;
4331         int err, i;
4332
4333         err = mlxsw_core_max_lag(mlxsw_sp->core, &max_lag);
4334         if (err)
4335                 return err;
4336
4337         for (i = 0; i < max_lag; i++) {
4338                 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
4339                 if (lag->ref_count) {
4340                         if (lag->dev == lag_dev) {
4341                                 *p_lag_id = i;
4342                                 return 0;
4343                         }
4344                 } else if (free_lag_id < 0) {
4345                         free_lag_id = i;
4346                 }
4347         }
4348         if (free_lag_id < 0)
4349                 return -EBUSY;
4350         *p_lag_id = free_lag_id;
4351         return 0;
4352 }
4353
4354 static bool
4355 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4356                           struct net_device *lag_dev,
4357                           struct netdev_lag_upper_info *lag_upper_info,
4358                           struct netlink_ext_ack *extack)
4359 {
4360         u16 lag_id;
4361
4362         if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0) {
4363                 NL_SET_ERR_MSG_MOD(extack, "Exceeded number of supported LAG devices");
4364                 return false;
4365         }
4366         if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH) {
4367                 NL_SET_ERR_MSG_MOD(extack, "LAG device using unsupported Tx type");
4368                 return false;
4369         }
4370         return true;
4371 }
4372
4373 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4374                                        u16 lag_id, u8 *p_port_index)
4375 {
4376         u64 max_lag_members;
4377         int i;
4378
4379         max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4380                                              MAX_LAG_MEMBERS);
4381         for (i = 0; i < max_lag_members; i++) {
4382                 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4383                         *p_port_index = i;
4384                         return 0;
4385                 }
4386         }
4387         return -EBUSY;
4388 }
4389
/* Join @mlxsw_sp_port to the bridge @lag_dev is enslaved to (if any) and to
 * the bridges that VLAN uppers of @lag_dev are enslaved to. On error, every
 * join performed so far is rolled back; the @done counter records how many
 * VLAN-upper iterations completed so the rollback walk stops at the same
 * point.
 */
static int mlxsw_sp_lag_uppers_bridge_join(struct mlxsw_sp_port *mlxsw_sp_port,
                                           struct net_device *lag_dev,
                                           struct netlink_ext_ack *extack)
{
        struct net_device *upper_dev;
        struct net_device *master;
        struct list_head *iter;
        int done = 0;
        int err;

        /* Join the bridge the LAG itself is enslaved to, if any. */
        master = netdev_master_upper_dev_get(lag_dev);
        if (master && netif_is_bridge_master(master)) {
                err = mlxsw_sp_port_bridge_join(mlxsw_sp_port, lag_dev, master,
                                                extack);
                if (err)
                        return err;
        }

        /* Join bridges enslaving VLAN uppers of the LAG. */
        netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
                if (!is_vlan_dev(upper_dev))
                        continue;

                master = netdev_master_upper_dev_get(upper_dev);
                if (master && netif_is_bridge_master(master)) {
                        err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
                                                        upper_dev, master,
                                                        extack);
                        if (err)
                                goto err_port_bridge_join;
                }

                /* Count completed iterations (joined or skipped alike) so
                 * the rollback below can replay exactly this many.
                 */
                ++done;
        }

        return 0;

err_port_bridge_join:
        /* Walk the uppers again in the same order, leaving only the bridges
         * joined before the failure.
         */
        netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
                if (!is_vlan_dev(upper_dev))
                        continue;

                master = netdev_master_upper_dev_get(upper_dev);
                if (!master || !netif_is_bridge_master(master))
                        continue;

                if (!done--)
                        break;

                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
        }

        /* Undo the LAG's own bridge join performed above. */
        master = netdev_master_upper_dev_get(lag_dev);
        if (master && netif_is_bridge_master(master))
                mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);

        return err;
}
4447
4448 static void
4449 mlxsw_sp_lag_uppers_bridge_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4450                                  struct net_device *lag_dev)
4451 {
4452         struct net_device *upper_dev;
4453         struct net_device *master;
4454         struct list_head *iter;
4455
4456         netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
4457                 if (!is_vlan_dev(upper_dev))
4458                         continue;
4459
4460                 master = netdev_master_upper_dev_get(upper_dev);
4461                 if (!master)
4462                         continue;
4463
4464                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, master);
4465         }
4466
4467         master = netdev_master_upper_dev_get(lag_dev);
4468         if (master)
4469                 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, master);
4470 }
4471
/* Enslave @mlxsw_sp_port to the LAG device @lag_dev: allocate/lookup a LAG
 * ID (creating the device LAG record for the first member), pick a member
 * slot, replay bridge memberships of the LAG and its uppers, add the port
 * as a collector and update software state. Errors unwind in reverse order
 * of the setup steps.
 */
static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
                                  struct net_device *lag_dev,
                                  struct netlink_ext_ack *extack)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        struct mlxsw_sp_upper *lag;
        u16 lag_id;
        u8 port_index;
        int err;

        err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
        if (err)
                return err;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        /* First member - create the LAG record in the device. */
        if (!lag->ref_count) {
                err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
                if (err)
                        return err;
                lag->dev = lag_dev;
        }

        err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
        if (err)
                return err;

        err = mlxsw_sp_lag_uppers_bridge_join(mlxsw_sp_port, lag_dev,
                                              extack);
        if (err)
                goto err_lag_uppers_bridge_join;

        err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
        if (err)
                goto err_col_port_add;

        mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
                                   mlxsw_sp_port->local_port);
        mlxsw_sp_port->lag_id = lag_id;
        mlxsw_sp_port->lagged = 1;
        lag->ref_count++;

        err = mlxsw_sp_fid_port_join_lag(mlxsw_sp_port);
        if (err)
                goto err_fid_port_join_lag;

        /* Port is no longer usable as a router interface */
        if (mlxsw_sp_port->default_vlan->fid)
                mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port->default_vlan);

        /* Join a router interface configured on the LAG, if exists */
        err = mlxsw_sp_router_port_join_lag(mlxsw_sp_port, lag_dev,
                                            extack);
        if (err)
                goto err_router_join;

        err = mlxsw_sp_netdevice_enslavement_replay(mlxsw_sp, lag_dev, extack);
        if (err)
                goto err_replay;

        return 0;

        /* Unwind in strict reverse order of the setup above. */
err_replay:
        mlxsw_sp_router_port_leave_lag(mlxsw_sp_port, lag_dev);
err_router_join:
        mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);
err_fid_port_join_lag:
        lag->ref_count--;
        mlxsw_sp_port->lagged = 0;
        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
                                     mlxsw_sp_port->local_port);
        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
err_col_port_add:
        mlxsw_sp_lag_uppers_bridge_leave(mlxsw_sp_port, lag_dev);
err_lag_uppers_bridge_join:
        /* If we created the device LAG record above, remove it again. */
        if (!lag->ref_count)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
        return err;
}
4549
/* Release @mlxsw_sp_port from the LAG device @lag_dev: remove it as a
 * collector, flush its VLANs, leave bridges of the LAG and its uppers,
 * destroy the device LAG record when this was the last member, and clear
 * software state. No-op if the port is not lagged.
 */
static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
                                    struct net_device *lag_dev)
{
        struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
        u16 lag_id = mlxsw_sp_port->lag_id;
        struct mlxsw_sp_upper *lag;

        if (!mlxsw_sp_port->lagged)
                return;
        lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
        WARN_ON(lag->ref_count == 0);

        mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);

        /* Any VLANs configured on the port are no longer valid */
        mlxsw_sp_port_vlan_flush(mlxsw_sp_port, false);
        mlxsw_sp_port_vlan_cleanup(mlxsw_sp_port->default_vlan);
        /* Make the LAG and its directly linked uppers leave bridges they
         * are member in
         */
        mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);

        mlxsw_sp_fid_port_leave_lag(mlxsw_sp_port);

        /* Last member - remove the LAG record from the device. */
        if (lag->ref_count == 1)
                mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);

        mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
                                     mlxsw_sp_port->local_port);
        mlxsw_sp_port->lagged = 0;
        lag->ref_count--;

        /* Make sure untagged frames are allowed to ingress */
        mlxsw_sp_port_pvid_set(mlxsw_sp_port, MLXSW_SP_DEFAULT_VID,
                               ETH_P_8021Q);
}
4586
4587 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4588                                       u16 lag_id)
4589 {
4590         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4591         char sldr_pl[MLXSW_REG_SLDR_LEN];
4592
4593         mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4594                                          mlxsw_sp_port->local_port);
4595         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4596 }
4597
4598 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4599                                          u16 lag_id)
4600 {
4601         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4602         char sldr_pl[MLXSW_REG_SLDR_LEN];
4603
4604         mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4605                                             mlxsw_sp_port->local_port);
4606         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4607 }
4608
4609 static int
4610 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4611 {
4612         int err;
4613
4614         err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4615                                            mlxsw_sp_port->lag_id);
4616         if (err)
4617                 return err;
4618
4619         err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4620         if (err)
4621                 goto err_dist_port_add;
4622
4623         return 0;
4624
4625 err_dist_port_add:
4626         mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4627         return err;
4628 }
4629
4630 static int
4631 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4632 {
4633         int err;
4634
4635         err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4636                                             mlxsw_sp_port->lag_id);
4637         if (err)
4638                 return err;
4639
4640         err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4641                                             mlxsw_sp_port->lag_id);
4642         if (err)
4643                 goto err_col_port_disable;
4644
4645         return 0;
4646
4647 err_col_port_disable:
4648         mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4649         return err;
4650 }
4651
4652 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4653                                      struct netdev_lag_lower_state_info *info)
4654 {
4655         if (info->tx_enabled)
4656                 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4657         else
4658                 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4659 }
4660
4661 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4662                                  bool enable)
4663 {
4664         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4665         enum mlxsw_reg_spms_state spms_state;
4666         char *spms_pl;
4667         u16 vid;
4668         int err;
4669
4670         spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4671                               MLXSW_REG_SPMS_STATE_DISCARDING;
4672
4673         spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4674         if (!spms_pl)
4675                 return -ENOMEM;
4676         mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4677
4678         for (vid = 0; vid < VLAN_N_VID; vid++)
4679                 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4680
4681         err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4682         kfree(spms_pl);
4683         return err;
4684 }
4685
/* Prepare a port for enslavement to an Open vSwitch master: switch it to
 * virtual-port mode, set all spanning tree states to forwarding, join the
 * VLAN range and disable learning on it so OVS fully controls forwarding.
 * Unwinds everything in reverse order on failure.
 */
static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
{
	u16 vid = 1;
	int err;

	err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
	if (err)
		return err;
	err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
	if (err)
		goto err_port_stp_set;
	/* Presumably (vid_begin, count/vid_end) covering VIDs 1..4094,
	 * member and egress-tagged -- confirm against the helper's contract.
	 */
	err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
				     true, false);
	if (err)
		goto err_port_vlan_set;

	/* Disable learning on VIDs 1..VLAN_N_VID - 1 */
	for (; vid <= VLAN_N_VID - 1; vid++) {
		err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
						     vid, false);
		if (err)
			goto err_vid_learning_set;
	}

	return 0;

err_vid_learning_set:
	/* Re-enable learning only on the VIDs already processed */
	for (vid--; vid >= 1; vid--)
		mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
err_port_vlan_set:
	mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
err_port_stp_set:
	mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
	return err;
}
4720
4721 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4722 {
4723         u16 vid;
4724
4725         for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4726                 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4727                                                vid, true);
4728
4729         mlxsw_sp_port_vlan_set(mlxsw_sp_port, 1, VLAN_N_VID - 2,
4730                                false, false);
4731         mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4732         mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4733 }
4734
4735 static bool mlxsw_sp_bridge_has_multiple_vxlans(struct net_device *br_dev)
4736 {
4737         unsigned int num_vxlans = 0;
4738         struct net_device *dev;
4739         struct list_head *iter;
4740
4741         netdev_for_each_lower_dev(br_dev, dev, iter) {
4742                 if (netif_is_vxlan(dev))
4743                         num_vxlans++;
4744         }
4745
4746         return num_vxlans > 1;
4747 }
4748
4749 static bool mlxsw_sp_bridge_vxlan_vlan_is_valid(struct net_device *br_dev)
4750 {
4751         DECLARE_BITMAP(vlans, VLAN_N_VID) = {0};
4752         struct net_device *dev;
4753         struct list_head *iter;
4754
4755         netdev_for_each_lower_dev(br_dev, dev, iter) {
4756                 u16 pvid;
4757                 int err;
4758
4759                 if (!netif_is_vxlan(dev))
4760                         continue;
4761
4762                 err = mlxsw_sp_vxlan_mapped_vid(dev, &pvid);
4763                 if (err || !pvid)
4764                         continue;
4765
4766                 if (test_and_set_bit(pvid, vlans))
4767                         return false;
4768         }
4769
4770         return true;
4771 }
4772
4773 static bool mlxsw_sp_bridge_vxlan_is_valid(struct net_device *br_dev,
4774                                            struct netlink_ext_ack *extack)
4775 {
4776         if (br_multicast_enabled(br_dev)) {
4777                 NL_SET_ERR_MSG_MOD(extack, "Multicast can not be enabled on a bridge with a VxLAN device");
4778                 return false;
4779         }
4780
4781         if (!br_vlan_enabled(br_dev) &&
4782             mlxsw_sp_bridge_has_multiple_vxlans(br_dev)) {
4783                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices are not supported in a VLAN-unaware bridge");
4784                 return false;
4785         }
4786
4787         if (br_vlan_enabled(br_dev) &&
4788             !mlxsw_sp_bridge_vxlan_vlan_is_valid(br_dev)) {
4789                 NL_SET_ERR_MSG_MOD(extack, "Multiple VxLAN devices cannot have the same VLAN as PVID and egress untagged");
4790                 return false;
4791         }
4792
4793         return true;
4794 }
4795
4796 static bool mlxsw_sp_netdev_is_master(struct net_device *upper_dev,
4797                                       struct net_device *dev)
4798 {
4799         return upper_dev == netdev_master_upper_dev_get(dev);
4800 }
4801
4802 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
4803                                       unsigned long event, void *ptr,
4804                                       bool process_foreign);
4805
/* Recursively validate the whole upper-device hierarchy above @dev by
 * replaying a synthetic NETDEV_PRECHANGEUPPER for each upper, as if it were
 * being linked now. Used to reject enslavements that would create an
 * unsupported topology. Returns 0 if the entire hierarchy is acceptable.
 *
 * NOTE(review): callers presumably hold RTNL, as required for the RCU upper
 * walk -- confirm at call sites.
 */
static int mlxsw_sp_netdevice_validate_uppers(struct mlxsw_sp *mlxsw_sp,
					      struct net_device *dev,
					      struct netlink_ext_ack *extack)
{
	struct net_device *upper_dev;
	struct list_head *iter;
	int err;

	netdev_for_each_upper_dev_rcu(dev, upper_dev, iter) {
		/* Synthesize the notifier info a real linking event would
		 * carry, so the regular event handler can judge it.
		 */
		struct netdev_notifier_changeupper_info info = {
			.info = {
				.dev = dev,
				.extack = extack,
			},
			.master = mlxsw_sp_netdev_is_master(upper_dev, dev),
			.upper_dev = upper_dev,
			.linking = true,

			/* upper_info is relevant for LAG devices. But we would
			 * only need this if LAG were a valid upper above
			 * another upper (e.g. a bridge that is a member of a
			 * LAG), and that is never a valid configuration. So we
			 * can keep this as NULL.
			 */
			.upper_info = NULL,
		};

		err = __mlxsw_sp_netdevice_event(mlxsw_sp,
						 NETDEV_PRECHANGEUPPER,
						 &info, true);
		if (err)
			return err;

		/* Recurse to validate uppers of this upper as well */
		err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp, upper_dev,
							 extack);
		if (err)
			return err;
	}

	return 0;
}
4847
/* Handle upper-device events for a front-panel port.
 *
 * NETDEV_PRECHANGEUPPER validates a proposed linking and may veto it with an
 * extack message; NETDEV_CHANGEUPPER reacts to a performed (un)linking by
 * joining/leaving bridges, LAGs and OVS in the device.
 *
 * @lower_dev: the device whose notifier fired (the port itself, or the LAG
 *	       above it when called per-LAG-lower)
 * @dev: the port netdev
 * @replay_deslavement: whether to replay configuration towards @lower_dev
 *	       after it was deslaved; false when the caller handles the replay
 *	       itself (e.g. once per LAG rather than per port)
 */
static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
					       struct net_device *dev,
					       unsigned long event, void *ptr,
					       bool replay_deslavement)
{
	struct netdev_notifier_changeupper_info *info;
	struct mlxsw_sp_port *mlxsw_sp_port;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	struct mlxsw_sp *mlxsw_sp;
	int err = 0;
	u16 proto;

	mlxsw_sp_port = netdev_priv(dev);
	mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	info = ptr;
	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		/* Only these upper types can be offloaded above a port */
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(upper_dev) &&
		    !netif_is_bridge_master(upper_dev) &&
		    !netif_is_ovs_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking needs no validation */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* Validate the hierarchy above the new upper, unless it is a
		 * bridge we already offload (whose uppers were validated).
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
								 upper_dev,
								 extack);
			if (err)
				return err;
		}
		if (netif_is_lag_master(upper_dev) &&
		    !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
					       info->upper_info, extack))
			return -EINVAL;
		if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is a LAG master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
		    !netif_is_lag_master(vlan_dev_real_dev(upper_dev))) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on a LAG port");
			return -EINVAL;
		}
		if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Master device is an OVS master and this device has a VLAN");
			return -EINVAL;
		}
		if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Can not put a VLAN on an OVS port");
			return -EINVAL;
		}
		if (netif_is_bridge_master(upper_dev)) {
			/* Only 802.1Q and 802.1AD VLAN-aware bridges are
			 * supported, and 802.1AD only without VLAN uppers
			 * on the enslaved port.
			 */
			br_vlan_get_proto(upper_dev, &proto);
			if (br_vlan_enabled(upper_dev) &&
			    proto != ETH_P_8021Q && proto != ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port to a bridge with unknown VLAN protocol is not supported");
				return -EOPNOTSUPP;
			}
			if (vlan_uses_dev(lower_dev) &&
			    br_vlan_enabled(upper_dev) &&
			    proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Enslaving a port that already has a VLAN upper to an 802.1ad bridge is not supported");
				return -EOPNOTSUPP;
			}
		}
		if (netif_is_bridge_port(lower_dev) && is_vlan_dev(upper_dev)) {
			struct net_device *br_dev = netdev_master_upper_dev_get(lower_dev);

			if (br_vlan_enabled(br_dev)) {
				br_vlan_get_proto(br_dev, &proto);
				if (proto == ETH_P_8021AD) {
					NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a port enslaved to an 802.1ad bridge");
					return -EOPNOTSUPP;
				}
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		if (is_vlan_dev(upper_dev) && mlxsw_sp_port->security) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are not supported on a locked port");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								lower_dev,
								upper_dev,
								extack);
			} else {
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   lower_dev,
							   upper_dev);
				if (!replay_deslavement)
					break;
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      lower_dev);
			}
		} else if (netif_is_lag_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
							     upper_dev, extack);
			} else {
				/* Stop egressing LAG traffic before leaving */
				mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
				mlxsw_sp_port_lag_leave(mlxsw_sp_port,
							upper_dev);
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      dev);
			}
		} else if (netif_is_ovs_master(upper_dev)) {
			if (info->linking)
				err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
			else
				mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
		} else if (netif_is_macvlan(upper_dev)) {
			/* A macvlan's MAC may back a router interface; drop
			 * it when the macvlan is unlinked.
			 */
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		} else if (is_vlan_dev(upper_dev)) {
			struct net_device *br_dev;

			/* Only care about a bridged VLAN upper being unlinked
			 * from the port; its bridge must be left as well.
			 */
			if (!netif_is_bridge_port(upper_dev))
				break;
			if (info->linking)
				break;
			br_dev = netdev_master_upper_dev_get(upper_dev);
			mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev,
						   br_dev);
		}
		break;
	}

	return err;
}
5003
5004 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
5005                                                unsigned long event, void *ptr)
5006 {
5007         struct netdev_notifier_changelowerstate_info *info;
5008         struct mlxsw_sp_port *mlxsw_sp_port;
5009         int err;
5010
5011         mlxsw_sp_port = netdev_priv(dev);
5012         info = ptr;
5013
5014         switch (event) {
5015         case NETDEV_CHANGELOWERSTATE:
5016                 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
5017                         err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
5018                                                         info->lower_state_info);
5019                         if (err)
5020                                 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
5021                 }
5022                 break;
5023         }
5024
5025         return 0;
5026 }
5027
5028 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
5029                                          struct net_device *port_dev,
5030                                          unsigned long event, void *ptr,
5031                                          bool replay_deslavement)
5032 {
5033         switch (event) {
5034         case NETDEV_PRECHANGEUPPER:
5035         case NETDEV_CHANGEUPPER:
5036                 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
5037                                                            event, ptr,
5038                                                            replay_deslavement);
5039         case NETDEV_CHANGELOWERSTATE:
5040                 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
5041                                                            ptr);
5042         }
5043
5044         return 0;
5045 }
5046
5047 /* Called for LAG or its upper VLAN after the per-LAG-lower processing was done,
5048  * to do any per-LAG / per-LAG-upper processing.
5049  */
5050 static int mlxsw_sp_netdevice_post_lag_event(struct net_device *dev,
5051                                              unsigned long event,
5052                                              void *ptr)
5053 {
5054         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(dev);
5055         struct netdev_notifier_changeupper_info *info = ptr;
5056
5057         if (!mlxsw_sp)
5058                 return 0;
5059
5060         switch (event) {
5061         case NETDEV_CHANGEUPPER:
5062                 if (info->linking)
5063                         break;
5064                 if (netif_is_bridge_master(info->upper_dev))
5065                         mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp, dev);
5066                 break;
5067         }
5068         return 0;
5069 }
5070
5071 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
5072                                         unsigned long event, void *ptr)
5073 {
5074         struct net_device *dev;
5075         struct list_head *iter;
5076         int ret;
5077
5078         netdev_for_each_lower_dev(lag_dev, dev, iter) {
5079                 if (mlxsw_sp_port_dev_check(dev)) {
5080                         ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
5081                                                             ptr, false);
5082                         if (ret)
5083                                 return ret;
5084                 }
5085         }
5086
5087         return mlxsw_sp_netdevice_post_lag_event(lag_dev, event, ptr);
5088 }
5089
/* Handle upper-device events for a VLAN device on top of a port.
 *
 * PRECHANGEUPPER validates the proposed upper (only bridge, macvlan and L3
 * master are allowed) and, for bridges, validates the VxLAN configuration
 * and the hierarchy above. CHANGEUPPER joins/leaves the bridge or drops a
 * macvlan-backed router interface.
 *
 * @vid: VLAN ID of @vlan_dev (unused here; kept for symmetry with callers)
 * @replay_deslavement: whether to replay configuration towards @vlan_dev
 *	       after it left a bridge; false when the caller replays itself
 *	       (e.g. once per LAG rather than per member port)
 */
static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
					      struct net_device *dev,
					      unsigned long event, void *ptr,
					      u16 vid, bool replay_deslavement)
{
	struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
	struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	int err = 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!netif_is_bridge_master(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EINVAL;
		}
		/* Unlinking needs no validation */
		if (!info->linking)
			break;
		if (netif_is_bridge_master(upper_dev) &&
		    !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp, upper_dev) &&
		    mlxsw_sp_bridge_has_vxlan(upper_dev) &&
		    !mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		/* Validate the hierarchy above the new upper, unless it is a
		 * bridge we already offload (whose uppers were validated).
		 */
		if (netdev_has_any_upper_dev(upper_dev) &&
		    (!netif_is_bridge_master(upper_dev) ||
		     !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
							  upper_dev))) {
			err = mlxsw_sp_netdevice_validate_uppers(mlxsw_sp,
								 upper_dev,
								 extack);
			if (err)
				return err;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (netif_is_bridge_master(upper_dev)) {
			if (info->linking) {
				err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
								vlan_dev,
								upper_dev,
								extack);
			} else {
				mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
							   vlan_dev,
							   upper_dev);
				if (!replay_deslavement)
					break;
				mlxsw_sp_netdevice_deslavement_replay(mlxsw_sp,
								      vlan_dev);
			}
		} else if (netif_is_macvlan(upper_dev)) {
			/* Drop the router interface backed by the macvlan's
			 * MAC when the macvlan is unlinked.
			 */
			if (!info->linking)
				mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		}
		break;
	}

	return err;
}
5157
5158 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
5159                                                   struct net_device *lag_dev,
5160                                                   unsigned long event,
5161                                                   void *ptr, u16 vid)
5162 {
5163         struct net_device *dev;
5164         struct list_head *iter;
5165         int ret;
5166
5167         netdev_for_each_lower_dev(lag_dev, dev, iter) {
5168                 if (mlxsw_sp_port_dev_check(dev)) {
5169                         ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
5170                                                                  event, ptr,
5171                                                                  vid, false);
5172                         if (ret)
5173                                 return ret;
5174                 }
5175         }
5176
5177         return mlxsw_sp_netdevice_post_lag_event(vlan_dev, event, ptr);
5178 }
5179
5180 static int mlxsw_sp_netdevice_bridge_vlan_event(struct mlxsw_sp *mlxsw_sp,
5181                                                 struct net_device *vlan_dev,
5182                                                 struct net_device *br_dev,
5183                                                 unsigned long event, void *ptr,
5184                                                 u16 vid, bool process_foreign)
5185 {
5186         struct netdev_notifier_changeupper_info *info = ptr;
5187         struct netlink_ext_ack *extack;
5188         struct net_device *upper_dev;
5189
5190         if (!process_foreign && !mlxsw_sp_lower_get(vlan_dev))
5191                 return 0;
5192
5193         extack = netdev_notifier_info_to_extack(&info->info);
5194
5195         switch (event) {
5196         case NETDEV_PRECHANGEUPPER:
5197                 upper_dev = info->upper_dev;
5198                 if (!netif_is_macvlan(upper_dev) &&
5199                     !netif_is_l3_master(upper_dev)) {
5200                         NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5201                         return -EOPNOTSUPP;
5202                 }
5203                 break;
5204         case NETDEV_CHANGEUPPER:
5205                 upper_dev = info->upper_dev;
5206                 if (info->linking)
5207                         break;
5208                 if (netif_is_macvlan(upper_dev))
5209                         mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
5210                 break;
5211         }
5212
5213         return 0;
5214 }
5215
5216 static int mlxsw_sp_netdevice_vlan_event(struct mlxsw_sp *mlxsw_sp,
5217                                          struct net_device *vlan_dev,
5218                                          unsigned long event, void *ptr,
5219                                          bool process_foreign)
5220 {
5221         struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
5222         u16 vid = vlan_dev_vlan_id(vlan_dev);
5223
5224         if (mlxsw_sp_port_dev_check(real_dev))
5225                 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
5226                                                           event, ptr, vid,
5227                                                           true);
5228         else if (netif_is_lag_master(real_dev))
5229                 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
5230                                                               real_dev, event,
5231                                                               ptr, vid);
5232         else if (netif_is_bridge_master(real_dev))
5233                 return mlxsw_sp_netdevice_bridge_vlan_event(mlxsw_sp, vlan_dev,
5234                                                             real_dev, event,
5235                                                             ptr, vid,
5236                                                             process_foreign);
5237
5238         return 0;
5239 }
5240
/* Handle upper-device events for a bridge netdevice.
 *
 * PRECHANGEUPPER only allows VLAN, macvlan and L3-master uppers, rejects
 * uppers above an 802.1ad bridge and rejects VLAN uppers with a non-802.1q
 * protocol. CHANGEUPPER cleans up router state when an upper is unlinked.
 *
 * @process_foreign: also process bridges with no lower that belongs to us
 */
static int mlxsw_sp_netdevice_bridge_event(struct mlxsw_sp *mlxsw_sp,
					   struct net_device *br_dev,
					   unsigned long event, void *ptr,
					   bool process_foreign)
{
	struct netdev_notifier_changeupper_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;
	u16 proto;

	/* Ignore bridges we do not offload unless told otherwise */
	if (!process_foreign && !mlxsw_sp_lower_get(br_dev))
		return 0;

	extack = netdev_notifier_info_to_extack(&info->info);

	switch (event) {
	case NETDEV_PRECHANGEUPPER:
		upper_dev = info->upper_dev;
		if (!is_vlan_dev(upper_dev) &&
		    !netif_is_macvlan(upper_dev) &&
		    !netif_is_l3_master(upper_dev)) {
			NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
			return -EOPNOTSUPP;
		}
		/* Unlinking needs no validation */
		if (!info->linking)
			break;
		if (br_vlan_enabled(br_dev)) {
			br_vlan_get_proto(br_dev, &proto);
			if (proto == ETH_P_8021AD) {
				NL_SET_ERR_MSG_MOD(extack, "Upper devices are not supported on top of an 802.1ad bridge");
				return -EOPNOTSUPP;
			}
		}
		if (is_vlan_dev(upper_dev) &&
		    ntohs(vlan_dev_vlan_proto(upper_dev)) != ETH_P_8021Q) {
			NL_SET_ERR_MSG_MOD(extack, "VLAN uppers are only supported with 802.1q VLAN protocol");
			return -EOPNOTSUPP;
		}
		break;
	case NETDEV_CHANGEUPPER:
		upper_dev = info->upper_dev;
		if (info->linking)
			break;
		/* Tear down router state tied to the unlinked upper */
		if (is_vlan_dev(upper_dev))
			mlxsw_sp_rif_destroy_by_dev(mlxsw_sp, upper_dev);
		if (netif_is_macvlan(upper_dev))
			mlxsw_sp_rif_macvlan_del(mlxsw_sp, upper_dev);
		break;
	}

	return 0;
}
5293
5294 static int mlxsw_sp_netdevice_macvlan_event(struct net_device *macvlan_dev,
5295                                             unsigned long event, void *ptr)
5296 {
5297         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_lower_get(macvlan_dev);
5298         struct netdev_notifier_changeupper_info *info = ptr;
5299         struct netlink_ext_ack *extack;
5300         struct net_device *upper_dev;
5301
5302         if (!mlxsw_sp || event != NETDEV_PRECHANGEUPPER)
5303                 return 0;
5304
5305         extack = netdev_notifier_info_to_extack(&info->info);
5306         upper_dev = info->upper_dev;
5307
5308         if (!netif_is_l3_master(upper_dev)) {
5309                 NL_SET_ERR_MSG_MOD(extack, "Unknown upper device type");
5310                 return -EOPNOTSUPP;
5311         }
5312
5313         return 0;
5314 }
5315
/* Handle netdev notifier events for a VxLAN device. Joins the VxLAN device
 * to an offloaded bridge (mapping it under an mlxsw device) when it is
 * linked / brought up, and leaves the bridge when it is unlinked / brought
 * down. VLAN-aware bridges are handled elsewhere via VLAN add/del, hence
 * the br_vlan_enabled() early returns below.
 */
static int mlxsw_sp_netdevice_vxlan_event(struct mlxsw_sp *mlxsw_sp,
					  struct net_device *dev,
					  unsigned long event, void *ptr)
{
	struct netdev_notifier_changeupper_info *cu_info;
	struct netdev_notifier_info *info = ptr;
	struct netlink_ext_ack *extack;
	struct net_device *upper_dev;

	extack = netdev_notifier_info_to_extack(info);

	switch (event) {
	case NETDEV_CHANGEUPPER:
		/* Only NETDEV_CHANGEUPPER carries a changeupper_info; the
		 * other handled events pass a plain netdev_notifier_info.
		 */
		cu_info = container_of(info,
				       struct netdev_notifier_changeupper_info,
				       info);
		upper_dev = cu_info->upper_dev;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		/* Ignore bridges with no mlxsw device beneath them. */
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		if (!mlxsw_sp_bridge_vxlan_is_valid(upper_dev, extack))
			return -EOPNOTSUPP;
		if (cu_info->linking) {
			/* The join is performed on NETDEV_PRE_UP instead when
			 * the device is not yet running.
			 */
			if (!netif_running(dev))
				return 0;
			/* When the bridge is VLAN-aware, the VNI of the VxLAN
			 * device needs to be mapped to a VLAN, but at this
			 * point no VLANs are configured on the VxLAN device
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev,
							  dev, 0, extack);
		} else {
			/* VLANs were already flushed, which triggered the
			 * necessary cleanup
			 */
			if (br_vlan_enabled(upper_dev))
				return 0;
			mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		}
		break;
	case NETDEV_PRE_UP:
		/* Device coming up while already enslaved to an offloaded
		 * bridge: perform the deferred join now.
		 */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		return mlxsw_sp_bridge_vxlan_join(mlxsw_sp, upper_dev, dev, 0,
						  extack);
	case NETDEV_DOWN:
		/* Device going down while enslaved: leave the bridge. */
		upper_dev = netdev_master_upper_dev_get(dev);
		if (!upper_dev)
			return 0;
		if (!netif_is_bridge_master(upper_dev))
			return 0;
		if (!mlxsw_sp_lower_get(upper_dev))
			return 0;
		mlxsw_sp_bridge_vxlan_leave(mlxsw_sp, dev);
		break;
	}

	return 0;
}
5383
5384 static int __mlxsw_sp_netdevice_event(struct mlxsw_sp *mlxsw_sp,
5385                                       unsigned long event, void *ptr,
5386                                       bool process_foreign)
5387 {
5388         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
5389         struct mlxsw_sp_span_entry *span_entry;
5390         int err = 0;
5391
5392         if (event == NETDEV_UNREGISTER) {
5393                 span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, dev);
5394                 if (span_entry)
5395                         mlxsw_sp_span_entry_invalidate(mlxsw_sp, span_entry);
5396         }
5397
5398         if (netif_is_vxlan(dev))
5399                 err = mlxsw_sp_netdevice_vxlan_event(mlxsw_sp, dev, event, ptr);
5400         else if (mlxsw_sp_port_dev_check(dev))
5401                 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr, true);
5402         else if (netif_is_lag_master(dev))
5403                 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
5404         else if (is_vlan_dev(dev))
5405                 err = mlxsw_sp_netdevice_vlan_event(mlxsw_sp, dev, event, ptr,
5406                                                     process_foreign);
5407         else if (netif_is_bridge_master(dev))
5408                 err = mlxsw_sp_netdevice_bridge_event(mlxsw_sp, dev, event, ptr,
5409                                                       process_foreign);
5410         else if (netif_is_macvlan(dev))
5411                 err = mlxsw_sp_netdevice_macvlan_event(dev, event, ptr);
5412
5413         return err;
5414 }
5415
5416 static int mlxsw_sp_netdevice_event(struct notifier_block *nb,
5417                                     unsigned long event, void *ptr)
5418 {
5419         struct mlxsw_sp *mlxsw_sp;
5420         int err;
5421
5422         mlxsw_sp = container_of(nb, struct mlxsw_sp, netdevice_nb);
5423         mlxsw_sp_span_respin(mlxsw_sp);
5424         err = __mlxsw_sp_netdevice_event(mlxsw_sp, event, ptr, false);
5425
5426         return notifier_from_errno(err);
5427 }
5428
/* PCI device ID tables and PCI driver stubs, one pair per Spectrum ASIC
 * generation (SP1..SP4). The pci_driver structures carry only name and ID
 * table; probe/remove are filled in by the mlxsw PCI core when registered
 * via mlxsw_pci_driver_register() in mlxsw_sp_module_init().
 */
static const struct pci_device_id mlxsw_sp1_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
	{0, },
};

static struct pci_driver mlxsw_sp1_pci_driver = {
	.name = mlxsw_sp1_driver_name,
	.id_table = mlxsw_sp1_pci_id_table,
};

static const struct pci_device_id mlxsw_sp2_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM2), 0},
	{0, },
};

static struct pci_driver mlxsw_sp2_pci_driver = {
	.name = mlxsw_sp2_driver_name,
	.id_table = mlxsw_sp2_pci_id_table,
};

static const struct pci_device_id mlxsw_sp3_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM3), 0},
	{0, },
};

static struct pci_driver mlxsw_sp3_pci_driver = {
	.name = mlxsw_sp3_driver_name,
	.id_table = mlxsw_sp3_pci_id_table,
};

static const struct pci_device_id mlxsw_sp4_pci_id_table[] = {
	{PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM4), 0},
	{0, },
};

static struct pci_driver mlxsw_sp4_pci_driver = {
	.name = mlxsw_sp4_driver_name,
	.id_table = mlxsw_sp4_pci_id_table,
};
5468
5469 static int __init mlxsw_sp_module_init(void)
5470 {
5471         int err;
5472
5473         err = mlxsw_core_driver_register(&mlxsw_sp1_driver);
5474         if (err)
5475                 return err;
5476
5477         err = mlxsw_core_driver_register(&mlxsw_sp2_driver);
5478         if (err)
5479                 goto err_sp2_core_driver_register;
5480
5481         err = mlxsw_core_driver_register(&mlxsw_sp3_driver);
5482         if (err)
5483                 goto err_sp3_core_driver_register;
5484
5485         err = mlxsw_core_driver_register(&mlxsw_sp4_driver);
5486         if (err)
5487                 goto err_sp4_core_driver_register;
5488
5489         err = mlxsw_pci_driver_register(&mlxsw_sp1_pci_driver);
5490         if (err)
5491                 goto err_sp1_pci_driver_register;
5492
5493         err = mlxsw_pci_driver_register(&mlxsw_sp2_pci_driver);
5494         if (err)
5495                 goto err_sp2_pci_driver_register;
5496
5497         err = mlxsw_pci_driver_register(&mlxsw_sp3_pci_driver);
5498         if (err)
5499                 goto err_sp3_pci_driver_register;
5500
5501         err = mlxsw_pci_driver_register(&mlxsw_sp4_pci_driver);
5502         if (err)
5503                 goto err_sp4_pci_driver_register;
5504
5505         return 0;
5506
5507 err_sp4_pci_driver_register:
5508         mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
5509 err_sp3_pci_driver_register:
5510         mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
5511 err_sp2_pci_driver_register:
5512         mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5513 err_sp1_pci_driver_register:
5514         mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
5515 err_sp4_core_driver_register:
5516         mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
5517 err_sp3_core_driver_register:
5518         mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5519 err_sp2_core_driver_register:
5520         mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5521         return err;
5522 }
5523
5524 static void __exit mlxsw_sp_module_exit(void)
5525 {
5526         mlxsw_pci_driver_unregister(&mlxsw_sp4_pci_driver);
5527         mlxsw_pci_driver_unregister(&mlxsw_sp3_pci_driver);
5528         mlxsw_pci_driver_unregister(&mlxsw_sp2_pci_driver);
5529         mlxsw_pci_driver_unregister(&mlxsw_sp1_pci_driver);
5530         mlxsw_core_driver_unregister(&mlxsw_sp4_driver);
5531         mlxsw_core_driver_unregister(&mlxsw_sp3_driver);
5532         mlxsw_core_driver_unregister(&mlxsw_sp2_driver);
5533         mlxsw_core_driver_unregister(&mlxsw_sp1_driver);
5534 }
5535
module_init(mlxsw_sp_module_init);
module_exit(mlxsw_sp_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Spectrum driver");
/* Expose the PCI ID tables so userspace (udev/modprobe) can autoload the
 * module when a matching device appears.
 */
MODULE_DEVICE_TABLE(pci, mlxsw_sp1_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp2_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp3_pci_id_table);
MODULE_DEVICE_TABLE(pci, mlxsw_sp4_pci_id_table);
/* linux-libre: MODULE_FIRMWARE() declarations were removed below. */
/*(DEBLOBBED)*/