2 * drivers/net/ethernet/mellanox/mlxsw/spectrum.c
3 * Copyright (c) 2015-2017 Mellanox Technologies. All rights reserved.
4 * Copyright (c) 2015-2017 Jiri Pirko <jiri@mellanox.com>
5 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/types.h>
40 #include <linux/pci.h>
41 #include <linux/netdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/ethtool.h>
44 #include <linux/slab.h>
45 #include <linux/device.h>
46 #include <linux/skbuff.h>
47 #include <linux/if_vlan.h>
48 #include <linux/if_bridge.h>
49 #include <linux/workqueue.h>
50 #include <linux/jiffies.h>
51 #include <linux/bitops.h>
52 #include <linux/list.h>
53 #include <linux/notifier.h>
54 #include <linux/dcbnl.h>
55 #include <linux/inetdevice.h>
56 #include <net/switchdev.h>
57 #include <net/pkt_cls.h>
58 #include <net/tc_act/tc_mirred.h>
59 #include <net/netevent.h>
60 #include <net/tc_act/tc_sample.h>
61 #include <net/addrconf.h>
70 #include "spectrum_cnt.h"
71 #include "spectrum_dpipe.h"
72 #include "../mlxfw/mlxfw.h"
74 #define MLXSW_FWREV_MAJOR 13
75 #define MLXSW_FWREV_MINOR 1420
76 #define MLXSW_FWREV_SUBMINOR 122
78 static const struct mlxsw_fw_rev mlxsw_sp_supported_fw_rev = {
79 .major = MLXSW_FWREV_MAJOR,
80 .minor = MLXSW_FWREV_MINOR,
81 .subminor = MLXSW_FWREV_SUBMINOR
84 #define MLXSW_SP_FW_FILENAME \
87 static const char mlxsw_sp_driver_name[] = "mlxsw_spectrum";
88 static const char mlxsw_sp_driver_version[] = "1.0";
94 MLXSW_ITEM32(tx, hdr, version, 0x00, 28, 4);
97 * Packet control type.
98 * 0 - Ethernet control (e.g. EMADs, LACP)
101 MLXSW_ITEM32(tx, hdr, ctl, 0x00, 26, 2);
104 * Packet protocol type. Must be set to 1 (Ethernet).
106 MLXSW_ITEM32(tx, hdr, proto, 0x00, 21, 3);
108 /* tx_hdr_rx_is_router
109 * Packet is sent from the router. Valid for data packets only.
111 MLXSW_ITEM32(tx, hdr, rx_is_router, 0x00, 19, 1);
114 * Indicates if the 'fid' field is valid and should be used for
115 * forwarding lookup. Valid for data packets only.
117 MLXSW_ITEM32(tx, hdr, fid_valid, 0x00, 16, 1);
120 * Switch partition ID. Must be set to 0.
122 MLXSW_ITEM32(tx, hdr, swid, 0x00, 12, 3);
124 /* tx_hdr_control_tclass
125 * Indicates if the packet should use the control TClass and not one
126 * of the data TClasses.
128 MLXSW_ITEM32(tx, hdr, control_tclass, 0x00, 6, 1);
131 * Egress TClass to be used on the egress device on the egress port.
133 MLXSW_ITEM32(tx, hdr, etclass, 0x00, 0, 4);
136 * Destination local port for unicast packets.
137 * Destination multicast ID for multicast packets.
139 * Control packets are directed to a specific egress port, while data
140 * packets are transmitted through the CPU port (0) into the switch partition,
141 * where forwarding rules are applied.
143 MLXSW_ITEM32(tx, hdr, port_mid, 0x04, 16, 16);
146 * Forwarding ID used for L2 forwarding lookup. Valid only if 'fid_valid' is
147 * set, otherwise calculated based on the packet's VID using VID to FID mapping.
148 * Valid for data packets only.
150 MLXSW_ITEM32(tx, hdr, fid, 0x08, 0, 16);
154 * 6 - Control packets
156 MLXSW_ITEM32(tx, hdr, type, 0x0C, 0, 4);
158 struct mlxsw_sp_mlxfw_dev {
159 struct mlxfw_dev mlxfw_dev;
160 struct mlxsw_sp *mlxsw_sp;
163 static int mlxsw_sp_component_query(struct mlxfw_dev *mlxfw_dev,
164 u16 component_index, u32 *p_max_size,
165 u8 *p_align_bits, u16 *p_max_write_size)
167 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
168 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
169 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
170 char mcqi_pl[MLXSW_REG_MCQI_LEN];
173 mlxsw_reg_mcqi_pack(mcqi_pl, component_index);
174 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcqi), mcqi_pl);
177 mlxsw_reg_mcqi_unpack(mcqi_pl, p_max_size, p_align_bits,
180 *p_align_bits = max_t(u8, *p_align_bits, 2);
181 *p_max_write_size = min_t(u16, *p_max_write_size,
182 MLXSW_REG_MCDA_MAX_DATA_LEN);
186 static int mlxsw_sp_fsm_lock(struct mlxfw_dev *mlxfw_dev, u32 *fwhandle)
188 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
189 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
190 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
191 char mcc_pl[MLXSW_REG_MCC_LEN];
195 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, 0, 0);
196 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
200 mlxsw_reg_mcc_unpack(mcc_pl, fwhandle, NULL, &control_state);
201 if (control_state != MLXFW_FSM_STATE_IDLE)
204 mlxsw_reg_mcc_pack(mcc_pl,
205 MLXSW_REG_MCC_INSTRUCTION_LOCK_UPDATE_HANDLE,
207 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
210 static int mlxsw_sp_fsm_component_update(struct mlxfw_dev *mlxfw_dev,
211 u32 fwhandle, u16 component_index,
214 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
215 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
216 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
217 char mcc_pl[MLXSW_REG_MCC_LEN];
219 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_UPDATE_COMPONENT,
220 component_index, fwhandle, component_size);
221 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
224 static int mlxsw_sp_fsm_block_download(struct mlxfw_dev *mlxfw_dev,
225 u32 fwhandle, u8 *data, u16 size,
228 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
229 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
230 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
231 char mcda_pl[MLXSW_REG_MCDA_LEN];
233 mlxsw_reg_mcda_pack(mcda_pl, fwhandle, offset, size, data);
234 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcda), mcda_pl);
237 static int mlxsw_sp_fsm_component_verify(struct mlxfw_dev *mlxfw_dev,
238 u32 fwhandle, u16 component_index)
240 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
241 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
242 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
243 char mcc_pl[MLXSW_REG_MCC_LEN];
245 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_VERIFY_COMPONENT,
246 component_index, fwhandle, 0);
247 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
250 static int mlxsw_sp_fsm_activate(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
252 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
253 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
254 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
255 char mcc_pl[MLXSW_REG_MCC_LEN];
257 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_ACTIVATE, 0,
259 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
262 static int mlxsw_sp_fsm_query_state(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
263 enum mlxfw_fsm_state *fsm_state,
264 enum mlxfw_fsm_state_err *fsm_state_err)
266 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
267 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
268 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
269 char mcc_pl[MLXSW_REG_MCC_LEN];
274 mlxsw_reg_mcc_pack(mcc_pl, 0, 0, fwhandle, 0);
275 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
279 mlxsw_reg_mcc_unpack(mcc_pl, NULL, &error_code, &control_state);
280 *fsm_state = control_state;
281 *fsm_state_err = min_t(enum mlxfw_fsm_state_err, error_code,
282 MLXFW_FSM_STATE_ERR_MAX);
286 static void mlxsw_sp_fsm_cancel(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
288 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
289 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
290 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
291 char mcc_pl[MLXSW_REG_MCC_LEN];
293 mlxsw_reg_mcc_pack(mcc_pl, MLXSW_REG_MCC_INSTRUCTION_CANCEL, 0,
295 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
298 static void mlxsw_sp_fsm_release(struct mlxfw_dev *mlxfw_dev, u32 fwhandle)
300 struct mlxsw_sp_mlxfw_dev *mlxsw_sp_mlxfw_dev =
301 container_of(mlxfw_dev, struct mlxsw_sp_mlxfw_dev, mlxfw_dev);
302 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_mlxfw_dev->mlxsw_sp;
303 char mcc_pl[MLXSW_REG_MCC_LEN];
305 mlxsw_reg_mcc_pack(mcc_pl,
306 MLXSW_REG_MCC_INSTRUCTION_RELEASE_UPDATE_HANDLE, 0,
308 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mcc), mcc_pl);
311 static const struct mlxfw_dev_ops mlxsw_sp_mlxfw_dev_ops = {
312 .component_query = mlxsw_sp_component_query,
313 .fsm_lock = mlxsw_sp_fsm_lock,
314 .fsm_component_update = mlxsw_sp_fsm_component_update,
315 .fsm_block_download = mlxsw_sp_fsm_block_download,
316 .fsm_component_verify = mlxsw_sp_fsm_component_verify,
317 .fsm_activate = mlxsw_sp_fsm_activate,
318 .fsm_query_state = mlxsw_sp_fsm_query_state,
319 .fsm_cancel = mlxsw_sp_fsm_cancel,
320 .fsm_release = mlxsw_sp_fsm_release
323 static int mlxsw_sp_firmware_flash(struct mlxsw_sp *mlxsw_sp,
324 const struct firmware *firmware)
326 struct mlxsw_sp_mlxfw_dev mlxsw_sp_mlxfw_dev = {
328 .ops = &mlxsw_sp_mlxfw_dev_ops,
329 .psid = mlxsw_sp->bus_info->psid,
330 .psid_size = strlen(mlxsw_sp->bus_info->psid),
336 mlxsw_core_fw_flash_start(mlxsw_sp->core);
337 err = mlxfw_firmware_flash(&mlxsw_sp_mlxfw_dev.mlxfw_dev, firmware);
338 mlxsw_core_fw_flash_end(mlxsw_sp->core);
343 static bool mlxsw_sp_fw_rev_ge(const struct mlxsw_fw_rev *a,
344 const struct mlxsw_fw_rev *b)
346 if (a->major != b->major)
347 return a->major > b->major;
348 if (a->minor != b->minor)
349 return a->minor > b->minor;
350 return a->subminor >= b->subminor;
353 static int mlxsw_sp_fw_rev_validate(struct mlxsw_sp *mlxsw_sp)
355 const struct mlxsw_fw_rev *rev = &mlxsw_sp->bus_info->fw_rev;
356 const struct firmware *firmware;
359 if (mlxsw_sp_fw_rev_ge(rev, &mlxsw_sp_supported_fw_rev))
362 dev_info(mlxsw_sp->bus_info->dev, "The firmware version %d.%d.%d out of data\n",
363 rev->major, rev->minor, rev->subminor);
364 dev_info(mlxsw_sp->bus_info->dev, "Upgrading firmware using file %s\n",
365 MLXSW_SP_FW_FILENAME);
367 err = reject_firmware_direct(&firmware, MLXSW_SP_FW_FILENAME,
368 mlxsw_sp->bus_info->dev);
370 dev_err(mlxsw_sp->bus_info->dev, "Could not request firmware file %s\n",
371 MLXSW_SP_FW_FILENAME);
375 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
376 release_firmware(firmware);
380 int mlxsw_sp_flow_counter_get(struct mlxsw_sp *mlxsw_sp,
381 unsigned int counter_index, u64 *packets,
384 char mgpc_pl[MLXSW_REG_MGPC_LEN];
387 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_NOP,
388 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
389 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
393 *packets = mlxsw_reg_mgpc_packet_counter_get(mgpc_pl);
395 *bytes = mlxsw_reg_mgpc_byte_counter_get(mgpc_pl);
399 static int mlxsw_sp_flow_counter_clear(struct mlxsw_sp *mlxsw_sp,
400 unsigned int counter_index)
402 char mgpc_pl[MLXSW_REG_MGPC_LEN];
404 mlxsw_reg_mgpc_pack(mgpc_pl, counter_index, MLXSW_REG_MGPC_OPCODE_CLEAR,
405 MLXSW_REG_FLOW_COUNTER_SET_TYPE_PACKETS_BYTES);
406 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mgpc), mgpc_pl);
409 int mlxsw_sp_flow_counter_alloc(struct mlxsw_sp *mlxsw_sp,
410 unsigned int *p_counter_index)
414 err = mlxsw_sp_counter_alloc(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
418 err = mlxsw_sp_flow_counter_clear(mlxsw_sp, *p_counter_index);
420 goto err_counter_clear;
424 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
429 void mlxsw_sp_flow_counter_free(struct mlxsw_sp *mlxsw_sp,
430 unsigned int counter_index)
432 mlxsw_sp_counter_free(mlxsw_sp, MLXSW_SP_COUNTER_SUB_POOL_FLOW,
436 static void mlxsw_sp_txhdr_construct(struct sk_buff *skb,
437 const struct mlxsw_tx_info *tx_info)
439 char *txhdr = skb_push(skb, MLXSW_TXHDR_LEN);
441 memset(txhdr, 0, MLXSW_TXHDR_LEN);
443 mlxsw_tx_hdr_version_set(txhdr, MLXSW_TXHDR_VERSION_1);
444 mlxsw_tx_hdr_ctl_set(txhdr, MLXSW_TXHDR_ETH_CTL);
445 mlxsw_tx_hdr_proto_set(txhdr, MLXSW_TXHDR_PROTO_ETH);
446 mlxsw_tx_hdr_swid_set(txhdr, 0);
447 mlxsw_tx_hdr_control_tclass_set(txhdr, 1);
448 mlxsw_tx_hdr_port_mid_set(txhdr, tx_info->local_port);
449 mlxsw_tx_hdr_type_set(txhdr, MLXSW_TXHDR_TYPE_CONTROL);
452 int mlxsw_sp_port_vid_stp_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
455 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
456 enum mlxsw_reg_spms_state spms_state;
461 case BR_STATE_FORWARDING:
462 spms_state = MLXSW_REG_SPMS_STATE_FORWARDING;
464 case BR_STATE_LEARNING:
465 spms_state = MLXSW_REG_SPMS_STATE_LEARNING;
467 case BR_STATE_LISTENING: /* fall-through */
468 case BR_STATE_DISABLED: /* fall-through */
469 case BR_STATE_BLOCKING:
470 spms_state = MLXSW_REG_SPMS_STATE_DISCARDING;
476 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
479 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
480 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
482 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
487 static int mlxsw_sp_base_mac_get(struct mlxsw_sp *mlxsw_sp)
489 char spad_pl[MLXSW_REG_SPAD_LEN] = {0};
492 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(spad), spad_pl);
495 mlxsw_reg_spad_base_mac_memcpy_from(spad_pl, mlxsw_sp->base_mac);
499 static int mlxsw_sp_span_init(struct mlxsw_sp *mlxsw_sp)
503 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_SPAN))
506 mlxsw_sp->span.entries_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
508 mlxsw_sp->span.entries = kcalloc(mlxsw_sp->span.entries_count,
509 sizeof(struct mlxsw_sp_span_entry),
511 if (!mlxsw_sp->span.entries)
514 for (i = 0; i < mlxsw_sp->span.entries_count; i++)
515 INIT_LIST_HEAD(&mlxsw_sp->span.entries[i].bound_ports_list);
520 static void mlxsw_sp_span_fini(struct mlxsw_sp *mlxsw_sp)
524 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
525 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
527 WARN_ON_ONCE(!list_empty(&curr->bound_ports_list));
529 kfree(mlxsw_sp->span.entries);
532 static struct mlxsw_sp_span_entry *
533 mlxsw_sp_span_entry_create(struct mlxsw_sp_port *port)
535 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
536 struct mlxsw_sp_span_entry *span_entry;
537 char mpat_pl[MLXSW_REG_MPAT_LEN];
538 u8 local_port = port->local_port;
543 /* find a free entry to use */
545 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
546 if (!mlxsw_sp->span.entries[i].used) {
548 span_entry = &mlxsw_sp->span.entries[i];
555 /* create a new port analayzer entry for local_port */
556 mlxsw_reg_mpat_pack(mpat_pl, index, local_port, true);
557 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
561 span_entry->used = true;
562 span_entry->id = index;
563 span_entry->ref_count = 1;
564 span_entry->local_port = local_port;
568 static void mlxsw_sp_span_entry_destroy(struct mlxsw_sp *mlxsw_sp,
569 struct mlxsw_sp_span_entry *span_entry)
571 u8 local_port = span_entry->local_port;
572 char mpat_pl[MLXSW_REG_MPAT_LEN];
573 int pa_id = span_entry->id;
575 mlxsw_reg_mpat_pack(mpat_pl, pa_id, local_port, false);
576 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpat), mpat_pl);
577 span_entry->used = false;
580 static struct mlxsw_sp_span_entry *
581 mlxsw_sp_span_entry_find(struct mlxsw_sp *mlxsw_sp, u8 local_port)
585 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
586 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
588 if (curr->used && curr->local_port == local_port)
594 static struct mlxsw_sp_span_entry
595 *mlxsw_sp_span_entry_get(struct mlxsw_sp_port *port)
597 struct mlxsw_sp_span_entry *span_entry;
599 span_entry = mlxsw_sp_span_entry_find(port->mlxsw_sp,
602 /* Already exists, just take a reference */
603 span_entry->ref_count++;
607 return mlxsw_sp_span_entry_create(port);
610 static int mlxsw_sp_span_entry_put(struct mlxsw_sp *mlxsw_sp,
611 struct mlxsw_sp_span_entry *span_entry)
613 WARN_ON(!span_entry->ref_count);
614 if (--span_entry->ref_count == 0)
615 mlxsw_sp_span_entry_destroy(mlxsw_sp, span_entry);
619 static bool mlxsw_sp_span_is_egress_mirror(struct mlxsw_sp_port *port)
621 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
622 struct mlxsw_sp_span_inspected_port *p;
625 for (i = 0; i < mlxsw_sp->span.entries_count; i++) {
626 struct mlxsw_sp_span_entry *curr = &mlxsw_sp->span.entries[i];
628 list_for_each_entry(p, &curr->bound_ports_list, list)
629 if (p->local_port == port->local_port &&
630 p->type == MLXSW_SP_SPAN_EGRESS)
/* Convert an MTU to the shared-buffer size (in cells) needed for egress
 * mirroring; 2.5x MTU plus one cell of slack.
 */
static int mlxsw_sp_span_mtu_to_buffsize(const struct mlxsw_sp *mlxsw_sp,
					 int mtu)
{
	return mlxsw_sp_bytes_cells(mlxsw_sp, mtu * 5 / 2) + 1;
}
643 static int mlxsw_sp_span_port_mtu_update(struct mlxsw_sp_port *port, u16 mtu)
645 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
646 char sbib_pl[MLXSW_REG_SBIB_LEN];
649 /* If port is egress mirrored, the shared buffer size should be
650 * updated according to the mtu value
652 if (mlxsw_sp_span_is_egress_mirror(port)) {
653 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp, mtu);
655 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
656 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
658 netdev_err(port->dev, "Could not update shared buffer for mirroring\n");
666 static struct mlxsw_sp_span_inspected_port *
667 mlxsw_sp_span_entry_bound_port_find(struct mlxsw_sp_port *port,
668 struct mlxsw_sp_span_entry *span_entry)
670 struct mlxsw_sp_span_inspected_port *p;
672 list_for_each_entry(p, &span_entry->bound_ports_list, list)
673 if (port->local_port == p->local_port)
679 mlxsw_sp_span_inspected_port_bind(struct mlxsw_sp_port *port,
680 struct mlxsw_sp_span_entry *span_entry,
681 enum mlxsw_sp_span_type type)
683 struct mlxsw_sp_span_inspected_port *inspected_port;
684 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
685 char mpar_pl[MLXSW_REG_MPAR_LEN];
686 char sbib_pl[MLXSW_REG_SBIB_LEN];
687 int pa_id = span_entry->id;
690 /* if it is an egress SPAN, bind a shared buffer to it */
691 if (type == MLXSW_SP_SPAN_EGRESS) {
692 u32 buffsize = mlxsw_sp_span_mtu_to_buffsize(mlxsw_sp,
695 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, buffsize);
696 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
698 netdev_err(port->dev, "Could not create shared buffer for mirroring\n");
703 /* bind the port to the SPAN entry */
704 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
705 (enum mlxsw_reg_mpar_i_e) type, true, pa_id);
706 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
708 goto err_mpar_reg_write;
710 inspected_port = kzalloc(sizeof(*inspected_port), GFP_KERNEL);
711 if (!inspected_port) {
713 goto err_inspected_port_alloc;
715 inspected_port->local_port = port->local_port;
716 inspected_port->type = type;
717 list_add_tail(&inspected_port->list, &span_entry->bound_ports_list);
722 err_inspected_port_alloc:
723 if (type == MLXSW_SP_SPAN_EGRESS) {
724 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
725 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
731 mlxsw_sp_span_inspected_port_unbind(struct mlxsw_sp_port *port,
732 struct mlxsw_sp_span_entry *span_entry,
733 enum mlxsw_sp_span_type type)
735 struct mlxsw_sp_span_inspected_port *inspected_port;
736 struct mlxsw_sp *mlxsw_sp = port->mlxsw_sp;
737 char mpar_pl[MLXSW_REG_MPAR_LEN];
738 char sbib_pl[MLXSW_REG_SBIB_LEN];
739 int pa_id = span_entry->id;
741 inspected_port = mlxsw_sp_span_entry_bound_port_find(port, span_entry);
745 /* remove the inspected port */
746 mlxsw_reg_mpar_pack(mpar_pl, port->local_port,
747 (enum mlxsw_reg_mpar_i_e) type, false, pa_id);
748 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpar), mpar_pl);
750 /* remove the SBIB buffer if it was egress SPAN */
751 if (type == MLXSW_SP_SPAN_EGRESS) {
752 mlxsw_reg_sbib_pack(sbib_pl, port->local_port, 0);
753 mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sbib), sbib_pl);
756 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
758 list_del(&inspected_port->list);
759 kfree(inspected_port);
762 static int mlxsw_sp_span_mirror_add(struct mlxsw_sp_port *from,
763 struct mlxsw_sp_port *to,
764 enum mlxsw_sp_span_type type)
766 struct mlxsw_sp *mlxsw_sp = from->mlxsw_sp;
767 struct mlxsw_sp_span_entry *span_entry;
770 span_entry = mlxsw_sp_span_entry_get(to);
774 netdev_dbg(from->dev, "Adding inspected port to SPAN entry %d\n",
777 err = mlxsw_sp_span_inspected_port_bind(from, span_entry, type);
784 mlxsw_sp_span_entry_put(mlxsw_sp, span_entry);
788 static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
790 enum mlxsw_sp_span_type type)
792 struct mlxsw_sp_span_entry *span_entry;
794 span_entry = mlxsw_sp_span_entry_find(from->mlxsw_sp,
797 netdev_err(from->dev, "no span entry found\n");
801 netdev_dbg(from->dev, "removing inspected port from SPAN entry %d\n",
803 mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
806 static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
807 bool enable, u32 rate)
809 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
810 char mpsc_pl[MLXSW_REG_MPSC_LEN];
812 mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
813 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
816 static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
819 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
820 char paos_pl[MLXSW_REG_PAOS_LEN];
822 mlxsw_reg_paos_pack(paos_pl, mlxsw_sp_port->local_port,
823 is_up ? MLXSW_PORT_ADMIN_STATUS_UP :
824 MLXSW_PORT_ADMIN_STATUS_DOWN);
825 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(paos), paos_pl);
828 static int mlxsw_sp_port_dev_addr_set(struct mlxsw_sp_port *mlxsw_sp_port,
831 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
832 char ppad_pl[MLXSW_REG_PPAD_LEN];
834 mlxsw_reg_ppad_pack(ppad_pl, true, mlxsw_sp_port->local_port);
835 mlxsw_reg_ppad_mac_memcpy_to(ppad_pl, addr);
836 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppad), ppad_pl);
839 static int mlxsw_sp_port_dev_addr_init(struct mlxsw_sp_port *mlxsw_sp_port)
841 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
842 unsigned char *addr = mlxsw_sp_port->dev->dev_addr;
844 ether_addr_copy(addr, mlxsw_sp->base_mac);
845 addr[ETH_ALEN - 1] += mlxsw_sp_port->local_port;
846 return mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr);
849 static int mlxsw_sp_port_mtu_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 mtu)
851 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
852 char pmtu_pl[MLXSW_REG_PMTU_LEN];
856 mtu += MLXSW_TXHDR_LEN + ETH_HLEN;
857 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, 0);
858 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
861 max_mtu = mlxsw_reg_pmtu_max_mtu_get(pmtu_pl);
866 mlxsw_reg_pmtu_pack(pmtu_pl, mlxsw_sp_port->local_port, mtu);
867 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmtu), pmtu_pl);
870 static int mlxsw_sp_port_swid_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 swid)
872 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
873 char pspa_pl[MLXSW_REG_PSPA_LEN];
875 mlxsw_reg_pspa_pack(pspa_pl, swid, mlxsw_sp_port->local_port);
876 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pspa), pspa_pl);
879 int mlxsw_sp_port_vp_mode_set(struct mlxsw_sp_port *mlxsw_sp_port, bool enable)
881 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
882 char svpe_pl[MLXSW_REG_SVPE_LEN];
884 mlxsw_reg_svpe_pack(svpe_pl, mlxsw_sp_port->local_port, enable);
885 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(svpe), svpe_pl);
888 int mlxsw_sp_port_vid_learning_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid,
891 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
895 spvmlr_pl = kmalloc(MLXSW_REG_SPVMLR_LEN, GFP_KERNEL);
898 mlxsw_reg_spvmlr_pack(spvmlr_pl, mlxsw_sp_port->local_port, vid, vid,
900 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvmlr), spvmlr_pl);
905 static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
908 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
909 char spvid_pl[MLXSW_REG_SPVID_LEN];
911 mlxsw_reg_spvid_pack(spvid_pl, mlxsw_sp_port->local_port, vid);
912 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
915 static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
918 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
919 char spaft_pl[MLXSW_REG_SPAFT_LEN];
921 mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
922 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
925 int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
930 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
934 err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
937 err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, true);
939 goto err_port_allow_untagged_set;
942 mlxsw_sp_port->pvid = vid;
945 err_port_allow_untagged_set:
946 __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
951 mlxsw_sp_port_system_port_mapping_set(struct mlxsw_sp_port *mlxsw_sp_port)
953 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
954 char sspr_pl[MLXSW_REG_SSPR_LEN];
956 mlxsw_reg_sspr_pack(sspr_pl, mlxsw_sp_port->local_port);
957 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sspr), sspr_pl);
960 static int mlxsw_sp_port_module_info_get(struct mlxsw_sp *mlxsw_sp,
961 u8 local_port, u8 *p_module,
962 u8 *p_width, u8 *p_lane)
964 char pmlp_pl[MLXSW_REG_PMLP_LEN];
967 mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
968 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
971 *p_module = mlxsw_reg_pmlp_module_get(pmlp_pl, 0);
972 *p_width = mlxsw_reg_pmlp_width_get(pmlp_pl);
973 *p_lane = mlxsw_reg_pmlp_tx_lane_get(pmlp_pl, 0);
977 static int mlxsw_sp_port_module_map(struct mlxsw_sp_port *mlxsw_sp_port,
978 u8 module, u8 width, u8 lane)
980 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
981 char pmlp_pl[MLXSW_REG_PMLP_LEN];
984 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
985 mlxsw_reg_pmlp_width_set(pmlp_pl, width);
986 for (i = 0; i < width; i++) {
987 mlxsw_reg_pmlp_module_set(pmlp_pl, i, module);
988 mlxsw_reg_pmlp_tx_lane_set(pmlp_pl, i, lane + i); /* Rx & Tx */
991 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
994 static int mlxsw_sp_port_module_unmap(struct mlxsw_sp_port *mlxsw_sp_port)
996 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
997 char pmlp_pl[MLXSW_REG_PMLP_LEN];
999 mlxsw_reg_pmlp_pack(pmlp_pl, mlxsw_sp_port->local_port);
1000 mlxsw_reg_pmlp_width_set(pmlp_pl, 0);
1001 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pmlp), pmlp_pl);
1004 static int mlxsw_sp_port_open(struct net_device *dev)
1006 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1009 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
1012 netif_start_queue(dev);
1016 static int mlxsw_sp_port_stop(struct net_device *dev)
1018 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1020 netif_stop_queue(dev);
1021 return mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
1024 static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
1025 struct net_device *dev)
1027 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1028 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1029 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
1030 const struct mlxsw_tx_info tx_info = {
1031 .local_port = mlxsw_sp_port->local_port,
1037 if (mlxsw_core_skb_transmit_busy(mlxsw_sp->core, &tx_info))
1038 return NETDEV_TX_BUSY;
1040 if (unlikely(skb_headroom(skb) < MLXSW_TXHDR_LEN)) {
1041 struct sk_buff *skb_orig = skb;
1043 skb = skb_realloc_headroom(skb, MLXSW_TXHDR_LEN);
1045 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1046 dev_kfree_skb_any(skb_orig);
1047 return NETDEV_TX_OK;
1049 dev_consume_skb_any(skb_orig);
1052 if (eth_skb_pad(skb)) {
1053 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1054 return NETDEV_TX_OK;
1057 mlxsw_sp_txhdr_construct(skb, &tx_info);
1058 /* TX header is consumed by HW on the way so we shouldn't count its
1059 * bytes as being sent.
1061 len = skb->len - MLXSW_TXHDR_LEN;
1063 /* Due to a race we might fail here because of a full queue. In that
1064 * unlikely case we simply drop the packet.
1066 err = mlxsw_core_skb_transmit(mlxsw_sp->core, skb, &tx_info);
1069 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
1070 u64_stats_update_begin(&pcpu_stats->syncp);
1071 pcpu_stats->tx_packets++;
1072 pcpu_stats->tx_bytes += len;
1073 u64_stats_update_end(&pcpu_stats->syncp);
1075 this_cpu_inc(mlxsw_sp_port->pcpu_stats->tx_dropped);
1076 dev_kfree_skb_any(skb);
1078 return NETDEV_TX_OK;
/* ndo_set_rx_mode: intentionally empty -- RX filtering is handled by the
 * switch hardware, not by the host driver.
 */
static void mlxsw_sp_set_rx_mode(struct net_device *dev)
{
}
1085 static int mlxsw_sp_port_set_mac_address(struct net_device *dev, void *p)
1087 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1088 struct sockaddr *addr = p;
1091 if (!is_valid_ether_addr(addr->sa_data))
1092 return -EADDRNOTAVAIL;
1094 err = mlxsw_sp_port_dev_addr_set(mlxsw_sp_port, addr->sa_data);
1097 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1101 static u16 mlxsw_sp_pg_buf_threshold_get(const struct mlxsw_sp *mlxsw_sp,
1104 return 2 * mlxsw_sp_bytes_cells(mlxsw_sp, mtu);
1107 #define MLXSW_SP_CELL_FACTOR 2 /* 2 * cell_size / (IPG + cell_size + 1) */
1109 static u16 mlxsw_sp_pfc_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1112 delay = mlxsw_sp_bytes_cells(mlxsw_sp, DIV_ROUND_UP(delay,
1114 return MLXSW_SP_CELL_FACTOR * delay + mlxsw_sp_bytes_cells(mlxsw_sp,
1118 /* Maximum delay buffer needed in case of PAUSE frames, in bytes.
1119 * Assumes 100m cable and maximum MTU.
1121 #define MLXSW_SP_PAUSE_DELAY 58752
1123 static u16 mlxsw_sp_pg_buf_delay_get(const struct mlxsw_sp *mlxsw_sp, int mtu,
1124 u16 delay, bool pfc, bool pause)
1127 return mlxsw_sp_pfc_delay_get(mlxsw_sp, mtu, delay);
1129 return mlxsw_sp_bytes_cells(mlxsw_sp, MLXSW_SP_PAUSE_DELAY);
1134 static void mlxsw_sp_pg_buf_pack(char *pbmc_pl, int index, u16 size, u16 thres,
1138 mlxsw_reg_pbmc_lossy_buffer_pack(pbmc_pl, index, size);
1140 mlxsw_reg_pbmc_lossless_buffer_pack(pbmc_pl, index, size,
/* Configure the port's headroom (PBMC register): for each traffic class,
 * decide whether its buffer is lossy or lossless based on the prio->TC
 * mapping and the PFC/PAUSE configuration, then program size = threshold +
 * delay cells. Reads the current PBMC state first, modifies it, and writes
 * it back. Returns the mlxsw_reg_write() result. NOTE(review): several
 * declarations and error checks are elided in this extract.
 */
1144 int __mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port, int mtu,
1145 u8 *prio_tc, bool pause_en,
1146 struct ieee_pfc *my_pfc)
1148 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
/* When no PFC config is given, treat PFC as disabled with zero delay. */
1149 u8 pfc_en = !!my_pfc ? my_pfc->pfc_en : 0;
1150 u16 delay = !!my_pfc ? my_pfc->delay : 0;
1151 char pbmc_pl[MLXSW_REG_PBMC_LEN];
1154 mlxsw_reg_pbmc_pack(pbmc_pl, mlxsw_sp_port->local_port, 0, 0);
1155 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
1159 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
1160 bool configure = false;
/* Find a priority mapped to this TC; its PFC bit decides losslessness. */
1166 for (j = 0; j < IEEE_8021QAZ_MAX_TCS; j++) {
1167 if (prio_tc[j] == i) {
1168 pfc = pfc_en & BIT(j);
/* Buffer is lossy only when neither PFC nor global PAUSE applies. */
1177 lossy = !(pfc || pause_en);
1178 thres_cells = mlxsw_sp_pg_buf_threshold_get(mlxsw_sp, mtu);
1179 delay_cells = mlxsw_sp_pg_buf_delay_get(mlxsw_sp, mtu, delay,
1181 mlxsw_sp_pg_buf_pack(pbmc_pl, i, thres_cells + delay_cells,
1182 thres_cells, lossy);
1185 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pbmc), pbmc_pl);
/* Wrapper around __mlxsw_sp_port_headroom_set(): when DCB/ETS is
 * configured, use the DCB prio->TC map and PFC settings; otherwise fall
 * back to an all-zero prio->TC map and no PFC.
 */
1188 static int mlxsw_sp_port_headroom_set(struct mlxsw_sp_port *mlxsw_sp_port,
1189 int mtu, bool pause_en)
1191 u8 def_prio_tc[IEEE_8021QAZ_MAX_TCS] = {0};
1192 bool dcb_en = !!mlxsw_sp_port->dcb.ets;
1193 struct ieee_pfc *my_pfc;
1196 prio_tc = dcb_en ? mlxsw_sp_port->dcb.ets->prio_tc : def_prio_tc;
1197 my_pfc = dcb_en ? mlxsw_sp_port->dcb.pfc : NULL;
1199 return __mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, prio_tc,
/* ndo_change_mtu: reconfigure headroom for the new MTU, propagate the MTU
 * to SPAN, then set the port MTU in hardware. On failure, unwind in
 * reverse order using the old dev->mtu. NOTE(review): the success path
 * (dev->mtu assignment / return 0) is elided in this extract.
 */
1203 static int mlxsw_sp_port_change_mtu(struct net_device *dev, int mtu)
1205 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1206 bool pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1209 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, mtu, pause_en);
1212 err = mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, mtu);
1214 goto err_span_port_mtu_update;
1215 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, mtu);
1217 goto err_port_mtu_set;
/* Error unwind: restore SPAN MTU and headroom for the old MTU. */
1222 mlxsw_sp_span_port_mtu_update(mlxsw_sp_port, dev->mtu);
1223 err_span_port_mtu_update:
1224 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
/* Sum the software-maintained per-CPU packet/byte counters into *stats,
 * using the u64_stats sequence counter for a consistent per-CPU snapshot.
 * tx_dropped is a u32 read without syncp protection (see comment below).
 */
1229 mlxsw_sp_port_get_sw_stats64(const struct net_device *dev,
1230 struct rtnl_link_stats64 *stats)
1232 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1233 struct mlxsw_sp_port_pcpu_stats *p;
1234 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1239 for_each_possible_cpu(i) {
1240 p = per_cpu_ptr(mlxsw_sp_port->pcpu_stats, i);
/* Retry loop: re-read if the writer updated the counters mid-read. */
1242 start = u64_stats_fetch_begin_irq(&p->syncp);
1243 rx_packets = p->rx_packets;
1244 rx_bytes = p->rx_bytes;
1245 tx_packets = p->tx_packets;
1246 tx_bytes = p->tx_bytes;
1247 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1249 stats->rx_packets += rx_packets;
1250 stats->rx_bytes += rx_bytes;
1251 stats->tx_packets += tx_packets;
1252 stats->tx_bytes += tx_bytes;
1253 /* tx_dropped is u32, updated without syncp protection. */
1254 tx_dropped += p->tx_dropped;
1256 stats->tx_dropped = tx_dropped;
/* ndo_has_offload_stats: only IFLA_OFFLOAD_XSTATS_CPU_HIT is supported
 * (return statements elided in this extract).
 */
1260 static bool mlxsw_sp_port_has_offload_stats(const struct net_device *dev, int attr_id)
1263 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
/* ndo_get_offload_stats: CPU-hit stats come from the software per-CPU
 * counters; other attr_ids presumably return an error (elided here).
 */
1270 static int mlxsw_sp_port_get_offload_stats(int attr_id, const struct net_device *dev,
1274 case IFLA_OFFLOAD_XSTATS_CPU_HIT:
1275 return mlxsw_sp_port_get_sw_stats64(dev, sp);
/* Query one PPCNT counter group/priority for this port into ppcnt_pl. */
1281 static int mlxsw_sp_port_get_stats_raw(struct net_device *dev, int grp,
1282 int prio, char *ppcnt_pl)
1284 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1285 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1287 mlxsw_reg_ppcnt_pack(ppcnt_pl, mlxsw_sp_port->local_port, grp, prio);
1288 return mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ppcnt), ppcnt_pl);
/* Fill *stats from the hardware IEEE 802.3 PPCNT counter group. Length
 * errors aggregate in-range, out-of-range and too-long counters; rx_errors
 * is the sum of CRC, frame and length errors.
 */
1291 static int mlxsw_sp_port_get_hw_stats(struct net_device *dev,
1292 struct rtnl_link_stats64 *stats)
1294 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
1297 err = mlxsw_sp_port_get_stats_raw(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT,
1303 mlxsw_reg_ppcnt_a_frames_transmitted_ok_get(ppcnt_pl);
1305 mlxsw_reg_ppcnt_a_frames_received_ok_get(ppcnt_pl);
1307 mlxsw_reg_ppcnt_a_octets_transmitted_ok_get(ppcnt_pl);
1309 mlxsw_reg_ppcnt_a_octets_received_ok_get(ppcnt_pl);
1311 mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get(ppcnt_pl);
1313 stats->rx_crc_errors =
1314 mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get(ppcnt_pl);
1315 stats->rx_frame_errors =
1316 mlxsw_reg_ppcnt_a_alignment_errors_get(ppcnt_pl);
1318 stats->rx_length_errors = (
1319 mlxsw_reg_ppcnt_a_in_range_length_errors_get(ppcnt_pl) +
1320 mlxsw_reg_ppcnt_a_out_of_range_length_field_get(ppcnt_pl) +
1321 mlxsw_reg_ppcnt_a_frame_too_long_errors_get(ppcnt_pl));
1323 stats->rx_errors = (stats->rx_crc_errors +
1324 stats->rx_frame_errors + stats->rx_length_errors);
/* Periodic delayed work: refresh the cached hardware stats (skipped while
 * the carrier is down) and re-arm itself for the next interval.
 */
1330 static void update_stats_cache(struct work_struct *work)
1332 struct mlxsw_sp_port *mlxsw_sp_port =
1333 container_of(work, struct mlxsw_sp_port,
1334 hw_stats.update_dw.work);
1336 if (!netif_carrier_ok(mlxsw_sp_port->dev))
1339 mlxsw_sp_port_get_hw_stats(mlxsw_sp_port->dev,
1340 mlxsw_sp_port->hw_stats.cache);
1343 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw,
1344 MLXSW_HW_STATS_UPDATE_TIME);
1347 /* Return the stats from a cache that is updated periodically,
1348 * as this function might get called in an atomic context.
/* ndo_get_stats64: copy the periodically refreshed cache — no register
 * access here, so it is safe in atomic context.
 */
1351 mlxsw_sp_port_get_stats64(struct net_device *dev,
1352 struct rtnl_link_stats64 *stats)
1354 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1356 memcpy(stats, mlxsw_sp_port->hw_stats.cache, sizeof(*stats));
/* Program VLAN membership/untagged state for [vid_begin, vid_end] via one
 * SPVM register write. The payload is heap-allocated (SPVM is large);
 * the kfree and return are elided in this extract.
 */
1359 static int __mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port,
1360 u16 vid_begin, u16 vid_end,
1361 bool is_member, bool untagged)
1363 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1367 spvm_pl = kmalloc(MLXSW_REG_SPVM_LEN, GFP_KERNEL);
1371 mlxsw_reg_spvm_pack(spvm_pl, mlxsw_sp_port->local_port, vid_begin,
1372 vid_end, is_member, untagged);
1373 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvm), spvm_pl);
/* Set VLAN membership over an arbitrary range by chunking it into
 * SPVM-register-sized sub-ranges (MLXSW_REG_SPVM_REC_MAX_COUNT VIDs each).
 */
1378 int mlxsw_sp_port_vlan_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid_begin,
1379 u16 vid_end, bool is_member, bool untagged)
1384 for (vid = vid_begin; vid <= vid_end;
1385 vid += MLXSW_REG_SPVM_REC_MAX_COUNT) {
/* Clamp the chunk end to vid_end (min's second operand is elided). */
1386 vid_e = min((u16) (vid + MLXSW_REG_SPVM_REC_MAX_COUNT - 1),
1389 err = __mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid_e,
1390 is_member, untagged);
/* Drop a reference on every VLAN entry on the port; _safe iteration since
 * mlxsw_sp_port_vlan_put() may free and unlink entries.
 */
1398 static void mlxsw_sp_port_vlan_flush(struct mlxsw_sp_port *mlxsw_sp_port)
1400 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan, *tmp;
1402 list_for_each_entry_safe(mlxsw_sp_port_vlan, tmp,
1403 &mlxsw_sp_port->vlans_list, list)
1404 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
/* Create a port-VLAN entry: enable the VID in hardware first, then
 * allocate and link the tracking structure (ref_count starts at 1).
 * On allocation failure the hardware VID is rolled back. VID 1 is
 * installed untagged.
 */
1407 static struct mlxsw_sp_port_vlan *
1408 mlxsw_sp_port_vlan_create(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1410 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1411 bool untagged = vid == 1;
1414 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, true, untagged);
1416 return ERR_PTR(err);
1418 mlxsw_sp_port_vlan = kzalloc(sizeof(*mlxsw_sp_port_vlan), GFP_KERNEL);
1419 if (!mlxsw_sp_port_vlan) {
1421 goto err_port_vlan_alloc;
1424 mlxsw_sp_port_vlan->mlxsw_sp_port = mlxsw_sp_port;
1425 mlxsw_sp_port_vlan->ref_count = 1;
1426 mlxsw_sp_port_vlan->vid = vid;
1427 list_add(&mlxsw_sp_port_vlan->list, &mlxsw_sp_port->vlans_list);
1429 return mlxsw_sp_port_vlan;
1431 err_port_vlan_alloc:
1432 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
1433 return ERR_PTR(err);
/* Tear down a port-VLAN entry: unlink and free the tracking structure,
 * then remove the VID from the hardware filter. The VID is saved before
 * kfree() since the structure is freed first.
 */
1437 mlxsw_sp_port_vlan_destroy(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1439 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp_port_vlan->mlxsw_sp_port;
1440 u16 vid = mlxsw_sp_port_vlan->vid;
1442 list_del(&mlxsw_sp_port_vlan->list);
1443 kfree(mlxsw_sp_port_vlan);
1444 mlxsw_sp_port_vlan_set(mlxsw_sp_port, vid, vid, false, false);
/* Get-or-create a port-VLAN entry: bump the refcount on an existing one,
 * otherwise create a fresh entry (which starts with ref_count == 1).
 */
1447 struct mlxsw_sp_port_vlan *
1448 mlxsw_sp_port_vlan_get(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
1450 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1452 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1453 if (mlxsw_sp_port_vlan) {
1454 mlxsw_sp_port_vlan->ref_count++;
1455 return mlxsw_sp_port_vlan;
1458 return mlxsw_sp_port_vlan_create(mlxsw_sp_port, vid);
/* Drop a reference; on the last put, detach the VLAN from its bridge port
 * and/or router (the fid check guarding the router-leave is elided here)
 * before destroying the entry.
 */
1461 void mlxsw_sp_port_vlan_put(struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan)
1463 struct mlxsw_sp_fid *fid = mlxsw_sp_port_vlan->fid;
1465 if (--mlxsw_sp_port_vlan->ref_count != 0)
1468 if (mlxsw_sp_port_vlan->bridge_port)
1469 mlxsw_sp_port_vlan_bridge_leave(mlxsw_sp_port_vlan);
1471 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
1473 mlxsw_sp_port_vlan_destroy(mlxsw_sp_port_vlan);
/* ndo_vlan_rx_add_vid: take a reference on the VID's port-VLAN entry.
 * The early return for VID 0 (reserved) is elided in this extract.
 */
1476 static int mlxsw_sp_port_add_vid(struct net_device *dev,
1477 __be16 __always_unused proto, u16 vid)
1479 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1481 /* VLAN 0 is added to HW filter when device goes up, but it is
1482 * reserved in our case, so simply return.
1487 return PTR_ERR_OR_ZERO(mlxsw_sp_port_vlan_get(mlxsw_sp_port, vid));
/* ndo_vlan_rx_kill_vid: release the reference taken by add_vid. Missing
 * entries are tolerated silently (VID 0 early-return elided, as above).
 */
1490 static int mlxsw_sp_port_kill_vid(struct net_device *dev,
1491 __be16 __always_unused proto, u16 vid)
1493 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1494 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
1496 /* VLAN 0 is removed from HW filter when device goes down, but
1497 * it is reserved in our case, so simply return.
1502 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, vid);
1503 if (!mlxsw_sp_port_vlan)
1505 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
/* ndo_get_phys_port_name: "p<module+1>" for a regular port, or
 * "p<module+1>s<subport>" for a split port (split-index expression
 * elided in this extract). snprintf truncation check also elided.
 */
1510 static int mlxsw_sp_port_get_phys_port_name(struct net_device *dev, char *name,
1513 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1514 u8 module = mlxsw_sp_port->mapping.module;
1515 u8 width = mlxsw_sp_port->mapping.width;
1516 u8 lane = mlxsw_sp_port->mapping.lane;
1519 if (!mlxsw_sp_port->split)
1520 err = snprintf(name, len, "p%d", module + 1);
1522 err = snprintf(name, len, "p%ds%d", module + 1,
/* Look up a matchall TC entry on the port by its offload cookie; NULL
 * return on miss is elided in this extract.
 */
1531 static struct mlxsw_sp_port_mall_tc_entry *
1532 mlxsw_sp_port_mall_tc_entry_find(struct mlxsw_sp_port *port,
1533 unsigned long cookie) {
1534 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1536 list_for_each_entry(mall_tc_entry, &port->mall_tc_list, list)
1537 if (mall_tc_entry->cookie == cookie)
1538 return mall_tc_entry;
/* Offload a matchall mirror action: resolve the mirred target netdev by
 * ifindex, verify it is a spectrum port, record the destination and
 * direction in *mirror, and bind a SPAN session toward it.
 */
1544 mlxsw_sp_port_add_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1545 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror,
1546 const struct tc_action *a,
1549 struct net *net = dev_net(mlxsw_sp_port->dev);
1550 enum mlxsw_sp_span_type span_type;
1551 struct mlxsw_sp_port *to_port;
1552 struct net_device *to_dev;
1555 ifindex = tcf_mirred_ifindex(a);
1556 to_dev = __dev_get_by_index(net, ifindex);
1558 netdev_err(mlxsw_sp_port->dev, "Could not find requested device\n");
/* Mirroring is only supported between ports of this ASIC. */
1562 if (!mlxsw_sp_port_dev_check(to_dev)) {
1563 netdev_err(mlxsw_sp_port->dev, "Cannot mirror to a non-spectrum port");
1566 to_port = netdev_priv(to_dev);
1568 mirror->to_local_port = to_port->local_port;
1569 mirror->ingress = ingress;
1570 span_type = ingress ? MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1571 return mlxsw_sp_span_mirror_add(mlxsw_sp_port, to_port, span_type);
/* Remove the SPAN session created for a matchall mirror entry, using the
 * direction recorded at add time.
 */
1575 mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
1576 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror)
1578 enum mlxsw_sp_span_type span_type;
1580 span_type = mirror->ingress ?
1581 MLXSW_SP_SPAN_INGRESS : MLXSW_SP_SPAN_EGRESS;
1582 mlxsw_sp_span_mirror_remove(mlxsw_sp_port, mirror->to_local_port,
/* Offload a matchall sample action. Rejects a second sampler on the same
 * port and rates above MLXSW_REG_MPSC_RATE_MAX. The psample group pointer
 * is published with rcu_assign_pointer() before enabling hardware
 * sampling, and cleared again if the hardware call fails.
 */
1587 mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
1588 struct tc_cls_matchall_offload *cls,
1589 const struct tc_action *a,
1594 if (!mlxsw_sp_port->sample)
1596 if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
1597 netdev_err(mlxsw_sp_port->dev, "sample already active\n");
1600 if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
1601 netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
1605 rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
1606 tcf_sample_psample_group(a));
1607 mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
1608 mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
1609 mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
1611 err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
1613 goto err_port_sample_set;
/* Undo the published group pointer if hardware enable failed. */
1616 err_port_sample_set:
1617 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
/* Disable hardware sampling on the port and clear the psample group. */
1622 mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
1624 if (!mlxsw_sp_port->sample)
1627 mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
1628 RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
/* Install a matchall classifier: exactly one action is required; mirred
 * egress-mirror and sample actions are offloaded (both only for
 * ETH_P_ALL). The entry is tracked on mall_tc_list keyed by the offload
 * cookie; the allocation is freed on failure.
 */
1631 static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1632 struct tc_cls_matchall_offload *f,
1635 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1636 __be16 protocol = f->common.protocol;
1637 const struct tc_action *a;
1641 if (!tcf_exts_has_one_action(f->exts)) {
1642 netdev_err(mlxsw_sp_port->dev, "only singular actions are supported\n");
1646 mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
1649 mall_tc_entry->cookie = f->cookie;
1651 tcf_exts_to_list(f->exts, &actions);
1652 a = list_first_entry(&actions, struct tc_action, list);
1654 if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
1655 struct mlxsw_sp_port_mall_mirror_tc_entry *mirror;
1657 mall_tc_entry->type = MLXSW_SP_PORT_MALL_MIRROR;
1658 mirror = &mall_tc_entry->mirror;
1659 err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
1660 mirror, a, ingress);
1661 } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
1662 mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
1663 err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, f,
1670 goto err_add_action;
1672 list_add_tail(&mall_tc_entry->list, &mlxsw_sp_port->mall_tc_list);
1676 kfree(mall_tc_entry);
/* Remove a matchall classifier: find the entry by cookie, unlink it,
 * undo the type-specific offload (mirror or sample), and free it.
 */
1680 static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1681 struct tc_cls_matchall_offload *f)
1683 struct mlxsw_sp_port_mall_tc_entry *mall_tc_entry;
1685 mall_tc_entry = mlxsw_sp_port_mall_tc_entry_find(mlxsw_sp_port,
1687 if (!mall_tc_entry) {
1688 netdev_dbg(mlxsw_sp_port->dev, "tc entry not found on port\n");
1691 list_del(&mall_tc_entry->list);
1693 switch (mall_tc_entry->type) {
1694 case MLXSW_SP_PORT_MALL_MIRROR:
1695 mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
1696 &mall_tc_entry->mirror);
1698 case MLXSW_SP_PORT_MALL_SAMPLE:
1699 mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
1705 kfree(mall_tc_entry);
/* Dispatch a matchall offload command: determine ingress/egress from the
 * clsact classid, reject non-zero chains, then add or destroy the rule.
 */
1708 static int mlxsw_sp_setup_tc_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
1709 struct tc_cls_matchall_offload *f)
1713 if (is_classid_clsact_ingress(f->common.classid))
1715 else if (is_classid_clsact_egress(f->common.classid))
/* Only chain 0 is supported for matchall offload. */
1720 if (f->common.chain_index)
1723 switch (f->command) {
1724 case TC_CLSMATCHALL_REPLACE:
1725 return mlxsw_sp_port_add_cls_matchall(mlxsw_sp_port, f,
1727 case TC_CLSMATCHALL_DESTROY:
1728 mlxsw_sp_port_del_cls_matchall(mlxsw_sp_port, f);
/* Dispatch a flower offload command (replace/destroy/stats) to the
 * spectrum flower layer, with direction derived from the clsact classid.
 */
1736 mlxsw_sp_setup_tc_cls_flower(struct mlxsw_sp_port *mlxsw_sp_port,
1737 struct tc_cls_flower_offload *f)
1741 if (is_classid_clsact_ingress(f->common.classid))
1743 else if (is_classid_clsact_egress(f->common.classid))
1748 switch (f->command) {
1749 case TC_CLSFLOWER_REPLACE:
1750 return mlxsw_sp_flower_replace(mlxsw_sp_port, ingress, f);
1751 case TC_CLSFLOWER_DESTROY:
1752 mlxsw_sp_flower_destroy(mlxsw_sp_port, ingress, f);
1754 case TC_CLSFLOWER_STATS:
1755 return mlxsw_sp_flower_stats(mlxsw_sp_port, ingress, f);
/* ndo_setup_tc: route the offload request by type to the matchall or
 * flower handler (default/unsupported case elided in this extract).
 */
1761 static int mlxsw_sp_setup_tc(struct net_device *dev, enum tc_setup_type type,
1764 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1767 case TC_SETUP_CLSMATCHALL:
1768 return mlxsw_sp_setup_tc_cls_matchall(mlxsw_sp_port, type_data);
1769 case TC_SETUP_CLSFLOWER:
1770 return mlxsw_sp_setup_tc_cls_flower(mlxsw_sp_port, type_data);
/* net_device_ops for spectrum ports; binds the ndo callbacks defined in
 * this file to the netdev framework.
 */
1776 static const struct net_device_ops mlxsw_sp_port_netdev_ops = {
1777 .ndo_open = mlxsw_sp_port_open,
1778 .ndo_stop = mlxsw_sp_port_stop,
1779 .ndo_start_xmit = mlxsw_sp_port_xmit,
1780 .ndo_setup_tc = mlxsw_sp_setup_tc,
1781 .ndo_set_rx_mode = mlxsw_sp_set_rx_mode,
1782 .ndo_set_mac_address = mlxsw_sp_port_set_mac_address,
1783 .ndo_change_mtu = mlxsw_sp_port_change_mtu,
1784 .ndo_get_stats64 = mlxsw_sp_port_get_stats64,
1785 .ndo_has_offload_stats = mlxsw_sp_port_has_offload_stats,
1786 .ndo_get_offload_stats = mlxsw_sp_port_get_offload_stats,
1787 .ndo_vlan_rx_add_vid = mlxsw_sp_port_add_vid,
1788 .ndo_vlan_rx_kill_vid = mlxsw_sp_port_kill_vid,
1789 .ndo_get_phys_port_name = mlxsw_sp_port_get_phys_port_name,
/* ethtool get_drvinfo: report driver name/version, firmware revision
 * (major.minor.subminor from bus_info) and the bus device name.
 */
1792 static void mlxsw_sp_port_get_drvinfo(struct net_device *dev,
1793 struct ethtool_drvinfo *drvinfo)
1795 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1796 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1798 strlcpy(drvinfo->driver, mlxsw_sp_driver_name, sizeof(drvinfo->driver));
1799 strlcpy(drvinfo->version, mlxsw_sp_driver_version,
1800 sizeof(drvinfo->version));
1801 snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
1803 mlxsw_sp->bus_info->fw_rev.major,
1804 mlxsw_sp->bus_info->fw_rev.minor,
1805 mlxsw_sp->bus_info->fw_rev.subminor);
1806 strlcpy(drvinfo->bus_info, mlxsw_sp->bus_info->device_name,
1807 sizeof(drvinfo->bus_info));
/* ethtool get_pauseparam: report the cached rx/tx PAUSE state. */
1810 static void mlxsw_sp_port_get_pauseparam(struct net_device *dev,
1811 struct ethtool_pauseparam *pause)
1813 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1815 pause->rx_pause = mlxsw_sp_port->link.rx_pause;
1816 pause->tx_pause = mlxsw_sp_port->link.tx_pause;
/* Program rx/tx PAUSE enablement in hardware via the PFCC register. */
1819 static int mlxsw_sp_port_pause_set(struct mlxsw_sp_port *mlxsw_sp_port,
1820 struct ethtool_pauseparam *pause)
1822 char pfcc_pl[MLXSW_REG_PFCC_LEN];
1824 mlxsw_reg_pfcc_pack(pfcc_pl, mlxsw_sp_port->local_port);
1825 mlxsw_reg_pfcc_pprx_set(pfcc_pl, pause->rx_pause);
1826 mlxsw_reg_pfcc_pptx_set(pfcc_pl, pause->tx_pause);
1828 return mlxsw_reg_write(mlxsw_sp_port->mlxsw_sp->core, MLXSW_REG(pfcc),
/* ethtool set_pauseparam: PAUSE is mutually exclusive with PFC and does
 * not support autonegotiation. Headroom is resized for the new PAUSE
 * state before the PFCC write; if the write fails, headroom is restored
 * from the previously cached state. On success the cached link.rx_pause/
 * tx_pause are updated.
 */
1832 static int mlxsw_sp_port_set_pauseparam(struct net_device *dev,
1833 struct ethtool_pauseparam *pause)
1835 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
1836 bool pause_en = pause->tx_pause || pause->rx_pause;
1839 if (mlxsw_sp_port->dcb.pfc && mlxsw_sp_port->dcb.pfc->pfc_en) {
1840 netdev_err(dev, "PFC already enabled on port\n");
1844 if (pause->autoneg) {
1845 netdev_err(dev, "PAUSE frames autonegotiation isn't supported\n");
1849 err = mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
1851 netdev_err(dev, "Failed to configure port's headroom\n");
1855 err = mlxsw_sp_port_pause_set(mlxsw_sp_port, pause);
1857 netdev_err(dev, "Failed to set PAUSE parameters\n");
1858 goto err_port_pause_configure;
1861 mlxsw_sp_port->link.rx_pause = pause->rx_pause;
1862 mlxsw_sp_port->link.tx_pause = pause->tx_pause;
/* Roll headroom back to the still-cached previous PAUSE state. */
1866 err_port_pause_configure:
1867 pause_en = mlxsw_sp_port_is_pause_en(mlxsw_sp_port);
1868 mlxsw_sp_port_headroom_set(mlxsw_sp_port, dev->mtu, pause_en);
/* Descriptor for one ethtool statistic: its string name and the PPCNT
 * payload getter that extracts it. (Additional fields, e.g. cells_bytes
 * used later, are elided from the struct in this extract.)
 */
1872 struct mlxsw_sp_port_hw_stats {
1873 char str[ETH_GSTRING_LEN];
1874 u64 (*getter)(const char *payload);
/* IEEE 802.3 counter group exposed via ethtool -S. */
1878 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_stats[] = {
1880 .str = "a_frames_transmitted_ok",
1881 .getter = mlxsw_reg_ppcnt_a_frames_transmitted_ok_get,
1884 .str = "a_frames_received_ok",
1885 .getter = mlxsw_reg_ppcnt_a_frames_received_ok_get,
1888 .str = "a_frame_check_sequence_errors",
1889 .getter = mlxsw_reg_ppcnt_a_frame_check_sequence_errors_get,
1892 .str = "a_alignment_errors",
1893 .getter = mlxsw_reg_ppcnt_a_alignment_errors_get,
1896 .str = "a_octets_transmitted_ok",
1897 .getter = mlxsw_reg_ppcnt_a_octets_transmitted_ok_get,
1900 .str = "a_octets_received_ok",
1901 .getter = mlxsw_reg_ppcnt_a_octets_received_ok_get,
1904 .str = "a_multicast_frames_xmitted_ok",
1905 .getter = mlxsw_reg_ppcnt_a_multicast_frames_xmitted_ok_get,
1908 .str = "a_broadcast_frames_xmitted_ok",
1909 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_xmitted_ok_get,
1912 .str = "a_multicast_frames_received_ok",
1913 .getter = mlxsw_reg_ppcnt_a_multicast_frames_received_ok_get,
1916 .str = "a_broadcast_frames_received_ok",
1917 .getter = mlxsw_reg_ppcnt_a_broadcast_frames_received_ok_get,
1920 .str = "a_in_range_length_errors",
1921 .getter = mlxsw_reg_ppcnt_a_in_range_length_errors_get,
1924 .str = "a_out_of_range_length_field",
1925 .getter = mlxsw_reg_ppcnt_a_out_of_range_length_field_get,
1928 .str = "a_frame_too_long_errors",
1929 .getter = mlxsw_reg_ppcnt_a_frame_too_long_errors_get,
1932 .str = "a_symbol_error_during_carrier",
1933 .getter = mlxsw_reg_ppcnt_a_symbol_error_during_carrier_get,
1936 .str = "a_mac_control_frames_transmitted",
1937 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_transmitted_get,
1940 .str = "a_mac_control_frames_received",
1941 .getter = mlxsw_reg_ppcnt_a_mac_control_frames_received_get,
1944 .str = "a_unsupported_opcodes_received",
1945 .getter = mlxsw_reg_ppcnt_a_unsupported_opcodes_received_get,
1948 .str = "a_pause_mac_ctrl_frames_received",
1949 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_received_get,
1952 .str = "a_pause_mac_ctrl_frames_xmitted",
1953 .getter = mlxsw_reg_ppcnt_a_pause_mac_ctrl_frames_transmitted_get,
1957 #define MLXSW_SP_PORT_HW_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_stats)
/* Per-priority counter group: replicated once per IEEE priority when
 * strings/values are emitted (suffix "_<prio>" added by the string
 * helpers below).
 */
1959 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_prio_stats[] = {
1961 .str = "rx_octets_prio",
1962 .getter = mlxsw_reg_ppcnt_rx_octets_get,
1965 .str = "rx_frames_prio",
1966 .getter = mlxsw_reg_ppcnt_rx_frames_get,
1969 .str = "tx_octets_prio",
1970 .getter = mlxsw_reg_ppcnt_tx_octets_get,
1973 .str = "tx_frames_prio",
1974 .getter = mlxsw_reg_ppcnt_tx_frames_get,
1977 .str = "rx_pause_prio",
1978 .getter = mlxsw_reg_ppcnt_rx_pause_get,
1981 .str = "rx_pause_duration_prio",
1982 .getter = mlxsw_reg_ppcnt_rx_pause_duration_get,
1985 .str = "tx_pause_prio",
1986 .getter = mlxsw_reg_ppcnt_tx_pause_get,
1989 .str = "tx_pause_duration_prio",
1990 .getter = mlxsw_reg_ppcnt_tx_pause_duration_get,
1994 #define MLXSW_SP_PORT_HW_PRIO_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_prio_stats)
/* Per-TC counter group; the transmit-queue depth is reported by hardware
 * in cells and flagged for cell->byte conversion (cells_bytes).
 */
1996 static struct mlxsw_sp_port_hw_stats mlxsw_sp_port_hw_tc_stats[] = {
1998 .str = "tc_transmit_queue_tc",
1999 .getter = mlxsw_reg_ppcnt_tc_transmit_queue_get,
2000 .cells_bytes = true,
2003 .str = "tc_no_buffer_discard_uc_tc",
2004 .getter = mlxsw_reg_ppcnt_tc_no_buffer_discard_uc_get,
2008 #define MLXSW_SP_PORT_HW_TC_STATS_LEN ARRAY_SIZE(mlxsw_sp_port_hw_tc_stats)
/* Total ethtool stats: IEEE group once, prio and TC groups per TC. */
2010 #define MLXSW_SP_PORT_ETHTOOL_STATS_LEN (MLXSW_SP_PORT_HW_STATS_LEN + \
2011 (MLXSW_SP_PORT_HW_PRIO_STATS_LEN + \
2012 MLXSW_SP_PORT_HW_TC_STATS_LEN) * \
2013 IEEE_8021QAZ_MAX_TCS)
/* Emit the per-priority stat names suffixed with "_<prio>", advancing the
 * caller's string cursor by ETH_GSTRING_LEN per entry.
 */
2015 static void mlxsw_sp_port_get_prio_strings(u8 **p, int prio)
2019 for (i = 0; i < MLXSW_SP_PORT_HW_PRIO_STATS_LEN; i++) {
2020 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
2021 mlxsw_sp_port_hw_prio_stats[i].str, prio);
2022 *p += ETH_GSTRING_LEN;
/* Emit the per-TC stat names suffixed with "_<tc>", advancing the
 * caller's string cursor by ETH_GSTRING_LEN per entry.
 */
2026 static void mlxsw_sp_port_get_tc_strings(u8 **p, int tc)
2030 for (i = 0; i < MLXSW_SP_PORT_HW_TC_STATS_LEN; i++) {
2031 snprintf(*p, ETH_GSTRING_LEN, "%.29s_%.1d",
2032 mlxsw_sp_port_hw_tc_stats[i].str, tc);
2033 *p += ETH_GSTRING_LEN;
/* ethtool get_strings: for ETH_SS_STATS (case label elided), lay out the
 * IEEE stat names followed by the per-priority and per-TC names for each
 * of the IEEE_8021QAZ_MAX_TCS values — must match the value layout in
 * mlxsw_sp_port_get_stats().
 */
2037 static void mlxsw_sp_port_get_strings(struct net_device *dev,
2038 u32 stringset, u8 *data)
2043 switch (stringset) {
2045 for (i = 0; i < MLXSW_SP_PORT_HW_STATS_LEN; i++) {
2046 memcpy(p, mlxsw_sp_port_hw_stats[i].str,
2048 p += ETH_GSTRING_LEN;
2051 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2052 mlxsw_sp_port_get_prio_strings(&p, i);
2054 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++)
2055 mlxsw_sp_port_get_tc_strings(&p, i);
/* ethtool set_phys_id: toggle the port LED via the MLCR register —
 * active on ETHTOOL_ID_ACTIVE, off on ETHTOOL_ID_INACTIVE (the 'active'
 * assignments are elided in this extract).
 */
2061 static int mlxsw_sp_port_set_phys_id(struct net_device *dev,
2062 enum ethtool_phys_id_state state)
2064 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2065 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2066 char mlcr_pl[MLXSW_REG_MLCR_LEN];
2070 case ETHTOOL_ID_ACTIVE:
2073 case ETHTOOL_ID_INACTIVE:
2080 mlxsw_reg_mlcr_pack(mlcr_pl, mlxsw_sp_port->local_port, active);
2081 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mlcr), mlcr_pl);
/* Map a PPCNT counter group to its descriptor table and length; the
 * default/unsupported case is elided in this extract.
 */
2085 mlxsw_sp_get_hw_stats_by_group(struct mlxsw_sp_port_hw_stats **p_hw_stats,
2086 int *p_len, enum mlxsw_reg_ppcnt_grp grp)
2089 case MLXSW_REG_PPCNT_IEEE_8023_CNT:
2090 *p_hw_stats = mlxsw_sp_port_hw_stats;
2091 *p_len = MLXSW_SP_PORT_HW_STATS_LEN;
2093 case MLXSW_REG_PPCNT_PRIO_CNT:
2094 *p_hw_stats = mlxsw_sp_port_hw_prio_stats;
2095 *p_len = MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2097 case MLXSW_REG_PPCNT_TC_CNT:
2098 *p_hw_stats = mlxsw_sp_port_hw_tc_stats;
2099 *p_len = MLXSW_SP_PORT_HW_TC_STATS_LEN;
/* Read one PPCNT group/prio and store its values into data[] starting at
 * data_index, applying each descriptor's getter; values flagged
 * cells_bytes are converted from device cells to bytes.
 */
2108 static void __mlxsw_sp_port_get_stats(struct net_device *dev,
2109 enum mlxsw_reg_ppcnt_grp grp, int prio,
2110 u64 *data, int data_index)
2112 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2113 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2114 struct mlxsw_sp_port_hw_stats *hw_stats;
2115 char ppcnt_pl[MLXSW_REG_PPCNT_LEN];
2119 err = mlxsw_sp_get_hw_stats_by_group(&hw_stats, &len, grp);
2122 mlxsw_sp_port_get_stats_raw(dev, grp, prio, ppcnt_pl);
2123 for (i = 0; i < len; i++) {
2124 data[data_index + i] = hw_stats[i].getter(ppcnt_pl);
/* Only queue-depth style counters need cell->byte conversion. */
2125 if (!hw_stats[i].cells_bytes)
2127 data[data_index + i] = mlxsw_sp_cells_bytes(mlxsw_sp,
2128 data[data_index + i]);
/* ethtool get_ethtool_stats: IEEE group first, then prio and TC groups
 * for each of the IEEE_8021QAZ_MAX_TCS values — layout must match
 * mlxsw_sp_port_get_strings().
 */
2132 static void mlxsw_sp_port_get_stats(struct net_device *dev,
2133 struct ethtool_stats *stats, u64 *data)
2135 int i, data_index = 0;
2137 /* IEEE 802.3 Counters */
2138 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_IEEE_8023_CNT, 0,
2140 data_index = MLXSW_SP_PORT_HW_STATS_LEN;
2142 /* Per-Priority Counters */
2143 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2144 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_PRIO_CNT, i,
2146 data_index += MLXSW_SP_PORT_HW_PRIO_STATS_LEN;
2149 /* Per-TC Counters */
2150 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2151 __mlxsw_sp_port_get_stats(dev, MLXSW_REG_PPCNT_TC_CNT, i,
2153 data_index += MLXSW_SP_PORT_HW_TC_STATS_LEN;
/* ethtool get_sset_count: total stats count for ETH_SS_STATS (the switch
 * and default case are elided in this extract).
 */
2157 static int mlxsw_sp_port_get_sset_count(struct net_device *dev, int sset)
2161 return MLXSW_SP_PORT_ETHTOOL_STATS_LEN;
/* Mapping between PTYS protocol-capability bits ('mask', elided field),
 * the corresponding ethtool link-mode bit, and the link speed. Several
 * device bits can map to one ethtool mode, and one device bit can appear
 * under several ethtool modes (e.g. 56GBASE_R4).
 */
2167 struct mlxsw_sp_port_link_mode {
2168 enum ethtool_link_mode_bit_indices mask_ethtool;
2173 static const struct mlxsw_sp_port_link_mode mlxsw_sp_port_link_mode[] = {
2175 .mask = MLXSW_REG_PTYS_ETH_SPEED_100BASE_T,
2176 .mask_ethtool = ETHTOOL_LINK_MODE_100baseT_Full_BIT,
2180 .mask = MLXSW_REG_PTYS_ETH_SPEED_SGMII |
2181 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX,
2182 .mask_ethtool = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
2183 .speed = SPEED_1000,
2186 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_T,
2187 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
2188 .speed = SPEED_10000,
2191 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CX4 |
2192 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4,
2193 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
2194 .speed = SPEED_10000,
2197 .mask = MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2198 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2199 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2200 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_ER_LR,
2201 .mask_ethtool = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
2202 .speed = SPEED_10000,
2205 .mask = MLXSW_REG_PTYS_ETH_SPEED_20GBASE_KR2,
2206 .mask_ethtool = ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
2207 .speed = SPEED_20000,
2210 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4,
2211 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
2212 .speed = SPEED_40000,
2215 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4,
2216 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
2217 .speed = SPEED_40000,
2220 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4,
2221 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
2222 .speed = SPEED_40000,
2225 .mask = MLXSW_REG_PTYS_ETH_SPEED_40GBASE_LR4_ER4,
2226 .mask_ethtool = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
2227 .speed = SPEED_40000,
2230 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_CR,
2231 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
2232 .speed = SPEED_25000,
2235 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_KR,
2236 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
2237 .speed = SPEED_25000,
2240 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2241 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2242 .speed = SPEED_25000,
/* NOTE(review): this entry duplicates the previous 25GBASE_SR one —
 * verify against the full source whether a distinct mask was intended.
 */
2245 .mask = MLXSW_REG_PTYS_ETH_SPEED_25GBASE_SR,
2246 .mask_ethtool = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
2247 .speed = SPEED_25000,
2250 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_CR2,
2251 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
2252 .speed = SPEED_50000,
2255 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_KR2,
2256 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
2257 .speed = SPEED_50000,
2260 .mask = MLXSW_REG_PTYS_ETH_SPEED_50GBASE_SR2,
2261 .mask_ethtool = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
2262 .speed = SPEED_50000,
2265 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2266 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseKR4_Full_BIT,
2267 .speed = SPEED_56000,
2270 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2271 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseCR4_Full_BIT,
2272 .speed = SPEED_56000,
2275 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2276 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseSR4_Full_BIT,
2277 .speed = SPEED_56000,
2280 .mask = MLXSW_REG_PTYS_ETH_SPEED_56GBASE_R4,
2281 .mask_ethtool = ETHTOOL_LINK_MODE_56000baseLR4_Full_BIT,
2282 .speed = SPEED_56000,
2285 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4,
2286 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
2287 .speed = SPEED_100000,
2290 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4,
2291 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
2292 .speed = SPEED_100000,
2295 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4,
2296 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
2297 .speed = SPEED_100000,
2300 .mask = MLXSW_REG_PTYS_ETH_SPEED_100GBASE_LR4_ER4,
2301 .mask_ethtool = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
2302 .speed = SPEED_100000,
2306 #define MLXSW_SP_PORT_LINK_MODE_LEN ARRAY_SIZE(mlxsw_sp_port_link_mode)
/* Advertise the supported port types (FIBRE / Backplane) based on which
 * PTYS capability bits the device reports.
 */
2309 mlxsw_sp_from_ptys_supported_port(u32 ptys_eth_proto,
2310 struct ethtool_link_ksettings *cmd)
2312 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2313 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2314 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2315 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2316 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2317 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2318 ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
2320 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2321 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2322 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2323 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4 |
2324 MLXSW_REG_PTYS_ETH_SPEED_1000BASE_KX))
2325 ethtool_link_ksettings_add_link_mode(cmd, supported, Backplane);
/* Set the ethtool link-mode bit for every PTYS bit present in
 * ptys_eth_proto, per the mlxsw_sp_port_link_mode table.
 */
2328 static void mlxsw_sp_from_ptys_link(u32 ptys_eth_proto, unsigned long *mode)
2332 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2333 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask)
2334 __set_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
/* Derive speed/duplex from the operational PTYS bits; unknown unless the
 * carrier is up (the carrier_ok early-return is elided in this extract).
 * All supported modes are full duplex.
 */
2339 static void mlxsw_sp_from_ptys_speed_duplex(bool carrier_ok, u32 ptys_eth_proto,
2340 struct ethtool_link_ksettings *cmd)
2342 u32 speed = SPEED_UNKNOWN;
2343 u8 duplex = DUPLEX_UNKNOWN;
2349 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2350 if (ptys_eth_proto & mlxsw_sp_port_link_mode[i].mask) {
2351 speed = mlxsw_sp_port_link_mode[i].speed;
2352 duplex = DUPLEX_FULL;
2357 cmd->base.speed = speed;
2358 cmd->base.duplex = duplex;
/* Classify the connector type from operational PTYS bits: fibre-ish,
 * direct-attach copper, or backplane (the PORT_* return values are
 * elided in this extract).
 */
2361 static u8 mlxsw_sp_port_connector_port(u32 ptys_eth_proto)
2363 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_SR |
2364 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_SR4 |
2365 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_SR4 |
2366 MLXSW_REG_PTYS_ETH_SPEED_SGMII))
2369 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_CR |
2370 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_CR4 |
2371 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_CR4))
2374 if (ptys_eth_proto & (MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KR |
2375 MLXSW_REG_PTYS_ETH_SPEED_10GBASE_KX4 |
2376 MLXSW_REG_PTYS_ETH_SPEED_40GBASE_KR4 |
2377 MLXSW_REG_PTYS_ETH_SPEED_100GBASE_KR4))
/* Translate the ethtool advertising bitmap into a PTYS protocol mask by
 * OR-ing the device mask of every advertised link mode.
 */
2384 mlxsw_sp_to_ptys_advert_link(const struct ethtool_link_ksettings *cmd)
2389 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2390 if (test_bit(mlxsw_sp_port_link_mode[i].mask_ethtool,
2391 cmd->link_modes.advertising))
2392 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
/* PTYS mask of every link mode whose speed equals the requested speed. */
2397 static u32 mlxsw_sp_to_ptys_speed(u32 speed)
2402 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2403 if (speed == mlxsw_sp_port_link_mode[i].speed)
2404 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
/* PTYS mask of every link mode whose speed is <= upper_speed. */
2409 static u32 mlxsw_sp_to_ptys_upper_speed(u32 upper_speed)
2414 for (i = 0; i < MLXSW_SP_PORT_LINK_MODE_LEN; i++) {
2415 if (mlxsw_sp_port_link_mode[i].speed <= upper_speed)
2416 ptys_proto |= mlxsw_sp_port_link_mode[i].mask;
/* Populate the 'supported' link-mode bitmap: pause/autoneg capabilities
 * plus port types and speed modes derived from the device capability
 * mask.
 */
2421 static void mlxsw_sp_port_get_link_supported(u32 eth_proto_cap,
2422 struct ethtool_link_ksettings *cmd)
2424 ethtool_link_ksettings_add_link_mode(cmd, supported, Asym_Pause);
2425 ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
2426 ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);
2428 mlxsw_sp_from_ptys_supported_port(eth_proto_cap, cmd);
2429 mlxsw_sp_from_ptys_link(eth_proto_cap, cmd->link_modes.supported);
2432 static void mlxsw_sp_port_get_link_advertise(u32 eth_proto_admin, bool autoneg,
2433 struct ethtool_link_ksettings *cmd)
2438 ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
2439 mlxsw_sp_from_ptys_link(eth_proto_admin, cmd->link_modes.advertising);
2443 mlxsw_sp_port_get_link_lp_advertise(u32 eth_proto_lp, u8 autoneg_status,
2444 struct ethtool_link_ksettings *cmd)
2446 if (autoneg_status != MLXSW_REG_PTYS_AN_STATUS_OK || !eth_proto_lp)
2449 ethtool_link_ksettings_add_link_mode(cmd, lp_advertising, Autoneg);
2450 mlxsw_sp_from_ptys_link(eth_proto_lp, cmd->link_modes.lp_advertising);
/* ethtool .get_link_ksettings: query the PTYS register once and derive
 * supported/advertised/lp-advertised modes, autoneg state, connector type
 * and speed/duplex from it.
 */
2453 static int mlxsw_sp_port_get_link_ksettings(struct net_device *dev,
2454 struct ethtool_link_ksettings *cmd)
2456 u32 eth_proto_cap, eth_proto_admin, eth_proto_oper, eth_proto_lp;
2457 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2458 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2459 char ptys_pl[MLXSW_REG_PTYS_LEN];
2464 autoneg = mlxsw_sp_port->link.autoneg;
2465 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2466 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
/* NOTE(review): "ð" below is mojibake — almost certainly "&eth_proto_cap"
 * mangled by an HTML-entity pass (&eth; renders as ð). Restore from the
 * pristine file; do not ship this byte sequence.
 */
2469 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, ð_proto_admin,
2472 mlxsw_sp_port_get_link_supported(eth_proto_cap, cmd);
2474 mlxsw_sp_port_get_link_advertise(eth_proto_admin, autoneg, cmd);
2476 eth_proto_lp = mlxsw_reg_ptys_eth_proto_lp_advertise_get(ptys_pl);
2477 autoneg_status = mlxsw_reg_ptys_an_status_get(ptys_pl);
2478 mlxsw_sp_port_get_link_lp_advertise(eth_proto_lp, autoneg_status, cmd);
2480 cmd->base.autoneg = autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE;
2481 cmd->base.port = mlxsw_sp_port_connector_port(eth_proto_oper);
/* Speed/duplex reflect the operational mask only while carrier is up. */
2482 mlxsw_sp_from_ptys_speed_duplex(netif_carrier_ok(dev), eth_proto_oper,
/* ethtool .set_link_ksettings: translate the requested autoneg/speed or
 * advertised modes into a PTYS admin mask, intersect with capability,
 * write it back, and bounce the port so the new config takes effect.
 */
2489 mlxsw_sp_port_set_link_ksettings(struct net_device *dev,
2490 const struct ethtool_link_ksettings *cmd)
2492 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2493 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2494 char ptys_pl[MLXSW_REG_PTYS_LEN];
2495 u32 eth_proto_cap, eth_proto_new;
2499 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port, 0);
2500 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
/* NOTE(review): "ð" is mojibake of "&eth_proto_cap" (see get_link_ksettings
 * above) — restore from the pristine file.
 */
2503 mlxsw_reg_ptys_eth_unpack(ptys_pl, ð_proto_cap, NULL, NULL);
2505 autoneg = cmd->base.autoneg == AUTONEG_ENABLE;
/* 56G is an autoneg-only protocol on this hardware. */
2506 if (!autoneg && cmd->base.speed == SPEED_56000) {
2507 netdev_err(dev, "56G not supported with autoneg off\n");
2510 eth_proto_new = autoneg ?
2511 mlxsw_sp_to_ptys_advert_link(cmd) :
2512 mlxsw_sp_to_ptys_speed(cmd->base.speed);
/* Only program modes the port actually supports. */
2514 eth_proto_new = eth_proto_new & eth_proto_cap;
2515 if (!eth_proto_new) {
2516 netdev_err(dev, "No supported speed requested\n");
2520 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2522 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
2526 mlxsw_sp_port->link.autoneg = autoneg;
2528 if (!netif_running(dev))
/* Toggle admin state so the hardware re-negotiates with the new mask. */
2531 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2532 mlxsw_sp_port_admin_status_set(mlxsw_sp_port, true);
/* ethtool .flash_device: fetch the named firmware image and hand it to the
 * mlxsw flashing path. Only whole-image (ALL_REGIONS) flashing is accepted.
 * NOTE(review): the rtnl unlock/lock around request_firmware_direct()
 * present upstream was dropped by the extraction (lines 2548-2552 gap).
 */
2537 static int mlxsw_sp_flash_device(struct net_device *dev,
2538 struct ethtool_flash *flash)
2540 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
2541 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2542 const struct firmware *firmware;
2545 if (flash->region != ETHTOOL_FLASH_ALL_REGIONS)
2551 err = request_firmware_direct(&firmware, flash->data, &dev->dev);
2554 err = mlxsw_sp_firmware_flash(mlxsw_sp, firmware);
/* release_firmware() pairs with request_firmware_direct() above. */
2555 release_firmware(firmware);
/* SFF EEPROM access: I2C addresses for the low/high pages and the 256-byte
 * page size used to split cross-page reads.
 */
2562 #define MLXSW_SP_I2C_ADDR_LOW 0x50
2563 #define MLXSW_SP_I2C_ADDR_HIGH 0x51
2564 #define MLXSW_SP_EEPROM_PAGE_LENGTH 256
/* Read up to one MCIA transaction worth of module EEPROM at @offset.
 * Clamps @size to the MCIA buffer, never crosses the 256-byte page
 * boundary in one read, and selects the high I2C address for offsets
 * past the first page. Actual bytes copied are reported via *p_read_size.
 */
2566 static int mlxsw_sp_query_module_eeprom(struct mlxsw_sp_port *mlxsw_sp_port,
2567 u16 offset, u16 size, void *data,
2568 unsigned int *p_read_size)
2570 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2571 char eeprom_tmp[MLXSW_SP_REG_MCIA_EEPROM_SIZE];
2572 char mcia_pl[MLXSW_REG_MCIA_LEN];
2577 size = min_t(u16, size, MLXSW_SP_REG_MCIA_EEPROM_SIZE);
2579 if (offset < MLXSW_SP_EEPROM_PAGE_LENGTH &&
2580 offset + size > MLXSW_SP_EEPROM_PAGE_LENGTH)
2581 /* Cross pages read, read until offset 256 in low page */
2582 size = MLXSW_SP_EEPROM_PAGE_LENGTH - offset;
2584 i2c_addr = MLXSW_SP_I2C_ADDR_LOW;
2585 if (offset >= MLXSW_SP_EEPROM_PAGE_LENGTH) {
2586 i2c_addr = MLXSW_SP_I2C_ADDR_HIGH;
/* High page is addressed from 0 at the second I2C address. */
2587 offset -= MLXSW_SP_EEPROM_PAGE_LENGTH;
2590 mlxsw_reg_mcia_pack(mcia_pl, mlxsw_sp_port->mapping.module,
2591 0, 0, offset, size, i2c_addr);
2593 err = mlxsw_reg_query(mlxsw_sp->core, MLXSW_REG(mcia), mcia_pl);
/* NOTE(review): the status-check branch after this read was dropped by
 * the extraction (lines 2598-2600 gap).
 */
2597 status = mlxsw_reg_mcia_status_get(mcia_pl);
2601 mlxsw_reg_mcia_eeprom_memcpy_from(mcia_pl, eeprom_tmp);
2602 memcpy(data, eeprom_tmp, size);
2603 *p_read_size = size;
/* SFF-8436/8636 revision IDs found at the module-info revision byte. */
2608 enum mlxsw_sp_eeprom_module_info_rev_id {
2609 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_UNSPC = 0x00,
2610 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8436 = 0x01,
2611 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636 = 0x03,
/* SFF module identifier byte values (byte 0 of the EEPROM). */
2614 enum mlxsw_sp_eeprom_module_info_id {
2615 MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP = 0x03,
2616 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP = 0x0C,
2617 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS = 0x0D,
2618 MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 = 0x11,
/* Byte offsets within the short header read done by get_module_info(). */
2621 enum mlxsw_sp_eeprom_module_info {
2622 MLXSW_SP_EEPROM_MODULE_INFO_ID,
2623 MLXSW_SP_EEPROM_MODULE_INFO_REV_ID,
2624 MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
/* ethtool .get_module_info: read the first two EEPROM bytes and map the
 * module identifier (and, for QSFP+, the revision) to the matching SFF
 * spec type and EEPROM length.
 */
2627 static int mlxsw_sp_get_module_info(struct net_device *netdev,
2628 struct ethtool_modinfo *modinfo)
2630 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2631 u8 module_info[MLXSW_SP_EEPROM_MODULE_INFO_SIZE];
2632 u8 module_rev_id, module_id;
2633 unsigned int read_size;
2636 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, 0,
2637 MLXSW_SP_EEPROM_MODULE_INFO_SIZE,
2638 module_info, &read_size);
/* A short read means the module did not answer fully — treat as error. */
2642 if (read_size < MLXSW_SP_EEPROM_MODULE_INFO_SIZE)
2645 module_rev_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_REV_ID];
2646 module_id = module_info[MLXSW_SP_EEPROM_MODULE_INFO_ID];
2648 switch (module_id) {
2649 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP:
2650 modinfo->type = ETH_MODULE_SFF_8436;
2651 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2653 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP_PLUS:
2654 case MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28:
/* QSFP28, or QSFP+ at rev >= 8636, uses the SFF-8636 layout. */
2655 if (module_id == MLXSW_SP_EEPROM_MODULE_INFO_ID_QSFP28 ||
2656 module_rev_id >= MLXSW_SP_EEPROM_MODULE_INFO_REV_ID_8636) {
2657 modinfo->type = ETH_MODULE_SFF_8636;
2658 modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
2660 modinfo->type = ETH_MODULE_SFF_8436;
2661 modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
2664 case MLXSW_SP_EEPROM_MODULE_INFO_ID_SFP:
2665 modinfo->type = ETH_MODULE_SFF_8472;
2666 modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
/* ethtool .get_module_eeprom: satisfy the requested window by looping
 * mlxsw_sp_query_module_eeprom(), which may return fewer bytes per call
 * (MCIA buffer limit / page boundaries). Buffer is pre-zeroed so short
 * data never leaks stack contents to userspace.
 */
2675 static int mlxsw_sp_get_module_eeprom(struct net_device *netdev,
2676 struct ethtool_eeprom *ee,
2679 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(netdev);
2680 int offset = ee->offset;
2681 unsigned int read_size;
2688 memset(data, 0, ee->len);
2690 while (i < ee->len) {
2691 err = mlxsw_sp_query_module_eeprom(mlxsw_sp_port, offset,
2692 ee->len - i, data + i,
2695 netdev_err(mlxsw_sp_port->dev, "Eeprom query failed\n");
/* Advance by however much the chunked read actually produced. */
2700 offset += read_size;
/* ethtool operations for Spectrum ports; each handler is defined above
 * (or earlier in the file, for the stats/pause/drvinfo entries).
 */
2706 static const struct ethtool_ops mlxsw_sp_port_ethtool_ops = {
2707 .get_drvinfo = mlxsw_sp_port_get_drvinfo,
2708 .get_link = ethtool_op_get_link,
2709 .get_pauseparam = mlxsw_sp_port_get_pauseparam,
2710 .set_pauseparam = mlxsw_sp_port_set_pauseparam,
2711 .get_strings = mlxsw_sp_port_get_strings,
2712 .set_phys_id = mlxsw_sp_port_set_phys_id,
2713 .get_ethtool_stats = mlxsw_sp_port_get_stats,
2714 .get_sset_count = mlxsw_sp_port_get_sset_count,
2715 .get_link_ksettings = mlxsw_sp_port_get_link_ksettings,
2716 .set_link_ksettings = mlxsw_sp_port_set_link_ksettings,
2717 .flash_device = mlxsw_sp_flash_device,
2718 .get_module_info = mlxsw_sp_get_module_info,
2719 .get_module_eeprom = mlxsw_sp_get_module_eeprom,
/* Enable every link mode up to the maximum speed this lane width can
 * carry (base speed per lane * width), by writing the PTYS admin mask.
 */
2723 mlxsw_sp_port_speed_by_width_set(struct mlxsw_sp_port *mlxsw_sp_port, u8 width)
2725 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2726 u32 upper_speed = MLXSW_SP_PORT_BASE_SPEED * width;
2727 char ptys_pl[MLXSW_REG_PTYS_LEN];
2728 u32 eth_proto_admin;
2730 eth_proto_admin = mlxsw_sp_to_ptys_upper_speed(upper_speed);
2731 mlxsw_reg_ptys_eth_pack(ptys_pl, mlxsw_sp_port->local_port,
2733 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptys), ptys_pl);
/* Configure one ETS scheduling element (QEEC register): link element
 * @index at hierarchy level @hr to @next_index, with DWRR on/off and the
 * given weight. Non-static — shared with the DCB code.
 */
2736 int mlxsw_sp_port_ets_set(struct mlxsw_sp_port *mlxsw_sp_port,
2737 enum mlxsw_reg_qeec_hr hr, u8 index, u8 next_index,
2738 bool dwrr, u8 dwrr_weight)
2740 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2741 char qeec_pl[MLXSW_REG_QEEC_LEN];
2743 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2745 mlxsw_reg_qeec_de_set(qeec_pl, true);
2746 mlxsw_reg_qeec_dwrr_set(qeec_pl, dwrr);
2747 mlxsw_reg_qeec_dwrr_weight_set(qeec_pl, dwrr_weight);
2748 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
/* Program the max-shaper rate for one scheduling element (QEEC). */
2751 int mlxsw_sp_port_ets_maxrate_set(struct mlxsw_sp_port *mlxsw_sp_port,
2752 enum mlxsw_reg_qeec_hr hr, u8 index,
2753 u8 next_index, u32 maxrate)
2755 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2756 char qeec_pl[MLXSW_REG_QEEC_LEN];
2758 mlxsw_reg_qeec_pack(qeec_pl, mlxsw_sp_port->local_port, hr, index,
2760 mlxsw_reg_qeec_mase_set(qeec_pl, true);
2761 mlxsw_reg_qeec_max_shaper_rate_set(qeec_pl, maxrate);
2762 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qeec), qeec_pl);
/* Map one switch priority to a traffic class (QTCT register). */
2765 int mlxsw_sp_port_prio_tc_set(struct mlxsw_sp_port *mlxsw_sp_port,
2766 u8 switch_prio, u8 tclass)
2768 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
2769 char qtct_pl[MLXSW_REG_QTCT_LEN];
2771 mlxsw_reg_qtct_pack(qtct_pl, mlxsw_sp_port->local_port, switch_prio,
2773 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(qtct), qtct_pl);
/* Default ETS setup for a new port: build the group→subgroup→TC element
 * hierarchy, disable the max shaper at every level, and map all switch
 * priorities to TC 0. (Error-return lines between steps were dropped by
 * the extraction; code lines are preserved byte-for-byte.)
 */
2776 static int mlxsw_sp_port_ets_init(struct mlxsw_sp_port *mlxsw_sp_port)
2780 /* Setup the elements hierarcy, so that each TC is linked to
2781 * one subgroup, which are all member in the same group.
2783 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2784 MLXSW_REG_QEEC_HIERARCY_GROUP, 0, 0, false,
2788 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2789 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2790 MLXSW_REG_QEEC_HIERARCY_SUBGROUP, i,
2795 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2796 err = mlxsw_sp_port_ets_set(mlxsw_sp_port,
2797 MLXSW_REG_QEEC_HIERARCY_TC, i, i,
2803 /* Make sure the max shaper is disabled in all hierarcies that
2806 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2807 MLXSW_REG_QEEC_HIERARCY_PORT, 0, 0,
2808 MLXSW_REG_QEEC_MAS_DIS);
2811 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2812 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2813 MLXSW_REG_QEEC_HIERARCY_SUBGROUP,
2815 MLXSW_REG_QEEC_MAS_DIS);
2819 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2820 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2821 MLXSW_REG_QEEC_HIERARCY_TC,
2823 MLXSW_REG_QEEC_MAS_DIS);
2827 err = mlxsw_sp_port_ets_maxrate_set(mlxsw_sp_port,
2828 MLXSW_REG_QEEC_HIERARCY_TC,
2830 MLXSW_REG_QEEC_MAS_DIS);
2835 /* Map all priorities to traffic class 0. */
2836 for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
2837 err = mlxsw_sp_port_prio_tc_set(mlxsw_sp_port, i, 0);
/* Create and register one Spectrum port netdev: allocate the netdev and
 * per-port state, program the hardware (module map, SWID, MAC, speeds,
 * MTU, buffers, ETS, DCB, FIDs, default VLAN), then register_netdev().
 * Unwinds in strict reverse order via the goto ladder at the bottom.
 * NOTE(review): extraction dropped most `if (err)` / `return err;` lines
 * and several error labels; code lines are preserved byte-for-byte.
 */
2845 static int mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
2846 bool split, u8 module, u8 width, u8 lane)
2848 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
2849 struct mlxsw_sp_port *mlxsw_sp_port;
2850 struct net_device *dev;
2853 err = mlxsw_core_port_init(mlxsw_sp->core, local_port);
2855 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to init core port\n",
2860 dev = alloc_etherdev(sizeof(struct mlxsw_sp_port));
2863 goto err_alloc_etherdev;
2865 SET_NETDEV_DEV(dev, mlxsw_sp->bus_info->dev);
2866 mlxsw_sp_port = netdev_priv(dev);
2867 mlxsw_sp_port->dev = dev;
2868 mlxsw_sp_port->mlxsw_sp = mlxsw_sp;
2869 mlxsw_sp_port->local_port = local_port;
/* Default PVID 1, autoneg on, until userspace says otherwise. */
2870 mlxsw_sp_port->pvid = 1;
2871 mlxsw_sp_port->split = split;
2872 mlxsw_sp_port->mapping.module = module;
2873 mlxsw_sp_port->mapping.width = width;
2874 mlxsw_sp_port->mapping.lane = lane;
2875 mlxsw_sp_port->link.autoneg = 1;
2876 INIT_LIST_HEAD(&mlxsw_sp_port->vlans_list);
2877 INIT_LIST_HEAD(&mlxsw_sp_port->mall_tc_list);
2879 mlxsw_sp_port->pcpu_stats =
2880 netdev_alloc_pcpu_stats(struct mlxsw_sp_port_pcpu_stats);
2881 if (!mlxsw_sp_port->pcpu_stats) {
2883 goto err_alloc_stats;
2886 mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
2888 if (!mlxsw_sp_port->sample) {
2890 goto err_alloc_sample;
2893 mlxsw_sp_port->hw_stats.cache =
2894 kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
2896 if (!mlxsw_sp_port->hw_stats.cache) {
2898 goto err_alloc_hw_stats;
2900 INIT_DELAYED_WORK(&mlxsw_sp_port->hw_stats.update_dw,
2901 &update_stats_cache);
2903 dev->netdev_ops = &mlxsw_sp_port_netdev_ops;
2904 dev->ethtool_ops = &mlxsw_sp_port_ethtool_ops;
2906 err = mlxsw_sp_port_module_map(mlxsw_sp_port, module, width, lane);
2908 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to map module\n",
2909 mlxsw_sp_port->local_port);
2910 goto err_port_module_map;
2913 err = mlxsw_sp_port_swid_set(mlxsw_sp_port, 0);
2915 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set SWID\n",
2916 mlxsw_sp_port->local_port);
2917 goto err_port_swid_set;
2920 err = mlxsw_sp_port_dev_addr_init(mlxsw_sp_port);
2922 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Unable to init port mac address\n",
2923 mlxsw_sp_port->local_port);
2924 goto err_dev_addr_init;
/* Carrier stays off until a PUDE event reports link up. */
2927 netif_carrier_off(dev);
2929 dev->features |= NETIF_F_NETNS_LOCAL | NETIF_F_LLTX | NETIF_F_SG |
2930 NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_HW_TC;
2931 dev->hw_features |= NETIF_F_HW_TC;
2934 dev->max_mtu = ETH_MAX_MTU;
2936 /* Each packet needs to have a Tx header (metadata) on top all other
2939 dev->needed_headroom = MLXSW_TXHDR_LEN;
2941 err = mlxsw_sp_port_system_port_mapping_set(mlxsw_sp_port);
2943 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set system port mapping\n",
2944 mlxsw_sp_port->local_port);
2945 goto err_port_system_port_mapping_set;
2948 err = mlxsw_sp_port_speed_by_width_set(mlxsw_sp_port, width);
2950 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to enable speeds\n",
2951 mlxsw_sp_port->local_port);
2952 goto err_port_speed_by_width_set;
2955 err = mlxsw_sp_port_mtu_set(mlxsw_sp_port, ETH_DATA_LEN);
2957 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to set MTU\n",
2958 mlxsw_sp_port->local_port);
2959 goto err_port_mtu_set;
2962 err = mlxsw_sp_port_admin_status_set(mlxsw_sp_port, false);
2964 goto err_port_admin_status_set;
2966 err = mlxsw_sp_port_buffers_init(mlxsw_sp_port);
2968 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize buffers\n",
2969 mlxsw_sp_port->local_port);
2970 goto err_port_buffers_init;
2973 err = mlxsw_sp_port_ets_init(mlxsw_sp_port);
2975 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize ETS\n",
2976 mlxsw_sp_port->local_port);
2977 goto err_port_ets_init;
2980 /* ETS and buffers must be initialized before DCB. */
2981 err = mlxsw_sp_port_dcb_init(mlxsw_sp_port);
2983 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize DCB\n",
2984 mlxsw_sp_port->local_port);
2985 goto err_port_dcb_init;
2988 err = mlxsw_sp_port_fids_init(mlxsw_sp_port);
2990 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to initialize FIDs\n",
2991 mlxsw_sp_port->local_port);
2992 goto err_port_fids_init;
2995 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
2996 if (IS_ERR(mlxsw_sp_port_vlan)) {
2997 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to create VID 1\n",
2998 mlxsw_sp_port->local_port);
2999 err = PTR_ERR(mlxsw_sp_port_vlan);
3000 goto err_port_vlan_get;
3003 mlxsw_sp_port_switchdev_init(mlxsw_sp_port);
/* Publish in the port array before register_netdev() so callbacks that
 * fire during registration can find the port.
 */
3004 mlxsw_sp->ports[local_port] = mlxsw_sp_port;
3005 err = register_netdev(dev);
3007 dev_err(mlxsw_sp->bus_info->dev, "Port %d: Failed to register netdev\n",
3008 mlxsw_sp_port->local_port);
3009 goto err_register_netdev;
3012 mlxsw_core_port_eth_set(mlxsw_sp->core, mlxsw_sp_port->local_port,
3013 mlxsw_sp_port, dev, mlxsw_sp_port->split,
3015 mlxsw_core_schedule_dw(&mlxsw_sp_port->hw_stats.update_dw, 0);
/* Error unwind: reverse order of the setup steps above. */
3018 err_register_netdev:
3019 mlxsw_sp->ports[local_port] = NULL;
3020 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
3021 mlxsw_sp_port_vlan_put(mlxsw_sp_port_vlan);
3023 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3025 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3028 err_port_buffers_init:
3029 err_port_admin_status_set:
3031 err_port_speed_by_width_set:
3032 err_port_system_port_mapping_set:
3034 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3036 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3037 err_port_module_map:
3038 kfree(mlxsw_sp_port->hw_stats.cache);
3040 kfree(mlxsw_sp_port->sample);
3042 free_percpu(mlxsw_sp_port->pcpu_stats);
3046 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
/* Tear down one port: stop the stats work, unregister the netdev, and
 * undo mlxsw_sp_port_create() in exact reverse order.
 */
3050 static void mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3052 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3054 cancel_delayed_work_sync(&mlxsw_sp_port->hw_stats.update_dw);
3055 mlxsw_core_port_clear(mlxsw_sp->core, local_port, mlxsw_sp);
3056 unregister_netdev(mlxsw_sp_port->dev); /* This calls ndo_stop */
3057 mlxsw_sp->ports[local_port] = NULL;
3058 mlxsw_sp_port_switchdev_fini(mlxsw_sp_port);
3059 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
3060 mlxsw_sp_port_fids_fini(mlxsw_sp_port);
3061 mlxsw_sp_port_dcb_fini(mlxsw_sp_port);
3062 mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
3063 mlxsw_sp_port_module_unmap(mlxsw_sp_port);
3064 kfree(mlxsw_sp_port->hw_stats.cache);
3065 kfree(mlxsw_sp_port->sample);
3066 free_percpu(mlxsw_sp_port->pcpu_stats);
/* All port VLANs should be gone after vlan_flush() above. */
3067 WARN_ON_ONCE(!list_empty(&mlxsw_sp_port->vlans_list));
3068 free_netdev(mlxsw_sp_port->dev);
3069 mlxsw_core_port_fini(mlxsw_sp->core, local_port);
/* True if a port netdev exists at this local port index. */
3072 static bool mlxsw_sp_port_created(struct mlxsw_sp *mlxsw_sp, u8 local_port)
3074 return mlxsw_sp->ports[local_port] != NULL;
/* Remove every created port (index 0 is the CPU port, hence i starts
 * at 1) and free the port bookkeeping arrays.
 */
3077 static void mlxsw_sp_ports_remove(struct mlxsw_sp *mlxsw_sp)
3081 for (i = 1; i < mlxsw_core_max_ports(mlxsw_sp->core); i++)
3082 if (mlxsw_sp_port_created(mlxsw_sp, i))
3083 mlxsw_sp_port_remove(mlxsw_sp, i);
3084 kfree(mlxsw_sp->port_to_module);
3085 kfree(mlxsw_sp->ports);
/* Allocate the port arrays and create every front-panel port, recording
 * each port's module for later unsplit re-creation. On failure, removes
 * the ports created so far and frees both arrays.
 */
3088 static int mlxsw_sp_ports_create(struct mlxsw_sp *mlxsw_sp)
3090 unsigned int max_ports = mlxsw_core_max_ports(mlxsw_sp->core);
3091 u8 module, width, lane;
3096 alloc_size = sizeof(struct mlxsw_sp_port *) * max_ports;
3097 mlxsw_sp->ports = kzalloc(alloc_size, GFP_KERNEL);
3098 if (!mlxsw_sp->ports)
3101 mlxsw_sp->port_to_module = kcalloc(max_ports, sizeof(u8), GFP_KERNEL);
3102 if (!mlxsw_sp->port_to_module) {
3104 goto err_port_to_module_alloc;
3107 for (i = 1; i < max_ports; i++) {
3108 err = mlxsw_sp_port_module_info_get(mlxsw_sp, i, &module,
3111 goto err_port_module_info_get;
3114 mlxsw_sp->port_to_module[i] = module;
3115 err = mlxsw_sp_port_create(mlxsw_sp, i, false,
3116 module, width, lane);
3118 goto err_port_create;
/* Unwind: remove ports already created, then free the arrays. */
3123 err_port_module_info_get:
3124 for (i--; i >= 1; i--)
3125 if (mlxsw_sp_port_created(mlxsw_sp, i))
3126 mlxsw_sp_port_remove(mlxsw_sp, i);
3127 kfree(mlxsw_sp->port_to_module);
3128 err_port_to_module_alloc:
3129 kfree(mlxsw_sp->ports);
/* Round a local port down to the first port of its split cluster. */
3133 static u8 mlxsw_sp_cluster_base_port_get(u8 local_port)
3135 u8 offset = (local_port - 1) % MLXSW_SP_PORTS_PER_CLUSTER_MAX;
3137 return local_port - offset;
/* Create @count split children of @module starting at @base_port; each
 * child gets max_width/count lanes. Unwinds created children on failure.
 */
3140 static int mlxsw_sp_port_split_create(struct mlxsw_sp *mlxsw_sp, u8 base_port,
3141 u8 module, unsigned int count)
3143 u8 width = MLXSW_PORT_MODULE_MAX_WIDTH / count;
3146 for (i = 0; i < count; i++) {
3147 err = mlxsw_sp_port_create(mlxsw_sp, base_port + i, true,
3148 module, width, i * width);
3150 goto err_port_create;
3156 for (i--; i >= 0; i--)
3157 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3158 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
/* Re-create the original unsplit ports (full width) after an unsplit or
 * a failed split; modules come from the saved port_to_module[] map.
 */
3162 static void mlxsw_sp_port_unsplit_create(struct mlxsw_sp *mlxsw_sp,
3163 u8 base_port, unsigned int count)
3165 u8 local_port, module, width = MLXSW_PORT_MODULE_MAX_WIDTH;
3168 /* Split by four means we need to re-create two ports, otherwise
3173 for (i = 0; i < count; i++) {
3174 local_port = base_port + i * 2;
3175 module = mlxsw_sp->port_to_module[local_port];
3177 mlxsw_sp_port_create(mlxsw_sp, local_port, false, module,
/* devlink port-split handler: validate the request (count 2/4, port at
 * full width, required neighbor slots free), remove the ports occupying
 * the cluster, then create the split children. On failure the original
 * unsplit ports are restored.
 */
3182 static int mlxsw_sp_port_split(struct mlxsw_core *mlxsw_core, u8 local_port,
3185 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3186 struct mlxsw_sp_port *mlxsw_sp_port;
3187 u8 module, cur_width, base_port;
3191 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3192 if (!mlxsw_sp_port) {
3193 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3198 module = mlxsw_sp_port->mapping.module;
3199 cur_width = mlxsw_sp_port->mapping.width;
3201 if (count != 2 && count != 4) {
3202 netdev_err(mlxsw_sp_port->dev, "Port can only be split into 2 or 4 ports\n");
3206 if (cur_width != MLXSW_PORT_MODULE_MAX_WIDTH) {
3207 netdev_err(mlxsw_sp_port->dev, "Port cannot be split further\n");
3211 /* Make sure we have enough slave (even) ports for the split. */
3213 base_port = local_port;
3214 if (mlxsw_sp->ports[base_port + 1]) {
3215 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
/* 4-way split needs the whole cluster; check both odd slots. */
3219 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3220 if (mlxsw_sp->ports[base_port + 1] ||
3221 mlxsw_sp->ports[base_port + 3]) {
3222 netdev_err(mlxsw_sp_port->dev, "Invalid split configuration\n");
3227 for (i = 0; i < count; i++)
3228 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3229 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3231 err = mlxsw_sp_port_split_create(mlxsw_sp, base_port, module, count);
3233 dev_err(mlxsw_sp->bus_info->dev, "Failed to create split ports\n");
3234 goto err_port_split_create;
3239 err_port_split_create:
3240 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
/* devlink port-unsplit handler: infer the split factor from the child's
 * width (width 1 → was split by 4), remove the split children in the
 * affected half/whole cluster, and re-create the full-width ports.
 */
3244 static int mlxsw_sp_port_unsplit(struct mlxsw_core *mlxsw_core, u8 local_port)
3246 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3247 struct mlxsw_sp_port *mlxsw_sp_port;
3248 u8 cur_width, base_port;
3252 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3253 if (!mlxsw_sp_port) {
3254 dev_err(mlxsw_sp->bus_info->dev, "Port number \"%d\" does not exist\n",
3259 if (!mlxsw_sp_port->split) {
3260 netdev_err(mlxsw_sp_port->dev, "Port wasn't split\n");
3264 cur_width = mlxsw_sp_port->mapping.width;
3265 count = cur_width == 1 ? 4 : 2;
3267 base_port = mlxsw_sp_cluster_base_port_get(local_port);
3269 /* Determine which ports to remove. */
3270 if (count == 2 && local_port >= base_port + 2)
3271 base_port = base_port + 2;
3273 for (i = 0; i < count; i++)
3274 if (mlxsw_sp_port_created(mlxsw_sp, base_port + i))
3275 mlxsw_sp_port_remove(mlxsw_sp, base_port + i);
3277 mlxsw_sp_port_unsplit_create(mlxsw_sp, base_port, count);
/* PUDE (port up/down event) trap handler: translate the hardware
 * operational status into netif carrier on/off for the port's netdev.
 */
3282 static void mlxsw_sp_pude_event_func(const struct mlxsw_reg_info *reg,
3283 char *pude_pl, void *priv)
3285 struct mlxsw_sp *mlxsw_sp = priv;
3286 struct mlxsw_sp_port *mlxsw_sp_port;
3287 enum mlxsw_reg_pude_oper_status status;
3290 local_port = mlxsw_reg_pude_local_port_get(pude_pl);
3291 mlxsw_sp_port = mlxsw_sp->ports[local_port];
3295 status = mlxsw_reg_pude_oper_status_get(pude_pl);
3296 if (status == MLXSW_PORT_OPER_STATUS_UP) {
3297 netdev_info(mlxsw_sp_port->dev, "link up\n");
3298 netif_carrier_on(mlxsw_sp_port->dev);
3300 netdev_info(mlxsw_sp_port->dev, "link down\n");
3301 netif_carrier_off(mlxsw_sp_port->dev);
/* Trapped-packet RX path: attribute the skb to the ingress port, bump the
 * per-cpu counters, and hand it to the stack via netif_receive_skb().
 */
3305 static void mlxsw_sp_rx_listener_no_mark_func(struct sk_buff *skb,
3306 u8 local_port, void *priv)
3308 struct mlxsw_sp *mlxsw_sp = priv;
3309 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3310 struct mlxsw_sp_port_pcpu_stats *pcpu_stats;
3312 if (unlikely(!mlxsw_sp_port)) {
3313 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: skb received for non-existent port\n",
3318 skb->dev = mlxsw_sp_port->dev;
3320 pcpu_stats = this_cpu_ptr(mlxsw_sp_port->pcpu_stats);
3321 u64_stats_update_begin(&pcpu_stats->syncp);
3322 pcpu_stats->rx_packets++;
3323 pcpu_stats->rx_bytes += skb->len;
3324 u64_stats_update_end(&pcpu_stats->syncp);
3326 skb->protocol = eth_type_trans(skb, skb->dev);
3327 netif_receive_skb(skb);
/* Same as above, but marks the skb as already forwarded in hardware so
 * the bridge does not forward it a second time.
 */
3330 static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
3333 skb->offload_fwd_mark = 1;
3334 return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
/* Sampled-packet handler: deliver the (possibly truncated) packet to the
 * psample subsystem. The psample_group dereference is RCU-protected; the
 * surrounding rcu_read_lock()/unlock() lines were dropped by extraction.
 */
3337 static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
3340 struct mlxsw_sp *mlxsw_sp = priv;
3341 struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
3342 struct psample_group *psample_group;
3345 if (unlikely(!mlxsw_sp_port)) {
3346 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
3350 if (unlikely(!mlxsw_sp_port->sample)) {
3351 dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
3356 size = mlxsw_sp_port->sample->truncate ?
3357 mlxsw_sp_port->sample->trunc_size : skb->len;
3360 psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
3363 psample_sample_packet(psample_group, skb, size,
3364 mlxsw_sp_port->dev->ifindex, 0,
3365 mlxsw_sp_port->sample->rate);
/* Convenience wrappers over MLXSW_RXL/MLXSW_EVENTL that bind the two RX
 * handlers above and prefix the trap group with SP_.
 */
3372 #define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
3373 MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
3374 _is_ctrl, SP_##_trap_group, DISCARD)
3376 #define MLXSW_SP_RXL_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
3377 MLXSW_RXL(mlxsw_sp_rx_listener_mark_func, _trap_id, _action, \
3378 _is_ctrl, SP_##_trap_group, DISCARD)
3380 #define MLXSW_SP_EVENTL(_func, _trap_id) \
3381 MLXSW_EVENTL(_func, _trap_id, SP_EVENT)
/* All traps/events this driver registers: events, L2 control traps,
 * L3/router exceptions, and the packet-sampling trap.
 */
3383 static const struct mlxsw_listener mlxsw_sp_listener[] = {
3385 MLXSW_SP_EVENTL(mlxsw_sp_pude_event_func, PUDE),
3387 MLXSW_SP_RXL_NO_MARK(STP, TRAP_TO_CPU, STP, true),
3388 MLXSW_SP_RXL_NO_MARK(LACP, TRAP_TO_CPU, LACP, true),
3389 MLXSW_SP_RXL_NO_MARK(LLDP, TRAP_TO_CPU, LLDP, true),
3390 MLXSW_SP_RXL_MARK(DHCP, MIRROR_TO_CPU, DHCP, false),
3391 MLXSW_SP_RXL_MARK(IGMP_QUERY, MIRROR_TO_CPU, IGMP, false),
3392 MLXSW_SP_RXL_NO_MARK(IGMP_V1_REPORT, TRAP_TO_CPU, IGMP, false),
3393 MLXSW_SP_RXL_NO_MARK(IGMP_V2_REPORT, TRAP_TO_CPU, IGMP, false),
3394 MLXSW_SP_RXL_NO_MARK(IGMP_V2_LEAVE, TRAP_TO_CPU, IGMP, false),
3395 MLXSW_SP_RXL_NO_MARK(IGMP_V3_REPORT, TRAP_TO_CPU, IGMP, false),
3396 MLXSW_SP_RXL_MARK(ARPBC, MIRROR_TO_CPU, ARP, false),
3397 MLXSW_SP_RXL_MARK(ARPUC, MIRROR_TO_CPU, ARP, false),
3398 MLXSW_SP_RXL_NO_MARK(FID_MISS, TRAP_TO_CPU, IP2ME, false),
3399 MLXSW_SP_RXL_MARK(IPV6_MLDV12_LISTENER_QUERY, MIRROR_TO_CPU, IPV6_MLD,
3401 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3403 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV1_LISTENER_DONE, TRAP_TO_CPU, IPV6_MLD,
3405 MLXSW_SP_RXL_NO_MARK(IPV6_MLDV2_LISTENER_REPORT, TRAP_TO_CPU, IPV6_MLD,
3408 MLXSW_SP_RXL_MARK(MTUERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3409 MLXSW_SP_RXL_MARK(TTLERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3410 MLXSW_SP_RXL_MARK(LBERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3411 MLXSW_SP_RXL_MARK(IP2ME, TRAP_TO_CPU, IP2ME, false),
3412 MLXSW_SP_RXL_MARK(IPV6_UNSPECIFIED_ADDRESS, TRAP_TO_CPU, ROUTER_EXP,
3414 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP, false),
3415 MLXSW_SP_RXL_MARK(IPV6_LINK_LOCAL_SRC, TRAP_TO_CPU, ROUTER_EXP, false),
3416 MLXSW_SP_RXL_MARK(IPV6_ALL_NODES_LINK, TRAP_TO_CPU, ROUTER_EXP, false),
3417 MLXSW_SP_RXL_MARK(IPV6_ALL_ROUTERS_LINK, TRAP_TO_CPU, ROUTER_EXP,
3419 MLXSW_SP_RXL_MARK(IPV4_OSPF, TRAP_TO_CPU, OSPF, false),
3420 MLXSW_SP_RXL_MARK(IPV6_OSPF, TRAP_TO_CPU, OSPF, false),
3421 MLXSW_SP_RXL_MARK(IPV6_DHCP, TRAP_TO_CPU, DHCP, false),
3422 MLXSW_SP_RXL_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
3423 MLXSW_SP_RXL_MARK(IPV4_BGP, TRAP_TO_CPU, BGP, false),
3424 MLXSW_SP_RXL_MARK(IPV6_BGP, TRAP_TO_CPU, BGP, false),
3425 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3427 MLXSW_SP_RXL_MARK(L3_IPV6_ROUTER_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3429 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_SOLICITATION, TRAP_TO_CPU, IPV6_ND,
3431 MLXSW_SP_RXL_MARK(L3_IPV6_NEIGHBOR_ADVERTISMENT, TRAP_TO_CPU, IPV6_ND,
3433 MLXSW_SP_RXL_MARK(L3_IPV6_REDIRECTION, TRAP_TO_CPU, IPV6_ND, false),
3434 MLXSW_SP_RXL_MARK(IPV6_MC_LINK_LOCAL_DEST, TRAP_TO_CPU, ROUTER_EXP,
3436 MLXSW_SP_RXL_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, HOST_MISS, false),
3437 MLXSW_SP_RXL_MARK(HOST_MISS_IPV6, TRAP_TO_CPU, HOST_MISS, false),
3438 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV4, TRAP_TO_CPU, ROUTER_EXP, false),
3439 MLXSW_SP_RXL_MARK(ROUTER_ALERT_IPV6, TRAP_TO_CPU, ROUTER_EXP, false),
3440 MLXSW_SP_RXL_MARK(IPIP_DECAP_ERROR, TRAP_TO_CPU, ROUTER_EXP, false),
3441 /* PKT Sample trap */
3442 MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
3443 false, SP_IP2ME, DISCARD),
3445 MLXSW_SP_RXL_NO_MARK(ACL0, TRAP_TO_CPU, IP2ME, false),
/* Program a QPCR rate policer for each CPU trap group, with per-group
 * rates chosen in the switch below (the rate/burst assignment lines were
 * dropped by the extraction; only the group classification remains).
 */
3448 static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
3450 char qpcr_pl[MLXSW_REG_QPCR_LEN];
3451 enum mlxsw_reg_qpcr_ir_units ir_units;
3452 int max_cpu_policers;
3458 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_CPU_POLICERS))
3461 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3463 ir_units = MLXSW_REG_QPCR_IR_UNITS_M;
3464 for (i = 0; i < max_cpu_policers; i++) {
3467 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3468 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3469 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3470 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3474 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3475 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
3479 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
3480 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3481 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3482 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
3483 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3484 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3485 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
3489 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3497 mlxsw_reg_qpcr_pack(qpcr_pl, i, ir_units, is_bytes, rate,
3499 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(qpcr), qpcr_pl);
/* Configure each HTGT trap group with a priority, traffic class and
 * (except for events) the policer programmed by cpu_policers_set(); the
 * per-group priority/tc assignments were dropped by the extraction.
 */
3507 static int mlxsw_sp_trap_groups_set(struct mlxsw_core *mlxsw_core)
3509 char htgt_pl[MLXSW_REG_HTGT_LEN];
3510 enum mlxsw_reg_htgt_trap_group i;
3511 int max_cpu_policers;
3512 int max_trap_groups;
3517 if (!MLXSW_CORE_RES_VALID(mlxsw_core, MAX_TRAP_GROUPS))
3520 max_trap_groups = MLXSW_CORE_RES_GET(mlxsw_core, MAX_TRAP_GROUPS);
3521 max_cpu_policers = MLXSW_CORE_RES_GET(mlxsw_core, MAX_CPU_POLICERS);
3523 for (i = 0; i < max_trap_groups; i++) {
3526 case MLXSW_REG_HTGT_TRAP_GROUP_SP_STP:
3527 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LACP:
3528 case MLXSW_REG_HTGT_TRAP_GROUP_SP_LLDP:
3529 case MLXSW_REG_HTGT_TRAP_GROUP_SP_OSPF:
3533 case MLXSW_REG_HTGT_TRAP_GROUP_SP_BGP:
3534 case MLXSW_REG_HTGT_TRAP_GROUP_SP_DHCP:
3538 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IGMP:
3539 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IP2ME:
3540 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_MLD:
3544 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ARP:
3545 case MLXSW_REG_HTGT_TRAP_GROUP_SP_IPV6_ND:
3549 case MLXSW_REG_HTGT_TRAP_GROUP_SP_HOST_MISS:
3550 case MLXSW_REG_HTGT_TRAP_GROUP_SP_ROUTER_EXP:
3551 case MLXSW_REG_HTGT_TRAP_GROUP_SP_REMOTE_ROUTE:
3555 case MLXSW_REG_HTGT_TRAP_GROUP_SP_EVENT:
/* Events are not rate-limited: default priority/tc, no policer. */
3556 priority = MLXSW_REG_HTGT_DEFAULT_PRIORITY;
3557 tc = MLXSW_REG_HTGT_DEFAULT_TC;
3558 policer_id = MLXSW_REG_HTGT_INVALID_POLICER;
/* Skip groups whose policer id exceeds the device's policer range. */
3564 if (max_cpu_policers <= policer_id &&
3565 policer_id != MLXSW_REG_HTGT_INVALID_POLICER)
3568 mlxsw_reg_htgt_pack(htgt_pl, i, policer_id, priority, tc);
3569 err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
/* Trap setup: program policers and trap groups, then register every
 * listener in mlxsw_sp_listener[]. On a registration failure, unregisters
 * the listeners registered so far.
 */
3577 static int mlxsw_sp_traps_init(struct mlxsw_sp *mlxsw_sp)
3582 err = mlxsw_sp_cpu_policers_set(mlxsw_sp->core);
3586 err = mlxsw_sp_trap_groups_set(mlxsw_sp->core);
3590 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3591 err = mlxsw_core_trap_register(mlxsw_sp->core,
3592 &mlxsw_sp_listener[i],
3595 goto err_listener_register;
3600 err_listener_register:
3601 for (i--; i >= 0; i--) {
3602 mlxsw_core_trap_unregister(mlxsw_sp->core,
3603 &mlxsw_sp_listener[i],
/* Unregister every trap listener (mirror of traps_init). */
3609 static void mlxsw_sp_traps_fini(struct mlxsw_sp *mlxsw_sp)
3613 for (i = 0; i < ARRAY_SIZE(mlxsw_sp_listener); i++) {
3614 mlxsw_core_trap_unregister(mlxsw_sp->core,
3615 &mlxsw_sp_listener[i],
3620 static int mlxsw_sp_lag_init(struct mlxsw_sp *mlxsw_sp)
3622 char slcr_pl[MLXSW_REG_SLCR_LEN];
3625 mlxsw_reg_slcr_pack(slcr_pl, MLXSW_REG_SLCR_LAG_HASH_SMAC |
3626 MLXSW_REG_SLCR_LAG_HASH_DMAC |
3627 MLXSW_REG_SLCR_LAG_HASH_ETHERTYPE |
3628 MLXSW_REG_SLCR_LAG_HASH_VLANID |
3629 MLXSW_REG_SLCR_LAG_HASH_SIP |
3630 MLXSW_REG_SLCR_LAG_HASH_DIP |
3631 MLXSW_REG_SLCR_LAG_HASH_SPORT |
3632 MLXSW_REG_SLCR_LAG_HASH_DPORT |
3633 MLXSW_REG_SLCR_LAG_HASH_IPPROTO);
3634 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcr), slcr_pl);
3638 if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG) ||
3639 !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_LAG_MEMBERS))
3642 mlxsw_sp->lags = kcalloc(MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG),
3643 sizeof(struct mlxsw_sp_upper),
3645 if (!mlxsw_sp->lags)
3651 static void mlxsw_sp_lag_fini(struct mlxsw_sp *mlxsw_sp)
3653 kfree(mlxsw_sp->lags);
3656 static int mlxsw_sp_basic_trap_groups_set(struct mlxsw_core *mlxsw_core)
3658 char htgt_pl[MLXSW_REG_HTGT_LEN];
3660 mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD,
3661 MLXSW_REG_HTGT_INVALID_POLICER,
3662 MLXSW_REG_HTGT_DEFAULT_PRIORITY,
3663 MLXSW_REG_HTGT_DEFAULT_TC);
3664 return mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
3667 static int mlxsw_sp_init(struct mlxsw_core *mlxsw_core,
3668 const struct mlxsw_bus_info *mlxsw_bus_info)
3670 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3673 mlxsw_sp->core = mlxsw_core;
3674 mlxsw_sp->bus_info = mlxsw_bus_info;
3676 err = mlxsw_sp_fw_rev_validate(mlxsw_sp);
3678 dev_err(mlxsw_sp->bus_info->dev, "Could not upgrade firmware\n");
3682 err = mlxsw_sp_base_mac_get(mlxsw_sp);
3684 dev_err(mlxsw_sp->bus_info->dev, "Failed to get base mac\n");
3688 err = mlxsw_sp_fids_init(mlxsw_sp);
3690 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize FIDs\n");
3694 err = mlxsw_sp_traps_init(mlxsw_sp);
3696 dev_err(mlxsw_sp->bus_info->dev, "Failed to set traps\n");
3697 goto err_traps_init;
3700 err = mlxsw_sp_buffers_init(mlxsw_sp);
3702 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize buffers\n");
3703 goto err_buffers_init;
3706 err = mlxsw_sp_lag_init(mlxsw_sp);
3708 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize LAG\n");
3712 err = mlxsw_sp_switchdev_init(mlxsw_sp);
3714 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize switchdev\n");
3715 goto err_switchdev_init;
3718 err = mlxsw_sp_router_init(mlxsw_sp);
3720 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize router\n");
3721 goto err_router_init;
3724 err = mlxsw_sp_span_init(mlxsw_sp);
3726 dev_err(mlxsw_sp->bus_info->dev, "Failed to init span system\n");
3730 err = mlxsw_sp_acl_init(mlxsw_sp);
3732 dev_err(mlxsw_sp->bus_info->dev, "Failed to initialize ACL\n");
3736 err = mlxsw_sp_counter_pool_init(mlxsw_sp);
3738 dev_err(mlxsw_sp->bus_info->dev, "Failed to init counter pool\n");
3739 goto err_counter_pool_init;
3742 err = mlxsw_sp_dpipe_init(mlxsw_sp);
3744 dev_err(mlxsw_sp->bus_info->dev, "Failed to init pipeline debug\n");
3745 goto err_dpipe_init;
3748 err = mlxsw_sp_ports_create(mlxsw_sp);
3750 dev_err(mlxsw_sp->bus_info->dev, "Failed to create ports\n");
3751 goto err_ports_create;
3757 mlxsw_sp_dpipe_fini(mlxsw_sp);
3759 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3760 err_counter_pool_init:
3761 mlxsw_sp_acl_fini(mlxsw_sp);
3763 mlxsw_sp_span_fini(mlxsw_sp);
3765 mlxsw_sp_router_fini(mlxsw_sp);
3767 mlxsw_sp_switchdev_fini(mlxsw_sp);
3769 mlxsw_sp_lag_fini(mlxsw_sp);
3771 mlxsw_sp_buffers_fini(mlxsw_sp);
3773 mlxsw_sp_traps_fini(mlxsw_sp);
3775 mlxsw_sp_fids_fini(mlxsw_sp);
3779 static void mlxsw_sp_fini(struct mlxsw_core *mlxsw_core)
3781 struct mlxsw_sp *mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
3783 mlxsw_sp_ports_remove(mlxsw_sp);
3784 mlxsw_sp_dpipe_fini(mlxsw_sp);
3785 mlxsw_sp_counter_pool_fini(mlxsw_sp);
3786 mlxsw_sp_acl_fini(mlxsw_sp);
3787 mlxsw_sp_span_fini(mlxsw_sp);
3788 mlxsw_sp_router_fini(mlxsw_sp);
3789 mlxsw_sp_switchdev_fini(mlxsw_sp);
3790 mlxsw_sp_lag_fini(mlxsw_sp);
3791 mlxsw_sp_buffers_fini(mlxsw_sp);
3792 mlxsw_sp_traps_fini(mlxsw_sp);
3793 mlxsw_sp_fids_fini(mlxsw_sp);
3796 static const struct mlxsw_config_profile mlxsw_sp_config_profile = {
3797 .used_max_vepa_channels = 1,
3798 .max_vepa_channels = 0,
3800 .max_mid = MLXSW_SP_MID_MAX,
3803 .used_flood_tables = 1,
3804 .used_flood_mode = 1,
3806 .max_fid_offset_flood_tables = 3,
3807 .fid_offset_flood_table_size = VLAN_N_VID - 1,
3808 .max_fid_flood_tables = 3,
3809 .fid_flood_table_size = MLXSW_SP_FID_8021D_MAX,
3810 .used_max_ib_mc = 1,
3814 .used_kvd_split_data = 1,
3815 .kvd_hash_granularity = MLXSW_SP_KVD_GRANULARITY,
3816 .kvd_hash_single_parts = 2,
3817 .kvd_hash_double_parts = 1,
3818 .kvd_linear_size = MLXSW_SP_KVD_LINEAR_SIZE,
3822 .type = MLXSW_PORT_SWID_TYPE_ETH,
3825 .resource_query_enable = 1,
3828 static struct mlxsw_driver mlxsw_sp_driver = {
3829 .kind = mlxsw_sp_driver_name,
3830 .priv_size = sizeof(struct mlxsw_sp),
3831 .init = mlxsw_sp_init,
3832 .fini = mlxsw_sp_fini,
3833 .basic_trap_groups_set = mlxsw_sp_basic_trap_groups_set,
3834 .port_split = mlxsw_sp_port_split,
3835 .port_unsplit = mlxsw_sp_port_unsplit,
3836 .sb_pool_get = mlxsw_sp_sb_pool_get,
3837 .sb_pool_set = mlxsw_sp_sb_pool_set,
3838 .sb_port_pool_get = mlxsw_sp_sb_port_pool_get,
3839 .sb_port_pool_set = mlxsw_sp_sb_port_pool_set,
3840 .sb_tc_pool_bind_get = mlxsw_sp_sb_tc_pool_bind_get,
3841 .sb_tc_pool_bind_set = mlxsw_sp_sb_tc_pool_bind_set,
3842 .sb_occ_snapshot = mlxsw_sp_sb_occ_snapshot,
3843 .sb_occ_max_clear = mlxsw_sp_sb_occ_max_clear,
3844 .sb_occ_port_pool_get = mlxsw_sp_sb_occ_port_pool_get,
3845 .sb_occ_tc_port_bind_get = mlxsw_sp_sb_occ_tc_port_bind_get,
3846 .txhdr_construct = mlxsw_sp_txhdr_construct,
3847 .txhdr_len = MLXSW_TXHDR_LEN,
3848 .profile = &mlxsw_sp_config_profile,
3851 bool mlxsw_sp_port_dev_check(const struct net_device *dev)
3853 return dev->netdev_ops == &mlxsw_sp_port_netdev_ops;
3856 static int mlxsw_sp_lower_dev_walk(struct net_device *lower_dev, void *data)
3858 struct mlxsw_sp_port **p_mlxsw_sp_port = data;
3861 if (mlxsw_sp_port_dev_check(lower_dev)) {
3862 *p_mlxsw_sp_port = netdev_priv(lower_dev);
3869 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find(struct net_device *dev)
3871 struct mlxsw_sp_port *mlxsw_sp_port;
3873 if (mlxsw_sp_port_dev_check(dev))
3874 return netdev_priv(dev);
3876 mlxsw_sp_port = NULL;
3877 netdev_walk_all_lower_dev(dev, mlxsw_sp_lower_dev_walk, &mlxsw_sp_port);
3879 return mlxsw_sp_port;
3882 struct mlxsw_sp *mlxsw_sp_lower_get(struct net_device *dev)
3884 struct mlxsw_sp_port *mlxsw_sp_port;
3886 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find(dev);
3887 return mlxsw_sp_port ? mlxsw_sp_port->mlxsw_sp : NULL;
3890 struct mlxsw_sp_port *mlxsw_sp_port_dev_lower_find_rcu(struct net_device *dev)
3892 struct mlxsw_sp_port *mlxsw_sp_port;
3894 if (mlxsw_sp_port_dev_check(dev))
3895 return netdev_priv(dev);
3897 mlxsw_sp_port = NULL;
3898 netdev_walk_all_lower_dev_rcu(dev, mlxsw_sp_lower_dev_walk,
3901 return mlxsw_sp_port;
3904 struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev)
3906 struct mlxsw_sp_port *mlxsw_sp_port;
3909 mlxsw_sp_port = mlxsw_sp_port_dev_lower_find_rcu(dev);
3911 dev_hold(mlxsw_sp_port->dev);
3913 return mlxsw_sp_port;
3916 void mlxsw_sp_port_dev_put(struct mlxsw_sp_port *mlxsw_sp_port)
3918 dev_put(mlxsw_sp_port->dev);
3922 mlxsw_sp_port_lag_uppers_cleanup(struct mlxsw_sp_port *mlxsw_sp_port,
3923 struct net_device *lag_dev)
3925 struct net_device *br_dev = netdev_master_upper_dev_get(lag_dev);
3926 struct net_device *upper_dev;
3927 struct list_head *iter;
3929 if (netif_is_bridge_port(lag_dev))
3930 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, lag_dev, br_dev);
3932 netdev_for_each_upper_dev_rcu(lag_dev, upper_dev, iter) {
3933 if (!netif_is_bridge_port(upper_dev))
3935 br_dev = netdev_master_upper_dev_get(upper_dev);
3936 mlxsw_sp_port_bridge_leave(mlxsw_sp_port, upper_dev, br_dev);
3940 static int mlxsw_sp_lag_create(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3942 char sldr_pl[MLXSW_REG_SLDR_LEN];
3944 mlxsw_reg_sldr_lag_create_pack(sldr_pl, lag_id);
3945 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3948 static int mlxsw_sp_lag_destroy(struct mlxsw_sp *mlxsw_sp, u16 lag_id)
3950 char sldr_pl[MLXSW_REG_SLDR_LEN];
3952 mlxsw_reg_sldr_lag_destroy_pack(sldr_pl, lag_id);
3953 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
3956 static int mlxsw_sp_lag_col_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
3957 u16 lag_id, u8 port_index)
3959 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3960 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3962 mlxsw_reg_slcor_port_add_pack(slcor_pl, mlxsw_sp_port->local_port,
3963 lag_id, port_index);
3964 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3967 static int mlxsw_sp_lag_col_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
3970 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3971 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3973 mlxsw_reg_slcor_port_remove_pack(slcor_pl, mlxsw_sp_port->local_port,
3975 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3978 static int mlxsw_sp_lag_col_port_enable(struct mlxsw_sp_port *mlxsw_sp_port,
3981 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3982 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3984 mlxsw_reg_slcor_col_enable_pack(slcor_pl, mlxsw_sp_port->local_port,
3986 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
3989 static int mlxsw_sp_lag_col_port_disable(struct mlxsw_sp_port *mlxsw_sp_port,
3992 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
3993 char slcor_pl[MLXSW_REG_SLCOR_LEN];
3995 mlxsw_reg_slcor_col_disable_pack(slcor_pl, mlxsw_sp_port->local_port,
3997 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(slcor), slcor_pl);
4000 static int mlxsw_sp_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4001 struct net_device *lag_dev,
4004 struct mlxsw_sp_upper *lag;
4005 int free_lag_id = -1;
4009 max_lag = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_LAG);
4010 for (i = 0; i < max_lag; i++) {
4011 lag = mlxsw_sp_lag_get(mlxsw_sp, i);
4012 if (lag->ref_count) {
4013 if (lag->dev == lag_dev) {
4017 } else if (free_lag_id < 0) {
4021 if (free_lag_id < 0)
4023 *p_lag_id = free_lag_id;
4028 mlxsw_sp_master_lag_check(struct mlxsw_sp *mlxsw_sp,
4029 struct net_device *lag_dev,
4030 struct netdev_lag_upper_info *lag_upper_info)
4034 if (mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id) != 0)
4036 if (lag_upper_info->tx_type != NETDEV_LAG_TX_TYPE_HASH)
4041 static int mlxsw_sp_port_lag_index_get(struct mlxsw_sp *mlxsw_sp,
4042 u16 lag_id, u8 *p_port_index)
4044 u64 max_lag_members;
4047 max_lag_members = MLXSW_CORE_RES_GET(mlxsw_sp->core,
4049 for (i = 0; i < max_lag_members; i++) {
4050 if (!mlxsw_sp_port_lagged_get(mlxsw_sp, lag_id, i)) {
4058 static int mlxsw_sp_port_lag_join(struct mlxsw_sp_port *mlxsw_sp_port,
4059 struct net_device *lag_dev)
4061 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4062 struct mlxsw_sp_port_vlan *mlxsw_sp_port_vlan;
4063 struct mlxsw_sp_upper *lag;
4068 err = mlxsw_sp_lag_index_get(mlxsw_sp, lag_dev, &lag_id);
4071 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4072 if (!lag->ref_count) {
4073 err = mlxsw_sp_lag_create(mlxsw_sp, lag_id);
4079 err = mlxsw_sp_port_lag_index_get(mlxsw_sp, lag_id, &port_index);
4082 err = mlxsw_sp_lag_col_port_add(mlxsw_sp_port, lag_id, port_index);
4084 goto err_col_port_add;
4086 mlxsw_core_lag_mapping_set(mlxsw_sp->core, lag_id, port_index,
4087 mlxsw_sp_port->local_port);
4088 mlxsw_sp_port->lag_id = lag_id;
4089 mlxsw_sp_port->lagged = 1;
4092 /* Port is no longer usable as a router interface */
4093 mlxsw_sp_port_vlan = mlxsw_sp_port_vlan_find_by_vid(mlxsw_sp_port, 1);
4094 if (mlxsw_sp_port_vlan->fid)
4095 mlxsw_sp_port_vlan_router_leave(mlxsw_sp_port_vlan);
4100 if (!lag->ref_count)
4101 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4105 static void mlxsw_sp_port_lag_leave(struct mlxsw_sp_port *mlxsw_sp_port,
4106 struct net_device *lag_dev)
4108 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4109 u16 lag_id = mlxsw_sp_port->lag_id;
4110 struct mlxsw_sp_upper *lag;
4112 if (!mlxsw_sp_port->lagged)
4114 lag = mlxsw_sp_lag_get(mlxsw_sp, lag_id);
4115 WARN_ON(lag->ref_count == 0);
4117 mlxsw_sp_lag_col_port_remove(mlxsw_sp_port, lag_id);
4119 /* Any VLANs configured on the port are no longer valid */
4120 mlxsw_sp_port_vlan_flush(mlxsw_sp_port);
4121 /* Make the LAG and its directly linked uppers leave bridges they
4124 mlxsw_sp_port_lag_uppers_cleanup(mlxsw_sp_port, lag_dev);
4126 if (lag->ref_count == 1)
4127 mlxsw_sp_lag_destroy(mlxsw_sp, lag_id);
4129 mlxsw_core_lag_mapping_clear(mlxsw_sp->core, lag_id,
4130 mlxsw_sp_port->local_port);
4131 mlxsw_sp_port->lagged = 0;
4134 mlxsw_sp_port_vlan_get(mlxsw_sp_port, 1);
4135 /* Make sure untagged frames are allowed to ingress */
4136 mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
4139 static int mlxsw_sp_lag_dist_port_add(struct mlxsw_sp_port *mlxsw_sp_port,
4142 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4143 char sldr_pl[MLXSW_REG_SLDR_LEN];
4145 mlxsw_reg_sldr_lag_add_port_pack(sldr_pl, lag_id,
4146 mlxsw_sp_port->local_port);
4147 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4150 static int mlxsw_sp_lag_dist_port_remove(struct mlxsw_sp_port *mlxsw_sp_port,
4153 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4154 char sldr_pl[MLXSW_REG_SLDR_LEN];
4156 mlxsw_reg_sldr_lag_remove_port_pack(sldr_pl, lag_id,
4157 mlxsw_sp_port->local_port);
4158 return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sldr), sldr_pl);
4162 mlxsw_sp_port_lag_col_dist_enable(struct mlxsw_sp_port *mlxsw_sp_port)
4166 err = mlxsw_sp_lag_col_port_enable(mlxsw_sp_port,
4167 mlxsw_sp_port->lag_id);
4171 err = mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4173 goto err_dist_port_add;
4178 mlxsw_sp_lag_col_port_disable(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4183 mlxsw_sp_port_lag_col_dist_disable(struct mlxsw_sp_port *mlxsw_sp_port)
4187 err = mlxsw_sp_lag_dist_port_remove(mlxsw_sp_port,
4188 mlxsw_sp_port->lag_id);
4192 err = mlxsw_sp_lag_col_port_disable(mlxsw_sp_port,
4193 mlxsw_sp_port->lag_id);
4195 goto err_col_port_disable;
4199 err_col_port_disable:
4200 mlxsw_sp_lag_dist_port_add(mlxsw_sp_port, mlxsw_sp_port->lag_id);
4204 static int mlxsw_sp_port_lag_changed(struct mlxsw_sp_port *mlxsw_sp_port,
4205 struct netdev_lag_lower_state_info *info)
4207 if (info->tx_enabled)
4208 return mlxsw_sp_port_lag_col_dist_enable(mlxsw_sp_port);
4210 return mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4213 static int mlxsw_sp_port_stp_set(struct mlxsw_sp_port *mlxsw_sp_port,
4216 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4217 enum mlxsw_reg_spms_state spms_state;
4222 spms_state = enable ? MLXSW_REG_SPMS_STATE_FORWARDING :
4223 MLXSW_REG_SPMS_STATE_DISCARDING;
4225 spms_pl = kmalloc(MLXSW_REG_SPMS_LEN, GFP_KERNEL);
4228 mlxsw_reg_spms_pack(spms_pl, mlxsw_sp_port->local_port);
4230 for (vid = 0; vid < VLAN_N_VID; vid++)
4231 mlxsw_reg_spms_vid_pack(spms_pl, vid, spms_state);
4233 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spms), spms_pl);
4238 static int mlxsw_sp_port_ovs_join(struct mlxsw_sp_port *mlxsw_sp_port)
4243 err = mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, true);
4246 err = mlxsw_sp_port_stp_set(mlxsw_sp_port, true);
4248 goto err_port_stp_set;
4249 err = mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4252 goto err_port_vlan_set;
4254 for (; vid <= VLAN_N_VID - 1; vid++) {
4255 err = mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4258 goto err_vid_learning_set;
4263 err_vid_learning_set:
4264 for (vid--; vid >= 1; vid--)
4265 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port, vid, true);
4267 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4269 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4273 static void mlxsw_sp_port_ovs_leave(struct mlxsw_sp_port *mlxsw_sp_port)
4277 for (vid = VLAN_N_VID - 1; vid >= 1; vid--)
4278 mlxsw_sp_port_vid_learning_set(mlxsw_sp_port,
4281 mlxsw_sp_port_vlan_set(mlxsw_sp_port, 2, VLAN_N_VID - 1,
4283 mlxsw_sp_port_stp_set(mlxsw_sp_port, false);
4284 mlxsw_sp_port_vp_mode_set(mlxsw_sp_port, false);
4287 static int mlxsw_sp_netdevice_port_upper_event(struct net_device *lower_dev,
4288 struct net_device *dev,
4289 unsigned long event, void *ptr)
4291 struct netdev_notifier_changeupper_info *info;
4292 struct mlxsw_sp_port *mlxsw_sp_port;
4293 struct net_device *upper_dev;
4294 struct mlxsw_sp *mlxsw_sp;
4297 mlxsw_sp_port = netdev_priv(dev);
4298 mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4302 case NETDEV_PRECHANGEUPPER:
4303 upper_dev = info->upper_dev;
4304 if (!is_vlan_dev(upper_dev) &&
4305 !netif_is_lag_master(upper_dev) &&
4306 !netif_is_bridge_master(upper_dev) &&
4307 !netif_is_ovs_master(upper_dev))
4311 if (netdev_has_any_upper_dev(upper_dev) &&
4312 (!netif_is_bridge_master(upper_dev) ||
4313 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4316 if (netif_is_lag_master(upper_dev) &&
4317 !mlxsw_sp_master_lag_check(mlxsw_sp, upper_dev,
4320 if (netif_is_lag_master(upper_dev) && vlan_uses_dev(dev))
4322 if (netif_is_lag_port(dev) && is_vlan_dev(upper_dev) &&
4323 !netif_is_lag_master(vlan_dev_real_dev(upper_dev)))
4325 if (netif_is_ovs_master(upper_dev) && vlan_uses_dev(dev))
4327 if (netif_is_ovs_port(dev) && is_vlan_dev(upper_dev))
4330 case NETDEV_CHANGEUPPER:
4331 upper_dev = info->upper_dev;
4332 if (netif_is_bridge_master(upper_dev)) {
4334 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4338 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4341 } else if (netif_is_lag_master(upper_dev)) {
4342 if (info->linking) {
4343 err = mlxsw_sp_port_lag_join(mlxsw_sp_port,
4346 mlxsw_sp_port_lag_col_dist_disable(mlxsw_sp_port);
4347 mlxsw_sp_port_lag_leave(mlxsw_sp_port,
4350 } else if (netif_is_ovs_master(upper_dev)) {
4352 err = mlxsw_sp_port_ovs_join(mlxsw_sp_port);
4354 mlxsw_sp_port_ovs_leave(mlxsw_sp_port);
4362 static int mlxsw_sp_netdevice_port_lower_event(struct net_device *dev,
4363 unsigned long event, void *ptr)
4365 struct netdev_notifier_changelowerstate_info *info;
4366 struct mlxsw_sp_port *mlxsw_sp_port;
4369 mlxsw_sp_port = netdev_priv(dev);
4373 case NETDEV_CHANGELOWERSTATE:
4374 if (netif_is_lag_port(dev) && mlxsw_sp_port->lagged) {
4375 err = mlxsw_sp_port_lag_changed(mlxsw_sp_port,
4376 info->lower_state_info);
4378 netdev_err(dev, "Failed to reflect link aggregation lower state change\n");
4386 static int mlxsw_sp_netdevice_port_event(struct net_device *lower_dev,
4387 struct net_device *port_dev,
4388 unsigned long event, void *ptr)
4391 case NETDEV_PRECHANGEUPPER:
4392 case NETDEV_CHANGEUPPER:
4393 return mlxsw_sp_netdevice_port_upper_event(lower_dev, port_dev,
4395 case NETDEV_CHANGELOWERSTATE:
4396 return mlxsw_sp_netdevice_port_lower_event(port_dev, event,
4403 static int mlxsw_sp_netdevice_lag_event(struct net_device *lag_dev,
4404 unsigned long event, void *ptr)
4406 struct net_device *dev;
4407 struct list_head *iter;
4410 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4411 if (mlxsw_sp_port_dev_check(dev)) {
4412 ret = mlxsw_sp_netdevice_port_event(lag_dev, dev, event,
4422 static int mlxsw_sp_netdevice_port_vlan_event(struct net_device *vlan_dev,
4423 struct net_device *dev,
4424 unsigned long event, void *ptr,
4427 struct mlxsw_sp_port *mlxsw_sp_port = netdev_priv(dev);
4428 struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
4429 struct netdev_notifier_changeupper_info *info = ptr;
4430 struct net_device *upper_dev;
4434 case NETDEV_PRECHANGEUPPER:
4435 upper_dev = info->upper_dev;
4436 if (!netif_is_bridge_master(upper_dev))
4440 if (netdev_has_any_upper_dev(upper_dev) &&
4441 (!netif_is_bridge_master(upper_dev) ||
4442 !mlxsw_sp_bridge_device_is_offloaded(mlxsw_sp,
4446 case NETDEV_CHANGEUPPER:
4447 upper_dev = info->upper_dev;
4448 if (netif_is_bridge_master(upper_dev)) {
4450 err = mlxsw_sp_port_bridge_join(mlxsw_sp_port,
4454 mlxsw_sp_port_bridge_leave(mlxsw_sp_port,
4467 static int mlxsw_sp_netdevice_lag_port_vlan_event(struct net_device *vlan_dev,
4468 struct net_device *lag_dev,
4469 unsigned long event,
4472 struct net_device *dev;
4473 struct list_head *iter;
4476 netdev_for_each_lower_dev(lag_dev, dev, iter) {
4477 if (mlxsw_sp_port_dev_check(dev)) {
4478 ret = mlxsw_sp_netdevice_port_vlan_event(vlan_dev, dev,
4489 static int mlxsw_sp_netdevice_vlan_event(struct net_device *vlan_dev,
4490 unsigned long event, void *ptr)
4492 struct net_device *real_dev = vlan_dev_real_dev(vlan_dev);
4493 u16 vid = vlan_dev_vlan_id(vlan_dev);
4495 if (mlxsw_sp_port_dev_check(real_dev))
4496 return mlxsw_sp_netdevice_port_vlan_event(vlan_dev, real_dev,
4498 else if (netif_is_lag_master(real_dev))
4499 return mlxsw_sp_netdevice_lag_port_vlan_event(vlan_dev,
4506 static bool mlxsw_sp_is_vrf_event(unsigned long event, void *ptr)
4508 struct netdev_notifier_changeupper_info *info = ptr;
4510 if (event != NETDEV_PRECHANGEUPPER && event != NETDEV_CHANGEUPPER)
4512 return netif_is_l3_master(info->upper_dev);
4515 static int mlxsw_sp_netdevice_event(struct notifier_block *unused,
4516 unsigned long event, void *ptr)
4518 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4521 if (event == NETDEV_CHANGEADDR || event == NETDEV_CHANGEMTU)
4522 err = mlxsw_sp_netdevice_router_port_event(dev);
4523 else if (mlxsw_sp_is_vrf_event(event, ptr))
4524 err = mlxsw_sp_netdevice_vrf_event(dev, event, ptr);
4525 else if (mlxsw_sp_port_dev_check(dev))
4526 err = mlxsw_sp_netdevice_port_event(dev, dev, event, ptr);
4527 else if (netif_is_lag_master(dev))
4528 err = mlxsw_sp_netdevice_lag_event(dev, event, ptr);
4529 else if (is_vlan_dev(dev))
4530 err = mlxsw_sp_netdevice_vlan_event(dev, event, ptr);
4532 return notifier_from_errno(err);
4535 static struct notifier_block mlxsw_sp_netdevice_nb __read_mostly = {
4536 .notifier_call = mlxsw_sp_netdevice_event,
4539 static struct notifier_block mlxsw_sp_inetaddr_nb __read_mostly = {
4540 .notifier_call = mlxsw_sp_inetaddr_event,
4541 .priority = 10, /* Must be called before FIB notifier block */
4544 static struct notifier_block mlxsw_sp_inet6addr_nb __read_mostly = {
4545 .notifier_call = mlxsw_sp_inet6addr_event,
4548 static struct notifier_block mlxsw_sp_router_netevent_nb __read_mostly = {
4549 .notifier_call = mlxsw_sp_router_netevent_event,
4552 static const struct pci_device_id mlxsw_sp_pci_id_table[] = {
4553 {PCI_VDEVICE(MELLANOX, PCI_DEVICE_ID_MELLANOX_SPECTRUM), 0},
4557 static struct pci_driver mlxsw_sp_pci_driver = {
4558 .name = mlxsw_sp_driver_name,
4559 .id_table = mlxsw_sp_pci_id_table,
4562 static int __init mlxsw_sp_module_init(void)
4566 register_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4567 register_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4568 register_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
4569 register_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4571 err = mlxsw_core_driver_register(&mlxsw_sp_driver);
4573 goto err_core_driver_register;
4575 err = mlxsw_pci_driver_register(&mlxsw_sp_pci_driver);
4577 goto err_pci_driver_register;
4581 err_pci_driver_register:
4582 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4583 err_core_driver_register:
4584 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4585 unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
4586 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4587 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4591 static void __exit mlxsw_sp_module_exit(void)
4593 mlxsw_pci_driver_unregister(&mlxsw_sp_pci_driver);
4594 mlxsw_core_driver_unregister(&mlxsw_sp_driver);
4595 unregister_netevent_notifier(&mlxsw_sp_router_netevent_nb);
4596 unregister_inet6addr_notifier(&mlxsw_sp_inet6addr_nb);
4597 unregister_inetaddr_notifier(&mlxsw_sp_inetaddr_nb);
4598 unregister_netdevice_notifier(&mlxsw_sp_netdevice_nb);
4601 module_init(mlxsw_sp_module_init);
4602 module_exit(mlxsw_sp_module_exit);
4604 MODULE_LICENSE("Dual BSD/GPL");
4605 MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
4606 MODULE_DESCRIPTION("Mellanox Spectrum driver");
4607 MODULE_DEVICE_TABLE(pci, mlxsw_sp_pci_id_table);