/*
 * drivers/net/ethernet/mellanox/mlxsw/core.c
 * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
 * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
 * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/u64_stats_sync.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"

static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct dentry *mlxsw_core_dbg_root;

static struct workqueue_struct *mlxsw_wq;

struct mlxsw_core_pcpu_stats {
	u64 trap_rx_packets[MLXSW_TRAP_ID_MAX];
	u64 trap_rx_bytes[MLXSW_TRAP_ID_MAX];
	u64 port_rx_packets[MLXSW_PORT_MAX_PORTS];
	u64 port_rx_bytes[MLXSW_PORT_MAX_PORTS];
	struct u64_stats_sync syncp;
	u32 trap_rx_dropped[MLXSW_TRAP_ID_MAX];
	u32 port_rx_dropped[MLXSW_PORT_MAX_PORTS];
	u32 trap_rx_invalid;
	u32 port_rx_invalid;
};

struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
	struct dentry *dbg_dir;
	struct {
		struct debugfs_blob_wrapper vsd_blob;
		struct debugfs_blob_wrapper psid_blob;
	} dbg;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_resources resources;
	struct mlxsw_hwmon *hwmon;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);

struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};

/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the operation TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);

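/* Taken together, the item definitions above describe the EMAD frame
 * that mlxsw_emad_construct() assembles below:
 *
 *	+----------------------------------------------------+
 *	| Ethernet header: dmac, smac, ethertype 0x8932,      |
 *	|                  mlx_proto, ver                     |
 *	+----------------------------------------------------+
 *	| Operation TLV (MLXSW_EMAD_OP_TLV_LEN u32s)          |
 *	+----------------------------------------------------+
 *	| Register TLV header + register payload              |
 *	+----------------------------------------------------+
 *	| End TLV (MLXSW_EMAD_END_TLV_LEN u32s)               |
 *	+----------------------------------------------------+
 */
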
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}

static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}

static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}

static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}

static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}

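/* Status handling above: BUSY and MESSAGE_RECEIPT_ACK map to -EAGAIN so
 * the transaction is retransmitted (see mlxsw_emad_transmit_retry());
 * every other non-success status is treated as a hard -EIO.
 */
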
struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_MS 200

static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	mlxsw_core_schedule_dw(&trans->timeout_dw, timeout << trans->retries);
}

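/* The shift by trans->retries gives an exponential backoff: with
 * MLXSW_EMAD_TIMEOUT_MS of 200, successive attempts time out after
 * 200ms, 400ms, 800ms and so on.
 */
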
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}

static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;

		if (!atomic_dec_and_test(&trans->active))
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}

static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}

static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}

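/* trans->active arbitrates between the response path and the timeout
 * path: whichever of mlxsw_emad_process_response() and
 * mlxsw_emad_trans_timeout_work() decrements it to zero first owns the
 * transaction, and the loser simply backs off.
 */
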
/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
	.func = mlxsw_emad_rx_listener_func,
	.local_port = MLXSW_PORT_DONT_CARE,
	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
};

static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_TRAP_ID_ETHEMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}

static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	u64 tid;
	int err;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_rx_listener_register(mlxsw_core,
					      &mlxsw_emad_rx_listener,
					      mlxsw_core);
	if (err)
		return err;

	err = mlxsw_emad_traps_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;

	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_core->emad.use_emad = false;
	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
			    MLXSW_TRAP_ID_ETHEMAD);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
}

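/* Until mlxsw_emad_init() sets use_emad, register access falls back to
 * the command interface (see mlxsw_core_reg_access() below);
 * mlxsw_emad_fini() clears the flag again before disarming the trap.
 */
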
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}

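/* The skb is fully reserved on allocation so that mlxsw_emad_construct()
 * can then skb_push() the TLVs and the Ethernet header back-to-front,
 * leaving skb->data pointing at the start of the finished frame.
 */
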
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		trans->tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}

/*****************
 * Core functions
 *****************/

static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_core *mlxsw_core = file->private;
	struct mlxsw_core_pcpu_stats *p;
	u64 rx_packets, rx_bytes;
	u64 tmp_rx_packets, tmp_rx_bytes;
	u32 rx_dropped, rx_invalid;
	unsigned int start;
	int i;
	int j;
	static const char hdr[] =
		"     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";

	seq_printf(file, hdr);
	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->trap_rx_packets[i];
				tmp_rx_bytes = p->trap_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->trap_rx_dropped[i];
		}
		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->trap_rx_invalid;
	}
	seq_printf(file, "trap INV                           %10u\n",
		   rx_invalid);
	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->port_rx_packets[i];
				tmp_rx_bytes = p->port_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->port_rx_dropped[i];
		}
		seq_printf(file, "port %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->port_rx_invalid;
	}
	seq_printf(file, "port INV                           %10u\n",
		   rx_invalid);
	return 0;
}

static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
	struct mlxsw_core *mlxsw_core = inode->i_private;

	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
	.owner = THIS_MODULE,
	.open = mlxsw_core_rx_stats_dbg_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};

int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);

static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	if (!mlxsw_driver) {
		spin_unlock(&mlxsw_core_driver_list_lock);
		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
		spin_lock(&mlxsw_core_driver_list_lock);
		mlxsw_driver = __driver_find(kind);
	}
	if (mlxsw_driver) {
		if (!try_module_get(mlxsw_driver->owner))
			mlxsw_driver = NULL;
	}
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}

static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	if (!mlxsw_driver)
		return;
	module_put(mlxsw_driver->owner);
}

static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
						 mlxsw_core_dbg_root);
	if (!mlxsw_core->dbg_dir)
		return -ENOMEM;
	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.vsd_blob);
	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.psid_blob);
	return 0;
}

static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
	debugfs_remove_recursive(mlxsw_core->dbg_dir);
}

static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= MLXSW_PORT_MAX_PORTS)
		return -EINVAL;
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= MLXSW_PORT_MAX_PORTS)
		return -EINVAL;
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
}

static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}

static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}

static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}

static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}

static const struct devlink_ops mlxsw_devlink_ops = {
	.port_split		= mlxsw_devlink_port_split,
	.port_unsplit		= mlxsw_devlink_port_unsplit,
	.sb_pool_get		= mlxsw_devlink_sb_pool_get,
	.sb_pool_set		= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get	= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set	= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get	= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set	= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot	= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear	= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get	= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get = mlxsw_devlink_sb_occ_tc_port_bind_get,
};

int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct devlink *devlink;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
	if (!devlink) {
		err = -ENOMEM;
		goto err_devlink_alloc;
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	mlxsw_core->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
	if (!mlxsw_core->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
			      &mlxsw_core->resources);
	if (err)
		goto err_bus_init;

	if (mlxsw_core->resources.max_lag_valid &&
	    mlxsw_core->resources.max_ports_in_lag_valid) {
		alloc_size = sizeof(u8) * mlxsw_core->resources.max_lag *
			mlxsw_core->resources.max_ports_in_lag;
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = devlink_register(devlink, mlxsw_bus_info->dev);
	if (err)
		goto err_devlink_register;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
	if (err)
		goto err_driver_init;

	err = mlxsw_core_debugfs_init(mlxsw_core);
	if (err)
		goto err_debugfs_init;

	return 0;

err_debugfs_init:
	mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
err_hwmon_init:
	devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
	devlink_free(devlink);
err_devlink_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);

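/* Note the strict bring-up order above: bus -> LAG mapping -> EMAD ->
 * devlink -> hwmon -> driver -> debugfs. The error labels unwind it in
 * exactly the reverse order, matching mlxsw_core_bus_device_unregister()
 * below.
 */
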
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	mlxsw_core_debugfs_fini(mlxsw_core);
	mlxsw_core->driver->fini(mlxsw_core);
	devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	kfree(mlxsw_core->lag.mapping);
	free_percpu(mlxsw_core->pcpu_stats);
	devlink_free(devlink);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);

bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);

static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}

int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);

static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}

static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}

int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);

void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);

static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree_rcu(trans, rcu);
		return err;
	}
	return 0;
}

int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);

static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}

int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);

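/* A minimal usage sketch of the bulk API (hypothetical caller, not part
 * of this file): a driver may queue several queries on one bulk list and
 * wait for all of them with a single call:
 *
 *	LIST_HEAD(bulk_list);
 *	char sbpm_pl[MLXSW_REG_SBPM_LEN];
 *	int err;
 *
 *	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(sbpm), sbpm_pl,
 *				    &bulk_list, cb, cb_priv);
 *	if (err)
 *		return err;
 *	return mlxsw_reg_trans_bulk_wait(&bulk_list);
 */
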
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}

static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

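/* Synchronous register access is thus built on top of the asynchronous
 * EMAD path: mlxsw_core_reg_access_cb() copies the response payload back
 * over the caller's buffer once the single-entry bulk list completes.
 */
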
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);

void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	struct mlxsw_core_pcpu_stats *pcpu_stats;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= MLXSW_PORT_MAX_PORTS))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->port_rx_packets[local_port]++;
	pcpu_stats->port_rx_bytes[local_port] += skb->len;
	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
	if (local_port >= MLXSW_PORT_MAX_PORTS)
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return mlxsw_core->resources.max_ports_in_lag * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < mlxsw_core->resources.max_ports_in_lag; i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);

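/* The LAG mapping is a flat byte array indexed as
 * max_ports_in_lag * lag_id + port_index, sized in
 * mlxsw_core_bus_device_register() from the resources reported by the
 * device.
 */
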
struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core)
{
	return &mlxsw_core->resources;
}
EXPORT_SYMBOL(mlxsw_core_resources_get);

int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
			 struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
			 struct net_device *dev, bool split, u32 split_group)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	if (split)
		devlink_port_split_set(devlink_port, split_group);
	devlink_port_type_eth_set(devlink_port, dev);
	return devlink_port_register(devlink, devlink_port, local_port);
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
{
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_fini);

static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	i++;
	count = i;
	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}
	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

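/* All delayed work, such as the EMAD timeouts above, runs on the
 * dedicated mlxsw_wq allocated in mlxsw_core_module_init(). The
 * workqueue is created with WQ_MEM_RECLAIM so that queued work can
 * still make forward progress under memory pressure.
 */
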
static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
	if (!mlxsw_core_dbg_root) {
		err = -ENOMEM;
		goto err_debugfs_create_dir;
	}
	return 0;

err_debugfs_create_dir:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	debugfs_remove_recursive(mlxsw_core_dbg_root);
	destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");