// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2015-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/if_link.h>
#include <linux/netdevice.h>
#include <linux/completion.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/random.h>
#include <linux/jiffies.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <asm/byteorder.h>
#include <net/devlink.h>
#include <trace/events/devlink.h>

#include "core.h"
#include "item.h"
#include "cmd.h"
#include "port.h"
#include "trap.h"
#include "emad.h"
#include "reg.h"
#include "resources.h"
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

static struct workqueue_struct *mlxsw_wq;
static struct workqueue_struct *mlxsw_owq;
struct mlxsw_core_port {
	struct devlink_port devlink_port;
	void *port_driver_priv;
	u8 local_port;
};

void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_port_driver_priv);

static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
{
	return mlxsw_core_port->port_driver_priv != NULL;
}
struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct workqueue_struct *emad_wq;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		atomic64_t tid;
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_res res;
	struct mlxsw_hwmon *hwmon;
	struct mlxsw_thermal *thermal;
	struct mlxsw_core_port *ports;
	unsigned int max_ports;
	bool reload_fail;
	bool fw_flash_in_progress;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};
#define MLXSW_PORT_MAX_PORTS_DEFAULT	0x40

static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
{
	/* Switch ports are numbered from 1 to queried value */
	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
		mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
							   MAX_SYSTEM_PORT) + 1;
	else
		mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;

	mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
				    sizeof(struct mlxsw_core_port), GFP_KERNEL);
	if (!mlxsw_core->ports)
		return -ENOMEM;

	return 0;
}

static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
{
	kfree(mlxsw_core->ports);
}
unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->max_ports;
}
EXPORT_SYMBOL(mlxsw_core_max_ports);

void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
{
	return mlxsw_core->driver_priv;
}
EXPORT_SYMBOL(mlxsw_core_driver_priv);
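
/* Editorial usage sketch (not part of the original file): the driver's
 * private data lives in the driver_priv[] flexible array at the tail of
 * struct mlxsw_core, sized by mlxsw_driver->priv_size when the devlink
 * instance is allocated. A switch driver reaches its state as below;
 * the struct and names here are hypothetical.
 */
struct mlxsw_example_priv {
	unsigned int example_counter;	/* hypothetical driver state */
};

static inline struct mlxsw_example_priv *
mlxsw_example_priv_get(struct mlxsw_core *mlxsw_core)
{
	/* Same allocation as mlxsw_core itself; no extra kmalloc() needed */
	return mlxsw_core_driver_priv(mlxsw_core);
}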
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};
/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of an EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};

static inline const char *
mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
{
	switch (type) {
	case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
		return "query";
	case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
		return "write";
	}
	BUG();
}
static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}

static void mlxsw_emad_pack_op_tlv(char *op_tlv,
				   const struct mlxsw_reg_info *reg,
				   enum mlxsw_core_reg_access_type type,
				   u64 tid)
{
	mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
	mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
	mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
	mlxsw_emad_op_tlv_status_set(op_tlv, 0);
	mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
	mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
	if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_QUERY);
	else
		mlxsw_emad_op_tlv_method_set(op_tlv,
					     MLXSW_EMAD_OP_TLV_METHOD_WRITE);
	mlxsw_emad_op_tlv_class_set(op_tlv,
				    MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
	mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
}
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}

static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
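
/* Editorial note (not from the original source): after the three
 * skb_push() calls above, a constructed EMAD request is laid out as
 *
 *   | Ethernet header | op TLV | reg TLV + register payload | end TLV |
 *
 * i.e. mlxsw_emad_construct() pushes the pieces in reverse order. The
 * accessors used here, e.g. mlxsw_emad_op_tlv_tid_set(), are the
 * setters/getters generated by the MLXSW_ITEM32()/MLXSW_ITEM64()/
 * MLXSW_ITEM_BUF() definitions at the top of this file.
 */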
static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
}

static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
{
	return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
			  MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
}

static char *mlxsw_emad_reg_payload(const char *op_tlv)
{
	return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
}

static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return mlxsw_emad_op_tlv_tid_get(op_tlv);
}

static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
{
	char *op_tlv;

	op_tlv = mlxsw_emad_op_tlv(skb);
	return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
}
static int mlxsw_emad_process_status(char *op_tlv,
				     enum mlxsw_emad_op_tlv_status *p_status)
{
	*p_status = mlxsw_emad_op_tlv_status_get(op_tlv);

	switch (*p_status) {
	case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
		return 0;
	case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
	case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
		return -EAGAIN;
	case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
	case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
	case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
	case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
	case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
	default:
		return -EIO;
	}
}

static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}
struct mlxsw_reg_trans {
	struct list_head list;
	struct list_head bulk_list;
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;
	unsigned int retries;
	u64 tid;
	struct completion completion;
	atomic_t active;
	mlxsw_reg_trans_cb_t *cb;
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

#define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS	3000
#define MLXSW_EMAD_TIMEOUT_MS			200
static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	if (trans->core->fw_flash_in_progress)
		timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);

	queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
			   timeout << trans->retries);
}
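
/* Editorial note (not from the original source): shifting the base
 * timeout left by the retry count gives exponential backoff, e.g. with
 * the 200 ms base: 200 ms for the first transmission, then 400, 800,
 * 1600, ... ms for the retransmissions, up to MLXSW_EMAD_MAX_RETRY
 * attempts (defined in emad.h). During firmware flash the base is
 * 3000 ms instead.
 */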
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}

static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}
static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;

		if (!atomic_dec_and_test(&trans->active))
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}

static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}
static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}
/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}

static const struct mlxsw_listener mlxsw_emad_rx_listener =
	MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
		  EMAD, DISCARD);
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	struct workqueue_struct *emad_wq;
	u64 tid;
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return 0;

	emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
	if (!emad_wq)
		return -ENOMEM;
	mlxsw_core->emad_wq = emad_wq;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
				       mlxsw_core);
	if (err)
		goto err_trap_register;

	err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;
	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
err_trap_register:
	destroy_workqueue(mlxsw_core->emad_wq);
	return err;
}

static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
		return;

	mlxsw_core->emad.use_emad = false;
	mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
				   mlxsw_core);
	destroy_workqueue(mlxsw_core->emad_wq);
}
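
/* Editorial worked example (not from the original source): with the
 * upper 32 bits randomized above, if get_random_bytes() produced
 * 0xa1b2c3d4 the initial tid is 0xa1b2c3d400000000 and successive
 * transactions (see mlxsw_core_tid_get() below) use
 * 0xa1b2c3d400000001, 0xa1b2c3d400000002, ... Responses whose upper
 * 32 bits do not match this device's prefix are never found in
 * emad.trans_list and are simply dropped.
 */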
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);
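
/* Editorial usage sketch (not part of the original file): a switch
 * driver registers itself once at module init. All names below are
 * hypothetical; only the mlxsw_core_driver_register()/_unregister()
 * pair and the mlxsw_driver fields actually used in this file (kind,
 * priv_size, init, fini) are assumed.
 *
 *	static struct mlxsw_driver mlxsw_example_driver = {
 *		.kind		= "mlxsw_example",
 *		.priv_size	= sizeof(struct mlxsw_example_priv),
 *		.init		= mlxsw_example_init,
 *		.fini		= mlxsw_example_fini,
 *	};
 *
 *	static int __init mlxsw_example_module_init(void)
 *	{
 *		return mlxsw_core_driver_register(&mlxsw_example_driver);
 *	}
 */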
static struct mlxsw_driver *__driver_find(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
		if (strcmp(mlxsw_driver->kind, kind) == 0)
			return mlxsw_driver;
	}
	return NULL;
}

static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}
static int mlxsw_devlink_port_split(struct devlink *devlink,
				    unsigned int port_index,
				    unsigned int count,
				    struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_split)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_split(mlxsw_core, port_index, count,
					      extack);
}

static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
				      unsigned int port_index,
				      struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);

	if (port_index >= mlxsw_core->max_ports) {
		NL_SET_ERR_MSG_MOD(extack, "Port index exceeds maximum number of ports");
		return -EINVAL;
	}
	if (!mlxsw_core->driver->port_unsplit)
		return -EOPNOTSUPP;
	return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index,
						extack);
}
static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}

static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}
static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}

static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
				       enum devlink_port_type port_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->port_type_set)
		return -EOPNOTSUPP;

	return mlxsw_driver->port_type_set(mlxsw_core,
					   mlxsw_core_port->local_port,
					   port_type);
}
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}

static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}
static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}

static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}
static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}

static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}
static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}

static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
	    !mlxsw_core_port_check(mlxsw_core_port))
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}
static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
						struct netlink_ext_ack *extack)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	int err;

	if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
		return -EOPNOTSUPP;

	mlxsw_core_bus_device_unregister(mlxsw_core, true);
	err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
					     mlxsw_core->bus,
					     mlxsw_core->bus_priv, true,
					     devlink);
	mlxsw_core->reload_fail = !!err;

	return err;
}
static const struct devlink_ops mlxsw_devlink_ops = {
	.reload				= mlxsw_devlink_core_bus_device_reload,
	.port_type_set			= mlxsw_devlink_port_type_set,
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv, bool reload,
				   struct devlink *devlink)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct mlxsw_res *res;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;

	if (!reload) {
		alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
		devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
		if (!devlink) {
			err = -ENOMEM;
			goto err_devlink_alloc;
		}
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	res = mlxsw_driver->res_query_enabled ? &mlxsw_core->res : NULL;
	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile, res);
	if (err)
		goto err_bus_init;

	if (mlxsw_driver->resources_register && !reload) {
		err = mlxsw_driver->resources_register(mlxsw_core);
		if (err)
			goto err_register_resources;
	}

	err = mlxsw_ports_init(mlxsw_core);
	if (err)
		goto err_ports_init;

	if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
	    MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
		alloc_size = sizeof(u8) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
			MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	if (!reload) {
		err = devlink_register(devlink, mlxsw_bus_info->dev);
		if (err)
			goto err_devlink_register;
	}

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
				 &mlxsw_core->thermal);
	if (err)
		goto err_thermal_init;

	if (mlxsw_driver->init) {
		err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
		if (err)
			goto err_driver_init;
	}

	return 0;

err_driver_init:
	mlxsw_thermal_fini(mlxsw_core->thermal);
err_thermal_init:
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
err_hwmon_init:
	if (!reload)
		devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_ports_fini(mlxsw_core);
err_ports_init:
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
err_register_resources:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	if (!reload)
		devlink_free(devlink);
err_devlink_alloc:
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
				      bool reload)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	if (mlxsw_core->reload_fail) {
		if (!reload)
			/* Only the parts that were not de-initialized in the
			 * failed reload attempt need to be de-initialized.
			 */
			goto reload_fail_deinit;
		else
			return;
	}

	if (mlxsw_core->driver->fini)
		mlxsw_core->driver->fini(mlxsw_core);
	mlxsw_thermal_fini(mlxsw_core->thermal);
	mlxsw_hwmon_fini(mlxsw_core->hwmon);
	if (!reload)
		devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	kfree(mlxsw_core->lag.mapping);
	mlxsw_ports_fini(mlxsw_core);
	if (!reload)
		devlink_resources_unregister(devlink, NULL);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	if (!reload)
		devlink_free(devlink);

	return;

reload_fail_deinit:
	devlink_unregister(devlink);
	devlink_resources_unregister(devlink, NULL);
	devlink_free(devlink);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
				  const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
						  tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);

int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    const struct mlxsw_tx_info *tx_info)
{
	return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
					     tx_info);
}
EXPORT_SYMBOL(mlxsw_core_skb_transmit);
static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
				   const struct mlxsw_rx_listener *rxl_b)
{
	return (rxl_a->func == rxl_b->func &&
		rxl_a->local_port == rxl_b->local_port &&
		rxl_a->trap_id == rxl_b->trap_id);
}

static struct mlxsw_rx_listener_item *
__find_rx_listener_item(struct mlxsw_core *mlxsw_core,
			const struct mlxsw_rx_listener *rxl,
			void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
		if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
		    rxl_item->priv == priv)
			return rxl_item;
	}
	return NULL;
}
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);

void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}
static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
				      const struct mlxsw_event_listener *el_b)
{
	return (el_a->func == el_b->func &&
		el_a->trap_id == el_b->trap_id);
}

static struct mlxsw_event_listener_item *
__find_event_listener_item(struct mlxsw_core *mlxsw_core,
			   const struct mlxsw_event_listener *el,
			   void *priv)
{
	struct mlxsw_event_listener_item *el_item;

	list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
		if (__is_event_listener_equal(&el_item->el, el) &&
		    el_item->priv == priv)
			return el_item;
	}
	return NULL;
}
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);
void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
					const struct mlxsw_listener *listener,
					void *priv)
{
	if (listener->is_event)
		return mlxsw_core_event_listener_register(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		return mlxsw_core_rx_listener_register(mlxsw_core,
						&listener->u.rx_listener,
						priv);
}

static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
					   const struct mlxsw_listener *listener,
					   void *priv)
{
	if (listener->is_event)
		mlxsw_core_event_listener_unregister(mlxsw_core,
						&listener->u.event_listener,
						priv);
	else
		mlxsw_core_rx_listener_unregister(mlxsw_core,
						  &listener->u.rx_listener,
						  priv);
}
int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_listener *listener, void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
			    listener->trap_group, listener->is_ctrl);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	if (err)
		goto err_trap_set;

	return 0;

err_trap_set:
	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_trap_register);
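
/* Editorial usage sketch (not part of the original file): a driver
 * typically declares its listeners with the MLXSW_RXL() macro (used
 * above for the EMAD listener) and registers them through
 * mlxsw_core_trap_register(). The trap and group names below are
 * hypothetical placeholders.
 *
 *	static const struct mlxsw_listener example_listener =
 *		MLXSW_RXL(example_rx_func, EXAMPLE_TRAP, TRAP_TO_CPU, false,
 *			  EXAMPLE_GROUP, DISCARD);
 *
 *	err = mlxsw_core_trap_register(mlxsw_core, &example_listener, priv);
 */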
void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
				const struct mlxsw_listener *listener,
				void *priv)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	if (!listener->is_event) {
		mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
				    listener->trap_id, listener->trap_group,
				    listener->is_ctrl);
		mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
	}

	mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
}
EXPORT_SYMBOL(mlxsw_core_trap_unregister);
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}

static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree_rcu(trans, rcu);
		return err;
	}
	return 0;
}
int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);

int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}
int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
{
	struct mlxsw_reg_trans *trans;
	struct mlxsw_reg_trans *tmp;
	int sum_err = 0;
	int err;

	list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
		err = mlxsw_reg_trans_wait(trans);
		if (err && sum_err == 0)
			sum_err = err; /* first error to be returned */
	}
	return sum_err;
}
EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
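
/* Editorial usage sketch (not part of the original file): several
 * transactions can share one bulk list and be waited on together. The
 * HPKT register is used here only because its pack helper and length
 * are visible in this file; a real caller would batch meaningful
 * queries and pass a callback to consume each response (a NULL
 * callback is allowed, see mlxsw_emad_process_response() above).
 */
static int __maybe_unused
mlxsw_core_example_bulk(struct mlxsw_core *mlxsw_core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN] = {0};
	LIST_HEAD(bulk_list);
	int err;

	err = mlxsw_reg_trans_query(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl,
				    &bulk_list, NULL, 0);
	if (err)
		return err;
	/* ...more transactions may be queued on bulk_list here... */
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}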
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	bool reset_ok;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	/* There is a special treatment needed for MRSR (reset) register.
	 * The command interface will return error after the command
	 * is executed, so tell the lower layer to expect it
	 * and cope accordingly.
	 */
	reset_ok = reg->id == MLXSW_REG_MRSR_ID;

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, reset_ok, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}
static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
				     char *payload, size_t payload_len,
				     unsigned long cb_priv)
{
	char *orig_payload = (char *) cb_priv;

	memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->lag_port_index);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= mlxsw_core->max_ports))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();
	if (!found)
		goto drop;

	rxl->func(skb, local_port, rxl_item->priv);
	return;

drop:
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
					u16 lag_id, u8 port_index)
{
	return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
	       port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
				u16 lag_id, u8 port_index, u8 local_port)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
			      u16 lag_id, u8 port_index)
{
	int index = mlxsw_core_lag_mapping_index(mlxsw_core,
						 lag_id, port_index);

	return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
				  u16 lag_id, u8 local_port)
{
	int i;

	for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
		int index = mlxsw_core_lag_mapping_index(mlxsw_core,
							 lag_id, i);

		if (mlxsw_core->lag.mapping[index] == local_port)
			mlxsw_core->lag.mapping[index] = 0;
	}
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
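
/* Editorial worked example (not from the original source): the mapping
 * is a flat [MAX_LAG][MAX_LAG_MEMBERS] array. If MAX_LAG_MEMBERS were
 * 32 (a hypothetical value), lag_id 2 / port_index 3 would map to
 * index 32 * 2 + 3 = 67, and mlxsw_core_lag_mapping_clear() walks the
 * 32 member slots of one LAG to erase a removed local port.
 */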
bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
			  enum mlxsw_res_id res_id)
{
	return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
		       enum mlxsw_res_id res_id)
{
	return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct devlink *devlink = priv_to_devlink(mlxsw_core);
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
	int err;

	mlxsw_core_port->local_port = local_port;
	err = devlink_port_register(devlink, devlink_port, local_port);
	if (err)
		memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
	return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	devlink_port_unregister(devlink_port);
	memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			     void *port_driver_priv, struct net_device *dev,
			     u32 port_number, bool split,
			     u32 split_port_subnumber)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_attrs_set(devlink_port, DEVLINK_PORT_FLAVOUR_PHYSICAL,
			       port_number, split, split_port_subnumber);
	devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
			    void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
			   void *port_driver_priv)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	mlxsw_core_port->port_driver_priv = port_driver_priv;
	devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);
enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
						u8 local_port)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);

int mlxsw_core_port_get_phys_port_name(struct mlxsw_core *mlxsw_core,
				       u8 local_port, char *name, size_t len)
{
	struct mlxsw_core_port *mlxsw_core_port =
					&mlxsw_core->ports[local_port];
	struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

	return devlink_port_get_phys_port_name(devlink_port, name, len);
}
EXPORT_SYMBOL(mlxsw_core_port_get_phys_port_name);
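
/* Editorial note (not from the original source): the expected port
 * lifecycle, as implied by the functions above, is
 *
 *	mlxsw_core_port_init()     - register the devlink port
 *	mlxsw_core_port_eth_set()  - or _ib_set(); attach driver priv and
 *				     announce the port type
 *	mlxsw_core_port_clear()    - detach on teardown
 *	mlxsw_core_port_fini()     - unregister the devlink port
 */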
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
				    const char *buf, size_t size)
{
	__be32 *m = (__be32 *) buf;
	int i;
	int count = size / sizeof(__be32);

	/* Trim trailing zero words so only the meaningful part is dumped */
	for (i = count - 1; i >= 0; i--)
		if (m[i])
			break;
	count = i + 1;

	for (i = 0; i < count; i += 4)
		dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
			i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
			be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
		   u32 in_mod, bool out_mbox_direct, bool reset_ok,
		   char *in_mbox, size_t in_mbox_size,
		   char *out_mbox, size_t out_mbox_size)
{
	u8 status;
	int err;

	BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
	if (!mlxsw_core->bus->cmd_exec)
		return -EOPNOTSUPP;

	dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
		opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
	if (in_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
	}

	err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
					opcode_mod, in_mod, out_mbox_direct,
					in_mbox, in_mbox_size,
					out_mbox, out_mbox_size, &status);

	if (!err && out_mbox) {
		dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
		mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
	}

	if (reset_ok && err == -EIO &&
	    status == MLXSW_CMD_STATUS_RUNNING_RESET) {
		err = 0;
	} else if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod, status, mlxsw_cmd_status_str(status));
	} else if (err == -ETIMEDOUT) {
		dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
			opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
			in_mod);
	}

	return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
	return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
	return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
	flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);
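
/* Editorial usage sketch (not part of the original file): work queued
 * via mlxsw_core_schedule_work() runs on the ordered workqueue, so all
 * such items are serialized against each other, and
 * mlxsw_core_flush_owq() waits for every one of them. The handler and
 * names below are hypothetical.
 */
static void __maybe_unused mlxsw_core_example_work_fn(struct work_struct *work)
{
	/* Runs strictly after any previously queued ordered work */
}
/*
 *	DECLARE_WORK(example_work, mlxsw_core_example_work_fn);
 *	mlxsw_core_schedule_work(&example_work);
 *	...
 *	mlxsw_core_flush_owq();
 */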
int mlxsw_core_kvd_sizes_get(struct mlxsw_core *mlxsw_core,
			     const struct mlxsw_config_profile *profile,
			     u64 *p_single_size, u64 *p_double_size,
			     u64 *p_linear_size)
{
	struct mlxsw_driver *driver = mlxsw_core->driver;

	if (!driver->kvd_sizes_get)
		return -EINVAL;

	return driver->kvd_sizes_get(mlxsw_core, profile,
				     p_single_size, p_double_size,
				     p_linear_size);
}
EXPORT_SYMBOL(mlxsw_core_kvd_sizes_get);
void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core->fw_flash_in_progress = true;
}
EXPORT_SYMBOL(mlxsw_core_fw_flash_start);

void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
{
	mlxsw_core->fw_flash_in_progress = false;
}
EXPORT_SYMBOL(mlxsw_core_fw_flash_end);
static int __init mlxsw_core_module_init(void)
{
	int err;

	mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
	if (!mlxsw_wq)
		return -ENOMEM;
	mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
					    mlxsw_core_driver_name);
	if (!mlxsw_owq) {
		err = -ENOMEM;
		goto err_alloc_ordered_workqueue;
	}
	return 0;

err_alloc_ordered_workqueue:
	destroy_workqueue(mlxsw_wq);
	return err;
}

static void __exit mlxsw_core_module_exit(void)
{
	destroy_workqueue(mlxsw_owq);
	destroy_workqueue(mlxsw_wq);
}
module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");