GNU Linux-libre 4.9.288-gnu1
[releases.git] / drivers / net / ethernet / mellanox / mlxsw / core.c
1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/core.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/device.h>
40 #include <linux/export.h>
41 #include <linux/err.h>
42 #include <linux/if_link.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/u64_stats_sync.h>
46 #include <linux/netdevice.h>
47 #include <linux/completion.h>
48 #include <linux/skbuff.h>
49 #include <linux/etherdevice.h>
50 #include <linux/types.h>
51 #include <linux/string.h>
52 #include <linux/gfp.h>
53 #include <linux/random.h>
54 #include <linux/jiffies.h>
55 #include <linux/mutex.h>
56 #include <linux/rcupdate.h>
57 #include <linux/slab.h>
58 #include <linux/workqueue.h>
59 #include <asm/byteorder.h>
60 #include <net/devlink.h>
61 #include <trace/events/devlink.h>
62
63 #include "core.h"
64 #include "item.h"
65 #include "cmd.h"
66 #include "port.h"
67 #include "trap.h"
68 #include "emad.h"
69 #include "reg.h"
70
/* Registry of all registered mlxsw drivers (e.g. spectrum, switchx2);
 * additions, removals and lookups are serialized by
 * mlxsw_core_driver_list_lock.
 */
static LIST_HEAD(mlxsw_core_driver_list);
static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);

static const char mlxsw_core_driver_name[] = "mlxsw_core";

/* Root of this module's debugfs tree; per-device directories are
 * created beneath it (see mlxsw_core_debugfs_init()).
 */
static struct dentry *mlxsw_core_dbg_root;

/* Workqueue used for delayed work such as EMAD transaction timeouts
 * (scheduled via mlxsw_core_schedule_dw()); presumably allocated in
 * module init, which is outside this chunk.
 */
static struct workqueue_struct *mlxsw_wq;
79
/* Per-CPU RX statistics, indexed by trap ID and by local port.
 * The 64-bit packet/byte counters are read under the syncp sequence
 * counter so 32-bit architectures see consistent values; the u32
 * dropped/invalid counters are summed without synchronization
 * (debugfs reporting only, see mlxsw_core_rx_stats_dbg_read()).
 */
struct mlxsw_core_pcpu_stats {
	u64			trap_rx_packets[MLXSW_TRAP_ID_MAX];
	u64			trap_rx_bytes[MLXSW_TRAP_ID_MAX];
	u64			port_rx_packets[MLXSW_PORT_MAX_PORTS];
	u64			port_rx_bytes[MLXSW_PORT_MAX_PORTS];
	struct u64_stats_sync	syncp;
	u32			trap_rx_dropped[MLXSW_TRAP_ID_MAX];
	u32			port_rx_dropped[MLXSW_PORT_MAX_PORTS];
	u32			trap_rx_invalid;	/* trap ID out of range */
	u32			port_rx_invalid;	/* local port out of range */
};
91
/* Core instance tied to one bus device; glues a bus (pci/i2c) to a
 * driver (spectrum/switchx2) and hosts EMAD transaction state.
 */
struct mlxsw_core {
	struct mlxsw_driver *driver;
	const struct mlxsw_bus *bus;
	void *bus_priv;
	const struct mlxsw_bus_info *bus_info;
	struct list_head rx_listener_list;
	struct list_head event_listener_list;
	struct {
		/* Upper 32 bits are a per-device random stamp, lower bits
		 * a running counter (see mlxsw_emad_init()).
		 */
		atomic64_t tid;
		/* In-flight transactions; traversed under RCU by the EMAD
		 * RX listener.
		 */
		struct list_head trans_list;
		spinlock_t trans_list_lock; /* protects trans_list writes */
		bool use_emad;
	} emad;
	struct mlxsw_core_pcpu_stats __percpu *pcpu_stats;
	struct dentry *dbg_dir;
	struct {
		struct debugfs_blob_wrapper vsd_blob;
		struct debugfs_blob_wrapper psid_blob;
	} dbg;
	struct {
		u8 *mapping; /* lag_id+port_index to local_port mapping */
	} lag;
	struct mlxsw_resources resources;
	struct mlxsw_hwmon *hwmon;
	unsigned long driver_priv[0];
	/* driver_priv has to be always the last item */
};
119
120 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
121 {
122         return mlxsw_core->driver_priv;
123 }
124 EXPORT_SYMBOL(mlxsw_core_driver_priv);
125
/* Node on mlxsw_core->rx_listener_list pairing a listener with its
 * registration-time private data.
 */
struct mlxsw_rx_listener_item {
	struct list_head list;
	struct mlxsw_rx_listener rxl;
	void *priv;
};

/* Node on mlxsw_core->event_listener_list pairing an event listener
 * with its registration-time private data.
 */
struct mlxsw_event_listener_item {
	struct list_head list;
	struct mlxsw_event_listener el;
	void *priv;
};
137
/******************
 * EMAD processing
 ******************/

/* emad_eth_hdr_dmac
 * Destination MAC in EMAD's Ethernet header.
 * Must be set to 01:02:c9:00:00:01
 */
MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);

/* emad_eth_hdr_smac
 * Source MAC in EMAD's Ethernet header.
 * Must be set to 00:02:c9:01:02:03
 */
MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);

/* emad_eth_hdr_ethertype
 * Ethertype in EMAD's Ethernet header.
 * Must be set to 0x8932
 */
MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);

/* emad_eth_hdr_mlx_proto
 * Mellanox protocol.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);

/* emad_eth_hdr_ver
 * Mellanox protocol version.
 * Must be set to 0x0.
 */
MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);

/* emad_op_tlv_type
 * Type of the TLV.
 * Must be set to 0x1 (operation TLV).
 */
MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);

/* emad_op_tlv_len
 * Length of the operation TLV in u32.
 * Must be set to 0x4.
 */
MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);

/* emad_op_tlv_dr
 * Direct route bit. Setting to 1 indicates the EMAD is a direct route
 * EMAD. DR TLV must follow.
 *
 * Note: Currently not supported and must not be set.
 */
MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);

/* emad_op_tlv_status
 * Returned status in case of EMAD response. Must be set to 0 in case
 * of EMAD request.
 * 0x0 - success
 * 0x1 - device is busy. Requester should retry
 * 0x2 - Mellanox protocol version not supported
 * 0x3 - unknown TLV
 * 0x4 - register not supported
 * 0x5 - operation class not supported
 * 0x6 - EMAD method not supported
 * 0x7 - bad parameter (e.g. port out of range)
 * 0x8 - resource not available
 * 0x9 - message receipt acknowledgment. Requester should retry
 * 0x70 - internal error
 */
MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);

/* emad_op_tlv_register_id
 * Register ID of register within register TLV.
 */
MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);

/* emad_op_tlv_r
 * Response bit. Setting to 1 indicates Response, otherwise request.
 */
MLXSW_ITEM32(emad, op_tlv, r, 0x04, 15, 1);

/* emad_op_tlv_method
 * EMAD method type.
 * 0x1 - query
 * 0x2 - write
 * 0x3 - send (currently not supported)
 * 0x4 - event
 */
MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);

/* emad_op_tlv_class
 * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
 */
MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);

/* emad_op_tlv_tid
 * EMAD transaction ID. Used for pairing request and response EMADs.
 */
MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);

/* emad_reg_tlv_type
 * Type of the TLV.
 * Must be set to 0x3 (register TLV).
 */
MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);

/* emad_reg_tlv_len
 * Length of the register TLV in u32.
 */
MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);

/* emad_end_tlv_type
 * Type of the TLV.
 * Must be set to 0x0 (end TLV).
 */
MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);

/* emad_end_tlv_len
 * Length of the end TLV in u32.
 * Must be set to 1.
 */
MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
260
/* Direction of a register access: read (query) or write. */
enum mlxsw_core_reg_access_type {
	MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
	MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
};
265
266 static inline const char *
267 mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
268 {
269         switch (type) {
270         case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
271                 return "query";
272         case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
273                 return "write";
274         }
275         BUG();
276 }
277
/* Fill in the end TLV, which terminates an EMAD frame. */
static void mlxsw_emad_pack_end_tlv(char *end_tlv)
{
	mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
	mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
}

/* Fill in the register TLV: a one-u32 header followed by the raw
 * register payload. TLV length is counted in u32 units, hence the +1
 * for the header itself.
 */
static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
				    const struct mlxsw_reg_info *reg,
				    char *payload)
{
	mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
	mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
	memcpy(reg_tlv + sizeof(u32), payload, reg->len);
}
292
293 static void mlxsw_emad_pack_op_tlv(char *op_tlv,
294                                    const struct mlxsw_reg_info *reg,
295                                    enum mlxsw_core_reg_access_type type,
296                                    u64 tid)
297 {
298         mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
299         mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
300         mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
301         mlxsw_emad_op_tlv_status_set(op_tlv, 0);
302         mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
303         mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
304         if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
305                 mlxsw_emad_op_tlv_method_set(op_tlv,
306                                              MLXSW_EMAD_OP_TLV_METHOD_QUERY);
307         else
308                 mlxsw_emad_op_tlv_method_set(op_tlv,
309                                              MLXSW_EMAD_OP_TLV_METHOD_WRITE);
310         mlxsw_emad_op_tlv_class_set(op_tlv,
311                                     MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
312         mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
313 }
314
/* Prepend the fixed EMAD Ethernet header (well-known DMAC/SMAC,
 * Mellanox ethertype/protocol/version) and reset the skb MAC header.
 * Always returns 0; the int return is kept for the existing callers.
 */
static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
{
	char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);

	mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
	mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
	mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
	mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
	mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);

	skb_reset_mac_header(skb);

	return 0;
}
329
/* Build a complete EMAD frame in @skb. The skb was fully reserved by
 * mlxsw_emad_alloc(), so the frame is assembled back-to-front with
 * skb_push(): end TLV, then register TLV (+1 u32 TLV header), then
 * operation TLV, then the Ethernet header.
 */
static void mlxsw_emad_construct(struct sk_buff *skb,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 u64 tid)
{
	char *buf;

	buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_end_tlv(buf);

	buf = skb_push(skb, reg->len + sizeof(u32));
	mlxsw_emad_pack_reg_tlv(buf, reg, payload);

	buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
	mlxsw_emad_pack_op_tlv(buf, reg, type, tid);

	mlxsw_emad_construct_eth_hdr(skb);
}
349
350 static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
351 {
352         return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
353 }
354
355 static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
356 {
357         return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
358                                       MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
359 }
360
361 static char *mlxsw_emad_reg_payload(const char *op_tlv)
362 {
363         return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
364 }
365
366 static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
367 {
368         char *op_tlv;
369
370         op_tlv = mlxsw_emad_op_tlv(skb);
371         return mlxsw_emad_op_tlv_tid_get(op_tlv);
372 }
373
374 static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
375 {
376         char *op_tlv;
377
378         op_tlv = mlxsw_emad_op_tlv(skb);
379         return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
380 }
381
382 static int mlxsw_emad_process_status(char *op_tlv,
383                                      enum mlxsw_emad_op_tlv_status *p_status)
384 {
385         *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
386
387         switch (*p_status) {
388         case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
389                 return 0;
390         case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
391         case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
392                 return -EAGAIN;
393         case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
394         case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
395         case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
396         case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
397         case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
398         case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
399         case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
400         case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
401         default:
402                 return -EIO;
403         }
404 }
405
/* Convenience wrapper: extract the op TLV from @skb and map its status
 * field to an errno (see mlxsw_emad_process_status()).
 */
static int
mlxsw_emad_process_status_skb(struct sk_buff *skb,
			      enum mlxsw_emad_op_tlv_status *p_status)
{
	return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
}
412
/* State of one in-flight EMAD register transaction. */
struct mlxsw_reg_trans {
	struct list_head list;		/* on mlxsw_core->emad.trans_list (RCU) */
	struct list_head bulk_list;	/* caller-supplied bulk wait list */
	struct mlxsw_core *core;
	struct sk_buff *tx_skb;		/* original frame; copied per transmit */
	struct mlxsw_tx_info tx_info;
	struct delayed_work timeout_dw;	/* retransmit timer */
	unsigned int retries;
	u64 tid;			/* matches response to this transaction */
	struct completion completion;	/* signalled when transaction finishes */
	atomic_t active;		/* 1 while a transmit awaits response/timeout */
	mlxsw_reg_trans_cb_t *cb;	/* optional response-payload callback */
	unsigned long cb_priv;
	const struct mlxsw_reg_info *reg;
	enum mlxsw_core_reg_access_type type;
	int err;			/* final result, valid after completion */
	enum mlxsw_emad_op_tlv_status emad_status;
	struct rcu_head rcu;
};

/* Base response timeout; doubled on every retry (see below). */
#define MLXSW_EMAD_TIMEOUT_MS 200

/* Arm the transaction's timeout with exponential backoff: the base
 * timeout shifted left by the number of retries already made.
 */
static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
{
	unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);

	mlxsw_core_schedule_dw(&trans->timeout_dw, timeout << trans->retries);
}
441
/* Transmit one (re)try of the transaction. A copy of tx_skb is sent so
 * the original remains available for further retries; tx_skb itself is
 * freed only in mlxsw_emad_trans_finish().
 *
 * Returns 0 and arms the timeout on success; on failure the copy is
 * freed and an errno is returned (caller decides whether to retry).
 */
static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
			       struct mlxsw_reg_trans *trans)
{
	struct sk_buff *skb;
	int err;

	skb = skb_copy(trans->tx_skb, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
			    skb->data + mlxsw_core->driver->txhdr_len,
			    skb->len - mlxsw_core->driver->txhdr_len);

	/* Mark active before the frame can generate a response; the
	 * response handler and the timeout worker race to claim it with
	 * atomic_dec_and_test().
	 */
	atomic_set(&trans->active, 1);
	err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
	if (err) {
		dev_kfree_skb(skb);
		return err;
	}
	mlxsw_emad_trans_timeout_schedule(trans);
	return 0;
}
465
/* Complete a transaction: free the original TX skb, unlink it from the
 * in-flight list (RCU-safe removal under the list lock), record the
 * final error and wake the waiter.
 */
static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
{
	struct mlxsw_core *mlxsw_core = trans->core;

	dev_kfree_skb(trans->tx_skb);
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	trans->err = err;
	complete(&trans->completion);
}
477
/* Retry a transaction that timed out or got a transient status.
 * If retries remain and the retransmit succeeds we are done — the new
 * transmit re-armed 'active' and the timeout. If the retransmit fails,
 * only the side that wins the atomic_dec_and_test() on 'active' (us vs.
 * a concurrently arriving response/timeout) finishes the transaction.
 * With retries exhausted, fail with -EIO.
 */
static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
				      struct mlxsw_reg_trans *trans)
{
	int err;

	if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
		trans->retries++;
		err = mlxsw_emad_transmit(trans->core, trans);
		if (err == 0)
			return;

		if (!atomic_dec_and_test(&trans->active))
			return;
	} else {
		err = -EIO;
	}
	mlxsw_emad_trans_finish(trans, err);
}
496
/* Delayed-work handler fired when no response arrived in time.
 * atomic_dec_and_test() on 'active' arbitrates against a response that
 * may be arriving concurrently; only the winner proceeds to retry.
 */
static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
{
	struct mlxsw_reg_trans *trans = container_of(work,
						     struct mlxsw_reg_trans,
						     timeout_dw.work);

	if (!atomic_dec_and_test(&trans->active))
		return;

	mlxsw_emad_transmit_retry(trans->core, trans);
}
508
/* Handle a response EMAD matched to @trans. The atomic_dec_and_test()
 * on 'active' arbitrates against the timeout worker. Transient statuses
 * trigger a retransmit; on success the optional callback is invoked
 * with the response's register payload before finishing.
 */
static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
					struct mlxsw_reg_trans *trans,
					struct sk_buff *skb)
{
	int err;

	if (!atomic_dec_and_test(&trans->active))
		return;

	err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
	if (err == -EAGAIN) {
		mlxsw_emad_transmit_retry(mlxsw_core, trans);
	} else {
		if (err == 0) {
			char *op_tlv = mlxsw_emad_op_tlv(skb);

			if (trans->cb)
				trans->cb(mlxsw_core,
					  mlxsw_emad_reg_payload(op_tlv),
					  trans->reg->len, trans->cb_priv);
		}
		mlxsw_emad_trans_finish(trans, err);
	}
}
533
/* called with rcu read lock held */
static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
					void *priv)
{
	struct mlxsw_core *mlxsw_core = priv;
	struct mlxsw_reg_trans *trans;

	trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
			    skb->data, skb->len);

	/* Requests looped back to us (or anything without the R bit) are
	 * not interesting — drop them.
	 */
	if (!mlxsw_emad_is_resp(skb))
		goto free_skb;

	/* Match the response to an in-flight transaction by TID; the
	 * list is traversed under RCU, writers hold trans_list_lock.
	 */
	list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
		if (mlxsw_emad_get_tid(skb) == trans->tid) {
			mlxsw_emad_process_response(mlxsw_core, trans, skb);
			break;
		}
	}

free_skb:
	dev_kfree_skb(skb);
}
557
/* RX listener catching all EMAD-trap packets regardless of port. */
static const struct mlxsw_rx_listener mlxsw_emad_rx_listener = {
	.func = mlxsw_emad_rx_listener_func,
	.local_port = MLXSW_PORT_DONT_CARE,
	.trap_id = MLXSW_TRAP_ID_ETHEMAD,
};

/* Configure the device to trap EMAD responses to the CPU: set up the
 * EMAD trap group (HTGT), then bind the ETHEMAD trap to a
 * trap-to-CPU action (HPKT).
 */
static int mlxsw_emad_traps_set(struct mlxsw_core *mlxsw_core)
{
	char htgt_pl[MLXSW_REG_HTGT_LEN];
	char hpkt_pl[MLXSW_REG_HPKT_LEN];
	int err;

	mlxsw_reg_htgt_pack(htgt_pl, MLXSW_REG_HTGT_TRAP_GROUP_EMAD);
	err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(htgt), htgt_pl);
	if (err)
		return err;

	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_TRAP_TO_CPU,
			    MLXSW_TRAP_ID_ETHEMAD);
	return mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
}
579
/* Initialize EMAD support: seed the transaction-ID counter, set up the
 * in-flight list, register the RX listener and program the EMAD traps.
 * Only after the traps succeed is use_emad enabled; until then register
 * access falls back to the command interface. On trap failure the
 * listener registration is rolled back.
 */
static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
{
	u64 tid;
	int err;

	/* Set the upper 32 bits of the transaction ID field to a random
	 * number. This allows us to discard EMADs addressed to other
	 * devices.
	 */
	get_random_bytes(&tid, 4);
	tid <<= 32;
	atomic64_set(&mlxsw_core->emad.tid, tid);

	INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
	spin_lock_init(&mlxsw_core->emad.trans_list_lock);

	err = mlxsw_core_rx_listener_register(mlxsw_core,
					      &mlxsw_emad_rx_listener,
					      mlxsw_core);
	if (err)
		return err;

	err = mlxsw_emad_traps_set(mlxsw_core);
	if (err)
		goto err_emad_trap_set;

	mlxsw_core->emad.use_emad = true;

	return 0;

err_emad_trap_set:
	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
	return err;
}
616
/* Tear down EMAD support: disable EMAD-based register access first so
 * the HPKT write below goes through the command interface, switch the
 * ETHEMAD trap to discard, then unregister the RX listener.
 */
static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
{
	char hpkt_pl[MLXSW_REG_HPKT_LEN];

	mlxsw_core->emad.use_emad = false;
	mlxsw_reg_hpkt_pack(hpkt_pl, MLXSW_REG_HPKT_ACTION_DISCARD,
			    MLXSW_TRAP_ID_ETHEMAD);
	mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);

	mlxsw_core_rx_listener_unregister(mlxsw_core,
					  &mlxsw_emad_rx_listener,
					  mlxsw_core);
}
630
/* Allocate an skb large enough for a full EMAD frame carrying a
 * @reg_len-byte register payload (Ethernet header, op TLV, reg TLV
 * header, end TLV, plus the bus TX header). The entire length is
 * reserved as headroom so mlxsw_emad_construct() can build the frame
 * back-to-front with skb_push(). Returns NULL when the frame would
 * exceed the maximum EMAD size or allocation fails.
 */
static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
					u16 reg_len)
{
	struct sk_buff *skb;
	u16 emad_len;

	emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
		    (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
		    sizeof(u32) + mlxsw_core->driver->txhdr_len);
	if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
		return NULL;

	skb = netdev_alloc_skb(NULL, emad_len);
	if (!skb)
		return NULL;
	memset(skb->data, 0, emad_len);
	skb_reserve(skb, emad_len);

	return skb;
}
651
/* Start an asynchronous EMAD register access. Initializes @trans, adds
 * it to @bulk_list (for the caller's bulk wait), builds the frame,
 * publishes the transaction on the RCU in-flight list *before*
 * transmitting (so a fast response can find it), and sends it. On
 * transmit failure all of that is unwound and the skb freed.
 *
 * On success the result is delivered later via trans->completion;
 * @cb (if set) runs with the response payload.
 */
static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type,
				 struct mlxsw_reg_trans *trans,
				 struct list_head *bulk_list,
				 mlxsw_reg_trans_cb_t *cb,
				 unsigned long cb_priv, u64 tid)
{
	struct sk_buff *skb;
	int err;

	dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
		trans->tid, reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
	if (!skb)
		return -ENOMEM;

	list_add_tail(&trans->bulk_list, bulk_list);
	trans->core = mlxsw_core;
	trans->tx_skb = skb;
	trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
	trans->tx_info.is_emad = true;
	INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
	trans->tid = tid;
	init_completion(&trans->completion);
	trans->cb = cb;
	trans->cb_priv = cb_priv;
	trans->reg = reg;
	trans->type = type;

	mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
	mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);

	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	err = mlxsw_emad_transmit(mlxsw_core, trans);
	if (err)
		goto err_out;
	return 0;

err_out:
	/* Unwind: unpublish from the RCU list, drop from the bulk list
	 * and free the original skb (no copy was sent successfully).
	 */
	spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del_rcu(&trans->list);
	spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
	list_del(&trans->bulk_list);
	dev_kfree_skb(trans->tx_skb);
	return err;
}
704
705 /*****************
706  * Core functions
707  *****************/
708
/* seq_file show callback for the "rx_stats" debugfs file: dump per-trap
 * and per-port RX packet/byte/drop counters summed over all possible
 * CPUs. The 64-bit counters are read under the u64_stats sequence
 * retry loop for consistency on 32-bit systems; the u32 drop/invalid
 * counters are summed without synchronization (best-effort debug
 * output).
 */
static int mlxsw_core_rx_stats_dbg_read(struct seq_file *file, void *data)
{
	struct mlxsw_core *mlxsw_core = file->private;
	struct mlxsw_core_pcpu_stats *p;
	u64 rx_packets, rx_bytes;
	u64 tmp_rx_packets, tmp_rx_bytes;
	u32 rx_dropped, rx_invalid;
	unsigned int start;
	int i;
	int j;
	static const char hdr[] =
		"     NUM   RX_PACKETS     RX_BYTES RX_DROPPED\n";

	seq_printf(file, hdr);
	/* Per-trap counters. */
	for (i = 0; i < MLXSW_TRAP_ID_MAX; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->trap_rx_packets[i];
				tmp_rx_bytes = p->trap_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->trap_rx_dropped[i];
		}
		seq_printf(file, "trap %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->trap_rx_invalid;
	}
	seq_printf(file, "trap INV                           %10u\n",
		   rx_invalid);

	/* Per-port counters, same scheme. */
	for (i = 0; i < MLXSW_PORT_MAX_PORTS; i++) {
		rx_packets = 0;
		rx_bytes = 0;
		rx_dropped = 0;
		for_each_possible_cpu(j) {
			p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
			do {
				start = u64_stats_fetch_begin(&p->syncp);
				tmp_rx_packets = p->port_rx_packets[i];
				tmp_rx_bytes = p->port_rx_bytes[i];
			} while (u64_stats_fetch_retry(&p->syncp, start));

			rx_packets += tmp_rx_packets;
			rx_bytes += tmp_rx_bytes;
			rx_dropped += p->port_rx_dropped[i];
		}
		seq_printf(file, "port %3d %12llu %12llu %10u\n",
			   i, rx_packets, rx_bytes, rx_dropped);
	}
	rx_invalid = 0;
	for_each_possible_cpu(j) {
		p = per_cpu_ptr(mlxsw_core->pcpu_stats, j);
		rx_invalid += p->port_rx_invalid;
	}
	seq_printf(file, "port INV                           %10u\n",
		   rx_invalid);
	return 0;
}
778
/* debugfs open: wire the seq_file show routine to this core instance
 * (stashed in inode->i_private at file creation).
 */
static int mlxsw_core_rx_stats_dbg_open(struct inode *inode, struct file *f)
{
	struct mlxsw_core *mlxsw_core = inode->i_private;

	return single_open(f, mlxsw_core_rx_stats_dbg_read, mlxsw_core);
}

/* File operations for the read-only "rx_stats" debugfs file. */
static const struct file_operations mlxsw_core_rx_stats_dbg_ops = {
	.owner = THIS_MODULE,
	.open = mlxsw_core_rx_stats_dbg_open,
	.release = single_release,
	.read = seq_read,
	.llseek = seq_lseek
};
793
/* Add a driver to the global driver registry. Always succeeds; the int
 * return is kept for the existing API.
 */
int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
	spin_unlock(&mlxsw_core_driver_list_lock);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_driver_register);

/* Remove a driver from the global driver registry. */
void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
{
	spin_lock(&mlxsw_core_driver_list_lock);
	list_del(&mlxsw_driver->list);
	spin_unlock(&mlxsw_core_driver_list_lock);
}
EXPORT_SYMBOL(mlxsw_core_driver_unregister);
810
811 static struct mlxsw_driver *__driver_find(const char *kind)
812 {
813         struct mlxsw_driver *mlxsw_driver;
814
815         list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
816                 if (strcmp(mlxsw_driver->kind, kind) == 0)
817                         return mlxsw_driver;
818         }
819         return NULL;
820 }
821
/* Find a driver by kind, loading its module on demand, and take a
 * module reference on it. The list lock must be dropped around
 * request_module() (which may sleep and re-enter this registry via the
 * loaded module's init). Returns NULL if no driver is found or its
 * module is going away.
 */
static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	if (!mlxsw_driver) {
		spin_unlock(&mlxsw_core_driver_list_lock);
		request_module(MLXSW_MODULE_ALIAS_PREFIX "%s", kind);
		spin_lock(&mlxsw_core_driver_list_lock);
		mlxsw_driver = __driver_find(kind);
	}
	if (mlxsw_driver) {
		if (!try_module_get(mlxsw_driver->owner))
			mlxsw_driver = NULL;
	}

	spin_unlock(&mlxsw_core_driver_list_lock);
	return mlxsw_driver;
}
842
/* Drop the module reference taken by mlxsw_core_driver_get(). The
 * driver is re-looked-up by kind; a missing entry is silently ignored.
 */
static void mlxsw_core_driver_put(const char *kind)
{
	struct mlxsw_driver *mlxsw_driver;

	spin_lock(&mlxsw_core_driver_list_lock);
	mlxsw_driver = __driver_find(kind);
	spin_unlock(&mlxsw_core_driver_list_lock);
	if (!mlxsw_driver)
		return;
	module_put(mlxsw_driver->owner);
}
854
/* Create the per-device debugfs directory with rx_stats, vsd and psid
 * entries. Returns -ENOMEM if the directory cannot be created; failures
 * of the individual file creations are intentionally not checked.
 */
static int mlxsw_core_debugfs_init(struct mlxsw_core *mlxsw_core)
{
	const struct mlxsw_bus_info *bus_info = mlxsw_core->bus_info;

	mlxsw_core->dbg_dir = debugfs_create_dir(bus_info->device_name,
						 mlxsw_core_dbg_root);
	if (!mlxsw_core->dbg_dir)
		return -ENOMEM;
	debugfs_create_file("rx_stats", S_IRUGO, mlxsw_core->dbg_dir,
			    mlxsw_core, &mlxsw_core_rx_stats_dbg_ops);
	/* Blobs point directly into bus_info, which outlives the debugfs dir */
	mlxsw_core->dbg.vsd_blob.data = (void *) &bus_info->vsd;
	mlxsw_core->dbg.vsd_blob.size = sizeof(bus_info->vsd);
	debugfs_create_blob("vsd", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.vsd_blob);
	mlxsw_core->dbg.psid_blob.data = (void *) &bus_info->psid;
	mlxsw_core->dbg.psid_blob.size = sizeof(bus_info->psid);
	debugfs_create_blob("psid", S_IRUGO, mlxsw_core->dbg_dir,
			    &mlxsw_core->dbg.psid_blob);
	return 0;
}
875
/* Remove the per-device debugfs directory and everything under it. */
static void mlxsw_core_debugfs_fini(struct mlxsw_core *mlxsw_core)
{
	debugfs_remove_recursive(mlxsw_core->dbg_dir);
}
880
881 static int mlxsw_devlink_port_split(struct devlink *devlink,
882                                     unsigned int port_index,
883                                     unsigned int count)
884 {
885         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
886
887         if (port_index >= MLXSW_PORT_MAX_PORTS)
888                 return -EINVAL;
889         if (!mlxsw_core->driver->port_split)
890                 return -EOPNOTSUPP;
891         return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
892 }
893
894 static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
895                                       unsigned int port_index)
896 {
897         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
898
899         if (port_index >= MLXSW_PORT_MAX_PORTS)
900                 return -EINVAL;
901         if (!mlxsw_core->driver->port_unsplit)
902                 return -EOPNOTSUPP;
903         return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
904 }
905
/* devlink op: query shared-buffer pool info; delegates to the driver. */
static int
mlxsw_devlink_sb_pool_get(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index,
			  struct devlink_sb_pool_info *pool_info)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
					 pool_index, pool_info);
}
919
/* devlink op: configure a shared-buffer pool; delegates to the driver. */
static int
mlxsw_devlink_sb_pool_set(struct devlink *devlink,
			  unsigned int sb_index, u16 pool_index, u32 size,
			  enum devlink_sb_threshold_type threshold_type)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
					 pool_index, size, threshold_type);
}
933
/* Map a devlink_port back to the mlxsw_core_port embedding it. */
static void *__dl_port(struct devlink_port *devlink_port)
{
	return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
}
938
/* devlink op: get per-port pool threshold; delegates to the driver. */
static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
					      pool_index, p_threshold);
}
952
/* devlink op: set per-port pool threshold; delegates to the driver. */
static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
					  unsigned int sb_index, u16 pool_index,
					  u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_port_pool_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
					      pool_index, threshold);
}
966
/* devlink op: get TC-to-pool binding for a port; delegates to the driver. */
static int
mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 *p_pool_index, u32 *p_threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 p_pool_index, p_threshold);
}
983
/* devlink op: set TC-to-pool binding for a port; delegates to the driver. */
static int
mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
				  unsigned int sb_index, u16 tc_index,
				  enum devlink_sb_pool_type pool_type,
				  u16 pool_index, u32 threshold)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_tc_pool_bind_set)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
						 tc_index, pool_type,
						 pool_index, threshold);
}
1000
/* devlink op: take a shared-buffer occupancy snapshot; delegates to driver. */
static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
					 unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_snapshot)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
}
1011
/* devlink op: clear max occupancy watermarks; delegates to the driver. */
static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
					  unsigned int sb_index)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;

	if (!mlxsw_driver->sb_occ_max_clear)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
}
1022
/* devlink op: get per-port pool occupancy (cur/max); delegates to driver. */
static int
mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
				   unsigned int sb_index, u16 pool_index,
				   u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_port_pool_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
						  pool_index, p_cur, p_max);
}
1037
/* devlink op: get per-port per-TC occupancy (cur/max); delegates to driver. */
static int
mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
				      unsigned int sb_index, u16 tc_index,
				      enum devlink_sb_pool_type pool_type,
				      u32 *p_cur, u32 *p_max)
{
	struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
	struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
	struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);

	if (!mlxsw_driver->sb_occ_tc_port_bind_get)
		return -EOPNOTSUPP;
	return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
						     sb_index, tc_index,
						     pool_type, p_cur, p_max);
}
1054
/* devlink operations table shared by all mlxsw devices; each entry is a
 * thin wrapper that forwards to the optional mlxsw_driver callback.
 */
static const struct devlink_ops mlxsw_devlink_ops = {
	.port_split			= mlxsw_devlink_port_split,
	.port_unsplit			= mlxsw_devlink_port_unsplit,
	.sb_pool_get			= mlxsw_devlink_sb_pool_get,
	.sb_pool_set			= mlxsw_devlink_sb_pool_set,
	.sb_port_pool_get		= mlxsw_devlink_sb_port_pool_get,
	.sb_port_pool_set		= mlxsw_devlink_sb_port_pool_set,
	.sb_tc_pool_bind_get		= mlxsw_devlink_sb_tc_pool_bind_get,
	.sb_tc_pool_bind_set		= mlxsw_devlink_sb_tc_pool_bind_set,
	.sb_occ_snapshot		= mlxsw_devlink_sb_occ_snapshot,
	.sb_occ_max_clear		= mlxsw_devlink_sb_occ_max_clear,
	.sb_occ_port_pool_get		= mlxsw_devlink_sb_occ_port_pool_get,
	.sb_occ_tc_port_bind_get	= mlxsw_devlink_sb_occ_tc_port_bind_get,
};
1069
/* Register a new mlxsw device with the core: resolve its driver (loading
 * the module if needed), allocate the devlink/core object, bring up the
 * bus, EMAD, devlink, hwmon, the driver itself and debugfs — in that
 * order. On any failure the goto ladder unwinds exactly the steps that
 * already succeeded, in reverse order. Returns 0 on success or a
 * negative errno.
 */
int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
				   const struct mlxsw_bus *mlxsw_bus,
				   void *bus_priv)
{
	const char *device_kind = mlxsw_bus_info->device_kind;
	struct mlxsw_core *mlxsw_core;
	struct mlxsw_driver *mlxsw_driver;
	struct devlink *devlink;
	size_t alloc_size;
	int err;

	mlxsw_driver = mlxsw_core_driver_get(device_kind);
	if (!mlxsw_driver)
		return -EINVAL;
	/* mlxsw_core and the driver's private area live in devlink's priv */
	alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
	devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
	if (!devlink) {
		err = -ENOMEM;
		goto err_devlink_alloc;
	}

	mlxsw_core = devlink_priv(devlink);
	INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
	INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
	mlxsw_core->driver = mlxsw_driver;
	mlxsw_core->bus = mlxsw_bus;
	mlxsw_core->bus_priv = bus_priv;
	mlxsw_core->bus_info = mlxsw_bus_info;

	mlxsw_core->pcpu_stats =
		netdev_alloc_pcpu_stats(struct mlxsw_core_pcpu_stats);
	if (!mlxsw_core->pcpu_stats) {
		err = -ENOMEM;
		goto err_alloc_stats;
	}

	/* Bus init also fills in the device's resource limits */
	err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
			      &mlxsw_core->resources);
	if (err)
		goto err_bus_init;

	/* LAG-to-local-port mapping table is only needed if the device
	 * reported valid LAG resource limits.
	 */
	if (mlxsw_core->resources.max_lag_valid &&
	    mlxsw_core->resources.max_ports_in_lag_valid) {
		alloc_size = sizeof(u8) * mlxsw_core->resources.max_lag *
			mlxsw_core->resources.max_ports_in_lag;
		mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
		if (!mlxsw_core->lag.mapping) {
			err = -ENOMEM;
			goto err_alloc_lag_mapping;
		}
	}

	err = mlxsw_emad_init(mlxsw_core);
	if (err)
		goto err_emad_init;

	err = devlink_register(devlink, mlxsw_bus_info->dev);
	if (err)
		goto err_devlink_register;

	err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
	if (err)
		goto err_hwmon_init;

	err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
	if (err)
		goto err_driver_init;

	err = mlxsw_core_debugfs_init(mlxsw_core);
	if (err)
		goto err_debugfs_init;

	return 0;

err_debugfs_init:
	mlxsw_core->driver->fini(mlxsw_core);
err_driver_init:
err_hwmon_init:
	devlink_unregister(devlink);
err_devlink_register:
	mlxsw_emad_fini(mlxsw_core);
err_emad_init:
	kfree(mlxsw_core->lag.mapping);
err_alloc_lag_mapping:
	mlxsw_bus->fini(bus_priv);
err_bus_init:
	free_percpu(mlxsw_core->pcpu_stats);
err_alloc_stats:
	devlink_free(devlink);
err_devlink_alloc:
	mlxsw_core_driver_put(device_kind);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_bus_device_register);
1164
/* Tear down a device previously set up by mlxsw_core_bus_device_register(),
 * releasing resources in the exact reverse order of initialization and
 * finally dropping the driver module reference.
 */
void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
{
	const char *device_kind = mlxsw_core->bus_info->device_kind;
	struct devlink *devlink = priv_to_devlink(mlxsw_core);

	mlxsw_core_debugfs_fini(mlxsw_core);
	mlxsw_core->driver->fini(mlxsw_core);
	devlink_unregister(devlink);
	mlxsw_emad_fini(mlxsw_core);
	mlxsw_core->bus->fini(mlxsw_core->bus_priv);
	kfree(mlxsw_core->lag.mapping);
	free_percpu(mlxsw_core->pcpu_stats);
	/* devlink_free() releases mlxsw_core itself */
	devlink_free(devlink);
	mlxsw_core_driver_put(device_kind);
}
EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
1181
1182 bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
1183                                   const struct mlxsw_tx_info *tx_info)
1184 {
1185         return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
1186                                                   tx_info);
1187 }
1188 EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
1189
1190 int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1191                             const struct mlxsw_tx_info *tx_info)
1192 {
1193         return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
1194                                              tx_info);
1195 }
1196 EXPORT_SYMBOL(mlxsw_core_skb_transmit);
1197
1198 static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
1199                                    const struct mlxsw_rx_listener *rxl_b)
1200 {
1201         return (rxl_a->func == rxl_b->func &&
1202                 rxl_a->local_port == rxl_b->local_port &&
1203                 rxl_a->trap_id == rxl_b->trap_id);
1204 }
1205
1206 static struct mlxsw_rx_listener_item *
1207 __find_rx_listener_item(struct mlxsw_core *mlxsw_core,
1208                         const struct mlxsw_rx_listener *rxl,
1209                         void *priv)
1210 {
1211         struct mlxsw_rx_listener_item *rxl_item;
1212
1213         list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
1214                 if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
1215                     rxl_item->priv == priv)
1216                         return rxl_item;
1217         }
1218         return NULL;
1219 }
1220
/* Register an RX listener. The (listener, priv) pair must be unique;
 * -EEXIST is returned for duplicates. The item is published with
 * list_add_rcu() so RX-path readers can traverse the list locklessly.
 */
int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
				    const struct mlxsw_rx_listener *rxl,
				    void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (rxl_item)
		return -EEXIST;
	rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
	if (!rxl_item)
		return -ENOMEM;
	rxl_item->rxl = *rxl;
	rxl_item->priv = priv;

	list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
	return 0;
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
1240
/* Unregister an RX listener previously added with
 * mlxsw_core_rx_listener_register(). synchronize_rcu() guarantees no
 * RX-path reader still holds a reference before the item is freed.
 * Unknown (listener, priv) pairs are silently ignored.
 */
void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_rx_listener *rxl,
				       void *priv)
{
	struct mlxsw_rx_listener_item *rxl_item;

	rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
	if (!rxl_item)
		return;
	list_del_rcu(&rxl_item->list);
	synchronize_rcu();
	kfree(rxl_item);
}
EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
1255
/* RX callback bridging event traps to event listeners: extracts the
 * register id/len and payload from the EMAD TLVs in @skb, invokes the
 * event listener's callback and consumes the skb.
 */
static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
					   void *priv)
{
	struct mlxsw_event_listener_item *event_listener_item = priv;
	struct mlxsw_reg_info reg;
	char *payload;
	char *op_tlv = mlxsw_emad_op_tlv(skb);
	char *reg_tlv = mlxsw_emad_reg_tlv(skb);

	reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
	/* reg TLV length includes its one-dword header; exclude it */
	reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
	payload = mlxsw_emad_reg_payload(op_tlv);
	event_listener_item->el.func(&reg, payload, event_listener_item->priv);
	dev_kfree_skb(skb);
}
1271
1272 static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
1273                                       const struct mlxsw_event_listener *el_b)
1274 {
1275         return (el_a->func == el_b->func &&
1276                 el_a->trap_id == el_b->trap_id);
1277 }
1278
1279 static struct mlxsw_event_listener_item *
1280 __find_event_listener_item(struct mlxsw_core *mlxsw_core,
1281                            const struct mlxsw_event_listener *el,
1282                            void *priv)
1283 {
1284         struct mlxsw_event_listener_item *el_item;
1285
1286         list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
1287                 if (__is_event_listener_equal(&el_item->el, el) &&
1288                     el_item->priv == priv)
1289                         return el_item;
1290         }
1291         return NULL;
1292 }
1293
/* Register an event listener by wrapping it in an RX listener for the
 * event's trap ID (any local port). Duplicate (listener, priv) pairs
 * return -EEXIST. The item is only added to the event list after the
 * RX listener registration succeeds.
 */
int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
				       const struct mlxsw_event_listener *el,
				       void *priv)
{
	int err;
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (el_item)
		return -EEXIST;
	el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
	if (!el_item)
		return -ENOMEM;
	el_item->el = *el;
	el_item->priv = priv;

	err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
	if (err)
		goto err_rx_listener_register;

	/* No reason to save item if we did not manage to register an RX
	 * listener for it.
	 */
	list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);

	return 0;

err_rx_listener_register:
	kfree(el_item);
	return err;
}
EXPORT_SYMBOL(mlxsw_core_event_listener_register);
1331
/* Unregister an event listener: removes the wrapping RX listener (which
 * waits for RCU readers) and then frees the item. Unknown pairs are
 * silently ignored.
 */
void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
					  const struct mlxsw_event_listener *el,
					  void *priv)
{
	struct mlxsw_event_listener_item *el_item;
	const struct mlxsw_rx_listener rxl = {
		.func = mlxsw_core_event_listener_func,
		.local_port = MLXSW_PORT_DONT_CARE,
		.trap_id = el->trap_id,
	};

	el_item = __find_event_listener_item(mlxsw_core, el, priv);
	if (!el_item)
		return;
	mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
	list_del(&el_item->list);
	kfree(el_item);
}
EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
1351
/* Allocate the next EMAD transaction ID (atomic, monotonically increasing). */
static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
{
	return atomic64_inc_return(&mlxsw_core->emad.tid);
}
1356
/* Start an asynchronous register access over the EMAD interface.
 * Allocates a transaction, links it on @bulk_list and fires it off;
 * @cb(@cb_priv) is invoked on completion. On submit failure the
 * transaction is released with kfree_rcu() since the RX path may still
 * look it up under RCU.
 */
static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
				      const struct mlxsw_reg_info *reg,
				      char *payload,
				      enum mlxsw_core_reg_access_type type,
				      struct list_head *bulk_list,
				      mlxsw_reg_trans_cb_t *cb,
				      unsigned long cb_priv)
{
	u64 tid = mlxsw_core_tid_get(mlxsw_core);
	struct mlxsw_reg_trans *trans;
	int err;

	trans = kzalloc(sizeof(*trans), GFP_KERNEL);
	if (!trans)
		return -ENOMEM;

	err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
				    bulk_list, cb, cb_priv, tid);
	if (err) {
		kfree_rcu(trans, rcu);
		return err;
	}
	return 0;
}
1381
/* Issue an asynchronous EMAD register QUERY; collect the result via
 * mlxsw_reg_trans_bulk_wait() on @bulk_list.
 */
int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_query);
1392
/* Issue an asynchronous EMAD register WRITE; collect the result via
 * mlxsw_reg_trans_bulk_wait() on @bulk_list.
 */
int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
			  const struct mlxsw_reg_info *reg, char *payload,
			  struct list_head *bulk_list,
			  mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
{
	return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
					  MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
					  bulk_list, cb, cb_priv);
}
EXPORT_SYMBOL(mlxsw_reg_trans_write);
1403
/* Wait for a single EMAD transaction to complete, log retries/errors,
 * then unlink and release it (kfree_rcu — the RX path may still hold a
 * reference). Returns the transaction's final error code.
 */
static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
{
	struct mlxsw_core *mlxsw_core = trans->core;
	int err;

	wait_for_completion(&trans->completion);
	/* The timeout work might be queued even after completion; make
	 * sure it is gone before reading trans->err.
	 */
	cancel_delayed_work_sync(&trans->timeout_dw);
	err = trans->err;

	if (trans->retries)
		dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
			 trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
			trans->tid, trans->reg->id,
			mlxsw_reg_id_str(trans->reg->id),
			mlxsw_core_reg_access_type_str(trans->type),
			trans->emad_status,
			mlxsw_emad_op_tlv_status_str(trans->emad_status));

	list_del(&trans->bulk_list);
	kfree_rcu(trans, rcu);
	return err;
}
1428
1429 int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
1430 {
1431         struct mlxsw_reg_trans *trans;
1432         struct mlxsw_reg_trans *tmp;
1433         int sum_err = 0;
1434         int err;
1435
1436         list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
1437                 err = mlxsw_reg_trans_wait(trans);
1438                 if (err && sum_err == 0)
1439                         sum_err = err; /* first error to be returned */
1440         }
1441         return sum_err;
1442 }
1443 EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
1444
/* Synchronous register access over the command interface (used before
 * EMAD is operational). Packs the EMAD op/reg TLVs into an input
 * mailbox, executes ACCESS_REG, retries on -EAGAIN up to
 * MLXSW_EMAD_MAX_RETRY times and, on success, copies the register
 * payload back to @payload. Returns 0 or a negative errno.
 */
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
				     const struct mlxsw_reg_info *reg,
				     char *payload,
				     enum mlxsw_core_reg_access_type type)
{
	enum mlxsw_emad_op_tlv_status status;
	int err, n_retry;
	char *in_mbox, *out_mbox, *tmp;

	dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
		reg->id, mlxsw_reg_id_str(reg->id),
		mlxsw_core_reg_access_type_str(type));

	in_mbox = mlxsw_cmd_mbox_alloc();
	if (!in_mbox)
		return -ENOMEM;

	out_mbox = mlxsw_cmd_mbox_alloc();
	if (!out_mbox) {
		err = -ENOMEM;
		goto free_in_mbox;
	}

	/* Mailbox layout: op TLV first, then the reg TLV with payload */
	mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
			       mlxsw_core_tid_get(mlxsw_core));
	tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
	mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

	n_retry = 0;
retry:
	err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
	if (!err) {
		err = mlxsw_emad_process_status(out_mbox, &status);
		if (err) {
			/* -EAGAIN means the device asked us to retry */
			if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
				goto retry;
			dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
				status, mlxsw_emad_op_tlv_status_str(status));
		}
	}

	if (!err)
		memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
		       reg->len);

	mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
	mlxsw_cmd_mbox_free(in_mbox);
	if (err)
		dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
			reg->id, mlxsw_reg_id_str(reg->id),
			mlxsw_core_reg_access_type_str(type));
	return err;
}
1499
1500 static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
1501                                      char *payload, size_t payload_len,
1502                                      unsigned long cb_priv)
1503 {
1504         char *orig_payload = (char *) cb_priv;
1505
1506         memcpy(orig_payload, payload, payload_len);
1507 }
1508
/* Synchronous register access: uses EMAD when available, otherwise the
 * command interface. For EMAD, a single-entry bulk list is built and
 * waited on, with the completion callback copying the response back
 * into @payload.
 */
static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
				 const struct mlxsw_reg_info *reg,
				 char *payload,
				 enum mlxsw_core_reg_access_type type)
{
	LIST_HEAD(bulk_list);
	int err;

	/* During initialization EMAD interface is not available to us,
	 * so we default to command interface. We switch to EMAD interface
	 * after setting the appropriate traps.
	 */
	if (!mlxsw_core->emad.use_emad)
		return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
						 payload, type);

	err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
					 payload, type, &bulk_list,
					 mlxsw_core_reg_access_cb,
					 (unsigned long) payload);
	if (err)
		return err;
	return mlxsw_reg_trans_bulk_wait(&bulk_list);
}
1533
/* Synchronously query register @reg; result is written into @payload. */
int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);
1541
/* Synchronously write register @reg from @payload. */
int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
		    const struct mlxsw_reg_info *reg, char *payload)
{
	return mlxsw_core_reg_access(mlxsw_core, reg, payload,
				     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
1549
/* RX entry point from the bus: resolve the ingress local port (via the
 * LAG mapping when the packet arrived on a LAG), validate trap ID and
 * port, find a matching RX listener under RCU, bump per-CPU stats and
 * deliver the skb to the listener. Packets with no listener or invalid
 * IDs are counted in drop statistics and freed.
 */
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
			    struct mlxsw_rx_info *rx_info)
{
	struct mlxsw_rx_listener_item *rxl_item;
	const struct mlxsw_rx_listener *rxl;
	struct mlxsw_core_pcpu_stats *pcpu_stats;
	u8 local_port;
	bool found = false;

	if (rx_info->is_lag) {
		dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
				    __func__, rx_info->u.lag_id,
				    rx_info->trap_id);
		/* Upper layer does not care if the skb came from LAG or not,
		 * so just get the local_port for the lag port and push it up.
		 */
		local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
							rx_info->u.lag_id,
							rx_info->lag_port_index);
	} else {
		local_port = rx_info->u.sys_port;
	}

	dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
			    __func__, local_port, rx_info->trap_id);

	/* Bounds-check before using the values as stats array indices */
	if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
	    (local_port >= MLXSW_PORT_MAX_PORTS))
		goto drop;

	rcu_read_lock();
	list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
		rxl = &rxl_item->rxl;
		if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
		     rxl->local_port == local_port) &&
		    rxl->trap_id == rx_info->trap_id) {
			found = true;
			break;
		}
	}
	if (!found) {
		rcu_read_unlock();
		goto drop;
	}

	pcpu_stats = this_cpu_ptr(mlxsw_core->pcpu_stats);
	u64_stats_update_begin(&pcpu_stats->syncp);
	pcpu_stats->port_rx_packets[local_port]++;
	pcpu_stats->port_rx_bytes[local_port] += skb->len;
	pcpu_stats->trap_rx_packets[rx_info->trap_id]++;
	pcpu_stats->trap_rx_bytes[rx_info->trap_id] += skb->len;
	u64_stats_update_end(&pcpu_stats->syncp);

	/* Listener takes ownership of the skb; call under rcu_read_lock
	 * so unregister's synchronize_rcu() waits for us.
	 */
	rxl->func(skb, local_port, rxl_item->priv);
	rcu_read_unlock();
	return;

drop:
	if (rx_info->trap_id >= MLXSW_TRAP_ID_MAX)
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->trap_rx_dropped[rx_info->trap_id]);
	if (local_port >= MLXSW_PORT_MAX_PORTS)
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_invalid);
	else
		this_cpu_inc(mlxsw_core->pcpu_stats->port_rx_dropped[local_port]);
	dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);
1619
1620 static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
1621                                         u16 lag_id, u8 port_index)
1622 {
1623         return mlxsw_core->resources.max_ports_in_lag * lag_id +
1624                port_index;
1625 }
1626
1627 void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
1628                                 u16 lag_id, u8 port_index, u8 local_port)
1629 {
1630         int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1631                                                  lag_id, port_index);
1632
1633         mlxsw_core->lag.mapping[index] = local_port;
1634 }
1635 EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);
1636
1637 u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
1638                               u16 lag_id, u8 port_index)
1639 {
1640         int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1641                                                  lag_id, port_index);
1642
1643         return mlxsw_core->lag.mapping[index];
1644 }
1645 EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);
1646
1647 void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
1648                                   u16 lag_id, u8 local_port)
1649 {
1650         int i;
1651
1652         for (i = 0; i < mlxsw_core->resources.max_ports_in_lag; i++) {
1653                 int index = mlxsw_core_lag_mapping_index(mlxsw_core,
1654                                                          lag_id, i);
1655
1656                 if (mlxsw_core->lag.mapping[index] == local_port)
1657                         mlxsw_core->lag.mapping[index] = 0;
1658         }
1659 }
1660 EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);
1661
1662 struct mlxsw_resources *mlxsw_core_resources_get(struct mlxsw_core *mlxsw_core)
1663 {
1664         return &mlxsw_core->resources;
1665 }
1666 EXPORT_SYMBOL(mlxsw_core_resources_get);
1667
1668 int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core,
1669                          struct mlxsw_core_port *mlxsw_core_port, u8 local_port,
1670                          struct net_device *dev, bool split, u32 split_group)
1671 {
1672         struct devlink *devlink = priv_to_devlink(mlxsw_core);
1673         struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1674
1675         if (split)
1676                 devlink_port_split_set(devlink_port, split_group);
1677         devlink_port_type_eth_set(devlink_port, dev);
1678         return devlink_port_register(devlink, devlink_port, local_port);
1679 }
1680 EXPORT_SYMBOL(mlxsw_core_port_init);
1681
1682 void mlxsw_core_port_fini(struct mlxsw_core_port *mlxsw_core_port)
1683 {
1684         struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
1685
1686         devlink_port_unregister(devlink_port);
1687 }
1688 EXPORT_SYMBOL(mlxsw_core_port_fini);
1689
1690 static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
1691                                     const char *buf, size_t size)
1692 {
1693         __be32 *m = (__be32 *) buf;
1694         int i;
1695         int count = size / sizeof(__be32);
1696
1697         for (i = count - 1; i >= 0; i--)
1698                 if (m[i])
1699                         break;
1700         i++;
1701         count = i ? i : 1;
1702         for (i = 0; i < count; i += 4)
1703                 dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
1704                         i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
1705                         be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
1706 }
1707
1708 int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
1709                    u32 in_mod, bool out_mbox_direct,
1710                    char *in_mbox, size_t in_mbox_size,
1711                    char *out_mbox, size_t out_mbox_size)
1712 {
1713         u8 status;
1714         int err;
1715
1716         BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
1717         if (!mlxsw_core->bus->cmd_exec)
1718                 return -EOPNOTSUPP;
1719
1720         dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
1721                 opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
1722         if (in_mbox) {
1723                 dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
1724                 mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
1725         }
1726
1727         err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
1728                                         opcode_mod, in_mod, out_mbox_direct,
1729                                         in_mbox, in_mbox_size,
1730                                         out_mbox, out_mbox_size, &status);
1731
1732         if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
1733                 dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
1734                         opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
1735                         in_mod, status, mlxsw_cmd_status_str(status));
1736         } else if (err == -ETIMEDOUT) {
1737                 dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
1738                         opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
1739                         in_mod);
1740         }
1741
1742         if (!err && out_mbox) {
1743                 dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
1744                 mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
1745         }
1746         return err;
1747 }
1748 EXPORT_SYMBOL(mlxsw_cmd_exec);
1749
1750 int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
1751 {
1752         return queue_delayed_work(mlxsw_wq, dwork, delay);
1753 }
1754 EXPORT_SYMBOL(mlxsw_core_schedule_dw);
1755
1756 static int __init mlxsw_core_module_init(void)
1757 {
1758         int err;
1759
1760         mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, WQ_MEM_RECLAIM, 0);
1761         if (!mlxsw_wq)
1762                 return -ENOMEM;
1763         mlxsw_core_dbg_root = debugfs_create_dir(mlxsw_core_driver_name, NULL);
1764         if (!mlxsw_core_dbg_root) {
1765                 err = -ENOMEM;
1766                 goto err_debugfs_create_dir;
1767         }
1768         return 0;
1769
1770 err_debugfs_create_dir:
1771         destroy_workqueue(mlxsw_wq);
1772         return err;
1773 }
1774
/* Module teardown: release global resources in the reverse order of
 * mlxsw_core_module_init().
 */
static void __exit mlxsw_core_module_exit(void)
{
	/* Remove the debugfs root and everything created beneath it. */
	debugfs_remove_recursive(mlxsw_core_dbg_root);
	/* No drivers remain by module exit, so the workqueue is idle. */
	destroy_workqueue(mlxsw_wq);
}
1780
/* Module entry/exit hooks and metadata. */
module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");