1 /*
2  * drivers/net/ethernet/mellanox/mlxsw/core.c
3  * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
4  * Copyright (c) 2015 Jiri Pirko <jiri@mellanox.com>
5  * Copyright (c) 2015 Ido Schimmel <idosch@mellanox.com>
6  * Copyright (c) 2015 Elad Raz <eladr@mellanox.com>
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  * 3. Neither the names of the copyright holders nor the names of its
17  *    contributors may be used to endorse or promote products derived from
18  *    this software without specific prior written permission.
19  *
20  * Alternatively, this software may be distributed under the terms of the
21  * GNU General Public License ("GPL") version 2 as published by the Free
22  * Software Foundation.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34  * POSSIBILITY OF SUCH DAMAGE.
35  */
36
37 #include <linux/kernel.h>
38 #include <linux/module.h>
39 #include <linux/device.h>
40 #include <linux/export.h>
41 #include <linux/err.h>
42 #include <linux/if_link.h>
43 #include <linux/netdevice.h>
44 #include <linux/completion.h>
45 #include <linux/skbuff.h>
46 #include <linux/etherdevice.h>
47 #include <linux/types.h>
48 #include <linux/string.h>
49 #include <linux/gfp.h>
50 #include <linux/random.h>
51 #include <linux/jiffies.h>
52 #include <linux/mutex.h>
53 #include <linux/rcupdate.h>
54 #include <linux/slab.h>
55 #include <linux/workqueue.h>
56 #include <asm/byteorder.h>
57 #include <net/devlink.h>
58 #include <trace/events/devlink.h>
59
60 #include "core.h"
61 #include "item.h"
62 #include "cmd.h"
63 #include "port.h"
64 #include "trap.h"
65 #include "emad.h"
66 #include "reg.h"
67 #include "resources.h"
68
69 static LIST_HEAD(mlxsw_core_driver_list);
70 static DEFINE_SPINLOCK(mlxsw_core_driver_list_lock);
71
72 static const char mlxsw_core_driver_name[] = "mlxsw_core";
73
74 static struct workqueue_struct *mlxsw_wq;
75 static struct workqueue_struct *mlxsw_owq;
76
77 struct mlxsw_core_port {
78         struct devlink_port devlink_port;
79         void *port_driver_priv;
80         u8 local_port;
81 };
82
83 void *mlxsw_core_port_driver_priv(struct mlxsw_core_port *mlxsw_core_port)
84 {
85         return mlxsw_core_port->port_driver_priv;
86 }
87 EXPORT_SYMBOL(mlxsw_core_port_driver_priv);
88
89 static bool mlxsw_core_port_check(struct mlxsw_core_port *mlxsw_core_port)
90 {
91         return mlxsw_core_port->port_driver_priv != NULL;
92 }
93
94 struct mlxsw_core {
95         struct mlxsw_driver *driver;
96         const struct mlxsw_bus *bus;
97         void *bus_priv;
98         const struct mlxsw_bus_info *bus_info;
99         struct workqueue_struct *emad_wq;
100         struct list_head rx_listener_list;
101         struct list_head event_listener_list;
102         struct {
103                 atomic64_t tid;
104                 struct list_head trans_list;
105                 spinlock_t trans_list_lock; /* protects trans_list writes */
106                 bool use_emad;
107         } emad;
108         struct {
109                 u8 *mapping; /* lag_id+port_index to local_port mapping */
110         } lag;
111         struct mlxsw_res res;
112         struct mlxsw_hwmon *hwmon;
113         struct mlxsw_thermal *thermal;
114         struct mlxsw_core_port *ports;
115         unsigned int max_ports;
116         bool fw_flash_in_progress;
117         unsigned long driver_priv[0];
118         /* driver_priv has to be always the last item */
119 };
120
121 #define MLXSW_PORT_MAX_PORTS_DEFAULT    0x40
122
123 static int mlxsw_ports_init(struct mlxsw_core *mlxsw_core)
124 {
125         /* Switch ports are numbered from 1 to queried value */
126         if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_SYSTEM_PORT))
127                 mlxsw_core->max_ports = MLXSW_CORE_RES_GET(mlxsw_core,
128                                                            MAX_SYSTEM_PORT) + 1;
129         else
130                 mlxsw_core->max_ports = MLXSW_PORT_MAX_PORTS_DEFAULT + 1;
131
132         mlxsw_core->ports = kcalloc(mlxsw_core->max_ports,
133                                     sizeof(struct mlxsw_core_port), GFP_KERNEL);
134         if (!mlxsw_core->ports)
135                 return -ENOMEM;
136
137         return 0;
138 }
139
140 static void mlxsw_ports_fini(struct mlxsw_core *mlxsw_core)
141 {
142         kfree(mlxsw_core->ports);
143 }
144
145 unsigned int mlxsw_core_max_ports(const struct mlxsw_core *mlxsw_core)
146 {
147         return mlxsw_core->max_ports;
148 }
149 EXPORT_SYMBOL(mlxsw_core_max_ports);
150
151 void *mlxsw_core_driver_priv(struct mlxsw_core *mlxsw_core)
152 {
153         return mlxsw_core->driver_priv;
154 }
155 EXPORT_SYMBOL(mlxsw_core_driver_priv);
156
157 struct mlxsw_rx_listener_item {
158         struct list_head list;
159         struct mlxsw_rx_listener rxl;
160         void *priv;
161 };
162
163 struct mlxsw_event_listener_item {
164         struct list_head list;
165         struct mlxsw_event_listener el;
166         void *priv;
167 };
168
169 /******************
170  * EMAD processing
171  ******************/
172
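/* An EMAD frame, as built and parsed here, is laid out as:
 * Ethernet header | operation TLV | register TLV (header + payload) | end TLV.
 * The MLXSW_ITEM* definitions below describe the fields of those parts;
 * mlxsw_emad_construct() assembles them and mlxsw_emad_op_tlv() /
 * mlxsw_emad_reg_tlv() locate them again in received frames.
 */
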
173 /* emad_eth_hdr_dmac
174  * Destination MAC in EMAD's Ethernet header.
175  * Must be set to 01:02:c9:00:00:01
176  */
177 MLXSW_ITEM_BUF(emad, eth_hdr, dmac, 0x00, 6);
178
179 /* emad_eth_hdr_smac
180  * Source MAC in EMAD's Ethernet header.
181  * Must be set to 00:02:c9:01:02:03
182  */
183 MLXSW_ITEM_BUF(emad, eth_hdr, smac, 0x06, 6);
184
185 /* emad_eth_hdr_ethertype
186  * Ethertype in EMAD's Ethernet header.
187  * Must be set to 0x8932
188  */
189 MLXSW_ITEM32(emad, eth_hdr, ethertype, 0x0C, 16, 16);
190
191 /* emad_eth_hdr_mlx_proto
192  * Mellanox protocol.
193  * Must be set to 0x0.
194  */
195 MLXSW_ITEM32(emad, eth_hdr, mlx_proto, 0x0C, 8, 8);
196
197 /* emad_eth_hdr_ver
198  * Mellanox protocol version.
199  * Must be set to 0x0.
200  */
201 MLXSW_ITEM32(emad, eth_hdr, ver, 0x0C, 4, 4);
202
203 /* emad_op_tlv_type
204  * Type of the TLV.
205  * Must be set to 0x1 (operation TLV).
206  */
207 MLXSW_ITEM32(emad, op_tlv, type, 0x00, 27, 5);
208
209 /* emad_op_tlv_len
210  * Length of the operation TLV in u32.
211  * Must be set to 0x4.
212  */
213 MLXSW_ITEM32(emad, op_tlv, len, 0x00, 16, 11);
214
215 /* emad_op_tlv_dr
216  * Direct route bit. Setting to 1 indicates the EMAD is a direct route
217  * EMAD. DR TLV must follow.
218  *
219  * Note: Currently not supported and must not be set.
220  */
221 MLXSW_ITEM32(emad, op_tlv, dr, 0x00, 15, 1);
222
223 /* emad_op_tlv_status
224  * Returned status in case of EMAD response. Must be set to 0 in case
225  * of EMAD request.
226  * 0x0 - success
227  * 0x1 - device is busy. Requester should retry
228  * 0x2 - Mellanox protocol version not supported
229  * 0x3 - unknown TLV
230  * 0x4 - register not supported
231  * 0x5 - operation class not supported
232  * 0x6 - EMAD method not supported
233  * 0x7 - bad parameter (e.g. port out of range)
234  * 0x8 - resource not available
235  * 0x9 - message receipt acknowledgment. Requester should retry
236  * 0x70 - internal error
237  */
238 MLXSW_ITEM32(emad, op_tlv, status, 0x00, 8, 7);
239
240 /* emad_op_tlv_register_id
241  * Register ID of register within register TLV.
242  */
243 MLXSW_ITEM32(emad, op_tlv, register_id, 0x04, 16, 16);
244
245 /* emad_op_tlv_r
246  * Response bit. Setting to 1 indicates Response, otherwise request.
247  */
248  * Response bit. Set to 1 in a response, 0 in a request.
249
250 /* emad_op_tlv_method
251  * EMAD method type.
252  * 0x1 - query
253  * 0x2 - write
254  * 0x3 - send (currently not supported)
255  * 0x4 - event
256  */
257 MLXSW_ITEM32(emad, op_tlv, method, 0x04, 8, 7);
258
259 /* emad_op_tlv_class
260  * EMAD operation class. Must be set to 0x1 (REG_ACCESS).
261  */
262 MLXSW_ITEM32(emad, op_tlv, class, 0x04, 0, 8);
263
264 /* emad_op_tlv_tid
265  * EMAD transaction ID. Used for pairing request and response EMADs.
266  */
267 MLXSW_ITEM64(emad, op_tlv, tid, 0x08, 0, 64);
268
269 /* emad_reg_tlv_type
270  * Type of the TLV.
271  * Must be set to 0x3 (register TLV).
272  */
273 MLXSW_ITEM32(emad, reg_tlv, type, 0x00, 27, 5);
274
275 /* emad_reg_tlv_len
276  * Length of the register TLV in u32.
277  */
278 MLXSW_ITEM32(emad, reg_tlv, len, 0x00, 16, 11);
279
280 /* emad_end_tlv_type
281  * Type of the TLV.
282  * Must be set to 0x0 (end TLV).
283  */
284 MLXSW_ITEM32(emad, end_tlv, type, 0x00, 27, 5);
285
286 /* emad_end_tlv_len
287  * Length of the end TLV in u32.
288  * Must be set to 1.
289  */
290 MLXSW_ITEM32(emad, end_tlv, len, 0x00, 16, 11);
291
292 enum mlxsw_core_reg_access_type {
293         MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
294         MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
295 };
296
297 static inline const char *
298 mlxsw_core_reg_access_type_str(enum mlxsw_core_reg_access_type type)
299 {
300         switch (type) {
301         case MLXSW_CORE_REG_ACCESS_TYPE_QUERY:
302                 return "query";
303         case MLXSW_CORE_REG_ACCESS_TYPE_WRITE:
304                 return "write";
305         }
306         BUG();
307 }
308
309 static void mlxsw_emad_pack_end_tlv(char *end_tlv)
310 {
311         mlxsw_emad_end_tlv_type_set(end_tlv, MLXSW_EMAD_TLV_TYPE_END);
312         mlxsw_emad_end_tlv_len_set(end_tlv, MLXSW_EMAD_END_TLV_LEN);
313 }
314
315 static void mlxsw_emad_pack_reg_tlv(char *reg_tlv,
316                                     const struct mlxsw_reg_info *reg,
317                                     char *payload)
318 {
319         mlxsw_emad_reg_tlv_type_set(reg_tlv, MLXSW_EMAD_TLV_TYPE_REG);
320         mlxsw_emad_reg_tlv_len_set(reg_tlv, reg->len / sizeof(u32) + 1);
321         memcpy(reg_tlv + sizeof(u32), payload, reg->len);
322 }
323
324 static void mlxsw_emad_pack_op_tlv(char *op_tlv,
325                                    const struct mlxsw_reg_info *reg,
326                                    enum mlxsw_core_reg_access_type type,
327                                    u64 tid)
328 {
329         mlxsw_emad_op_tlv_type_set(op_tlv, MLXSW_EMAD_TLV_TYPE_OP);
330         mlxsw_emad_op_tlv_len_set(op_tlv, MLXSW_EMAD_OP_TLV_LEN);
331         mlxsw_emad_op_tlv_dr_set(op_tlv, 0);
332         mlxsw_emad_op_tlv_status_set(op_tlv, 0);
333         mlxsw_emad_op_tlv_register_id_set(op_tlv, reg->id);
334         mlxsw_emad_op_tlv_r_set(op_tlv, MLXSW_EMAD_OP_TLV_REQUEST);
335         if (type == MLXSW_CORE_REG_ACCESS_TYPE_QUERY)
336                 mlxsw_emad_op_tlv_method_set(op_tlv,
337                                              MLXSW_EMAD_OP_TLV_METHOD_QUERY);
338         else
339                 mlxsw_emad_op_tlv_method_set(op_tlv,
340                                              MLXSW_EMAD_OP_TLV_METHOD_WRITE);
341         mlxsw_emad_op_tlv_class_set(op_tlv,
342                                     MLXSW_EMAD_OP_TLV_CLASS_REG_ACCESS);
343         mlxsw_emad_op_tlv_tid_set(op_tlv, tid);
344 }
345
346 static int mlxsw_emad_construct_eth_hdr(struct sk_buff *skb)
347 {
348         char *eth_hdr = skb_push(skb, MLXSW_EMAD_ETH_HDR_LEN);
349
350         mlxsw_emad_eth_hdr_dmac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_DMAC);
351         mlxsw_emad_eth_hdr_smac_memcpy_to(eth_hdr, MLXSW_EMAD_EH_SMAC);
352         mlxsw_emad_eth_hdr_ethertype_set(eth_hdr, MLXSW_EMAD_EH_ETHERTYPE);
353         mlxsw_emad_eth_hdr_mlx_proto_set(eth_hdr, MLXSW_EMAD_EH_MLX_PROTO);
354         mlxsw_emad_eth_hdr_ver_set(eth_hdr, MLXSW_EMAD_EH_PROTO_VERSION);
355
356         skb_reset_mac_header(skb);
357
358         return 0;
359 }
360
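/* Build the EMAD back to front: push the end TLV first, then the register
 * TLV carrying the payload, then the operation TLV and finally the
 * Ethernet header, consuming the headroom reserved by mlxsw_emad_alloc().
 */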
361 static void mlxsw_emad_construct(struct sk_buff *skb,
362                                  const struct mlxsw_reg_info *reg,
363                                  char *payload,
364                                  enum mlxsw_core_reg_access_type type,
365                                  u64 tid)
366 {
367         char *buf;
368
369         buf = skb_push(skb, MLXSW_EMAD_END_TLV_LEN * sizeof(u32));
370         mlxsw_emad_pack_end_tlv(buf);
371
372         buf = skb_push(skb, reg->len + sizeof(u32));
373         mlxsw_emad_pack_reg_tlv(buf, reg, payload);
374
375         buf = skb_push(skb, MLXSW_EMAD_OP_TLV_LEN * sizeof(u32));
376         mlxsw_emad_pack_op_tlv(buf, reg, type, tid);
377
378         mlxsw_emad_construct_eth_hdr(skb);
379 }
380
381 static char *mlxsw_emad_op_tlv(const struct sk_buff *skb)
382 {
383         return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN));
384 }
385
386 static char *mlxsw_emad_reg_tlv(const struct sk_buff *skb)
387 {
388         return ((char *) (skb->data + MLXSW_EMAD_ETH_HDR_LEN +
389                                       MLXSW_EMAD_OP_TLV_LEN * sizeof(u32)));
390 }
391
392 static char *mlxsw_emad_reg_payload(const char *op_tlv)
393 {
394         return ((char *) (op_tlv + (MLXSW_EMAD_OP_TLV_LEN + 1) * sizeof(u32)));
395 }
396
397 static u64 mlxsw_emad_get_tid(const struct sk_buff *skb)
398 {
399         char *op_tlv;
400
401         op_tlv = mlxsw_emad_op_tlv(skb);
402         return mlxsw_emad_op_tlv_tid_get(op_tlv);
403 }
404
405 static bool mlxsw_emad_is_resp(const struct sk_buff *skb)
406 {
407         char *op_tlv;
408
409         op_tlv = mlxsw_emad_op_tlv(skb);
410         return (mlxsw_emad_op_tlv_r_get(op_tlv) == MLXSW_EMAD_OP_TLV_RESPONSE);
411 }
412
413 static int mlxsw_emad_process_status(char *op_tlv,
414                                      enum mlxsw_emad_op_tlv_status *p_status)
415 {
416         *p_status = mlxsw_emad_op_tlv_status_get(op_tlv);
417
418         switch (*p_status) {
419         case MLXSW_EMAD_OP_TLV_STATUS_SUCCESS:
420                 return 0;
421         case MLXSW_EMAD_OP_TLV_STATUS_BUSY:
422         case MLXSW_EMAD_OP_TLV_STATUS_MESSAGE_RECEIPT_ACK:
423                 return -EAGAIN;
424         case MLXSW_EMAD_OP_TLV_STATUS_VERSION_NOT_SUPPORTED:
425         case MLXSW_EMAD_OP_TLV_STATUS_UNKNOWN_TLV:
426         case MLXSW_EMAD_OP_TLV_STATUS_REGISTER_NOT_SUPPORTED:
427         case MLXSW_EMAD_OP_TLV_STATUS_CLASS_NOT_SUPPORTED:
428         case MLXSW_EMAD_OP_TLV_STATUS_METHOD_NOT_SUPPORTED:
429         case MLXSW_EMAD_OP_TLV_STATUS_BAD_PARAMETER:
430         case MLXSW_EMAD_OP_TLV_STATUS_RESOURCE_NOT_AVAILABLE:
431         case MLXSW_EMAD_OP_TLV_STATUS_INTERNAL_ERROR:
432         default:
433                 return -EIO;
434         }
435 }
436
437 static int
438 mlxsw_emad_process_status_skb(struct sk_buff *skb,
439                               enum mlxsw_emad_op_tlv_status *p_status)
440 {
441         return mlxsw_emad_process_status(mlxsw_emad_op_tlv(skb), p_status);
442 }
443
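/* State of a single in-flight EMAD register transaction. A transaction is
 * set up by mlxsw_emad_reg_access(), linked on both the core's trans_list
 * (for matching responses by transaction ID) and the caller's bulk_list,
 * and is completed either from the RX listener path or from the
 * timeout/retry path. The caller reaps it through mlxsw_reg_trans_wait() /
 * mlxsw_reg_trans_bulk_wait(), which also frees it.
 */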
444 struct mlxsw_reg_trans {
445         struct list_head list;
446         struct list_head bulk_list;
447         struct mlxsw_core *core;
448         struct sk_buff *tx_skb;
449         struct mlxsw_tx_info tx_info;
450         struct delayed_work timeout_dw;
451         unsigned int retries;
452         u64 tid;
453         struct completion completion;
454         atomic_t active;
455         mlxsw_reg_trans_cb_t *cb;
456         unsigned long cb_priv;
457         const struct mlxsw_reg_info *reg;
458         enum mlxsw_core_reg_access_type type;
459         int err;
460         enum mlxsw_emad_op_tlv_status emad_status;
461         struct rcu_head rcu;
462 };
463
464 #define MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS   3000
465 #define MLXSW_EMAD_TIMEOUT_MS                   200
466
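/* The per-attempt timeout doubles with every retry (timeout << retries),
 * and a much longer base timeout is used while a firmware flash is in
 * progress.
 */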
467 static void mlxsw_emad_trans_timeout_schedule(struct mlxsw_reg_trans *trans)
468 {
469         unsigned long timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_MS);
470
471         if (trans->core->fw_flash_in_progress)
472                 timeout = msecs_to_jiffies(MLXSW_EMAD_TIMEOUT_DURING_FW_FLASH_MS);
473
474         queue_delayed_work(trans->core->emad_wq, &trans->timeout_dw,
475                            timeout << trans->retries);
476 }
477
478 static int mlxsw_emad_transmit(struct mlxsw_core *mlxsw_core,
479                                struct mlxsw_reg_trans *trans)
480 {
481         struct sk_buff *skb;
482         int err;
483
484         skb = skb_copy(trans->tx_skb, GFP_KERNEL);
485         if (!skb)
486                 return -ENOMEM;
487
488         trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), false, 0,
489                             skb->data + mlxsw_core->driver->txhdr_len,
490                             skb->len - mlxsw_core->driver->txhdr_len);
491
492         atomic_set(&trans->active, 1);
493         err = mlxsw_core_skb_transmit(mlxsw_core, skb, &trans->tx_info);
494         if (err) {
495                 dev_kfree_skb(skb);
496                 return err;
497         }
498         mlxsw_emad_trans_timeout_schedule(trans);
499         return 0;
500 }
501
502 static void mlxsw_emad_trans_finish(struct mlxsw_reg_trans *trans, int err)
503 {
504         struct mlxsw_core *mlxsw_core = trans->core;
505
506         dev_kfree_skb(trans->tx_skb);
507         spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
508         list_del_rcu(&trans->list);
509         spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
510         trans->err = err;
511         complete(&trans->completion);
512 }
513
514 static void mlxsw_emad_transmit_retry(struct mlxsw_core *mlxsw_core,
515                                       struct mlxsw_reg_trans *trans)
516 {
517         int err;
518
519         if (trans->retries < MLXSW_EMAD_MAX_RETRY) {
520                 trans->retries++;
521                 err = mlxsw_emad_transmit(trans->core, trans);
522                 if (err == 0)
523                         return;
524
525                 if (!atomic_dec_and_test(&trans->active))
526                         return;
527         } else {
528                 err = -EIO;
529         }
530         mlxsw_emad_trans_finish(trans, err);
531 }
532
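/* Each transmitted copy of an EMAD is "claimed" by whoever wins the
 * atomic_dec_and_test() on trans->active: either the timeout work below or
 * the response handler in mlxsw_emad_process_response(). The loser returns
 * early, so a late response and a timeout cannot both complete the same
 * transaction.
 */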
533 static void mlxsw_emad_trans_timeout_work(struct work_struct *work)
534 {
535         struct mlxsw_reg_trans *trans = container_of(work,
536                                                      struct mlxsw_reg_trans,
537                                                      timeout_dw.work);
538
539         if (!atomic_dec_and_test(&trans->active))
540                 return;
541
542         mlxsw_emad_transmit_retry(trans->core, trans);
543 }
544
545 static void mlxsw_emad_process_response(struct mlxsw_core *mlxsw_core,
546                                         struct mlxsw_reg_trans *trans,
547                                         struct sk_buff *skb)
548 {
549         int err;
550
551         if (!atomic_dec_and_test(&trans->active))
552                 return;
553
554         err = mlxsw_emad_process_status_skb(skb, &trans->emad_status);
555         if (err == -EAGAIN) {
556                 mlxsw_emad_transmit_retry(mlxsw_core, trans);
557         } else {
558                 if (err == 0) {
559                         char *op_tlv = mlxsw_emad_op_tlv(skb);
560
561                         if (trans->cb)
562                                 trans->cb(mlxsw_core,
563                                           mlxsw_emad_reg_payload(op_tlv),
564                                           trans->reg->len, trans->cb_priv);
565                 }
566                 mlxsw_emad_trans_finish(trans, err);
567         }
568 }
569
570 /* called with rcu read lock held */
571 static void mlxsw_emad_rx_listener_func(struct sk_buff *skb, u8 local_port,
572                                         void *priv)
573 {
574         struct mlxsw_core *mlxsw_core = priv;
575         struct mlxsw_reg_trans *trans;
576
577         trace_devlink_hwmsg(priv_to_devlink(mlxsw_core), true, 0,
578                             skb->data, skb->len);
579
580         if (!mlxsw_emad_is_resp(skb))
581                 goto free_skb;
582
583         list_for_each_entry_rcu(trans, &mlxsw_core->emad.trans_list, list) {
584                 if (mlxsw_emad_get_tid(skb) == trans->tid) {
585                         mlxsw_emad_process_response(mlxsw_core, trans, skb);
586                         break;
587                 }
588         }
589
590 free_skb:
591         dev_kfree_skb(skb);
592 }
593
594 static const struct mlxsw_listener mlxsw_emad_rx_listener =
595         MLXSW_RXL(mlxsw_emad_rx_listener_func, ETHEMAD, TRAP_TO_CPU, false,
596                   EMAD, DISCARD);
597
598 static int mlxsw_emad_init(struct mlxsw_core *mlxsw_core)
599 {
600         struct workqueue_struct *emad_wq;
601         u64 tid;
602         int err;
603
604         if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
605                 return 0;
606
607         emad_wq = alloc_workqueue("mlxsw_core_emad", 0, 0);
608         if (!emad_wq)
609                 return -ENOMEM;
610         mlxsw_core->emad_wq = emad_wq;
611
612         /* Set the upper 32 bits of the transaction ID field to a random
613          * number. This allows us to discard EMADs addressed to other
614          * devices.
615          */
616         get_random_bytes(&tid, 4);
617         tid <<= 32;
618         atomic64_set(&mlxsw_core->emad.tid, tid);
619
620         INIT_LIST_HEAD(&mlxsw_core->emad.trans_list);
621         spin_lock_init(&mlxsw_core->emad.trans_list_lock);
622
623         err = mlxsw_core_trap_register(mlxsw_core, &mlxsw_emad_rx_listener,
624                                        mlxsw_core);
625         if (err)
626                 goto err_trap_register;
627
628         err = mlxsw_core->driver->basic_trap_groups_set(mlxsw_core);
629         if (err)
630                 goto err_emad_trap_set;
631         mlxsw_core->emad.use_emad = true;
632
633         return 0;
634
635 err_emad_trap_set:
636         mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
637                                    mlxsw_core);
638 err_trap_register:
639         destroy_workqueue(mlxsw_core->emad_wq);
640         return err;
641 }
642
643 static void mlxsw_emad_fini(struct mlxsw_core *mlxsw_core)
644 {
645
646         if (!(mlxsw_core->bus->features & MLXSW_BUS_F_TXRX))
647                 return;
648
649         mlxsw_core->emad.use_emad = false;
650         mlxsw_core_trap_unregister(mlxsw_core, &mlxsw_emad_rx_listener,
651                                    mlxsw_core);
652         destroy_workqueue(mlxsw_core->emad_wq);
653 }
654
655 static struct sk_buff *mlxsw_emad_alloc(const struct mlxsw_core *mlxsw_core,
656                                         u16 reg_len)
657 {
658         struct sk_buff *skb;
659         u16 emad_len;
660
661         emad_len = (reg_len + sizeof(u32) + MLXSW_EMAD_ETH_HDR_LEN +
662                     (MLXSW_EMAD_OP_TLV_LEN + MLXSW_EMAD_END_TLV_LEN) *
663                     sizeof(u32) + mlxsw_core->driver->txhdr_len);
664         if (emad_len > MLXSW_EMAD_MAX_FRAME_LEN)
665                 return NULL;
666
667         skb = netdev_alloc_skb(NULL, emad_len);
668         if (!skb)
669                 return NULL;
670         memset(skb->data, 0, emad_len);
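        /* Reserve everything as headroom; the frame is then built into it
         * back to front (see mlxsw_emad_construct()), including the
         * driver TX header whose length is accounted for above.
         */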
671         skb_reserve(skb, emad_len);
672
673         return skb;
674 }
675
676 static int mlxsw_emad_reg_access(struct mlxsw_core *mlxsw_core,
677                                  const struct mlxsw_reg_info *reg,
678                                  char *payload,
679                                  enum mlxsw_core_reg_access_type type,
680                                  struct mlxsw_reg_trans *trans,
681                                  struct list_head *bulk_list,
682                                  mlxsw_reg_trans_cb_t *cb,
683                                  unsigned long cb_priv, u64 tid)
684 {
685         struct sk_buff *skb;
686         int err;
687
688         dev_dbg(mlxsw_core->bus_info->dev, "EMAD reg access (tid=%llx,reg_id=%x(%s),type=%s)\n",
689                 tid, reg->id, mlxsw_reg_id_str(reg->id),
690                 mlxsw_core_reg_access_type_str(type));
691
692         skb = mlxsw_emad_alloc(mlxsw_core, reg->len);
693         if (!skb)
694                 return -ENOMEM;
695
696         list_add_tail(&trans->bulk_list, bulk_list);
697         trans->core = mlxsw_core;
698         trans->tx_skb = skb;
699         trans->tx_info.local_port = MLXSW_PORT_CPU_PORT;
700         trans->tx_info.is_emad = true;
701         INIT_DELAYED_WORK(&trans->timeout_dw, mlxsw_emad_trans_timeout_work);
702         trans->tid = tid;
703         init_completion(&trans->completion);
704         trans->cb = cb;
705         trans->cb_priv = cb_priv;
706         trans->reg = reg;
707         trans->type = type;
708
709         mlxsw_emad_construct(skb, reg, payload, type, trans->tid);
710         mlxsw_core->driver->txhdr_construct(skb, &trans->tx_info);
711
712         spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
713         list_add_tail_rcu(&trans->list, &mlxsw_core->emad.trans_list);
714         spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
715         err = mlxsw_emad_transmit(mlxsw_core, trans);
716         if (err)
717                 goto err_out;
718         return 0;
719
720 err_out:
721         spin_lock_bh(&mlxsw_core->emad.trans_list_lock);
722         list_del_rcu(&trans->list);
723         spin_unlock_bh(&mlxsw_core->emad.trans_list_lock);
724         list_del(&trans->bulk_list);
725         dev_kfree_skb(trans->tx_skb);
726         return err;
727 }
728
729 /*****************
730  * Core functions
731  *****************/
732
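/* ASIC-specific drivers register themselves here from their module init
 * code; mlxsw_core_bus_device_register() later selects one of them for a
 * probed device by matching the bus-reported device kind against
 * mlxsw_driver->kind.
 */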
733 int mlxsw_core_driver_register(struct mlxsw_driver *mlxsw_driver)
734 {
735         spin_lock(&mlxsw_core_driver_list_lock);
736         list_add_tail(&mlxsw_driver->list, &mlxsw_core_driver_list);
737         spin_unlock(&mlxsw_core_driver_list_lock);
738         return 0;
739 }
740 EXPORT_SYMBOL(mlxsw_core_driver_register);
741
742 void mlxsw_core_driver_unregister(struct mlxsw_driver *mlxsw_driver)
743 {
744         spin_lock(&mlxsw_core_driver_list_lock);
745         list_del(&mlxsw_driver->list);
746         spin_unlock(&mlxsw_core_driver_list_lock);
747 }
748 EXPORT_SYMBOL(mlxsw_core_driver_unregister);
749
750 static struct mlxsw_driver *__driver_find(const char *kind)
751 {
752         struct mlxsw_driver *mlxsw_driver;
753
754         list_for_each_entry(mlxsw_driver, &mlxsw_core_driver_list, list) {
755                 if (strcmp(mlxsw_driver->kind, kind) == 0)
756                         return mlxsw_driver;
757         }
758         return NULL;
759 }
760
761 static struct mlxsw_driver *mlxsw_core_driver_get(const char *kind)
762 {
763         struct mlxsw_driver *mlxsw_driver;
764
765         spin_lock(&mlxsw_core_driver_list_lock);
766         mlxsw_driver = __driver_find(kind);
767         spin_unlock(&mlxsw_core_driver_list_lock);
768         return mlxsw_driver;
769 }
770
771 static void mlxsw_core_driver_put(const char *kind)
772 {
773         struct mlxsw_driver *mlxsw_driver;
774
775         spin_lock(&mlxsw_core_driver_list_lock);
776         mlxsw_driver = __driver_find(kind);
777         spin_unlock(&mlxsw_core_driver_list_lock);
778 }
779
780 static int mlxsw_devlink_port_split(struct devlink *devlink,
781                                     unsigned int port_index,
782                                     unsigned int count)
783 {
784         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
785
786         if (port_index >= mlxsw_core->max_ports)
787                 return -EINVAL;
788         if (!mlxsw_core->driver->port_split)
789                 return -EOPNOTSUPP;
790         return mlxsw_core->driver->port_split(mlxsw_core, port_index, count);
791 }
792
793 static int mlxsw_devlink_port_unsplit(struct devlink *devlink,
794                                       unsigned int port_index)
795 {
796         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
797
798         if (port_index >= mlxsw_core->max_ports)
799                 return -EINVAL;
800         if (!mlxsw_core->driver->port_unsplit)
801                 return -EOPNOTSUPP;
802         return mlxsw_core->driver->port_unsplit(mlxsw_core, port_index);
803 }
804
805 static int
806 mlxsw_devlink_sb_pool_get(struct devlink *devlink,
807                           unsigned int sb_index, u16 pool_index,
808                           struct devlink_sb_pool_info *pool_info)
809 {
810         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
811         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
812
813         if (!mlxsw_driver->sb_pool_get)
814                 return -EOPNOTSUPP;
815         return mlxsw_driver->sb_pool_get(mlxsw_core, sb_index,
816                                          pool_index, pool_info);
817 }
818
819 static int
820 mlxsw_devlink_sb_pool_set(struct devlink *devlink,
821                           unsigned int sb_index, u16 pool_index, u32 size,
822                           enum devlink_sb_threshold_type threshold_type)
823 {
824         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
825         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
826
827         if (!mlxsw_driver->sb_pool_set)
828                 return -EOPNOTSUPP;
829         return mlxsw_driver->sb_pool_set(mlxsw_core, sb_index,
830                                          pool_index, size, threshold_type);
831 }
832
833 static void *__dl_port(struct devlink_port *devlink_port)
834 {
835         return container_of(devlink_port, struct mlxsw_core_port, devlink_port);
836 }
837
838 static int mlxsw_devlink_port_type_set(struct devlink_port *devlink_port,
839                                        enum devlink_port_type port_type)
840 {
841         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
842         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
843         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
844
845         if (!mlxsw_driver->port_type_set)
846                 return -EOPNOTSUPP;
847
848         return mlxsw_driver->port_type_set(mlxsw_core,
849                                            mlxsw_core_port->local_port,
850                                            port_type);
851 }
852
853 static int mlxsw_devlink_sb_port_pool_get(struct devlink_port *devlink_port,
854                                           unsigned int sb_index, u16 pool_index,
855                                           u32 *p_threshold)
856 {
857         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
858         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
859         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
860
861         if (!mlxsw_driver->sb_port_pool_get ||
862             !mlxsw_core_port_check(mlxsw_core_port))
863                 return -EOPNOTSUPP;
864         return mlxsw_driver->sb_port_pool_get(mlxsw_core_port, sb_index,
865                                               pool_index, p_threshold);
866 }
867
868 static int mlxsw_devlink_sb_port_pool_set(struct devlink_port *devlink_port,
869                                           unsigned int sb_index, u16 pool_index,
870                                           u32 threshold)
871 {
872         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
873         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
874         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
875
876         if (!mlxsw_driver->sb_port_pool_set ||
877             !mlxsw_core_port_check(mlxsw_core_port))
878                 return -EOPNOTSUPP;
879         return mlxsw_driver->sb_port_pool_set(mlxsw_core_port, sb_index,
880                                               pool_index, threshold);
881 }
882
883 static int
884 mlxsw_devlink_sb_tc_pool_bind_get(struct devlink_port *devlink_port,
885                                   unsigned int sb_index, u16 tc_index,
886                                   enum devlink_sb_pool_type pool_type,
887                                   u16 *p_pool_index, u32 *p_threshold)
888 {
889         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
890         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
891         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
892
893         if (!mlxsw_driver->sb_tc_pool_bind_get ||
894             !mlxsw_core_port_check(mlxsw_core_port))
895                 return -EOPNOTSUPP;
896         return mlxsw_driver->sb_tc_pool_bind_get(mlxsw_core_port, sb_index,
897                                                  tc_index, pool_type,
898                                                  p_pool_index, p_threshold);
899 }
900
901 static int
902 mlxsw_devlink_sb_tc_pool_bind_set(struct devlink_port *devlink_port,
903                                   unsigned int sb_index, u16 tc_index,
904                                   enum devlink_sb_pool_type pool_type,
905                                   u16 pool_index, u32 threshold)
906 {
907         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
908         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
909         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
910
911         if (!mlxsw_driver->sb_tc_pool_bind_set ||
912             !mlxsw_core_port_check(mlxsw_core_port))
913                 return -EOPNOTSUPP;
914         return mlxsw_driver->sb_tc_pool_bind_set(mlxsw_core_port, sb_index,
915                                                  tc_index, pool_type,
916                                                  pool_index, threshold);
917 }
918
919 static int mlxsw_devlink_sb_occ_snapshot(struct devlink *devlink,
920                                          unsigned int sb_index)
921 {
922         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
923         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
924
925         if (!mlxsw_driver->sb_occ_snapshot)
926                 return -EOPNOTSUPP;
927         return mlxsw_driver->sb_occ_snapshot(mlxsw_core, sb_index);
928 }
929
930 static int mlxsw_devlink_sb_occ_max_clear(struct devlink *devlink,
931                                           unsigned int sb_index)
932 {
933         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
934         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
935
936         if (!mlxsw_driver->sb_occ_max_clear)
937                 return -EOPNOTSUPP;
938         return mlxsw_driver->sb_occ_max_clear(mlxsw_core, sb_index);
939 }
940
941 static int
942 mlxsw_devlink_sb_occ_port_pool_get(struct devlink_port *devlink_port,
943                                    unsigned int sb_index, u16 pool_index,
944                                    u32 *p_cur, u32 *p_max)
945 {
946         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
947         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
948         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
949
950         if (!mlxsw_driver->sb_occ_port_pool_get ||
951             !mlxsw_core_port_check(mlxsw_core_port))
952                 return -EOPNOTSUPP;
953         return mlxsw_driver->sb_occ_port_pool_get(mlxsw_core_port, sb_index,
954                                                   pool_index, p_cur, p_max);
955 }
956
957 static int
958 mlxsw_devlink_sb_occ_tc_port_bind_get(struct devlink_port *devlink_port,
959                                       unsigned int sb_index, u16 tc_index,
960                                       enum devlink_sb_pool_type pool_type,
961                                       u32 *p_cur, u32 *p_max)
962 {
963         struct mlxsw_core *mlxsw_core = devlink_priv(devlink_port->devlink);
964         struct mlxsw_driver *mlxsw_driver = mlxsw_core->driver;
965         struct mlxsw_core_port *mlxsw_core_port = __dl_port(devlink_port);
966
967         if (!mlxsw_driver->sb_occ_tc_port_bind_get ||
968             !mlxsw_core_port_check(mlxsw_core_port))
969                 return -EOPNOTSUPP;
970         return mlxsw_driver->sb_occ_tc_port_bind_get(mlxsw_core_port,
971                                                      sb_index, tc_index,
972                                                      pool_type, p_cur, p_max);
973 }
974
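/* The devlink ops below are thin wrappers: they translate devlink objects
 * to mlxsw core objects and delegate to the corresponding (optional)
 * mlxsw_driver callback, returning -EOPNOTSUPP when the driver does not
 * implement it.
 */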
975 static const struct devlink_ops mlxsw_devlink_ops = {
976         .port_type_set                  = mlxsw_devlink_port_type_set,
977         .port_split                     = mlxsw_devlink_port_split,
978         .port_unsplit                   = mlxsw_devlink_port_unsplit,
979         .sb_pool_get                    = mlxsw_devlink_sb_pool_get,
980         .sb_pool_set                    = mlxsw_devlink_sb_pool_set,
981         .sb_port_pool_get               = mlxsw_devlink_sb_port_pool_get,
982         .sb_port_pool_set               = mlxsw_devlink_sb_port_pool_set,
983         .sb_tc_pool_bind_get            = mlxsw_devlink_sb_tc_pool_bind_get,
984         .sb_tc_pool_bind_set            = mlxsw_devlink_sb_tc_pool_bind_set,
985         .sb_occ_snapshot                = mlxsw_devlink_sb_occ_snapshot,
986         .sb_occ_max_clear               = mlxsw_devlink_sb_occ_max_clear,
987         .sb_occ_port_pool_get           = mlxsw_devlink_sb_occ_port_pool_get,
988         .sb_occ_tc_port_bind_get        = mlxsw_devlink_sb_occ_tc_port_bind_get,
989 };
990
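/* Bring-up order: devlink allocation, bus init (which also fills
 * mlxsw_core->res with the device's resources), port and LAG mapping
 * allocation, EMAD, devlink registration, hwmon, thermal and finally the
 * driver's own init. The error labels below unwind in reverse.
 */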
991 int mlxsw_core_bus_device_register(const struct mlxsw_bus_info *mlxsw_bus_info,
992                                    const struct mlxsw_bus *mlxsw_bus,
993                                    void *bus_priv)
994 {
995         const char *device_kind = mlxsw_bus_info->device_kind;
996         struct mlxsw_core *mlxsw_core;
997         struct mlxsw_driver *mlxsw_driver;
998         struct devlink *devlink;
999         size_t alloc_size;
1000         int err;
1001
1002         mlxsw_driver = mlxsw_core_driver_get(device_kind);
1003         if (!mlxsw_driver)
1004                 return -EINVAL;
1005         alloc_size = sizeof(*mlxsw_core) + mlxsw_driver->priv_size;
1006         devlink = devlink_alloc(&mlxsw_devlink_ops, alloc_size);
1007         if (!devlink) {
1008                 err = -ENOMEM;
1009                 goto err_devlink_alloc;
1010         }
1011
1012         mlxsw_core = devlink_priv(devlink);
1013         INIT_LIST_HEAD(&mlxsw_core->rx_listener_list);
1014         INIT_LIST_HEAD(&mlxsw_core->event_listener_list);
1015         mlxsw_core->driver = mlxsw_driver;
1016         mlxsw_core->bus = mlxsw_bus;
1017         mlxsw_core->bus_priv = bus_priv;
1018         mlxsw_core->bus_info = mlxsw_bus_info;
1019
1020         err = mlxsw_bus->init(bus_priv, mlxsw_core, mlxsw_driver->profile,
1021                               &mlxsw_core->res);
1022         if (err)
1023                 goto err_bus_init;
1024
1025         err = mlxsw_ports_init(mlxsw_core);
1026         if (err)
1027                 goto err_ports_init;
1028
1029         if (MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG) &&
1030             MLXSW_CORE_RES_VALID(mlxsw_core, MAX_LAG_MEMBERS)) {
1031                 alloc_size = sizeof(u8) *
1032                         MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG) *
1033                         MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS);
1034                 mlxsw_core->lag.mapping = kzalloc(alloc_size, GFP_KERNEL);
1035                 if (!mlxsw_core->lag.mapping) {
1036                         err = -ENOMEM;
1037                         goto err_alloc_lag_mapping;
1038                 }
1039         }
1040
1041         err = mlxsw_emad_init(mlxsw_core);
1042         if (err)
1043                 goto err_emad_init;
1044
1045         err = devlink_register(devlink, mlxsw_bus_info->dev);
1046         if (err)
1047                 goto err_devlink_register;
1048
1049         err = mlxsw_hwmon_init(mlxsw_core, mlxsw_bus_info, &mlxsw_core->hwmon);
1050         if (err)
1051                 goto err_hwmon_init;
1052
1053         err = mlxsw_thermal_init(mlxsw_core, mlxsw_bus_info,
1054                                  &mlxsw_core->thermal);
1055         if (err)
1056                 goto err_thermal_init;
1057
1058         if (mlxsw_driver->init) {
1059                 err = mlxsw_driver->init(mlxsw_core, mlxsw_bus_info);
1060                 if (err)
1061                         goto err_driver_init;
1062         }
1063
1064         return 0;
1065
1066 err_driver_init:
1067         mlxsw_thermal_fini(mlxsw_core->thermal);
1068 err_thermal_init:
1069 err_hwmon_init:
1070         devlink_unregister(devlink);
1071 err_devlink_register:
1072         mlxsw_emad_fini(mlxsw_core);
1073 err_emad_init:
1074         kfree(mlxsw_core->lag.mapping);
1075 err_alloc_lag_mapping:
1076         mlxsw_ports_fini(mlxsw_core);
1077 err_ports_init:
1078         mlxsw_bus->fini(bus_priv);
1079 err_bus_init:
1080         devlink_free(devlink);
1081 err_devlink_alloc:
1082         mlxsw_core_driver_put(device_kind);
1083         return err;
1084 }
1085 EXPORT_SYMBOL(mlxsw_core_bus_device_register);
1086
1087 void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core)
1088 {
1089         const char *device_kind = mlxsw_core->bus_info->device_kind;
1090         struct devlink *devlink = priv_to_devlink(mlxsw_core);
1091
1092         if (mlxsw_core->driver->fini)
1093                 mlxsw_core->driver->fini(mlxsw_core);
1094         mlxsw_thermal_fini(mlxsw_core->thermal);
1095         devlink_unregister(devlink);
1096         mlxsw_emad_fini(mlxsw_core);
1097         kfree(mlxsw_core->lag.mapping);
1098         mlxsw_ports_fini(mlxsw_core);
1099         mlxsw_core->bus->fini(mlxsw_core->bus_priv);
1100         devlink_free(devlink);
1101         mlxsw_core_driver_put(device_kind);
1102 }
1103 EXPORT_SYMBOL(mlxsw_core_bus_device_unregister);
1104
1105 bool mlxsw_core_skb_transmit_busy(struct mlxsw_core *mlxsw_core,
1106                                   const struct mlxsw_tx_info *tx_info)
1107 {
1108         return mlxsw_core->bus->skb_transmit_busy(mlxsw_core->bus_priv,
1109                                                   tx_info);
1110 }
1111 EXPORT_SYMBOL(mlxsw_core_skb_transmit_busy);
1112
1113 int mlxsw_core_skb_transmit(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
1114                             const struct mlxsw_tx_info *tx_info)
1115 {
1116         return mlxsw_core->bus->skb_transmit(mlxsw_core->bus_priv, skb,
1117                                              tx_info);
1118 }
1119 EXPORT_SYMBOL(mlxsw_core_skb_transmit);
1120
1121 static bool __is_rx_listener_equal(const struct mlxsw_rx_listener *rxl_a,
1122                                    const struct mlxsw_rx_listener *rxl_b)
1123 {
1124         return (rxl_a->func == rxl_b->func &&
1125                 rxl_a->local_port == rxl_b->local_port &&
1126                 rxl_a->trap_id == rxl_b->trap_id);
1127 }
1128
1129 static struct mlxsw_rx_listener_item *
1130 __find_rx_listener_item(struct mlxsw_core *mlxsw_core,
1131                         const struct mlxsw_rx_listener *rxl,
1132                         void *priv)
1133 {
1134         struct mlxsw_rx_listener_item *rxl_item;
1135
1136         list_for_each_entry(rxl_item, &mlxsw_core->rx_listener_list, list) {
1137                 if (__is_rx_listener_equal(&rxl_item->rxl, rxl) &&
1138                     rxl_item->priv == priv)
1139                         return rxl_item;
1140         }
1141         return NULL;
1142 }
1143
1144 int mlxsw_core_rx_listener_register(struct mlxsw_core *mlxsw_core,
1145                                     const struct mlxsw_rx_listener *rxl,
1146                                     void *priv)
1147 {
1148         struct mlxsw_rx_listener_item *rxl_item;
1149
1150         rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
1151         if (rxl_item)
1152                 return -EEXIST;
1153         rxl_item = kmalloc(sizeof(*rxl_item), GFP_KERNEL);
1154         if (!rxl_item)
1155                 return -ENOMEM;
1156         rxl_item->rxl = *rxl;
1157         rxl_item->priv = priv;
1158
1159         list_add_rcu(&rxl_item->list, &mlxsw_core->rx_listener_list);
1160         return 0;
1161 }
1162 EXPORT_SYMBOL(mlxsw_core_rx_listener_register);
1163
1164 void mlxsw_core_rx_listener_unregister(struct mlxsw_core *mlxsw_core,
1165                                        const struct mlxsw_rx_listener *rxl,
1166                                        void *priv)
1167 {
1168         struct mlxsw_rx_listener_item *rxl_item;
1169
1170         rxl_item = __find_rx_listener_item(mlxsw_core, rxl, priv);
1171         if (!rxl_item)
1172                 return;
1173         list_del_rcu(&rxl_item->list);
1174         synchronize_rcu();
1175         kfree(rxl_item);
1176 }
1177 EXPORT_SYMBOL(mlxsw_core_rx_listener_unregister);
1178
1179 static void mlxsw_core_event_listener_func(struct sk_buff *skb, u8 local_port,
1180                                            void *priv)
1181 {
1182         struct mlxsw_event_listener_item *event_listener_item = priv;
1183         struct mlxsw_reg_info reg;
1184         char *payload;
1185         char *op_tlv = mlxsw_emad_op_tlv(skb);
1186         char *reg_tlv = mlxsw_emad_reg_tlv(skb);
1187
1188         reg.id = mlxsw_emad_op_tlv_register_id_get(op_tlv);
1189         reg.len = (mlxsw_emad_reg_tlv_len_get(reg_tlv) - 1) * sizeof(u32);
1190         payload = mlxsw_emad_reg_payload(op_tlv);
1191         event_listener_item->el.func(&reg, payload, event_listener_item->priv);
1192         dev_kfree_skb(skb);
1193 }
1194
1195 static bool __is_event_listener_equal(const struct mlxsw_event_listener *el_a,
1196                                       const struct mlxsw_event_listener *el_b)
1197 {
1198         return (el_a->func == el_b->func &&
1199                 el_a->trap_id == el_b->trap_id);
1200 }
1201
1202 static struct mlxsw_event_listener_item *
1203 __find_event_listener_item(struct mlxsw_core *mlxsw_core,
1204                            const struct mlxsw_event_listener *el,
1205                            void *priv)
1206 {
1207         struct mlxsw_event_listener_item *el_item;
1208
1209         list_for_each_entry(el_item, &mlxsw_core->event_listener_list, list) {
1210                 if (__is_event_listener_equal(&el_item->el, el) &&
1211                     el_item->priv == priv)
1212                         return el_item;
1213         }
1214         return NULL;
1215 }
1216
1217 int mlxsw_core_event_listener_register(struct mlxsw_core *mlxsw_core,
1218                                        const struct mlxsw_event_listener *el,
1219                                        void *priv)
1220 {
1221         int err;
1222         struct mlxsw_event_listener_item *el_item;
1223         const struct mlxsw_rx_listener rxl = {
1224                 .func = mlxsw_core_event_listener_func,
1225                 .local_port = MLXSW_PORT_DONT_CARE,
1226                 .trap_id = el->trap_id,
1227         };
1228
1229         el_item = __find_event_listener_item(mlxsw_core, el, priv);
1230         if (el_item)
1231                 return -EEXIST;
1232         el_item = kmalloc(sizeof(*el_item), GFP_KERNEL);
1233         if (!el_item)
1234                 return -ENOMEM;
1235         el_item->el = *el;
1236         el_item->priv = priv;
1237
1238         err = mlxsw_core_rx_listener_register(mlxsw_core, &rxl, el_item);
1239         if (err)
1240                 goto err_rx_listener_register;
1241
1242         /* No reason to save item if we did not manage to register an RX
1243          * listener for it.
1244          */
1245         list_add_rcu(&el_item->list, &mlxsw_core->event_listener_list);
1246
1247         return 0;
1248
1249 err_rx_listener_register:
1250         kfree(el_item);
1251         return err;
1252 }
1253 EXPORT_SYMBOL(mlxsw_core_event_listener_register);
1254
1255 void mlxsw_core_event_listener_unregister(struct mlxsw_core *mlxsw_core,
1256                                           const struct mlxsw_event_listener *el,
1257                                           void *priv)
1258 {
1259         struct mlxsw_event_listener_item *el_item;
1260         const struct mlxsw_rx_listener rxl = {
1261                 .func = mlxsw_core_event_listener_func,
1262                 .local_port = MLXSW_PORT_DONT_CARE,
1263                 .trap_id = el->trap_id,
1264         };
1265
1266         el_item = __find_event_listener_item(mlxsw_core, el, priv);
1267         if (!el_item)
1268                 return;
1269         mlxsw_core_rx_listener_unregister(mlxsw_core, &rxl, el_item);
1270         list_del(&el_item->list);
1271         kfree(el_item);
1272 }
1273 EXPORT_SYMBOL(mlxsw_core_event_listener_unregister);
1274
1275 static int mlxsw_core_listener_register(struct mlxsw_core *mlxsw_core,
1276                                         const struct mlxsw_listener *listener,
1277                                         void *priv)
1278 {
1279         if (listener->is_event)
1280                 return mlxsw_core_event_listener_register(mlxsw_core,
1281                                                 &listener->u.event_listener,
1282                                                 priv);
1283         else
1284                 return mlxsw_core_rx_listener_register(mlxsw_core,
1285                                                 &listener->u.rx_listener,
1286                                                 priv);
1287 }
1288
1289 static void mlxsw_core_listener_unregister(struct mlxsw_core *mlxsw_core,
1290                                       const struct mlxsw_listener *listener,
1291                                       void *priv)
1292 {
1293         if (listener->is_event)
1294                 mlxsw_core_event_listener_unregister(mlxsw_core,
1295                                                      &listener->u.event_listener,
1296                                                      priv);
1297         else
1298                 mlxsw_core_rx_listener_unregister(mlxsw_core,
1299                                                   &listener->u.rx_listener,
1300                                                   priv);
1301 }
1302
1303 int mlxsw_core_trap_register(struct mlxsw_core *mlxsw_core,
1304                              const struct mlxsw_listener *listener, void *priv)
1305 {
1306         char hpkt_pl[MLXSW_REG_HPKT_LEN];
1307         int err;
1308
1309         err = mlxsw_core_listener_register(mlxsw_core, listener, priv);
1310         if (err)
1311                 return err;
1312
1313         mlxsw_reg_hpkt_pack(hpkt_pl, listener->action, listener->trap_id,
1314                             listener->trap_group, listener->is_ctrl);
1315         err = mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
1316         if (err)
1317                 goto err_trap_set;
1318
1319         return 0;
1320
1321 err_trap_set:
1322         mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
1323         return err;
1324 }
1325 EXPORT_SYMBOL(mlxsw_core_trap_register);
1326
1327 void mlxsw_core_trap_unregister(struct mlxsw_core *mlxsw_core,
1328                                 const struct mlxsw_listener *listener,
1329                                 void *priv)
1330 {
1331         char hpkt_pl[MLXSW_REG_HPKT_LEN];
1332
1333         if (!listener->is_event) {
1334                 mlxsw_reg_hpkt_pack(hpkt_pl, listener->unreg_action,
1335                                     listener->trap_id, listener->trap_group,
1336                                     listener->is_ctrl);
1337                 mlxsw_reg_write(mlxsw_core, MLXSW_REG(hpkt), hpkt_pl);
1338         }
1339
1340         mlxsw_core_listener_unregister(mlxsw_core, listener, priv);
1341 }
1342 EXPORT_SYMBOL(mlxsw_core_trap_unregister);
1343
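/* Transaction IDs come from a simple 64-bit counter: the upper 32 bits are
 * the random per-device value chosen in mlxsw_emad_init(), the lower
 * 32 bits count transactions.
 */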
1344 static u64 mlxsw_core_tid_get(struct mlxsw_core *mlxsw_core)
1345 {
1346         return atomic64_inc_return(&mlxsw_core->emad.tid);
1347 }
1348
1349 static int mlxsw_core_reg_access_emad(struct mlxsw_core *mlxsw_core,
1350                                       const struct mlxsw_reg_info *reg,
1351                                       char *payload,
1352                                       enum mlxsw_core_reg_access_type type,
1353                                       struct list_head *bulk_list,
1354                                       mlxsw_reg_trans_cb_t *cb,
1355                                       unsigned long cb_priv)
1356 {
1357         u64 tid = mlxsw_core_tid_get(mlxsw_core);
1358         struct mlxsw_reg_trans *trans;
1359         int err;
1360
1361         trans = kzalloc(sizeof(*trans), GFP_KERNEL);
1362         if (!trans)
1363                 return -ENOMEM;
1364
1365         err = mlxsw_emad_reg_access(mlxsw_core, reg, payload, type, trans,
1366                                     bulk_list, cb, cb_priv, tid);
1367         if (err) {
1368                 kfree_rcu(trans, rcu);
1369                 return err;
1370         }
1371         return 0;
1372 }
1373
1374 int mlxsw_reg_trans_query(struct mlxsw_core *mlxsw_core,
1375                           const struct mlxsw_reg_info *reg, char *payload,
1376                           struct list_head *bulk_list,
1377                           mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
1378 {
1379         return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
1380                                           MLXSW_CORE_REG_ACCESS_TYPE_QUERY,
1381                                           bulk_list, cb, cb_priv);
1382 }
1383 EXPORT_SYMBOL(mlxsw_reg_trans_query);
1384
1385 int mlxsw_reg_trans_write(struct mlxsw_core *mlxsw_core,
1386                           const struct mlxsw_reg_info *reg, char *payload,
1387                           struct list_head *bulk_list,
1388                           mlxsw_reg_trans_cb_t *cb, unsigned long cb_priv)
1389 {
1390         return mlxsw_core_reg_access_emad(mlxsw_core, reg, payload,
1391                                           MLXSW_CORE_REG_ACCESS_TYPE_WRITE,
1392                                           bulk_list, cb, cb_priv);
1393 }
1394 EXPORT_SYMBOL(mlxsw_reg_trans_write);
1395
1396 static int mlxsw_reg_trans_wait(struct mlxsw_reg_trans *trans)
1397 {
1398         struct mlxsw_core *mlxsw_core = trans->core;
1399         int err;
1400
1401         wait_for_completion(&trans->completion);
1402         cancel_delayed_work_sync(&trans->timeout_dw);
1403         err = trans->err;
1404
1405         if (trans->retries)
1406                 dev_warn(mlxsw_core->bus_info->dev, "EMAD retries (%d/%d) (tid=%llx)\n",
1407                          trans->retries, MLXSW_EMAD_MAX_RETRY, trans->tid);
1408         if (err)
1409                 dev_err(mlxsw_core->bus_info->dev, "EMAD reg access failed (tid=%llx,reg_id=%x(%s),type=%s,status=%x(%s))\n",
1410                         trans->tid, trans->reg->id,
1411                         mlxsw_reg_id_str(trans->reg->id),
1412                         mlxsw_core_reg_access_type_str(trans->type),
1413                         trans->emad_status,
1414                         mlxsw_emad_op_tlv_status_str(trans->emad_status));
1415
1416         list_del(&trans->bulk_list);
1417         kfree_rcu(trans, rcu);
1418         return err;
1419 }
1420
1421 int mlxsw_reg_trans_bulk_wait(struct list_head *bulk_list)
1422 {
1423         struct mlxsw_reg_trans *trans;
1424         struct mlxsw_reg_trans *tmp;
1425         int sum_err = 0;
1426         int err;
1427
1428         list_for_each_entry_safe(trans, tmp, bulk_list, bulk_list) {
1429                 err = mlxsw_reg_trans_wait(trans);
1430                 if (err && sum_err == 0)
1431                         sum_err = err; /* first error to be returned */
1432         }
1433         return sum_err;
1434 }
1435 EXPORT_SYMBOL(mlxsw_reg_trans_bulk_wait);
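
/* Typical asynchronous usage by a driver, as a rough sketch; the RALUE
 * register and payload buffer named here are only illustrative and not
 * taken from this file:
 *
 *	LIST_HEAD(bulk_list);
 *	char ralue_pl[MLXSW_REG_RALUE_LEN];
 *	int err;
 *
 *	mlxsw_reg_ralue_pack(ralue_pl, ...);
 *	err = mlxsw_reg_trans_write(mlxsw_core, MLXSW_REG(ralue), ralue_pl,
 *				    &bulk_list, NULL, 0);
 *	...
 *	err = mlxsw_reg_trans_bulk_wait(&bulk_list);
 *
 * Each queued transaction is linked on bulk_list and the single wait call
 * reaps all of them, returning the first error seen.
 */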
1436
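/* Fallback register access through the command interface: the same
 * operation and register TLVs are packed into the ACCESS_REG command
 * mailbox instead of an EMAD frame, and the status is parsed from the
 * output mailbox with the same helper used for EMAD responses.
 */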
static int mlxsw_core_reg_access_cmd(struct mlxsw_core *mlxsw_core,
                                     const struct mlxsw_reg_info *reg,
                                     char *payload,
                                     enum mlxsw_core_reg_access_type type)
{
        enum mlxsw_emad_op_tlv_status status;
        int err, n_retry;
        char *in_mbox, *out_mbox, *tmp;

        dev_dbg(mlxsw_core->bus_info->dev, "Reg cmd access (reg_id=%x(%s),type=%s)\n",
                reg->id, mlxsw_reg_id_str(reg->id),
                mlxsw_core_reg_access_type_str(type));

        in_mbox = mlxsw_cmd_mbox_alloc();
        if (!in_mbox)
                return -ENOMEM;

        out_mbox = mlxsw_cmd_mbox_alloc();
        if (!out_mbox) {
                err = -ENOMEM;
                goto free_in_mbox;
        }

        mlxsw_emad_pack_op_tlv(in_mbox, reg, type,
                               mlxsw_core_tid_get(mlxsw_core));
        tmp = in_mbox + MLXSW_EMAD_OP_TLV_LEN * sizeof(u32);
        mlxsw_emad_pack_reg_tlv(tmp, reg, payload);

        n_retry = 0;
retry:
        err = mlxsw_cmd_access_reg(mlxsw_core, in_mbox, out_mbox);
        if (!err) {
                err = mlxsw_emad_process_status(out_mbox, &status);
                if (err) {
                        if (err == -EAGAIN && n_retry++ < MLXSW_EMAD_MAX_RETRY)
                                goto retry;
                        dev_err(mlxsw_core->bus_info->dev, "Reg cmd access status failed (status=%x(%s))\n",
                                status, mlxsw_emad_op_tlv_status_str(status));
                }
        }

        if (!err)
                memcpy(payload, mlxsw_emad_reg_payload(out_mbox),
                       reg->len);

        mlxsw_cmd_mbox_free(out_mbox);
free_in_mbox:
        mlxsw_cmd_mbox_free(in_mbox);
        if (err)
                dev_err(mlxsw_core->bus_info->dev, "Reg cmd access failed (reg_id=%x(%s),type=%s)\n",
                        reg->id, mlxsw_reg_id_str(reg->id),
                        mlxsw_core_reg_access_type_str(type));
        return err;
}

static void mlxsw_core_reg_access_cb(struct mlxsw_core *mlxsw_core,
                                     char *payload, size_t payload_len,
                                     unsigned long cb_priv)
{
        char *orig_payload = (char *) cb_priv;

        memcpy(orig_payload, payload, payload_len);
}

static int mlxsw_core_reg_access(struct mlxsw_core *mlxsw_core,
                                 const struct mlxsw_reg_info *reg,
                                 char *payload,
                                 enum mlxsw_core_reg_access_type type)
{
        LIST_HEAD(bulk_list);
        int err;

        /* During initialization EMAD interface is not available to us,
         * so we default to command interface. We switch to EMAD interface
         * after setting the appropriate traps.
         */
        if (!mlxsw_core->emad.use_emad)
                return mlxsw_core_reg_access_cmd(mlxsw_core, reg,
                                                 payload, type);

        err = mlxsw_core_reg_access_emad(mlxsw_core, reg,
                                         payload, type, &bulk_list,
                                         mlxsw_core_reg_access_cb,
                                         (unsigned long) payload);
        if (err)
                return err;
        return mlxsw_reg_trans_bulk_wait(&bulk_list);
}

int mlxsw_reg_query(struct mlxsw_core *mlxsw_core,
                    const struct mlxsw_reg_info *reg, char *payload)
{
        return mlxsw_core_reg_access(mlxsw_core, reg, payload,
                                     MLXSW_CORE_REG_ACCESS_TYPE_QUERY);
}
EXPORT_SYMBOL(mlxsw_reg_query);

int mlxsw_reg_write(struct mlxsw_core *mlxsw_core,
                    const struct mlxsw_reg_info *reg, char *payload)
{
        return mlxsw_core_reg_access(mlxsw_core, reg, payload,
                                     MLXSW_CORE_REG_ACCESS_TYPE_WRITE);
}
EXPORT_SYMBOL(mlxsw_reg_write);
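/* Illustrative sketch (not part of the driver): mlxsw_reg_query() and
 * mlxsw_reg_write() are the synchronous front ends used by the switch
 * drivers. The PMLP helpers below (MLXSW_REG(pmlp), mlxsw_reg_pmlp_pack(),
 * MLXSW_REG_PMLP_LEN) come from reg.h and are only an example register.
 *
 *        char pmlp_pl[MLXSW_REG_PMLP_LEN];
 *        int err;
 *
 *        mlxsw_reg_pmlp_pack(pmlp_pl, local_port);
 *        err = mlxsw_reg_query(mlxsw_core, MLXSW_REG(pmlp), pmlp_pl);
 *        if (err)
 *                return err;
 *
 * On success pmlp_pl holds the queried register contents, which the caller
 * then unpacks with the matching mlxsw_reg_pmlp_*_get() helpers from reg.h.
 */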
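/* Entry point used by the bus driver for received packets: resolve the
 * ingress local port (directly or through the LAG mapping), then hand the
 * skb to the first RX listener registered for this trap ID and local port,
 * or drop it if no listener matches.
 */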
void mlxsw_core_skb_receive(struct mlxsw_core *mlxsw_core, struct sk_buff *skb,
                            struct mlxsw_rx_info *rx_info)
{
        struct mlxsw_rx_listener_item *rxl_item;
        const struct mlxsw_rx_listener *rxl;
        u8 local_port;
        bool found = false;

        if (rx_info->is_lag) {
                dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: lag_id = %d, lag_port_index = 0x%x\n",
                                    __func__, rx_info->u.lag_id,
                                    rx_info->lag_port_index);
                /* Upper layer does not care if the skb came from LAG or not,
                 * so just get the local_port for the lag port and push it up.
                 */
                local_port = mlxsw_core_lag_mapping_get(mlxsw_core,
                                                        rx_info->u.lag_id,
                                                        rx_info->lag_port_index);
        } else {
                local_port = rx_info->u.sys_port;
        }

        dev_dbg_ratelimited(mlxsw_core->bus_info->dev, "%s: local_port = %d, trap_id = 0x%x\n",
                            __func__, local_port, rx_info->trap_id);

        if ((rx_info->trap_id >= MLXSW_TRAP_ID_MAX) ||
            (local_port >= mlxsw_core->max_ports))
                goto drop;

        rcu_read_lock();
        list_for_each_entry_rcu(rxl_item, &mlxsw_core->rx_listener_list, list) {
                rxl = &rxl_item->rxl;
                if ((rxl->local_port == MLXSW_PORT_DONT_CARE ||
                     rxl->local_port == local_port) &&
                    rxl->trap_id == rx_info->trap_id) {
                        found = true;
                        break;
                }
        }
        if (!found) {
                rcu_read_unlock();
                goto drop;
        }

        rxl->func(skb, local_port, rxl_item->priv);
        rcu_read_unlock();
        return;

drop:
        dev_kfree_skb(skb);
}
EXPORT_SYMBOL(mlxsw_core_skb_receive);

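/* The LAG mapping is kept as a flat array indexed by
 * lag_id * MAX_LAG_MEMBERS + port_index; each slot holds the local port of
 * that LAG member and is reset to 0 by mlxsw_core_lag_mapping_clear(). For
 * example, with a MAX_LAG_MEMBERS resource of 16, member 3 of LAG 2 is
 * stored at index 2 * 16 + 3 = 35.
 */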
static int mlxsw_core_lag_mapping_index(struct mlxsw_core *mlxsw_core,
                                        u16 lag_id, u8 port_index)
{
        return MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS) * lag_id +
               port_index;
}

void mlxsw_core_lag_mapping_set(struct mlxsw_core *mlxsw_core,
                                u16 lag_id, u8 port_index, u8 local_port)
{
        int index = mlxsw_core_lag_mapping_index(mlxsw_core,
                                                 lag_id, port_index);

        mlxsw_core->lag.mapping[index] = local_port;
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_set);

u8 mlxsw_core_lag_mapping_get(struct mlxsw_core *mlxsw_core,
                              u16 lag_id, u8 port_index)
{
        int index = mlxsw_core_lag_mapping_index(mlxsw_core,
                                                 lag_id, port_index);

        return mlxsw_core->lag.mapping[index];
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_get);

void mlxsw_core_lag_mapping_clear(struct mlxsw_core *mlxsw_core,
                                  u16 lag_id, u8 local_port)
{
        int i;

        for (i = 0; i < MLXSW_CORE_RES_GET(mlxsw_core, MAX_LAG_MEMBERS); i++) {
                int index = mlxsw_core_lag_mapping_index(mlxsw_core,
                                                         lag_id, i);

                if (mlxsw_core->lag.mapping[index] == local_port)
                        mlxsw_core->lag.mapping[index] = 0;
        }
}
EXPORT_SYMBOL(mlxsw_core_lag_mapping_clear);

bool mlxsw_core_res_valid(struct mlxsw_core *mlxsw_core,
                          enum mlxsw_res_id res_id)
{
        return mlxsw_res_valid(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_valid);

u64 mlxsw_core_res_get(struct mlxsw_core *mlxsw_core,
                       enum mlxsw_res_id res_id)
{
        return mlxsw_res_get(&mlxsw_core->res, res_id);
}
EXPORT_SYMBOL(mlxsw_core_res_get);

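/* Register the given local port with devlink. The switch driver is expected
 * to call mlxsw_core_port_fini() to undo this; on failure the per-port state
 * is cleared so the entry stays unused.
 */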
int mlxsw_core_port_init(struct mlxsw_core *mlxsw_core, u8 local_port)
{
        struct devlink *devlink = priv_to_devlink(mlxsw_core);
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;
        int err;

        mlxsw_core_port->local_port = local_port;
        err = devlink_port_register(devlink, devlink_port, local_port);
        if (err)
                memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
        return err;
}
EXPORT_SYMBOL(mlxsw_core_port_init);

void mlxsw_core_port_fini(struct mlxsw_core *mlxsw_core, u8 local_port)
{
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

        devlink_port_unregister(devlink_port);
        memset(mlxsw_core_port, 0, sizeof(*mlxsw_core_port));
}
EXPORT_SYMBOL(mlxsw_core_port_fini);

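/* The helpers below publish the port's flavour to devlink once the switch
 * driver knows what the port is used for: Ethernet (optionally as a member
 * of a split group), InfiniBand, or no type at all while the port is being
 * reconfigured. They also stash the driver's per-port private pointer.
 */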
void mlxsw_core_port_eth_set(struct mlxsw_core *mlxsw_core, u8 local_port,
                             void *port_driver_priv, struct net_device *dev,
                             bool split, u32 split_group)
{
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

        mlxsw_core_port->port_driver_priv = port_driver_priv;
        if (split)
                devlink_port_split_set(devlink_port, split_group);
        devlink_port_type_eth_set(devlink_port, dev);
}
EXPORT_SYMBOL(mlxsw_core_port_eth_set);

void mlxsw_core_port_ib_set(struct mlxsw_core *mlxsw_core, u8 local_port,
                            void *port_driver_priv)
{
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

        mlxsw_core_port->port_driver_priv = port_driver_priv;
        devlink_port_type_ib_set(devlink_port, NULL);
}
EXPORT_SYMBOL(mlxsw_core_port_ib_set);

void mlxsw_core_port_clear(struct mlxsw_core *mlxsw_core, u8 local_port,
                           void *port_driver_priv)
{
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

        mlxsw_core_port->port_driver_priv = port_driver_priv;
        devlink_port_type_clear(devlink_port);
}
EXPORT_SYMBOL(mlxsw_core_port_clear);

enum devlink_port_type mlxsw_core_port_type_get(struct mlxsw_core *mlxsw_core,
                                                u8 local_port)
{
        struct mlxsw_core_port *mlxsw_core_port =
                                        &mlxsw_core->ports[local_port];
        struct devlink_port *devlink_port = &mlxsw_core_port->devlink_port;

        return devlink_port->type;
}
EXPORT_SYMBOL(mlxsw_core_port_type_get);

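/* Dump a command mailbox to the debug log as rows of four 32-bit big-endian
 * words, skipping the all-zero tail of the buffer.
 */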
static void mlxsw_core_buf_dump_dbg(struct mlxsw_core *mlxsw_core,
                                    const char *buf, size_t size)
{
        __be32 *m = (__be32 *) buf;
        int i;
        int count = size / sizeof(__be32);

        for (i = count - 1; i >= 0; i--)
                if (m[i])
                        break;
        i++;
        count = i ? i : 1;
        for (i = 0; i < count; i += 4)
                dev_dbg(mlxsw_core->bus_info->dev, "%04x - %08x %08x %08x %08x\n",
                        i * 4, be32_to_cpu(m[i]), be32_to_cpu(m[i + 1]),
                        be32_to_cpu(m[i + 2]), be32_to_cpu(m[i + 3]));
}

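/* Execute a command through the underlying bus (see the mlxsw_cmd_*()
 * wrappers in cmd.h). Mailbox sizes must be multiples of 4 bytes; the
 * mailboxes are dumped to the debug log around the call and firmware
 * failures are reported together with their status code.
 */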
int mlxsw_cmd_exec(struct mlxsw_core *mlxsw_core, u16 opcode, u8 opcode_mod,
                   u32 in_mod, bool out_mbox_direct,
                   char *in_mbox, size_t in_mbox_size,
                   char *out_mbox, size_t out_mbox_size)
{
        u8 status;
        int err;

        BUG_ON(in_mbox_size % sizeof(u32) || out_mbox_size % sizeof(u32));
        if (!mlxsw_core->bus->cmd_exec)
                return -EOPNOTSUPP;

        dev_dbg(mlxsw_core->bus_info->dev, "Cmd exec (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
                opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod, in_mod);
        if (in_mbox) {
                dev_dbg(mlxsw_core->bus_info->dev, "Input mailbox:\n");
                mlxsw_core_buf_dump_dbg(mlxsw_core, in_mbox, in_mbox_size);
        }

        err = mlxsw_core->bus->cmd_exec(mlxsw_core->bus_priv, opcode,
                                        opcode_mod, in_mod, out_mbox_direct,
                                        in_mbox, in_mbox_size,
                                        out_mbox, out_mbox_size, &status);

        if (err == -EIO && status != MLXSW_CMD_STATUS_OK) {
                dev_err(mlxsw_core->bus_info->dev, "Cmd exec failed (opcode=%x(%s),opcode_mod=%x,in_mod=%x,status=%x(%s))\n",
                        opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
                        in_mod, status, mlxsw_cmd_status_str(status));
        } else if (err == -ETIMEDOUT) {
                dev_err(mlxsw_core->bus_info->dev, "Cmd exec timed-out (opcode=%x(%s),opcode_mod=%x,in_mod=%x)\n",
                        opcode, mlxsw_cmd_opcode_str(opcode), opcode_mod,
                        in_mod);
        }

        if (!err && out_mbox) {
                dev_dbg(mlxsw_core->bus_info->dev, "Output mailbox:\n");
                mlxsw_core_buf_dump_dbg(mlxsw_core, out_mbox, out_mbox_size);
        }
        return err;
}
EXPORT_SYMBOL(mlxsw_cmd_exec);

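/* Work scheduled through these helpers runs on the driver's own workqueues:
 * mlxsw_wq for (delayed) work and the ordered mlxsw_owq, which callers can
 * drain with mlxsw_core_flush_owq().
 */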
int mlxsw_core_schedule_dw(struct delayed_work *dwork, unsigned long delay)
{
        return queue_delayed_work(mlxsw_wq, dwork, delay);
}
EXPORT_SYMBOL(mlxsw_core_schedule_dw);

bool mlxsw_core_schedule_work(struct work_struct *work)
{
        return queue_work(mlxsw_owq, work);
}
EXPORT_SYMBOL(mlxsw_core_schedule_work);

void mlxsw_core_flush_owq(void)
{
        flush_workqueue(mlxsw_owq);
}
EXPORT_SYMBOL(mlxsw_core_flush_owq);

void mlxsw_core_fw_flash_start(struct mlxsw_core *mlxsw_core)
{
        mlxsw_core->fw_flash_in_progress = true;
}
EXPORT_SYMBOL(mlxsw_core_fw_flash_start);

void mlxsw_core_fw_flash_end(struct mlxsw_core *mlxsw_core)
{
        mlxsw_core->fw_flash_in_progress = false;
}
EXPORT_SYMBOL(mlxsw_core_fw_flash_end);

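/* Module init allocates the two workqueues used above; both are destroyed
 * again on module exit, in reverse order of allocation.
 */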
static int __init mlxsw_core_module_init(void)
{
        int err;

        mlxsw_wq = alloc_workqueue(mlxsw_core_driver_name, 0, 0);
        if (!mlxsw_wq)
                return -ENOMEM;
        mlxsw_owq = alloc_ordered_workqueue("%s_ordered", 0,
                                            mlxsw_core_driver_name);
        if (!mlxsw_owq) {
                err = -ENOMEM;
                goto err_alloc_ordered_workqueue;
        }
        return 0;

err_alloc_ordered_workqueue:
        destroy_workqueue(mlxsw_wq);
        return err;
}

static void __exit mlxsw_core_module_exit(void)
{
        destroy_workqueue(mlxsw_owq);
        destroy_workqueue(mlxsw_wq);
}

module_init(mlxsw_core_module_init);
module_exit(mlxsw_core_module_exit);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Jiri Pirko <jiri@mellanox.com>");
MODULE_DESCRIPTION("Mellanox switch device core driver");