GNU Linux-libre 6.8.9-gnu
[releases.git] / drivers / net / ethernet / mellanox / mlxsw / spectrum_nve.c
1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /* Copyright (c) 2018 Mellanox Technologies. All rights reserved */
3
4 #include <linux/err.h>
5 #include <linux/gfp.h>
6 #include <linux/kernel.h>
7 #include <linux/list.h>
8 #include <linux/netlink.h>
9 #include <linux/rtnetlink.h>
10 #include <linux/slab.h>
11 #include <net/inet_ecn.h>
12 #include <net/ipv6.h>
13
14 #include "reg.h"
15 #include "spectrum.h"
16 #include "spectrum_nve.h"
17
/* Per-ASIC-generation NVE operation tables, indexed by tunnel type.
 * Only VXLAN is registered here.
 */
const struct mlxsw_sp_nve_ops *mlxsw_sp1_nve_ops_arr[] = {
	[MLXSW_SP_NVE_TYPE_VXLAN]	= &mlxsw_sp1_nve_vxlan_ops,
};

const struct mlxsw_sp_nve_ops *mlxsw_sp2_nve_ops_arr[] = {
	[MLXSW_SP_NVE_TYPE_VXLAN]	= &mlxsw_sp2_nve_vxlan_ops,
};
25
struct mlxsw_sp_nve_mc_entry;
struct mlxsw_sp_nve_mc_record;
struct mlxsw_sp_nve_mc_list;

/* Per-protocol (IPv4/IPv6) operations on a multicast record and its
 * entries. 'type' is the matching TNUMT register record type.
 */
struct mlxsw_sp_nve_mc_record_ops {
	enum mlxsw_reg_tnumt_record_type type;
	/* Store 'addr' in 'mc_entry'; may allocate per-address resources. */
	int (*entry_add)(struct mlxsw_sp_nve_mc_record *mc_record,
			 struct mlxsw_sp_nve_mc_entry *mc_entry,
			 const union mlxsw_sp_l3addr *addr);
	/* Release whatever entry_add() acquired for 'mc_entry'. */
	void (*entry_del)(const struct mlxsw_sp_nve_mc_record *mc_record,
			  const struct mlxsw_sp_nve_mc_entry *mc_entry);
	/* Encode 'mc_entry' into the TNUMT payload at 'entry_index'. */
	void (*entry_set)(const struct mlxsw_sp_nve_mc_record *mc_record,
			  const struct mlxsw_sp_nve_mc_entry *mc_entry,
			  char *tnumt_pl, unsigned int entry_index);
	/* Return true if 'mc_entry' holds 'addr'. */
	bool (*entry_compare)(const struct mlxsw_sp_nve_mc_record *mc_record,
			      const struct mlxsw_sp_nve_mc_entry *mc_entry,
			      const union mlxsw_sp_l3addr *addr);
};
44
/* A multicast list is keyed solely by the FID it floods for. */
struct mlxsw_sp_nve_mc_list_key {
	u16 fid_index;
};

struct mlxsw_sp_nve_mc_ipv6_entry {
	struct in6_addr addr6;
	/* KVDL index where the IPv6 address itself is stored; the TNUMT
	 * record references the address by this index.
	 */
	u32 addr6_kvdl_index;
};

/* One underlay destination IP inside a record. 'valid' marks slots in
 * use; invalid slots are skipped when the record is packed to hardware.
 */
struct mlxsw_sp_nve_mc_entry {
	union {
		__be32 addr4;
		struct mlxsw_sp_nve_mc_ipv6_entry ipv6_entry;
	};
	u8 valid:1;
};

/* A fixed-capacity block of entries backed by one TNUMT record at
 * 'kvdl_index'. Records of a list are chained in hardware in list
 * order; entries[] is a flexible array sized by the per-protocol
 * maximum (nve->num_max_mc_entries).
 */
struct mlxsw_sp_nve_mc_record {
	struct list_head list;
	enum mlxsw_sp_l3proto proto;
	unsigned int num_entries;
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_sp_nve_mc_list *mc_list;
	const struct mlxsw_sp_nve_mc_record_ops *ops;
	u32 kvdl_index;
	struct mlxsw_sp_nve_mc_entry entries[];
};

/* Per-FID list of records, hashed by FID index. */
struct mlxsw_sp_nve_mc_list {
	struct list_head records_list;
	struct rhash_head ht_node;
	struct mlxsw_sp_nve_mc_list_key key;
};

static const struct rhashtable_params mlxsw_sp_nve_mc_list_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_nve_mc_list_key),
	.key_offset = offsetof(struct mlxsw_sp_nve_mc_list, key),
	.head_offset = offsetof(struct mlxsw_sp_nve_mc_list, ht_node),
};
84
/* IPv4 underlay addresses are stored inline in the entry; no extra
 * resources are needed, so this cannot fail.
 */
static int
mlxsw_sp_nve_mc_record_ipv4_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
				      struct mlxsw_sp_nve_mc_entry *mc_entry,
				      const union mlxsw_sp_l3addr *addr)
{
	mc_entry->addr4 = addr->addr4;

	return 0;
}
94
/* Nothing to release for IPv4 entries (address is stored inline). */
static void
mlxsw_sp_nve_mc_record_ipv4_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
				      const struct mlxsw_sp_nve_mc_entry *mc_entry)
{
}
100
101 static void
102 mlxsw_sp_nve_mc_record_ipv4_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record,
103                                       const struct mlxsw_sp_nve_mc_entry *mc_entry,
104                                       char *tnumt_pl, unsigned int entry_index)
105 {
106         u32 udip = be32_to_cpu(mc_entry->addr4);
107
108         mlxsw_reg_tnumt_udip_set(tnumt_pl, entry_index, udip);
109 }
110
/* Big-endian comparison of the stored and requested IPv4 addresses. */
static bool
mlxsw_sp_nve_mc_record_ipv4_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record,
					  const struct mlxsw_sp_nve_mc_entry *mc_entry,
					  const union mlxsw_sp_l3addr *addr)
{
	return mc_entry->addr4 == addr->addr4;
}
118
/* IPv4 flavor of the record operations. */
static const struct mlxsw_sp_nve_mc_record_ops
mlxsw_sp_nve_mc_record_ipv4_ops = {
	.type		= MLXSW_REG_TNUMT_RECORD_TYPE_IPV4,
	.entry_add	= &mlxsw_sp_nve_mc_record_ipv4_entry_add,
	.entry_del	= &mlxsw_sp_nve_mc_record_ipv4_entry_del,
	.entry_set	= &mlxsw_sp_nve_mc_record_ipv4_entry_set,
	.entry_compare	= &mlxsw_sp_nve_mc_record_ipv4_entry_compare,
};
127
/* IPv6 underlay addresses are referenced by KVDL index in hardware, so
 * first get (and reference) a KVDL entry for the address, then record
 * both the address and its index in the entry.
 */
static int
mlxsw_sp_nve_mc_record_ipv6_entry_add(struct mlxsw_sp_nve_mc_record *mc_record,
				      struct mlxsw_sp_nve_mc_entry *mc_entry,
				      const union mlxsw_sp_l3addr *addr)
{
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_ipv6_addr_kvdl_index_get(mc_record->mlxsw_sp,
						&addr->addr6, &kvdl_index);
	if (err)
		return err;

	mc_entry->ipv6_entry.addr6 = addr->addr6;
	mc_entry->ipv6_entry.addr6_kvdl_index = kvdl_index;
	return 0;
}
145
/* Drop the KVDL reference taken by the IPv6 entry_add(). */
static void
mlxsw_sp_nve_mc_record_ipv6_entry_del(const struct mlxsw_sp_nve_mc_record *mc_record,
				      const struct mlxsw_sp_nve_mc_entry *mc_entry)
{
	mlxsw_sp_ipv6_addr_put(mc_record->mlxsw_sp,
			       &mc_entry->ipv6_entry.addr6);
}
153
154 static void
155 mlxsw_sp_nve_mc_record_ipv6_entry_set(const struct mlxsw_sp_nve_mc_record *mc_record,
156                                       const struct mlxsw_sp_nve_mc_entry *mc_entry,
157                                       char *tnumt_pl, unsigned int entry_index)
158 {
159         u32 udip_ptr = mc_entry->ipv6_entry.addr6_kvdl_index;
160
161         mlxsw_reg_tnumt_udip_ptr_set(tnumt_pl, entry_index, udip_ptr);
162 }
163
/* Full 128-bit comparison of the stored and requested IPv6 addresses. */
static bool
mlxsw_sp_nve_mc_record_ipv6_entry_compare(const struct mlxsw_sp_nve_mc_record *mc_record,
					  const struct mlxsw_sp_nve_mc_entry *mc_entry,
					  const union mlxsw_sp_l3addr *addr)
{
	return ipv6_addr_equal(&mc_entry->ipv6_entry.addr6, &addr->addr6);
}
171
/* IPv6 flavor of the record operations. */
static const struct mlxsw_sp_nve_mc_record_ops
mlxsw_sp_nve_mc_record_ipv6_ops = {
	.type		= MLXSW_REG_TNUMT_RECORD_TYPE_IPV6,
	.entry_add	= &mlxsw_sp_nve_mc_record_ipv6_entry_add,
	.entry_del	= &mlxsw_sp_nve_mc_record_ipv6_entry_del,
	.entry_set	= &mlxsw_sp_nve_mc_record_ipv6_entry_set,
	.entry_compare	= &mlxsw_sp_nve_mc_record_ipv6_entry_compare,
};
180
/* Record operations dispatch table, indexed by L3 protocol. */
static const struct mlxsw_sp_nve_mc_record_ops *
mlxsw_sp_nve_mc_record_ops_arr[] = {
	[MLXSW_SP_L3_PROTO_IPV4] = &mlxsw_sp_nve_mc_record_ipv4_ops,
	[MLXSW_SP_L3_PROTO_IPV6] = &mlxsw_sp_nve_mc_record_ipv6_ops,
};
186
187 int mlxsw_sp_nve_learned_ip_resolve(struct mlxsw_sp *mlxsw_sp, u32 uip,
188                                     enum mlxsw_sp_l3proto proto,
189                                     union mlxsw_sp_l3addr *addr)
190 {
191         switch (proto) {
192         case MLXSW_SP_L3_PROTO_IPV4:
193                 addr->addr4 = cpu_to_be32(uip);
194                 return 0;
195         default:
196                 WARN_ON(1);
197                 return -EINVAL;
198         }
199 }
200
/* Look up the multicast list of a FID; NULL if none exists. */
static struct mlxsw_sp_nve_mc_list *
mlxsw_sp_nve_mc_list_find(struct mlxsw_sp *mlxsw_sp,
			  const struct mlxsw_sp_nve_mc_list_key *key)
{
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;

	return rhashtable_lookup_fast(&nve->mc_list_ht, key,
				      mlxsw_sp_nve_mc_list_ht_params);
}
210
/* Allocate an empty multicast list for 'key' and insert it into the
 * hash table. Returns the list or ERR_PTR() on failure.
 */
static struct mlxsw_sp_nve_mc_list *
mlxsw_sp_nve_mc_list_create(struct mlxsw_sp *mlxsw_sp,
			    const struct mlxsw_sp_nve_mc_list_key *key)
{
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
	struct mlxsw_sp_nve_mc_list *mc_list;
	int err;

	mc_list = kmalloc(sizeof(*mc_list), GFP_KERNEL);
	if (!mc_list)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&mc_list->records_list);
	mc_list->key = *key;

	err = rhashtable_insert_fast(&nve->mc_list_ht, &mc_list->ht_node,
				     mlxsw_sp_nve_mc_list_ht_params);
	if (err)
		goto err_rhashtable_insert;

	return mc_list;

err_rhashtable_insert:
	kfree(mc_list);
	return ERR_PTR(err);
}
237
/* Unhash and free a multicast list; it must be empty by now. */
static void mlxsw_sp_nve_mc_list_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_nve_mc_list *mc_list)
{
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;

	rhashtable_remove_fast(&nve->mc_list_ht, &mc_list->ht_node,
			       mlxsw_sp_nve_mc_list_ht_params);
	WARN_ON(!list_empty(&mc_list->records_list));
	kfree(mc_list);
}
248
/* Find the multicast list for 'key', creating it on first use.
 * Returns the list or ERR_PTR() on allocation/insertion failure.
 */
static struct mlxsw_sp_nve_mc_list *
mlxsw_sp_nve_mc_list_get(struct mlxsw_sp *mlxsw_sp,
			 const struct mlxsw_sp_nve_mc_list_key *key)
{
	struct mlxsw_sp_nve_mc_list *mc_list;

	mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, key);
	if (!mc_list)
		mc_list = mlxsw_sp_nve_mc_list_create(mlxsw_sp, key);

	return mc_list;
}
261
262 static void
263 mlxsw_sp_nve_mc_list_put(struct mlxsw_sp *mlxsw_sp,
264                          struct mlxsw_sp_nve_mc_list *mc_list)
265 {
266         if (!list_empty(&mc_list->records_list))
267                 return;
268         mlxsw_sp_nve_mc_list_destroy(mlxsw_sp, mc_list);
269 }
270
/* Allocate a record sized for the per-protocol entry maximum, reserve
 * a KVDL slot for its TNUMT register record and append it to the
 * list's tail. Returns the record or ERR_PTR() on failure.
 */
static struct mlxsw_sp_nve_mc_record *
mlxsw_sp_nve_mc_record_create(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_nve_mc_list *mc_list,
			      enum mlxsw_sp_l3proto proto)
{
	unsigned int num_max_entries = mlxsw_sp->nve->num_max_mc_entries[proto];
	struct mlxsw_sp_nve_mc_record *mc_record;
	int err;

	mc_record = kzalloc(struct_size(mc_record, entries, num_max_entries),
			    GFP_KERNEL);
	if (!mc_record)
		return ERR_PTR(-ENOMEM);

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1,
				  &mc_record->kvdl_index);
	if (err)
		goto err_kvdl_alloc;

	mc_record->ops = mlxsw_sp_nve_mc_record_ops_arr[proto];
	mc_record->mlxsw_sp = mlxsw_sp;
	mc_record->mc_list = mc_list;
	mc_record->proto = proto;
	list_add_tail(&mc_record->list, &mc_list->records_list);

	return mc_record;

err_kvdl_alloc:
	kfree(mc_record);
	return ERR_PTR(err);
}
302
/* Unlink a record, return its KVDL slot and free it; all of its
 * entries must already have been deleted.
 */
static void
mlxsw_sp_nve_mc_record_destroy(struct mlxsw_sp_nve_mc_record *mc_record)
{
	struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp;

	list_del(&mc_record->list);
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_TNUMT, 1,
			   mc_record->kvdl_index);
	WARN_ON(mc_record->num_entries);
	kfree(mc_record);
}
314
/* Find a record of 'proto' with a free entry slot, searching from the
 * tail (records fill up towards the head), or create a new record if
 * all matching ones are full.
 */
static struct mlxsw_sp_nve_mc_record *
mlxsw_sp_nve_mc_record_get(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_nve_mc_list *mc_list,
			   enum mlxsw_sp_l3proto proto)
{
	struct mlxsw_sp_nve_mc_record *mc_record;

	list_for_each_entry_reverse(mc_record, &mc_list->records_list, list) {
		unsigned int num_entries = mc_record->num_entries;
		struct mlxsw_sp_nve *nve = mlxsw_sp->nve;

		if (mc_record->proto == proto &&
		    num_entries < nve->num_max_mc_entries[proto])
			return mc_record;
	}

	return mlxsw_sp_nve_mc_record_create(mlxsw_sp, mc_list, proto);
}
333
334 static void
335 mlxsw_sp_nve_mc_record_put(struct mlxsw_sp_nve_mc_record *mc_record)
336 {
337         if (mc_record->num_entries != 0)
338                 return;
339
340         mlxsw_sp_nve_mc_record_destroy(mc_record);
341 }
342
343 static struct mlxsw_sp_nve_mc_entry *
344 mlxsw_sp_nve_mc_free_entry_find(struct mlxsw_sp_nve_mc_record *mc_record)
345 {
346         struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
347         unsigned int num_max_entries;
348         int i;
349
350         num_max_entries = nve->num_max_mc_entries[mc_record->proto];
351         for (i = 0; i < num_max_entries; i++) {
352                 if (mc_record->entries[i].valid)
353                         continue;
354                 return &mc_record->entries[i];
355         }
356
357         return NULL;
358 }
359
/* Re-write the TNUMT register record backing 'mc_record' so hardware
 * sees the current set of valid entries. If another record follows
 * this one in the list, the register's "next" pointer is set to chain
 * to it.
 */
static int
mlxsw_sp_nve_mc_record_refresh(struct mlxsw_sp_nve_mc_record *mc_record)
{
	enum mlxsw_reg_tnumt_record_type type = mc_record->ops->type;
	struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
	struct mlxsw_sp *mlxsw_sp = mc_record->mlxsw_sp;
	char tnumt_pl[MLXSW_REG_TNUMT_LEN];
	unsigned int num_max_entries;
	unsigned int num_entries = 0;
	u32 next_kvdl_index = 0;
	bool next_valid = false;
	int i;

	/* Chain to the following record, if there is one. */
	if (!list_is_last(&mc_record->list, &mc_list->records_list)) {
		struct mlxsw_sp_nve_mc_record *next_record;

		next_record = list_next_entry(mc_record, list);
		next_kvdl_index = next_record->kvdl_index;
		next_valid = true;
	}

	mlxsw_reg_tnumt_pack(tnumt_pl, type, MLXSW_REG_TUNNEL_PORT_NVE,
			     mc_record->kvdl_index, next_valid,
			     next_kvdl_index, mc_record->num_entries);

	/* Pack only the valid entries, compacted to the front of the
	 * register record; invalid slots are skipped.
	 */
	num_max_entries = mlxsw_sp->nve->num_max_mc_entries[mc_record->proto];
	for (i = 0; i < num_max_entries; i++) {
		struct mlxsw_sp_nve_mc_entry *mc_entry;

		mc_entry = &mc_record->entries[i];
		if (!mc_entry->valid)
			continue;
		mc_record->ops->entry_set(mc_record, mc_entry, tnumt_pl,
					  num_entries++);
	}

	WARN_ON(num_entries != mc_record->num_entries);

	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnumt), tnumt_pl);
}
400
401 static bool
402 mlxsw_sp_nve_mc_record_is_first(struct mlxsw_sp_nve_mc_record *mc_record)
403 {
404         struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;
405         struct mlxsw_sp_nve_mc_record *first_record;
406
407         first_record = list_first_entry(&mc_list->records_list,
408                                         struct mlxsw_sp_nve_mc_record, list);
409
410         return mc_record == first_record;
411 }
412
/* Return the valid entry of 'mc_record' holding 'addr', or NULL. */
static struct mlxsw_sp_nve_mc_entry *
mlxsw_sp_nve_mc_entry_find(struct mlxsw_sp_nve_mc_record *mc_record,
			   union mlxsw_sp_l3addr *addr)
{
	struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
	unsigned int num_max_entries;
	int i;

	num_max_entries = nve->num_max_mc_entries[mc_record->proto];
	for (i = 0; i < num_max_entries; i++) {
		struct mlxsw_sp_nve_mc_entry *mc_entry;

		mc_entry = &mc_record->entries[i];
		if (!mc_entry->valid)
			continue;
		if (mc_record->ops->entry_compare(mc_record, mc_entry, addr))
			return mc_entry;
	}

	return NULL;
}
434
/* Add 'addr' to a free slot of 'mc_record' and sync the record to
 * hardware. If the record just became non-empty and is not the first,
 * the previous record is refreshed too so its "next" pointer starts
 * chaining to this one. On any failure the entry is fully unwound.
 */
static int
mlxsw_sp_nve_mc_record_ip_add(struct mlxsw_sp_nve_mc_record *mc_record,
			      union mlxsw_sp_l3addr *addr)
{
	struct mlxsw_sp_nve_mc_entry *mc_entry = NULL;
	int err;

	/* The caller got this record via mc_record_get(), so a free
	 * slot must exist; its absence is a driver bug.
	 */
	mc_entry = mlxsw_sp_nve_mc_free_entry_find(mc_record);
	if (WARN_ON(!mc_entry))
		return -EINVAL;

	err = mc_record->ops->entry_add(mc_record, mc_entry, addr);
	if (err)
		return err;
	mc_record->num_entries++;
	mc_entry->valid = true;

	err = mlxsw_sp_nve_mc_record_refresh(mc_record);
	if (err)
		goto err_record_refresh;

	/* If this is a new record and not the first one, then we need to
	 * update the next pointer of the previous entry
	 */
	if (mc_record->num_entries != 1 ||
	    mlxsw_sp_nve_mc_record_is_first(mc_record))
		return 0;

	err = mlxsw_sp_nve_mc_record_refresh(list_prev_entry(mc_record, list));
	if (err)
		goto err_prev_record_refresh;

	return 0;

err_prev_record_refresh:
err_record_refresh:
	mc_entry->valid = false;
	mc_record->num_entries--;
	mc_record->ops->entry_del(mc_record, mc_entry);
	return err;
}
476
/* Remove 'mc_entry' from 'mc_record', keeping the hardware record
 * chain consistent. Four cases are handled below, in order: the record
 * survives; a dying non-first record; a dying first record with
 * followers; the last remaining record.
 */
static void
mlxsw_sp_nve_mc_record_entry_del(struct mlxsw_sp_nve_mc_record *mc_record,
				 struct mlxsw_sp_nve_mc_entry *mc_entry)
{
	struct mlxsw_sp_nve_mc_list *mc_list = mc_record->mc_list;

	mc_entry->valid = false;
	mc_record->num_entries--;

	/* When the record continues to exist we only need to invalidate
	 * the requested entry
	 */
	if (mc_record->num_entries != 0) {
		mlxsw_sp_nve_mc_record_refresh(mc_record);
		mc_record->ops->entry_del(mc_record, mc_entry);
		return;
	}

	/* If the record needs to be deleted, but it is not the first,
	 * then we need to make sure that the previous record no longer
	 * points to it. Remove deleted record from the list to reflect
	 * that and then re-add it at the end, so that it could be
	 * properly removed by the record destruction code
	 */
	if (!mlxsw_sp_nve_mc_record_is_first(mc_record)) {
		struct mlxsw_sp_nve_mc_record *prev_record;

		prev_record = list_prev_entry(mc_record, list);
		list_del(&mc_record->list);
		mlxsw_sp_nve_mc_record_refresh(prev_record);
		list_add_tail(&mc_record->list, &mc_list->records_list);
		mc_record->ops->entry_del(mc_record, mc_entry);
		return;
	}

	/* If the first record needs to be deleted, but the list is not
	 * singular, then the second record needs to be written in the
	 * first record's address, as this address is stored as a property
	 * of the FID
	 */
	if (mlxsw_sp_nve_mc_record_is_first(mc_record) &&
	    !list_is_singular(&mc_list->records_list)) {
		struct mlxsw_sp_nve_mc_record *next_record;

		next_record = list_next_entry(mc_record, list);
		swap(mc_record->kvdl_index, next_record->kvdl_index);
		mlxsw_sp_nve_mc_record_refresh(next_record);
		mc_record->ops->entry_del(mc_record, mc_entry);
		return;
	}

	/* This is the last case where the last remaining record needs to
	 * be deleted. Simply delete the entry
	 */
	mc_record->ops->entry_del(mc_record, mc_entry);
}
533
/* Find the record of 'proto' in 'mc_list' that contains 'addr'.
 * Returns the record and sets '*mc_entry' to the matching entry, or
 * returns NULL if no record holds the address.
 */
static struct mlxsw_sp_nve_mc_record *
mlxsw_sp_nve_mc_record_find(struct mlxsw_sp_nve_mc_list *mc_list,
			    enum mlxsw_sp_l3proto proto,
			    union mlxsw_sp_l3addr *addr,
			    struct mlxsw_sp_nve_mc_entry **mc_entry)
{
	struct mlxsw_sp_nve_mc_record *mc_record;

	list_for_each_entry(mc_record, &mc_list->records_list, list) {
		if (mc_record->proto != proto)
			continue;

		*mc_entry = mlxsw_sp_nve_mc_entry_find(mc_record, addr);
		if (*mc_entry)
			return mc_record;
	}

	return NULL;
}
553
/* Add underlay address 'addr' to a FID's multicast list, creating a
 * new record if all existing ones are full. On failure the record
 * reference is dropped (destroying a record created just for this
 * address).
 */
static int mlxsw_sp_nve_mc_list_ip_add(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_nve_mc_list *mc_list,
				       enum mlxsw_sp_l3proto proto,
				       union mlxsw_sp_l3addr *addr)
{
	struct mlxsw_sp_nve_mc_record *mc_record;
	int err;

	mc_record = mlxsw_sp_nve_mc_record_get(mlxsw_sp, mc_list, proto);
	if (IS_ERR(mc_record))
		return PTR_ERR(mc_record);

	err = mlxsw_sp_nve_mc_record_ip_add(mc_record, addr);
	if (err)
		goto err_ip_add;

	return 0;

err_ip_add:
	mlxsw_sp_nve_mc_record_put(mc_record);
	return err;
}
576
/* Remove underlay address 'addr' from a FID's multicast list, if
 * present, and drop the record reference (destroying the record once
 * empty).
 */
static void mlxsw_sp_nve_mc_list_ip_del(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_nve_mc_list *mc_list,
					enum mlxsw_sp_l3proto proto,
					union mlxsw_sp_l3addr *addr)
{
	struct mlxsw_sp_nve_mc_record *mc_record;
	struct mlxsw_sp_nve_mc_entry *mc_entry;

	mc_record = mlxsw_sp_nve_mc_record_find(mc_list, proto, addr,
						&mc_entry);
	if (!mc_record)
		return;

	mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
	mlxsw_sp_nve_mc_record_put(mc_record);
}
593
/* Point the FID's NVE flood index at the first record of 'mc_list'. */
static int
mlxsw_sp_nve_fid_flood_index_set(struct mlxsw_sp_fid *fid,
				 struct mlxsw_sp_nve_mc_list *mc_list)
{
	struct mlxsw_sp_nve_mc_record *mc_record;

	/* The address of the first record in the list is a property of
	 * the FID and we never change it. It only needs to be set when
	 * a new list is created
	 */
	if (mlxsw_sp_fid_nve_flood_index_is_set(fid))
		return 0;

	mc_record = list_first_entry(&mc_list->records_list,
				     struct mlxsw_sp_nve_mc_record, list);

	return mlxsw_sp_fid_nve_flood_index_set(fid, mc_record->kvdl_index);
}
612
613 static void
614 mlxsw_sp_nve_fid_flood_index_clear(struct mlxsw_sp_fid *fid,
615                                    struct mlxsw_sp_nve_mc_list *mc_list)
616 {
617         struct mlxsw_sp_nve_mc_record *mc_record;
618
619         /* The address of the first record needs to be invalidated only when
620          * the last record is about to be removed
621          */
622         if (!list_is_singular(&mc_list->records_list))
623                 return;
624
625         mc_record = list_first_entry(&mc_list->records_list,
626                                      struct mlxsw_sp_nve_mc_record, list);
627         if (mc_record->num_entries != 1)
628                 return;
629
630         return mlxsw_sp_fid_nve_flood_index_clear(fid);
631 }
632
/* Add underlay flood address 'addr' for 'fid': get (or create) the
 * FID's multicast list, add the address to it and make sure the FID
 * points at the list's first record. Unwinds in reverse order on
 * failure.
 */
int mlxsw_sp_nve_flood_ip_add(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fid *fid,
			      enum mlxsw_sp_l3proto proto,
			      union mlxsw_sp_l3addr *addr)
{
	struct mlxsw_sp_nve_mc_list_key key = { 0 };
	struct mlxsw_sp_nve_mc_list *mc_list;
	int err;

	key.fid_index = mlxsw_sp_fid_index(fid);
	mc_list = mlxsw_sp_nve_mc_list_get(mlxsw_sp, &key);
	if (IS_ERR(mc_list))
		return PTR_ERR(mc_list);

	err = mlxsw_sp_nve_mc_list_ip_add(mlxsw_sp, mc_list, proto, addr);
	if (err)
		goto err_add_ip;

	err = mlxsw_sp_nve_fid_flood_index_set(fid, mc_list);
	if (err)
		goto err_fid_flood_index_set;

	return 0;

err_fid_flood_index_set:
	mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr);
err_add_ip:
	mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
	return err;
}
663
/* Remove underlay flood address 'addr' from 'fid', clearing the FID's
 * flood index first if this is the list's final entry, and releasing
 * the list reference afterwards.
 */
void mlxsw_sp_nve_flood_ip_del(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_fid *fid,
			       enum mlxsw_sp_l3proto proto,
			       union mlxsw_sp_l3addr *addr)
{
	struct mlxsw_sp_nve_mc_list_key key = { 0 };
	struct mlxsw_sp_nve_mc_list *mc_list;

	key.fid_index = mlxsw_sp_fid_index(fid);
	mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
	if (!mc_list)
		return;

	mlxsw_sp_nve_fid_flood_index_clear(fid, mc_list);
	mlxsw_sp_nve_mc_list_ip_del(mlxsw_sp, mc_list, proto, addr);
	mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
}
681
/* Delete every valid entry of 'mc_record' and release the record,
 * which must end up empty.
 */
static void
mlxsw_sp_nve_mc_record_delete(struct mlxsw_sp_nve_mc_record *mc_record)
{
	struct mlxsw_sp_nve *nve = mc_record->mlxsw_sp->nve;
	unsigned int num_max_entries;
	int i;

	num_max_entries = nve->num_max_mc_entries[mc_record->proto];
	for (i = 0; i < num_max_entries; i++) {
		struct mlxsw_sp_nve_mc_entry *mc_entry = &mc_record->entries[i];

		if (!mc_entry->valid)
			continue;
		mlxsw_sp_nve_mc_record_entry_del(mc_record, mc_entry);
	}

	WARN_ON(mc_record->num_entries);
	mlxsw_sp_nve_mc_record_put(mc_record);
}
701
/* Tear down all flood records of 'fid': clear the FID's flood index,
 * then delete every record on the FID's multicast list and release
 * the list. No-op if the FID has no flood index set.
 */
static void mlxsw_sp_nve_flood_ip_flush(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_fid *fid)
{
	struct mlxsw_sp_nve_mc_record *mc_record, *tmp;
	struct mlxsw_sp_nve_mc_list_key key = { 0 };
	struct mlxsw_sp_nve_mc_list *mc_list;

	if (!mlxsw_sp_fid_nve_flood_index_is_set(fid))
		return;

	mlxsw_sp_fid_nve_flood_index_clear(fid);

	/* A set flood index implies the list exists; its absence is a
	 * driver bug.
	 */
	key.fid_index = mlxsw_sp_fid_index(fid);
	mc_list = mlxsw_sp_nve_mc_list_find(mlxsw_sp, &key);
	if (WARN_ON(!mc_list))
		return;

	list_for_each_entry_safe(mc_record, tmp, &mc_list->records_list, list)
		mlxsw_sp_nve_mc_record_delete(mc_record);

	WARN_ON(!list_empty(&mc_list->records_list));
	mlxsw_sp_nve_mc_list_put(mlxsw_sp, mc_list);
}
725
/* Take a reference on the NVE tunnel. Only the first reference
 * actually initializes it: the config is latched, a KVDL adjacency
 * slot is reserved for the tunnel and the type-specific init runs.
 * On failure the refcount and config are unwound.
 */
static int mlxsw_sp_nve_tunnel_init(struct mlxsw_sp *mlxsw_sp,
				    struct mlxsw_sp_nve_config *config)
{
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
	const struct mlxsw_sp_nve_ops *ops;
	int err;

	if (nve->num_nve_tunnels++ != 0)
		return 0;

	nve->config = *config;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
				  &nve->tunnel_index);
	if (err)
		goto err_kvdl_alloc;

	ops = nve->nve_ops_arr[config->type];
	err = ops->init(nve, config);
	if (err)
		goto err_ops_init;

	return 0;

err_ops_init:
	mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
			   nve->tunnel_index);
err_kvdl_alloc:
	memset(&nve->config, 0, sizeof(nve->config));
	nve->num_nve_tunnels--;
	return err;
}
758
/* Drop a reference on the NVE tunnel; the last reference runs the
 * type-specific fini, frees the KVDL slot and clears the config.
 */
static void mlxsw_sp_nve_tunnel_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
	const struct mlxsw_sp_nve_ops *ops;

	ops = nve->nve_ops_arr[nve->config.type];

	if (mlxsw_sp->nve->num_nve_tunnels == 1) {
		ops->fini(nve);
		mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ADJ, 1,
				   nve->tunnel_index);
		memset(&nve->config, 0, sizeof(nve->config));
	}
	nve->num_nve_tunnels--;
}
774
/* Flush all NVE FDB entries of 'fid_index' via the SFDF register.
 * Best-effort: the register write's return value is ignored.
 */
static void mlxsw_sp_nve_fdb_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
					  u16 fid_index)
{
	char sfdf_pl[MLXSW_REG_SFDF_LEN];

	mlxsw_reg_sfdf_pack(sfdf_pl, MLXSW_REG_SFDF_FLUSH_PER_NVE_AND_FID);
	mlxsw_reg_sfdf_fid_set(sfdf_pl, fid_index);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(sfdf), sfdf_pl);
}
784
/* Ask the tunnel-type specific code to clear the offload indication
 * from FDB entries pointing at 'nve_dev' / 'vni'. Bails (with a
 * warning) if the FID's NVE type cannot be determined.
 */
static void mlxsw_sp_nve_fdb_clear_offload(struct mlxsw_sp *mlxsw_sp,
					   const struct mlxsw_sp_fid *fid,
					   const struct net_device *nve_dev,
					   __be32 vni)
{
	const struct mlxsw_sp_nve_ops *ops;
	enum mlxsw_sp_nve_type type;

	if (WARN_ON(mlxsw_sp_fid_nve_type(fid, &type)))
		return;

	ops = mlxsw_sp->nve->nve_ops_arr[type];
	ops->fdb_clear_offload(nve_dev, vni);
}
799
/* Key for the {MAC, FID} -> IPv6 underlay address mapping. */
struct mlxsw_sp_nve_ipv6_ht_key {
	u8 mac[ETH_ALEN];
	u16 fid_index;
};

/* Hash table node mapping a {MAC, FID} pair to the IPv6 address it
 * was installed with; also linked on nve->ipv6_addr_list.
 */
struct mlxsw_sp_nve_ipv6_ht_node {
	struct rhash_head ht_node;
	struct list_head list;
	struct mlxsw_sp_nve_ipv6_ht_key key;
	struct in6_addr addr6;
};

static const struct rhashtable_params mlxsw_sp_nve_ipv6_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_nve_ipv6_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, key),
	.head_offset = offsetof(struct mlxsw_sp_nve_ipv6_ht_node, ht_node),
};
817
/* Get (and reference) a KVDL entry storing 'addr6', returning its
 * index in '*p_kvdl_index'. Thin wrapper for use by NVE callers.
 */
int mlxsw_sp_nve_ipv6_addr_kvdl_set(struct mlxsw_sp *mlxsw_sp,
				    const struct in6_addr *addr6,
				    u32 *p_kvdl_index)
{
	return mlxsw_sp_ipv6_addr_kvdl_index_get(mlxsw_sp, addr6, p_kvdl_index);
}
824
/* Drop the KVDL reference taken by mlxsw_sp_nve_ipv6_addr_kvdl_set(). */
void mlxsw_sp_nve_ipv6_addr_kvdl_unset(struct mlxsw_sp *mlxsw_sp,
				       const struct in6_addr *addr6)
{
	mlxsw_sp_ipv6_addr_put(mlxsw_sp, addr6);
}
830
/* Look up the {MAC, FID} -> IPv6 mapping node; NULL if absent. */
static struct mlxsw_sp_nve_ipv6_ht_node *
mlxsw_sp_nve_ipv6_ht_node_lookup(struct mlxsw_sp *mlxsw_sp, const char *mac,
				 u16 fid_index)
{
	struct mlxsw_sp_nve_ipv6_ht_key key = {};

	ether_addr_copy(key.mac, mac);
	key.fid_index = fid_index;
	return rhashtable_lookup_fast(&mlxsw_sp->nve->ipv6_ht, &key,
				      mlxsw_sp_nve_ipv6_ht_params);
}
842
843 static int mlxsw_sp_nve_ipv6_ht_insert(struct mlxsw_sp *mlxsw_sp,
844                                        const char *mac, u16 fid_index,
845                                        const struct in6_addr *addr6)
846 {
847         struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
848         struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
849         int err;
850
851         ipv6_ht_node = kzalloc(sizeof(*ipv6_ht_node), GFP_KERNEL);
852         if (!ipv6_ht_node)
853                 return -ENOMEM;
854
855         ether_addr_copy(ipv6_ht_node->key.mac, mac);
856         ipv6_ht_node->key.fid_index = fid_index;
857         ipv6_ht_node->addr6 = *addr6;
858
859         err = rhashtable_insert_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node,
860                                      mlxsw_sp_nve_ipv6_ht_params);
861         if (err)
862                 goto err_rhashtable_insert;
863
864         list_add(&ipv6_ht_node->list, &nve->ipv6_addr_list);
865
866         return 0;
867
868 err_rhashtable_insert:
869         kfree(ipv6_ht_node);
870         return err;
871 }
872
873 static void
874 mlxsw_sp_nve_ipv6_ht_remove(struct mlxsw_sp *mlxsw_sp,
875                             struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node)
876 {
877         struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
878
879         list_del(&ipv6_ht_node->list);
880         rhashtable_remove_fast(&nve->ipv6_ht, &ipv6_ht_node->ht_node,
881                                mlxsw_sp_nve_ipv6_ht_params);
882         kfree(ipv6_ht_node);
883 }
884
885 int
886 mlxsw_sp_nve_ipv6_addr_map_replace(struct mlxsw_sp *mlxsw_sp, const char *mac,
887                                    u16 fid_index,
888                                    const struct in6_addr *new_addr6)
889 {
890         struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
891
892         ASSERT_RTNL();
893
894         ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac,
895                                                         fid_index);
896         if (!ipv6_ht_node)
897                 return mlxsw_sp_nve_ipv6_ht_insert(mlxsw_sp, mac, fid_index,
898                                                    new_addr6);
899
900         mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6);
901         ipv6_ht_node->addr6 = *new_addr6;
902         return 0;
903 }
904
905 void mlxsw_sp_nve_ipv6_addr_map_del(struct mlxsw_sp *mlxsw_sp, const char *mac,
906                                     u16 fid_index)
907 {
908         struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node;
909
910         ASSERT_RTNL();
911
912         ipv6_ht_node = mlxsw_sp_nve_ipv6_ht_node_lookup(mlxsw_sp, mac,
913                                                         fid_index);
914         if (WARN_ON(!ipv6_ht_node))
915                 return;
916
917         mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node);
918 }
919
920 static void mlxsw_sp_nve_ipv6_addr_flush_by_fid(struct mlxsw_sp *mlxsw_sp,
921                                                 u16 fid_index)
922 {
923         struct mlxsw_sp_nve_ipv6_ht_node *ipv6_ht_node, *tmp;
924         struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
925
926         list_for_each_entry_safe(ipv6_ht_node, tmp, &nve->ipv6_addr_list,
927                                  list) {
928                 if (ipv6_ht_node->key.fid_index != fid_index)
929                         continue;
930
931                 mlxsw_sp_ipv6_addr_put(mlxsw_sp, &ipv6_ht_node->addr6);
932                 mlxsw_sp_nve_ipv6_ht_remove(mlxsw_sp, ipv6_ht_node);
933         }
934 }
935
/* Enable an NVE tunnel on the given FID.
 *
 * Verifies the tunnel parameters can be offloaded, derives the device-level
 * tunnel configuration, initializes the shared tunnel state, binds the VNI
 * to the FID and replays the NVE device's FDB.
 *
 * Returns 0 on success or a negative error code; 'extack' carries the
 * failure reason.
 */
int mlxsw_sp_nve_fid_enable(struct mlxsw_sp *mlxsw_sp, struct mlxsw_sp_fid *fid,
			    struct mlxsw_sp_nve_params *params,
			    struct netlink_ext_ack *extack)
{
	struct mlxsw_sp_nve *nve = mlxsw_sp->nve;
	const struct mlxsw_sp_nve_ops *ops;
	struct mlxsw_sp_nve_config config;
	int err;

	ops = nve->nve_ops_arr[params->type];

	if (!ops->can_offload(nve, params, extack))
		return -EINVAL;

	memset(&config, 0, sizeof(config));
	ops->nve_config(nve, params, &config);
	/* All active tunnels must share a single device-level configuration;
	 * reject a new tunnel whose derived configuration differs from it.
	 */
	if (nve->num_nve_tunnels &&
	    memcmp(&config, &nve->config, sizeof(config))) {
		NL_SET_ERR_MSG_MOD(extack, "Conflicting NVE tunnels configuration");
		return -EINVAL;
	}

	err = mlxsw_sp_nve_tunnel_init(mlxsw_sp, &config);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to initialize NVE tunnel");
		return err;
	}

	err = mlxsw_sp_fid_vni_set(fid, params->type, params->vni,
				   params->dev->ifindex);
	if (err) {
		NL_SET_ERR_MSG_MOD(extack, "Failed to set VNI on FID");
		goto err_fid_vni_set;
	}

	/* Replay existing FDB entries from the NVE device so they are
	 * reflected for this tunnel.
	 */
	err = ops->fdb_replay(params->dev, params->vni, extack);
	if (err)
		goto err_fdb_replay;

	return 0;

err_fdb_replay:
	mlxsw_sp_fid_vni_clear(fid);
err_fid_vni_set:
	mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
	return err;
}
983
/* Disable the NVE tunnel on the given FID: flush its flood, FDB and IPv6
 * address state, clear the FDB offload indication on the NVE device (when
 * it can still be resolved), unbind the VNI and release the shared tunnel
 * state taken by mlxsw_sp_nve_fid_enable().
 */
void mlxsw_sp_nve_fid_disable(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_fid *fid)
{
	u16 fid_index = mlxsw_sp_fid_index(fid);
	struct net_device *nve_dev;
	int nve_ifindex;
	__be32 vni;

	/* Necessary for __dev_get_by_index() below. */
	ASSERT_RTNL();

	mlxsw_sp_nve_flood_ip_flush(mlxsw_sp, fid);
	mlxsw_sp_nve_fdb_flush_by_fid(mlxsw_sp, fid_index);
	mlxsw_sp_nve_ipv6_addr_flush_by_fid(mlxsw_sp, fid_index);

	/* Offload indication can only be cleared if the FID still knows
	 * its NVE netdevice and VNI.
	 */
	if (WARN_ON(mlxsw_sp_fid_nve_ifindex(fid, &nve_ifindex) ||
		    mlxsw_sp_fid_vni(fid, &vni)))
		goto out;

	nve_dev = __dev_get_by_index(mlxsw_sp_net(mlxsw_sp), nve_ifindex);
	if (!nve_dev)
		goto out;

	mlxsw_sp_nve_fdb_clear_offload(mlxsw_sp, fid, nve_dev, vni);
	mlxsw_sp_fid_fdb_clear_offload(fid, nve_dev);

out:
	mlxsw_sp_fid_vni_clear(fid);
	mlxsw_sp_nve_tunnel_fini(mlxsw_sp);
}
1014
1015 int mlxsw_sp_port_nve_init(struct mlxsw_sp_port *mlxsw_sp_port)
1016 {
1017         struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
1018         char tnqdr_pl[MLXSW_REG_TNQDR_LEN];
1019
1020         mlxsw_reg_tnqdr_pack(tnqdr_pl, mlxsw_sp_port->local_port);
1021         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqdr), tnqdr_pl);
1022 }
1023
/* Counterpart of mlxsw_sp_port_nve_init(); intentionally empty. */
void mlxsw_sp_port_nve_fini(struct mlxsw_sp_port *mlxsw_sp_port)
{
}
1027
1028 static int mlxsw_sp_nve_qos_init(struct mlxsw_sp *mlxsw_sp)
1029 {
1030         char tnqcr_pl[MLXSW_REG_TNQCR_LEN];
1031
1032         mlxsw_reg_tnqcr_pack(tnqcr_pl);
1033         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tnqcr), tnqcr_pl);
1034 }
1035
1036 static int mlxsw_sp_nve_ecn_encap_init(struct mlxsw_sp *mlxsw_sp)
1037 {
1038         int i;
1039
1040         /* Iterate over inner ECN values */
1041         for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
1042                 u8 outer_ecn = INET_ECN_encapsulate(0, i);
1043                 char tneem_pl[MLXSW_REG_TNEEM_LEN];
1044                 int err;
1045
1046                 mlxsw_reg_tneem_pack(tneem_pl, i, outer_ecn);
1047                 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tneem),
1048                                       tneem_pl);
1049                 if (err)
1050                         return err;
1051         }
1052
1053         return 0;
1054 }
1055
1056 static int __mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp,
1057                                          u8 inner_ecn, u8 outer_ecn)
1058 {
1059         char tndem_pl[MLXSW_REG_TNDEM_LEN];
1060         u8 new_inner_ecn;
1061         bool trap_en;
1062
1063         new_inner_ecn = mlxsw_sp_tunnel_ecn_decap(outer_ecn, inner_ecn,
1064                                                   &trap_en);
1065         mlxsw_reg_tndem_pack(tndem_pl, outer_ecn, inner_ecn, new_inner_ecn,
1066                              trap_en, trap_en ? MLXSW_TRAP_ID_DECAP_ECN0 : 0);
1067         return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(tndem), tndem_pl);
1068 }
1069
1070 static int mlxsw_sp_nve_ecn_decap_init(struct mlxsw_sp *mlxsw_sp)
1071 {
1072         int i;
1073
1074         /* Iterate over inner ECN values */
1075         for (i = INET_ECN_NOT_ECT; i <= INET_ECN_CE; i++) {
1076                 int j;
1077
1078                 /* Iterate over outer ECN values */
1079                 for (j = INET_ECN_NOT_ECT; j <= INET_ECN_CE; j++) {
1080                         int err;
1081
1082                         err = __mlxsw_sp_nve_ecn_decap_init(mlxsw_sp, i, j);
1083                         if (err)
1084                                 return err;
1085                 }
1086         }
1087
1088         return 0;
1089 }
1090
/* Program both ECN mapping directions: encapsulation first, then
 * decapsulation. Returns 0 on success or the first error encountered.
 */
static int mlxsw_sp_nve_ecn_init(struct mlxsw_sp *mlxsw_sp)
{
	int err = mlxsw_sp_nve_ecn_encap_init(mlxsw_sp);

	return err ? err : mlxsw_sp_nve_ecn_decap_init(mlxsw_sp);
}
1101
1102 static int mlxsw_sp_nve_resources_query(struct mlxsw_sp *mlxsw_sp)
1103 {
1104         unsigned int max;
1105
1106         if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4) ||
1107             !MLXSW_CORE_RES_VALID(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6))
1108                 return -EIO;
1109         max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV4);
1110         mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV4] = max;
1111         max = MLXSW_CORE_RES_GET(mlxsw_sp->core, MAX_NVE_MC_ENTRIES_IPV6);
1112         mlxsw_sp->nve->num_max_mc_entries[MLXSW_SP_L3_PROTO_IPV6] = max;
1113
1114         return 0;
1115 }
1116
/* Allocate and initialize the per-ASIC NVE state: the multicast list and
 * IPv6 address hash tables, the tunnel QoS register, the ECN encap/decap
 * mappings and the device's multicast entry limits. On failure, already
 * initialized resources are unwound in reverse order.
 *
 * Returns 0 on success or a negative error code.
 */
int mlxsw_sp_nve_init(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_nve *nve;
	int err;

	nve = kzalloc(sizeof(*mlxsw_sp->nve), GFP_KERNEL);
	if (!nve)
		return -ENOMEM;
	mlxsw_sp->nve = nve;
	nve->mlxsw_sp = mlxsw_sp;
	/* Per-ASIC (Spectrum-1 vs. Spectrum-2) NVE operations. */
	nve->nve_ops_arr = mlxsw_sp->nve_ops_arr;

	err = rhashtable_init(&nve->mc_list_ht,
			      &mlxsw_sp_nve_mc_list_ht_params);
	if (err)
		goto err_mc_rhashtable_init;

	err = rhashtable_init(&nve->ipv6_ht, &mlxsw_sp_nve_ipv6_ht_params);
	if (err)
		goto err_ipv6_rhashtable_init;

	INIT_LIST_HEAD(&nve->ipv6_addr_list);

	err = mlxsw_sp_nve_qos_init(mlxsw_sp);
	if (err)
		goto err_nve_qos_init;

	err = mlxsw_sp_nve_ecn_init(mlxsw_sp);
	if (err)
		goto err_nve_ecn_init;

	err = mlxsw_sp_nve_resources_query(mlxsw_sp);
	if (err)
		goto err_nve_resources_query;

	return 0;

err_nve_resources_query:
err_nve_ecn_init:
err_nve_qos_init:
	rhashtable_destroy(&nve->ipv6_ht);
err_ipv6_rhashtable_init:
	rhashtable_destroy(&nve->mc_list_ht);
err_mc_rhashtable_init:
	mlxsw_sp->nve = NULL;
	kfree(nve);
	return err;
}
1165
1166 void mlxsw_sp_nve_fini(struct mlxsw_sp *mlxsw_sp)
1167 {
1168         WARN_ON(mlxsw_sp->nve->num_nve_tunnels);
1169         WARN_ON(!list_empty(&mlxsw_sp->nve->ipv6_addr_list));
1170         rhashtable_destroy(&mlxsw_sp->nve->ipv6_ht);
1171         rhashtable_destroy(&mlxsw_sp->nve->mc_list_ht);
1172         kfree(mlxsw_sp->nve);
1173         mlxsw_sp->nve = NULL;
1174 }