// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2017-2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <linux/mutex.h>
#include <linux/refcount.h>
#include <linux/idr.h>
#include <net/devlink.h>
#include <trace/events/mlxsw.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_keys.h"

size_t mlxsw_sp_acl_tcam_priv_size(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->priv_size;
}

#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT 5000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN 3000 /* ms */
#define MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS 100 /* number of entries */

int mlxsw_sp_acl_tcam_priority_get(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u32 *priority, bool fillup_priority)
{
	u64 max_priority;

	if (!fillup_priority) {
		*priority = 0;
		return 0;
	}

	if (!MLXSW_CORE_RES_VALID(mlxsw_sp->core, KVD_SIZE))
		return -EIO;

	/* Priority range is 1..cap_kvd_size-1. */
	max_priority = MLXSW_CORE_RES_GET(mlxsw_sp->core, KVD_SIZE) - 1;
	if (rulei->priority >= max_priority)
		return -EINVAL;

	/* Unlike in TC, in HW, higher number means higher priority. */
	*priority = max_priority - rulei->priority;
	return 0;
}
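
/* Worked example of the inversion above (hypothetical numbers, for
 * illustration only): with cap_kvd_size == 0x40000, max_priority is
 * 0x3FFFF. A rule with TC priority 10 is programmed with HW priority
 * 0x3FFFF - 10 = 0x3FFF5, so the lowest TC priority (0) maps to the
 * highest valid HW priority and rule ordering is preserved.
 */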

static int mlxsw_sp_acl_tcam_region_id_get(struct mlxsw_sp_acl_tcam *tcam,
					   u16 *p_id)
{
	int id;

	id = ida_alloc_max(&tcam->used_regions, tcam->max_regions - 1,
			   GFP_KERNEL);
	if (id < 0)
		return id;

	*p_id = id;

	return 0;
}

static void mlxsw_sp_acl_tcam_region_id_put(struct mlxsw_sp_acl_tcam *tcam,
					    u16 id)
{
	ida_free(&tcam->used_regions, id);
}

static int mlxsw_sp_acl_tcam_group_id_get(struct mlxsw_sp_acl_tcam *tcam,
					  u16 *p_id)
{
	int id;

	id = ida_alloc_max(&tcam->used_groups, tcam->max_groups - 1,
			   GFP_KERNEL);
	if (id < 0)
		return id;

	*p_id = id;

	return 0;
}

static void mlxsw_sp_acl_tcam_group_id_put(struct mlxsw_sp_acl_tcam *tcam,
					   u16 id)
{
	ida_free(&tcam->used_groups, id);
}
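
/* Both ID allocators above are thin IDA wrappers: ida_alloc_max() hands
 * out the lowest free ID in [0, max] or a negative errno (e.g. -ENOSPC
 * when the ID space is exhausted), and ida_free() returns the ID to the
 * pool. A minimal pairing sketch (hypothetical caller, illustration
 * only):
 *
 *	u16 id;
 *	int err;
 *
 *	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &id);
 *	if (err)
 *		return err;
 *	...
 *	mlxsw_sp_acl_tcam_region_id_put(tcam, id);
 */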

struct mlxsw_sp_acl_tcam_pattern {
	const enum mlxsw_afk_element *elements;
	unsigned int elements_count;
};

struct mlxsw_sp_acl_tcam_group {
	struct mlxsw_sp_acl_tcam *tcam;
	u16 id;
	struct mutex lock; /* guards region list updates */
	struct list_head region_list;
	unsigned int region_count;
};

struct mlxsw_sp_acl_tcam_vgroup {
	struct mlxsw_sp_acl_tcam_group group;
	struct list_head vregion_list;
	struct rhashtable vchunk_ht;
	const struct mlxsw_sp_acl_tcam_pattern *patterns;
	unsigned int patterns_count;
	bool tmplt_elusage_set;
	struct mlxsw_afk_element_usage tmplt_elusage;
	bool vregion_rehash_enabled;
	unsigned int *p_min_prio;
	unsigned int *p_max_prio;
};

struct mlxsw_sp_acl_tcam_rehash_ctx {
	void *hints_priv;
	bool this_is_rollback;
	struct mlxsw_sp_acl_tcam_vchunk *current_vchunk; /* vchunk currently
							  * being migrated.
							  */
	struct mlxsw_sp_acl_tcam_ventry *start_ventry; /* ventry at which to
							* start migration in
							* the vchunk currently
							* being migrated.
							*/
	struct mlxsw_sp_acl_tcam_ventry *stop_ventry; /* ventry at which to
						       * stop migration in
						       * the vchunk currently
						       * being migrated.
						       */
};

struct mlxsw_sp_acl_tcam_vregion {
	struct mutex lock; /* Protects consistency of region, region2 pointers
			    * and vchunk_list.
			    */
	struct mlxsw_sp_acl_tcam_region *region;
	struct mlxsw_sp_acl_tcam_region *region2; /* Used during migration */
	struct list_head list; /* Member of a TCAM group */
	struct list_head tlist; /* Member of a TCAM */
	struct list_head vchunk_list; /* List of vchunks under this vregion */
	struct mlxsw_afk_key_info *key_info;
	struct mlxsw_sp_acl_tcam *tcam;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct {
		struct delayed_work dw;
		struct mlxsw_sp_acl_tcam_rehash_ctx ctx;
	} rehash;
	struct mlxsw_sp *mlxsw_sp;
	refcount_t ref_count;
};

struct mlxsw_sp_acl_tcam_vchunk;

struct mlxsw_sp_acl_tcam_chunk {
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_tcam_region *region;
	unsigned long priv[];
	/* priv always has to be the last item */
};

struct mlxsw_sp_acl_tcam_vchunk {
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	struct mlxsw_sp_acl_tcam_chunk *chunk2; /* Used during migration */
	struct list_head list; /* Member of a TCAM vregion */
	struct rhash_head ht_node; /* Member of a chunk HT */
	struct list_head ventry_list;
	unsigned int priority; /* Priority within the vregion and group */
	struct mlxsw_sp_acl_tcam_vgroup *vgroup;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	refcount_t ref_count;
};

struct mlxsw_sp_acl_tcam_entry {
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	struct mlxsw_sp_acl_tcam_chunk *chunk;
	unsigned long priv[];
	/* priv always has to be the last item */
};

struct mlxsw_sp_acl_tcam_ventry {
	struct mlxsw_sp_acl_tcam_entry *entry;
	struct list_head list; /* Member of a TCAM vchunk */
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	struct mlxsw_sp_acl_rule_info *rulei;
};

static const struct rhashtable_params mlxsw_sp_acl_tcam_vchunk_ht_params = {
	.key_len = sizeof(unsigned int),
	.key_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, priority),
	.head_offset = offsetof(struct mlxsw_sp_acl_tcam_vchunk, ht_node),
	.automatic_shrinking = true,
};
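
/* The vchunk hash table is keyed directly by the vchunk priority, so a
 * vgroup can locate the vchunk serving a given rule priority in O(1).
 * The lookup used later in mlxsw_sp_acl_tcam_vchunk_get() looks like:
 *
 *	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
 *					mlxsw_sp_acl_tcam_vchunk_ht_params);
 */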

static int mlxsw_sp_acl_tcam_group_update(struct mlxsw_sp *mlxsw_sp,
					  struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam_region *region;
	char pagt_pl[MLXSW_REG_PAGT_LEN];
	int acl_index = 0;

	mlxsw_reg_pagt_pack(pagt_pl, group->id);
	list_for_each_entry(region, &group->region_list, list) {
		bool multi = false;

		/* Check if the next entry in the list has the same vregion. */
		if (region->list.next != &group->region_list &&
		    list_next_entry(region, list)->vregion == region->vregion)
			multi = true;
		mlxsw_reg_pagt_acl_id_pack(pagt_pl, acl_index++,
					   region->id, multi);
	}
	mlxsw_reg_pagt_size_set(pagt_pl, acl_index);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pagt), pagt_pl);
}
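
/* Illustrative PAGT layout (hypothetical state, for illustration): a
 * group holding regions A and B, where A's vregion is mid-rehash so its
 * new and old regions sit next to each other in region_list, is packed
 * as:
 *
 *	index 0: A-new, multi=1	(next entry belongs to the same vregion)
 *	index 1: A-old, multi=0
 *	index 2: B,     multi=0
 *
 * The multi bit ties an entry to the one that follows it, so both of
 * A's regions are consulted while entries migrate between them.
 */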

static int
mlxsw_sp_acl_tcam_group_add(struct mlxsw_sp_acl_tcam *tcam,
			    struct mlxsw_sp_acl_tcam_group *group)
{
	int err;

	group->tcam = tcam;
	INIT_LIST_HEAD(&group->region_list);

	err = mlxsw_sp_acl_tcam_group_id_get(tcam, &group->id);
	if (err)
		return err;

	mutex_init(&group->lock);

	return 0;
}

static void mlxsw_sp_acl_tcam_group_del(struct mlxsw_sp_acl_tcam_group *group)
{
	struct mlxsw_sp_acl_tcam *tcam = group->tcam;

	mutex_destroy(&group->lock);
	mlxsw_sp_acl_tcam_group_id_put(tcam, group->id);
	WARN_ON(!list_empty(&group->region_list));
}

static int
mlxsw_sp_acl_tcam_vgroup_add(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam *tcam,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     const struct mlxsw_sp_acl_tcam_pattern *patterns,
			     unsigned int patterns_count,
			     struct mlxsw_afk_element_usage *tmplt_elusage,
			     bool vregion_rehash_enabled,
			     unsigned int *p_min_prio,
			     unsigned int *p_max_prio)
{
	int err;

	vgroup->patterns = patterns;
	vgroup->patterns_count = patterns_count;
	vgroup->vregion_rehash_enabled = vregion_rehash_enabled;
	vgroup->p_min_prio = p_min_prio;
	vgroup->p_max_prio = p_max_prio;

	if (tmplt_elusage) {
		vgroup->tmplt_elusage_set = true;
		memcpy(&vgroup->tmplt_elusage, tmplt_elusage,
		       sizeof(vgroup->tmplt_elusage));
	}
	INIT_LIST_HEAD(&vgroup->vregion_list);

	err = mlxsw_sp_acl_tcam_group_add(tcam, &vgroup->group);
	if (err)
		return err;

	err = rhashtable_init(&vgroup->vchunk_ht,
			      &mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_init;

	return 0;

err_rhashtable_init:
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_del(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
	rhashtable_destroy(&vgroup->vchunk_ht);
	mlxsw_sp_acl_tcam_group_del(&vgroup->group);
	WARN_ON(!list_empty(&vgroup->vregion_list));
}

static int
mlxsw_sp_acl_tcam_group_bind(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_group *group,
			     struct mlxsw_sp_port *mlxsw_sp_port,
			     bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_BIND, mlxsw_sp_port->local_port,
			    group->id);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static void
mlxsw_sp_acl_tcam_group_unbind(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_group *group,
			       struct mlxsw_sp_port *mlxsw_sp_port,
			       bool ingress)
{
	char ppbt_pl[MLXSW_REG_PPBT_LEN];

	mlxsw_reg_ppbt_pack(ppbt_pl, ingress ? MLXSW_REG_PXBT_E_IACL :
					       MLXSW_REG_PXBT_E_EACL,
			    MLXSW_REG_PXBT_OP_UNBIND, mlxsw_sp_port->local_port,
			    group->id);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbt), ppbt_pl);
}

static u16
mlxsw_sp_acl_tcam_group_id(struct mlxsw_sp_acl_tcam_group *group)
{
	return group->id;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	/* As the vregion's priority, return the priority of its first vchunk. */
	vchunk = list_first_entry(&vregion->vchunk_list,
				  typeof(*vchunk), list);
	return vchunk->priority;
}

static unsigned int
mlxsw_sp_acl_tcam_vregion_max_prio(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	if (list_empty(&vregion->vchunk_list))
		return 0;
	vchunk = list_last_entry(&vregion->vchunk_list,
				 typeof(*vchunk), list);
	return vchunk->priority;
}

static void
mlxsw_sp_acl_tcam_vgroup_prio_update(struct mlxsw_sp_acl_tcam_vgroup *vgroup)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;

	if (list_empty(&vgroup->vregion_list))
		return;
	vregion = list_first_entry(&vgroup->vregion_list,
				   typeof(*vregion), list);
	*vgroup->p_min_prio = mlxsw_sp_acl_tcam_vregion_prio(vregion);
	vregion = list_last_entry(&vgroup->vregion_list,
				  typeof(*vregion), list);
	*vgroup->p_max_prio = mlxsw_sp_acl_tcam_vregion_max_prio(vregion);
}
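
/* Because vregion_list and each vchunk_list are kept sorted by
 * priority, the group's priority span falls out of the list ends.
 * Illustrative state (hypothetical): two vregions whose vchunks carry
 * priorities {10, 20} and {30} yield *p_min_prio == 10 and
 * *p_max_prio == 30.
 */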

static int
mlxsw_sp_acl_tcam_group_region_attach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_group *group,
				      struct mlxsw_sp_acl_tcam_region *region,
				      unsigned int priority,
				      struct mlxsw_sp_acl_tcam_region *next_region)
{
	struct mlxsw_sp_acl_tcam_region *region2;
	struct list_head *pos;
	int err;

	mutex_lock(&group->lock);
	if (group->region_count == group->tcam->max_group_size) {
		err = -ENOBUFS;
		goto err_region_count_check;
	}

	if (next_region) {
		/* If the next region is defined, place the new one
		 * before it. The next one is a sibling.
		 */
		pos = &next_region->list;
	} else {
		/* Position the region inside the list according to priority */
		list_for_each(pos, &group->region_list) {
			region2 = list_entry(pos, typeof(*region2), list);
			if (mlxsw_sp_acl_tcam_vregion_prio(region2->vregion) >
			    priority)
				break;
		}
	}
	list_add_tail(&region->list, pos);
	region->group = group;

	err = mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	if (err)
		goto err_group_update;

	group->region_count++;
	mutex_unlock(&group->lock);
	return 0;

err_group_update:
	list_del(&region->list);
err_region_count_check:
	mutex_unlock(&group->lock);
	return err;
}
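
/* Note on the ordered insert above: list_for_each() leaves pos at the
 * first entry whose priority exceeds the new one (or at the list head
 * when none does), and list_add_tail(&region->list, pos) links the new
 * region *before* pos, keeping region_list sorted in ascending priority
 * order. Illustrative (hypothetical priorities): inserting 25 into
 * [10, 20, 30] stops at 30 and produces [10, 20, 25, 30].
 */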

static void
mlxsw_sp_acl_tcam_group_region_detach(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam_group *group = region->group;

	mutex_lock(&group->lock);
	list_del(&region->list);
	group->region_count--;
	mlxsw_sp_acl_tcam_group_update(mlxsw_sp, group);
	mutex_unlock(&group->lock);
}

static int
mlxsw_sp_acl_tcam_vgroup_vregion_attach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_vregion *vregion,
					unsigned int priority)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion2;
	struct list_head *pos;
	int err;

	/* Position the vregion inside the list according to priority */
	list_for_each(pos, &vgroup->vregion_list) {
		vregion2 = list_entry(pos, typeof(*vregion2), list);
		if (mlxsw_sp_acl_tcam_vregion_prio(vregion2) > priority)
			break;
	}
	list_add_tail(&vregion->list, pos);

	err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp, &vgroup->group,
						    vregion->region,
						    priority, NULL);
	if (err)
		goto err_region_attach;

	return 0;

err_region_attach:
	list_del(&vregion->list);
	return err;
}

static void
mlxsw_sp_acl_tcam_vgroup_vregion_detach(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	list_del(&vregion->list);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp,
						      vregion->region2);
	mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, vregion->region);
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vgroup_vregion_find(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      unsigned int priority,
				      struct mlxsw_afk_element_usage *elusage,
				      bool *p_need_split)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion, *vregion2;
	struct list_head *pos;
	bool issubset;

	list_for_each(pos, &vgroup->vregion_list) {
		vregion = list_entry(pos, typeof(*vregion), list);

		/* First, check whether the requested priority actually
		 * belongs under one of the following vregions.
		 */
		if (pos->next != &vgroup->vregion_list) { /* not last */
			vregion2 = list_entry(pos->next, typeof(*vregion2),
					      list);
			if (priority >=
			    mlxsw_sp_acl_tcam_vregion_prio(vregion2))
				continue;
		}

		issubset = mlxsw_afk_key_info_subset(vregion->key_info,
						     elusage);

		/* If the requested element usage would not fit and the
		 * priority is lower than that of the currently inspected
		 * vregion, we cannot use this vregion, so return NULL to
		 * indicate a new vregion has to be created.
		 */
		if (!issubset &&
		    priority < mlxsw_sp_acl_tcam_vregion_prio(vregion))
			return NULL;

		/* If the requested element usage would not fit and the
		 * priority is higher than that of the currently inspected
		 * vregion, we cannot use this vregion either. There is
		 * still some hope that the next vregion would fit, so let
		 * it be processed and eventually break at the check right
		 * above this.
		 */
		if (!issubset &&
		    priority > mlxsw_sp_acl_tcam_vregion_max_prio(vregion))
			continue;

		/* Indicate if the vregion needs to be split in order to add
		 * the requested priority. Split is needed when the requested
		 * element usage won't fit into the found vregion.
		 */
		*p_need_split = !issubset;
		return vregion;
	}
	return NULL; /* A new vregion has to be created. */
}
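
/* Worked example of the search above (hypothetical state): vregions V1
 * serving vchunk priorities 10..20 and V2 serving 30..40, with a new
 * rule at priority 25:
 *  - elusage fits V1's key: V1 is returned with need_split false and
 *    the new vchunk is appended after V1's existing vchunks;
 *  - elusage fits only V2's key: V1 is skipped (25 is above its max)
 *    and V2 is returned;
 *  - elusage fits neither: V1 is skipped, then NULL is returned at V2
 *    (25 is below its min), so the caller creates a fresh vregion.
 */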

static void
mlxsw_sp_acl_tcam_vgroup_use_patterns(struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				      struct mlxsw_afk_element_usage *elusage,
				      struct mlxsw_afk_element_usage *out)
{
	const struct mlxsw_sp_acl_tcam_pattern *pattern;
	int i;

	/* If the template is set, we don't have to look up the pattern;
	 * just use the template.
	 */
	if (vgroup->tmplt_elusage_set) {
		memcpy(out, &vgroup->tmplt_elusage, sizeof(*out));
		WARN_ON(!mlxsw_afk_element_usage_subset(elusage, out));
		return;
	}

	for (i = 0; i < vgroup->patterns_count; i++) {
		pattern = &vgroup->patterns[i];
		mlxsw_afk_element_usage_fill(out, pattern->elements,
					     pattern->elements_count);
		if (mlxsw_afk_element_usage_subset(elusage, out))
			return;
	}
	memcpy(out, elusage, sizeof(*out));
}
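
/* Illustrative pattern fallback (hypothetical patterns): with
 * P0 = {SMAC, DMAC} and P1 = {SMAC, DMAC, SIP, DIP}, a rule matching
 * only on DMAC picks P0, a rule also matching on SIP picks P1, and a
 * rule using an element covered by neither falls through to the final
 * memcpy() and keys the vregion on its own elusage verbatim.
 */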

static int
mlxsw_sp_acl_tcam_region_alloc(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_afk_key_info *key_info = region->key_info;
	char ptar_pl[MLXSW_REG_PTAR_LEN];
	unsigned int encodings_count;
	int i;
	int err;

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_ALLOC,
			    region->key_type,
			    MLXSW_SP_ACL_TCAM_REGION_BASE_COUNT,
			    region->id, region->tcam_region_info);
	encodings_count = mlxsw_afk_key_info_blocks_count_get(key_info);
	for (i = 0; i < encodings_count; i++) {
		u16 encoding;

		encoding = mlxsw_afk_key_info_block_encoding_get(key_info, i);
		mlxsw_reg_ptar_key_id_pack(ptar_pl, i, encoding);
	}
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
	if (err)
		return err;
	mlxsw_reg_ptar_unpack(ptar_pl, region->tcam_region_info);
	return 0;
}

static void
mlxsw_sp_acl_tcam_region_free(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_region *region)
{
	char ptar_pl[MLXSW_REG_PTAR_LEN];

	mlxsw_reg_ptar_pack(ptar_pl, MLXSW_REG_PTAR_OP_FREE,
			    region->key_type, 0, region->id,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ptar), ptar_pl);
}

static int
mlxsw_sp_acl_tcam_region_enable(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, true,
			    region->tcam_region_info);
	return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static void
mlxsw_sp_acl_tcam_region_disable(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	char pacl_pl[MLXSW_REG_PACL_LEN];

	mlxsw_reg_pacl_pack(pacl_pl, region->id, false,
			    region->tcam_region_info);
	mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pacl), pacl_pl);
}

static struct mlxsw_sp_acl_tcam_region *
mlxsw_sp_acl_tcam_region_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam *tcam,
				struct mlxsw_sp_acl_tcam_vregion *vregion,
				void *hints_priv)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_region *region;
	int err;

	region = kzalloc(sizeof(*region) + ops->region_priv_size, GFP_KERNEL);
	if (!region)
		return ERR_PTR(-ENOMEM);
	region->mlxsw_sp = mlxsw_sp;
	region->vregion = vregion;
	region->key_info = vregion->key_info;

	err = mlxsw_sp_acl_tcam_region_id_get(tcam, &region->id);
	if (err)
		goto err_region_id_get;

	err = ops->region_associate(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_associate;

	region->key_type = ops->key_type;
	err = mlxsw_sp_acl_tcam_region_alloc(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_alloc;

	err = mlxsw_sp_acl_tcam_region_enable(mlxsw_sp, region);
	if (err)
		goto err_tcam_region_enable;

	err = ops->region_init(mlxsw_sp, region->priv, tcam->priv,
			       region, hints_priv);
	if (err)
		goto err_tcam_region_init;

	return region;

err_tcam_region_init:
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
err_tcam_region_enable:
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
err_tcam_region_alloc:
err_tcam_region_associate:
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
err_region_id_get:
	kfree(region);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_region_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_region *region)
{
	struct mlxsw_sp_acl_tcam *tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->region_fini(mlxsw_sp, region->priv);
	mlxsw_sp_acl_tcam_region_disable(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_free(mlxsw_sp, region);
	mlxsw_sp_acl_tcam_region_id_put(tcam, region->id);
	kfree(region);
}

static void
mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	unsigned long interval = vregion->tcam->vregion_rehash_intrvl;

	if (!interval)
		return;
	mlxsw_core_schedule_dw(&vregion->rehash.dw,
			       msecs_to_jiffies(interval));
}

static void
mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vregion *vregion,
				 int *credits);

static void mlxsw_sp_acl_tcam_vregion_rehash_work(struct work_struct *work)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion =
		container_of(work, struct mlxsw_sp_acl_tcam_vregion,
			     rehash.dw.work);
	int credits = MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_vregion_rehash(vregion->mlxsw_sp, vregion, &credits);
	mutex_unlock(&vregion->lock);
	if (credits < 0)
		/* Rehash ran out of credits, so it was interrupted.
		 * Schedule the work as soon as possible to continue.
		 */
		mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
	else
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
}
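
/* Scheduling sketch: each work invocation is granted
 * MLXSW_SP_ACL_TCAM_VREGION_REHASH_CREDITS (100) entry migrations. If a
 * pass exhausts them, the work is re-queued with zero delay to continue
 * where it stopped; otherwise the next pass is deferred by
 * vregion_rehash_intrvl milliseconds (default 5000, minimum 3000, see
 * the defines near the top), and an interval of 0 disables rescheduling
 * entirely.
 */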

static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	/* The entry markers are relative to the current chunk and therefore
	 * need to be reset together with the chunk marker.
	 */
	ctx->current_vchunk = NULL;
	ctx->start_ventry = NULL;
	ctx->stop_ventry = NULL;
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	/* If a rule was added to or deleted from a vchunk which is currently
	 * under rehash migration, we have to reset the ventry pointers
	 * to make sure all rules are properly migrated.
	 */
	if (vregion->rehash.ctx.current_vchunk == vchunk) {
		vregion->rehash.ctx.start_ventry = NULL;
		vregion->rehash.ctx.stop_ventry = NULL;
	}
}

static void
mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	/* If a chunk was added to or deleted from the vregion we have to
	 * reset the current chunk pointer to make sure all chunks
	 * are properly migrated.
	 */
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(&vregion->rehash.ctx);
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_create(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				 unsigned int priority,
				 struct mlxsw_afk_element_usage *elusage)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_afk *afk = mlxsw_sp_acl_afk(mlxsw_sp->acl);
	struct mlxsw_sp_acl_tcam *tcam = vgroup->group.tcam;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	int err;

	vregion = kzalloc(sizeof(*vregion), GFP_KERNEL);
	if (!vregion)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vregion->vchunk_list);
	mutex_init(&vregion->lock);
	vregion->tcam = tcam;
	vregion->mlxsw_sp = mlxsw_sp;
	vregion->vgroup = vgroup;
	refcount_set(&vregion->ref_count, 1);

	vregion->key_info = mlxsw_afk_key_info_get(afk, elusage);
	if (IS_ERR(vregion->key_info)) {
		err = PTR_ERR(vregion->key_info);
		goto err_key_info_get;
	}

	vregion->region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, tcam,
							  vregion, NULL);
	if (IS_ERR(vregion->region)) {
		err = PTR_ERR(vregion->region);
		goto err_region_create;
	}

	err = mlxsw_sp_acl_tcam_vgroup_vregion_attach(mlxsw_sp, vgroup, vregion,
						      priority);
	if (err)
		goto err_vgroup_vregion_attach;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		/* Create the delayed work for vregion periodic rehash */
		INIT_DELAYED_WORK(&vregion->rehash.dw,
				  mlxsw_sp_acl_tcam_vregion_rehash_work);
		mlxsw_sp_acl_tcam_vregion_rehash_work_schedule(vregion);
		mutex_lock(&tcam->lock);
		list_add_tail(&vregion->tlist, &tcam->vregion_list);
		mutex_unlock(&tcam->lock);
	}

	return vregion;

err_vgroup_vregion_attach:
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
err_region_create:
	mlxsw_afk_key_info_put(vregion->key_info);
err_key_info_get:
	kfree(vregion);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vregion_destroy(struct mlxsw_sp *mlxsw_sp,
				  struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vregion->vgroup;
	struct mlxsw_sp_acl_tcam *tcam = vregion->tcam;

	if (vgroup->vregion_rehash_enabled && ops->region_rehash_hints_get) {
		struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;

		mutex_lock(&tcam->lock);
		list_del(&vregion->tlist);
		mutex_unlock(&tcam->lock);
		if (cancel_delayed_work_sync(&vregion->rehash.dw) &&
		    ctx->hints_priv)
			ops->region_rehash_hints_put(ctx->hints_priv);
	}
	mlxsw_sp_acl_tcam_vgroup_vregion_detach(mlxsw_sp, vregion);
	if (vregion->region2)
		mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region2);
	mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, vregion->region);
	mlxsw_afk_key_info_put(vregion->key_info);
	mutex_destroy(&vregion->lock);
	kfree(vregion);
}

static struct mlxsw_sp_acl_tcam_vregion *
mlxsw_sp_acl_tcam_vregion_get(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			      unsigned int priority,
			      struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_afk_element_usage vregion_elusage;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	bool need_split;

	vregion = mlxsw_sp_acl_tcam_vgroup_vregion_find(vgroup, priority,
							elusage, &need_split);
	if (vregion) {
		if (need_split) {
			/* According to priority, the new vchunk should belong
			 * to an existing vregion. However, this vchunk needs
			 * elements that the vregion does not contain. We would
			 * need to split the existing vregion into two and
			 * create a new vregion for the new vchunk in between.
			 * This is not currently supported.
			 */
			return ERR_PTR(-EOPNOTSUPP);
		}
		refcount_inc(&vregion->ref_count);
		return vregion;
	}

	mlxsw_sp_acl_tcam_vgroup_use_patterns(vgroup, elusage,
					      &vregion_elusage);

	return mlxsw_sp_acl_tcam_vregion_create(mlxsw_sp, vgroup, priority,
						&vregion_elusage);
}

static void
mlxsw_sp_acl_tcam_vregion_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_tcam_vregion *vregion)
{
	if (!refcount_dec_and_test(&vregion->ref_count))
		return;
	mlxsw_sp_acl_tcam_vregion_destroy(mlxsw_sp, vregion);
}

static struct mlxsw_sp_acl_tcam_chunk *
mlxsw_sp_acl_tcam_chunk_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
			       struct mlxsw_sp_acl_tcam_region *region)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk) + ops->chunk_priv_size, GFP_KERNEL);
	if (!chunk)
		return ERR_PTR(-ENOMEM);
	chunk->vchunk = vchunk;
	chunk->region = region;

	ops->chunk_init(region->priv, chunk->priv, vchunk->priority);
	return chunk;
}

static void
mlxsw_sp_acl_tcam_chunk_destroy(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->chunk_fini(chunk->priv);
	kfree(chunk);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_create(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_tcam_vgroup *vgroup,
				unsigned int priority,
				struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk, *vchunk2;
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct list_head *pos;
	int err;

	if (priority == MLXSW_SP_ACL_TCAM_CATCHALL_PRIO)
		return ERR_PTR(-EINVAL);

	vchunk = kzalloc(sizeof(*vchunk), GFP_KERNEL);
	if (!vchunk)
		return ERR_PTR(-ENOMEM);
	INIT_LIST_HEAD(&vchunk->ventry_list);
	vchunk->priority = priority;
	vchunk->vgroup = vgroup;
	refcount_set(&vchunk->ref_count, 1);

	vregion = mlxsw_sp_acl_tcam_vregion_get(mlxsw_sp, vgroup,
						priority, elusage);
	if (IS_ERR(vregion)) {
		err = PTR_ERR(vregion);
		goto err_vregion_get;
	}

	vchunk->vregion = vregion;

	err = rhashtable_insert_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
				     mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (err)
		goto err_rhashtable_insert;

	mutex_lock(&vregion->lock);
	vchunk->chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk,
						       vchunk->vregion->region);
	if (IS_ERR(vchunk->chunk)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(vchunk->chunk);
		goto err_chunk_create;
	}

	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);

	/* Position the vchunk inside the list according to priority */
	list_for_each(pos, &vregion->vchunk_list) {
		vchunk2 = list_entry(pos, typeof(*vchunk2), list);
		if (vchunk2->priority > priority)
			break;
	}
	list_add_tail(&vchunk->list, pos);
	mutex_unlock(&vregion->lock);
	mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);

	return vchunk;

err_chunk_create:
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
err_rhashtable_insert:
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vregion);
err_vregion_get:
	kfree(vchunk);
	return ERR_PTR(err);
}

static void
mlxsw_sp_acl_tcam_vchunk_destroy(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;
	struct mlxsw_sp_acl_tcam_vgroup *vgroup = vchunk->vgroup;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vregion_changed(vregion);
	list_del(&vchunk->list);
	if (vchunk->chunk2)
		mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk);
	mutex_unlock(&vregion->lock);
	rhashtable_remove_fast(&vgroup->vchunk_ht, &vchunk->ht_node,
			       mlxsw_sp_acl_tcam_vchunk_ht_params);
	mlxsw_sp_acl_tcam_vregion_put(mlxsw_sp, vchunk->vregion);
	kfree(vchunk);
	mlxsw_sp_acl_tcam_vgroup_prio_update(vgroup);
}

static struct mlxsw_sp_acl_tcam_vchunk *
mlxsw_sp_acl_tcam_vchunk_get(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vgroup *vgroup,
			     unsigned int priority,
			     struct mlxsw_afk_element_usage *elusage)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;

	vchunk = rhashtable_lookup_fast(&vgroup->vchunk_ht, &priority,
					mlxsw_sp_acl_tcam_vchunk_ht_params);
	if (vchunk) {
		if (WARN_ON(!mlxsw_afk_key_info_subset(vchunk->vregion->key_info,
						       elusage)))
			return ERR_PTR(-EINVAL);
		refcount_inc(&vchunk->ref_count);
		return vchunk;
	}
	return mlxsw_sp_acl_tcam_vchunk_create(mlxsw_sp, vgroup,
					       priority, elusage);
}

static void
mlxsw_sp_acl_tcam_vchunk_put(struct mlxsw_sp *mlxsw_sp,
			     struct mlxsw_sp_acl_tcam_vchunk *vchunk)
{
	if (!refcount_dec_and_test(&vchunk->ref_count))
		return;
	mlxsw_sp_acl_tcam_vchunk_destroy(mlxsw_sp, vchunk);
}

static struct mlxsw_sp_acl_tcam_entry *
mlxsw_sp_acl_tcam_entry_create(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_tcam_ventry *ventry,
			       struct mlxsw_sp_acl_tcam_chunk *chunk)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
	struct mlxsw_sp_acl_tcam_entry *entry;
	int err;

	entry = kzalloc(sizeof(*entry) + ops->entry_priv_size, GFP_KERNEL);
	if (!entry)
		return ERR_PTR(-ENOMEM);
	entry->ventry = ventry;
	entry->chunk = chunk;

	err = ops->entry_add(mlxsw_sp, chunk->region->priv, chunk->priv,
			     entry->priv, ventry->rulei);
	if (err)
		goto err_entry_add;

	return entry;

err_entry_add:
	kfree(entry);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_tcam_entry_destroy(struct mlxsw_sp *mlxsw_sp,
					    struct mlxsw_sp_acl_tcam_entry *entry)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	ops->entry_del(mlxsw_sp, entry->chunk->region->priv,
		       entry->chunk->priv, entry->priv);
	kfree(entry);
}

static int
mlxsw_sp_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_entry *entry,
				       struct mlxsw_sp_acl_rule_info *rulei)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_action_replace(mlxsw_sp, region->priv,
					 entry->priv, rulei);
}

static int
mlxsw_sp_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_entry *entry,
				     bool *activity)
{
	const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;

	return ops->entry_activity_get(mlxsw_sp, entry->chunk->region->priv,
				       entry->priv, activity);
}

static int mlxsw_sp_acl_tcam_ventry_add(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_vgroup *vgroup,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion;
	struct mlxsw_sp_acl_tcam_vchunk *vchunk;
	int err;

	vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp, vgroup, rulei->priority,
					      &rulei->values.elusage);
	if (IS_ERR(vchunk))
		return PTR_ERR(vchunk);

	ventry->vchunk = vchunk;
	ventry->rulei = rulei;
	vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	ventry->entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry,
						       vchunk->chunk);
	if (IS_ERR(ventry->entry)) {
		mutex_unlock(&vregion->lock);
		err = PTR_ERR(ventry->entry);
		goto err_entry_create;
	}

	list_add_tail(&ventry->list, &vchunk->ventry_list);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	mutex_unlock(&vregion->lock);

	return 0;

err_entry_create:
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
	return err;
}

static void mlxsw_sp_acl_tcam_ventry_del(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_tcam_ventry *ventry)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;
	struct mlxsw_sp_acl_tcam_vregion *vregion = vchunk->vregion;

	mutex_lock(&vregion->lock);
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_changed(vchunk);
	list_del(&ventry->list);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	mutex_unlock(&vregion->lock);
	mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, vchunk);
}

static int
mlxsw_sp_acl_tcam_ventry_action_replace(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_tcam_ventry *ventry,
					struct mlxsw_sp_acl_rule_info *rulei)
{
	struct mlxsw_sp_acl_tcam_vchunk *vchunk = ventry->vchunk;

	return mlxsw_sp_acl_tcam_entry_action_replace(mlxsw_sp,
						      vchunk->vregion->region,
						      ventry->entry, rulei);
}

static int
mlxsw_sp_acl_tcam_ventry_activity_get(struct mlxsw_sp *mlxsw_sp,
				      struct mlxsw_sp_acl_tcam_ventry *ventry,
				      bool *activity)
{
	struct mlxsw_sp_acl_tcam_vregion *vregion = ventry->vchunk->vregion;
	int err;

	mutex_lock(&vregion->lock);
	err = mlxsw_sp_acl_tcam_entry_activity_get(mlxsw_sp, ventry->entry,
						   activity);
	mutex_unlock(&vregion->lock);
	return err;
}

static int
mlxsw_sp_acl_tcam_ventry_migrate(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_tcam_ventry *ventry,
				 struct mlxsw_sp_acl_tcam_chunk *chunk,
				 int *credits)
{
	struct mlxsw_sp_acl_tcam_entry *new_entry;

	/* First, check whether the entry is already where we want it to be. */
	if (ventry->entry->chunk == chunk)
		return 0;

	if (--(*credits) < 0)
		return 0;

	new_entry = mlxsw_sp_acl_tcam_entry_create(mlxsw_sp, ventry, chunk);
	if (IS_ERR(new_entry))
		return PTR_ERR(new_entry);
	mlxsw_sp_acl_tcam_entry_destroy(mlxsw_sp, ventry->entry);
	ventry->entry = new_entry;
	return 0;
}
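
/* The migration above is make-before-break: the entry is first written
 * into the destination chunk and only then removed from the source, so
 * lookups keep matching the rule throughout. Each actual move consumes
 * one credit; once credits drop below zero the caller suspends and a
 * later work invocation resumes from the position saved in the rehash
 * context.
 */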

static int
mlxsw_sp_acl_tcam_vchunk_migrate_start(struct mlxsw_sp *mlxsw_sp,
				       struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				       struct mlxsw_sp_acl_tcam_region *region,
				       struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	struct mlxsw_sp_acl_tcam_chunk *new_chunk;

	WARN_ON(vchunk->chunk2);

	new_chunk = mlxsw_sp_acl_tcam_chunk_create(mlxsw_sp, vchunk, region);
	if (IS_ERR(new_chunk))
		return PTR_ERR(new_chunk);
	vchunk->chunk2 = vchunk->chunk;
	vchunk->chunk = new_chunk;
	ctx->current_vchunk = vchunk;
	ctx->start_ventry = NULL;
	ctx->stop_ventry = NULL;
	return 0;
}

static void
mlxsw_sp_acl_tcam_vchunk_migrate_end(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
{
	mlxsw_sp_acl_tcam_chunk_destroy(mlxsw_sp, vchunk->chunk2);
	vchunk->chunk2 = NULL;
	mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
}

static int
mlxsw_sp_acl_tcam_vchunk_migrate_one(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_tcam_vchunk *vchunk,
				     struct mlxsw_sp_acl_tcam_region *region,
				     struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
				     int *credits)
{
	struct mlxsw_sp_acl_tcam_ventry *ventry;
	int err;

	if (vchunk->chunk->region != region) {
		err = mlxsw_sp_acl_tcam_vchunk_migrate_start(mlxsw_sp, vchunk,
							     region, ctx);
		if (err)
			return err;
	} else if (!vchunk->chunk2) {
		/* The chunk is already as it should be, nothing to do. */
		return 0;
	}

	if (list_empty(&vchunk->ventry_list))
		goto out;

	/* If the migration got interrupted, we have the ventry to start from
	 * stored in context.
	 */
	if (ctx->start_ventry)
		ventry = ctx->start_ventry;
	else
		ventry = list_first_entry(&vchunk->ventry_list,
					  typeof(*ventry), list);

	WARN_ON(ventry->vchunk != vchunk);

	list_for_each_entry_from(ventry, &vchunk->ventry_list, list) {
		/* During rollback, once we reach the ventry that failed
		 * to migrate, we are done.
		 */
		if (ventry == ctx->stop_ventry)
			break;

		err = mlxsw_sp_acl_tcam_ventry_migrate(mlxsw_sp, ventry,
						       vchunk->chunk, credits);
		if (err) {
			if (ctx->this_is_rollback) {
				/* Save the ventry which we ended with and try
				 * to continue later on.
				 */
				ctx->start_ventry = ventry;
				return err;
			}
			/* Swap the chunk and chunk2 pointers so the follow-up
			 * rollback call will see the original chunk pointer
			 * in vchunk->chunk.
			 */
			swap(vchunk->chunk, vchunk->chunk2);
			/* The rollback has to be done from the beginning of
			 * the chunk, which is why we have to null the
			 * start_ventry. However, we know where to stop the
			 * rollback: at the current ventry.
			 */
			ctx->start_ventry = NULL;
			ctx->stop_ventry = ventry;
			return err;
		} else if (*credits < 0) {
			/* We are out of credits, the rest of the ventries
			 * will be migrated later. Save the ventry
			 * which we ended with.
			 */
			ctx->start_ventry = ventry;
			return 0;
		}
	}

out:
	mlxsw_sp_acl_tcam_vchunk_migrate_end(mlxsw_sp, vchunk, ctx);
	return 0;
}
1319
1320 static int
1321 mlxsw_sp_acl_tcam_vchunk_migrate_all(struct mlxsw_sp *mlxsw_sp,
1322                                      struct mlxsw_sp_acl_tcam_vregion *vregion,
1323                                      struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1324                                      int *credits)
1325 {
1326         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1327         int err;
1328
1329         if (list_empty(&vregion->vchunk_list))
1330                 return 0;
1331
1332         /* If the migration got interrupted, we have the vchunk
1333          * we are working on stored in context.
1334          */
1335         if (ctx->current_vchunk)
1336                 vchunk = ctx->current_vchunk;
1337         else
1338                 vchunk = list_first_entry(&vregion->vchunk_list,
1339                                           typeof(*vchunk), list);
1340
1341         list_for_each_entry_from(vchunk, &vregion->vchunk_list, list) {
1342                 err = mlxsw_sp_acl_tcam_vchunk_migrate_one(mlxsw_sp, vchunk,
1343                                                            vregion->region,
1344                                                            ctx, credits);
1345                 if (err || *credits < 0)
1346                         return err;
1347         }
1348         return 0;
1349 }
1350
1351 static int
1352 mlxsw_sp_acl_tcam_vregion_migrate(struct mlxsw_sp *mlxsw_sp,
1353                                   struct mlxsw_sp_acl_tcam_vregion *vregion,
1354                                   struct mlxsw_sp_acl_tcam_rehash_ctx *ctx,
1355                                   int *credits)
1356 {
1357         int err, err2;
1358
1359         trace_mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion);
1360         err = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
1361                                                    ctx, credits);
1362         if (err) {
1363                 if (ctx->this_is_rollback)
1364                         return err;
1365                 /* In case migration was not successful, we need to swap
1366                  * so the original region pointer is assigned again
1367                  * to vregion->region.
1368                  */
1369                 swap(vregion->region, vregion->region2);
1370                 mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
1371                 ctx->this_is_rollback = true;
1372                 err2 = mlxsw_sp_acl_tcam_vchunk_migrate_all(mlxsw_sp, vregion,
1373                                                             ctx, credits);
1374                 if (err2) {
1375                         trace_mlxsw_sp_acl_tcam_vregion_rehash_rollback_failed(mlxsw_sp,
1376                                                                                vregion);
1377                         dev_err(mlxsw_sp->bus_info->dev, "Failed to roll back after vregion migration failure\n");
1378                         /* Let the rollback continue later on. */
1379                 }
1380         }
1381         trace_mlxsw_sp_acl_tcam_vregion_migrate_end(mlxsw_sp, vregion);
1382         return err;
1383 }
1384
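/* A rehash is in progress if hints were allocated by a previous rehash
 * start that has not been ended yet.
 */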
1385 static bool
1386 mlxsw_sp_acl_tcam_vregion_rehash_in_progress(const struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1387 {
1388         return ctx->hints_priv;
1389 }
1390
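/* Start a rehash: create a new region based on hints obtained from the
 * low-level ops, make it the current region of the vregion and attach
 * it to the group, keeping the old region in region2 for the migration.
 */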
1391 static int
1392 mlxsw_sp_acl_tcam_vregion_rehash_start(struct mlxsw_sp *mlxsw_sp,
1393                                        struct mlxsw_sp_acl_tcam_vregion *vregion,
1394                                        struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1395 {
1396         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1397         unsigned int priority = mlxsw_sp_acl_tcam_vregion_prio(vregion);
1398         struct mlxsw_sp_acl_tcam_region *new_region;
1399         void *hints_priv;
1400         int err;
1401
1402         trace_mlxsw_sp_acl_tcam_vregion_rehash(mlxsw_sp, vregion);
1403
1404         hints_priv = ops->region_rehash_hints_get(vregion->region->priv);
1405         if (IS_ERR(hints_priv))
1406                 return PTR_ERR(hints_priv);
1407
1408         new_region = mlxsw_sp_acl_tcam_region_create(mlxsw_sp, vregion->tcam,
1409                                                      vregion, hints_priv);
1410         if (IS_ERR(new_region)) {
1411                 err = PTR_ERR(new_region);
1412                 goto err_region_create;
1413         }
1414
1415         /* Make vregion->region point to the new region we are going
1416          * to migrate to, keeping the original region in region2.
1417          */
1418         vregion->region2 = vregion->region;
1419         vregion->region = new_region;
1420         err = mlxsw_sp_acl_tcam_group_region_attach(mlxsw_sp,
1421                                                     vregion->region2->group,
1422                                                     new_region, priority,
1423                                                     vregion->region2);
1424         if (err)
1425                 goto err_group_region_attach;
1426
1427         ctx->hints_priv = hints_priv;
1428         ctx->this_is_rollback = false;
1429         mlxsw_sp_acl_tcam_rehash_ctx_vchunk_reset(ctx);
1430
1431         return 0;
1432
1433 err_group_region_attach:
1434         vregion->region = vregion->region2;
1435         vregion->region2 = NULL;
1436         mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, new_region);
1437 err_region_create:
1438         ops->region_rehash_hints_put(hints_priv);
1439         return err;
1440 }
1441
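/* End a rehash: all entries were migrated to the new region, so detach
 * and destroy the old region and release the rehash hints.
 */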
1442 static void
1443 mlxsw_sp_acl_tcam_vregion_rehash_end(struct mlxsw_sp *mlxsw_sp,
1444                                      struct mlxsw_sp_acl_tcam_vregion *vregion,
1445                                      struct mlxsw_sp_acl_tcam_rehash_ctx *ctx)
1446 {
1447         struct mlxsw_sp_acl_tcam_region *unused_region = vregion->region2;
1448         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1449
1450         vregion->region2 = NULL;
1451         mlxsw_sp_acl_tcam_group_region_detach(mlxsw_sp, unused_region);
1452         mlxsw_sp_acl_tcam_region_destroy(mlxsw_sp, unused_region);
1453         ops->region_rehash_hints_put(ctx->hints_priv);
1454         ctx->hints_priv = NULL;
1455 }
1456
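/* Perform one unit of rehash work: start a new rehash unless one is
 * already in progress, migrate entries until the credits are exhausted
 * and end the rehash once all entries were migrated.
 */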
1457 static void
1458 mlxsw_sp_acl_tcam_vregion_rehash(struct mlxsw_sp *mlxsw_sp,
1459                                  struct mlxsw_sp_acl_tcam_vregion *vregion,
1460                                  int *credits)
1461 {
1462         struct mlxsw_sp_acl_tcam_rehash_ctx *ctx = &vregion->rehash.ctx;
1463         int err;
1464
1465         /* Check if the previous rehash work was interrupted, in which
1466          * case we have to continue it now.
1467          * If not, start a new rehash.
1468          */
1469         if (!mlxsw_sp_acl_tcam_vregion_rehash_in_progress(ctx)) {
1470                 err = mlxsw_sp_acl_tcam_vregion_rehash_start(mlxsw_sp,
1471                                                              vregion, ctx);
1472                 if (err) {
1473                         if (err != -EAGAIN)
1474                                 dev_err(mlxsw_sp->bus_info->dev, "Failed to get rehash hints\n");
1475                         return;
1476                 }
1477         }
1478
1479         err = mlxsw_sp_acl_tcam_vregion_migrate(mlxsw_sp, vregion,
1480                                                 ctx, credits);
1481         if (err) {
1482                 dev_err_ratelimited(mlxsw_sp->bus_info->dev, "Failed to migrate vregion\n");
1483                 return;
1484         }
1485
1486         if (*credits >= 0)
1487                 mlxsw_sp_acl_tcam_vregion_rehash_end(mlxsw_sp, vregion, ctx);
1488 }
1489
1490 static int
1491 mlxsw_sp_acl_tcam_region_rehash_intrvl_get(struct devlink *devlink, u32 id,
1492                                            struct devlink_param_gset_ctx *ctx)
1493 {
1494         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1495         struct mlxsw_sp_acl_tcam *tcam;
1496         struct mlxsw_sp *mlxsw_sp;
1497
1498         mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1499         tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
1500         ctx->val.vu32 = tcam->vregion_rehash_intrvl;
1501
1502         return 0;
1503 }
1504
1505 static int
1506 mlxsw_sp_acl_tcam_region_rehash_intrvl_set(struct devlink *devlink, u32 id,
1507                                            struct devlink_param_gset_ctx *ctx)
1508 {
1509         struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
1510         struct mlxsw_sp_acl_tcam_vregion *vregion;
1511         struct mlxsw_sp_acl_tcam *tcam;
1512         struct mlxsw_sp *mlxsw_sp;
1513         u32 val = ctx->val.vu32;
1514
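        /* A value of zero disables the periodic rehash; nonzero values
         * below the minimum interval are rejected.
         */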
1515         if (val < MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_MIN && val)
1516                 return -EINVAL;
1517
1518         mlxsw_sp = mlxsw_core_driver_priv(mlxsw_core);
1519         tcam = mlxsw_sp_acl_to_tcam(mlxsw_sp->acl);
1520         tcam->vregion_rehash_intrvl = val;
1521         mutex_lock(&tcam->lock);
1522         list_for_each_entry(vregion, &tcam->vregion_list, tlist) {
1523                 if (val)
1524                         mlxsw_core_schedule_dw(&vregion->rehash.dw, 0);
1525                 else
1526                         cancel_delayed_work_sync(&vregion->rehash.dw);
1527         }
1528         mutex_unlock(&tcam->lock);
1529         return 0;
1530 }
1531
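/* Expose the rehash interval as a runtime devlink parameter. It can be
 * inspected and changed from user space, e.g. (hypothetical device
 * name):
 *   devlink dev param set pci/0000:03:00.0 \
 *           name acl_region_rehash_interval value 6000 cmode runtime
 */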
1532 static const struct devlink_param mlxsw_sp_acl_tcam_rehash_params[] = {
1533         DEVLINK_PARAM_DRIVER(MLXSW_DEVLINK_PARAM_ID_ACL_REGION_REHASH_INTERVAL,
1534                              "acl_region_rehash_interval",
1535                              DEVLINK_PARAM_TYPE_U32,
1536                              BIT(DEVLINK_PARAM_CMODE_RUNTIME),
1537                              mlxsw_sp_acl_tcam_region_rehash_intrvl_get,
1538                              mlxsw_sp_acl_tcam_region_rehash_intrvl_set,
1539                              NULL),
1540 };
1541
1542 static int mlxsw_sp_acl_tcam_rehash_params_register(struct mlxsw_sp *mlxsw_sp)
1543 {
1544         struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
1545
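        /* The rehash parameter is only registered when the low-level
         * ops support rehash hints.
         */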
1546         if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
1547                 return 0;
1548
1549         return devl_params_register(devlink, mlxsw_sp_acl_tcam_rehash_params,
1550                                     ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
1551 }
1552
1553 static void
1554 mlxsw_sp_acl_tcam_rehash_params_unregister(struct mlxsw_sp *mlxsw_sp)
1555 {
1556         struct devlink *devlink = priv_to_devlink(mlxsw_sp->core);
1557
1558         if (!mlxsw_sp->acl_tcam_ops->region_rehash_hints_get)
1559                 return;
1560
1561         devl_params_unregister(devlink, mlxsw_sp_acl_tcam_rehash_params,
1562                                ARRAY_SIZE(mlxsw_sp_acl_tcam_rehash_params));
1563 }
1564
1565 int mlxsw_sp_acl_tcam_init(struct mlxsw_sp *mlxsw_sp,
1566                            struct mlxsw_sp_acl_tcam *tcam)
1567 {
1568         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1569         u64 max_tcam_regions;
1570         u64 max_regions;
1571         u64 max_groups;
1572         int err;
1573
1574         mutex_init(&tcam->lock);
1575         tcam->vregion_rehash_intrvl =
1576                         MLXSW_SP_ACL_TCAM_VREGION_REHASH_INTRVL_DFLT;
1577         INIT_LIST_HEAD(&tcam->vregion_list);
1578
1579         err = mlxsw_sp_acl_tcam_rehash_params_register(mlxsw_sp);
1580         if (err)
1581                 goto err_rehash_params_register;
1582
1583         max_tcam_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1584                                               ACL_MAX_TCAM_REGIONS);
1585         max_regions = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_REGIONS);
1586
1587         /* Use a 1:1 mapping between ACL regions and TCAM regions */
1588         if (max_tcam_regions < max_regions)
1589                 max_regions = max_tcam_regions;
1590
1591         ida_init(&tcam->used_regions);
1592         tcam->max_regions = max_regions;
1593
1594         max_groups = MLXSW_CORE_RES_GET(mlxsw_sp->core, ACL_MAX_GROUPS);
1595         ida_init(&tcam->used_groups);
1596         tcam->max_groups = max_groups;
1597         tcam->max_group_size = MLXSW_CORE_RES_GET(mlxsw_sp->core,
1598                                                   ACL_MAX_GROUP_SIZE);
1599         tcam->max_group_size = min_t(unsigned int, tcam->max_group_size,
1600                                      MLXSW_REG_PAGT_ACL_MAX_NUM);
1601
1602         err = ops->init(mlxsw_sp, tcam->priv, tcam);
1603         if (err)
1604                 goto err_tcam_init;
1605
1606         return 0;
1607
1608 err_tcam_init:
1609         ida_destroy(&tcam->used_groups);
1610         ida_destroy(&tcam->used_regions);
1611         mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
1612 err_rehash_params_register:
1613         mutex_destroy(&tcam->lock);
1614         return err;
1615 }
1616
1617 void mlxsw_sp_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp,
1618                             struct mlxsw_sp_acl_tcam *tcam)
1619 {
1620         const struct mlxsw_sp_acl_tcam_ops *ops = mlxsw_sp->acl_tcam_ops;
1621
1622         ops->fini(mlxsw_sp, tcam->priv);
1623         ida_destroy(&tcam->used_groups);
1624         ida_destroy(&tcam->used_regions);
1625         mlxsw_sp_acl_tcam_rehash_params_unregister(mlxsw_sp);
1626         mutex_destroy(&tcam->lock);
1627 }
1628
1629 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv4[] = {
1630         MLXSW_AFK_ELEMENT_SRC_SYS_PORT,
1631         MLXSW_AFK_ELEMENT_DMAC_32_47,
1632         MLXSW_AFK_ELEMENT_DMAC_0_31,
1633         MLXSW_AFK_ELEMENT_SMAC_32_47,
1634         MLXSW_AFK_ELEMENT_SMAC_0_31,
1635         MLXSW_AFK_ELEMENT_ETHERTYPE,
1636         MLXSW_AFK_ELEMENT_IP_PROTO,
1637         MLXSW_AFK_ELEMENT_SRC_IP_0_31,
1638         MLXSW_AFK_ELEMENT_DST_IP_0_31,
1639         MLXSW_AFK_ELEMENT_DST_L4_PORT,
1640         MLXSW_AFK_ELEMENT_SRC_L4_PORT,
1641         MLXSW_AFK_ELEMENT_VID,
1642         MLXSW_AFK_ELEMENT_PCP,
1643         MLXSW_AFK_ELEMENT_TCP_FLAGS,
1644         MLXSW_AFK_ELEMENT_IP_TTL_,
1645         MLXSW_AFK_ELEMENT_IP_ECN,
1646         MLXSW_AFK_ELEMENT_IP_DSCP,
1647 };
1648
1649 static const enum mlxsw_afk_element mlxsw_sp_acl_tcam_pattern_ipv6[] = {
1650         MLXSW_AFK_ELEMENT_ETHERTYPE,
1651         MLXSW_AFK_ELEMENT_IP_PROTO,
1652         MLXSW_AFK_ELEMENT_SRC_IP_96_127,
1653         MLXSW_AFK_ELEMENT_SRC_IP_64_95,
1654         MLXSW_AFK_ELEMENT_SRC_IP_32_63,
1655         MLXSW_AFK_ELEMENT_SRC_IP_0_31,
1656         MLXSW_AFK_ELEMENT_DST_IP_96_127,
1657         MLXSW_AFK_ELEMENT_DST_IP_64_95,
1658         MLXSW_AFK_ELEMENT_DST_IP_32_63,
1659         MLXSW_AFK_ELEMENT_DST_IP_0_31,
1660         MLXSW_AFK_ELEMENT_DST_L4_PORT,
1661         MLXSW_AFK_ELEMENT_SRC_L4_PORT,
1662 };
1663
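/* Patterns from which the key element usage of a new vregion is chosen;
 * the first pattern covering all elements used by the rules is picked.
 */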
1664 static const struct mlxsw_sp_acl_tcam_pattern mlxsw_sp_acl_tcam_patterns[] = {
1665         {
1666                 .elements = mlxsw_sp_acl_tcam_pattern_ipv4,
1667                 .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv4),
1668         },
1669         {
1670                 .elements = mlxsw_sp_acl_tcam_pattern_ipv6,
1671                 .elements_count = ARRAY_SIZE(mlxsw_sp_acl_tcam_pattern_ipv6),
1672         },
1673 };
1674
1675 #define MLXSW_SP_ACL_TCAM_PATTERNS_COUNT \
1676         ARRAY_SIZE(mlxsw_sp_acl_tcam_patterns)
1677
1678 struct mlxsw_sp_acl_tcam_flower_ruleset {
1679         struct mlxsw_sp_acl_tcam_vgroup vgroup;
1680 };
1681
1682 struct mlxsw_sp_acl_tcam_flower_rule {
1683         struct mlxsw_sp_acl_tcam_ventry ventry;
1684 };
1685
1686 static int
1687 mlxsw_sp_acl_tcam_flower_ruleset_add(struct mlxsw_sp *mlxsw_sp,
1688                                      struct mlxsw_sp_acl_tcam *tcam,
1689                                      void *ruleset_priv,
1690                                      struct mlxsw_afk_element_usage *tmplt_elusage,
1691                                      unsigned int *p_min_prio,
1692                                      unsigned int *p_max_prio)
1693 {
1694         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1695
1696         return mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
1697                                             mlxsw_sp_acl_tcam_patterns,
1698                                             MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
1699                                             tmplt_elusage, true,
1700                                             p_min_prio, p_max_prio);
1701 }
1702
1703 static void
1704 mlxsw_sp_acl_tcam_flower_ruleset_del(struct mlxsw_sp *mlxsw_sp,
1705                                      void *ruleset_priv)
1706 {
1707         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1708
1709         mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1710 }
1711
1712 static int
1713 mlxsw_sp_acl_tcam_flower_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
1714                                       void *ruleset_priv,
1715                                       struct mlxsw_sp_port *mlxsw_sp_port,
1716                                       bool ingress)
1717 {
1718         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1719
1720         return mlxsw_sp_acl_tcam_group_bind(mlxsw_sp, &ruleset->vgroup.group,
1721                                             mlxsw_sp_port, ingress);
1722 }
1723
1724 static void
1725 mlxsw_sp_acl_tcam_flower_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
1726                                         void *ruleset_priv,
1727                                         struct mlxsw_sp_port *mlxsw_sp_port,
1728                                         bool ingress)
1729 {
1730         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1731
1732         mlxsw_sp_acl_tcam_group_unbind(mlxsw_sp, &ruleset->vgroup.group,
1733                                        mlxsw_sp_port, ingress);
1734 }
1735
1736 static u16
1737 mlxsw_sp_acl_tcam_flower_ruleset_group_id(void *ruleset_priv)
1738 {
1739         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1740
1741         return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
1742 }
1743
1744 static int
1745 mlxsw_sp_acl_tcam_flower_rule_add(struct mlxsw_sp *mlxsw_sp,
1746                                   void *ruleset_priv, void *rule_priv,
1747                                   struct mlxsw_sp_acl_rule_info *rulei)
1748 {
1749         struct mlxsw_sp_acl_tcam_flower_ruleset *ruleset = ruleset_priv;
1750         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1751
1752         return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
1753                                             &rule->ventry, rulei);
1754 }
1755
1756 static void
1757 mlxsw_sp_acl_tcam_flower_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
1758 {
1759         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1760
1761         mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
1762 }
1763
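/* Replacing the action of an existing flower rule is not supported. */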
1764 static int
1765 mlxsw_sp_acl_tcam_flower_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
1766                                              void *rule_priv,
1767                                              struct mlxsw_sp_acl_rule_info *rulei)
1768 {
1769         return -EOPNOTSUPP;
1770 }
1771
1772 static int
1773 mlxsw_sp_acl_tcam_flower_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
1774                                            void *rule_priv, bool *activity)
1775 {
1776         struct mlxsw_sp_acl_tcam_flower_rule *rule = rule_priv;
1777
1778         return mlxsw_sp_acl_tcam_ventry_activity_get(mlxsw_sp, &rule->ventry,
1779                                                      activity);
1780 }
1781
1782 static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_flower_ops = {
1783         .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_flower_ruleset),
1784         .ruleset_add            = mlxsw_sp_acl_tcam_flower_ruleset_add,
1785         .ruleset_del            = mlxsw_sp_acl_tcam_flower_ruleset_del,
1786         .ruleset_bind           = mlxsw_sp_acl_tcam_flower_ruleset_bind,
1787         .ruleset_unbind         = mlxsw_sp_acl_tcam_flower_ruleset_unbind,
1788         .ruleset_group_id       = mlxsw_sp_acl_tcam_flower_ruleset_group_id,
1789         .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_flower_rule),
1790         .rule_add               = mlxsw_sp_acl_tcam_flower_rule_add,
1791         .rule_del               = mlxsw_sp_acl_tcam_flower_rule_del,
1792         .rule_action_replace    = mlxsw_sp_acl_tcam_flower_rule_action_replace,
1793         .rule_activity_get      = mlxsw_sp_acl_tcam_flower_rule_activity_get,
1794 };
1795
1796 struct mlxsw_sp_acl_tcam_mr_ruleset {
1797         struct mlxsw_sp_acl_tcam_vchunk *vchunk;
1798         struct mlxsw_sp_acl_tcam_vgroup vgroup;
1799 };
1800
1801 struct mlxsw_sp_acl_tcam_mr_rule {
1802         struct mlxsw_sp_acl_tcam_ventry ventry;
1803 };
1804
1805 static int
1806 mlxsw_sp_acl_tcam_mr_ruleset_add(struct mlxsw_sp *mlxsw_sp,
1807                                  struct mlxsw_sp_acl_tcam *tcam,
1808                                  void *ruleset_priv,
1809                                  struct mlxsw_afk_element_usage *tmplt_elusage,
1810                                  unsigned int *p_min_prio,
1811                                  unsigned int *p_max_prio)
1812 {
1813         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1814         int err;
1815
1816         err = mlxsw_sp_acl_tcam_vgroup_add(mlxsw_sp, tcam, &ruleset->vgroup,
1817                                            mlxsw_sp_acl_tcam_patterns,
1818                                            MLXSW_SP_ACL_TCAM_PATTERNS_COUNT,
1819                                            tmplt_elusage, false,
1820                                            p_min_prio, p_max_prio);
1821         if (err)
1822                 return err;
1823
1824         /* For most TCAM clients it would make sense to take a TCAM
1825          * chunk only when the first rule is written. This is not the
1826          * case for the multicast router, which has to be bound to a
1827          * specific ACL group ID that must exist in HW before the
1828          * multicast router is initialized.
1829          */
1830         ruleset->vchunk = mlxsw_sp_acl_tcam_vchunk_get(mlxsw_sp,
1831                                                        &ruleset->vgroup, 1,
1832                                                        tmplt_elusage);
1833         if (IS_ERR(ruleset->vchunk)) {
1834                 err = PTR_ERR(ruleset->vchunk);
1835                 goto err_chunk_get;
1836         }
1837
1838         return 0;
1839
1840 err_chunk_get:
1841         mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1842         return err;
1843 }
1844
1845 static void
1846 mlxsw_sp_acl_tcam_mr_ruleset_del(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv)
1847 {
1848         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1849
1850         mlxsw_sp_acl_tcam_vchunk_put(mlxsw_sp, ruleset->vchunk);
1851         mlxsw_sp_acl_tcam_vgroup_del(&ruleset->vgroup);
1852 }
1853
1854 static int
1855 mlxsw_sp_acl_tcam_mr_ruleset_bind(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
1856                                   struct mlxsw_sp_port *mlxsw_sp_port,
1857                                   bool ingress)
1858 {
1859         /* Binding is done when initializing the multicast router */
1860         return 0;
1861 }
1862
1863 static void
1864 mlxsw_sp_acl_tcam_mr_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
1865                                     void *ruleset_priv,
1866                                     struct mlxsw_sp_port *mlxsw_sp_port,
1867                                     bool ingress)
1868 {
1869 }
1870
1871 static u16
1872 mlxsw_sp_acl_tcam_mr_ruleset_group_id(void *ruleset_priv)
1873 {
1874         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1875
1876         return mlxsw_sp_acl_tcam_group_id(&ruleset->vgroup.group);
1877 }
1878
1879 static int
1880 mlxsw_sp_acl_tcam_mr_rule_add(struct mlxsw_sp *mlxsw_sp, void *ruleset_priv,
1881                               void *rule_priv,
1882                               struct mlxsw_sp_acl_rule_info *rulei)
1883 {
1884         struct mlxsw_sp_acl_tcam_mr_ruleset *ruleset = ruleset_priv;
1885         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1886
1887         return mlxsw_sp_acl_tcam_ventry_add(mlxsw_sp, &ruleset->vgroup,
1888                                            &rule->ventry, rulei);
1889 }
1890
1891 static void
1892 mlxsw_sp_acl_tcam_mr_rule_del(struct mlxsw_sp *mlxsw_sp, void *rule_priv)
1893 {
1894         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1895
1896         mlxsw_sp_acl_tcam_ventry_del(mlxsw_sp, &rule->ventry);
1897 }
1898
1899 static int
1900 mlxsw_sp_acl_tcam_mr_rule_action_replace(struct mlxsw_sp *mlxsw_sp,
1901                                          void *rule_priv,
1902                                          struct mlxsw_sp_acl_rule_info *rulei)
1903 {
1904         struct mlxsw_sp_acl_tcam_mr_rule *rule = rule_priv;
1905
1906         return mlxsw_sp_acl_tcam_ventry_action_replace(mlxsw_sp, &rule->ventry,
1907                                                        rulei);
1908 }
1909
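/* Activity tracking is not supported for multicast router rules, so the
 * rule is always reported as inactive.
 */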
1910 static int
1911 mlxsw_sp_acl_tcam_mr_rule_activity_get(struct mlxsw_sp *mlxsw_sp,
1912                                        void *rule_priv, bool *activity)
1913 {
1914         *activity = false;
1915
1916         return 0;
1917 }
1918
1919 static const struct mlxsw_sp_acl_profile_ops mlxsw_sp_acl_tcam_mr_ops = {
1920         .ruleset_priv_size      = sizeof(struct mlxsw_sp_acl_tcam_mr_ruleset),
1921         .ruleset_add            = mlxsw_sp_acl_tcam_mr_ruleset_add,
1922         .ruleset_del            = mlxsw_sp_acl_tcam_mr_ruleset_del,
1923         .ruleset_bind           = mlxsw_sp_acl_tcam_mr_ruleset_bind,
1924         .ruleset_unbind         = mlxsw_sp_acl_tcam_mr_ruleset_unbind,
1925         .ruleset_group_id       = mlxsw_sp_acl_tcam_mr_ruleset_group_id,
1926         .rule_priv_size         = sizeof(struct mlxsw_sp_acl_tcam_mr_rule),
1927         .rule_add               = mlxsw_sp_acl_tcam_mr_rule_add,
1928         .rule_del               = mlxsw_sp_acl_tcam_mr_rule_del,
1929         .rule_action_replace    = mlxsw_sp_acl_tcam_mr_rule_action_replace,
1930         .rule_activity_get      = mlxsw_sp_acl_tcam_mr_rule_activity_get,
1931 };
1932
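/* Profile ops, indexed by enum mlxsw_sp_acl_profile. */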
1933 static const struct mlxsw_sp_acl_profile_ops *
1934 mlxsw_sp_acl_tcam_profile_ops_arr[] = {
1935         [MLXSW_SP_ACL_PROFILE_FLOWER] = &mlxsw_sp_acl_tcam_flower_ops,
1936         [MLXSW_SP_ACL_PROFILE_MR] = &mlxsw_sp_acl_tcam_mr_ops,
1937 };
1938
1939 const struct mlxsw_sp_acl_profile_ops *
1940 mlxsw_sp_acl_tcam_profile_ops(struct mlxsw_sp *mlxsw_sp,
1941                               enum mlxsw_sp_acl_profile profile)
1942 {
1943         const struct mlxsw_sp_acl_profile_ops *ops;
1944
1945         if (WARN_ON(profile >= ARRAY_SIZE(mlxsw_sp_acl_tcam_profile_ops_arr)))
1946                 return NULL;
1947         ops = mlxsw_sp_acl_tcam_profile_ops_arr[profile];
1948         if (WARN_ON(!ops))
1949                 return NULL;
1950         return ops;
1951 }