/*
 * drivers/net/ethernet/mellanox/mlxsw/spectrum_acl.c
 * Copyright (c) 2017 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017 Jiri Pirko <jiri@mellanox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/string.h>
#include <linux/rhashtable.h>
#include <linux/netdevice.h>
#include <net/tc_act/tc_vlan.h>

#include "reg.h"
#include "core.h"
#include "resources.h"
#include "spectrum.h"
#include "core_acl_flex_keys.h"
#include "core_acl_flex_actions.h"
#include "spectrum_acl_flex_keys.h"

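/* Central ACL state for one Spectrum ASIC instance. It owns the flexible
 * key (afk) and flexible action (afa) resources, the dummy FID handed out
 * to FID-set actions, a hashtable of rulesets and a list of all installed
 * rules that is walked periodically to refresh activity (last-used) data.
 */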
struct mlxsw_sp_acl {
	struct mlxsw_sp *mlxsw_sp;
	struct mlxsw_afk *afk;
	struct mlxsw_afa *afa;
	struct mlxsw_sp_fid *dummy_fid;
	const struct mlxsw_sp_acl_ops *ops;
	struct rhashtable ruleset_ht;
	struct list_head rules;
	struct {
		struct delayed_work dw;
		unsigned long interval;	/* ms */
#define MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS 1000
	} rule_activity_update;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

struct mlxsw_afk *mlxsw_sp_acl_afk(struct mlxsw_sp_acl *acl)
{
	return acl->afk;
}

struct mlxsw_sp_acl_ruleset_ht_key {
	struct net_device *dev; /* dev this ruleset is bound to */
	bool ingress;
	u32 chain_index;
	const struct mlxsw_sp_acl_profile_ops *ops;
};

struct mlxsw_sp_acl_ruleset {
	struct rhash_head ht_node; /* Member of acl HT */
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;
	struct rhashtable rule_ht;
	unsigned int ref_count;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

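/* One offloaded rule. The cookie is the TC cookie of the original
 * software rule and serves as the rule hashtable key, so a software
 * rule can always be mapped back to its hardware counterpart.
 */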
struct mlxsw_sp_acl_rule {
	struct rhash_head ht_node; /* Member of rule HT */
	struct list_head list;
	unsigned long cookie; /* HT key */
	struct mlxsw_sp_acl_ruleset *ruleset;
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 last_used;
	u64 last_packets;
	u64 last_bytes;
	unsigned long priv[0];
	/* priv has to be always the last item */
};

static const struct rhashtable_params mlxsw_sp_acl_ruleset_ht_params = {
	.key_len = sizeof(struct mlxsw_sp_acl_ruleset_ht_key),
	.key_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_key),
	.head_offset = offsetof(struct mlxsw_sp_acl_ruleset, ht_node),
	.automatic_shrinking = true,
};

static const struct rhashtable_params mlxsw_sp_acl_rule_ht_params = {
	.key_len = sizeof(unsigned long),
	.key_offset = offsetof(struct mlxsw_sp_acl_rule, cookie),
	.head_offset = offsetof(struct mlxsw_sp_acl_rule, ht_node),
	.automatic_shrinking = true,
};

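/* The dummy FID created at init time backs the FID-set action (see
 * mlxsw_sp_acl_rulei_act_fid_set() below): ACL-forwarded packets that
 * bypass the bridge still need a valid FID context in the ASIC.
 */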
struct mlxsw_sp_fid *mlxsw_sp_acl_dummy_fid(struct mlxsw_sp *mlxsw_sp)
{
	return mlxsw_sp->acl->dummy_fid;
}

static struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_create(struct mlxsw_sp *mlxsw_sp,
			    const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	size_t alloc_size;
	int err;

	alloc_size = sizeof(*ruleset) + ops->ruleset_priv_size;
	ruleset = kzalloc(alloc_size, GFP_KERNEL);
	if (!ruleset)
		return ERR_PTR(-ENOMEM);
	ruleset->ref_count = 1;
	ruleset->ht_key.ops = ops;

	err = rhashtable_init(&ruleset->rule_ht, &mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_init;

	err = ops->ruleset_add(mlxsw_sp, acl->priv, ruleset->priv);
	if (err)
		goto err_ops_ruleset_add;

	return ruleset;

err_ops_ruleset_add:
	rhashtable_destroy(&ruleset->rule_ht);
err_rhashtable_init:
	kfree(ruleset);
	return ERR_PTR(err);
}

static void mlxsw_sp_acl_ruleset_destroy(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	ops->ruleset_del(mlxsw_sp, ruleset->priv);
	rhashtable_destroy(&ruleset->rule_ht);
	kfree(ruleset);
}

static int mlxsw_sp_acl_ruleset_bind(struct mlxsw_sp *mlxsw_sp,
				     struct mlxsw_sp_acl_ruleset *ruleset,
				     struct net_device *dev, bool ingress,
				     u32 chain_index)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	int err;

	ruleset->ht_key.dev = dev;
	ruleset->ht_key.ingress = ingress;
	ruleset->ht_key.chain_index = chain_index;
	err = rhashtable_insert_fast(&acl->ruleset_ht, &ruleset->ht_node,
				     mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		return err;

	if (!ruleset->ht_key.chain_index) {
		/* Only the ruleset with chain index 0, the implicit one,
		 * needs to be directly bound to the device. The rest of
		 * the rulesets are reached via "goto" actions instead.
		 */
		err = ops->ruleset_bind(mlxsw_sp, ruleset->priv, dev, ingress);
		if (err)
			goto err_ops_ruleset_bind;
	}
	return 0;

err_ops_ruleset_bind:
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
	return err;
}

static void mlxsw_sp_acl_ruleset_unbind(struct mlxsw_sp *mlxsw_sp,
					struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;

	/* Only the chain 0 ruleset was bound directly to the device. */
	if (!ruleset->ht_key.chain_index)
		ops->ruleset_unbind(mlxsw_sp, ruleset->priv);
	rhashtable_remove_fast(&acl->ruleset_ht, &ruleset->ht_node,
			       mlxsw_sp_acl_ruleset_ht_params);
}

static void mlxsw_sp_acl_ruleset_ref_inc(struct mlxsw_sp_acl_ruleset *ruleset)
{
	ruleset->ref_count++;
}

static void mlxsw_sp_acl_ruleset_ref_dec(struct mlxsw_sp *mlxsw_sp,
					 struct mlxsw_sp_acl_ruleset *ruleset)
{
	if (--ruleset->ref_count)
		return;
	mlxsw_sp_acl_ruleset_unbind(mlxsw_sp, ruleset);
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
}

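/* Ruleset lookups key on the full ht_key: bound device, direction,
 * chain index and profile ops. The key is zeroed first because the
 * rhashtable compares it as one flat memory block, padding included.
 */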
static struct mlxsw_sp_acl_ruleset *
__mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp_acl *acl, struct net_device *dev,
			      bool ingress, u32 chain_index,
			      const struct mlxsw_sp_acl_profile_ops *ops)
{
	struct mlxsw_sp_acl_ruleset_ht_key ht_key;

	memset(&ht_key, 0, sizeof(ht_key));
	ht_key.dev = dev;
	ht_key.ingress = ingress;
	ht_key.chain_index = chain_index;
	ht_key.ops = ops;
	return rhashtable_lookup_fast(&acl->ruleset_ht, &ht_key,
				      mlxsw_sp_acl_ruleset_ht_params);
}

struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_lookup(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
			    bool ingress, u32 chain_index,
			    enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);
	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
						chain_index, ops);
	if (!ruleset)
		return ERR_PTR(-ENOENT);
	return ruleset;
}

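/* _get()/_put() implement get-or-create semantics: the first user of a
 * (dev, ingress, chain, profile) tuple creates and binds the ruleset,
 * later users just take a reference, and the last _put() unbinds and
 * destroys it.
 */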
struct mlxsw_sp_acl_ruleset *
mlxsw_sp_acl_ruleset_get(struct mlxsw_sp *mlxsw_sp, struct net_device *dev,
			 bool ingress, u32 chain_index,
			 enum mlxsw_sp_acl_profile profile)
{
	const struct mlxsw_sp_acl_profile_ops *ops;
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	struct mlxsw_sp_acl_ruleset *ruleset;
	int err;

	ops = acl->ops->profile_ops(mlxsw_sp, profile);
	if (!ops)
		return ERR_PTR(-EINVAL);

	ruleset = __mlxsw_sp_acl_ruleset_lookup(acl, dev, ingress,
						chain_index, ops);
	if (ruleset) {
		mlxsw_sp_acl_ruleset_ref_inc(ruleset);
		return ruleset;
	}
	ruleset = mlxsw_sp_acl_ruleset_create(mlxsw_sp, ops);
	if (IS_ERR(ruleset))
		return ruleset;
	err = mlxsw_sp_acl_ruleset_bind(mlxsw_sp, ruleset, dev,
					ingress, chain_index);
	if (err)
		goto err_ruleset_bind;
	return ruleset;

err_ruleset_bind:
	mlxsw_sp_acl_ruleset_destroy(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_ruleset_put(struct mlxsw_sp *mlxsw_sp,
			      struct mlxsw_sp_acl_ruleset *ruleset)
{
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

u16 mlxsw_sp_acl_ruleset_group_id(struct mlxsw_sp_acl_ruleset *ruleset)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	return ops->ruleset_group_id(ruleset->priv);
}

static int
mlxsw_sp_acl_rulei_counter_alloc(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	int err;

	err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &rulei->counter_index);
	if (err)
		return err;
	rulei->counter_valid = true;
	return 0;
}

static void
mlxsw_sp_acl_rulei_counter_free(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei)
{
	rulei->counter_valid = false;
	mlxsw_sp_flow_counter_free(mlxsw_sp, rulei->counter_index);
}

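/* A rule_info (rulei) accumulates everything needed to program one rule:
 * the priority, the flexible key values/masks and the flexible action
 * block. It is filled in via the mlxsw_sp_acl_rulei_*() helpers below
 * and committed as a whole before the rule is installed.
 */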
struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rulei_create(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	int err;

	rulei = kzalloc(sizeof(*rulei), GFP_KERNEL);
	if (!rulei)
		return ERR_PTR(-ENOMEM);
	rulei->act_block = mlxsw_afa_block_create(acl->afa);
	if (IS_ERR(rulei->act_block)) {
		err = PTR_ERR(rulei->act_block);
		goto err_afa_block_create;
	}
	return rulei;

err_afa_block_create:
	kfree(rulei);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rulei_destroy(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_destroy(rulei->act_block);
	kfree(rulei);
}

int mlxsw_sp_acl_rulei_commit(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_commit(rulei->act_block);
}

void mlxsw_sp_acl_rulei_priority(struct mlxsw_sp_acl_rule_info *rulei,
				 unsigned int priority)
{
	rulei->priority = priority;
}

void mlxsw_sp_acl_rulei_keymask_u32(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    u32 key_value, u32 mask_value)
{
	mlxsw_afk_values_add_u32(&rulei->values, element,
				 key_value, mask_value);
}

void mlxsw_sp_acl_rulei_keymask_buf(struct mlxsw_sp_acl_rule_info *rulei,
				    enum mlxsw_afk_element element,
				    const char *key_value,
				    const char *mask_value, unsigned int len)
{
	mlxsw_afk_values_add_buf(&rulei->values, element,
				 key_value, mask_value, len);
}

void mlxsw_sp_acl_rulei_act_continue(struct mlxsw_sp_acl_rule_info *rulei)
{
	mlxsw_afa_block_continue(rulei->act_block);
}

void mlxsw_sp_acl_rulei_act_jump(struct mlxsw_sp_acl_rule_info *rulei,
				 u16 group_id)
{
	mlxsw_afa_block_jump(rulei->act_block, group_id);
}

int mlxsw_sp_acl_rulei_act_drop(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_drop(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_trap(struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_trap(rulei->act_block);
}

int mlxsw_sp_acl_rulei_act_fwd(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule_info *rulei,
			       struct net_device *out_dev)
{
	struct mlxsw_sp_port *mlxsw_sp_port;
	u8 local_port;
	bool in_port;

	if (out_dev) {
		if (!mlxsw_sp_port_dev_check(out_dev))
			return -EINVAL;
		mlxsw_sp_port = netdev_priv(out_dev);
		if (mlxsw_sp_port->mlxsw_sp != mlxsw_sp)
			return -EINVAL;
		local_port = mlxsw_sp_port->local_port;
		in_port = false;
	} else {
		/* If out_dev is NULL, the caller wants to
		 * set forwarding to the ingress port.
		 */
		local_port = 0;
		in_port = true;
	}
	return mlxsw_afa_block_append_fwd(rulei->act_block,
					  local_port, in_port);
}

int mlxsw_sp_acl_rulei_act_vlan(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule_info *rulei,
				u32 action, u16 vid, u16 proto, u8 prio)
{
	u8 ethertype;

	if (action == TCA_VLAN_ACT_MODIFY) {
		switch (proto) {
		case ETH_P_8021Q:
			ethertype = 0;
			break;
		case ETH_P_8021AD:
			ethertype = 1;
			break;
		default:
			dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN protocol %#04x\n",
				proto);
			return -EINVAL;
		}

		return mlxsw_afa_block_append_vlan_modify(rulei->act_block,
							  vid, prio, ethertype);
	} else {
		dev_err(mlxsw_sp->bus_info->dev, "Unsupported VLAN action\n");
		return -EINVAL;
	}
}

int mlxsw_sp_acl_rulei_act_count(struct mlxsw_sp *mlxsw_sp,
				 struct mlxsw_sp_acl_rule_info *rulei)
{
	return mlxsw_afa_block_append_counter(rulei->act_block,
					      rulei->counter_index);
}

int mlxsw_sp_acl_rulei_act_fid_set(struct mlxsw_sp *mlxsw_sp,
				   struct mlxsw_sp_acl_rule_info *rulei,
				   u16 fid)
{
	return mlxsw_afa_block_append_fid_set(rulei->act_block, fid);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_create(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	struct mlxsw_sp_acl_rule *rule;
	int err;

	mlxsw_sp_acl_ruleset_ref_inc(ruleset);
	rule = kzalloc(sizeof(*rule) + ops->rule_priv_size, GFP_KERNEL);
	if (!rule) {
		err = -ENOMEM;
		goto err_alloc;
	}
	rule->cookie = cookie;
	rule->ruleset = ruleset;

	rule->rulei = mlxsw_sp_acl_rulei_create(mlxsw_sp->acl);
	if (IS_ERR(rule->rulei)) {
		err = PTR_ERR(rule->rulei);
		goto err_rulei_create;
	}

	err = mlxsw_sp_acl_rulei_counter_alloc(mlxsw_sp, rule->rulei);
	if (err)
		goto err_counter_alloc;
	return rule;

err_counter_alloc:
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
err_rulei_create:
	kfree(rule);
err_alloc:
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
	return ERR_PTR(err);
}

void mlxsw_sp_acl_rule_destroy(struct mlxsw_sp *mlxsw_sp,
			       struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;

	mlxsw_sp_acl_rulei_counter_free(mlxsw_sp, rule->rulei);
	mlxsw_sp_acl_rulei_destroy(rule->rulei);
	kfree(rule);
	mlxsw_sp_acl_ruleset_ref_dec(mlxsw_sp, ruleset);
}

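/* Rule installation order matters: the rule is first programmed into the
 * hardware (ops->rule_add) and only then published in the software
 * hashtable and appended to the activity list, so any rule visible to
 * lookups is already live in the ASIC.
 */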
int mlxsw_sp_acl_rule_add(struct mlxsw_sp *mlxsw_sp,
			  struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	int err;

	err = ops->rule_add(mlxsw_sp, ruleset->priv, rule->priv, rule->rulei);
	if (err)
		return err;

	err = rhashtable_insert_fast(&ruleset->rule_ht, &rule->ht_node,
				     mlxsw_sp_acl_rule_ht_params);
	if (err)
		goto err_rhashtable_insert;

	list_add_tail(&rule->list, &mlxsw_sp->acl->rules);
	return 0;

err_rhashtable_insert:
	ops->rule_del(mlxsw_sp, rule->priv);
	return err;
}

void mlxsw_sp_acl_rule_del(struct mlxsw_sp *mlxsw_sp,
			   struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;

	list_del(&rule->list);
	rhashtable_remove_fast(&ruleset->rule_ht, &rule->ht_node,
			       mlxsw_sp_acl_rule_ht_params);
	ops->rule_del(mlxsw_sp, rule->priv);
}

struct mlxsw_sp_acl_rule *
mlxsw_sp_acl_rule_lookup(struct mlxsw_sp *mlxsw_sp,
			 struct mlxsw_sp_acl_ruleset *ruleset,
			 unsigned long cookie)
{
	return rhashtable_lookup_fast(&ruleset->rule_ht, &cookie,
				      mlxsw_sp_acl_rule_ht_params);
}

struct mlxsw_sp_acl_rule_info *
mlxsw_sp_acl_rule_rulei(struct mlxsw_sp_acl_rule *rule)
{
	return rule->rulei;
}

static int mlxsw_sp_acl_rule_activity_update(struct mlxsw_sp *mlxsw_sp,
					     struct mlxsw_sp_acl_rule *rule)
{
	struct mlxsw_sp_acl_ruleset *ruleset = rule->ruleset;
	const struct mlxsw_sp_acl_profile_ops *ops = ruleset->ht_key.ops;
	bool active;
	int err;

	err = ops->rule_activity_get(mlxsw_sp, rule->priv, &active);
	if (err)
		return err;
	if (active)
		rule->last_used = jiffies;
	return 0;
}

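/* Rule activity is polled rather than event driven: a delayed work walks
 * all installed rules every MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS
 * so that the "last used" timestamps stay reasonably fresh for TC stats
 * queries.
 */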
static int mlxsw_sp_acl_rules_activity_update(struct mlxsw_sp_acl *acl)
{
	struct mlxsw_sp_acl_rule *rule;
	int err;

	/* Protect internal structures from changes */
	rtnl_lock();
	list_for_each_entry(rule, &acl->rules, list) {
		err = mlxsw_sp_acl_rule_activity_update(acl->mlxsw_sp,
							rule);
		if (err)
			goto err_rule_update;
	}
	rtnl_unlock();
	return 0;

err_rule_update:
	rtnl_unlock();
	return err;
}

static void mlxsw_sp_acl_rule_activity_work_schedule(struct mlxsw_sp_acl *acl)
{
	unsigned long interval = acl->rule_activity_update.interval;

	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw,
			       msecs_to_jiffies(interval));
}

static void mlxsw_sp_acl_rule_activity_update_work(struct work_struct *work)
{
	struct mlxsw_sp_acl *acl = container_of(work, struct mlxsw_sp_acl,
						rule_activity_update.dw.work);
	int err;

	err = mlxsw_sp_acl_rules_activity_update(acl);
	if (err)
		dev_err(acl->mlxsw_sp->bus_info->dev, "Could not update acl activity\n");

	mlxsw_sp_acl_rule_activity_work_schedule(acl);
}

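/* TC expects incremental stats while the hardware counter is cumulative,
 * so the deltas against the values cached at the previous query are
 * returned and the cache is refreshed.
 */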
int mlxsw_sp_acl_rule_get_stats(struct mlxsw_sp *mlxsw_sp,
				struct mlxsw_sp_acl_rule *rule,
				u64 *packets, u64 *bytes, u64 *last_use)
{
	struct mlxsw_sp_acl_rule_info *rulei;
	u64 current_packets;
	u64 current_bytes;
	int err;

	rulei = mlxsw_sp_acl_rule_rulei(rule);
	err = mlxsw_sp_flow_counter_get(mlxsw_sp, rulei->counter_index,
					&current_packets, &current_bytes);
	if (err)
		return err;

	*packets = current_packets - rule->last_packets;
	*bytes = current_bytes - rule->last_bytes;
	*last_use = rule->last_used;

	rule->last_bytes = current_bytes;
	rule->last_packets = current_packets;

	return 0;
}

#define MLXSW_SP_KVDL_ACT_EXT_SIZE 1
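
/* Action sets that do not fit into the TCAM entry itself spill over to
 * the KVD linear area; one extension record occupies a single KVDL
 * entry, hence the allocation size of 1.
 */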
static int mlxsw_sp_act_kvdl_set_add(void *priv, u32 *p_kvdl_index,
				     char *enc_actions, bool is_first)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	char pefa_pl[MLXSW_REG_PEFA_LEN];
	u32 kvdl_index;
	int err;

	/* The first action set of a TCAM entry is stored directly in TCAM,
	 * not in the KVD linear area.
	 */
	if (is_first)
		return 0;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ACT_EXT_SIZE,
				  &kvdl_index);
	if (err)
		return err;
	mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, enc_actions);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
	if (err)
		goto err_pefa_write;
	*p_kvdl_index = kvdl_index;
	return 0;

err_pefa_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
	return err;
}

static void mlxsw_sp_act_kvdl_set_del(void *priv, u32 kvdl_index,
				      bool is_first)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	/* The first action set lives in TCAM, so nothing was allocated. */
	if (is_first)
		return;
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}

static int mlxsw_sp_act_kvdl_fwd_entry_add(void *priv, u32 *p_kvdl_index,
					   u8 local_port)
{
	struct mlxsw_sp *mlxsw_sp = priv;
	char ppbs_pl[MLXSW_REG_PPBS_LEN];
	u32 kvdl_index;
	int err;

	err = mlxsw_sp_kvdl_alloc(mlxsw_sp, 1, &kvdl_index);
	if (err)
		return err;
	mlxsw_reg_ppbs_pack(ppbs_pl, kvdl_index, local_port);
	err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(ppbs), ppbs_pl);
	if (err)
		goto err_ppbs_write;
	*p_kvdl_index = kvdl_index;
	return 0;

err_ppbs_write:
	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
	return err;
}

static void mlxsw_sp_act_kvdl_fwd_entry_del(void *priv, u32 kvdl_index)
{
	struct mlxsw_sp *mlxsw_sp = priv;

	mlxsw_sp_kvdl_free(mlxsw_sp, kvdl_index);
}

static const struct mlxsw_afa_ops mlxsw_sp_act_afa_ops = {
	.kvdl_set_add = mlxsw_sp_act_kvdl_set_add,
	.kvdl_set_del = mlxsw_sp_act_kvdl_set_del,
	.kvdl_fwd_entry_add = mlxsw_sp_act_kvdl_fwd_entry_add,
	.kvdl_fwd_entry_del = mlxsw_sp_act_kvdl_fwd_entry_del,
};

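/* Bring-up order mirrors teardown in mlxsw_sp_acl_fini(): flexible keys,
 * flexible actions, the ruleset hashtable, the dummy FID and finally the
 * TCAM backend, after which the activity-update work is kicked off.
 */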
int mlxsw_sp_acl_init(struct mlxsw_sp *mlxsw_sp)
{
	const struct mlxsw_sp_acl_ops *acl_ops = &mlxsw_sp_acl_tcam_ops;
	struct mlxsw_sp_fid *fid;
	struct mlxsw_sp_acl *acl;
	int err;

	acl = kzalloc(sizeof(*acl) + acl_ops->priv_size, GFP_KERNEL);
	if (!acl)
		return -ENOMEM;
	mlxsw_sp->acl = acl;
	acl->mlxsw_sp = mlxsw_sp;
	acl->afk = mlxsw_afk_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_FLEX_KEYS),
				    mlxsw_sp_afk_blocks,
				    MLXSW_SP_AFK_BLOCKS_COUNT);
	if (!acl->afk) {
		err = -ENOMEM;
		goto err_afk_create;
	}

	acl->afa = mlxsw_afa_create(MLXSW_CORE_RES_GET(mlxsw_sp->core,
						       ACL_ACTIONS_PER_SET),
				    &mlxsw_sp_act_afa_ops, mlxsw_sp);
	if (IS_ERR(acl->afa)) {
		err = PTR_ERR(acl->afa);
		goto err_afa_create;
	}

	err = rhashtable_init(&acl->ruleset_ht,
			      &mlxsw_sp_acl_ruleset_ht_params);
	if (err)
		goto err_rhashtable_init;

	fid = mlxsw_sp_fid_dummy_get(mlxsw_sp);
	if (IS_ERR(fid)) {
		err = PTR_ERR(fid);
		goto err_fid_get;
	}
	acl->dummy_fid = fid;

	INIT_LIST_HEAD(&acl->rules);
	err = acl_ops->init(mlxsw_sp, acl->priv);
	if (err)
		goto err_acl_ops_init;

	acl->ops = acl_ops;

	/* Create the delayed work for rule activity updates */
	INIT_DELAYED_WORK(&acl->rule_activity_update.dw,
			  mlxsw_sp_acl_rule_activity_update_work);
	acl->rule_activity_update.interval = MLXSW_SP_ACL_RULE_ACTIVITY_UPDATE_PERIOD_MS;
	mlxsw_core_schedule_dw(&acl->rule_activity_update.dw, 0);
	return 0;

err_acl_ops_init:
	mlxsw_sp_fid_put(fid);
err_fid_get:
	rhashtable_destroy(&acl->ruleset_ht);
err_rhashtable_init:
	mlxsw_afa_destroy(acl->afa);
err_afa_create:
	mlxsw_afk_destroy(acl->afk);
err_afk_create:
	kfree(acl);
	return err;
}

void mlxsw_sp_acl_fini(struct mlxsw_sp *mlxsw_sp)
{
	struct mlxsw_sp_acl *acl = mlxsw_sp->acl;
	const struct mlxsw_sp_acl_ops *acl_ops = acl->ops;

	cancel_delayed_work_sync(&mlxsw_sp->acl->rule_activity_update.dw);
	acl_ops->fini(mlxsw_sp, acl->priv);
	WARN_ON(!list_empty(&acl->rules));
	mlxsw_sp_fid_put(acl->dummy_fid);
	rhashtable_destroy(&acl->ruleset_ht);
	mlxsw_afa_destroy(acl->afa);
	mlxsw_afk_destroy(acl->afk);
	kfree(acl);
}