// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/* Copyright (c) 2018 Mellanox Technologies. All rights reserved */

#include <linux/kernel.h>

#include "spectrum.h"
#include "spectrum_acl_tcam.h"
#include "core_acl_flex_actions.h"
10 struct mlxsw_sp2_acl_tcam {
11 struct mlxsw_sp_acl_atcam atcam;
13 unsigned int kvdl_count;
16 struct mlxsw_sp2_acl_tcam_region {
17 struct mlxsw_sp_acl_atcam_region aregion;
18 struct mlxsw_sp_acl_tcam_region *region;
21 struct mlxsw_sp2_acl_tcam_chunk {
22 struct mlxsw_sp_acl_atcam_chunk achunk;
25 struct mlxsw_sp2_acl_tcam_entry {
26 struct mlxsw_sp_acl_atcam_entry aentry;
27 struct mlxsw_afa_block *act_block;
31 mlxsw_sp2_acl_ctcam_region_entry_insert(struct mlxsw_sp_acl_ctcam_region *cregion,
32 struct mlxsw_sp_acl_ctcam_entry *centry,
35 struct mlxsw_sp_acl_atcam_region *aregion;
36 struct mlxsw_sp_acl_atcam_entry *aentry;
37 struct mlxsw_sp_acl_erp_mask *erp_mask;
39 aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
40 aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
42 erp_mask = mlxsw_sp_acl_erp_mask_get(aregion, mask, true);
44 return PTR_ERR(erp_mask);
45 aentry->erp_mask = erp_mask;
51 mlxsw_sp2_acl_ctcam_region_entry_remove(struct mlxsw_sp_acl_ctcam_region *cregion,
52 struct mlxsw_sp_acl_ctcam_entry *centry)
54 struct mlxsw_sp_acl_atcam_region *aregion;
55 struct mlxsw_sp_acl_atcam_entry *aentry;
57 aregion = mlxsw_sp_acl_tcam_cregion_aregion(cregion);
58 aentry = mlxsw_sp_acl_tcam_centry_aentry(centry);
60 mlxsw_sp_acl_erp_mask_put(aregion, aentry->erp_mask);
63 static const struct mlxsw_sp_acl_ctcam_region_ops
64 mlxsw_sp2_acl_ctcam_region_ops = {
65 .entry_insert = mlxsw_sp2_acl_ctcam_region_entry_insert,
66 .entry_remove = mlxsw_sp2_acl_ctcam_region_entry_remove,
69 static int mlxsw_sp2_acl_tcam_init(struct mlxsw_sp *mlxsw_sp, void *priv,
70 struct mlxsw_sp_acl_tcam *_tcam)
72 struct mlxsw_sp2_acl_tcam *tcam = priv;
73 struct mlxsw_afa_block *afa_block;
74 char pefa_pl[MLXSW_REG_PEFA_LEN];
75 char pgcr_pl[MLXSW_REG_PGCR_LEN];
80 /* Some TCAM regions are not exposed to the host and used internally
81 * by the device. Allocate KVDL entries for the default actions of
82 * these regions to avoid the host from overwriting them.
84 tcam->kvdl_count = _tcam->max_regions;
85 if (MLXSW_CORE_RES_VALID(mlxsw_sp->core, ACL_MAX_DEFAULT_ACTIONS))
86 tcam->kvdl_count = MLXSW_CORE_RES_GET(mlxsw_sp->core,
87 ACL_MAX_DEFAULT_ACTIONS);
88 err = mlxsw_sp_kvdl_alloc(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
89 tcam->kvdl_count, &tcam->kvdl_index);
93 /* Create flex action block, set default action (continue)
94 * but don't commit. We need just the current set encoding
95 * to be written using PEFA register to all indexes for all regions.
97 afa_block = mlxsw_afa_block_create(mlxsw_sp->afa);
98 if (IS_ERR(afa_block)) {
99 err = PTR_ERR(afa_block);
102 err = mlxsw_afa_block_continue(afa_block);
104 goto err_afa_block_continue;
105 enc_actions = mlxsw_afa_block_cur_set(afa_block);
107 /* Only write to KVDL entries used by TCAM regions exposed to the
110 for (i = 0; i < _tcam->max_regions; i++) {
111 mlxsw_reg_pefa_pack(pefa_pl, tcam->kvdl_index + i,
113 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pefa), pefa_pl);
117 mlxsw_reg_pgcr_pack(pgcr_pl, tcam->kvdl_index);
118 err = mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(pgcr), pgcr_pl);
122 err = mlxsw_sp_acl_atcam_init(mlxsw_sp, &tcam->atcam);
126 mlxsw_afa_block_destroy(afa_block);
132 err_afa_block_continue:
133 mlxsw_afa_block_destroy(afa_block);
135 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
136 tcam->kvdl_count, tcam->kvdl_index);
140 static void mlxsw_sp2_acl_tcam_fini(struct mlxsw_sp *mlxsw_sp, void *priv)
142 struct mlxsw_sp2_acl_tcam *tcam = priv;
144 mlxsw_sp_acl_atcam_fini(mlxsw_sp, &tcam->atcam);
145 mlxsw_sp_kvdl_free(mlxsw_sp, MLXSW_SP_KVDL_ENTRY_TYPE_ACTSET,
146 tcam->kvdl_count, tcam->kvdl_index);
150 mlxsw_sp2_acl_tcam_region_init(struct mlxsw_sp *mlxsw_sp, void *region_priv,
152 struct mlxsw_sp_acl_tcam_region *_region,
155 struct mlxsw_sp2_acl_tcam_region *region = region_priv;
156 struct mlxsw_sp2_acl_tcam *tcam = tcam_priv;
158 region->region = _region;
160 return mlxsw_sp_acl_atcam_region_init(mlxsw_sp, &tcam->atcam,
163 &mlxsw_sp2_acl_ctcam_region_ops);
167 mlxsw_sp2_acl_tcam_region_fini(struct mlxsw_sp *mlxsw_sp, void *region_priv)
169 struct mlxsw_sp2_acl_tcam_region *region = region_priv;
171 mlxsw_sp_acl_atcam_region_fini(®ion->aregion);
175 mlxsw_sp2_acl_tcam_region_associate(struct mlxsw_sp *mlxsw_sp,
176 struct mlxsw_sp_acl_tcam_region *region)
178 return mlxsw_sp_acl_atcam_region_associate(mlxsw_sp, region->id);
181 static void *mlxsw_sp2_acl_tcam_region_rehash_hints_get(void *region_priv)
183 struct mlxsw_sp2_acl_tcam_region *region = region_priv;
185 return mlxsw_sp_acl_atcam_rehash_hints_get(®ion->aregion);
/* Release rehash hints previously obtained from the A-TCAM layer. */
static void mlxsw_sp2_acl_tcam_region_rehash_hints_put(void *hints_priv)
{
	mlxsw_sp_acl_atcam_rehash_hints_put(hints_priv);
}
193 static void mlxsw_sp2_acl_tcam_chunk_init(void *region_priv, void *chunk_priv,
194 unsigned int priority)
196 struct mlxsw_sp2_acl_tcam_region *region = region_priv;
197 struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
199 mlxsw_sp_acl_atcam_chunk_init(®ion->aregion, &chunk->achunk,
203 static void mlxsw_sp2_acl_tcam_chunk_fini(void *chunk_priv)
205 struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
207 mlxsw_sp_acl_atcam_chunk_fini(&chunk->achunk);
210 static int mlxsw_sp2_acl_tcam_entry_add(struct mlxsw_sp *mlxsw_sp,
211 void *region_priv, void *chunk_priv,
213 struct mlxsw_sp_acl_rule_info *rulei)
215 struct mlxsw_sp2_acl_tcam_region *region = region_priv;
216 struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
217 struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
219 entry->act_block = rulei->act_block;
220 return mlxsw_sp_acl_atcam_entry_add(mlxsw_sp, ®ion->aregion,
221 &chunk->achunk, &entry->aentry,
225 static void mlxsw_sp2_acl_tcam_entry_del(struct mlxsw_sp *mlxsw_sp,
226 void *region_priv, void *chunk_priv,
229 struct mlxsw_sp2_acl_tcam_region *region = region_priv;
230 struct mlxsw_sp2_acl_tcam_chunk *chunk = chunk_priv;
231 struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
233 mlxsw_sp_acl_atcam_entry_del(mlxsw_sp, ®ion->aregion, &chunk->achunk,
238 mlxsw_sp2_acl_tcam_entry_action_replace(struct mlxsw_sp *mlxsw_sp,
239 void *region_priv, void *entry_priv,
240 struct mlxsw_sp_acl_rule_info *rulei)
242 struct mlxsw_sp2_acl_tcam_region *region = region_priv;
243 struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
245 entry->act_block = rulei->act_block;
246 return mlxsw_sp_acl_atcam_entry_action_replace(mlxsw_sp,
248 &entry->aentry, rulei);
252 mlxsw_sp2_acl_tcam_entry_activity_get(struct mlxsw_sp *mlxsw_sp,
253 void *region_priv, void *entry_priv,
256 struct mlxsw_sp2_acl_tcam_entry *entry = entry_priv;
258 return mlxsw_afa_block_activity_get(entry->act_block, activity);
261 const struct mlxsw_sp_acl_tcam_ops mlxsw_sp2_acl_tcam_ops = {
262 .key_type = MLXSW_REG_PTAR_KEY_TYPE_FLEX2,
263 .priv_size = sizeof(struct mlxsw_sp2_acl_tcam),
264 .init = mlxsw_sp2_acl_tcam_init,
265 .fini = mlxsw_sp2_acl_tcam_fini,
266 .region_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_region),
267 .region_init = mlxsw_sp2_acl_tcam_region_init,
268 .region_fini = mlxsw_sp2_acl_tcam_region_fini,
269 .region_associate = mlxsw_sp2_acl_tcam_region_associate,
270 .region_rehash_hints_get = mlxsw_sp2_acl_tcam_region_rehash_hints_get,
271 .region_rehash_hints_put = mlxsw_sp2_acl_tcam_region_rehash_hints_put,
272 .chunk_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_chunk),
273 .chunk_init = mlxsw_sp2_acl_tcam_chunk_init,
274 .chunk_fini = mlxsw_sp2_acl_tcam_chunk_fini,
275 .entry_priv_size = sizeof(struct mlxsw_sp2_acl_tcam_entry),
276 .entry_add = mlxsw_sp2_acl_tcam_entry_add,
277 .entry_del = mlxsw_sp2_acl_tcam_entry_del,
278 .entry_action_replace = mlxsw_sp2_acl_tcam_entry_action_replace,
279 .entry_activity_get = mlxsw_sp2_acl_tcam_entry_activity_get,