GNU Linux-libre 6.8.9-gnu
drivers/net/ethernet/marvell/octeontx2/nic/otx2_tc.c
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2021 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/inetdevice.h>
#include <linux/rhashtable.h>
#include <linux/bitfield.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/ipv6.h>

#include "cn10k.h"
#include "otx2_common.h"
#include "qos.h"

#define CN10K_MAX_BURST_MANTISSA        0x7FFFULL
#define CN10K_MAX_BURST_SIZE            8453888ULL

#define CN10K_TLX_BURST_MANTISSA        GENMASK_ULL(43, 29)
#define CN10K_TLX_BURST_EXPONENT        GENMASK_ULL(47, 44)

#define OTX2_UNSUPP_LSE_DEPTH           GENMASK(6, 4)

#define MCAST_INVALID_GRP               (-1U)

struct otx2_tc_flow_stats {
        u64 bytes;
        u64 pkts;
        u64 used;
};

struct otx2_tc_flow {
        struct list_head                list;
        unsigned long                   cookie;
        struct rcu_head                 rcu;
        struct otx2_tc_flow_stats       stats;
        spinlock_t                      lock; /* lock for stats */
        u16                             rq;
        u16                             entry;
        u16                             leaf_profile;
        bool                            is_act_police;
        u32                             prio;
        struct npc_install_flow_req     req;
        u32                             mcast_grp_idx;
        u64                             rate;
        u32                             burst;
        bool                            is_pps;
};

static void otx2_get_egress_burst_cfg(struct otx2_nic *nic, u32 burst,
                                      u32 *burst_exp, u32 *burst_mantissa)
{
        int max_burst, max_mantissa;
        unsigned int tmp;

        if (is_dev_otx2(nic->pdev)) {
                max_burst = MAX_BURST_SIZE;
                max_mantissa = MAX_BURST_MANTISSA;
        } else {
                max_burst = CN10K_MAX_BURST_SIZE;
                max_mantissa = CN10K_MAX_BURST_MANTISSA;
        }

        /* Burst is calculated as
         * ((256 + BURST_MANTISSA) << (1 + BURST_EXPONENT)) / 256
         * Max supported burst size is 130,816 bytes.
         */
        burst = min_t(u32, burst, max_burst);
        if (burst) {
                *burst_exp = ilog2(burst) ? ilog2(burst) - 1 : 0;
                tmp = burst - rounddown_pow_of_two(burst);
                if (burst < max_mantissa)
                        *burst_mantissa = tmp * 2;
                else
                        *burst_mantissa = tmp / (1ULL << (*burst_exp - 7));
        } else {
                *burst_exp = MAX_BURST_EXPONENT;
                *burst_mantissa = max_mantissa;
        }
}
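
/* Illustrative worked example (editorial note, not part of the original
 * driver): requesting burst = 6144 bytes on OcteonTx2, where the mantissa
 * limit is below this value, gives ilog2(6144) = 12, so burst_exp = 11;
 * tmp = 6144 - rounddown_pow_of_two(6144) = 2048, and the else branch yields
 * burst_mantissa = 2048 >> (11 - 7) = 128. Plugging back into the hardware
 * formula: ((256 + 128) << (1 + 11)) / 256 = 6144 bytes, so the encoding
 * round-trips exactly for this value.
 */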

static void otx2_get_egress_rate_cfg(u64 maxrate, u32 *exp,
                                     u32 *mantissa, u32 *div_exp)
{
        u64 tmp;

        /* Rate calculation by hardware
         *
         * PIR_ADD = ((256 + mantissa) << exp) / 256
         * rate = (2 * PIR_ADD) / ( 1 << div_exp)
         * The resultant rate is in Mbps.
         */

        /* 2Mbps to 100Gbps can be expressed with div_exp = 0.
         * Setting this to '0' will ease the calculation of
         * exponent and mantissa.
         */
        *div_exp = 0;

        if (maxrate) {
                *exp = ilog2(maxrate) ? ilog2(maxrate) - 1 : 0;
                tmp = maxrate - rounddown_pow_of_two(maxrate);
                if (maxrate < MAX_RATE_MANTISSA)
                        *mantissa = tmp * 2;
                else
                        *mantissa = tmp / (1ULL << (*exp - 7));
        } else {
                /* Instead of disabling rate limiting, set all values to max */
                *exp = MAX_RATE_EXPONENT;
                *mantissa = MAX_RATE_MANTISSA;
        }
}
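
/* Illustrative worked example (editorial note, not part of the original
 * driver): for maxrate = 300 Mbps, ilog2(300) = 8, so exp = 7;
 * tmp = 300 - 256 = 44, and the divide branch applies (300 is not below
 * MAX_RATE_MANTISSA, assumed 0xFF per otx2_common.h), giving
 * mantissa = 44 >> (7 - 7) = 44. Checking against the hardware formula:
 * PIR_ADD = ((256 + 44) << 7) / 256 = 150 and rate = (2 * 150) / (1 << 0)
 * = 300 Mbps, so the requested rate is reproduced exactly.
 */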

u64 otx2_get_txschq_rate_regval(struct otx2_nic *nic,
                                u64 maxrate, u32 burst)
{
        u32 burst_exp, burst_mantissa;
        u32 exp, mantissa, div_exp;
        u64 regval = 0;

        /* Get exponent and mantissa values from the desired rate */
        otx2_get_egress_burst_cfg(nic, burst, &burst_exp, &burst_mantissa);
        otx2_get_egress_rate_cfg(maxrate, &exp, &mantissa, &div_exp);

        if (is_dev_otx2(nic->pdev)) {
                regval = FIELD_PREP(TLX_BURST_EXPONENT, (u64)burst_exp) |
                                FIELD_PREP(TLX_BURST_MANTISSA, (u64)burst_mantissa) |
                                FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
                                FIELD_PREP(TLX_RATE_EXPONENT, exp) |
                                FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
        } else {
                regval = FIELD_PREP(CN10K_TLX_BURST_EXPONENT, (u64)burst_exp) |
                                FIELD_PREP(CN10K_TLX_BURST_MANTISSA, (u64)burst_mantissa) |
                                FIELD_PREP(TLX_RATE_DIVIDER_EXPONENT, div_exp) |
                                FIELD_PREP(TLX_RATE_EXPONENT, exp) |
                                FIELD_PREP(TLX_RATE_MANTISSA, mantissa) | BIT_ULL(0);
        }

        return regval;
}

static int otx2_set_matchall_egress_rate(struct otx2_nic *nic,
                                         u32 burst, u64 maxrate)
{
        struct otx2_hw *hw = &nic->hw;
        struct nix_txschq_config *req;
        int txschq, err;

        /* All SQs share the same TL4, so pick the first scheduler */
        txschq = hw->txschq_list[NIX_TXSCH_LVL_TL4][0];

        mutex_lock(&nic->mbox.lock);
        req = otx2_mbox_alloc_msg_nix_txschq_cfg(&nic->mbox);
        if (!req) {
                mutex_unlock(&nic->mbox.lock);
                return -ENOMEM;
        }

        req->lvl = NIX_TXSCH_LVL_TL4;
        req->num_regs = 1;
        req->reg[0] = NIX_AF_TL4X_PIR(txschq);
        req->regval[0] = otx2_get_txschq_rate_regval(nic, maxrate, burst);

        err = otx2_sync_mbox_msg(&nic->mbox);
        mutex_unlock(&nic->mbox.lock);
        return err;
}

static int otx2_tc_validate_flow(struct otx2_nic *nic,
                                 struct flow_action *actions,
                                 struct netlink_ext_ack *extack)
{
        if (nic->flags & OTX2_FLAG_INTF_DOWN) {
                NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
                return -EINVAL;
        }

        if (!flow_action_has_entries(actions)) {
                NL_SET_ERR_MSG_MOD(extack, "MATCHALL offload called with no action");
                return -EINVAL;
        }

        if (!flow_offload_has_one_action(actions)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Egress MATCHALL offload supports only 1 policing action");
                return -EINVAL;
        }
        return 0;
}

static int otx2_policer_validate(const struct flow_action *action,
                                 const struct flow_action_entry *act,
                                 struct netlink_ext_ack *extack)
{
        if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Offload not supported when exceed action is not drop");
                return -EOPNOTSUPP;
        }

        if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
            act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Offload not supported when conform action is not pipe or ok");
                return -EOPNOTSUPP;
        }

        if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
            !flow_action_is_last_entry(action, act)) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Offload not supported when conform action is ok, but action is not last");
                return -EOPNOTSUPP;
        }

        if (act->police.peakrate_bytes_ps ||
            act->police.avrate || act->police.overhead) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Offload not supported when peakrate/avrate/overhead is configured");
                return -EOPNOTSUPP;
        }

        return 0;
}

static int otx2_tc_egress_matchall_install(struct otx2_nic *nic,
                                           struct tc_cls_matchall_offload *cls)
{
        struct netlink_ext_ack *extack = cls->common.extack;
        struct flow_action *actions = &cls->rule->action;
        struct flow_action_entry *entry;
        int err;

        err = otx2_tc_validate_flow(nic, actions, extack);
        if (err)
                return err;

        if (nic->flags & OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Only one Egress MATCHALL ratelimiter can be offloaded");
                return -ENOMEM;
        }

        entry = &cls->rule->action.entries[0];
        switch (entry->id) {
        case FLOW_ACTION_POLICE:
                err = otx2_policer_validate(&cls->rule->action, entry, extack);
                if (err)
                        return err;

                if (entry->police.rate_pkt_ps) {
                        NL_SET_ERR_MSG_MOD(extack, "QoS offload does not support packets per second");
                        return -EOPNOTSUPP;
                }
                err = otx2_set_matchall_egress_rate(nic, entry->police.burst,
                                                    otx2_convert_rate(entry->police.rate_bytes_ps));
                if (err)
                        return err;
                nic->flags |= OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
                break;
        default:
                NL_SET_ERR_MSG_MOD(extack,
                                   "Only police action is supported with Egress MATCHALL offload");
                return -EOPNOTSUPP;
        }

        return 0;
}
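
/* Example usage (editorial illustration; eth0 is a placeholder interface):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 egress matchall skip_sw \
 *       action police rate 1gbit burst 64kb conform-exceed drop/pipe
 *
 * This takes the FLOW_ACTION_POLICE branch above: the byte rate is converted
 * and programmed into the shared TL4 PIR register, and a second egress
 * matchall filter would be rejected while the first one is installed.
 */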

static int otx2_tc_egress_matchall_delete(struct otx2_nic *nic,
                                          struct tc_cls_matchall_offload *cls)
{
        struct netlink_ext_ack *extack = cls->common.extack;
        int err;

        if (nic->flags & OTX2_FLAG_INTF_DOWN) {
                NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
                return -EINVAL;
        }

        err = otx2_set_matchall_egress_rate(nic, 0, 0);
        nic->flags &= ~OTX2_FLAG_TC_MATCHALL_EGRESS_ENABLED;
        return err;
}

static int otx2_tc_act_set_hw_police(struct otx2_nic *nic,
                                     struct otx2_tc_flow *node)
{
        int rc;

        mutex_lock(&nic->mbox.lock);

        rc = cn10k_alloc_leaf_profile(nic, &node->leaf_profile);
        if (rc) {
                mutex_unlock(&nic->mbox.lock);
                return rc;
        }

        rc = cn10k_set_ipolicer_rate(nic, node->leaf_profile,
                                     node->burst, node->rate, node->is_pps);
        if (rc)
                goto free_leaf;

        rc = cn10k_map_unmap_rq_policer(nic, node->rq, node->leaf_profile, true);
        if (rc)
                goto free_leaf;

        mutex_unlock(&nic->mbox.lock);

        return 0;

free_leaf:
        if (cn10k_free_leaf_profile(nic, node->leaf_profile))
                netdev_err(nic->netdev,
                           "Unable to free leaf bandwidth profile(%d)\n",
                           node->leaf_profile);
        mutex_unlock(&nic->mbox.lock);
        return rc;
}

static int otx2_tc_act_set_police(struct otx2_nic *nic,
                                  struct otx2_tc_flow *node,
                                  struct flow_cls_offload *f,
                                  u64 rate, u32 burst, u32 mark,
                                  struct npc_install_flow_req *req, bool pps)
{
        struct netlink_ext_ack *extack = f->common.extack;
        struct otx2_hw *hw = &nic->hw;
        int rq_idx, rc;

        rq_idx = find_first_zero_bit(&nic->rq_bmap, hw->rx_queues);
        if (rq_idx >= hw->rx_queues) {
                NL_SET_ERR_MSG_MOD(extack, "Police action rules exceeded");
                return -EINVAL;
        }

        req->match_id = mark & 0xFFFFULL;
        req->index = rq_idx;
        req->op = NIX_RX_ACTIONOP_UCAST;

        node->is_act_police = true;
        node->rq = rq_idx;
        node->burst = burst;
        node->rate = rate;
        node->is_pps = pps;

        rc = otx2_tc_act_set_hw_police(nic, node);
        if (!rc)
                set_bit(rq_idx, &nic->rq_bmap);

        return rc;
}

static int otx2_tc_update_mcast(struct otx2_nic *nic,
                                struct npc_install_flow_req *req,
                                struct netlink_ext_ack *extack,
                                struct otx2_tc_flow *node,
                                struct nix_mcast_grp_update_req *ureq,
                                u8 num_intf)
{
        struct nix_mcast_grp_update_req *grp_update_req;
        struct nix_mcast_grp_create_req *creq;
        struct nix_mcast_grp_create_rsp *crsp;
        u32 grp_index;
        int rc;

        mutex_lock(&nic->mbox.lock);
        creq = otx2_mbox_alloc_msg_nix_mcast_grp_create(&nic->mbox);
        if (!creq) {
                rc = -ENOMEM;
                goto error;
        }

        creq->dir = NIX_MCAST_INGRESS;
        /* Send message to AF */
        rc = otx2_sync_mbox_msg(&nic->mbox);
        if (rc) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to create multicast group");
                goto error;
        }

        crsp = (struct nix_mcast_grp_create_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
                        0,
                        &creq->hdr);
        if (IS_ERR(crsp)) {
                rc = PTR_ERR(crsp);
                goto error;
        }

        grp_index = crsp->mcast_grp_idx;
        grp_update_req = otx2_mbox_alloc_msg_nix_mcast_grp_update(&nic->mbox);
        if (!grp_update_req) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
                rc = -ENOMEM;
                goto error;
        }

        ureq->op = NIX_MCAST_OP_ADD_ENTRY;
        ureq->mcast_grp_idx = grp_index;
        ureq->num_mce_entry = num_intf;
        ureq->pcifunc[0] = nic->pcifunc;
        ureq->channel[0] = nic->hw.tx_chan_base;

        ureq->dest_type[0] = NIX_RX_RSS;
        ureq->rq_rss_index[0] = 0;
        memcpy(&ureq->hdr, &grp_update_req->hdr, sizeof(struct mbox_msghdr));
        memcpy(grp_update_req, ureq, sizeof(struct nix_mcast_grp_update_req));

        /* Send message to AF */
        rc = otx2_sync_mbox_msg(&nic->mbox);
        if (rc) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to update multicast group");
                goto error;
        }

        mutex_unlock(&nic->mbox.lock);
        req->op = NIX_RX_ACTIONOP_MCAST;
        req->index = grp_index;
        node->mcast_grp_idx = grp_index;
        return 0;

error:
        mutex_unlock(&nic->mbox.lock);
        return rc;
}
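
/* Example usage (editorial illustration; interface and MAC are placeholders):
 *
 *   tc filter add dev eth0 ingress flower dst_mac 01:00:5e:01:02:03 \
 *       action mirred ingress mirror dev eth1
 *
 * Entry 0 of the update request is the local pcifunc/channel and each mirror
 * target adds one more MCE entry; the AF creates an ingress multicast group,
 * populates it, and the flow's NIX RX action becomes MCAST with that group
 * index.
 */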

static int otx2_tc_parse_actions(struct otx2_nic *nic,
                                 struct flow_action *flow_action,
                                 struct npc_install_flow_req *req,
                                 struct flow_cls_offload *f,
                                 struct otx2_tc_flow *node)
{
        struct nix_mcast_grp_update_req dummy_grp_update_req = { 0 };
        struct netlink_ext_ack *extack = f->common.extack;
        bool pps = false, mcast = false;
        struct flow_action_entry *act;
        struct net_device *target;
        struct otx2_nic *priv;
        u32 burst, mark = 0;
        u8 nr_police = 0;
        u8 num_intf = 1;
        int err, i;
        u64 rate;

        if (!flow_action_has_entries(flow_action)) {
                NL_SET_ERR_MSG_MOD(extack, "no tc actions specified");
                return -EINVAL;
        }

        flow_action_for_each(i, act, flow_action) {
                switch (act->id) {
                case FLOW_ACTION_DROP:
                        req->op = NIX_RX_ACTIONOP_DROP;
                        return 0;
                case FLOW_ACTION_ACCEPT:
                        req->op = NIX_RX_ACTION_DEFAULT;
                        return 0;
                case FLOW_ACTION_REDIRECT_INGRESS:
                        target = act->dev;
                        priv = netdev_priv(target);
                        /* npc_install_flow_req doesn't support passing a target pcifunc */
                        if (rvu_get_pf(nic->pcifunc) != rvu_get_pf(priv->pcifunc)) {
                                NL_SET_ERR_MSG_MOD(extack,
                                                   "can't redirect to other pf/vf");
                                return -EOPNOTSUPP;
                        }
                        req->vf = priv->pcifunc & RVU_PFVF_FUNC_MASK;

                        /* if op is already set, avoid overwriting it */
                        if (!req->op)
                                req->op = NIX_RX_ACTION_DEFAULT;
                        break;

                case FLOW_ACTION_VLAN_POP:
                        req->vtag0_valid = true;
                        /* use RX_VTAG_TYPE7 which is initialized to strip vlan tag */
                        req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE7;
                        break;
                case FLOW_ACTION_POLICE:
                        /* Ingress ratelimiting is not supported on OcteonTx2 */
                        if (is_dev_otx2(nic->pdev)) {
                                NL_SET_ERR_MSG_MOD(extack,
                                        "Ingress policing not supported on this platform");
                                return -EOPNOTSUPP;
                        }

                        err = otx2_policer_validate(flow_action, act, extack);
                        if (err)
                                return err;

                        if (act->police.rate_bytes_ps > 0) {
                                rate = act->police.rate_bytes_ps * 8;
                                burst = act->police.burst;
                        } else if (act->police.rate_pkt_ps > 0) {
                                /* The algorithm used to calculate rate
                                 * mantissa, exponent values for a given token
                                 * rate (token can be byte or packet) requires
                                 * token rate to be multiplied by 8.
                                 */
                                rate = act->police.rate_pkt_ps * 8;
                                burst = act->police.burst_pkt;
                                pps = true;
                        }
                        nr_police++;
                        break;
                case FLOW_ACTION_MARK:
                        mark = act->mark;
                        break;

                case FLOW_ACTION_RX_QUEUE_MAPPING:
                        req->op = NIX_RX_ACTIONOP_UCAST;
                        req->index = act->rx_queue;
                        break;

                case FLOW_ACTION_MIRRED_INGRESS:
                        target = act->dev;
                        priv = netdev_priv(target);
                        dummy_grp_update_req.pcifunc[num_intf] = priv->pcifunc;
                        dummy_grp_update_req.channel[num_intf] = priv->hw.tx_chan_base;
                        dummy_grp_update_req.dest_type[num_intf] = NIX_RX_RSS;
                        dummy_grp_update_req.rq_rss_index[num_intf] = 0;
                        mcast = true;
                        num_intf++;
                        break;

                default:
                        return -EOPNOTSUPP;
                }
        }

        if (mcast) {
                err = otx2_tc_update_mcast(nic, req, extack, node,
                                           &dummy_grp_update_req,
                                           num_intf);
                if (err)
                        return err;
        }

        if (nr_police > 1) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "rate limit police offload requires a single action");
                return -EOPNOTSUPP;
        }

        if (nr_police)
                return otx2_tc_act_set_police(nic, node, f, rate, burst,
                                              mark, req, pps);

        return 0;
}
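
/* Example usage (editorial illustration; device names and values are
 * placeholders):
 *
 *   tc filter add dev eth0 ingress flower ip_proto udp \
 *       action skbedit queue_mapping 4
 *
 * lands in the FLOW_ACTION_RX_QUEUE_MAPPING branch and steers matches to
 * RQ 4 via NIX_RX_ACTIONOP_UCAST, while
 *
 *   tc filter add dev eth0 ingress flower ip_proto tcp \
 *       action police rate 100mbit burst 32kb conform-exceed drop/pipe
 *
 * lands in the FLOW_ACTION_POLICE branch (CN10K only) and is programmed via
 * otx2_tc_act_set_police() above.
 */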

static int otx2_tc_process_vlan(struct otx2_nic *nic, struct flow_msg *flow_spec,
                                struct flow_msg *flow_mask, struct flow_rule *rule,
                                struct npc_install_flow_req *req, bool is_inner)
{
        struct flow_match_vlan match;
        u16 vlan_tci, vlan_tci_mask;

        if (is_inner)
                flow_rule_match_cvlan(rule, &match);
        else
                flow_rule_match_vlan(rule, &match);

        if (!eth_type_vlan(match.key->vlan_tpid)) {
                netdev_err(nic->netdev, "vlan tpid 0x%x not supported\n",
                           ntohs(match.key->vlan_tpid));
                return -EOPNOTSUPP;
        }

        if (!match.mask->vlan_id) {
                struct flow_action_entry *act;
                int i;

                flow_action_for_each(i, act, &rule->action) {
                        if (act->id == FLOW_ACTION_DROP) {
                                netdev_err(nic->netdev,
                                           "vlan tpid 0x%x with vlan_id %d is not supported for DROP rule.\n",
                                           ntohs(match.key->vlan_tpid), match.key->vlan_id);
                                return -EOPNOTSUPP;
                        }
                }
        }

        if (match.mask->vlan_id ||
            match.mask->vlan_dei ||
            match.mask->vlan_priority) {
                vlan_tci = match.key->vlan_id |
                           match.key->vlan_dei << 12 |
                           match.key->vlan_priority << 13;

                vlan_tci_mask = match.mask->vlan_id |
                                match.mask->vlan_dei << 12 |
                                match.mask->vlan_priority << 13;
                if (is_inner) {
                        flow_spec->vlan_itci = htons(vlan_tci);
                        flow_mask->vlan_itci = htons(vlan_tci_mask);
                        req->features |= BIT_ULL(NPC_INNER_VID);
                } else {
                        flow_spec->vlan_tci = htons(vlan_tci);
                        flow_mask->vlan_tci = htons(vlan_tci_mask);
                        req->features |= BIT_ULL(NPC_OUTER_VID);
                }
        }

        return 0;
}
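
/* Editorial note: the packing above mirrors the 802.1Q TCI layout, with PCP
 * in bits 15-13, DEI in bit 12, and the VLAN ID in bits 11-0. For example,
 * vlan_id = 5, vlan_priority = 3, vlan_dei = 0 yields
 * 5 | (0 << 12) | (3 << 13) = 0x6005 for the key (and analogously for the
 * mask).
 */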

static int otx2_tc_prepare_flow(struct otx2_nic *nic, struct otx2_tc_flow *node,
                                struct flow_cls_offload *f,
                                struct npc_install_flow_req *req)
{
        struct netlink_ext_ack *extack = f->common.extack;
        struct flow_msg *flow_spec = &req->packet;
        struct flow_msg *flow_mask = &req->mask;
        struct flow_dissector *dissector;
        struct flow_rule *rule;
        u8 ip_proto = 0;

        rule = flow_cls_offload_flow_rule(f);
        dissector = rule->match.dissector;

        if ((dissector->used_keys &
            ~(BIT_ULL(FLOW_DISSECTOR_KEY_CONTROL) |
              BIT_ULL(FLOW_DISSECTOR_KEY_BASIC) |
              BIT_ULL(FLOW_DISSECTOR_KEY_ETH_ADDRS) |
              BIT_ULL(FLOW_DISSECTOR_KEY_VLAN) |
              BIT_ULL(FLOW_DISSECTOR_KEY_CVLAN) |
              BIT_ULL(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT_ULL(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT_ULL(FLOW_DISSECTOR_KEY_PORTS) |
              BIT_ULL(FLOW_DISSECTOR_KEY_IPSEC) |
              BIT_ULL(FLOW_DISSECTOR_KEY_MPLS) |
              BIT_ULL(FLOW_DISSECTOR_KEY_ICMP) |
              BIT_ULL(FLOW_DISSECTOR_KEY_IP)))) {
                netdev_info(nic->netdev, "unsupported flow used key 0x%llx",
                            dissector->used_keys);
                return -EOPNOTSUPP;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
                struct flow_match_basic match;

                flow_rule_match_basic(rule, &match);

                /* All EtherTypes can be matched, no hw limitation */
                flow_spec->etype = match.key->n_proto;
                flow_mask->etype = match.mask->n_proto;
                req->features |= BIT_ULL(NPC_ETYPE);

                if (match.mask->ip_proto &&
                    (match.key->ip_proto != IPPROTO_TCP &&
                     match.key->ip_proto != IPPROTO_UDP &&
                     match.key->ip_proto != IPPROTO_SCTP &&
                     match.key->ip_proto != IPPROTO_ICMP &&
                     match.key->ip_proto != IPPROTO_ESP &&
                     match.key->ip_proto != IPPROTO_AH &&
                     match.key->ip_proto != IPPROTO_ICMPV6)) {
                        netdev_info(nic->netdev,
                                    "ip_proto=0x%x not supported\n",
                                    match.key->ip_proto);
                        return -EOPNOTSUPP;
                }
                if (match.mask->ip_proto)
                        ip_proto = match.key->ip_proto;

                if (ip_proto == IPPROTO_UDP)
                        req->features |= BIT_ULL(NPC_IPPROTO_UDP);
                else if (ip_proto == IPPROTO_TCP)
                        req->features |= BIT_ULL(NPC_IPPROTO_TCP);
                else if (ip_proto == IPPROTO_SCTP)
                        req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
                else if (ip_proto == IPPROTO_ICMP)
                        req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
                else if (ip_proto == IPPROTO_ICMPV6)
                        req->features |= BIT_ULL(NPC_IPPROTO_ICMP6);
                else if (ip_proto == IPPROTO_ESP)
                        req->features |= BIT_ULL(NPC_IPPROTO_ESP);
                else if (ip_proto == IPPROTO_AH)
                        req->features |= BIT_ULL(NPC_IPPROTO_AH);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_match_control match;
                u32 val;

                flow_rule_match_control(rule, &match);
                if (match.mask->flags & FLOW_DIS_FIRST_FRAG) {
                        NL_SET_ERR_MSG_MOD(extack, "HW doesn't support frag first/later");
                        return -EOPNOTSUPP;
                }

                if (match.mask->flags & FLOW_DIS_IS_FRAGMENT) {
                        val = match.key->flags & FLOW_DIS_IS_FRAGMENT;
                        if (ntohs(flow_spec->etype) == ETH_P_IP) {
                                flow_spec->ip_flag = val ? IPV4_FLAG_MORE : 0;
                                flow_mask->ip_flag = IPV4_FLAG_MORE;
                                req->features |= BIT_ULL(NPC_IPFRAG_IPV4);
                        } else if (ntohs(flow_spec->etype) == ETH_P_IPV6) {
                                flow_spec->next_header = val ?
                                                         IPPROTO_FRAGMENT : 0;
                                flow_mask->next_header = 0xff;
                                req->features |= BIT_ULL(NPC_IPFRAG_IPV6);
                        } else {
                                NL_SET_ERR_MSG_MOD(extack, "flow-type should be either IPv4 or IPv6");
                                return -EOPNOTSUPP;
                        }
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
                struct flow_match_eth_addrs match;

                flow_rule_match_eth_addrs(rule, &match);
                if (!is_zero_ether_addr(match.mask->src)) {
                        NL_SET_ERR_MSG_MOD(extack, "src mac match not supported");
                        return -EOPNOTSUPP;
                }

                if (!is_zero_ether_addr(match.mask->dst)) {
                        ether_addr_copy(flow_spec->dmac, (u8 *)&match.key->dst);
                        ether_addr_copy(flow_mask->dmac,
                                        (u8 *)&match.mask->dst);
                        req->features |= BIT_ULL(NPC_DMAC);
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPSEC)) {
                struct flow_match_ipsec match;

                flow_rule_match_ipsec(rule, &match);
                if (!match.mask->spi) {
                        NL_SET_ERR_MSG_MOD(extack, "spi index not specified");
                        return -EOPNOTSUPP;
                }
                if (ip_proto != IPPROTO_ESP &&
                    ip_proto != IPPROTO_AH) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "SPI index is valid only for ESP/AH proto");
                        return -EOPNOTSUPP;
                }

                flow_spec->spi = match.key->spi;
                flow_mask->spi = match.mask->spi;
                req->features |= BIT_ULL(NPC_IPSEC_SPI);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IP)) {
                struct flow_match_ip match;

                flow_rule_match_ip(rule, &match);
                if ((ntohs(flow_spec->etype) != ETH_P_IP) &&
                    match.mask->tos) {
                        NL_SET_ERR_MSG_MOD(extack, "tos not supported");
                        return -EOPNOTSUPP;
                }
                if (match.mask->ttl) {
                        NL_SET_ERR_MSG_MOD(extack, "ttl not supported");
                        return -EOPNOTSUPP;
                }
                flow_spec->tos = match.key->tos;
                flow_mask->tos = match.mask->tos;
                req->features |= BIT_ULL(NPC_TOS);
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_VLAN)) {
                int ret;

                ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, false);
                if (ret)
                        return ret;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_CVLAN)) {
                int ret;

                ret = otx2_tc_process_vlan(nic, flow_spec, flow_mask, rule, req, true);
                if (ret)
                        return ret;
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
                struct flow_match_ipv4_addrs match;

                flow_rule_match_ipv4_addrs(rule, &match);

                flow_spec->ip4dst = match.key->dst;
                flow_mask->ip4dst = match.mask->dst;
                req->features |= BIT_ULL(NPC_DIP_IPV4);

                flow_spec->ip4src = match.key->src;
                flow_mask->ip4src = match.mask->src;
                req->features |= BIT_ULL(NPC_SIP_IPV4);
        } else if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
                struct flow_match_ipv6_addrs match;

                flow_rule_match_ipv6_addrs(rule, &match);

                if (ipv6_addr_loopback(&match.key->dst) ||
                    ipv6_addr_loopback(&match.key->src)) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "Flow matching IPv6 loopback addr not supported");
                        return -EOPNOTSUPP;
                }

                if (!ipv6_addr_any(&match.mask->dst)) {
                        memcpy(&flow_spec->ip6dst,
                               (struct in6_addr *)&match.key->dst,
                               sizeof(flow_spec->ip6dst));
                        memcpy(&flow_mask->ip6dst,
                               (struct in6_addr *)&match.mask->dst,
                               sizeof(flow_spec->ip6dst));
                        req->features |= BIT_ULL(NPC_DIP_IPV6);
                }

                if (!ipv6_addr_any(&match.mask->src)) {
                        memcpy(&flow_spec->ip6src,
                               (struct in6_addr *)&match.key->src,
                               sizeof(flow_spec->ip6src));
                        memcpy(&flow_mask->ip6src,
                               (struct in6_addr *)&match.mask->src,
                               sizeof(flow_spec->ip6src));
                        req->features |= BIT_ULL(NPC_SIP_IPV6);
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_PORTS)) {
                struct flow_match_ports match;

                flow_rule_match_ports(rule, &match);

                flow_spec->dport = match.key->dst;
                flow_mask->dport = match.mask->dst;

                if (flow_mask->dport) {
                        if (ip_proto == IPPROTO_UDP)
                                req->features |= BIT_ULL(NPC_DPORT_UDP);
                        else if (ip_proto == IPPROTO_TCP)
                                req->features |= BIT_ULL(NPC_DPORT_TCP);
                        else if (ip_proto == IPPROTO_SCTP)
                                req->features |= BIT_ULL(NPC_DPORT_SCTP);
                }

                flow_spec->sport = match.key->src;
                flow_mask->sport = match.mask->src;

                if (flow_mask->sport) {
                        if (ip_proto == IPPROTO_UDP)
                                req->features |= BIT_ULL(NPC_SPORT_UDP);
                        else if (ip_proto == IPPROTO_TCP)
                                req->features |= BIT_ULL(NPC_SPORT_TCP);
                        else if (ip_proto == IPPROTO_SCTP)
                                req->features |= BIT_ULL(NPC_SPORT_SCTP);
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_MPLS)) {
                struct flow_match_mpls match;
                u8 bit;

                flow_rule_match_mpls(rule, &match);

                if (match.mask->used_lses & OTX2_UNSUPP_LSE_DEPTH) {
                        NL_SET_ERR_MSG_MOD(extack,
                                           "unsupported LSE depth for MPLS match offload");
                        return -EOPNOTSUPP;
                }

                for_each_set_bit(bit, (unsigned long *)&match.mask->used_lses,
                                 FLOW_DIS_MPLS_MAX) {
                        /* check if any of the fields LABEL,TC,BOS are set */
                        if (*((u32 *)&match.mask->ls[bit]) &
                            OTX2_FLOWER_MASK_MPLS_NON_TTL) {
                                /* Hardware will capture 4 byte MPLS header into
                                 * two fields NPC_MPLSX_LBTCBOS and NPC_MPLSX_TTL.
                                 * Derive the associated NPC key based on header
                                 * index and offset.
                                 */

                                req->features |= BIT_ULL(NPC_MPLS1_LBTCBOS +
                                                         2 * bit);
                                flow_spec->mpls_lse[bit] =
                                        FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
                                                   match.key->ls[bit].mpls_label) |
                                        FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
                                                   match.key->ls[bit].mpls_tc) |
                                        FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
                                                   match.key->ls[bit].mpls_bos);

                                flow_mask->mpls_lse[bit] =
                                        FIELD_PREP(OTX2_FLOWER_MASK_MPLS_LB,
                                                   match.mask->ls[bit].mpls_label) |
                                        FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TC,
                                                   match.mask->ls[bit].mpls_tc) |
                                        FIELD_PREP(OTX2_FLOWER_MASK_MPLS_BOS,
                                                   match.mask->ls[bit].mpls_bos);
                        }

                        if (match.mask->ls[bit].mpls_ttl) {
                                req->features |= BIT_ULL(NPC_MPLS1_TTL +
                                                         2 * bit);
                                flow_spec->mpls_lse[bit] |=
                                        FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
                                                   match.key->ls[bit].mpls_ttl);
                                flow_mask->mpls_lse[bit] |=
                                        FIELD_PREP(OTX2_FLOWER_MASK_MPLS_TTL,
                                                   match.mask->ls[bit].mpls_ttl);
                        }
                }
        }

        if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_ICMP)) {
                struct flow_match_icmp match;

                flow_rule_match_icmp(rule, &match);

                flow_spec->icmp_type = match.key->type;
                flow_mask->icmp_type = match.mask->type;
                req->features |= BIT_ULL(NPC_TYPE_ICMP);

                flow_spec->icmp_code = match.key->code;
                flow_mask->icmp_code = match.mask->code;
                req->features |= BIT_ULL(NPC_CODE_ICMP);
        }
        return otx2_tc_parse_actions(nic, &rule->action, req, f, node);
}
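
/* Example of a match that stays within the dissector keys accepted above
 * (editorial illustration; device and address are placeholders):
 *
 *   tc filter add dev eth0 ingress protocol ip flower skip_sw \
 *       dst_ip 192.0.2.1 ip_proto tcp dst_port 80 action drop
 *
 * This sets NPC_ETYPE, the IPv4 address features, NPC_IPPROTO_TCP and
 * NPC_DPORT_TCP in req->features before the actions are parsed.
 */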

static void otx2_destroy_tc_flow_list(struct otx2_nic *pfvf)
{
        struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
        struct otx2_tc_flow *iter, *tmp;

        if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
                return;

        list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list_tc, list) {
                list_del(&iter->list);
                kfree(iter);
                flow_cfg->nr_flows--;
        }
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_cookie(struct otx2_flow_config *flow_cfg,
                                                        unsigned long cookie)
{
        struct otx2_tc_flow *tmp;

        list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
                if (tmp->cookie == cookie)
                        return tmp;
        }

        return NULL;
}

static struct otx2_tc_flow *otx2_tc_get_entry_by_index(struct otx2_flow_config *flow_cfg,
                                                       int index)
{
        struct otx2_tc_flow *tmp;
        int i = 0;

        list_for_each_entry(tmp, &flow_cfg->flow_list_tc, list) {
                if (i == index)
                        return tmp;
                i++;
        }

        return NULL;
}

static void otx2_tc_del_from_flow_list(struct otx2_flow_config *flow_cfg,
                                       struct otx2_tc_flow *node)
{
        struct list_head *pos, *n;
        struct otx2_tc_flow *tmp;

        list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
                tmp = list_entry(pos, struct otx2_tc_flow, list);
                if (node == tmp) {
                        list_del(&node->list);
                        return;
                }
        }
}

static int otx2_tc_add_to_flow_list(struct otx2_flow_config *flow_cfg,
                                    struct otx2_tc_flow *node)
{
        struct list_head *pos, *n;
        struct otx2_tc_flow *tmp;
        int index = 0;

        /* If the flow list is empty then add the new node */
        if (list_empty(&flow_cfg->flow_list_tc)) {
                list_add(&node->list, &flow_cfg->flow_list_tc);
                return index;
        }

        list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
                tmp = list_entry(pos, struct otx2_tc_flow, list);
                if (node->prio < tmp->prio)
                        break;
                index++;
        }

        list_add(&node->list, pos->prev);
        return index;
}
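
/* Editorial note: the list is kept sorted by ascending prio, and the return
 * value is the node's position after insertion. For example, with existing
 * prios {1, 3, 5}, adding prio 4 walks past 1 and 3, stops at 5, and inserts
 * at index 2; nodes with equal prio are placed after the existing ones.
 */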

static int otx2_add_mcam_flow_entry(struct otx2_nic *nic, struct npc_install_flow_req *req)
{
        struct npc_install_flow_req *tmp_req;
        int err;

        mutex_lock(&nic->mbox.lock);
        tmp_req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
        if (!tmp_req) {
                mutex_unlock(&nic->mbox.lock);
                return -ENOMEM;
        }

        memcpy(tmp_req, req, sizeof(struct npc_install_flow_req));
        /* Send message to AF */
        err = otx2_sync_mbox_msg(&nic->mbox);
        if (err) {
                netdev_err(nic->netdev, "Failed to install MCAM flow entry %d\n",
                           req->entry);
                mutex_unlock(&nic->mbox.lock);
                return -EFAULT;
        }

        mutex_unlock(&nic->mbox.lock);
        return 0;
}

static int otx2_del_mcam_flow_entry(struct otx2_nic *nic, u16 entry, u16 *cntr_val)
{
        struct npc_delete_flow_rsp *rsp;
        struct npc_delete_flow_req *req;
        int err;

        mutex_lock(&nic->mbox.lock);
        req = otx2_mbox_alloc_msg_npc_delete_flow(&nic->mbox);
        if (!req) {
                mutex_unlock(&nic->mbox.lock);
                return -ENOMEM;
        }

        req->entry = entry;

        /* Send message to AF */
        err = otx2_sync_mbox_msg(&nic->mbox);
        if (err) {
                netdev_err(nic->netdev, "Failed to delete MCAM flow entry %d\n",
                           entry);
                mutex_unlock(&nic->mbox.lock);
                return -EFAULT;
        }

        if (cntr_val) {
                rsp = (struct npc_delete_flow_rsp *)otx2_mbox_get_rsp(&nic->mbox.mbox,
                                                                      0, &req->hdr);
                if (IS_ERR(rsp)) {
                        netdev_err(nic->netdev, "Failed to get MCAM delete response for entry %d\n",
                                   entry);
                        mutex_unlock(&nic->mbox.lock);
                        return -EFAULT;
                }

                *cntr_val = rsp->cntr_val;
        }

        mutex_unlock(&nic->mbox.lock);
        return 0;
}

static int otx2_tc_update_mcam_table_del_req(struct otx2_nic *nic,
                                             struct otx2_flow_config *flow_cfg,
                                             struct otx2_tc_flow *node)
{
        struct list_head *pos, *n;
        struct otx2_tc_flow *tmp;
        int i = 0, index = 0;
        u16 cntr_val = 0;

        /* Find and delete the entry from the list and re-install
         * all the entries from beginning to the index of the
         * deleted entry to higher mcam indexes.
         */
        list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
                tmp = list_entry(pos, struct otx2_tc_flow, list);
                if (node == tmp) {
                        list_del(&tmp->list);
                        break;
                }

                otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
                tmp->entry++;
                tmp->req.entry = tmp->entry;
                tmp->req.cntr_val = cntr_val;
                index++;
        }

        list_for_each_safe(pos, n, &flow_cfg->flow_list_tc) {
                if (i == index)
                        break;

                tmp = list_entry(pos, struct otx2_tc_flow, list);
                otx2_add_mcam_flow_entry(nic, &tmp->req);
                i++;
        }

        return 0;
}

static int otx2_tc_update_mcam_table_add_req(struct otx2_nic *nic,
                                             struct otx2_flow_config *flow_cfg,
                                             struct otx2_tc_flow *node)
{
        int mcam_idx = flow_cfg->max_flows - flow_cfg->nr_flows - 1;
        struct otx2_tc_flow *tmp;
        int list_idx, i;
        u16 cntr_val = 0;

        /* Find the index of the entry(list_idx) whose priority
         * is greater than the new entry and re-install all
         * the entries from beginning to list_idx to higher
         * mcam indexes.
         */
        list_idx = otx2_tc_add_to_flow_list(flow_cfg, node);
        for (i = 0; i < list_idx; i++) {
                tmp = otx2_tc_get_entry_by_index(flow_cfg, i);
                if (!tmp)
                        return -ENOMEM;

                otx2_del_mcam_flow_entry(nic, tmp->entry, &cntr_val);
                tmp->entry = flow_cfg->flow_ent[mcam_idx];
                tmp->req.entry = tmp->entry;
                tmp->req.cntr_val = cntr_val;
                otx2_add_mcam_flow_entry(nic, &tmp->req);
                mcam_idx++;
        }

        return mcam_idx;
}
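
/* Editorial walk-through (illustrative numbers, not from the original
 * source): with max_flows = 8 and two existing flows of prio 10 and 20
 * sitting in flow_ent[6] and flow_ent[7], adding a prio 15 flow computes
 * mcam_idx = 8 - 2 - 1 = 5 and list_idx = 1. The prio 10 flow is deleted and
 * re-installed at flow_ent[5], and the returned index 6 becomes the new
 * flow's MCAM slot, preserving prio 10 < 15 < 20 across ascending MCAM
 * entries.
 */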

static int otx2_tc_update_mcam_table(struct otx2_nic *nic,
                                     struct otx2_flow_config *flow_cfg,
                                     struct otx2_tc_flow *node,
                                     bool add_req)
{
        if (add_req)
                return otx2_tc_update_mcam_table_add_req(nic, flow_cfg, node);

        return otx2_tc_update_mcam_table_del_req(nic, flow_cfg, node);
}

static int otx2_tc_del_flow(struct otx2_nic *nic,
                            struct flow_cls_offload *tc_flow_cmd)
{
        struct otx2_flow_config *flow_cfg = nic->flow_cfg;
        struct nix_mcast_grp_destroy_req *grp_destroy_req;
        struct otx2_tc_flow *flow_node;
        int err;

        flow_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
        if (!flow_node) {
                netdev_err(nic->netdev, "tc flow not found for cookie 0x%lx\n",
                           tc_flow_cmd->cookie);
                return -EINVAL;
        }

        if (flow_node->is_act_police) {
                __clear_bit(flow_node->rq, &nic->rq_bmap);

                if (nic->flags & OTX2_FLAG_INTF_DOWN)
                        goto free_mcam_flow;

                mutex_lock(&nic->mbox.lock);

                err = cn10k_map_unmap_rq_policer(nic, flow_node->rq,
                                                 flow_node->leaf_profile, false);
                if (err)
                        netdev_err(nic->netdev,
                                   "Unmapping RQ %d & profile %d failed\n",
                                   flow_node->rq, flow_node->leaf_profile);

                err = cn10k_free_leaf_profile(nic, flow_node->leaf_profile);
                if (err)
                        netdev_err(nic->netdev,
                                   "Unable to free leaf bandwidth profile(%d)\n",
                                   flow_node->leaf_profile);

                mutex_unlock(&nic->mbox.lock);
        }
        /* Remove the multicast/mirror related nodes */
        if (flow_node->mcast_grp_idx != MCAST_INVALID_GRP) {
                mutex_lock(&nic->mbox.lock);
                grp_destroy_req = otx2_mbox_alloc_msg_nix_mcast_grp_destroy(&nic->mbox);
                /* If the mbox allocation fails there is nothing to destroy;
                 * skip the message rather than dereference a NULL pointer.
                 */
                if (grp_destroy_req) {
                        grp_destroy_req->mcast_grp_idx = flow_node->mcast_grp_idx;
                        otx2_sync_mbox_msg(&nic->mbox);
                }
                mutex_unlock(&nic->mbox.lock);
        }

free_mcam_flow:
        otx2_del_mcam_flow_entry(nic, flow_node->entry, NULL);
        otx2_tc_update_mcam_table(nic, flow_cfg, flow_node, false);
        kfree_rcu(flow_node, rcu);
        flow_cfg->nr_flows--;
        return 0;
}

static int otx2_tc_add_flow(struct otx2_nic *nic,
                            struct flow_cls_offload *tc_flow_cmd)
{
        struct netlink_ext_ack *extack = tc_flow_cmd->common.extack;
        struct otx2_flow_config *flow_cfg = nic->flow_cfg;
        struct otx2_tc_flow *new_node, *old_node;
        struct npc_install_flow_req *req, dummy;
        int rc, err, mcam_idx;

        if (!(nic->flags & OTX2_FLAG_TC_FLOWER_SUPPORT))
                return -ENOMEM;

        if (nic->flags & OTX2_FLAG_INTF_DOWN) {
                NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
                return -EINVAL;
        }

        if (flow_cfg->nr_flows == flow_cfg->max_flows) {
                NL_SET_ERR_MSG_MOD(extack,
                                   "Free MCAM entry not available to add the flow");
                return -ENOMEM;
        }

        /* allocate memory for the new flow and its node */
        new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
        if (!new_node)
                return -ENOMEM;
        spin_lock_init(&new_node->lock);
        new_node->cookie = tc_flow_cmd->cookie;
        new_node->prio = tc_flow_cmd->common.prio;
        new_node->mcast_grp_idx = MCAST_INVALID_GRP;

        memset(&dummy, 0, sizeof(struct npc_install_flow_req));

        rc = otx2_tc_prepare_flow(nic, new_node, tc_flow_cmd, &dummy);
        if (rc) {
                kfree_rcu(new_node, rcu);
                return rc;
        }

        /* If a flow exists with the same cookie, delete it */
        old_node = otx2_tc_get_entry_by_cookie(flow_cfg, tc_flow_cmd->cookie);
        if (old_node)
                otx2_tc_del_flow(nic, tc_flow_cmd);

        mcam_idx = otx2_tc_update_mcam_table(nic, flow_cfg, new_node, true);
        mutex_lock(&nic->mbox.lock);
        req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
        if (!req) {
                mutex_unlock(&nic->mbox.lock);
                rc = -ENOMEM;
                goto free_leaf;
        }

        memcpy(&dummy.hdr, &req->hdr, sizeof(struct mbox_msghdr));
        memcpy(req, &dummy, sizeof(struct npc_install_flow_req));
        req->channel = nic->hw.rx_chan_base;
        req->entry = flow_cfg->flow_ent[mcam_idx];
        req->intf = NIX_INTF_RX;
        req->set_cntr = 1;
        new_node->entry = req->entry;

        /* Send message to AF */
        rc = otx2_sync_mbox_msg(&nic->mbox);
        if (rc) {
                NL_SET_ERR_MSG_MOD(extack, "Failed to install MCAM flow entry");
                mutex_unlock(&nic->mbox.lock);
                goto free_leaf;
        }

        mutex_unlock(&nic->mbox.lock);
        memcpy(&new_node->req, req, sizeof(struct npc_install_flow_req));

        flow_cfg->nr_flows++;
        return 0;

free_leaf:
        otx2_tc_del_from_flow_list(flow_cfg, new_node);
        if (new_node->is_act_police) {
                mutex_lock(&nic->mbox.lock);

                err = cn10k_map_unmap_rq_policer(nic, new_node->rq,
                                                 new_node->leaf_profile, false);
                if (err)
                        netdev_err(nic->netdev,
                                   "Unmapping RQ %d & profile %d failed\n",
                                   new_node->rq, new_node->leaf_profile);
                err = cn10k_free_leaf_profile(nic, new_node->leaf_profile);
                if (err)
                        netdev_err(nic->netdev,
                                   "Unable to free leaf bandwidth profile(%d)\n",
                                   new_node->leaf_profile);

                __clear_bit(new_node->rq, &nic->rq_bmap);

                mutex_unlock(&nic->mbox.lock);
        }
        /* Queue the node for freeing only after its fields are no longer used */
        kfree_rcu(new_node, rcu);

        return rc;
}
1321
1322 static int otx2_tc_get_flow_stats(struct otx2_nic *nic,
1323                                   struct flow_cls_offload *tc_flow_cmd)
1324 {
1325         struct npc_mcam_get_stats_req *req;
1326         struct npc_mcam_get_stats_rsp *rsp;
1327         struct otx2_tc_flow_stats *stats;
1328         struct otx2_tc_flow *flow_node;
1329         int err;
1330
1331         flow_node = otx2_tc_get_entry_by_cookie(nic->flow_cfg, tc_flow_cmd->cookie);
1332         if (!flow_node) {
1333                 netdev_info(nic->netdev, "tc flow not found for cookie %lx",
1334                             tc_flow_cmd->cookie);
1335                 return -EINVAL;
1336         }
1337
1338         mutex_lock(&nic->mbox.lock);
1339
1340         req = otx2_mbox_alloc_msg_npc_mcam_entry_stats(&nic->mbox);
1341         if (!req) {
1342                 mutex_unlock(&nic->mbox.lock);
1343                 return -ENOMEM;
1344         }
1345
1346         req->entry = flow_node->entry;
1347
1348         err = otx2_sync_mbox_msg(&nic->mbox);
1349         if (err) {
1350                 netdev_err(nic->netdev, "Failed to get stats for MCAM flow entry %d\n",
1351                            req->entry);
1352                 mutex_unlock(&nic->mbox.lock);
1353                 return -EFAULT;
1354         }
1355
1356         rsp = (struct npc_mcam_get_stats_rsp *)
1357                 otx2_mbox_get_rsp(&nic->mbox.mbox, 0, &req->hdr);
1358         if (IS_ERR(rsp)) {
1359                 mutex_unlock(&nic->mbox.lock);
1360                 return PTR_ERR(rsp);
1361         }
1362
1363         mutex_unlock(&nic->mbox.lock);
1364
1365         if (!rsp->stat_ena)
1366                 return -EINVAL;
1367
1368         stats = &flow_node->stats;
1369
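             /* rsp->stat is a cumulative packet count; report only the
              * packets seen since the last query and remember the new total.
              */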
1370         spin_lock(&flow_node->lock);
1371         flow_stats_update(&tc_flow_cmd->stats, 0x0, rsp->stat - stats->pkts, 0x0, 0x0,
1372                           FLOW_ACTION_HW_STATS_IMMEDIATE);
1373         stats->pkts = rsp->stat;
1374         spin_unlock(&flow_node->lock);
1375
1376         return 0;
1377 }
1378
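     /* Dispatch tc-flower commands to the add/delete/stats handlers above.
      * An illustrative rule that would be offloaded through this path
      * (the device name is a placeholder):
      *   tc qdisc add dev eth0 clsact
      *   tc filter add dev eth0 ingress protocol ip flower \
      *           dst_ip 10.0.0.2/32 skip_sw action drop
      */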
1379 static int otx2_setup_tc_cls_flower(struct otx2_nic *nic,
1380                                     struct flow_cls_offload *cls_flower)
1381 {
1382         switch (cls_flower->command) {
1383         case FLOW_CLS_REPLACE:
1384                 return otx2_tc_add_flow(nic, cls_flower);
1385         case FLOW_CLS_DESTROY:
1386                 return otx2_tc_del_flow(nic, cls_flower);
1387         case FLOW_CLS_STATS:
1388                 return otx2_tc_get_flow_stats(nic, cls_flower);
1389         default:
1390                 return -EOPNOTSUPP;
1391         }
1392 }
1393
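     /* Install a port-wide ingress rate limiter; only one matchall police
      * action can be offloaded per interface, and only on CN10K. An
      * illustrative command (the device name is a placeholder):
      *   tc filter add dev eth0 ingress matchall skip_sw \
      *           action police rate 100mbit burst 32k
      */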
1394 static int otx2_tc_ingress_matchall_install(struct otx2_nic *nic,
1395                                             struct tc_cls_matchall_offload *cls)
1396 {
1397         struct netlink_ext_ack *extack = cls->common.extack;
1398         struct flow_action *actions = &cls->rule->action;
1399         struct flow_action_entry *entry;
1400         u64 rate;
1401         int err;
1402
1403         err = otx2_tc_validate_flow(nic, actions, extack);
1404         if (err)
1405                 return err;
1406
1407         if (nic->flags & OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED) {
1408                 NL_SET_ERR_MSG_MOD(extack,
1409                                    "Only one ingress MATCHALL ratelimiter can be offloaded");
1410                 return -EBUSY;
1411         }
1412
1413         entry = &cls->rule->action.entries[0];
1414         switch (entry->id) {
1415         case FLOW_ACTION_POLICE:
1416                 /* Ingress ratelimiting is not supported on OcteonTx2 */
1417                 if (is_dev_otx2(nic->pdev)) {
1418                         NL_SET_ERR_MSG_MOD(extack,
1419                                            "Ingress policing not supported on this platform");
1420                         return -EOPNOTSUPP;
1421                 }
1422
1423                 err = cn10k_alloc_matchall_ipolicer(nic);
1424                 if (err)
1425                         return err;
1426
1427                 /* Convert to bits per second */
1428                 rate = entry->police.rate_bytes_ps * 8;
1429                 err = cn10k_set_matchall_ipolicer_rate(nic, entry->police.burst, rate);
1430                 if (err)
1431                         return err;
1432                 nic->flags |= OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
1433                 break;
1434         default:
1435                 NL_SET_ERR_MSG_MOD(extack,
1436                                    "Only police action supported with Ingress MATCHALL offload");
1437                 return -EOPNOTSUPP;
1438         }
1439
1440         return 0;
1441 }
1442
1443 static int otx2_tc_ingress_matchall_delete(struct otx2_nic *nic,
1444                                            struct tc_cls_matchall_offload *cls)
1445 {
1446         struct netlink_ext_ack *extack = cls->common.extack;
1447         int err;
1448
1449         if (nic->flags & OTX2_FLAG_INTF_DOWN) {
1450                 NL_SET_ERR_MSG_MOD(extack, "Interface not initialized");
1451                 return -EINVAL;
1452         }
1453
1454         err = cn10k_free_matchall_ipolicer(nic);
1455         nic->flags &= ~OTX2_FLAG_TC_MATCHALL_INGRESS_ENABLED;
1456         return err;
1457 }
1458
1459 static int otx2_setup_tc_ingress_matchall(struct otx2_nic *nic,
1460                                           struct tc_cls_matchall_offload *cls_matchall)
1461 {
1462         switch (cls_matchall->command) {
1463         case TC_CLSMATCHALL_REPLACE:
1464                 return otx2_tc_ingress_matchall_install(nic, cls_matchall);
1465         case TC_CLSMATCHALL_DESTROY:
1466                 return otx2_tc_ingress_matchall_delete(nic, cls_matchall);
1467         case TC_CLSMATCHALL_STATS:
1468         default:
1469                 break;
1470         }
1471
1472         return -EOPNOTSUPP;
1473 }
1474
1475 static int otx2_setup_tc_block_ingress_cb(enum tc_setup_type type,
1476                                           void *type_data, void *cb_priv)
1477 {
1478         struct otx2_nic *nic = cb_priv;
1479         bool ntuple;
1480
1481         if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
1482                 return -EOPNOTSUPP;
1483
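             /* TC flower rules and ntuple filters are carved from the same
              * NPC MCAM resources, so the two features are mutually
              * exclusive on this interface.
              */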
1484         ntuple = nic->netdev->features & NETIF_F_NTUPLE;
1485         switch (type) {
1486         case TC_SETUP_CLSFLOWER:
1487                 if (ntuple) {
1488                         netdev_warn(nic->netdev,
1489                                     "Can't install TC flower offload rule when NTUPLE is active");
1490                         return -EOPNOTSUPP;
1491                 }
1492
1493                 return otx2_setup_tc_cls_flower(nic, type_data);
1494         case TC_SETUP_CLSMATCHALL:
1495                 return otx2_setup_tc_ingress_matchall(nic, type_data);
1496         default:
1497                 break;
1498         }
1499
1500         return -EOPNOTSUPP;
1501 }
1502
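     /* Egress matchall rate-limits all transmit traffic on the port. An
      * illustrative command (the device name is a placeholder):
      *   tc filter add dev eth0 egress matchall skip_sw \
      *           action police rate 1gbit burst 64k
      */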
1503 static int otx2_setup_tc_egress_matchall(struct otx2_nic *nic,
1504                                          struct tc_cls_matchall_offload *cls_matchall)
1505 {
1506         switch (cls_matchall->command) {
1507         case TC_CLSMATCHALL_REPLACE:
1508                 return otx2_tc_egress_matchall_install(nic, cls_matchall);
1509         case TC_CLSMATCHALL_DESTROY:
1510                 return otx2_tc_egress_matchall_delete(nic, cls_matchall);
1511         case TC_CLSMATCHALL_STATS:
1512         default:
1513                 break;
1514         }
1515
1516         return -EOPNOTSUPP;
1517 }
1518
1519 static int otx2_setup_tc_block_egress_cb(enum tc_setup_type type,
1520                                          void *type_data, void *cb_priv)
1521 {
1522         struct otx2_nic *nic = cb_priv;
1523
1524         if (!tc_cls_can_offload_and_chain0(nic->netdev, type_data))
1525                 return -EOPNOTSUPP;
1526
1527         switch (type) {
1528         case TC_SETUP_CLSMATCHALL:
1529                 return otx2_setup_tc_egress_matchall(nic, type_data);
1530         default:
1531                 break;
1532         }
1533
1534         return -EOPNOTSUPP;
1535 }
1536
1537 static LIST_HEAD(otx2_block_cb_list);
1538
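     /* Bind the flow block to the ingress or egress callback based on the
      * binder type; shared blocks are not supported by this driver.
      */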
1539 static int otx2_setup_tc_block(struct net_device *netdev,
1540                                struct flow_block_offload *f)
1541 {
1542         struct otx2_nic *nic = netdev_priv(netdev);
1543         flow_setup_cb_t *cb;
1544         bool ingress;
1545
1546         if (f->block_shared)
1547                 return -EOPNOTSUPP;
1548
1549         if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS) {
1550                 cb = otx2_setup_tc_block_ingress_cb;
1551                 ingress = true;
1552         } else if (f->binder_type == FLOW_BLOCK_BINDER_TYPE_CLSACT_EGRESS) {
1553                 cb = otx2_setup_tc_block_egress_cb;
1554                 ingress = false;
1555         } else {
1556                 return -EOPNOTSUPP;
1557         }
1558
1559         return flow_block_cb_setup_simple(f, &otx2_block_cb_list, cb,
1560                                           nic, nic, ingress);
1561 }
1562
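     /* ndo_setup_tc entry point: flow blocks (flower/matchall) and
      * offloaded HTB are supported. An illustrative HTB offload command
      * (the device name is a placeholder):
      *   tc qdisc replace dev eth0 root handle 1: htb offload
      */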
1563 int otx2_setup_tc(struct net_device *netdev, enum tc_setup_type type,
1564                   void *type_data)
1565 {
1566         switch (type) {
1567         case TC_SETUP_BLOCK:
1568                 return otx2_setup_tc_block(netdev, type_data);
1569         case TC_SETUP_QDISC_HTB:
1570                 return otx2_setup_tc_htb(netdev, type_data);
1571         default:
1572                 return -EOPNOTSUPP;
1573         }
1574 }
1575 EXPORT_SYMBOL(otx2_setup_tc);
1576
1577 int otx2_init_tc(struct otx2_nic *nic)
1578 {
1579         /* Exclude receive queue 0 from being used for police actions */
1580         set_bit(0, &nic->rq_bmap);
1581
1582         if (!nic->flow_cfg) {
1583                 netdev_err(nic->netdev,
1584                            "Can't init TC, nic->flow_cfg is not setup\n");
1585                 return -EINVAL;
1586         }
1587
1588         return 0;
1589 }
1590 EXPORT_SYMBOL(otx2_init_tc);
1591
1592 void otx2_shutdown_tc(struct otx2_nic *nic)
1593 {
1594         otx2_destroy_tc_flow_list(nic);
1595 }
1596 EXPORT_SYMBOL(otx2_shutdown_tc);
1597
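     /* Replay one stored ingress police rule once the RQs exist:
      * re-program the hardware policer first and, only if that succeeds,
      * re-install the saved MCAM flow entry via the AF mailbox.
      */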
1598 static void otx2_tc_config_ingress_rule(struct otx2_nic *nic,
1599                                         struct otx2_tc_flow *node)
1600 {
1601         struct npc_install_flow_req *req;
1602
1603         if (otx2_tc_act_set_hw_police(nic, node))
1604                 return;
1605
1606         mutex_lock(&nic->mbox.lock);
1607
1608         req = otx2_mbox_alloc_msg_npc_install_flow(&nic->mbox);
1609         if (!req)
1610                 goto err;
1611
1612         memcpy(req, &node->req, sizeof(struct npc_install_flow_req));
1613
1614         if (otx2_sync_mbox_msg(&nic->mbox))
1615                 netdev_err(nic->netdev,
1616                            "Failed to install MCAM flow entry for ingress rule\n");
1617 err:
1618         mutex_unlock(&nic->mbox.lock);
1619 }
1620
1621 void otx2_tc_apply_ingress_police_rules(struct otx2_nic *nic)
1622 {
1623         struct otx2_flow_config *flow_cfg = nic->flow_cfg;
1624         struct otx2_tc_flow *node;
1625
1626         /* If any ingress policer rules exist for the interface, apply
1627          * them now. Ingress policer rules depend on bandwidth profiles
1628          * linked to receive queues. Since no receive queues exist while
1629          * the interface is down, ingress policer rules are stored and
1630          * configured in hardware only after all receive queues have been
1631          * allocated in otx2_open.
1632          */
1633         list_for_each_entry(node, &flow_cfg->flow_list_tc, list) {
1634                 if (node->is_act_police)
1635                         otx2_tc_config_ingress_rule(nic, node);
1636         }
1637 }
1638 EXPORT_SYMBOL(otx2_tc_apply_ingress_police_rules);