GNU Linux-libre 4.19.263-gnu1
[releases.git] / drivers / net / ethernet / netronome / nfp / flower / action.c
1 /*
2  * Copyright (C) 2017 Netronome Systems, Inc.
3  *
4  * This software is dual licensed under the GNU General License Version 2,
5  * June 1991 as shown in the file COPYING in the top-level directory of this
6  * source tree or the BSD 2-Clause License provided below.  You have the
7  * option to license this software under the complete terms of either license.
8  *
9  * The BSD 2-Clause License:
10  *
11  *     Redistribution and use in source and binary forms, with or
12  *     without modification, are permitted provided that the following
13  *     conditions are met:
14  *
15  *      1. Redistributions of source code must retain the above
16  *         copyright notice, this list of conditions and the following
17  *         disclaimer.
18  *
19  *      2. Redistributions in binary form must reproduce the above
20  *         copyright notice, this list of conditions and the following
21  *         disclaimer in the documentation and/or other materials
22  *         provided with the distribution.
23  *
24  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
25  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
26  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
27  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
28  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
29  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
30  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
31  * SOFTWARE.
32  */
33
34 #include <linux/bitfield.h>
35 #include <net/geneve.h>
36 #include <net/pkt_cls.h>
37 #include <net/switchdev.h>
38 #include <net/tc_act/tc_csum.h>
39 #include <net/tc_act/tc_gact.h>
40 #include <net/tc_act/tc_mirred.h>
41 #include <net/tc_act/tc_pedit.h>
42 #include <net/tc_act/tc_vlan.h>
43 #include <net/tc_act/tc_tunnel_key.h>
44
45 #include "cmsg.h"
46 #include "main.h"
47 #include "../nfp_net_repr.h"
48
49 /* The kernel versions of TUNNEL_* are not ABI and therefore vulnerable
50  * to change. Such changes will break our FW ABI.
51  */
52 #define NFP_FL_TUNNEL_CSUM                      cpu_to_be16(0x01)
53 #define NFP_FL_TUNNEL_KEY                       cpu_to_be16(0x04)
54 #define NFP_FL_TUNNEL_GENEVE_OPT                cpu_to_be16(0x0800)
55 #define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS      IP_TUNNEL_INFO_TX
56 #define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS     (NFP_FL_TUNNEL_CSUM | \
57                                                  NFP_FL_TUNNEL_KEY | \
58                                                  NFP_FL_TUNNEL_GENEVE_OPT)
59
60 static void nfp_fl_pop_vlan(struct nfp_fl_pop_vlan *pop_vlan)
61 {
62         size_t act_size = sizeof(struct nfp_fl_pop_vlan);
63
64         pop_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_POP_VLAN;
65         pop_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
66         pop_vlan->reserved = 0;
67 }
68
69 static void
70 nfp_fl_push_vlan(struct nfp_fl_push_vlan *push_vlan,
71                  const struct tc_action *action)
72 {
73         size_t act_size = sizeof(struct nfp_fl_push_vlan);
74         u16 tmp_push_vlan_tci;
75
76         push_vlan->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_VLAN;
77         push_vlan->head.len_lw = act_size >> NFP_FL_LW_SIZ;
78         push_vlan->reserved = 0;
79         push_vlan->vlan_tpid = tcf_vlan_push_proto(action);
80
81         tmp_push_vlan_tci =
82                 FIELD_PREP(NFP_FL_PUSH_VLAN_PRIO, tcf_vlan_push_prio(action)) |
83                 FIELD_PREP(NFP_FL_PUSH_VLAN_VID, tcf_vlan_push_vid(action));
84         push_vlan->vlan_tci = cpu_to_be16(tmp_push_vlan_tci);
85 }
86
87 static int
88 nfp_fl_pre_lag(struct nfp_app *app, const struct tc_action *action,
89                struct nfp_fl_payload *nfp_flow, int act_len)
90 {
91         size_t act_size = sizeof(struct nfp_fl_pre_lag);
92         struct nfp_fl_pre_lag *pre_lag;
93         struct net_device *out_dev;
94         int err;
95
96         out_dev = tcf_mirred_dev(action);
97         if (!out_dev || !netif_is_lag_master(out_dev))
98                 return 0;
99
100         if (act_len + act_size > NFP_FL_MAX_A_SIZ)
101                 return -EOPNOTSUPP;
102
103         /* Pre_lag action must be first on action list.
104          * If other actions already exist they need pushed forward.
105          */
106         if (act_len)
107                 memmove(nfp_flow->action_data + act_size,
108                         nfp_flow->action_data, act_len);
109
110         pre_lag = (struct nfp_fl_pre_lag *)nfp_flow->action_data;
111         err = nfp_flower_lag_populate_pre_action(app, out_dev, pre_lag);
112         if (err)
113                 return err;
114
115         pre_lag->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_LAG;
116         pre_lag->head.len_lw = act_size >> NFP_FL_LW_SIZ;
117
118         nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
119
120         return act_size;
121 }
122
123 static bool nfp_fl_netdev_is_tunnel_type(struct net_device *out_dev,
124                                          enum nfp_flower_tun_type tun_type)
125 {
126         if (!out_dev->rtnl_link_ops)
127                 return false;
128
129         if (!strcmp(out_dev->rtnl_link_ops->kind, "vxlan"))
130                 return tun_type == NFP_FL_TUNNEL_VXLAN;
131
132         if (!strcmp(out_dev->rtnl_link_ops->kind, "geneve"))
133                 return tun_type == NFP_FL_TUNNEL_GENEVE;
134
135         return false;
136 }
137
/* nfp_fl_output() - build an OUTPUT firmware action.
 * @app:	nfp app backing this flower instance
 * @output:	pre-allocated slot in the action list to fill in
 * @action:	TC mirred action naming the egress device
 * @nfp_flow:	flow payload whose shortcut metadata is updated
 * @last:	true if this is the final output action of the flow
 * @in_dev:	ingress netdev the flow was installed on
 * @tun_type:	tunnel type selected earlier in the action list, or none
 * @tun_out_cnt: running count of tunnel outputs (FW allows at most one)
 *
 * Return: 0 on success, negative errno if the egress device cannot be
 * offloaded.
 */
static int
nfp_fl_output(struct nfp_app *app, struct nfp_fl_output *output,
	      const struct tc_action *action, struct nfp_fl_payload *nfp_flow,
	      bool last, struct net_device *in_dev,
	      enum nfp_flower_tun_type tun_type, int *tun_out_cnt)
{
	size_t act_size = sizeof(struct nfp_fl_output);
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *out_dev;
	u16 tmp_flags;

	output->head.jump_id = NFP_FL_ACTION_OPCODE_OUTPUT;
	output->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	out_dev = tcf_mirred_dev(action);
	if (!out_dev)
		return -EOPNOTSUPP;

	tmp_flags = last ? NFP_FL_OUT_FLAGS_LAST : 0;

	if (tun_type) {
		/* Verify the egress netdev matches the tunnel type. */
		if (!nfp_fl_netdev_is_tunnel_type(out_dev, tun_type))
			return -EOPNOTSUPP;

		/* Only a single tunnel output is supported per flow. */
		if (*tun_out_cnt)
			return -EOPNOTSUPP;
		(*tun_out_cnt)++;

		output->flags = cpu_to_be16(tmp_flags |
					    NFP_FL_OUT_FLAGS_USE_TUN);
		output->port = cpu_to_be32(NFP_FL_PORT_TYPE_TUN | tun_type);
	} else if (netif_is_lag_master(out_dev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
		int gid;

		output->flags = cpu_to_be16(tmp_flags);
		/* LAG egress is encoded as a group id, not a port id. */
		gid = nfp_flower_lag_get_output_id(app, out_dev);
		if (gid < 0)
			return gid;
		output->port = cpu_to_be32(NFP_FL_LAG_OUT | gid);
	} else {
		/* Set action output parameters. */
		output->flags = cpu_to_be16(tmp_flags);

		/* Only offload if egress ports are on the same device as the
		 * ingress port.
		 */
		if (!switchdev_port_same_parent_id(in_dev, out_dev))
			return -EOPNOTSUPP;
		if (!nfp_netdev_is_nfp_repr(out_dev))
			return -EOPNOTSUPP;

		output->port = cpu_to_be32(nfp_repr_get_port_id(out_dev));
		if (!output->port)
			return -EOPNOTSUPP;
	}
	nfp_flow->meta.shortcut = output->port;

	return 0;
}
199
200 static enum nfp_flower_tun_type
201 nfp_fl_get_tun_from_act_l4_port(struct nfp_app *app,
202                                 const struct tc_action *action)
203 {
204         struct ip_tunnel_info *tun = tcf_tunnel_info(action);
205         struct nfp_flower_priv *priv = app->priv;
206
207         switch (tun->key.tp_dst) {
208         case htons(NFP_FL_VXLAN_PORT):
209                 return NFP_FL_TUNNEL_VXLAN;
210         case htons(NFP_FL_GENEVE_PORT):
211                 if (priv->flower_ext_feats & NFP_FL_FEATS_GENEVE)
212                         return NFP_FL_TUNNEL_GENEVE;
213                 /* FALLTHROUGH */
214         default:
215                 return NFP_FL_TUNNEL_NONE;
216         }
217 }
218
219 static struct nfp_fl_pre_tunnel *nfp_fl_pre_tunnel(char *act_data, int act_len)
220 {
221         size_t act_size = sizeof(struct nfp_fl_pre_tunnel);
222         struct nfp_fl_pre_tunnel *pre_tun_act;
223
224         /* Pre_tunnel action must be first on action list.
225          * If other actions already exist they need to be pushed forward.
226          */
227         if (act_len)
228                 memmove(act_data + act_size, act_data, act_len);
229
230         pre_tun_act = (struct nfp_fl_pre_tunnel *)act_data;
231
232         memset(pre_tun_act, 0, act_size);
233
234         pre_tun_act->head.jump_id = NFP_FL_ACTION_OPCODE_PRE_TUNNEL;
235         pre_tun_act->head.len_lw = act_size >> NFP_FL_LW_SIZ;
236
237         return pre_tun_act;
238 }
239
/* nfp_fl_push_geneve_options() - emit PUSH_GENEVE actions for tun options.
 * @nfp_fl:	flow payload holding the action list
 * @list_len:	running action list length, increased by the pushed size
 * @action:	TC tunnel_set action carrying the geneve options
 *
 * Return: 0 on success, -EOPNOTSUPP if option count/size limits are hit.
 */
static int
nfp_fl_push_geneve_options(struct nfp_fl_payload *nfp_fl, int *list_len,
			   const struct tc_action *action)
{
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	int opt_len, opt_cnt, act_start, tot_push_len;
	u8 *src = ip_tunnel_info_opts(ip_tun);

	/* We need to populate the options in reverse order for HW.
	 * Therefore we go through the options, calculating the
	 * number of options and the total size, then we populate
	 * them in reverse order in the action list.
	 */
	opt_cnt = 0;
	tot_push_len = 0;
	opt_len = ip_tun->options_len;
	while (opt_len > 0) {
		struct geneve_opt *opt = (struct geneve_opt *)src;

		opt_cnt++;
		if (opt_cnt > NFP_FL_MAX_GENEVE_OPT_CNT)
			return -EOPNOTSUPP;

		/* opt->length counts 4-byte units of option data. */
		tot_push_len += sizeof(struct nfp_fl_push_geneve) +
			       opt->length * 4;
		if (tot_push_len > NFP_FL_MAX_GENEVE_OPT_ACT)
			return -EOPNOTSUPP;

		opt_len -= sizeof(struct geneve_opt) + opt->length * 4;
		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	if (*list_len + tot_push_len > NFP_FL_MAX_A_SIZ)
		return -EOPNOTSUPP;

	/* Second pass: walk the options forward but place each action at a
	 * decreasing offset, so they end up reversed in the action list.
	 */
	act_start = *list_len;
	*list_len += tot_push_len;
	src = ip_tunnel_info_opts(ip_tun);
	while (opt_cnt) {
		struct geneve_opt *opt = (struct geneve_opt *)src;
		struct nfp_fl_push_geneve *push;
		size_t act_size, len;

		opt_cnt--;
		act_size = sizeof(struct nfp_fl_push_geneve) + opt->length * 4;
		tot_push_len -= act_size;
		/* Offset of this action within the action list. */
		len = act_start + tot_push_len;

		push = (struct nfp_fl_push_geneve *)&nfp_fl->action_data[len];
		push->head.jump_id = NFP_FL_ACTION_OPCODE_PUSH_GENEVE;
		push->head.len_lw = act_size >> NFP_FL_LW_SIZ;
		push->reserved = 0;
		push->class = opt->opt_class;
		push->type = opt->type;
		push->length = opt->length;
		memcpy(&push->opt_data, opt->opt_data, opt->length * 4);

		src += sizeof(struct geneve_opt) + opt->length * 4;
	}

	return 0;
}
302
/* nfp_fl_set_ipv4_udp_tun() - build a SET_IPV4_TUNNEL firmware action.
 * @app:	nfp app backing this flower instance
 * @set_tun:	pre-allocated slot in the action list to fill in
 * @action:	TC tunnel_set action describing the encap parameters
 * @pre_tun:	previously emitted pre-tunnel action; its destination IP
 *		is completed here
 * @tun_type:	tunnel type derived from the action's destination port
 * @netdev:	ingress netdev, used for the TTL route-lookup namespace
 *
 * Return: 0 on success, -EOPNOTSUPP for unsupported flags or options.
 */
static int
nfp_fl_set_ipv4_udp_tun(struct nfp_app *app,
			struct nfp_fl_set_ipv4_udp_tun *set_tun,
			const struct tc_action *action,
			struct nfp_fl_pre_tunnel *pre_tun,
			enum nfp_flower_tun_type tun_type,
			struct net_device *netdev)
{
	size_t act_size = sizeof(struct nfp_fl_set_ipv4_udp_tun);
	struct ip_tunnel_info *ip_tun = tcf_tunnel_info(action);
	struct nfp_flower_priv *priv = app->priv;
	u32 tmp_set_ip_tun_type_index = 0;
	/* Currently support one pre-tunnel so index is always 0. */
	int pretun_idx = 0;

	/* The FW ABI mirrors the kernel TUNNEL_* bits; catch any drift. */
	BUILD_BUG_ON(NFP_FL_TUNNEL_CSUM != TUNNEL_CSUM ||
		     NFP_FL_TUNNEL_KEY  != TUNNEL_KEY ||
		     NFP_FL_TUNNEL_GENEVE_OPT != TUNNEL_GENEVE_OPT);
	/* Tunnel options require geneve plus FW option support. */
	if (ip_tun->options_len &&
	    (tun_type != NFP_FL_TUNNEL_GENEVE ||
	    !(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT)))
		return -EOPNOTSUPP;

	set_tun->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_TUNNEL;
	set_tun->head.len_lw = act_size >> NFP_FL_LW_SIZ;

	/* Set tunnel type and pre-tunnel index. */
	tmp_set_ip_tun_type_index |=
		FIELD_PREP(NFP_FL_IPV4_TUNNEL_TYPE, tun_type) |
		FIELD_PREP(NFP_FL_IPV4_PRE_TUN_INDEX, pretun_idx);

	set_tun->tun_type_index = cpu_to_be32(tmp_set_ip_tun_type_index);
	set_tun->tun_id = ip_tun->key.tun_id;

	if (ip_tun->key.ttl) {
		set_tun->ttl = ip_tun->key.ttl;
	} else {
		struct net *net = dev_net(netdev);
		struct flowi4 flow = {};
		struct rtable *rt;
		int err;

		/* Do a route lookup to determine ttl - if fails then use
		 * default. Note that CONFIG_INET is a requirement of
		 * CONFIG_NET_SWITCHDEV so must be defined here.
		 */
		flow.daddr = ip_tun->key.u.ipv4.dst;
		flow.flowi4_proto = IPPROTO_UDP;
		rt = ip_route_output_key(net, &flow);
		err = PTR_ERR_OR_ZERO(rt);
		if (!err) {
			set_tun->ttl = ip4_dst_hoplimit(&rt->dst);
			ip_rt_put(rt);
		} else {
			set_tun->ttl = net->ipv4.sysctl_ip_default_ttl;
		}
	}

	set_tun->tos = ip_tun->key.tos;

	/* The tunnel key must be requested and no unsupported flag set. */
	if (!(ip_tun->key.tun_flags & NFP_FL_TUNNEL_KEY) ||
	    ip_tun->key.tun_flags & ~NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS)
		return -EOPNOTSUPP;
	set_tun->tun_flags = ip_tun->key.tun_flags;

	if (tun_type == NFP_FL_TUNNEL_GENEVE) {
		set_tun->tun_proto = htons(ETH_P_TEB);
		/* Option length is carried in 4-byte words. */
		set_tun->tun_len = ip_tun->options_len / 4;
	}

	/* Complete pre_tunnel action. */
	pre_tun->ipv4_dst = ip_tun->key.u.ipv4.dst;

	return 0;
}
378
379 static void nfp_fl_set_helper32(u32 value, u32 mask, u8 *p_exact, u8 *p_mask)
380 {
381         u32 oldvalue = get_unaligned((u32 *)p_exact);
382         u32 oldmask = get_unaligned((u32 *)p_mask);
383
384         value &= mask;
385         value |= oldvalue & ~mask;
386
387         put_unaligned(oldmask | mask, (u32 *)p_mask);
388         put_unaligned(value, (u32 *)p_exact);
389 }
390
391 static int
392 nfp_fl_set_eth(const struct tc_action *action, int idx, u32 off,
393                struct nfp_fl_set_eth *set_eth)
394 {
395         u32 exact, mask;
396
397         if (off + 4 > ETH_ALEN * 2)
398                 return -EOPNOTSUPP;
399
400         mask = ~tcf_pedit_mask(action, idx);
401         exact = tcf_pedit_val(action, idx);
402
403         if (exact & ~mask)
404                 return -EOPNOTSUPP;
405
406         nfp_fl_set_helper32(exact, mask, &set_eth->eth_addr_val[off],
407                             &set_eth->eth_addr_mask[off]);
408
409         set_eth->reserved = cpu_to_be16(0);
410         set_eth->head.jump_id = NFP_FL_ACTION_OPCODE_SET_ETHERNET;
411         set_eth->head.len_lw = sizeof(*set_eth) >> NFP_FL_LW_SIZ;
412
413         return 0;
414 }
415
416 static int
417 nfp_fl_set_ip4(const struct tc_action *action, int idx, u32 off,
418                struct nfp_fl_set_ip4_addrs *set_ip_addr)
419 {
420         __be32 exact, mask;
421
422         /* We are expecting tcf_pedit to return a big endian value */
423         mask = (__force __be32)~tcf_pedit_mask(action, idx);
424         exact = (__force __be32)tcf_pedit_val(action, idx);
425
426         if (exact & ~mask)
427                 return -EOPNOTSUPP;
428
429         switch (off) {
430         case offsetof(struct iphdr, daddr):
431                 set_ip_addr->ipv4_dst_mask |= mask;
432                 set_ip_addr->ipv4_dst &= ~mask;
433                 set_ip_addr->ipv4_dst |= exact & mask;
434                 break;
435         case offsetof(struct iphdr, saddr):
436                 set_ip_addr->ipv4_src_mask |= mask;
437                 set_ip_addr->ipv4_src &= ~mask;
438                 set_ip_addr->ipv4_src |= exact & mask;
439                 break;
440         default:
441                 return -EOPNOTSUPP;
442         }
443
444         set_ip_addr->reserved = cpu_to_be16(0);
445         set_ip_addr->head.jump_id = NFP_FL_ACTION_OPCODE_SET_IPV4_ADDRS;
446         set_ip_addr->head.len_lw = sizeof(*set_ip_addr) >> NFP_FL_LW_SIZ;
447
448         return 0;
449 }
450
451 static void
452 nfp_fl_set_ip6_helper(int opcode_tag, u8 word, __be32 exact, __be32 mask,
453                       struct nfp_fl_set_ipv6_addr *ip6)
454 {
455         ip6->ipv6[word].mask |= mask;
456         ip6->ipv6[word].exact &= ~mask;
457         ip6->ipv6[word].exact |= exact & mask;
458
459         ip6->reserved = cpu_to_be16(0);
460         ip6->head.jump_id = opcode_tag;
461         ip6->head.len_lw = sizeof(*ip6) >> NFP_FL_LW_SIZ;
462 }
463
464 static int
465 nfp_fl_set_ip6(const struct tc_action *action, int idx, u32 off,
466                struct nfp_fl_set_ipv6_addr *ip_dst,
467                struct nfp_fl_set_ipv6_addr *ip_src)
468 {
469         __be32 exact, mask;
470         u8 word;
471
472         /* We are expecting tcf_pedit to return a big endian value */
473         mask = (__force __be32)~tcf_pedit_mask(action, idx);
474         exact = (__force __be32)tcf_pedit_val(action, idx);
475
476         if (exact & ~mask)
477                 return -EOPNOTSUPP;
478
479         if (off < offsetof(struct ipv6hdr, saddr)) {
480                 return -EOPNOTSUPP;
481         } else if (off < offsetof(struct ipv6hdr, daddr)) {
482                 word = (off - offsetof(struct ipv6hdr, saddr)) / sizeof(exact);
483                 nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_SRC, word,
484                                       exact, mask, ip_src);
485         } else if (off < offsetof(struct ipv6hdr, daddr) +
486                        sizeof(struct in6_addr)) {
487                 word = (off - offsetof(struct ipv6hdr, daddr)) / sizeof(exact);
488                 nfp_fl_set_ip6_helper(NFP_FL_ACTION_OPCODE_SET_IPV6_DST, word,
489                                       exact, mask, ip_dst);
490         } else {
491                 return -EOPNOTSUPP;
492         }
493
494         return 0;
495 }
496
497 static int
498 nfp_fl_set_tport(const struct tc_action *action, int idx, u32 off,
499                  struct nfp_fl_set_tport *set_tport, int opcode)
500 {
501         u32 exact, mask;
502
503         if (off)
504                 return -EOPNOTSUPP;
505
506         mask = ~tcf_pedit_mask(action, idx);
507         exact = tcf_pedit_val(action, idx);
508
509         if (exact & ~mask)
510                 return -EOPNOTSUPP;
511
512         nfp_fl_set_helper32(exact, mask, set_tport->tp_port_val,
513                             set_tport->tp_port_mask);
514
515         set_tport->reserved = cpu_to_be16(0);
516         set_tport->head.jump_id = opcode;
517         set_tport->head.len_lw = sizeof(*set_tport) >> NFP_FL_LW_SIZ;
518
519         return 0;
520 }
521
522 static u32 nfp_fl_csum_l4_to_flag(u8 ip_proto)
523 {
524         switch (ip_proto) {
525         case 0:
526                 /* Filter doesn't force proto match,
527                  * both TCP and UDP will be updated if encountered
528                  */
529                 return TCA_CSUM_UPDATE_FLAG_TCP | TCA_CSUM_UPDATE_FLAG_UDP;
530         case IPPROTO_TCP:
531                 return TCA_CSUM_UPDATE_FLAG_TCP;
532         case IPPROTO_UDP:
533                 return TCA_CSUM_UPDATE_FLAG_UDP;
534         default:
535                 /* All other protocols will be ignored by FW */
536                 return 0;
537         }
538 }
539
540 static int
541 nfp_fl_pedit(const struct tc_action *action, struct tc_cls_flower_offload *flow,
542              char *nfp_action, int *a_len, u32 *csum_updated)
543 {
544         struct nfp_fl_set_ipv6_addr set_ip6_dst, set_ip6_src;
545         struct nfp_fl_set_ip4_addrs set_ip_addr;
546         struct nfp_fl_set_tport set_tport;
547         struct nfp_fl_set_eth set_eth;
548         enum pedit_header_type htype;
549         int idx, nkeys, err;
550         size_t act_size = 0;
551         u32 offset, cmd;
552         u8 ip_proto = 0;
553
554         memset(&set_ip6_dst, 0, sizeof(set_ip6_dst));
555         memset(&set_ip6_src, 0, sizeof(set_ip6_src));
556         memset(&set_ip_addr, 0, sizeof(set_ip_addr));
557         memset(&set_tport, 0, sizeof(set_tport));
558         memset(&set_eth, 0, sizeof(set_eth));
559         nkeys = tcf_pedit_nkeys(action);
560
561         for (idx = 0; idx < nkeys; idx++) {
562                 cmd = tcf_pedit_cmd(action, idx);
563                 htype = tcf_pedit_htype(action, idx);
564                 offset = tcf_pedit_offset(action, idx);
565
566                 if (cmd != TCA_PEDIT_KEY_EX_CMD_SET)
567                         return -EOPNOTSUPP;
568
569                 switch (htype) {
570                 case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH:
571                         err = nfp_fl_set_eth(action, idx, offset, &set_eth);
572                         break;
573                 case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4:
574                         err = nfp_fl_set_ip4(action, idx, offset, &set_ip_addr);
575                         break;
576                 case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6:
577                         err = nfp_fl_set_ip6(action, idx, offset, &set_ip6_dst,
578                                              &set_ip6_src);
579                         break;
580                 case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP:
581                         err = nfp_fl_set_tport(action, idx, offset, &set_tport,
582                                                NFP_FL_ACTION_OPCODE_SET_TCP);
583                         break;
584                 case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP:
585                         err = nfp_fl_set_tport(action, idx, offset, &set_tport,
586                                                NFP_FL_ACTION_OPCODE_SET_UDP);
587                         break;
588                 default:
589                         return -EOPNOTSUPP;
590                 }
591                 if (err)
592                         return err;
593         }
594
595         if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
596                 struct flow_dissector_key_basic *basic;
597
598                 basic = skb_flow_dissector_target(flow->dissector,
599                                                   FLOW_DISSECTOR_KEY_BASIC,
600                                                   flow->key);
601                 ip_proto = basic->ip_proto;
602         }
603
604         if (set_eth.head.len_lw) {
605                 act_size = sizeof(set_eth);
606                 memcpy(nfp_action, &set_eth, act_size);
607                 *a_len += act_size;
608         }
609         if (set_ip_addr.head.len_lw) {
610                 nfp_action += act_size;
611                 act_size = sizeof(set_ip_addr);
612                 memcpy(nfp_action, &set_ip_addr, act_size);
613                 *a_len += act_size;
614
615                 /* Hardware will automatically fix IPv4 and TCP/UDP checksum. */
616                 *csum_updated |= TCA_CSUM_UPDATE_FLAG_IPV4HDR |
617                                 nfp_fl_csum_l4_to_flag(ip_proto);
618         }
619         if (set_ip6_dst.head.len_lw && set_ip6_src.head.len_lw) {
620                 /* TC compiles set src and dst IPv6 address as a single action,
621                  * the hardware requires this to be 2 separate actions.
622                  */
623                 nfp_action += act_size;
624                 act_size = sizeof(set_ip6_src);
625                 memcpy(nfp_action, &set_ip6_src, act_size);
626                 *a_len += act_size;
627
628                 act_size = sizeof(set_ip6_dst);
629                 memcpy(&nfp_action[sizeof(set_ip6_src)], &set_ip6_dst,
630                        act_size);
631                 *a_len += act_size;
632
633                 /* Hardware will automatically fix TCP/UDP checksum. */
634                 *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
635         } else if (set_ip6_dst.head.len_lw) {
636                 nfp_action += act_size;
637                 act_size = sizeof(set_ip6_dst);
638                 memcpy(nfp_action, &set_ip6_dst, act_size);
639                 *a_len += act_size;
640
641                 /* Hardware will automatically fix TCP/UDP checksum. */
642                 *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
643         } else if (set_ip6_src.head.len_lw) {
644                 nfp_action += act_size;
645                 act_size = sizeof(set_ip6_src);
646                 memcpy(nfp_action, &set_ip6_src, act_size);
647                 *a_len += act_size;
648
649                 /* Hardware will automatically fix TCP/UDP checksum. */
650                 *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
651         }
652         if (set_tport.head.len_lw) {
653                 nfp_action += act_size;
654                 act_size = sizeof(set_tport);
655                 memcpy(nfp_action, &set_tport, act_size);
656                 *a_len += act_size;
657
658                 /* Hardware will automatically fix TCP/UDP checksum. */
659                 *csum_updated |= nfp_fl_csum_l4_to_flag(ip_proto);
660         }
661
662         return 0;
663 }
664
665 static int
666 nfp_flower_output_action(struct nfp_app *app, const struct tc_action *a,
667                          struct nfp_fl_payload *nfp_fl, int *a_len,
668                          struct net_device *netdev, bool last,
669                          enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
670                          int *out_cnt, u32 *csum_updated)
671 {
672         struct nfp_flower_priv *priv = app->priv;
673         struct nfp_fl_output *output;
674         int err, prelag_size;
675
676         /* If csum_updated has not been reset by now, it means HW will
677          * incorrectly update csums when they are not requested.
678          */
679         if (*csum_updated)
680                 return -EOPNOTSUPP;
681
682         if (*a_len + sizeof(struct nfp_fl_output) > NFP_FL_MAX_A_SIZ)
683                 return -EOPNOTSUPP;
684
685         output = (struct nfp_fl_output *)&nfp_fl->action_data[*a_len];
686         err = nfp_fl_output(app, output, a, nfp_fl, last, netdev, *tun_type,
687                             tun_out_cnt);
688         if (err)
689                 return err;
690
691         *a_len += sizeof(struct nfp_fl_output);
692
693         if (priv->flower_ext_feats & NFP_FL_FEATS_LAG) {
694                 /* nfp_fl_pre_lag returns -err or size of prelag action added.
695                  * This will be 0 if it is not egressing to a lag dev.
696                  */
697                 prelag_size = nfp_fl_pre_lag(app, a, nfp_fl, *a_len);
698                 if (prelag_size < 0)
699                         return prelag_size;
700                 else if (prelag_size > 0 && (!last || *out_cnt))
701                         return -EOPNOTSUPP;
702
703                 *a_len += prelag_size;
704         }
705         (*out_cnt)++;
706
707         return 0;
708 }
709
/* nfp_flower_loop_action() - translate one TC action into FW action(s).
 * @app:	nfp app backing this flower instance
 * @a:		the TC action to translate
 * @flow:	the flower rule being offloaded
 * @nfp_fl:	flow payload whose action list is appended to
 * @a_len:	running action list length, updated as actions are added
 * @netdev:	ingress netdev the flow is installed on
 * @tun_type:	in/out: tunnel type chosen by a tunnel_set action
 * @tun_out_cnt: running count of tunnel output actions
 * @out_cnt:	running count of output actions
 * @csum_updated: bitmask of checksums HW will already fix automatically
 *
 * Return: 0 on success, negative errno if @a cannot be offloaded.
 */
static int
nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
		       struct tc_cls_flower_offload *flow,
		       struct nfp_fl_payload *nfp_fl, int *a_len,
		       struct net_device *netdev,
		       enum nfp_flower_tun_type *tun_type, int *tun_out_cnt,
		       int *out_cnt, u32 *csum_updated)
{
	struct nfp_fl_set_ipv4_udp_tun *set_tun;
	struct nfp_fl_pre_tunnel *pre_tun;
	struct nfp_fl_push_vlan *psh_v;
	struct nfp_fl_pop_vlan *pop_v;
	int err;

	if (is_tcf_gact_shot(a)) {
		/* Drop needs no action list entry, only the shortcut. */
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_DROP);
	} else if (is_tcf_mirred_egress_redirect(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       true, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_mirred_egress_mirror(a)) {
		err = nfp_flower_output_action(app, a, nfp_fl, a_len, netdev,
					       false, tun_type, tun_out_cnt,
					       out_cnt, csum_updated);
		if (err)
			return err;

	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_POP) {
		if (*a_len + sizeof(struct nfp_fl_pop_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pop_v = (struct nfp_fl_pop_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_POPV);

		nfp_fl_pop_vlan(pop_v);
		*a_len += sizeof(struct nfp_fl_pop_vlan);
	} else if (is_tcf_vlan(a) && tcf_vlan_action(a) == TCA_VLAN_ACT_PUSH) {
		if (*a_len + sizeof(struct nfp_fl_push_vlan) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		psh_v = (struct nfp_fl_push_vlan *)&nfp_fl->action_data[*a_len];
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);

		nfp_fl_push_vlan(psh_v, a);
		*a_len += sizeof(struct nfp_fl_push_vlan);
	} else if (is_tcf_tunnel_set(a)) {
		struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
		struct nfp_repr *repr = netdev_priv(netdev);

		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
		if (*tun_type == NFP_FL_TUNNEL_NONE)
			return -EOPNOTSUPP;

		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
			return -EOPNOTSUPP;

		/* Pre-tunnel action is required for tunnel encap.
		 * This checks for next hop entries on NFP.
		 * If none, the packet falls back before applying other actions.
		 */
		if (*a_len + sizeof(struct nfp_fl_pre_tunnel) +
		    sizeof(struct nfp_fl_set_ipv4_udp_tun) > NFP_FL_MAX_A_SIZ)
			return -EOPNOTSUPP;

		pre_tun = nfp_fl_pre_tunnel(nfp_fl->action_data, *a_len);
		nfp_fl->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
		*a_len += sizeof(struct nfp_fl_pre_tunnel);

		/* Geneve options must precede the set-tunnel action. */
		err = nfp_fl_push_geneve_options(nfp_fl, a_len, a);
		if (err)
			return err;

		set_tun = (void *)&nfp_fl->action_data[*a_len];
		err = nfp_fl_set_ipv4_udp_tun(app, set_tun, a, pre_tun,
					      *tun_type, netdev);
		if (err)
			return err;
		*a_len += sizeof(struct nfp_fl_set_ipv4_udp_tun);
	} else if (is_tcf_tunnel_release(a)) {
		/* Tunnel decap is handled by default so accept action. */
		return 0;
	} else if (is_tcf_pedit(a)) {
		if (nfp_fl_pedit(a, flow, &nfp_fl->action_data[*a_len],
				 a_len, csum_updated))
			return -EOPNOTSUPP;
	} else if (is_tcf_csum(a)) {
		/* csum action requests recalc of something we have not fixed */
		if (tcf_csum_update_flags(a) & ~*csum_updated)
			return -EOPNOTSUPP;
		/* If we will correctly fix the csum we can remove it from the
		 * csum update list. Which will later be used to check support.
		 */
		*csum_updated &= ~tcf_csum_update_flags(a);
	} else {
		/* Currently we do not handle any other actions. */
		return -EOPNOTSUPP;
	}

	return 0;
}
813
814 int nfp_flower_compile_action(struct nfp_app *app,
815                               struct tc_cls_flower_offload *flow,
816                               struct net_device *netdev,
817                               struct nfp_fl_payload *nfp_flow)
818 {
819         int act_len, act_cnt, err, tun_out_cnt, out_cnt, i;
820         enum nfp_flower_tun_type tun_type;
821         const struct tc_action *a;
822         u32 csum_updated = 0;
823
824         memset(nfp_flow->action_data, 0, NFP_FL_MAX_A_SIZ);
825         nfp_flow->meta.act_len = 0;
826         tun_type = NFP_FL_TUNNEL_NONE;
827         act_len = 0;
828         act_cnt = 0;
829         tun_out_cnt = 0;
830         out_cnt = 0;
831
832         tcf_exts_for_each_action(i, a, flow->exts) {
833                 err = nfp_flower_loop_action(app, a, flow, nfp_flow, &act_len,
834                                              netdev, &tun_type, &tun_out_cnt,
835                                              &out_cnt, &csum_updated);
836                 if (err)
837                         return err;
838                 act_cnt++;
839         }
840
841         /* We optimise when the action list is small, this can unfortunately
842          * not happen once we have more than one action in the action list.
843          */
844         if (act_cnt > 1)
845                 nfp_flow->meta.shortcut = cpu_to_be32(NFP_FL_SC_ACT_NULL);
846
847         nfp_flow->meta.act_len = act_len;
848
849         return 0;
850 }