/*
 * Copyright (C) 2017 Netronome Systems, Inc.
 *
 * This software is dual licensed under the GNU General Public License
 * Version 2, June 1991 as shown in the file COPYING in the top-level
 * directory of this source tree or the BSD 2-Clause License provided
 * below.  You have the option to license this software under the
 * complete terms of either license.
 *
 * The BSD 2-Clause License:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      1. Redistributions of source code must retain the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer.
 *
 *      2. Redistributions in binary form must reproduce the above
 *         copyright notice, this list of conditions and the following
 *         disclaimer in the documentation and/or other materials
 *         provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <net/devlink.h>
#include <net/pkt_cls.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_port.h"

#define NFP_FLOWER_SUPPORTED_TCPFLAGS \
        (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST | \
         TCPHDR_PSH | TCPHDR_URG)

#define NFP_FLOWER_SUPPORTED_CTLFLAGS \
        (FLOW_DIS_IS_FRAGMENT | \
         FLOW_DIS_FIRST_FRAG)

#define NFP_FLOWER_WHITELIST_DISSECTOR \
        (BIT(FLOW_DISSECTOR_KEY_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_BASIC) | \
         BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_TCP) | \
         BIT(FLOW_DISSECTOR_KEY_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ETH_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_VLAN) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IP) | \
         BIT(FLOW_DISSECTOR_KEY_MPLS) | \
         BIT(FLOW_DISSECTOR_KEY_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_OPTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IP))

#define NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R \
        (BIT(FLOW_DISSECTOR_KEY_ENC_CONTROL) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS) | \
         BIT(FLOW_DISSECTOR_KEY_ENC_PORTS))

static int
nfp_flower_xmit_flow(struct net_device *netdev,
                     struct nfp_fl_payload *nfp_flow, u8 mtype)
{
        u32 meta_len, key_len, mask_len, act_len, tot_len;
        struct nfp_repr *priv = netdev_priv(netdev);
        struct sk_buff *skb;
        unsigned char *msg;

        meta_len = sizeof(struct nfp_fl_rule_metadata);
        key_len = nfp_flow->meta.key_len;
        mask_len = nfp_flow->meta.mask_len;
        act_len = nfp_flow->meta.act_len;

        tot_len = meta_len + key_len + mask_len + act_len;

        /* Convert to long words as firmware expects
         * lengths in units of NFP_FL_LW_SIZ.
         */
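        /* For example, assuming NFP_FL_LW_SIZ is 2 (i.e. 4-byte long
         * words, as defined in main.h), a 40 byte key_len is sent as
         * 40 >> 2 == 10 long words.
         */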
        nfp_flow->meta.key_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.mask_len >>= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len >>= NFP_FL_LW_SIZ;

        skb = nfp_flower_cmsg_alloc(priv->app, tot_len, mtype, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        msg = nfp_flower_cmsg_get_data(skb);
        memcpy(msg, &nfp_flow->meta, meta_len);
        memcpy(&msg[meta_len], nfp_flow->unmasked_data, key_len);
        memcpy(&msg[meta_len + key_len], nfp_flow->mask_data, mask_len);
        memcpy(&msg[meta_len + key_len + mask_len],
               nfp_flow->action_data, act_len);
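        /* Resulting control message payload layout:
         *   [ nfp_fl_rule_metadata | unmasked key | mask | actions ]
         */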

        /* Convert back to bytes as software expects
         * lengths in units of bytes.
         */
        nfp_flow->meta.key_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.mask_len <<= NFP_FL_LW_SIZ;
        nfp_flow->meta.act_len <<= NFP_FL_LW_SIZ;

        nfp_ctrl_tx(priv->app->ctrl, skb);

        return 0;
}

static bool nfp_flower_check_higher_than_mac(struct tc_cls_flower_offload *f)
{
        return dissector_uses_key(f->dissector,
                                  FLOW_DISSECTOR_KEY_IPV4_ADDRS) ||
                dissector_uses_key(f->dissector,
                                   FLOW_DISSECTOR_KEY_IPV6_ADDRS) ||
                dissector_uses_key(f->dissector,
                                   FLOW_DISSECTOR_KEY_PORTS) ||
                dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ICMP);
}

static int
nfp_flower_calc_opt_layer(struct flow_dissector_key_enc_opts *enc_opts,
                          u32 *key_layer_two, int *key_size)
{
        if (enc_opts->len > NFP_FL_MAX_GENEVE_OPT_KEY)
                return -EOPNOTSUPP;

        if (enc_opts->len > 0) {
                *key_layer_two |= NFP_FLOWER_LAYER2_GENEVE_OP;
                *key_size += sizeof(struct nfp_flower_geneve_options);
        }

        return 0;
}

static int
nfp_flower_calculate_key_layers(struct nfp_app *app,
                                struct nfp_fl_key_ls *ret_key_ls,
                                struct tc_cls_flower_offload *flow,
                                bool egress,
                                enum nfp_flower_tun_type *tun_type)
{
        struct flow_dissector_key_basic *mask_basic = NULL;
        struct flow_dissector_key_basic *key_basic = NULL;
        struct nfp_flower_priv *priv = app->priv;
        u32 key_layer_two;
        u8 key_layer;
        int key_size;
        int err;

        if (flow->dissector->used_keys & ~NFP_FLOWER_WHITELIST_DISSECTOR)
                return -EOPNOTSUPP;

        /* If any tunnel dissector key is used then the minimum required
         * set must also be present.
         */
        if (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR &&
            (flow->dissector->used_keys & NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
            != NFP_FLOWER_WHITELIST_TUN_DISSECTOR_R)
                return -EOPNOTSUPP;

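        /* For illustration: a filter matching only on ENC_CONTROL and
         * ENC_IPV4_ADDRS intersects NFP_FLOWER_WHITELIST_TUN_DISSECTOR but
         * lacks ENC_PORTS from the required (_R) set, so the check above
         * rejects it with -EOPNOTSUPP.
         */
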
        key_layer_two = 0;
        key_layer = NFP_FLOWER_LAYER_PORT;
        key_size = sizeof(struct nfp_flower_meta_tci) +
                   sizeof(struct nfp_flower_in_port);

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS) ||
            dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_MPLS)) {
                key_layer |= NFP_FLOWER_LAYER_MAC;
                key_size += sizeof(struct nfp_flower_mac_mpls);
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *flow_vlan;

                flow_vlan = skb_flow_dissector_target(flow->dissector,
                                                      FLOW_DISSECTOR_KEY_VLAN,
                                                      flow->mask);
                if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
                    flow_vlan->vlan_priority)
                        return -EOPNOTSUPP;
        }

        if (dissector_uses_key(flow->dissector,
                               FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
                struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
                struct flow_dissector_key_ports *mask_enc_ports = NULL;
                struct flow_dissector_key_enc_opts *enc_op = NULL;
                struct flow_dissector_key_ports *enc_ports = NULL;
                struct flow_dissector_key_control *mask_enc_ctl =
                        skb_flow_dissector_target(flow->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                                  flow->mask);
                struct flow_dissector_key_control *enc_ctl =
                        skb_flow_dissector_target(flow->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_CONTROL,
                                                  flow->key);
                if (!egress)
                        return -EOPNOTSUPP;

                if (mask_enc_ctl->addr_type != 0xffff ||
                    enc_ctl->addr_type != FLOW_DISSECTOR_KEY_IPV4_ADDRS)
                        return -EOPNOTSUPP;

                /* These fields are already verified as used. */
                mask_ipv4 =
                        skb_flow_dissector_target(flow->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
                                                  flow->mask);
                if (mask_ipv4->dst != cpu_to_be32(~0))
                        return -EOPNOTSUPP;

                mask_enc_ports =
                        skb_flow_dissector_target(flow->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  flow->mask);
                enc_ports =
                        skb_flow_dissector_target(flow->dissector,
                                                  FLOW_DISSECTOR_KEY_ENC_PORTS,
                                                  flow->key);

                if (mask_enc_ports->dst != cpu_to_be16(~0))
                        return -EOPNOTSUPP;

                if (dissector_uses_key(flow->dissector,
                                       FLOW_DISSECTOR_KEY_ENC_OPTS)) {
                        enc_op = skb_flow_dissector_target(flow->dissector,
                                                           FLOW_DISSECTOR_KEY_ENC_OPTS,
                                                           flow->key);
                }

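                /* NFP_FL_VXLAN_PORT and NFP_FL_GENEVE_PORT are assumed to
                 * be the IANA well-known UDP ports (4789 and 6081), as
                 * defined elsewhere in the driver.
                 */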
                switch (enc_ports->dst) {
                case htons(NFP_FL_VXLAN_PORT):
                        *tun_type = NFP_FL_TUNNEL_VXLAN;
                        key_layer |= NFP_FLOWER_LAYER_VXLAN;
                        key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

                        if (enc_op)
                                return -EOPNOTSUPP;
                        break;
                case htons(NFP_FL_GENEVE_PORT):
                        if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE))
                                return -EOPNOTSUPP;
                        *tun_type = NFP_FL_TUNNEL_GENEVE;
                        key_layer |= NFP_FLOWER_LAYER_EXT_META;
                        key_size += sizeof(struct nfp_flower_ext_meta);
                        key_layer_two |= NFP_FLOWER_LAYER2_GENEVE;
                        key_size += sizeof(struct nfp_flower_ipv4_udp_tun);

                        if (!enc_op)
                                break;
                        if (!(priv->flower_ext_feats & NFP_FL_FEATS_GENEVE_OPT))
                                return -EOPNOTSUPP;
                        err = nfp_flower_calc_opt_layer(enc_op, &key_layer_two,
                                                        &key_size);
                        if (err)
                                return err;
                        break;
                default:
                        return -EOPNOTSUPP;
                }
        } else if (egress) {
                /* Reject non-tunnel matches offloaded to egress repr. */
                return -EOPNOTSUPP;
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_BASIC)) {
                mask_basic = skb_flow_dissector_target(flow->dissector,
                                                       FLOW_DISSECTOR_KEY_BASIC,
                                                       flow->mask);

                key_basic = skb_flow_dissector_target(flow->dissector,
                                                      FLOW_DISSECTOR_KEY_BASIC,
                                                      flow->key);
        }

        if (mask_basic && mask_basic->n_proto) {
                /* Ethernet type is present in the key. */
                switch (key_basic->n_proto) {
                case cpu_to_be16(ETH_P_IP):
                        key_layer |= NFP_FLOWER_LAYER_IPV4;
                        key_size += sizeof(struct nfp_flower_ipv4);
                        break;

                case cpu_to_be16(ETH_P_IPV6):
                        key_layer |= NFP_FLOWER_LAYER_IPV6;
                        key_size += sizeof(struct nfp_flower_ipv6);
                        break;

                /* Currently we do not offload ARP
                 * because we rely on it to get to the host.
                 */
                case cpu_to_be16(ETH_P_ARP):
                        return -EOPNOTSUPP;

                case cpu_to_be16(ETH_P_MPLS_UC):
                case cpu_to_be16(ETH_P_MPLS_MC):
                        if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
                                key_layer |= NFP_FLOWER_LAYER_MAC;
                                key_size += sizeof(struct nfp_flower_mac_mpls);
                        }
                        break;

                /* Will be included in layer 2. */
                case cpu_to_be16(ETH_P_8021Q):
                        break;

                default:
                        /* Other ethtype - we need to check the masks for the
                         * remainder of the key to ensure we can offload.
                         */
                        if (nfp_flower_check_higher_than_mac(flow))
                                return -EOPNOTSUPP;
                        break;
                }
        }

        if (mask_basic && mask_basic->ip_proto) {
                /* IP protocol is present in the key. */
                switch (key_basic->ip_proto) {
                case IPPROTO_TCP:
                case IPPROTO_UDP:
                case IPPROTO_SCTP:
                case IPPROTO_ICMP:
                case IPPROTO_ICMPV6:
                        key_layer |= NFP_FLOWER_LAYER_TP;
                        key_size += sizeof(struct nfp_flower_tp_ports);
                        break;
                default:
                        /* Other IP proto - we need to check the masks for the
                         * remainder of the key to ensure we can offload.
                         */
                        return -EOPNOTSUPP;
                }
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_TCP)) {
                struct flow_dissector_key_tcp *tcp;
                u32 tcp_flags;

                tcp = skb_flow_dissector_target(flow->dissector,
                                                FLOW_DISSECTOR_KEY_TCP,
                                                flow->key);
                tcp_flags = be16_to_cpu(tcp->flags);

                if (tcp_flags & ~NFP_FLOWER_SUPPORTED_TCPFLAGS)
                        return -EOPNOTSUPP;

                /* We only support PSH and URG flags when either
                 * FIN, SYN or RST is present as well.
                 */
                if ((tcp_flags & (TCPHDR_PSH | TCPHDR_URG)) &&
                    !(tcp_flags & (TCPHDR_FIN | TCPHDR_SYN | TCPHDR_RST)))
                        return -EOPNOTSUPP;

                /* We need to store TCP flags in either the IPv4 or IPv6 key
                 * space, thus we need to ensure we include an IPv4/IPv6 key
                 * layer if we have not done so already.
                 */
                if (!key_basic)
                        return -EOPNOTSUPP;

                if (!(key_layer & NFP_FLOWER_LAYER_IPV4) &&
                    !(key_layer & NFP_FLOWER_LAYER_IPV6)) {
                        switch (key_basic->n_proto) {
                        case cpu_to_be16(ETH_P_IP):
                                key_layer |= NFP_FLOWER_LAYER_IPV4;
                                key_size += sizeof(struct nfp_flower_ipv4);
                                break;

                        case cpu_to_be16(ETH_P_IPV6):
                                key_layer |= NFP_FLOWER_LAYER_IPV6;
                                key_size += sizeof(struct nfp_flower_ipv6);
                                break;

                        default:
                                return -EOPNOTSUPP;
                        }
                }
        }

        if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
                struct flow_dissector_key_control *key_ctl;

                key_ctl = skb_flow_dissector_target(flow->dissector,
                                                    FLOW_DISSECTOR_KEY_CONTROL,
                                                    flow->key);

                if (key_ctl->flags & ~NFP_FLOWER_SUPPORTED_CTLFLAGS)
                        return -EOPNOTSUPP;
        }

        ret_key_ls->key_layer = key_layer;
        ret_key_ls->key_layer_two = key_layer_two;
        ret_key_ls->key_size = key_size;

        return 0;
}
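
/* Worked example (for illustration): a plain IPv4/TCP match using the
 * ETH_ADDRS, BASIC, IPV4_ADDRS, PORTS and TCP keys yields
 * key_layer = PORT | MAC | IPV4 | TP and
 * key_size = sizeof(struct nfp_flower_meta_tci) +
 *            sizeof(struct nfp_flower_in_port) +
 *            sizeof(struct nfp_flower_mac_mpls) +
 *            sizeof(struct nfp_flower_ipv4) +
 *            sizeof(struct nfp_flower_tp_ports);
 */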

static struct nfp_fl_payload *
nfp_flower_allocate_new(struct nfp_fl_key_ls *key_layer, bool egress)
{
        struct nfp_fl_payload *flow_pay;

        flow_pay = kmalloc(sizeof(*flow_pay), GFP_KERNEL);
        if (!flow_pay)
                return NULL;

        flow_pay->meta.key_len = key_layer->key_size;
        flow_pay->unmasked_data = kmalloc(key_layer->key_size, GFP_KERNEL);
        if (!flow_pay->unmasked_data)
                goto err_free_flow;

        flow_pay->meta.mask_len = key_layer->key_size;
        flow_pay->mask_data = kmalloc(key_layer->key_size, GFP_KERNEL);
        if (!flow_pay->mask_data)
                goto err_free_unmasked;

        flow_pay->action_data = kmalloc(NFP_FL_MAX_A_SIZ, GFP_KERNEL);
        if (!flow_pay->action_data)
                goto err_free_mask;

        flow_pay->nfp_tun_ipv4_addr = 0;
        flow_pay->meta.flags = 0;
        spin_lock_init(&flow_pay->lock);

        flow_pay->ingress_offload = !egress;

        return flow_pay;

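/* Error unwind: free in reverse order of allocation. */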
err_free_mask:
        kfree(flow_pay->mask_data);
err_free_unmasked:
        kfree(flow_pay->unmasked_data);
err_free_flow:
        kfree(flow_pay);
        return NULL;
}

/**
 * nfp_flower_add_offload() - Adds a new flow to hardware.
 * @app:        Pointer to the APP handle
 * @netdev:     netdev structure.
 * @flow:       TC flower classifier offload structure.
 * @egress:     True if the NFP netdev is the egress netdev.
 *
 * Adds a new flow to the repeated hash structure and action payload.
 *
 * Return: negative value on error, 0 if configured successfully.
 */
static int
nfp_flower_add_offload(struct nfp_app *app, struct net_device *netdev,
                       struct tc_cls_flower_offload *flow, bool egress)
{
        enum nfp_flower_tun_type tun_type = NFP_FL_TUNNEL_NONE;
        struct nfp_port *port = nfp_port_from_netdev(netdev);
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_fl_payload *flow_pay;
        struct nfp_fl_key_ls *key_layer;
        struct net_device *ingr_dev;
        int err;

        ingr_dev = egress ? NULL : netdev;
        flow_pay = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
                                              NFP_FL_STATS_CTX_DONT_CARE);
        if (flow_pay) {
                /* Ignore as duplicate if it has been added by a different cb. */
                if (flow_pay->ingress_offload && egress)
                        return 0;
                else
                        return -EOPNOTSUPP;
        }

        key_layer = kmalloc(sizeof(*key_layer), GFP_KERNEL);
        if (!key_layer)
                return -ENOMEM;

        err = nfp_flower_calculate_key_layers(app, key_layer, flow, egress,
                                              &tun_type);
        if (err)
                goto err_free_key_ls;

        flow_pay = nfp_flower_allocate_new(key_layer, egress);
        if (!flow_pay) {
                err = -ENOMEM;
                goto err_free_key_ls;
        }

        flow_pay->ingress_dev = egress ? NULL : netdev;

        err = nfp_flower_compile_flow_match(flow, key_layer, netdev, flow_pay,
                                            tun_type);
        if (err)
                goto err_destroy_flow;

        err = nfp_flower_compile_action(app, flow, netdev, flow_pay);
        if (err)
                goto err_destroy_flow;

        err = nfp_compile_flow_metadata(app, flow, flow_pay,
                                        flow_pay->ingress_dev);
        if (err)
                goto err_destroy_flow;

        err = nfp_flower_xmit_flow(netdev, flow_pay,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_ADD);
        if (err)
                goto err_destroy_flow;

        INIT_HLIST_NODE(&flow_pay->link);
        flow_pay->tc_flower_cookie = flow->cookie;
        hash_add_rcu(priv->flow_table, &flow_pay->link, flow->cookie);
        port->tc_offload_cnt++;

        /* The key_layer scratch area is no longer needed; the flow payload
         * itself is deallocated when the flower rule is destroyed.
         */
        kfree(key_layer);

        return 0;

err_destroy_flow:
        kfree(flow_pay->action_data);
        kfree(flow_pay->mask_data);
        kfree(flow_pay->unmasked_data);
        kfree(flow_pay);
err_free_key_ls:
        kfree(key_layer);
        return err;
}

/**
 * nfp_flower_del_offload() - Removes a flow from hardware.
 * @app:        Pointer to the APP handle
 * @netdev:     netdev structure.
 * @flow:       TC flower classifier offload structure
 * @egress:     Netdev is the egress dev.
 *
 * Removes a flow from the repeated hash structure and clears the
 * action payload.
 *
 * Return: negative value on error, 0 if removed successfully.
 */
static int
nfp_flower_del_offload(struct nfp_app *app, struct net_device *netdev,
                       struct tc_cls_flower_offload *flow, bool egress)
{
        struct nfp_port *port = nfp_port_from_netdev(netdev);
        struct nfp_fl_payload *nfp_flow;
        struct net_device *ingr_dev;
        int err;

        ingr_dev = egress ? NULL : netdev;
        nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
                                              NFP_FL_STATS_CTX_DONT_CARE);
        if (!nfp_flow)
                return egress ? 0 : -ENOENT;

        err = nfp_modify_flow_metadata(app, nfp_flow);
        if (err)
                goto err_free_flow;

        if (nfp_flow->nfp_tun_ipv4_addr)
                nfp_tunnel_del_ipv4_off(app, nfp_flow->nfp_tun_ipv4_addr);

        err = nfp_flower_xmit_flow(netdev, nfp_flow,
                                   NFP_FLOWER_CMSG_TYPE_FLOW_DEL);
        if (err)
                goto err_free_flow;

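        /* On success err is 0 and control falls through to the label below,
         * so the flow is unhashed and freed on both paths.
         */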
err_free_flow:
        hash_del_rcu(&nfp_flow->link);
        port->tc_offload_cnt--;
        kfree(nfp_flow->action_data);
        kfree(nfp_flow->mask_data);
        kfree(nfp_flow->unmasked_data);
        kfree_rcu(nfp_flow, rcu);
        return err;
}

/**
 * nfp_flower_get_stats() - Populates flow stats obtained from hardware.
 * @app:        Pointer to the APP handle
 * @netdev:     Netdev structure.
 * @flow:       TC flower classifier offload structure
 * @egress:     Netdev is the egress dev.
 *
 * Populates a flow statistics structure which corresponds to a
 * specific flow.
 *
 * Return: negative value on error, 0 if stats populated successfully.
 */
static int
nfp_flower_get_stats(struct nfp_app *app, struct net_device *netdev,
                     struct tc_cls_flower_offload *flow, bool egress)
{
        struct nfp_fl_payload *nfp_flow;
        struct net_device *ingr_dev;

        ingr_dev = egress ? NULL : netdev;
        nfp_flow = nfp_flower_search_fl_table(app, flow->cookie, ingr_dev,
                                              NFP_FL_STATS_CTX_DONT_CARE);
        if (!nfp_flow)
                return -EINVAL;

        if (nfp_flow->ingress_offload && egress)
                return 0;

        spin_lock_bh(&nfp_flow->lock);
        tcf_exts_stats_update(flow->exts, nfp_flow->stats.bytes,
                              nfp_flow->stats.pkts, nfp_flow->stats.used);

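        /* Zeroing the counters after each read means TC receives deltas
         * rather than cumulative totals.
         */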
        nfp_flow->stats.pkts = 0;
        nfp_flow->stats.bytes = 0;
        spin_unlock_bh(&nfp_flow->lock);

        return 0;
}

static int
nfp_flower_repr_offload(struct nfp_app *app, struct net_device *netdev,
                        struct tc_cls_flower_offload *flower, bool egress)
{
        if (!eth_proto_is_802_3(flower->common.protocol))
                return -EOPNOTSUPP;

        switch (flower->command) {
        case TC_CLSFLOWER_REPLACE:
                return nfp_flower_add_offload(app, netdev, flower, egress);
        case TC_CLSFLOWER_DESTROY:
                return nfp_flower_del_offload(app, netdev, flower, egress);
        case TC_CLSFLOWER_STATS:
                return nfp_flower_get_stats(app, netdev, flower, egress);
        default:
                return -EOPNOTSUPP;
        }
}

int nfp_flower_setup_tc_egress_cb(enum tc_setup_type type, void *type_data,
                                  void *cb_priv)
{
        struct nfp_repr *repr = cb_priv;

        if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return nfp_flower_repr_offload(repr->app, repr->netdev,
                                               type_data, true);
        default:
                return -EOPNOTSUPP;
        }
}

static int nfp_flower_setup_tc_block_cb(enum tc_setup_type type,
                                        void *type_data, void *cb_priv)
{
        struct nfp_repr *repr = cb_priv;

        if (!tc_cls_can_offload_and_chain0(repr->netdev, type_data))
                return -EOPNOTSUPP;

        switch (type) {
        case TC_SETUP_CLSFLOWER:
                return nfp_flower_repr_offload(repr->app, repr->netdev,
                                               type_data, false);
        default:
                return -EOPNOTSUPP;
        }
}

static int nfp_flower_setup_tc_block(struct net_device *netdev,
                                     struct tc_block_offload *f)
{
        struct nfp_repr *repr = netdev_priv(netdev);

        if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
                return -EOPNOTSUPP;

        switch (f->command) {
        case TC_BLOCK_BIND:
                return tcf_block_cb_register(f->block,
                                             nfp_flower_setup_tc_block_cb,
                                             repr, repr, f->extack);
        case TC_BLOCK_UNBIND:
                tcf_block_cb_unregister(f->block,
                                        nfp_flower_setup_tc_block_cb,
                                        repr);
                return 0;
        default:
                return -EOPNOTSUPP;
        }
}
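
/* For illustration, a hypothetical tc invocation that exercises this path:
 * the ingress block bind registers nfp_flower_setup_tc_block_cb, and the
 * filter add then arrives as TC_CLSFLOWER_REPLACE:
 *
 *   tc qdisc add dev $REPR ingress
 *   tc filter add dev $REPR ingress protocol ip flower \
 *           ip_proto tcp dst_port 80 action drop
 */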

int nfp_flower_setup_tc(struct nfp_app *app, struct net_device *netdev,
                        enum tc_setup_type type, void *type_data)
{
        switch (type) {
        case TC_SETUP_BLOCK:
                return nfp_flower_setup_tc_block(netdev, type_data);
        default:
                return -EOPNOTSUPP;
        }
}