GNU Linux-libre 4.14.332-gnu1
[releases.git] / drivers / net / ethernet / broadcom / bnxt / bnxt_tc.c
1 /* Broadcom NetXtreme-C/E network driver.
2  *
3  * Copyright (c) 2017 Broadcom Limited
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  */
9
10 #include <linux/netdevice.h>
11 #include <linux/inetdevice.h>
12 #include <linux/if_vlan.h>
13 #include <net/flow_dissector.h>
14 #include <net/pkt_cls.h>
15 #include <net/tc_act/tc_gact.h>
16 #include <net/tc_act/tc_skbedit.h>
17 #include <net/tc_act/tc_mirred.h>
18 #include <net/tc_act/tc_vlan.h>
19
20 #include "bnxt_hsi.h"
21 #include "bnxt.h"
22 #include "bnxt_sriov.h"
23 #include "bnxt_tc.h"
24 #include "bnxt_vfr.h"
25
26 #ifdef CONFIG_BNXT_FLOWER_OFFLOAD
27
28 #define BNXT_FID_INVALID                        0xffff
29 #define VLAN_TCI(vid, prio)     ((vid) | ((prio) << VLAN_PRIO_SHIFT))
30
31 /* Return the dst fid of the func for flow forwarding
32  * For PFs: src_fid is the fid of the PF
33  * For VF-reps: src_fid the fid of the VF
34  */
35 static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
36 {
37         struct bnxt *bp;
38
39         /* check if dev belongs to the same switch */
40         if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
41                 netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
42                             dev->ifindex);
43                 return BNXT_FID_INVALID;
44         }
45
46         /* Is dev a VF-rep? */
47         if (dev != pf_bp->dev)
48                 return bnxt_vf_rep_get_fid(dev);
49
50         bp = netdev_priv(dev);
51         return bp->pf.fw_fid;
52 }
53
54 static int bnxt_tc_parse_redir(struct bnxt *bp,
55                                struct bnxt_tc_actions *actions,
56                                const struct tc_action *tc_act)
57 {
58         int ifindex = tcf_mirred_ifindex(tc_act);
59         struct net_device *dev;
60         u16 dst_fid;
61
62         dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
63         if (!dev) {
64                 netdev_info(bp->dev, "no dev for ifindex=%d", ifindex);
65                 return -EINVAL;
66         }
67
68         /* find the FID from dev */
69         dst_fid = bnxt_flow_get_dst_fid(bp, dev);
70         if (dst_fid == BNXT_FID_INVALID) {
71                 netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
72                 return -EINVAL;
73         }
74
75         actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
76         actions->dst_fid = dst_fid;
77         actions->dst_dev = dev;
78         return 0;
79 }
80
81 static int bnxt_tc_parse_vlan(struct bnxt *bp,
82                               struct bnxt_tc_actions *actions,
83                               const struct tc_action *tc_act)
84 {
85         switch (tcf_vlan_action(tc_act)) {
86         case TCA_VLAN_ACT_POP:
87                 actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
88                 break;
89         case TCA_VLAN_ACT_PUSH:
90                 actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
91                 actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
92                 actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
93                 break;
94         default:
95                 return -EOPNOTSUPP;
96         }
97         return 0;
98 }
99
100 static int bnxt_tc_parse_actions(struct bnxt *bp,
101                                  struct bnxt_tc_actions *actions,
102                                  struct tcf_exts *tc_exts)
103 {
104         const struct tc_action *tc_act;
105         LIST_HEAD(tc_actions);
106         int rc;
107
108         if (!tcf_exts_has_actions(tc_exts)) {
109                 netdev_info(bp->dev, "no actions");
110                 return -EINVAL;
111         }
112
113         tcf_exts_to_list(tc_exts, &tc_actions);
114         list_for_each_entry(tc_act, &tc_actions, list) {
115                 /* Drop action */
116                 if (is_tcf_gact_shot(tc_act)) {
117                         actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
118                         return 0; /* don't bother with other actions */
119                 }
120
121                 /* Redirect action */
122                 if (is_tcf_mirred_egress_redirect(tc_act)) {
123                         rc = bnxt_tc_parse_redir(bp, actions, tc_act);
124                         if (rc)
125                                 return rc;
126                         continue;
127                 }
128
129                 /* Push/pop VLAN */
130                 if (is_tcf_vlan(tc_act)) {
131                         rc = bnxt_tc_parse_vlan(bp, actions, tc_act);
132                         if (rc)
133                                 return rc;
134                         continue;
135                 }
136         }
137
138         return 0;
139 }
140
/* Fetch the key or mask pointer for a given dissector key type out of
 * the TC flower offload command.
 */
#define GET_KEY(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->mask)

/* Translate a TC flower match (dissector key/mask pairs) plus its action
 * list into the driver's bnxt_tc_flow representation.
 * Returns 0 on success, -EOPNOTSUPP when the match cannot be expressed,
 * or a negative errno from action parsing.
 */
static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct tc_cls_flower_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_dissector *dissector = tc_flow_cmd->dissector;
	u16 addr_type = 0;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);

		/* NOTE(review): addr_type is captured but not consumed
		 * below in this version of the code.
		 */
		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
		struct flow_dissector_key_basic *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

		flow->l2_key.ether_type = key->n_proto;
		flow->l2_mask.ether_type = mask->n_proto;

		/* ip_proto is meaningful only for IPv4/IPv6 ethertypes */
		if (key->n_proto == htons(ETH_P_IP) ||
		    key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = key->ip_proto;
			flow->l4_mask.ip_proto = mask->ip_proto;
		}
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
		struct flow_dissector_key_eth_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, key->dst);
		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
		ether_addr_copy(flow->l2_key.smac, key->src);
		ether_addr_copy(flow->l2_mask.smac, mask->src);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
		struct flow_dissector_key_vlan *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

		/* TC matches at most one VLAN tag, stored as "inner" here */
		flow->l2_key.inner_vlan_tci =
		   cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
		   cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = key->src;
		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		struct flow_dissector_key_ipv6_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = key->dst;
		flow->l3_mask.ipv6.daddr = mask->dst;
		flow->l3_key.ipv6.saddr = key->src;
		flow->l3_mask.ipv6.saddr = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = key->dst;
		flow->l4_mask.ports.dport = mask->dst;
		flow->l4_key.ports.sport = key->src;
		flow->l4_mask.ports.sport = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_dissector_key_icmp *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
		struct flow_dissector_key_icmp *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = key->type;
		flow->l4_key.icmp.code = key->code;
		flow->l4_mask.icmp.type = mask->type;
		flow->l4_mask.icmp.code = mask->code;
	}

	/* finally translate the action list */
	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}
267
268 static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
269 {
270         struct hwrm_cfa_flow_free_input req = { 0 };
271         int rc;
272
273         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
274         req.flow_handle = flow_handle;
275
276         rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
277         if (rc)
278                 netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
279                             __func__, flow_handle, rc);
280         return rc;
281 }
282
283 static int ipv6_mask_len(struct in6_addr *mask)
284 {
285         int mask_len = 0, i;
286
287         for (i = 0; i < 4; i++)
288                 mask_len += inet_mask_len(mask->s6_addr32[i]);
289
290         return mask_len;
291 }
292
293 static bool is_wildcard(void *mask, int len)
294 {
295         const u8 *p = mask;
296         int i;
297
298         for (i = 0; i < len; i++) {
299                 if (p[i] != 0)
300                         return false;
301         }
302         return true;
303 }
304
/* Build and send a HWRM_CFA_FLOW_ALLOC request for the parsed flow.
 * ref_flow_handle refers to an already-offloaded flow sharing the same
 * L2 key (or 0xffff when there is none). On success the FW-assigned
 * handle is returned through *flow_handle.
 */
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    __le16 ref_flow_handle, __le16 *flow_handle)
{
	struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_tc_actions *actions = &flow->actions;
	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
	struct hwrm_cfa_flow_alloc_input req = { 0 };
	u16 flow_flags = 0, action_flags = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

	req.src_fid = cpu_to_le16(flow->src_fid);
	req.ref_flow_handle = ref_flow_handle;
	req.ethertype = flow->l2_key.ether_type;
	req.ip_proto = flow->l4_key.ip_proto;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
		memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
		memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
	}

	if (flow->l2_key.num_vlans > 0) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
		/* FW expects the inner_vlan_tci value to be set
		 * in outer_vlan_tci when num_vlans is 1 (which is
		 * always the case in TC.)
		 */
		req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
	}

	/* If all IP and L4 fields are wildcarded then this is an L2 flow */
	if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
	    is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
	} else {
		/* non-IP ethertypes fall into the IPV6 branch here; the
		 * address fields below are then simply left wildcarded
		 */
		flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

		if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
			req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
			req.ip_dst_mask_len =
				inet_mask_len(l3_mask->ipv4.daddr.s_addr);
			req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
			req.ip_src_mask_len =
				inet_mask_len(l3_mask->ipv4.saddr.s_addr);
		} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
			/* IPv6 addresses fill all four 32-bit words of
			 * ip_dst/ip_src; mask lengths are prefix lengths
			 */
			memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
			       sizeof(req.ip_dst));
			req.ip_dst_mask_len =
					ipv6_mask_len(&l3_mask->ipv6.daddr);
			memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
			       sizeof(req.ip_src));
			req.ip_src_mask_len =
					ipv6_mask_len(&l3_mask->ipv6.saddr);
		}
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
		req.l4_src_port = flow->l4_key.ports.sport;
		req.l4_src_port_mask = flow->l4_mask.ports.sport;
		req.l4_dst_port = flow->l4_key.ports.dport;
		req.l4_dst_port_mask = flow->l4_mask.ports.dport;
	} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
		/* l4 ports serve as type/code when ip_proto is ICMP */
		req.l4_src_port = htons(flow->l4_key.icmp.type);
		req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
		req.l4_dst_port = htons(flow->l4_key.icmp.code);
		req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
	}
	req.flags = cpu_to_le16(flow_flags);

	if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
	} else {
		if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
			action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
			req.dst_fid = cpu_to_le16(actions->dst_fid);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
			req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
			/* rewrite MACs with themselves; only the VLAN tag
			 * is actually changed
			 */
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			/* Rewrite config with tpid = 0 implies vlan pop */
			req.l2_rewrite_vlan_tpid = 0;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
	}
	req.action_flags = cpu_to_le16(action_flags);

	/* hold the mutex so the shared response buffer stays valid while
	 * we read flow_handle out of it
	 */
	mutex_lock(&bp->hwrm_cmd_lock);

	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*flow_handle = resp->flow_handle;

	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}
415
416 /* Add val to accum while handling a possible wraparound
417  * of val. Eventhough val is of type u64, its actual width
418  * is denoted by mask and will wrap-around beyond that width.
419  */
420 static void accumulate_val(u64 *accum, u64 val, u64 mask)
421 {
422 #define low_bits(x, mask)               ((x) & (mask))
423 #define high_bits(x, mask)              ((x) & ~(mask))
424         bool wrapped = val < low_bits(*accum, mask);
425
426         *accum = high_bits(*accum, mask) + val;
427         if (wrapped)
428                 *accum += (mask + 1);
429 }
430
431 /* The HW counters' width is much less than 64bits.
432  * Handle possible wrap-around while updating the stat counters
433  */
434 static void bnxt_flow_stats_fix_wraparound(struct bnxt_tc_info *tc_info,
435                                            struct bnxt_tc_flow_stats *stats,
436                                            struct bnxt_tc_flow_stats *hw_stats)
437 {
438         accumulate_val(&stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
439         accumulate_val(&stats->packets, hw_stats->packets,
440                        tc_info->packets_mask);
441 }
442
443 /* Fix possible wraparound of the stats queried from HW, calculate
444  * the delta from prev_stats, and also update the prev_stats.
445  * The HW flow stats are fetched under the hwrm_cmd_lock mutex.
446  * This routine is best called while under the mutex so that the
447  * stats processing happens atomically.
448  */
449 static void bnxt_flow_stats_calc(struct bnxt_tc_info *tc_info,
450                                  struct bnxt_tc_flow *flow,
451                                  struct bnxt_tc_flow_stats *stats)
452 {
453         struct bnxt_tc_flow_stats *acc_stats, *prev_stats;
454
455         acc_stats = &flow->stats;
456         bnxt_flow_stats_fix_wraparound(tc_info, acc_stats, stats);
457
458         prev_stats = &flow->prev_stats;
459         stats->bytes = acc_stats->bytes - prev_stats->bytes;
460         stats->packets = acc_stats->packets - prev_stats->packets;
461         *prev_stats = *acc_stats;
462 }
463
464 static int bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp,
465                                         __le16 flow_handle,
466                                         struct bnxt_tc_flow *flow,
467                                         struct bnxt_tc_flow_stats *stats)
468 {
469         struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
470         struct hwrm_cfa_flow_stats_input req = { 0 };
471         int rc;
472
473         bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
474         req.num_flows = cpu_to_le16(1);
475         req.flow_handle_0 = flow_handle;
476
477         mutex_lock(&bp->hwrm_cmd_lock);
478         rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
479         if (!rc) {
480                 stats->packets = le64_to_cpu(resp->packet_0);
481                 stats->bytes = le64_to_cpu(resp->byte_0);
482                 bnxt_flow_stats_calc(&bp->tc_info, flow, stats);
483         } else {
484                 netdev_info(bp->dev, "error rc=%d", rc);
485         }
486
487         mutex_unlock(&bp->hwrm_cmd_lock);
488         return rc;
489 }
490
491 static int bnxt_tc_put_l2_node(struct bnxt *bp,
492                                struct bnxt_tc_flow_node *flow_node)
493 {
494         struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
495         struct bnxt_tc_info *tc_info = &bp->tc_info;
496         int rc;
497
498         /* remove flow_node from the L2 shared flow list */
499         list_del(&flow_node->l2_list_node);
500         if (--l2_node->refcount == 0) {
501                 rc =  rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
502                                              tc_info->l2_ht_params);
503                 if (rc)
504                         netdev_err(bp->dev,
505                                    "Error: %s: rhashtable_remove_fast: %d",
506                                    __func__, rc);
507                 kfree_rcu(l2_node, rcu);
508         }
509         return 0;
510 }
511
512 static struct bnxt_tc_l2_node *
513 bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
514                     struct rhashtable_params ht_params,
515                     struct bnxt_tc_l2_key *l2_key)
516 {
517         struct bnxt_tc_l2_node *l2_node;
518         int rc;
519
520         l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
521         if (!l2_node) {
522                 l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
523                 if (!l2_node) {
524                         rc = -ENOMEM;
525                         return NULL;
526                 }
527
528                 l2_node->key = *l2_key;
529                 rc = rhashtable_insert_fast(l2_table, &l2_node->node,
530                                             ht_params);
531                 if (rc) {
532                         kfree(l2_node);
533                         netdev_err(bp->dev,
534                                    "Error: %s: rhashtable_insert_fast: %d",
535                                    __func__, rc);
536                         return NULL;
537                 }
538                 INIT_LIST_HEAD(&l2_node->common_l2_flows);
539         }
540         return l2_node;
541 }
542
543 /* Get the ref_flow_handle for a flow by checking if there are any other
544  * flows that share the same L2 key as this flow.
545  */
546 static int
547 bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
548                             struct bnxt_tc_flow_node *flow_node,
549                             __le16 *ref_flow_handle)
550 {
551         struct bnxt_tc_info *tc_info = &bp->tc_info;
552         struct bnxt_tc_flow_node *ref_flow_node;
553         struct bnxt_tc_l2_node *l2_node;
554
555         l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
556                                       tc_info->l2_ht_params,
557                                       &flow->l2_key);
558         if (!l2_node)
559                 return -1;
560
561         /* If any other flow is using this l2_node, use it's flow_handle
562          * as the ref_flow_handle
563          */
564         if (l2_node->refcount > 0) {
565                 ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
566                                                  struct bnxt_tc_flow_node,
567                                                  l2_list_node);
568                 *ref_flow_handle = ref_flow_node->flow_handle;
569         } else {
570                 *ref_flow_handle = cpu_to_le16(0xffff);
571         }
572
573         /* Insert the l2_node into the flow_node so that subsequent flows
574          * with a matching l2 key can use the flow_handle of this flow
575          * as their ref_flow_handle
576          */
577         flow_node->l2_node = l2_node;
578         list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
579         l2_node->refcount++;
580         return 0;
581 }
582
583 /* After the flow parsing is done, this routine is used for checking
584  * if there are any aspects of the flow that prevent it from being
585  * offloaded.
586  */
587 static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
588 {
589         /* If L4 ports are specified then ip_proto must be TCP or UDP */
590         if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
591             (flow->l4_key.ip_proto != IPPROTO_TCP &&
592              flow->l4_key.ip_proto != IPPROTO_UDP)) {
593                 netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
594                             flow->l4_key.ip_proto);
595                 return false;
596         }
597
598         return true;
599 }
600
601 static int __bnxt_tc_del_flow(struct bnxt *bp,
602                               struct bnxt_tc_flow_node *flow_node)
603 {
604         struct bnxt_tc_info *tc_info = &bp->tc_info;
605         int rc;
606
607         /* send HWRM cmd to free the flow-id */
608         bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);
609
610         mutex_lock(&tc_info->lock);
611
612         /* release reference to l2 node */
613         bnxt_tc_put_l2_node(bp, flow_node);
614
615         mutex_unlock(&tc_info->lock);
616
617         rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
618                                     tc_info->flow_ht_params);
619         if (rc)
620                 netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
621                            __func__, rc);
622
623         kfree_rcu(flow_node, rcu);
624         return 0;
625 }
626
/* Add a new flow or replace an existing flow.
 * Notes on locking:
 * There are essentially two critical sections here.
 * 1. while adding a new flow
 *    a) lookup l2-key
 *    b) issue HWRM cmd and get flow_handle
 *    c) link l2-key with flow
 * 2. while deleting a flow
 *    a) unlinking l2-key from flow
 * A lock is needed to protect these two critical sections.
 *
 * The hash-tables are already protected by the rhashtable API.
 */
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_node *new_node, *old_node;
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct bnxt_tc_flow *flow;
	__le16 ref_flow_handle;
	int rc;

	/* allocate memory for the new flow and it's node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node) {
		rc = -ENOMEM;
		goto done;
	}
	new_node->cookie = tc_flow_cmd->cookie;
	flow = &new_node->flow;

	/* translate the TC match and actions into our flow struct */
	rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
	if (rc)
		goto free_node;
	flow->src_fid = src_fid;

	if (!bnxt_tc_can_offload(bp, flow)) {
		rc = -ENOSPC;
		goto free_node;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		__bnxt_tc_del_flow(bp, old_node);

	/* Check if the L2 part of the flow has been offloaded already.
	 * If so, bump up it's refcnt and get it's reference handle.
	 */
	mutex_lock(&tc_info->lock);
	rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
	if (rc)
		goto unlock;

	/* send HWRM cmd to alloc the flow */
	rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
				      &new_node->flow_handle);
	if (rc)
		goto put_l2;

	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		goto hwrm_flow_free;

	mutex_unlock(&tc_info->lock);
	return 0;

	/* error unwinding: each label undoes one successful step above,
	 * falling through to the next; all paths end at done which logs
	 * the failure
	 */
hwrm_flow_free:
	bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
put_l2:
	bnxt_tc_put_l2_node(bp, new_node);
unlock:
	mutex_unlock(&tc_info->lock);
free_node:
	kfree(new_node);
done:
	netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
		   __func__, tc_flow_cmd->cookie, rc);
	return rc;
}
711
712 static int bnxt_tc_del_flow(struct bnxt *bp,
713                             struct tc_cls_flower_offload *tc_flow_cmd)
714 {
715         struct bnxt_tc_info *tc_info = &bp->tc_info;
716         struct bnxt_tc_flow_node *flow_node;
717
718         flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
719                                            &tc_flow_cmd->cookie,
720                                            tc_info->flow_ht_params);
721         if (!flow_node) {
722                 netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
723                             tc_flow_cmd->cookie);
724                 return -EINVAL;
725         }
726
727         return __bnxt_tc_del_flow(bp, flow_node);
728 }
729
730 static int bnxt_tc_get_flow_stats(struct bnxt *bp,
731                                   struct tc_cls_flower_offload *tc_flow_cmd)
732 {
733         struct bnxt_tc_info *tc_info = &bp->tc_info;
734         struct bnxt_tc_flow_node *flow_node;
735         struct bnxt_tc_flow_stats stats;
736         int rc;
737
738         flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
739                                            &tc_flow_cmd->cookie,
740                                            tc_info->flow_ht_params);
741         if (!flow_node) {
742                 netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
743                             tc_flow_cmd->cookie);
744                 return -1;
745         }
746
747         rc = bnxt_hwrm_cfa_flow_stats_get(bp, flow_node->flow_handle,
748                                           &flow_node->flow, &stats);
749         if (rc)
750                 return rc;
751
752         tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets, 0);
753         return 0;
754 }
755
756 int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
757                          struct tc_cls_flower_offload *cls_flower)
758 {
759         int rc = 0;
760
761         if (!is_classid_clsact_ingress(cls_flower->common.classid) ||
762             cls_flower->common.chain_index)
763                 return -EOPNOTSUPP;
764
765         switch (cls_flower->command) {
766         case TC_CLSFLOWER_REPLACE:
767                 rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
768                 break;
769
770         case TC_CLSFLOWER_DESTROY:
771                 rc = bnxt_tc_del_flow(bp, cls_flower);
772                 break;
773
774         case TC_CLSFLOWER_STATS:
775                 rc = bnxt_tc_get_flow_stats(bp, cls_flower);
776                 break;
777         }
778         return rc;
779 }
780
/* Flow table: keyed by the TC filter cookie (unique per filter) */
static const struct rhashtable_params bnxt_tc_flow_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_flow_node, node),
	.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
	.key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
	.automatic_shrinking = true
};
787
/* L2 table: keyed by the flow's L2 key so flows sharing an L2 match
 * can share a reference flow handle
 */
static const struct rhashtable_params bnxt_tc_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};
794
795 /* convert counter width in bits to a mask */
796 #define mask(width)             ((u64)~0 >> (64 - (width)))
797
798 int bnxt_init_tc(struct bnxt *bp)
799 {
800         struct bnxt_tc_info *tc_info = &bp->tc_info;
801         int rc;
802
803         if (bp->hwrm_spec_code < 0x10800) {
804                 netdev_warn(bp->dev,
805                             "Firmware does not support TC flower offload.\n");
806                 return -ENOTSUPP;
807         }
808         mutex_init(&tc_info->lock);
809
810         /* Counter widths are programmed by FW */
811         tc_info->bytes_mask = mask(36);
812         tc_info->packets_mask = mask(28);
813
814         tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
815         rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
816         if (rc)
817                 return rc;
818
819         tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
820         rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
821         if (rc)
822                 goto destroy_flow_table;
823
824         tc_info->enabled = true;
825         bp->dev->hw_features |= NETIF_F_HW_TC;
826         bp->dev->features |= NETIF_F_HW_TC;
827         return 0;
828
829 destroy_flow_table:
830         rhashtable_destroy(&tc_info->flow_table);
831         return rc;
832 }
833
834 void bnxt_shutdown_tc(struct bnxt *bp)
835 {
836         struct bnxt_tc_info *tc_info = &bp->tc_info;
837
838         if (!tc_info->enabled)
839                 return;
840
841         rhashtable_destroy(&tc_info->flow_table);
842         rhashtable_destroy(&tc_info->l2_table);
843 }
844
845 #else
846 #endif