1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
#include "cxgb4.h"
#include "cxgb4_tc_matchall.h"
#include "sched.h"
#include "cxgb4_uld.h"
#include "cxgb4_filter.h"
#include "cxgb4_tc_flower.h"
11 static int cxgb4_policer_validate(const struct flow_action *action,
12 const struct flow_action_entry *act,
13 struct netlink_ext_ack *extack)
15 if (act->police.exceed.act_id != FLOW_ACTION_DROP) {
16 NL_SET_ERR_MSG_MOD(extack,
17 "Offload not supported when exceed action is not drop");
21 if (act->police.notexceed.act_id != FLOW_ACTION_PIPE &&
22 act->police.notexceed.act_id != FLOW_ACTION_ACCEPT) {
23 NL_SET_ERR_MSG_MOD(extack,
24 "Offload not supported when conform action is not pipe or ok");
28 if (act->police.notexceed.act_id == FLOW_ACTION_ACCEPT &&
29 !flow_action_is_last_entry(action, act)) {
30 NL_SET_ERR_MSG_MOD(extack,
31 "Offload not supported when conform action is ok, but action is not last");
35 if (act->police.peakrate_bytes_ps ||
36 act->police.avrate || act->police.overhead) {
37 NL_SET_ERR_MSG_MOD(extack,
38 "Offload not supported when peakrate/avrate/overhead is configured");
42 if (act->police.rate_pkt_ps) {
43 NL_SET_ERR_MSG_MOD(extack,
44 "QoS offload not support packets per second");
51 static int cxgb4_matchall_egress_validate(struct net_device *dev,
52 struct tc_cls_matchall_offload *cls)
54 struct netlink_ext_ack *extack = cls->common.extack;
55 struct flow_action *actions = &cls->rule->action;
56 struct port_info *pi = netdev2pinfo(dev);
57 struct flow_action_entry *entry;
58 struct ch_sched_queue qe;
59 struct sched_class *e;
64 if (!flow_action_has_entries(actions)) {
65 NL_SET_ERR_MSG_MOD(extack,
66 "Egress MATCHALL offload needs at least 1 policing action");
68 } else if (!flow_offload_has_one_action(actions)) {
69 NL_SET_ERR_MSG_MOD(extack,
70 "Egress MATCHALL offload only supports 1 policing action");
72 } else if (pi->tc_block_shared) {
73 NL_SET_ERR_MSG_MOD(extack,
74 "Egress MATCHALL offload not supported with shared blocks");
78 ret = t4_get_link_params(pi, NULL, &speed, NULL);
80 NL_SET_ERR_MSG_MOD(extack,
81 "Failed to get max speed supported by the link");
85 /* Convert from Mbps to bps */
86 max_link_rate = (u64)speed * 1000 * 1000;
88 flow_action_for_each(i, entry, actions) {
90 case FLOW_ACTION_POLICE:
91 ret = cxgb4_policer_validate(actions, entry, extack);
95 /* Convert bytes per second to bits per second */
96 if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
97 NL_SET_ERR_MSG_MOD(extack,
98 "Specified policing max rate is larger than underlying link speed");
103 NL_SET_ERR_MSG_MOD(extack,
104 "Only policing action supported with Egress MATCHALL offload");
109 for (i = 0; i < pi->nqsets; i++) {
110 memset(&qe, 0, sizeof(qe));
113 e = cxgb4_sched_queue_lookup(dev, &qe);
114 if (e && e->info.u.params.level != SCHED_CLASS_LEVEL_CH_RL) {
115 NL_SET_ERR_MSG_MOD(extack,
116 "Some queues are already bound to different class");
124 static int cxgb4_matchall_tc_bind_queues(struct net_device *dev, u32 tc)
126 struct port_info *pi = netdev2pinfo(dev);
127 struct ch_sched_queue qe;
131 for (i = 0; i < pi->nqsets; i++) {
134 ret = cxgb4_sched_class_bind(dev, &qe, SCHED_QUEUE);
144 qe.class = SCHED_CLS_NONE;
145 cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
151 static void cxgb4_matchall_tc_unbind_queues(struct net_device *dev)
153 struct port_info *pi = netdev2pinfo(dev);
154 struct ch_sched_queue qe;
157 for (i = 0; i < pi->nqsets; i++) {
159 qe.class = SCHED_CLS_NONE;
160 cxgb4_sched_class_unbind(dev, &qe, SCHED_QUEUE);
164 static int cxgb4_matchall_alloc_tc(struct net_device *dev,
165 struct tc_cls_matchall_offload *cls)
167 struct ch_sched_params p = {
168 .type = SCHED_CLASS_TYPE_PACKET,
169 .u.params.level = SCHED_CLASS_LEVEL_CH_RL,
170 .u.params.mode = SCHED_CLASS_MODE_CLASS,
171 .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
172 .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
173 .u.params.class = SCHED_CLS_NONE,
174 .u.params.minrate = 0,
175 .u.params.weight = 0,
176 .u.params.pktsize = dev->mtu,
178 struct netlink_ext_ack *extack = cls->common.extack;
179 struct cxgb4_tc_port_matchall *tc_port_matchall;
180 struct port_info *pi = netdev2pinfo(dev);
181 struct adapter *adap = netdev2adap(dev);
182 struct flow_action_entry *entry;
183 struct sched_class *e;
187 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
189 flow_action_for_each(i, entry, &cls->rule->action)
190 if (entry->id == FLOW_ACTION_POLICE)
193 ret = cxgb4_policer_validate(&cls->rule->action, entry, extack);
197 /* Convert from bytes per second to Kbps */
198 p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
199 p.u.params.channel = pi->tx_chan;
200 e = cxgb4_sched_class_alloc(dev, &p);
202 NL_SET_ERR_MSG_MOD(extack,
203 "No free traffic class available for policing action");
207 ret = cxgb4_matchall_tc_bind_queues(dev, e->idx);
209 NL_SET_ERR_MSG_MOD(extack,
210 "Could not bind queues to traffic class");
214 tc_port_matchall->egress.hwtc = e->idx;
215 tc_port_matchall->egress.cookie = cls->cookie;
216 tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
220 cxgb4_sched_class_free(dev, e->idx);
224 static void cxgb4_matchall_free_tc(struct net_device *dev)
226 struct cxgb4_tc_port_matchall *tc_port_matchall;
227 struct port_info *pi = netdev2pinfo(dev);
228 struct adapter *adap = netdev2adap(dev);
230 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
231 cxgb4_matchall_tc_unbind_queues(dev);
232 cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
234 tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
235 tc_port_matchall->egress.cookie = 0;
236 tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
239 static int cxgb4_matchall_mirror_alloc(struct net_device *dev,
240 struct tc_cls_matchall_offload *cls)
242 struct netlink_ext_ack *extack = cls->common.extack;
243 struct cxgb4_tc_port_matchall *tc_port_matchall;
244 struct port_info *pi = netdev2pinfo(dev);
245 struct adapter *adap = netdev2adap(dev);
246 struct flow_action_entry *act;
250 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
251 flow_action_for_each(i, act, &cls->rule->action) {
252 if (act->id == FLOW_ACTION_MIRRED) {
253 ret = cxgb4_port_mirror_alloc(dev);
255 NL_SET_ERR_MSG_MOD(extack,
256 "Couldn't allocate mirror");
260 tc_port_matchall->ingress.viid_mirror = pi->viid_mirror;
268 static void cxgb4_matchall_mirror_free(struct net_device *dev)
270 struct cxgb4_tc_port_matchall *tc_port_matchall;
271 struct port_info *pi = netdev2pinfo(dev);
272 struct adapter *adap = netdev2adap(dev);
274 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
275 if (!tc_port_matchall->ingress.viid_mirror)
278 cxgb4_port_mirror_free(dev);
279 tc_port_matchall->ingress.viid_mirror = 0;
282 static int cxgb4_matchall_del_filter(struct net_device *dev, u8 filter_type)
284 struct cxgb4_tc_port_matchall *tc_port_matchall;
285 struct port_info *pi = netdev2pinfo(dev);
286 struct adapter *adap = netdev2adap(dev);
289 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
290 ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid[filter_type],
291 &tc_port_matchall->ingress.fs[filter_type]);
295 tc_port_matchall->ingress.tid[filter_type] = 0;
299 static int cxgb4_matchall_add_filter(struct net_device *dev,
300 struct tc_cls_matchall_offload *cls,
303 struct netlink_ext_ack *extack = cls->common.extack;
304 struct cxgb4_tc_port_matchall *tc_port_matchall;
305 struct port_info *pi = netdev2pinfo(dev);
306 struct adapter *adap = netdev2adap(dev);
307 struct ch_filter_specification *fs;
310 /* Get a free filter entry TID, where we can insert this new
311 * rule. Only insert rule if its prio doesn't conflict with
314 fidx = cxgb4_get_free_ftid(dev, filter_type ? PF_INET6 : PF_INET,
315 false, cls->common.prio);
317 NL_SET_ERR_MSG_MOD(extack,
318 "No free LETCAM index available");
322 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
323 fs = &tc_port_matchall->ingress.fs[filter_type];
324 memset(fs, 0, sizeof(*fs));
326 if (fidx < adap->tids.nhpftids)
328 fs->tc_prio = cls->common.prio;
329 fs->tc_cookie = cls->cookie;
330 fs->type = filter_type;
333 fs->val.pfvf_vld = 1;
334 fs->val.pf = adap->pf;
335 fs->val.vf = pi->vin;
337 cxgb4_process_flow_actions(dev, &cls->rule->action, fs);
339 ret = cxgb4_set_filter(dev, fidx, fs);
343 tc_port_matchall->ingress.tid[filter_type] = fidx;
347 static int cxgb4_matchall_alloc_filter(struct net_device *dev,
348 struct tc_cls_matchall_offload *cls)
350 struct cxgb4_tc_port_matchall *tc_port_matchall;
351 struct port_info *pi = netdev2pinfo(dev);
352 struct adapter *adap = netdev2adap(dev);
355 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
357 ret = cxgb4_matchall_mirror_alloc(dev, cls);
361 for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
362 ret = cxgb4_matchall_add_filter(dev, cls, i);
367 tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
372 cxgb4_matchall_del_filter(dev, i);
374 cxgb4_matchall_mirror_free(dev);
378 static int cxgb4_matchall_free_filter(struct net_device *dev)
380 struct cxgb4_tc_port_matchall *tc_port_matchall;
381 struct port_info *pi = netdev2pinfo(dev);
382 struct adapter *adap = netdev2adap(dev);
386 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
388 for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
389 ret = cxgb4_matchall_del_filter(dev, i);
394 cxgb4_matchall_mirror_free(dev);
396 tc_port_matchall->ingress.packets = 0;
397 tc_port_matchall->ingress.bytes = 0;
398 tc_port_matchall->ingress.last_used = 0;
399 tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
403 int cxgb4_tc_matchall_replace(struct net_device *dev,
404 struct tc_cls_matchall_offload *cls_matchall,
407 struct netlink_ext_ack *extack = cls_matchall->common.extack;
408 struct cxgb4_tc_port_matchall *tc_port_matchall;
409 struct port_info *pi = netdev2pinfo(dev);
410 struct adapter *adap = netdev2adap(dev);
413 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
415 if (tc_port_matchall->ingress.state ==
416 CXGB4_MATCHALL_STATE_ENABLED) {
417 NL_SET_ERR_MSG_MOD(extack,
418 "Only 1 Ingress MATCHALL can be offloaded");
422 ret = cxgb4_validate_flow_actions(dev,
423 &cls_matchall->rule->action,
428 return cxgb4_matchall_alloc_filter(dev, cls_matchall);
431 if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
432 NL_SET_ERR_MSG_MOD(extack,
433 "Only 1 Egress MATCHALL can be offloaded");
437 ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
441 return cxgb4_matchall_alloc_tc(dev, cls_matchall);
444 int cxgb4_tc_matchall_destroy(struct net_device *dev,
445 struct tc_cls_matchall_offload *cls_matchall,
448 struct cxgb4_tc_port_matchall *tc_port_matchall;
449 struct port_info *pi = netdev2pinfo(dev);
450 struct adapter *adap = netdev2adap(dev);
452 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
454 /* All the filter types of this matchall rule save the
455 * same cookie. So, checking for the first one is
458 if (cls_matchall->cookie !=
459 tc_port_matchall->ingress.fs[0].tc_cookie)
462 return cxgb4_matchall_free_filter(dev);
465 if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
468 cxgb4_matchall_free_tc(dev);
472 int cxgb4_tc_matchall_stats(struct net_device *dev,
473 struct tc_cls_matchall_offload *cls_matchall)
475 u64 tmp_packets, tmp_bytes, packets = 0, bytes = 0;
476 struct cxgb4_tc_port_matchall *tc_port_matchall;
477 struct cxgb4_matchall_ingress_entry *ingress;
478 struct port_info *pi = netdev2pinfo(dev);
479 struct adapter *adap = netdev2adap(dev);
483 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
484 if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
487 ingress = &tc_port_matchall->ingress;
488 for (i = 0; i < CXGB4_FILTER_TYPE_MAX; i++) {
489 ret = cxgb4_get_filter_counters(dev, ingress->tid[i],
490 &tmp_packets, &tmp_bytes,
491 ingress->fs[i].hash);
495 packets += tmp_packets;
499 if (tc_port_matchall->ingress.packets != packets) {
500 flow_stats_update(&cls_matchall->stats,
501 bytes - tc_port_matchall->ingress.bytes,
502 packets - tc_port_matchall->ingress.packets,
503 0, tc_port_matchall->ingress.last_used,
504 FLOW_ACTION_HW_STATS_IMMEDIATE);
506 tc_port_matchall->ingress.packets = packets;
507 tc_port_matchall->ingress.bytes = bytes;
508 tc_port_matchall->ingress.last_used = jiffies;
514 static void cxgb4_matchall_disable_offload(struct net_device *dev)
516 struct cxgb4_tc_port_matchall *tc_port_matchall;
517 struct port_info *pi = netdev2pinfo(dev);
518 struct adapter *adap = netdev2adap(dev);
520 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
521 if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
522 cxgb4_matchall_free_tc(dev);
524 if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
525 cxgb4_matchall_free_filter(dev);
528 int cxgb4_init_tc_matchall(struct adapter *adap)
530 struct cxgb4_tc_port_matchall *tc_port_matchall;
531 struct cxgb4_tc_matchall *tc_matchall;
534 tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
538 tc_port_matchall = kcalloc(adap->params.nports,
539 sizeof(*tc_port_matchall),
541 if (!tc_port_matchall) {
543 goto out_free_matchall;
546 tc_matchall->port_matchall = tc_port_matchall;
547 adap->tc_matchall = tc_matchall;
555 void cxgb4_cleanup_tc_matchall(struct adapter *adap)
559 if (adap->tc_matchall) {
560 if (adap->tc_matchall->port_matchall) {
561 for (i = 0; i < adap->params.nports; i++) {
562 struct net_device *dev = adap->port[i];
565 cxgb4_matchall_disable_offload(dev);
567 kfree(adap->tc_matchall->port_matchall);
569 kfree(adap->tc_matchall);