1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2019 Chelsio Communications. All rights reserved. */
5 #include "cxgb4_tc_matchall.h"
8 #include "cxgb4_filter.h"
9 #include "cxgb4_tc_flower.h"
/* Validate an egress MATCHALL offload request before any hardware
 * resources are committed.  Requirements enforced here:
 *   - at least one action, and exactly one action overall;
 *   - the TC block must not be shared across ports;
 *   - the policing rate (converted bytes/s -> bits/s) must not exceed
 *     the link's maximum speed as reported by t4_get_link_params().
 * On failure an extack message is set for userspace.
 * NOTE(review): this chunk has lines elided (error-return statements,
 * local declarations such as ret/speed/max_link_rate/i, the switch
 * header, and closing braces) — confirm against the full file.
 */
11 static int cxgb4_matchall_egress_validate(struct net_device *dev,
12 struct tc_cls_matchall_offload *cls)
14 struct netlink_ext_ack *extack = cls->common.extack;
15 struct flow_action *actions = &cls->rule->action;
16 struct port_info *pi = netdev2pinfo(dev);
17 struct flow_action_entry *entry;
/* Reject malformed requests up front with a descriptive extack. */
22 if (!flow_action_has_entries(actions)) {
23 NL_SET_ERR_MSG_MOD(extack,
24 "Egress MATCHALL offload needs at least 1 policing action");
26 } else if (!flow_offload_has_one_action(actions)) {
27 NL_SET_ERR_MSG_MOD(extack,
28 "Egress MATCHALL offload only supports 1 policing action");
30 } else if (pi->tc_block_shared) {
31 NL_SET_ERR_MSG_MOD(extack,
32 "Egress MATCHALL offload not supported with shared blocks");
/* Query the link's maximum supported speed (in Mbps). */
36 ret = t4_get_link_params(pi, NULL, &speed, NULL);
38 NL_SET_ERR_MSG_MOD(extack,
39 "Failed to get max speed supported by the link");
43 /* Convert from Mbps to bps */
44 max_link_rate = (u64)speed * 1000 * 1000;
/* Walk the actions; only FLOW_ACTION_POLICE is accepted, and its
 * rate must fit within the physical link speed.
 */
46 flow_action_for_each(i, entry, actions) {
48 case FLOW_ACTION_POLICE:
49 /* Convert bytes per second to bits per second */
50 if (entry->police.rate_bytes_ps * 8 > max_link_rate) {
51 NL_SET_ERR_MSG_MOD(extack,
52 "Specified policing max rate is larger than underlying link speed");
/* Any non-police action falls through to this rejection. */
57 NL_SET_ERR_MSG_MOD(extack,
58 "Only policing action supported with Egress MATCHALL offload");
/* Allocate a hardware Tx scheduling class implementing the egress
 * policing rate.  The class is a channel rate-limit (CH_RL) level,
 * absolute rate mode, in bits; maxrate is taken from the (already
 * validated) FLOW_ACTION_POLICE entry.  On success the per-port
 * egress state records the class index and the rule cookie, and the
 * state is marked ENABLED.
 * NOTE(review): elided lines include the function-opening brace, an
 * early 'break' after finding the police entry, and the error path
 * when cxgb4_sched_class_alloc() fails — verify against the full file.
 */
66 static int cxgb4_matchall_alloc_tc(struct net_device *dev,
67 struct tc_cls_matchall_offload *cls)
69 struct ch_sched_params p = {
70 .type = SCHED_CLASS_TYPE_PACKET,
71 .u.params.level = SCHED_CLASS_LEVEL_CH_RL,
72 .u.params.mode = SCHED_CLASS_MODE_CLASS,
73 .u.params.rateunit = SCHED_CLASS_RATEUNIT_BITS,
74 .u.params.ratemode = SCHED_CLASS_RATEMODE_ABS,
75 .u.params.class = SCHED_CLS_NONE,
76 .u.params.minrate = 0,
78 .u.params.pktsize = dev->mtu,
80 struct netlink_ext_ack *extack = cls->common.extack;
81 struct cxgb4_tc_port_matchall *tc_port_matchall;
82 struct port_info *pi = netdev2pinfo(dev);
83 struct adapter *adap = netdev2adap(dev);
84 struct flow_action_entry *entry;
85 struct sched_class *e;
88 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
/* Locate the single police action (validation guarantees exactly one). */
90 flow_action_for_each(i, entry, &cls->rule->action)
91 if (entry->id == FLOW_ACTION_POLICE)
94 /* Convert from bytes per second to Kbps */
95 p.u.params.maxrate = div_u64(entry->police.rate_bytes_ps * 8, 1000);
96 p.u.params.channel = pi->tx_chan;
97 e = cxgb4_sched_class_alloc(dev, &p);
99 NL_SET_ERR_MSG_MOD(extack,
100 "No free traffic class available for policing action");
/* Record the hardware class and rule identity so destroy/replace can
 * match and free it later.
 */
104 tc_port_matchall->egress.hwtc = e->idx;
105 tc_port_matchall->egress.cookie = cls->cookie;
106 tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_ENABLED;
/* Release the Tx scheduling class backing the egress MATCHALL rule and
 * reset the per-port egress bookkeeping to its disabled state.
 */
110 static void cxgb4_matchall_free_tc(struct net_device *dev)
112 struct cxgb4_tc_port_matchall *tc_port_matchall;
113 struct port_info *pi = netdev2pinfo(dev);
114 struct adapter *adap = netdev2adap(dev);
116 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
117 cxgb4_sched_class_free(dev, tc_port_matchall->egress.hwtc);
/* Clear state so a subsequent egress MATCHALL replace is accepted. */
119 tc_port_matchall->egress.hwtc = SCHED_CLS_NONE;
120 tc_port_matchall->egress.cookie = 0;
121 tc_port_matchall->egress.state = CXGB4_MATCHALL_STATE_DISABLED;
/* Install a wildcard LETCAM filter implementing the ingress MATCHALL
 * rule.  The filter slot is derived from the TC priority (prio - 1,
 * since TC priorities start at 1 but the TCAM index starts at 0), or a
 * free ftid is picked when the priority exceeds the ftid region.  The
 * filter matches this function's PF/VF so it only sees this port's
 * traffic; the rule's flow actions are then translated into the filter
 * spec and programmed via cxgb4_set_filter().
 * NOTE(review): elided lines include local declarations (fidx, ret),
 * the fidx validity check preceding the prio-in-range test, and the
 * error-return paths — verify against the full file.
 */
124 static int cxgb4_matchall_alloc_filter(struct net_device *dev,
125 struct tc_cls_matchall_offload *cls)
127 struct netlink_ext_ack *extack = cls->common.extack;
128 struct cxgb4_tc_port_matchall *tc_port_matchall;
129 struct port_info *pi = netdev2pinfo(dev);
130 struct adapter *adap = netdev2adap(dev);
131 struct ch_filter_specification *fs;
134 /* Note that TC uses prio 0 to indicate stack to generate
135 * automatic prio and hence doesn't pass prio 0 to driver.
136 * However, the hardware TCAM index starts from 0. Hence, the
137 * -1 here. 1 slot is enough to create a wildcard matchall
140 if (cls->common.prio <= adap->tids.nftids)
141 fidx = cls->common.prio - 1;
143 fidx = cxgb4_get_free_ftid(dev, PF_INET);
145 /* Only insert MATCHALL rule if its priority doesn't conflict
146 * with existing rules in the LETCAM.
149 !cxgb4_filter_prio_in_range(dev, fidx, cls->common.prio)) {
150 NL_SET_ERR_MSG_MOD(extack,
151 "No free LETCAM index available");
155 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
156 fs = &tc_port_matchall->ingress.fs;
157 memset(fs, 0, sizeof(*fs));
/* Preserve TC identity in the filter spec for later lookup/teardown. */
159 fs->tc_prio = cls->common.prio;
160 fs->tc_cookie = cls->cookie;
/* Match only traffic destined to this function (PF/VF). */
163 fs->val.pfvf_vld = 1;
164 fs->val.pf = adap->pf;
165 fs->val.vf = pi->vin;
167 cxgb4_process_flow_actions(dev, &cls->rule->action, fs);
169 ret = cxgb4_set_filter(dev, fidx, fs);
/* Success: remember the tid and mark ingress offload active. */
173 tc_port_matchall->ingress.tid = fidx;
174 tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_ENABLED;
/* Remove the ingress MATCHALL filter from hardware and reset all
 * per-port ingress bookkeeping (stats caches, tid, state).
 * NOTE(review): the error-return check after cxgb4_del_filter() is
 * elided in this chunk — presumably failure returns early without
 * clearing state; verify against the full file.
 */
178 static int cxgb4_matchall_free_filter(struct net_device *dev)
180 struct cxgb4_tc_port_matchall *tc_port_matchall;
181 struct port_info *pi = netdev2pinfo(dev);
182 struct adapter *adap = netdev2adap(dev);
185 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
187 ret = cxgb4_del_filter(dev, tc_port_matchall->ingress.tid,
188 &tc_port_matchall->ingress.fs);
/* Reset cached counters so a future rule starts stats from zero. */
192 tc_port_matchall->ingress.packets = 0;
193 tc_port_matchall->ingress.bytes = 0;
194 tc_port_matchall->ingress.last_used = 0;
195 tc_port_matchall->ingress.tid = 0;
196 tc_port_matchall->ingress.state = CXGB4_MATCHALL_STATE_DISABLED;
/* Offload (replace) a MATCHALL rule.  Only one ingress and one egress
 * MATCHALL rule may be offloaded per port; a second request of either
 * kind is rejected with an extack message.  Ingress rules become LETCAM
 * filters (after action validation); egress rules become Tx scheduling
 * classes (after rate validation).
 * NOTE(review): the third parameter (visible as a trailing comma on the
 * signature) and the ingress-vs-egress branch condition are elided in
 * this chunk, as are the error returns — verify against the full file.
 */
200 int cxgb4_tc_matchall_replace(struct net_device *dev,
201 struct tc_cls_matchall_offload *cls_matchall,
204 struct netlink_ext_ack *extack = cls_matchall->common.extack;
205 struct cxgb4_tc_port_matchall *tc_port_matchall;
206 struct port_info *pi = netdev2pinfo(dev);
207 struct adapter *adap = netdev2adap(dev);
210 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
/* Ingress path: validate actions, then program the wildcard filter. */
212 if (tc_port_matchall->ingress.state ==
213 CXGB4_MATCHALL_STATE_ENABLED) {
214 NL_SET_ERR_MSG_MOD(extack,
215 "Only 1 Ingress MATCHALL can be offloaded")
219 ret = cxgb4_validate_flow_actions(dev,
220 &cls_matchall->rule->action);
224 return cxgb4_matchall_alloc_filter(dev, cls_matchall);
/* Egress path: validate the policing rate, then allocate a sched class. */
227 if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED) {
228 NL_SET_ERR_MSG_MOD(extack,
229 "Only 1 Egress MATCHALL can be offloaded");
233 ret = cxgb4_matchall_egress_validate(dev, cls_matchall);
237 return cxgb4_matchall_alloc_tc(dev, cls_matchall);
/* Tear down an offloaded MATCHALL rule.  The request's cookie is
 * matched against the stored ingress filter cookie (fs.tc_cookie) or
 * the stored egress cookie to decide which resource to release; a
 * mismatched cookie is rejected.
 * NOTE(review): the third parameter, the ingress/egress dispatch
 * condition, and the error/success returns are elided in this chunk —
 * verify against the full file.
 */
240 int cxgb4_tc_matchall_destroy(struct net_device *dev,
241 struct tc_cls_matchall_offload *cls_matchall,
244 struct cxgb4_tc_port_matchall *tc_port_matchall;
245 struct port_info *pi = netdev2pinfo(dev);
246 struct adapter *adap = netdev2adap(dev);
248 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
/* Ingress teardown: cookie must match the installed filter's cookie. */
250 if (cls_matchall->cookie !=
251 tc_port_matchall->ingress.fs.tc_cookie)
254 return cxgb4_matchall_free_filter(dev);
/* Egress teardown: cookie must match the recorded egress cookie. */
257 if (cls_matchall->cookie != tc_port_matchall->egress.cookie)
260 cxgb4_matchall_free_tc(dev);
/* Report hardware counters for the offloaded ingress MATCHALL rule
 * back to TC.  Counters are read via cxgb4_get_filter_counters(); only
 * the delta since the last read is pushed to flow_stats_update(), and
 * the cached totals/last_used timestamp are refreshed when the packet
 * count has advanced.
 * NOTE(review): local declarations (packets, bytes, ret), the
 * &packets/&bytes arguments to the counter read, the early return when
 * the rule is disabled, and error-return paths are elided in this
 * chunk — verify against the full file.
 */
264 int cxgb4_tc_matchall_stats(struct net_device *dev,
265 struct tc_cls_matchall_offload *cls_matchall)
267 struct cxgb4_tc_port_matchall *tc_port_matchall;
268 struct port_info *pi = netdev2pinfo(dev);
269 struct adapter *adap = netdev2adap(dev);
273 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
274 if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_DISABLED)
277 ret = cxgb4_get_filter_counters(dev, tc_port_matchall->ingress.tid,
279 tc_port_matchall->ingress.fs.hash);
/* Only report when traffic actually hit the rule since last poll. */
283 if (tc_port_matchall->ingress.packets != packets) {
284 flow_stats_update(&cls_matchall->stats,
285 bytes - tc_port_matchall->ingress.bytes,
286 packets - tc_port_matchall->ingress.packets,
287 tc_port_matchall->ingress.last_used);
289 tc_port_matchall->ingress.packets = packets;
290 tc_port_matchall->ingress.bytes = bytes;
291 tc_port_matchall->ingress.last_used = jiffies;
/* Tear down any active MATCHALL offloads (egress sched class and/or
 * ingress filter) on this port; used during adapter cleanup.
 */
297 static void cxgb4_matchall_disable_offload(struct net_device *dev)
299 struct cxgb4_tc_port_matchall *tc_port_matchall;
300 struct port_info *pi = netdev2pinfo(dev);
301 struct adapter *adap = netdev2adap(dev);
303 tc_port_matchall = &adap->tc_matchall->port_matchall[pi->port_id];
304 if (tc_port_matchall->egress.state == CXGB4_MATCHALL_STATE_ENABLED)
305 cxgb4_matchall_free_tc(dev);
307 if (tc_port_matchall->ingress.state == CXGB4_MATCHALL_STATE_ENABLED)
308 cxgb4_matchall_free_filter(dev);
/* Allocate the adapter-wide MATCHALL state: one cxgb4_tc_matchall
 * container plus a zeroed per-port array sized by params.nports.
 * NOTE(review): the NULL check after kzalloc(), the GFP_KERNEL argument
 * to kcalloc(), the -ENOMEM assignment, the success return, and the
 * out_free_matchall label body are elided in this chunk — verify
 * against the full file.
 */
311 int cxgb4_init_tc_matchall(struct adapter *adap)
313 struct cxgb4_tc_port_matchall *tc_port_matchall;
314 struct cxgb4_tc_matchall *tc_matchall;
317 tc_matchall = kzalloc(sizeof(*tc_matchall), GFP_KERNEL);
321 tc_port_matchall = kcalloc(adap->params.nports,
322 sizeof(*tc_port_matchall),
324 if (!tc_port_matchall) {
326 goto out_free_matchall;
/* Publish the fully-initialized state on the adapter last. */
329 tc_matchall->port_matchall = tc_port_matchall;
330 adap->tc_matchall = tc_matchall;
/* Free all MATCHALL state: disable any live offload on every port,
 * then release the per-port array and the container.  Safe to call
 * when tc_matchall was never allocated (NULL-checked).
 * NOTE(review): the loop-variable declaration, a likely NULL check on
 * adap->port[i] before disabling, and closing braces are elided in
 * this chunk — verify against the full file.
 */
338 void cxgb4_cleanup_tc_matchall(struct adapter *adap)
342 if (adap->tc_matchall) {
343 if (adap->tc_matchall->port_matchall) {
344 for (i = 0; i < adap->params.nports; i++) {
345 struct net_device *dev = adap->port[i];
348 cxgb4_matchall_disable_offload(dev);
350 kfree(adap->tc_matchall->port_matchall);
352 kfree(adap->tc_matchall);