1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2019 Netronome Systems, Inc. */
4 #include <linux/math64.h>
5 #include <net/pkt_cls.h>
6 #include <net/pkt_sched.h>
10 #include "../nfp_port.h"
12 #define NFP_FL_QOS_UPDATE msecs_to_jiffies(1000)
14 struct nfp_police_cfg_head {
19 /* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
20 * See RFC 2698 for more details.
21 * ----------------------------------------------------------------
23 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
24 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
26 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
28 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
29 * | Token Bucket Peak |
30 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
31 * | Token Bucket Committed |
32 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
34 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
35 * | Committed Burst Size |
36 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
37 * | Peak Information Rate |
38 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
39 * | Committed Information Rate |
40 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
42 struct nfp_police_config {
43 struct nfp_police_cfg_head head;
52 struct nfp_police_stats_reply {
53 struct nfp_police_cfg_head head;
/* nfp_flower_install_rate_limiter() - offload a TC matchall police action as
 * a firmware trTCM rate limiter on a VF representor port.
 *
 * Validates the request (must target an NFP repr netdev, non-shared TC block,
 * VF port, exactly one action of type FLOW_ACTION_POLICE at highest priority,
 * byte-rate only — packet-per-second policing is rejected), then builds an
 * NFP_FLOWER_CMSG_TYPE_QOS_MOD control message and hands it to the firmware
 * via the control channel.  Installing the first limiter starts the periodic
 * stats-polling delayed work.
 *
 * NOTE(review): this excerpt elides several original lines (the error
 * returns after each rejected check, some local declarations, closing
 * braces); the error paths are not fully visible here.
 */
61 nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
62 struct tc_cls_matchall_offload *flow,
63 struct netlink_ext_ack *extack)
65 struct flow_action_entry *action = &flow->rule->action.entries[0];
66 struct nfp_flower_priv *fl_priv = app->priv;
67 struct nfp_flower_repr_priv *repr_priv;
68 struct nfp_police_config *config;
69 struct nfp_repr *repr;
/* Reject anything that is not a representor — the limiter is programmed
 * per physical/VF port in firmware.
 */
75 if (!nfp_netdev_is_nfp_repr(netdev)) {
76 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
79 repr = netdev_priv(netdev);
80 repr_priv = repr->app_priv;
82 if (repr_priv->block_shared) {
83 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
87 if (repr->port->type != NFP_PORT_VF_PORT) {
88 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
92 if (!flow_offload_has_one_action(&flow->rule->action)) {
93 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action");
97 if (flow->common.prio != 1) {
98 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
102 if (action->id != FLOW_ACTION_POLICE) {
103 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action");
/* Only byte-based policing is supported by this firmware interface. */
107 if (action->police.rate_pkt_ps) {
108 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not support packets per second");
112 rate = action->police.rate_bytes_ps;
113 burst = action->police.burst;
114 netdev_port_id = nfp_repr_get_port_id(netdev);
116 skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
117 NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
/* Program the single TC rate/burst pair into both the peak and the
 * committed halves of the trTCM conditioner (see RFC 2698); all values
 * are big-endian on the wire.
 */
121 config = nfp_flower_cmsg_get_data(skb);
122 memset(config, 0, sizeof(struct nfp_police_config));
123 config->head.port = cpu_to_be32(netdev_port_id);
124 config->bkt_tkn_p = cpu_to_be32(burst);
125 config->bkt_tkn_c = cpu_to_be32(burst);
126 config->pbs = cpu_to_be32(burst);
127 config->cbs = cpu_to_be32(burst);
128 config->pir = cpu_to_be32(rate);
129 config->cir = cpu_to_be32(rate);
130 nfp_ctrl_tx(repr->app->ctrl, skb);
/* Record the limiter and, if this is the first one in the system, start
 * the periodic firmware stats polling.
 */
132 repr_priv->qos_table.netdev_port_id = netdev_port_id;
133 fl_priv->qos_rate_limiters++;
134 if (fl_priv->qos_rate_limiters == 1)
135 schedule_delayed_work(&fl_priv->qos_stats_work,
/* nfp_flower_remove_rate_limiter() - remove a previously installed rate
 * limiter from a representor port.
 *
 * Verifies a limiter entry exists for the netdev, clears the driver-side
 * qos bookkeeping, stops the stats-polling work when the last limiter is
 * removed, and sends an NFP_FLOWER_CMSG_TYPE_QOS_DEL message (carrying only
 * the port id) to the firmware.
 *
 * NOTE(review): excerpt elides the error-return lines after each failed
 * check and the skb-allocation failure path.
 */
142 nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
143 struct tc_cls_matchall_offload *flow,
144 struct netlink_ext_ack *extack)
146 struct nfp_flower_priv *fl_priv = app->priv;
147 struct nfp_flower_repr_priv *repr_priv;
148 struct nfp_police_config *config;
149 struct nfp_repr *repr;
153 if (!nfp_netdev_is_nfp_repr(netdev)) {
154 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
157 repr = netdev_priv(netdev);
159 netdev_port_id = nfp_repr_get_port_id(netdev);
160 repr_priv = repr->app_priv;
/* A zero port id in the qos table means no limiter was installed. */
162 if (!repr_priv->qos_table.netdev_port_id) {
163 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
167 skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
168 NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
172 /* Clear all qos associated data for this interface */
173 memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
174 fl_priv->qos_rate_limiters--;
/* Last limiter gone: stop polling firmware for stats. */
175 if (!fl_priv->qos_rate_limiters)
176 cancel_delayed_work_sync(&fl_priv->qos_stats_work);
/* The DEL message only needs the port id; the rest stays zeroed. */
178 config = nfp_flower_cmsg_get_data(skb);
179 memset(config, 0, sizeof(struct nfp_police_config));
180 config->head.port = cpu_to_be32(netdev_port_id);
181 nfp_ctrl_tx(repr->app->ctrl, skb);
/* nfp_flower_stats_rlim_reply() - handle a QOS stats reply cmsg from
 * firmware.
 *
 * Resolves the netdev from the port id carried in the message head, then
 * folds the firmware's pass+drop counters into the per-repr cached current
 * stats under qos_stats_lock.  On the very first update (last_update still
 * zero) prev is synced to curr so the first dump reports zero deltas.
 *
 * NOTE(review): excerpt elides the rcu_read_lock()/unlock() pair implied by
 * the exit_unlock_rcu label and the null-netdev check before the goto.
 */
186 void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
188 struct nfp_flower_priv *fl_priv = app->priv;
189 struct nfp_flower_repr_priv *repr_priv;
190 struct nfp_police_stats_reply *msg;
191 struct nfp_stat_pair *curr_stats;
192 struct nfp_stat_pair *prev_stats;
193 struct net_device *netdev;
194 struct nfp_repr *repr;
197 msg = nfp_flower_cmsg_get_data(skb);
198 netdev_port_id = be32_to_cpu(msg->head.port);
200 netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
202 goto exit_unlock_rcu;
204 repr = netdev_priv(netdev);
205 repr_priv = repr->app_priv;
206 curr_stats = &repr_priv->qos_table.curr_stats;
207 prev_stats = &repr_priv->qos_table.prev_stats;
/* Totals seen by the policer = passed + dropped, accumulated by FW. */
209 spin_lock_bh(&fl_priv->qos_stats_lock);
210 curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
211 be64_to_cpu(msg->drop_pkts);
212 curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
213 be64_to_cpu(msg->drop_bytes);
/* First reply for this limiter: baseline prev on curr so that the first
 * TC stats query does not report the whole lifetime counters as a delta.
 */
215 if (!repr_priv->qos_table.last_update) {
216 prev_stats->pkts = curr_stats->pkts;
217 prev_stats->bytes = curr_stats->bytes;
220 repr_priv->qos_table.last_update = jiffies;
221 spin_unlock_bh(&fl_priv->qos_stats_lock);
/* nfp_flower_stats_rlim_request() - ask firmware for the police stats of a
 * single port.
 *
 * Sends an NFP_FLOWER_CMSG_TYPE_QOS_STATS request containing just the cfg
 * head with the port id; the reply is processed asynchronously by
 * nfp_flower_stats_rlim_reply().
 *
 * NOTE(review): the allocation-failure check after nfp_flower_cmsg_alloc()
 * is elided in this excerpt.
 */
228 nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
231 struct nfp_police_cfg_head *head;
234 skb = nfp_flower_cmsg_alloc(fl_priv->app,
235 sizeof(struct nfp_police_cfg_head),
236 NFP_FLOWER_CMSG_TYPE_QOS_STATS,
241 head = nfp_flower_cmsg_get_data(skb);
242 memset(head, 0, sizeof(struct nfp_police_cfg_head));
243 head->port = cpu_to_be32(netdev_port_id);
245 nfp_ctrl_tx(fl_priv->app->ctrl, skb);
/* nfp_flower_stats_rlim_request_all() - request police stats for every VF
 * representor that has a rate limiter installed.
 *
 * Walks the VF repr set under RCU and fires one stats request per port
 * with a non-zero qos_table.netdev_port_id.
 *
 * NOTE(review): excerpt elides the rcu_read_lock()/unlock() implied by the
 * exit_unlock_rcu label, the per-netdev null check, and the skip of ports
 * with no limiter installed.
 */
249 nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
251 struct nfp_reprs *repr_set;
255 repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
257 goto exit_unlock_rcu;
259 for (i = 0; i < repr_set->num_reprs; i++) {
260 struct net_device *netdev;
262 netdev = rcu_dereference(repr_set->reprs[i]);
264 struct nfp_repr *priv = netdev_priv(netdev);
265 struct nfp_flower_repr_priv *repr_priv;
268 repr_priv = priv->app_priv;
269 netdev_port_id = repr_priv->qos_table.netdev_port_id;
273 nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
/* update_stats_cache() - delayed-work handler that keeps the cached police
 * stats fresh.
 *
 * Recovers the flower priv from the embedded delayed work, requests stats
 * for all limited ports, then re-arms itself to run again after
 * NFP_FL_QOS_UPDATE (1s).  The loop is broken by cancel_delayed_work_sync()
 * when the last limiter is removed.
 */
281 static void update_stats_cache(struct work_struct *work)
283 struct delayed_work *delayed_work;
284 struct nfp_flower_priv *fl_priv;
286 delayed_work = to_delayed_work(work);
287 fl_priv = container_of(delayed_work, struct nfp_flower_priv,
290 nfp_flower_stats_rlim_request_all(fl_priv);
/* Self-rescheduling: poll firmware once per NFP_FL_QOS_UPDATE period. */
291 schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
/* nfp_flower_stats_rate_limiter() - report rate-limiter stats to TC.
 *
 * Computes the packet/byte deltas between the cached current stats (updated
 * asynchronously from firmware replies) and the previously reported stats,
 * advances prev to curr, and feeds the deltas to flow_stats_update() with
 * FLOW_ACTION_HW_STATS_DELAYED semantics.  The delta computation and the
 * prev update are done atomically under qos_stats_lock so concurrent
 * firmware replies cannot be double-counted.
 *
 * NOTE(review): excerpt elides the error-return lines after the two
 * validation checks.
 */
295 nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
296 struct tc_cls_matchall_offload *flow,
297 struct netlink_ext_ack *extack)
299 struct nfp_flower_priv *fl_priv = app->priv;
300 struct nfp_flower_repr_priv *repr_priv;
301 struct nfp_stat_pair *curr_stats;
302 struct nfp_stat_pair *prev_stats;
303 u64 diff_bytes, diff_pkts;
304 struct nfp_repr *repr;
306 if (!nfp_netdev_is_nfp_repr(netdev)) {
307 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
310 repr = netdev_priv(netdev);
312 repr_priv = repr->app_priv;
313 if (!repr_priv->qos_table.netdev_port_id) {
314 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
/* Snapshot deltas and roll prev forward under the stats lock. */
318 spin_lock_bh(&fl_priv->qos_stats_lock);
319 curr_stats = &repr_priv->qos_table.curr_stats;
320 prev_stats = &repr_priv->qos_table.prev_stats;
321 diff_pkts = curr_stats->pkts - prev_stats->pkts;
322 diff_bytes = curr_stats->bytes - prev_stats->bytes;
323 prev_stats->pkts = curr_stats->pkts;
324 prev_stats->bytes = curr_stats->bytes;
325 spin_unlock_bh(&fl_priv->qos_stats_lock);
327 flow_stats_update(&flow->stats, diff_bytes, diff_pkts, 0,
328 repr_priv->qos_table.last_update,
329 FLOW_ACTION_HW_STATS_DELAYED);
/* nfp_flower_qos_init() - one-time setup of the qos offload state: the
 * stats spinlock and the (not yet scheduled) periodic stats work.
 */
333 void nfp_flower_qos_init(struct nfp_app *app)
335 struct nfp_flower_priv *fl_priv = app->priv;
337 spin_lock_init(&fl_priv->qos_stats_lock);
338 INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
/* nfp_flower_qos_cleanup() - teardown counterpart of nfp_flower_qos_init();
 * synchronously stops the stats-polling work (safe even if never scheduled).
 */
341 void nfp_flower_qos_cleanup(struct nfp_app *app)
343 struct nfp_flower_priv *fl_priv = app->priv;
345 cancel_delayed_work_sync(&fl_priv->qos_stats_work);
348 int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
349 struct tc_cls_matchall_offload *flow)
351 struct netlink_ext_ack *extack = flow->common.extack;
352 struct nfp_flower_priv *fl_priv = app->priv;
354 if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
355 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
359 switch (flow->command) {
360 case TC_CLSMATCHALL_REPLACE:
361 return nfp_flower_install_rate_limiter(app, netdev, flow,
363 case TC_CLSMATCHALL_DESTROY:
364 return nfp_flower_remove_rate_limiter(app, netdev, flow,
366 case TC_CLSMATCHALL_STATS:
367 return nfp_flower_stats_rate_limiter(app, netdev, flow,