/* Merge git://git.kernel.org/pub/scm/linux/kernel/git/pablo/nf-next
 * [linux-2.6-microblaze.git] / drivers / net / ethernet / netronome / nfp / flower / qos_conf.c
 */
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2019 Netronome Systems, Inc. */
3
4 #include <linux/math64.h>
5 #include <net/pkt_cls.h>
6 #include <net/pkt_sched.h>
7
8 #include "cmsg.h"
9 #include "main.h"
10 #include "../nfp_port.h"
11
12 #define NFP_FL_QOS_UPDATE               msecs_to_jiffies(1000)
13
/* Common header of every police control message exchanged with the
 * firmware. Wire format, hence big-endian fields.
 */
struct nfp_police_cfg_head {
	__be32 flags_opts;	/* flag options (see cmsg layout below) */
	__be32 port;		/* ingress port the policer is bound to */
};
18
/* Police cmsg for configuring a trTCM traffic conditioner (8W/32B)
 * See RFC 2698 for more details.
 * ----------------------------------------------------------------
 *    3                   2                   1
 *  1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                          Flag options                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                          Port Ingress                         |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                        Token Bucket Peak                      |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                     Token Bucket Committed                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                         Peak Burst Size                       |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Committed Burst Size                     |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                      Peak Information Rate                    |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |                    Committed Information Rate                 |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
struct nfp_police_config {
	struct nfp_police_cfg_head head;
	__be32 bkt_tkn_p;	/* token bucket peak */
	__be32 bkt_tkn_c;	/* token bucket committed */
	__be32 pbs;		/* peak burst size */
	__be32 cbs;		/* committed burst size */
	__be32 pir;		/* peak information rate */
	__be32 cir;		/* committed information rate */
};
51
/* Stats reply cmsg sent by firmware for one policer, keyed by the
 * port carried in the header. Pass/drop counters are cumulative.
 */
struct nfp_police_stats_reply {
	struct nfp_police_cfg_head head;
	__be64 pass_bytes;	/* bytes passed by the policer */
	__be64 pass_pkts;	/* packets passed by the policer */
	__be64 drop_bytes;	/* bytes dropped by the policer */
	__be64 drop_pkts;	/* packets dropped by the policer */
};
59
60 static int
61 nfp_flower_install_rate_limiter(struct nfp_app *app, struct net_device *netdev,
62                                 struct tc_cls_matchall_offload *flow,
63                                 struct netlink_ext_ack *extack)
64 {
65         struct flow_action_entry *action = &flow->rule->action.entries[0];
66         struct nfp_flower_priv *fl_priv = app->priv;
67         struct nfp_flower_repr_priv *repr_priv;
68         struct nfp_police_config *config;
69         struct nfp_repr *repr;
70         struct sk_buff *skb;
71         u32 netdev_port_id;
72         u32 burst;
73         u64 rate;
74
75         if (!nfp_netdev_is_nfp_repr(netdev)) {
76                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
77                 return -EOPNOTSUPP;
78         }
79         repr = netdev_priv(netdev);
80         repr_priv = repr->app_priv;
81
82         if (repr_priv->block_shared) {
83                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on shared blocks");
84                 return -EOPNOTSUPP;
85         }
86
87         if (repr->port->type != NFP_PORT_VF_PORT) {
88                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on non-VF ports");
89                 return -EOPNOTSUPP;
90         }
91
92         if (!flow_offload_has_one_action(&flow->rule->action)) {
93                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires a single action");
94                 return -EOPNOTSUPP;
95         }
96
97         if (flow->common.prio != 1) {
98                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires highest priority");
99                 return -EOPNOTSUPP;
100         }
101
102         if (action->id != FLOW_ACTION_POLICE) {
103                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload requires police action");
104                 return -EOPNOTSUPP;
105         }
106
107         if (action->police.rate_pkt_ps) {
108                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not support packets per second");
109                 return -EOPNOTSUPP;
110         }
111
112         rate = action->police.rate_bytes_ps;
113         burst = action->police.burst;
114         netdev_port_id = nfp_repr_get_port_id(netdev);
115
116         skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
117                                     NFP_FLOWER_CMSG_TYPE_QOS_MOD, GFP_KERNEL);
118         if (!skb)
119                 return -ENOMEM;
120
121         config = nfp_flower_cmsg_get_data(skb);
122         memset(config, 0, sizeof(struct nfp_police_config));
123         config->head.port = cpu_to_be32(netdev_port_id);
124         config->bkt_tkn_p = cpu_to_be32(burst);
125         config->bkt_tkn_c = cpu_to_be32(burst);
126         config->pbs = cpu_to_be32(burst);
127         config->cbs = cpu_to_be32(burst);
128         config->pir = cpu_to_be32(rate);
129         config->cir = cpu_to_be32(rate);
130         nfp_ctrl_tx(repr->app->ctrl, skb);
131
132         repr_priv->qos_table.netdev_port_id = netdev_port_id;
133         fl_priv->qos_rate_limiters++;
134         if (fl_priv->qos_rate_limiters == 1)
135                 schedule_delayed_work(&fl_priv->qos_stats_work,
136                                       NFP_FL_QOS_UPDATE);
137
138         return 0;
139 }
140
/* nfp_flower_remove_rate_limiter() - tear down the policer on @netdev.
 *
 * Sends a QOS_DEL cmsg to firmware, clears the repr's qos bookkeeping
 * and stops the periodic stats poller once no rate limiters remain.
 *
 * Return: 0 on success or a negative errno.
 */
static int
nfp_flower_remove_rate_limiter(struct nfp_app *app, struct net_device *netdev,
                               struct tc_cls_matchall_offload *flow,
                               struct netlink_ext_ack *extack)
{
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_police_config *config;
        struct nfp_repr *repr;
        struct sk_buff *skb;
        u32 netdev_port_id;

        if (!nfp_netdev_is_nfp_repr(netdev)) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
                return -EOPNOTSUPP;
        }
        repr = netdev_priv(netdev);

        netdev_port_id = nfp_repr_get_port_id(netdev);
        repr_priv = repr->app_priv;

        /* A zero port id in the qos table means nothing was installed. */
        if (!repr_priv->qos_table.netdev_port_id) {
                NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot remove qos entry that does not exist");
                return -EOPNOTSUPP;
        }

        /* Allocate the cmsg before touching driver state so a failed
         * allocation leaves the qos table and counters consistent.
         */
        skb = nfp_flower_cmsg_alloc(repr->app, sizeof(struct nfp_police_config),
                                    NFP_FLOWER_CMSG_TYPE_QOS_DEL, GFP_KERNEL);
        if (!skb)
                return -ENOMEM;

        /* Clear all qos associate data for this interface */
        memset(&repr_priv->qos_table, 0, sizeof(struct nfp_fl_qos));
        fl_priv->qos_rate_limiters--;
        /* Last policer gone: stop polling stats from firmware. */
        if (!fl_priv->qos_rate_limiters)
                cancel_delayed_work_sync(&fl_priv->qos_stats_work);

        config = nfp_flower_cmsg_get_data(skb);
        memset(config, 0, sizeof(struct nfp_police_config));
        config->head.port = cpu_to_be32(netdev_port_id);
        nfp_ctrl_tx(repr->app->ctrl, skb);

        return 0;
}
185
/* nfp_flower_stats_rlim_reply() - handle a policer stats cmsg from firmware.
 *
 * Looks up the repr for the port carried in the reply and folds the
 * cumulative pass + drop counters into its cached curr_stats. On the
 * very first reply (last_update still zero) prev_stats is seeded with
 * the same values so the first TC stats query reports a zero delta.
 */
void nfp_flower_stats_rlim_reply(struct nfp_app *app, struct sk_buff *skb)
{
        struct nfp_flower_priv *fl_priv = app->priv;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_police_stats_reply *msg;
        struct nfp_stat_pair *curr_stats;
        struct nfp_stat_pair *prev_stats;
        struct net_device *netdev;
        struct nfp_repr *repr;
        u32 netdev_port_id;

        msg = nfp_flower_cmsg_get_data(skb);
        netdev_port_id = be32_to_cpu(msg->head.port);
        /* RCU protects the netdev lookup against concurrent teardown. */
        rcu_read_lock();
        netdev = nfp_app_dev_get(app, netdev_port_id, NULL);
        if (!netdev)
                goto exit_unlock_rcu;

        repr = netdev_priv(netdev);
        repr_priv = repr->app_priv;
        curr_stats = &repr_priv->qos_table.curr_stats;
        prev_stats = &repr_priv->qos_table.prev_stats;

        /* qos_stats_lock serialises against the TC stats query path
         * (nfp_flower_stats_rate_limiter).
         */
        spin_lock_bh(&fl_priv->qos_stats_lock);
        curr_stats->pkts = be64_to_cpu(msg->pass_pkts) +
                           be64_to_cpu(msg->drop_pkts);
        curr_stats->bytes = be64_to_cpu(msg->pass_bytes) +
                            be64_to_cpu(msg->drop_bytes);

        /* First reply for this policer: baseline prev to curr. */
        if (!repr_priv->qos_table.last_update) {
                prev_stats->pkts = curr_stats->pkts;
                prev_stats->bytes = curr_stats->bytes;
        }

        repr_priv->qos_table.last_update = jiffies;
        spin_unlock_bh(&fl_priv->qos_stats_lock);

exit_unlock_rcu:
        rcu_read_unlock();
}
226
227 static void
228 nfp_flower_stats_rlim_request(struct nfp_flower_priv *fl_priv,
229                               u32 netdev_port_id)
230 {
231         struct nfp_police_cfg_head *head;
232         struct sk_buff *skb;
233
234         skb = nfp_flower_cmsg_alloc(fl_priv->app,
235                                     sizeof(struct nfp_police_cfg_head),
236                                     NFP_FLOWER_CMSG_TYPE_QOS_STATS,
237                                     GFP_ATOMIC);
238         if (!skb)
239                 return;
240
241         head = nfp_flower_cmsg_get_data(skb);
242         memset(head, 0, sizeof(struct nfp_police_cfg_head));
243         head->port = cpu_to_be32(netdev_port_id);
244
245         nfp_ctrl_tx(fl_priv->app->ctrl, skb);
246 }
247
248 static void
249 nfp_flower_stats_rlim_request_all(struct nfp_flower_priv *fl_priv)
250 {
251         struct nfp_reprs *repr_set;
252         int i;
253
254         rcu_read_lock();
255         repr_set = rcu_dereference(fl_priv->app->reprs[NFP_REPR_TYPE_VF]);
256         if (!repr_set)
257                 goto exit_unlock_rcu;
258
259         for (i = 0; i < repr_set->num_reprs; i++) {
260                 struct net_device *netdev;
261
262                 netdev = rcu_dereference(repr_set->reprs[i]);
263                 if (netdev) {
264                         struct nfp_repr *priv = netdev_priv(netdev);
265                         struct nfp_flower_repr_priv *repr_priv;
266                         u32 netdev_port_id;
267
268                         repr_priv = priv->app_priv;
269                         netdev_port_id = repr_priv->qos_table.netdev_port_id;
270                         if (!netdev_port_id)
271                                 continue;
272
273                         nfp_flower_stats_rlim_request(fl_priv, netdev_port_id);
274                 }
275         }
276
277 exit_unlock_rcu:
278         rcu_read_unlock();
279 }
280
281 static void update_stats_cache(struct work_struct *work)
282 {
283         struct delayed_work *delayed_work;
284         struct nfp_flower_priv *fl_priv;
285
286         delayed_work = to_delayed_work(work);
287         fl_priv = container_of(delayed_work, struct nfp_flower_priv,
288                                qos_stats_work);
289
290         nfp_flower_stats_rlim_request_all(fl_priv);
291         schedule_delayed_work(&fl_priv->qos_stats_work, NFP_FL_QOS_UPDATE);
292 }
293
294 static int
295 nfp_flower_stats_rate_limiter(struct nfp_app *app, struct net_device *netdev,
296                               struct tc_cls_matchall_offload *flow,
297                               struct netlink_ext_ack *extack)
298 {
299         struct nfp_flower_priv *fl_priv = app->priv;
300         struct nfp_flower_repr_priv *repr_priv;
301         struct nfp_stat_pair *curr_stats;
302         struct nfp_stat_pair *prev_stats;
303         u64 diff_bytes, diff_pkts;
304         struct nfp_repr *repr;
305
306         if (!nfp_netdev_is_nfp_repr(netdev)) {
307                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: qos rate limit offload not supported on higher level port");
308                 return -EOPNOTSUPP;
309         }
310         repr = netdev_priv(netdev);
311
312         repr_priv = repr->app_priv;
313         if (!repr_priv->qos_table.netdev_port_id) {
314                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: cannot find qos entry for stats update");
315                 return -EOPNOTSUPP;
316         }
317
318         spin_lock_bh(&fl_priv->qos_stats_lock);
319         curr_stats = &repr_priv->qos_table.curr_stats;
320         prev_stats = &repr_priv->qos_table.prev_stats;
321         diff_pkts = curr_stats->pkts - prev_stats->pkts;
322         diff_bytes = curr_stats->bytes - prev_stats->bytes;
323         prev_stats->pkts = curr_stats->pkts;
324         prev_stats->bytes = curr_stats->bytes;
325         spin_unlock_bh(&fl_priv->qos_stats_lock);
326
327         flow_stats_update(&flow->stats, diff_bytes, diff_pkts, 0,
328                           repr_priv->qos_table.last_update,
329                           FLOW_ACTION_HW_STATS_DELAYED);
330         return 0;
331 }
332
333 void nfp_flower_qos_init(struct nfp_app *app)
334 {
335         struct nfp_flower_priv *fl_priv = app->priv;
336
337         spin_lock_init(&fl_priv->qos_stats_lock);
338         INIT_DELAYED_WORK(&fl_priv->qos_stats_work, &update_stats_cache);
339 }
340
341 void nfp_flower_qos_cleanup(struct nfp_app *app)
342 {
343         struct nfp_flower_priv *fl_priv = app->priv;
344
345         cancel_delayed_work_sync(&fl_priv->qos_stats_work);
346 }
347
348 int nfp_flower_setup_qos_offload(struct nfp_app *app, struct net_device *netdev,
349                                  struct tc_cls_matchall_offload *flow)
350 {
351         struct netlink_ext_ack *extack = flow->common.extack;
352         struct nfp_flower_priv *fl_priv = app->priv;
353
354         if (!(fl_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)) {
355                 NL_SET_ERR_MSG_MOD(extack, "unsupported offload: loaded firmware does not support qos rate limit offload");
356                 return -EOPNOTSUPP;
357         }
358
359         switch (flow->command) {
360         case TC_CLSMATCHALL_REPLACE:
361                 return nfp_flower_install_rate_limiter(app, netdev, flow,
362                                                        extack);
363         case TC_CLSMATCHALL_DESTROY:
364                 return nfp_flower_remove_rate_limiter(app, netdev, flow,
365                                                       extack);
366         case TC_CLSMATCHALL_STATS:
367                 return nfp_flower_stats_rate_limiter(app, netdev, flow,
368                                                      extack);
369         default:
370                 return -EOPNOTSUPP;
371         }
372 }