cxgb4: fix endian conversions for L4 ports in filters
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c (linux-2.6-microblaze.git)
1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 #include <net/ipv6.h>
35
36 #include "cxgb4.h"
37 #include "t4_regs.h"
38 #include "t4_tcb.h"
39 #include "t4_values.h"
40 #include "clip_tbl.h"
41 #include "l2t.h"
42 #include "smt.h"
43 #include "t4fw_api.h"
44 #include "cxgb4_filter.h"
45
46 static inline bool is_field_set(u32 val, u32 mask)
47 {
48         return val || mask;
49 }
50
51 static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
52 {
53         return !(conf & conf_mask) && is_field_set(val, mask);
54 }
55
56 static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
57                          unsigned int ftid, u16 word, u64 mask, u64 val,
58                          int no_reply)
59 {
60         struct cpl_set_tcb_field *req;
61         struct sk_buff *skb;
62
63         skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
64         if (!skb)
65                 return -ENOMEM;
66
67         req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
68         INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
69         req->reply_ctrl = htons(REPLY_CHAN_V(0) |
70                                 QUEUENO_V(adap->sge.fw_evtq.abs_id) |
71                                 NO_REPLY_V(no_reply));
72         req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
73         req->mask = cpu_to_be64(mask);
74         req->val = cpu_to_be64(val);
75         set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
76         t4_ofld_send(adap, skb);
77         return 0;
78 }
79
80 /* Set one of the t_flags bits in the TCB.
81  */
82 static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f,
83                          unsigned int ftid, unsigned int bit_pos,
84                          unsigned int val, int no_reply)
85 {
86         return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos,
87                              (unsigned long long)val << bit_pos, no_reply);
88 }
89
90 static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, unsigned int tid)
91 {
92         struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
93         struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
94
95         txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
96         txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_req), 16));
97         sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
98         sc->len = htonl(sizeof(*abort_req) - sizeof(struct work_request_hdr));
99         OPCODE_TID(abort_req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
100         abort_req->rsvd0 = htonl(0);
101         abort_req->rsvd1 = 0;
102         abort_req->cmd = CPL_ABORT_NO_RST;
103 }
104
105 static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, unsigned int tid)
106 {
107         struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
108         struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
109
110         txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
111         txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
112         sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
113         sc->len = htonl(sizeof(*abort_rpl) - sizeof(struct work_request_hdr));
114         OPCODE_TID(abort_rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
115         abort_rpl->rsvd0 = htonl(0);
116         abort_rpl->rsvd1 = 0;
117         abort_rpl->cmd = CPL_ABORT_NO_RST;
118 }
119
120 static void mk_set_tcb_ulp(struct filter_entry *f,
121                            struct cpl_set_tcb_field *req,
122                            unsigned int word, u64 mask, u64 val,
123                            u8 cookie, int no_reply)
124 {
125         struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
126         struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);
127
128         txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
129         txpkt->len = htonl(DIV_ROUND_UP(sizeof(*req), 16));
130         sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
131         sc->len = htonl(sizeof(*req) - sizeof(struct work_request_hdr));
132         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
133         req->reply_ctrl = htons(NO_REPLY_V(no_reply) | REPLY_CHAN_V(0) |
134                                 QUEUENO_V(0));
135         req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
136         req->mask = cpu_to_be64(mask);
137         req->val = cpu_to_be64(val);
138         sc = (struct ulptx_idata *)(req + 1);
139         sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
140         sc->len = htonl(0);
141 }
142
143 static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
144 {
145         int err;
146
147         /* Do a set-tcb for smac-sel and the CWR bit. */
148         err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
149         if (err)
150                 goto smac_err;
151
152         err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
153                             TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
154                             TCB_SMAC_SEL_V(f->smt->idx), 1);
155         if (!err)
156                 return 0;
157
158 smac_err:
159         dev_err(adap->pdev_dev, "filter %u smac config failed with error %d\n",
160                 f->tid, err);
161         return err;
162 }
163
164 static void set_nat_params(struct adapter *adap, struct filter_entry *f,
165                            unsigned int tid, bool dip, bool sip, bool dp,
166                            bool sp)
167 {
168         u8 *nat_lp = (u8 *)&f->fs.nat_lport;
169         u8 *nat_fp = (u8 *)&f->fs.nat_fport;
170
171         if (dip) {
172                 if (f->fs.type) {
173                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
174                                       WORD_MASK, f->fs.nat_lip[15] |
175                                       f->fs.nat_lip[14] << 8 |
176                                       f->fs.nat_lip[13] << 16 |
177                                       f->fs.nat_lip[12] << 24, 1);
178
179                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
180                                       WORD_MASK, f->fs.nat_lip[11] |
181                                       f->fs.nat_lip[10] << 8 |
182                                       f->fs.nat_lip[9] << 16 |
183                                       f->fs.nat_lip[8] << 24, 1);
184
185                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
186                                       WORD_MASK, f->fs.nat_lip[7] |
187                                       f->fs.nat_lip[6] << 8 |
188                                       f->fs.nat_lip[5] << 16 |
189                                       f->fs.nat_lip[4] << 24, 1);
190
191                         set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
192                                       WORD_MASK, f->fs.nat_lip[3] |
193                                       f->fs.nat_lip[2] << 8 |
194                                       f->fs.nat_lip[1] << 16 |
195                                       f->fs.nat_lip[0] << 24, 1);
196                 } else {
197                         set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
198                                       WORD_MASK, f->fs.nat_lip[3] |
199                                       f->fs.nat_lip[2] << 8 |
200                                       f->fs.nat_lip[1] << 16 |
201                                       f->fs.nat_lip[0] << 24, 1);
202                 }
203         }
204
205         if (sip) {
206                 if (f->fs.type) {
207                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W,
208                                       WORD_MASK, f->fs.nat_fip[15] |
209                                       f->fs.nat_fip[14] << 8 |
210                                       f->fs.nat_fip[13] << 16 |
211                                       f->fs.nat_fip[12] << 24, 1);
212
213                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
214                                       WORD_MASK, f->fs.nat_fip[11] |
215                                       f->fs.nat_fip[10] << 8 |
216                                       f->fs.nat_fip[9] << 16 |
217                                       f->fs.nat_fip[8] << 24, 1);
218
219                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
220                                       WORD_MASK, f->fs.nat_fip[7] |
221                                       f->fs.nat_fip[6] << 8 |
222                                       f->fs.nat_fip[5] << 16 |
223                                       f->fs.nat_fip[4] << 24, 1);
224
225                         set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
226                                       WORD_MASK, f->fs.nat_fip[3] |
227                                       f->fs.nat_fip[2] << 8 |
228                                       f->fs.nat_fip[1] << 16 |
229                                       f->fs.nat_fip[0] << 24, 1);
230
231                 } else {
232                         set_tcb_field(adap, f, tid,
233                                       TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W,
234                                       WORD_MASK, f->fs.nat_fip[3] |
235                                       f->fs.nat_fip[2] << 8 |
236                                       f->fs.nat_fip[1] << 16 |
237                                       f->fs.nat_fip[0] << 24, 1);
238                 }
239         }
240
241         set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
242                       (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
243                       (sp ? (nat_fp[1] << 16 | nat_fp[0] << 24) : 0),
244                       1);
245 }
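/* Editorial sketch (not part of the upstream file): the open-coded byte
 * assembly above is an endian-neutral load of a port stored in network
 * byte order.  Assuming f->fs.nat_lport holds the port big-endian, as
 * the "fix endian conversions for L4 ports" change intends, nat_lp[0]
 * is the most significant byte, so (nat_lp[1] | nat_lp[0] << 8) yields
 * the CPU-order port on both little- and big-endian hosts.  The
 * hypothetical helper below spells out the equivalent conversion:
 */
static inline u16 nat_port_to_cpu(const u8 *nat_p)
{
	/* nat_p points at a u16 stored in big-endian byte order */
	return (u16)(nat_p[0] << 8 | nat_p[1]);
}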
246
247 /* Validate filter spec against configuration done on the card. */
248 static int validate_filter(struct net_device *dev,
249                            struct ch_filter_specification *fs)
250 {
251         struct adapter *adapter = netdev2adap(dev);
252         u32 fconf, iconf;
253
254         /* Check for unconfigured fields being used. */
255         iconf = adapter->params.tp.ingress_config;
256         fconf = fs->hash ? adapter->params.tp.filter_mask :
257                            adapter->params.tp.vlan_pri_map;
258
259         if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
260             unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
261             unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
262             unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
263                         fs->mask.ethtype) ||
264             unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
265             unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
266                         fs->mask.matchtype) ||
267             unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
268             unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
269             unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
270                         fs->mask.pfvf_vld) ||
271             unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
272                         fs->mask.ovlan_vld) ||
273             unsupported(fconf, VNIC_ID_F, fs->val.encap_vld,
274                         fs->mask.encap_vld) ||
275             unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
276                 return -EOPNOTSUPP;
277
278         /* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
279          * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
280          * in TP_INGRESS_CONFIG.  Hence the somewhat crazy checks
281          * below.  Additionally, since the T4 firmware interface also
282          * carries that overlap, we need to translate any PF/VF
283          * specification into that internal format below.
284          */
285         if ((is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
286              is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) ||
287             (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
288              is_field_set(fs->val.encap_vld, fs->mask.encap_vld)) ||
289             (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
290              is_field_set(fs->val.encap_vld, fs->mask.encap_vld)))
291                 return -EOPNOTSUPP;
292         if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
293             (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
294              (iconf & VNIC_F)))
295                 return -EOPNOTSUPP;
296         if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
297                 return -ERANGE;
298         fs->mask.pf &= 0x7;
299         fs->mask.vf &= 0x7f;
300
301         /* If the user is requesting that the filter action loop
302          * matching packets back out one of our ports, make sure that
303          * the egress port is in range.
304          */
305         if (fs->action == FILTER_SWITCH &&
306             fs->eport >= adapter->params.nports)
307                 return -ERANGE;
308
309         /* Don't allow various trivially obvious bogus out-of-range values... */
310         if (fs->val.iport >= adapter->params.nports)
311                 return -ERANGE;
312
313         /* T4 can't remove or rewrite VLAN Tags for loopback filters. */
314         if (is_t4(adapter->params.chip) &&
315             fs->action == FILTER_SWITCH &&
316             (fs->newvlan == VLAN_REMOVE ||
317              fs->newvlan == VLAN_REWRITE))
318                 return -EOPNOTSUPP;
319
320         if (fs->val.encap_vld &&
321             CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
322                 return -EOPNOTSUPP;
323         return 0;
324 }
325
326 static int get_filter_steerq(struct net_device *dev,
327                              struct ch_filter_specification *fs)
328 {
329         struct adapter *adapter = netdev2adap(dev);
330         int iq;
331
332         /* If the user has requested steering matching Ingress Packets
333          * to a specific Queue Set, we need to make sure it's in range
334          * for the port and map that into the Absolute Queue ID of the
335          * Queue Set's Response Queue.
336          */
337         if (!fs->dirsteer) {
338                 if (fs->iq)
339                         return -EINVAL;
340                 iq = 0;
341         } else {
342                 struct port_info *pi = netdev_priv(dev);
343
344                 /* If the iq id is greater than or equal to the number
345                  * of qsets, then assume it is an absolute qid.
346                  */
347                 if (fs->iq < pi->nqsets)
348                         iq = adapter->sge.ethrxq[pi->first_qset +
349                                                  fs->iq].rspq.abs_id;
350                 else
351                         iq = fs->iq;
352         }
353
354         return iq;
355 }
356
357 static int get_filter_count(struct adapter *adapter, unsigned int fidx,
358                             u64 *pkts, u64 *bytes, bool hash)
359 {
360         unsigned int tcb_base, tcbaddr;
361         unsigned int word_offset;
362         struct filter_entry *f;
363         __be64 be64_byte_count;
364         int ret;
365
366         tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
367         if (is_hashfilter(adapter) && hash) {
368                 if (tid_out_of_range(&adapter->tids, fidx))
369                         return -E2BIG;
370                 f = adapter->tids.tid_tab[fidx - adapter->tids.tid_base];
371                 if (!f)
372                         return -EINVAL;
373         } else {
374                 if ((fidx != (adapter->tids.nftids + adapter->tids.nsftids +
375                               adapter->tids.nhpftids - 1)) &&
376                     fidx >= (adapter->tids.nftids + adapter->tids.nhpftids))
377                         return -E2BIG;
378
379                 if (fidx < adapter->tids.nhpftids)
380                         f = &adapter->tids.hpftid_tab[fidx];
381                 else
382                         f = &adapter->tids.ftid_tab[fidx -
383                                                     adapter->tids.nhpftids];
384                 if (!f->valid)
385                         return -EINVAL;
386         }
387         tcbaddr = tcb_base + f->tid * TCB_SIZE;
388
389         spin_lock(&adapter->win0_lock);
390         if (is_t4(adapter->params.chip)) {
391                 __be64 be64_count;
392
393                 /* T4 doesn't maintain byte counts in hw */
394                 *bytes = 0;
395
396                 /* Get pkts */
397                 word_offset = 4;
398                 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
399                                    tcbaddr + (word_offset * sizeof(__be32)),
400                                    sizeof(be64_count),
401                                    (__be32 *)&be64_count,
402                                    T4_MEMORY_READ);
403                 if (ret < 0)
404                         goto out;
405                 *pkts = be64_to_cpu(be64_count);
406         } else {
407                 __be32 be32_count;
408
409                 /* Get bytes */
410                 word_offset = 4;
411                 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
412                                    tcbaddr + (word_offset * sizeof(__be32)),
413                                    sizeof(be64_byte_count),
414                                    &be64_byte_count,
415                                    T4_MEMORY_READ);
416                 if (ret < 0)
417                         goto out;
418                 *bytes = be64_to_cpu(be64_byte_count);
419
420                 /* Get pkts */
421                 word_offset = 6;
422                 ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
423                                    tcbaddr + (word_offset * sizeof(__be32)),
424                                    sizeof(be32_count),
425                                    &be32_count,
426                                    T4_MEMORY_READ);
427                 if (ret < 0)
428                         goto out;
429                 *pkts = (u64)be32_to_cpu(be32_count);
430         }
431
432 out:
433         spin_unlock(&adapter->win0_lock);
434         return ret;
435 }
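/* Editorial sketch (hypothetical helper, not in the driver): the reads
 * above address individual 32-bit words inside a connection's TCB.
 * Each TCB occupies TCB_SIZE bytes starting at tcb_base, and a word
 * offset selects a 4-byte word within it; this is exactly how tcbaddr
 * plus the (word_offset * sizeof(__be32)) adjustments are formed:
 */
static inline unsigned int tcb_word_addr(unsigned int tcb_base,
					 unsigned int tid, unsigned int word)
{
	/* start of this tid's TCB plus the byte offset of the word */
	return tcb_base + tid * TCB_SIZE + word * sizeof(__be32);
}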
436
437 int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
438                               u64 *hitcnt, u64 *bytecnt, bool hash)
439 {
440         struct adapter *adapter = netdev2adap(dev);
441
442         return get_filter_count(adapter, fidx, hitcnt, bytecnt, hash);
443 }
444
445 static bool cxgb4_filter_prio_in_range(struct tid_info *t, u32 idx, u8 nslots,
446                                        u32 prio)
447 {
448         struct filter_entry *prev_tab, *next_tab, *prev_fe, *next_fe;
449         u32 prev_ftid, next_ftid;
450
451         /* Only insert the rule if both of the following conditions
452          * are met:
453          * 1. The immediate previous rule has priority <= @prio.
454          * 2. The immediate next rule has priority >= @prio.
455          */
456
457         /* High Priority (HPFILTER) region always has higher priority
458          * than normal FILTER region. So, all rules in HPFILTER region
459          * must have prio value <= rules in normal FILTER region.
460          */
461         if (idx < t->nhpftids) {
462                 /* Don't insert if there's a rule already present at @idx
463                  * in HPFILTER region.
464                  */
465                 if (test_bit(idx, t->hpftid_bmap))
466                         return false;
467
468                 next_tab = t->hpftid_tab;
469                 next_ftid = find_next_bit(t->hpftid_bmap, t->nhpftids, idx);
470                 if (next_ftid >= t->nhpftids) {
471                         /* No next entry found in HPFILTER region.
472                          * See if there's any next entry in normal
473                          * FILTER region.
474                          */
475                         next_ftid = find_first_bit(t->ftid_bmap, t->nftids);
476                         if (next_ftid >= t->nftids)
477                                 next_ftid = idx;
478                         else
479                                 next_tab = t->ftid_tab;
480                 }
481
482                 /* Search for the closest previous filter entry in HPFILTER
483                  * region. No need to search in normal FILTER region because
484                  * there can never be any entry in normal FILTER region whose
485                  * prio value is < last entry in HPFILTER region.
486                  */
487                 prev_ftid = find_last_bit(t->hpftid_bmap, idx);
488                 if (prev_ftid >= idx)
489                         prev_ftid = idx;
490
491                 prev_tab = t->hpftid_tab;
492         } else {
493                 idx -= t->nhpftids;
494
495                 /* Don't insert if there's a rule already present at @idx
496                  * in normal FILTER region.
497                  */
498                 if (test_bit(idx, t->ftid_bmap))
499                         return false;
500
501                 prev_tab = t->ftid_tab;
502                 prev_ftid = find_last_bit(t->ftid_bmap, idx);
503                 if (prev_ftid >= idx) {
504                         /* No previous entry found in normal FILTER
505                          * region. See if there's any previous entry
506                          * in HPFILTER region.
507                          */
508                         prev_ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
509                         if (prev_ftid >= t->nhpftids)
510                                 prev_ftid = idx;
511                         else
512                                 prev_tab = t->hpftid_tab;
513                 }
514
515                 /* Search for the closest next filter entry in normal
516                  * FILTER region. No need to search in HPFILTER region
517                  * because there can never be any entry in HPFILTER
518                  * region whose prio value is > first entry in normal
519                  * FILTER region.
520                  */
521                 next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
522                 if (next_ftid >= t->nftids)
523                         next_ftid = idx;
524
525                 next_tab = t->ftid_tab;
526         }
527
528         next_fe = &next_tab[next_ftid];
529
530         /* See if the filter entry belongs to an IPv6 rule, which
531          * occupies 4 slots on T5 and 2 slots on T6. Adjust the
532          * reference to the previously inserted filter entry
533          * accordingly.
534          */
535         prev_fe = &prev_tab[prev_ftid & ~(nslots - 1)];
536         if (!prev_fe->fs.type)
537                 prev_fe = &prev_tab[prev_ftid];
538
539         if ((prev_fe->valid && prev_fe->fs.tc_prio > prio) ||
540             (next_fe->valid && next_fe->fs.tc_prio < prio))
541                 return false;
542
543         return true;
544 }
545
546 int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
547                         u32 tc_prio)
548 {
549         struct adapter *adap = netdev2adap(dev);
550         struct tid_info *t = &adap->tids;
551         u32 bmap_ftid, max_ftid;
552         struct filter_entry *f;
553         unsigned long *bmap;
554         bool found = false;
555         u8 i, cnt, n;
556         int ftid = 0;
557
558         /* IPv4 filters occupy 1 slot. IPv6 filters occupy 2 slots
559          * on T6 and 4 slots on T5.
560          */
561         n = 1;
562         if (family == PF_INET6) {
563                 n++;
564                 if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
565                         n += 2;
566         }
567
568         /* There are 3 filter regions available in hardware in
569          * following order of priority:
570          *
571          * 1. High Priority (HPFILTER) region (Highest Priority).
572          * 2. HASH region.
573          * 3. Normal FILTER region (Lowest Priority).
574          *
575          * Entries in HPFILTER and normal FILTER region have index
576          * 0 as the highest priority and the rules will be scanned
577          * in ascending order until either a rule hits or end of
578          * the region is reached.
579          *
580          * All HASH region entries have the same priority. The set
581          * of header fields to match is pre-determined. The same set
582          * of header match fields must be specified in every rule
583          * inserted in the HASH region, making the HASH region an
584          * exact-match region. A HASH is
585          * generated for a rule based on the values in the
586          * pre-determined set of header match fields. The generated
587          * HASH serves as an index into the HASH region. There can
588          * never be 2 rules having the same HASH. Hardware will
589          * compute a HASH for every incoming packet based on the
590          * values in the pre-determined set of header match fields
591          * and uses it as an index to check if there's a rule
592          * inserted in the HASH region at the specified index. If
593          * there's a rule inserted, then it's considered as a filter
594          * hit. Otherwise, it's a filter miss and normal FILTER region
595          * is scanned afterwards.
596          */
597
598         spin_lock_bh(&t->ftid_lock);
599
600         ftid = (tc_prio <= t->nhpftids) ? 0 : t->nhpftids;
601         max_ftid = t->nftids + t->nhpftids;
602         while (ftid < max_ftid) {
603                 if (ftid < t->nhpftids) {
604                         /* If the new rule wants to get inserted into
605                          * HPFILTER region, but its prio is greater
606                          * than the rule with the highest prio in HASH
607                          * region, then reject the rule.
608                          */
609                         if (t->tc_hash_tids_max_prio &&
610                             tc_prio > t->tc_hash_tids_max_prio)
611                                 break;
612
613                         /* If there's not enough slots available
614                          * in HPFILTER region, then move on to
615                          * normal FILTER region immediately.
616                          */
617                         if (ftid + n > t->nhpftids) {
618                                 ftid = t->nhpftids;
619                                 continue;
620                         }
621
622                         bmap = t->hpftid_bmap;
623                         bmap_ftid = ftid;
624                 } else if (hash_en) {
625                         /* Ensure priority is >= last rule in HPFILTER
626                          * region.
627                          */
628                         ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
629                         if (ftid < t->nhpftids) {
630                                 f = &t->hpftid_tab[ftid];
631                                 if (f->valid && tc_prio < f->fs.tc_prio)
632                                         break;
633                         }
634
635                         /* Ensure priority is <= first rule in normal
636                          * FILTER region.
637                          */
638                         ftid = find_first_bit(t->ftid_bmap, t->nftids);
639                         if (ftid < t->nftids) {
640                                 f = &t->ftid_tab[ftid];
641                                 if (f->valid && tc_prio > f->fs.tc_prio)
642                                         break;
643                         }
644
645                         found = true;
646                         ftid = t->nhpftids;
647                         goto out_unlock;
648                 } else {
649                         /* If the new rule wants to get inserted into
650                          * normal FILTER region, but its prio is less
651                          * than the rule with the highest prio in HASH
652                          * region, then reject the rule.
653                          */
654                         if (t->tc_hash_tids_max_prio &&
655                             tc_prio < t->tc_hash_tids_max_prio)
656                                 break;
657
658                         if (ftid + n > max_ftid)
659                                 break;
660
661                         bmap = t->ftid_bmap;
662                         bmap_ftid = ftid - t->nhpftids;
663                 }
664
665                 cnt = 0;
666                 for (i = 0; i < n; i++) {
667                         if (test_bit(bmap_ftid + i, bmap))
668                                 break;
669                         cnt++;
670                 }
671
672                 if (cnt == n) {
673                         /* Ensure the new rule's prio doesn't conflict
674                          * with existing rules.
675                          */
676                         if (cxgb4_filter_prio_in_range(t, ftid, n,
677                                                        tc_prio)) {
678                                 ftid &= ~(n - 1);
679                                 found = true;
680                                 break;
681                         }
682                 }
683
684                 ftid += n;
685         }
686
687 out_unlock:
688         spin_unlock_bh(&t->ftid_lock);
689         return found ? ftid : -ENOMEM;
690 }
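/* Editorial sketch: IPv6 TCAM filters must begin on a slot-aligned
 * index (2 slots on T6, 4 slots before T6), which is what the
 * "ftid &= ~(n - 1)" above enforces.  Assuming nslots is a power of
 * two, the hypothetical helper below rounds a candidate index down to
 * its slot boundary, e.g. ftid 5 with nslots 4 maps to 4:
 */
static inline u32 ftid_align_down(u32 ftid, u8 nslots)
{
	/* clear the low log2(nslots) bits of the index */
	return ftid & ~((u32)nslots - 1);
}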
691
692 static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
693                           unsigned int chip_ver)
694 {
695         spin_lock_bh(&t->ftid_lock);
696
697         if (test_bit(fidx, t->ftid_bmap)) {
698                 spin_unlock_bh(&t->ftid_lock);
699                 return -EBUSY;
700         }
701
702         if (family == PF_INET) {
703                 __set_bit(fidx, t->ftid_bmap);
704         } else {
705                 if (chip_ver < CHELSIO_T6)
706                         bitmap_allocate_region(t->ftid_bmap, fidx, 2);
707                 else
708                         bitmap_allocate_region(t->ftid_bmap, fidx, 1);
709         }
710
711         spin_unlock_bh(&t->ftid_lock);
712         return 0;
713 }
714
715 static int cxgb4_set_hpftid(struct tid_info *t, int fidx, int family)
716 {
717         spin_lock_bh(&t->ftid_lock);
718
719         if (test_bit(fidx, t->hpftid_bmap)) {
720                 spin_unlock_bh(&t->ftid_lock);
721                 return -EBUSY;
722         }
723
724         if (family == PF_INET)
725                 __set_bit(fidx, t->hpftid_bmap);
726         else
727                 bitmap_allocate_region(t->hpftid_bmap, fidx, 1);
728
729         spin_unlock_bh(&t->ftid_lock);
730         return 0;
731 }
732
733 static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
734                              unsigned int chip_ver)
735 {
736         spin_lock_bh(&t->ftid_lock);
737         if (family == PF_INET) {
738                 __clear_bit(fidx, t->ftid_bmap);
739         } else {
740                 if (chip_ver < CHELSIO_T6)
741                         bitmap_release_region(t->ftid_bmap, fidx, 2);
742                 else
743                         bitmap_release_region(t->ftid_bmap, fidx, 1);
744         }
745         spin_unlock_bh(&t->ftid_lock);
746 }
747
748 static void cxgb4_clear_hpftid(struct tid_info *t, int fidx, int family)
749 {
750         spin_lock_bh(&t->ftid_lock);
751
752         if (family == PF_INET)
753                 __clear_bit(fidx, t->hpftid_bmap);
754         else
755                 bitmap_release_region(t->hpftid_bmap, fidx, 1);
756
757         spin_unlock_bh(&t->ftid_lock);
758 }
759
760 /* Delete the filter at a specified index. */
761 static int del_filter_wr(struct adapter *adapter, int fidx)
762 {
763         struct fw_filter_wr *fwr;
764         struct filter_entry *f;
765         struct sk_buff *skb;
766         unsigned int len;
767
768         if (fidx < adapter->tids.nhpftids)
769                 f = &adapter->tids.hpftid_tab[fidx];
770         else
771                 f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
772
773         len = sizeof(*fwr);
774
775         skb = alloc_skb(len, GFP_KERNEL);
776         if (!skb)
777                 return -ENOMEM;
778
779         fwr = __skb_put(skb, len);
780         t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);
781
782         /* Mark the filter as "pending" and ship off the Filter Work Request.
783          * When we get the Work Request Reply we'll clear the pending status.
784          */
785         f->pending = 1;
786         t4_mgmt_tx(adapter, skb);
787         return 0;
788 }
789
790 /* Send a Work Request to write the filter at a specified index.  We construct
791  * a Firmware Filter Work Request to have the work done and put the indicated
792  * filter into "pending" mode which will prevent any further actions against
793  * it till we get a reply from the firmware on the completion status of the
794  * request.
795  */
796 int set_filter_wr(struct adapter *adapter, int fidx)
797 {
798         struct fw_filter2_wr *fwr;
799         struct filter_entry *f;
800         struct sk_buff *skb;
801
802         if (fidx < adapter->tids.nhpftids)
803                 f = &adapter->tids.hpftid_tab[fidx];
804         else
805                 f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
806
807         skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
808         if (!skb)
809                 return -ENOMEM;
810
811         /* If the new filter requires loopback Destination MAC and/or VLAN
812          * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
813          * the filter.
814          */
815         if (f->fs.newdmac || f->fs.newvlan) {
816                 /* allocate L2T entry for new filter */
817                 f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
818                                                 f->fs.eport, f->fs.dmac);
819                 if (!f->l2t) {
820                         kfree_skb(skb);
821                         return -ENOMEM;
822                 }
823         }
824
825         /* If the new filter requires loopback Source MAC rewriting then
826          * we need to allocate a SMT entry for the filter.
827          */
828         if (f->fs.newsmac) {
829                 f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
830                 if (!f->smt) {
831                         if (f->l2t) {
832                                 cxgb4_l2t_release(f->l2t);
833                                 f->l2t = NULL;
834                         }
835                         kfree_skb(skb);
836                         return -ENOMEM;
837                 }
838         }
839
840         fwr = __skb_put_zero(skb, sizeof(*fwr));
841
842         /* It would be nice to put most of the following in t4_hw.c but most
843          * of the work is translating the cxgbtool ch_filter_specification
844          * into the Work Request and the definition of that structure is
845          * currently in cxgbtool.h which isn't appropriate to pull into the
846          * common code.  We may eventually try to come up with a more neutral
847          * filter specification structure but for now it's easiest to simply
848          * put this fairly direct code in line ...
849          */
850         if (adapter->params.filter2_wr_support)
851                 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR));
852         else
853                 fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
854         fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
855         fwr->tid_to_iq =
856                 htonl(FW_FILTER_WR_TID_V(f->tid) |
857                       FW_FILTER_WR_RQTYPE_V(f->fs.type) |
858                       FW_FILTER_WR_NOREPLY_V(0) |
859                       FW_FILTER_WR_IQ_V(f->fs.iq));
860         fwr->del_filter_to_l2tix =
861                 htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
862                       FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
863                       FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
864                       FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
865                       FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
866                       FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
867                       FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
868                       FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
869                                              f->fs.newvlan == VLAN_REWRITE) |
870                       FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
871                                             f->fs.newvlan == VLAN_REWRITE) |
872                       FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
873                       FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
874                       FW_FILTER_WR_PRIO_V(f->fs.prio) |
875                       FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
876         fwr->ethtype = htons(f->fs.val.ethtype);
877         fwr->ethtypem = htons(f->fs.mask.ethtype);
878         fwr->frag_to_ovlan_vldm =
879                 (FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
880                  FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
881                  FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
882                  FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
883                  FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
884                  FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
885         fwr->smac_sel = 0;
886         fwr->rx_chan_rx_rpl_iq =
887                 htons(FW_FILTER_WR_RX_CHAN_V(0) |
888                       FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
889         fwr->maci_to_matchtypem =
890                 htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
891                       FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
892                       FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
893                       FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
894                       FW_FILTER_WR_PORT_V(f->fs.val.iport) |
895                       FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
896                       FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
897                       FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
898         fwr->ptcl = f->fs.val.proto;
899         fwr->ptclm = f->fs.mask.proto;
900         fwr->ttyp = f->fs.val.tos;
901         fwr->ttypm = f->fs.mask.tos;
902         fwr->ivlan = htons(f->fs.val.ivlan);
903         fwr->ivlanm = htons(f->fs.mask.ivlan);
904         fwr->ovlan = htons(f->fs.val.ovlan);
905         fwr->ovlanm = htons(f->fs.mask.ovlan);
906         memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
907         memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
908         memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
909         memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
910         fwr->lp = htons(f->fs.val.lport);
911         fwr->lpm = htons(f->fs.mask.lport);
912         fwr->fp = htons(f->fs.val.fport);
913         fwr->fpm = htons(f->fs.mask.fport);
914
915         if (adapter->params.filter2_wr_support) {
916                 u8 *nat_lp = (u8 *)&f->fs.nat_lport;
917                 u8 *nat_fp = (u8 *)&f->fs.nat_fport;
918
919                 fwr->natmode_to_ulp_type =
920                         FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
921                                                  ULP_MODE_TCPDDP :
922                                                  ULP_MODE_NONE) |
923                         FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
924                 memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
925                 memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
926                 fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8);
927                 fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8);
928         }
929
930         /* Mark the filter as "pending" and ship off the Filter Work Request.
931          * When we get the Work Request Reply we'll clear the pending status.
932          */
933         f->pending = 1;
934         set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
935         t4_ofld_send(adapter, skb);
936         return 0;
937 }
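/* Editorial sketch (hypothetical helper, not upstream code): the NAT
 * port fields above must reach the firmware big-endian.  Assuming
 * f->fs.nat_lport is stored in network byte order, the expression
 * (nat_lp[1] | nat_lp[0] << 8) recovers the CPU-order value and
 * htons() restores network order, so the bytes on the wire match the
 * stored bytes regardless of host endianness:
 */
static inline __be16 nat_port_to_wire(const u8 *nat_p)
{
	/* big-endian bytes -> CPU order -> back to network order */
	return htons(nat_p[1] | nat_p[0] << 8);
}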
938
939 /* Return an error number if the indicated filter isn't writable ... */
940 int writable_filter(struct filter_entry *f)
941 {
942         if (f->locked)
943                 return -EPERM;
944         if (f->pending)
945                 return -EBUSY;
946
947         return 0;
948 }
949
950 /* Delete the filter at the specified index (if valid).  This checks for
951  * all the common problems with doing this, such as the filter being
952  * locked or currently pending in another operation.
953  */
954 int delete_filter(struct adapter *adapter, unsigned int fidx)
955 {
956         struct filter_entry *f;
957         int ret;
958
959         if (fidx >= adapter->tids.nftids + adapter->tids.nsftids +
960                     adapter->tids.nhpftids)
961                 return -EINVAL;
962
963         if (fidx < adapter->tids.nhpftids)
964                 f = &adapter->tids.hpftid_tab[fidx];
965         else
966                 f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
967         ret = writable_filter(f);
968         if (ret)
969                 return ret;
970         if (f->valid)
971                 return del_filter_wr(adapter, fidx);
972
973         return 0;
974 }
975
976 /* Clear a filter and release any of its resources that we own.  This also
977  * clears the filter's "pending" status.
978  */
979 void clear_filter(struct adapter *adap, struct filter_entry *f)
980 {
981         struct port_info *pi = netdev_priv(f->dev);
982
983         /* If the new or old filter has loopback rewriting rules, then we'll
984          * need to free any existing L2T, SMT, or CLIP entries of the
985          * filter rule.
986          */
987         if (f->l2t)
988                 cxgb4_l2t_release(f->l2t);
989
990         if (f->smt)
991                 cxgb4_smt_release(f->smt);
992
993         if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
994                 t4_free_encap_mac_filt(adap, pi->viid,
995                                        f->fs.val.ovlan & 0x1ff, 0);
996
997         if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
998                 cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
999
1000         /* The zeroing of the filter rule below clears the filter valid,
1001          * pending, locked flags, l2t pointer, etc. so it's all we need for
1002          * this operation.
1003          */
1004         memset(f, 0, sizeof(*f));
1005 }
1006
1007 void clear_all_filters(struct adapter *adapter)
1008 {
1009         struct net_device *dev = adapter->port[0];
1010         unsigned int i;
1011
1012         if (adapter->tids.hpftid_tab) {
1013                 struct filter_entry *f = &adapter->tids.hpftid_tab[0];
1014
1015                 for (i = 0; i < adapter->tids.nhpftids; i++, f++)
1016                         if (f->valid || f->pending)
1017                                 cxgb4_del_filter(dev, i, &f->fs);
1018         }
1019
1020         if (adapter->tids.ftid_tab) {
1021                 struct filter_entry *f = &adapter->tids.ftid_tab[0];
1022                 unsigned int max_ftid = adapter->tids.nftids +
1023                                         adapter->tids.nsftids +
1024                                         adapter->tids.nhpftids;
1025
1026                 /* Clear all TCAM filters */
1027                 for (i = adapter->tids.nhpftids; i < max_ftid; i++, f++)
1028                         if (f->valid || f->pending)
1029                                 cxgb4_del_filter(dev, i, &f->fs);
1030         }
1031
1032         /* Clear all hash filters */
1033         if (is_hashfilter(adapter) && adapter->tids.tid_tab) {
1034                 struct filter_entry *f;
1035                 unsigned int sb;
1036
1037                 for (i = adapter->tids.hash_base;
1038                      i < adapter->tids.ntids; i++) {
1039                         f = (struct filter_entry *)
1040                                 adapter->tids.tid_tab[i];
1041
1042                         if (f && (f->valid || f->pending))
1043                                 cxgb4_del_filter(dev, f->tid, &f->fs);
1044                 }
1045
1046                 sb = t4_read_reg(adapter, LE_DB_SRVR_START_INDEX_A);
1047                 for (i = 0; i < sb; i++) {
1048                         f = (struct filter_entry *)adapter->tids.tid_tab[i];
1049
1050                         if (f && (f->valid || f->pending))
1051                                 cxgb4_del_filter(dev, f->tid, &f->fs);
1052                 }
1053         }
1054 }
1055
1056 /* Fill in default masks for the match fields that are set. */
1057 static void fill_default_mask(struct ch_filter_specification *fs)
1058 {
1059         unsigned int lip = 0, lip_mask = 0;
1060         unsigned int fip = 0, fip_mask = 0;
1061         unsigned int i;
1062
1063         if (fs->val.iport && !fs->mask.iport)
1064                 fs->mask.iport |= ~0;
1065         if (fs->val.fcoe && !fs->mask.fcoe)
1066                 fs->mask.fcoe |= ~0;
1067         if (fs->val.matchtype && !fs->mask.matchtype)
1068                 fs->mask.matchtype |= ~0;
1069         if (fs->val.macidx && !fs->mask.macidx)
1070                 fs->mask.macidx |= ~0;
1071         if (fs->val.ethtype && !fs->mask.ethtype)
1072                 fs->mask.ethtype |= ~0;
1073         if (fs->val.ivlan && !fs->mask.ivlan)
1074                 fs->mask.ivlan |= ~0;
1075         if (fs->val.ovlan && !fs->mask.ovlan)
1076                 fs->mask.ovlan |= ~0;
1077         if (fs->val.frag && !fs->mask.frag)
1078                 fs->mask.frag |= ~0;
1079         if (fs->val.tos && !fs->mask.tos)
1080                 fs->mask.tos |= ~0;
1081         if (fs->val.proto && !fs->mask.proto)
1082                 fs->mask.proto |= ~0;
1083         if (fs->val.pfvf_vld && !fs->mask.pfvf_vld)
1084                 fs->mask.pfvf_vld |= ~0;
1085         if (fs->val.pf && !fs->mask.pf)
1086                 fs->mask.pf |= ~0;
1087         if (fs->val.vf && !fs->mask.vf)
1088                 fs->mask.vf |= ~0;
1089
1090         for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
1091                 lip |= fs->val.lip[i];
1092                 lip_mask |= fs->mask.lip[i];
1093                 fip |= fs->val.fip[i];
1094                 fip_mask |= fs->mask.fip[i];
1095         }
1096
1097         if (lip && !lip_mask)
1098                 memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));
1099
1100         if (fip && !fip_mask)
1101                 memset(fs->mask.fip, ~0, sizeof(fs->mask.fip));
1102
1103         if (fs->val.lport && !fs->mask.lport)
1104                 fs->mask.lport = ~0;
1105         if (fs->val.fport && !fs->mask.fport)
1106                 fs->mask.fport = ~0;
1107 }
1108
1109 static bool is_addr_all_mask(u8 *ipmask, int family)
1110 {
1111         if (family == AF_INET) {
1112                 struct in_addr *addr;
1113
1114                 addr = (struct in_addr *)ipmask;
1115                 if (addr->s_addr == 0xffffffff)
1116                         return true;
1117         } else if (family == AF_INET6) {
1118                 struct in6_addr *addr6;
1119
1120                 addr6 = (struct in6_addr *)ipmask;
1121                 if (addr6->s6_addr32[0] == 0xffffffff &&
1122                     addr6->s6_addr32[1] == 0xffffffff &&
1123                     addr6->s6_addr32[2] == 0xffffffff &&
1124                     addr6->s6_addr32[3] == 0xffffffff)
1125                         return true;
1126         }
1127         return false;
1128 }
1129
1130 static bool is_inaddr_any(u8 *ip, int family)
1131 {
1132         int addr_type;
1133
1134         if (family == AF_INET) {
1135                 struct in_addr *addr;
1136
1137                 addr = (struct in_addr *)ip;
1138                 if (addr->s_addr == htonl(INADDR_ANY))
1139                         return true;
1140         } else if (family == AF_INET6) {
1141                 struct in6_addr *addr6;
1142
1143                 addr6 = (struct in6_addr *)ip;
1144                 addr_type = ipv6_addr_type((const struct in6_addr *)
1145                                            addr6);
1146                 if (addr_type == IPV6_ADDR_ANY)
1147                         return true;
1148         }
1149         return false;
1150 }
1151
1152 bool is_filter_exact_match(struct adapter *adap,
1153                            struct ch_filter_specification *fs)
1154 {
1155         struct tp_params *tp = &adap->params.tp;
1156         u64 hash_filter_mask = tp->hash_filter_mask;
1157         u64 ntuple_mask = 0;
1158
1159         if (!is_hashfilter(adap))
1160                 return false;
1161
1162         /* Keep tunnel VNI match disabled for hash-filters for now */
1163         if (fs->mask.encap_vld)
1164                 return false;
1165
1166         if (fs->type) {
1167                 if (is_inaddr_any(fs->val.fip, AF_INET6) ||
1168                     !is_addr_all_mask(fs->mask.fip, AF_INET6))
1169                         return false;
1170
1171                 if (is_inaddr_any(fs->val.lip, AF_INET6) ||
1172                     !is_addr_all_mask(fs->mask.lip, AF_INET6))
1173                         return false;
1174         } else {
1175                 if (is_inaddr_any(fs->val.fip, AF_INET) ||
1176                     !is_addr_all_mask(fs->mask.fip, AF_INET))
1177                         return false;
1178
1179                 if (is_inaddr_any(fs->val.lip, AF_INET) ||
1180                     !is_addr_all_mask(fs->mask.lip, AF_INET))
1181                         return false;
1182         }
1183
1184         if (!fs->val.lport || fs->mask.lport != 0xffff)
1185                 return false;
1186
1187         if (!fs->val.fport || fs->mask.fport != 0xffff)
1188                 return false;
1189
1190         /* calculate tuple mask and compare with mask configured in hw */
1191         if (tp->fcoe_shift >= 0)
1192                 ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;
1193
1194         if (tp->port_shift >= 0)
1195                 ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;
1196
1197         if (tp->vnic_shift >= 0) {
1198                 if ((adap->params.tp.ingress_config & VNIC_F))
1199                         ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
1200                 else
1201                         ntuple_mask |= (u64)fs->mask.ovlan_vld <<
1202                                 tp->vnic_shift;
1203         }
1204
1205         if (tp->vlan_shift >= 0)
1206                 ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;
1207
1208         if (tp->tos_shift >= 0)
1209                 ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;
1210
1211         if (tp->protocol_shift >= 0)
1212                 ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;
1213
1214         if (tp->ethertype_shift >= 0)
1215                 ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;
1216
1217         if (tp->macmatch_shift >= 0)
1218                 ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;
1219
1220         if (tp->matchtype_shift >= 0)
1221                 ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;
1222
1223         if (tp->frag_shift >= 0)
1224                 ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;
1225
1226         if (ntuple_mask != hash_filter_mask)
1227                 return false;
1228
1229         return true;
1230 }
1231
1232 static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
1233                               struct net_device *dev)
1234 {
1235         struct adapter *adap = netdev2adap(dev);
1236         struct tp_params *tp = &adap->params.tp;
1237         u64 ntuple = 0;
1238
1239         /* Initialize each of the fields which we care about which are present
1240          * in the Compressed Filter Tuple.
1241          */
1242         if (tp->vlan_shift >= 0 && fs->mask.ivlan)
1243                 ntuple |= (FT_VLAN_VLD_F | fs->val.ivlan) << tp->vlan_shift;
1244
1245         if (tp->port_shift >= 0 && fs->mask.iport)
1246                 ntuple |= (u64)fs->val.iport << tp->port_shift;
1247
1248         if (tp->protocol_shift >= 0) {
1249                 if (!fs->val.proto)
1250                         ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
1251                 else
1252                         ntuple |= (u64)fs->val.proto << tp->protocol_shift;
1253         }
1254
1255         if (tp->tos_shift >= 0 && fs->mask.tos)
1256                 ntuple |= (u64)(fs->val.tos) << tp->tos_shift;
1257
1258         if (tp->vnic_shift >= 0) {
1259                 if ((adap->params.tp.ingress_config & USE_ENC_IDX_F) &&
1260                     fs->mask.encap_vld)
1261                         ntuple |= (u64)((fs->val.encap_vld << 16) |
1262                                         (fs->val.ovlan)) << tp->vnic_shift;
1263                 else if ((adap->params.tp.ingress_config & VNIC_F) &&
1264                          fs->mask.pfvf_vld)
1265                         ntuple |= (u64)((fs->val.pfvf_vld << 16) |
1266                                         (fs->val.pf << 13) |
1267                                         (fs->val.vf)) << tp->vnic_shift;
1268                 else
1269                         ntuple |= (u64)((fs->val.ovlan_vld << 16) |
1270                                         (fs->val.ovlan)) << tp->vnic_shift;
1271         }
1272
1273         if (tp->macmatch_shift >= 0 && fs->mask.macidx)
1274                 ntuple |= (u64)(fs->val.macidx) << tp->macmatch_shift;
1275
1276         if (tp->ethertype_shift >= 0 && fs->mask.ethtype)
1277                 ntuple |= (u64)(fs->val.ethtype) << tp->ethertype_shift;
1278
1279         if (tp->matchtype_shift >= 0 && fs->mask.matchtype)
1280                 ntuple |= (u64)(fs->val.matchtype) << tp->matchtype_shift;
1281
1282         if (tp->frag_shift >= 0 && fs->mask.frag)
1283                 ntuple |= (u64)(fs->val.frag) << tp->frag_shift;
1284
1285         if (tp->fcoe_shift >= 0 && fs->mask.fcoe)
1286                 ntuple |= (u64)(fs->val.fcoe) << tp->fcoe_shift;
1287         return ntuple;
1288 }
1289
1290 static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
1291                              unsigned int qid_filterid, struct adapter *adap)
1292 {
1293         struct cpl_t6_act_open_req6 *t6req = NULL;
1294         struct cpl_act_open_req6 *req = NULL;
1295
1296         t6req = (struct cpl_t6_act_open_req6 *)__skb_put(skb, sizeof(*t6req));
1297         INIT_TP_WR(t6req, 0);
1298         req = (struct cpl_act_open_req6 *)t6req;
1299         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_filterid));
1300         req->local_port = cpu_to_be16(f->fs.val.lport);
1301         req->peer_port = cpu_to_be16(f->fs.val.fport);
1302         req->local_ip_hi = *(__be64 *)(&f->fs.val.lip);
1303         req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1);
1304         req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip);
1305         req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1);
1306         req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
1307                                         f->fs.newvlan == VLAN_REWRITE) |
1308                                 DELACK_V(f->fs.hitcnts) |
1309                                 L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
1310                                 SMAC_SEL_V((cxgb4_port_viid(f->dev) &
1311                                             0x7F) << 1) |
1312                                 TX_CHAN_V(f->fs.eport) |
1313                                 NO_CONG_V(f->fs.rpttid) |
1314                                 ULP_MODE_V(f->fs.nat_mode ?
1315                                            ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1316                                 TCAM_BYPASS_F | NON_OFFLOAD_F);
1317         t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
1318                                                                       f->dev)));
1319         t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
1320                             RSS_QUEUE_V(f->fs.iq) |
1321                             TX_QUEUE_V(f->fs.nat_mode) |
1322                             T5_OPT_2_VALID_F |
1323                             RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
1324                             CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
1325                                          (f->fs.dirsteer << 1)) |
1326                             PACE_V((f->fs.maskhash) |
1327                                    ((f->fs.dirsteerhash) << 1)) |
1328                             CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
1329 }
1330
1331 static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
1332                             unsigned int qid_filterid, struct adapter *adap)
1333 {
1334         struct cpl_t6_act_open_req *t6req = NULL;
1335         struct cpl_act_open_req *req = NULL;
1336
1337         t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req));
1338         INIT_TP_WR(t6req, 0);
1339         req = (struct cpl_act_open_req *)t6req;
1340         OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
1341         req->local_port = cpu_to_be16(f->fs.val.lport);
1342         req->peer_port = cpu_to_be16(f->fs.val.fport);
1343         memcpy(&req->local_ip, f->fs.val.lip, 4);
1344         memcpy(&req->peer_ip, f->fs.val.fip, 4);
1345         req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
1346                                         f->fs.newvlan == VLAN_REWRITE) |
1347                                 DELACK_V(f->fs.hitcnts) |
1348                                 L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
1349                                 SMAC_SEL_V((cxgb4_port_viid(f->dev) &
1350                                             0x7F) << 1) |
1351                                 TX_CHAN_V(f->fs.eport) |
1352                                 NO_CONG_V(f->fs.rpttid) |
1353                                 ULP_MODE_V(f->fs.nat_mode ?
1354                                            ULP_MODE_TCPDDP : ULP_MODE_NONE) |
1355                                 TCAM_BYPASS_F | NON_OFFLOAD_F);
1356
1357         t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
1358                                                                       f->dev)));
1359         t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
1360                             RSS_QUEUE_V(f->fs.iq) |
1361                             TX_QUEUE_V(f->fs.nat_mode) |
1362                             T5_OPT_2_VALID_F |
1363                             RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
1364                             CONG_CNTRL_V((f->fs.action == FILTER_DROP) |
1365                                          (f->fs.dirsteer << 1)) |
1366                             PACE_V((f->fs.maskhash) |
1367                                    ((f->fs.dirsteerhash) << 1)) |
1368                             CCTRL_ECN_V(f->fs.action == FILTER_SWITCH));
1369 }
1370
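/* Install a hash (exact-match) filter: validate the specification,
 * allocate whatever L2T/SMT/MPS resources it needs, reserve an atid and
 * send the Active Open request built above.  The result is reported
 * asynchronously through hash_filter_rpl().
 */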
1371 static int cxgb4_set_hash_filter(struct net_device *dev,
1372                                  struct ch_filter_specification *fs,
1373                                  struct filter_ctx *ctx)
1374 {
1375         struct adapter *adapter = netdev2adap(dev);
1376         struct port_info *pi = netdev_priv(dev);
1377         struct tid_info *t = &adapter->tids;
1378         struct filter_entry *f;
1379         struct sk_buff *skb;
1380         int iq, atid, size;
1381         int ret = 0;
1382         u32 iconf;
1383
1384         fill_default_mask(fs);
1385         ret = validate_filter(dev, fs);
1386         if (ret)
1387                 return ret;
1388
1389         iq = get_filter_steerq(dev, fs);
1390         if (iq < 0)
1391                 return iq;
1392
1393         f = kzalloc(sizeof(*f), GFP_KERNEL);
1394         if (!f)
1395                 return -ENOMEM;
1396
1397         f->fs = *fs;
1398         f->ctx = ctx;
1399         f->dev = dev;
1400         f->fs.iq = iq;
1401
1402         /* If the new filter requires loopback Destination MAC and/or VLAN
1403          * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
1404          * the filter.
1405          */
1406         if (f->fs.newdmac || f->fs.newvlan) {
1407                 /* allocate L2T entry for new filter */
1408                 f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
1409                                                 f->fs.eport, f->fs.dmac);
1410                 if (!f->l2t) {
1411                         ret = -ENOMEM;
1412                         goto out_err;
1413                 }
1414         }
1415
1416         /* If the new filter requires loopback Source MAC rewriting then
1417          * we need to allocate an SMT entry for the filter.
1418          */
1419         if (f->fs.newsmac) {
1420                 f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
1421                 if (!f->smt) {
1422                         if (f->l2t) {
1423                                 cxgb4_l2t_release(f->l2t);
1424                                 f->l2t = NULL;
1425                         }
1426                         ret = -ENOMEM;
1427                         goto free_l2t;
1428                 }
1429         }
1430
1431         atid = cxgb4_alloc_atid(t, f);
1432         if (atid < 0) {
1433                 ret = atid;
1434                 goto free_smt;
1435         }
1436
1437         iconf = adapter->params.tp.ingress_config;
1438         if (iconf & VNIC_F) {
1439                 f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
1440                 f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
1441                 f->fs.val.ovlan_vld = fs->val.pfvf_vld;
1442                 f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
1443         } else if (iconf & USE_ENC_IDX_F) {
1444                 if (f->fs.val.encap_vld) {
1445                         struct port_info *pi = netdev_priv(f->dev);
1446                         u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
1447
1448                         /* allocate MPS TCAM entry */
1449                         ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
1450                                                       match_all_mac,
1451                                                       match_all_mac,
1452                                                       f->fs.val.vni,
1453                                                       f->fs.mask.vni,
1454                                                       0, 1, 1);
1455                         if (ret < 0)
1456                                 goto free_atid;
1457
1458                         f->fs.val.ovlan = ret;
1459                         f->fs.mask.ovlan = 0xffff;
1460                         f->fs.val.ovlan_vld = 1;
1461                         f->fs.mask.ovlan_vld = 1;
1462                 }
1463         }
1464
1465         size = sizeof(struct cpl_t6_act_open_req);
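	/* The TID field of the Active Open request carries both our atid (in
	 * the low 14 bits) and, above it, the id of the queue that should
	 * receive the reply -- here the firmware event queue.
	 */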
1466         if (f->fs.type) {
1467                 ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
1468                 if (ret)
1469                         goto free_mps;
1470
1471                 skb = alloc_skb(size, GFP_KERNEL);
1472                 if (!skb) {
1473                         ret = -ENOMEM;
1474                         goto free_clip;
1475                 }
1476
1477                 mk_act_open_req6(f, skb,
1478                                  ((adapter->sge.fw_evtq.abs_id << 14) | atid),
1479                                  adapter);
1480         } else {
1481                 skb = alloc_skb(size, GFP_KERNEL);
1482                 if (!skb) {
1483                         ret = -ENOMEM;
1484                         goto free_mps;
1485                 }
1486
1487                 mk_act_open_req(f, skb,
1488                                 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
1489                                 adapter);
1490         }
1491
1492         f->pending = 1;
1493         set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
1494         t4_ofld_send(adapter, skb);
1495         return 0;
1496
1497 free_clip:
1498         cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
1499
1500 free_mps:
1501         if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
1502                 t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1);
1503
1504 free_atid:
1505         cxgb4_free_atid(t, atid);
1506
1507 free_smt:
1508         if (f->smt) {
1509                 cxgb4_smt_release(f->smt);
1510                 f->smt = NULL;
1511         }
1512
1513 free_l2t:
1514         if (f->l2t) {
1515                 cxgb4_l2t_release(f->l2t);
1516                 f->l2t = NULL;
1517         }
1518
1519 out_err:
1520         kfree(f);
1521         return ret;
1522 }
1523
1524 /* Check a Chelsio Filter Request for validity, convert it into our internal
1525  * format and send it to the hardware.  Return 0 on success, an error number
1526  * otherwise.  We attach any provided filter operation context to the internal
1527  * filter specification in order to facilitate signaling completion of the
1528  * operation.
1529  */
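/* An illustrative caller sketch (which match fields are usable depends on
 * the configured filter mode; the values below are assumptions made for
 * the example only):
 *
 *	struct ch_filter_specification fs = { 0 };
 *	int err;
 *
 *	fs.val.lport = 80;		L4 destination port, host byte order
 *	fs.mask.lport = ~0;
 *	fs.action = FILTER_DROP;
 *	err = cxgb4_set_filter(dev, 0, &fs);
 */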
1530 int __cxgb4_set_filter(struct net_device *dev, int ftid,
1531                        struct ch_filter_specification *fs,
1532                        struct filter_ctx *ctx)
1533 {
1534         struct adapter *adapter = netdev2adap(dev);
1535         unsigned int max_fidx, fidx, chip_ver;
1536         int iq, ret, filter_id = ftid;
1537         struct filter_entry *f, *tab;
1538         u32 iconf;
1539
1540         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
1541         if (fs->hash) {
1542                 if (is_hashfilter(adapter))
1543                         return cxgb4_set_hash_filter(dev, fs, ctx);
1544                 netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
1545                            __func__);
1546                 return -EINVAL;
1547         }
1548
1549         max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
1550         if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
1551             filter_id >= max_fidx)
1552                 return -E2BIG;
1553
1554         fill_default_mask(fs);
1555
1556         ret = validate_filter(dev, fs);
1557         if (ret)
1558                 return ret;
1559
1560         iq = get_filter_steerq(dev, fs);
1561         if (iq < 0)
1562                 return iq;
1563
1564         if (fs->prio) {
1565                 tab = &adapter->tids.hpftid_tab[0];
1566         } else {
1567                 tab = &adapter->tids.ftid_tab[0];
1568                 filter_id = ftid - adapter->tids.nhpftids;
1569         }
1570
1571         /* IPv6 filters occupy four slots and must be aligned on
1572          * four-slot boundaries.  IPv4 filters only occupy a single
1573          * slot and have no alignment requirements but writing a new
1574          * IPv4 filter into the middle of an existing IPv6 filter
1575          * requires clearing the old IPv6 filter and hence we prevent
1576          * insertion.
1577          */
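	/* For example, on a pre-T6 adapter an IPv6 filter at index 4 occupies
	 * slots 4..7, so a new IPv4 filter may not be written to index 5, 6,
	 * or 7 until that IPv6 filter has been removed.
	 */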
1578         if (fs->type == 0) { /* IPv4 */
1579                 /* For T6, if our IPv4 filter isn't being written to a
1580                  * multiple-of-two filter index and there's an IPv6
1581                  * filter at the multiple-of-two base slot, then that
1582                  * IPv6 filter must be deleted first ...
1583                  * For adapters below T6, an IPv6 filter occupies four
1584                  * entries, so we check the multiple-of-four base slot.
1585                  */
1586                 if (chip_ver < CHELSIO_T6)
1587                         fidx = filter_id & ~0x3;
1588                 else
1589                         fidx = filter_id & ~0x1;
1590
1591                 if (fidx != filter_id && tab[fidx].fs.type) {
1592                         f = &tab[fidx];
1593                         if (f->valid) {
1594                                 dev_err(adapter->pdev_dev,
1595                                         "Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
1596                                         fidx, fidx + 3);
1597                                 return -EINVAL;
1598                         }
1599                 }
1600         } else { /* IPv6 */
1601                 if (chip_ver < CHELSIO_T6) {
1602                         /* Ensure that the IPv6 filter is aligned on a
1603                          * multiple of 4 boundary.
1604                          */
1605                         if (filter_id & 0x3) {
1606                                 dev_err(adapter->pdev_dev,
1607                                         "Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
1608                                 return -EINVAL;
1609                         }
1610
1611                         /* Check all except the base overlapping IPv4 filter
1612                          * slots.
1613                          */
1614                         for (fidx = filter_id + 1; fidx < filter_id + 4;
1615                              fidx++) {
1616                                 f = &tab[fidx];
1617                                 if (f->valid) {
1618                                         dev_err(adapter->pdev_dev,
1619                                                 "Invalid location.  IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
1620                                                 fidx);
1621                                         return -EBUSY;
1622                                 }
1623                         }
1624                 } else {
1625                         /* On T6, with CLIP enabled, an IPv6 filter
1626                          * occupies two entries.
1627                          */
1628                         if (filter_id & 0x1)
1629                                 return -EINVAL;
1630                         /* Check overlapping IPv4 filter slot */
1631                         fidx = filter_id + 1;
1632                         f = &tab[fidx];
1633                         if (f->valid) {
1634                                 pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
1635                                        __func__, fidx);
1636                                 return -EBUSY;
1637                         }
1638                 }
1639         }
1640
1641         /* Check to make sure that the provided filter index is not
1642          * already in use by someone else.
1643          */
1644         f = &tab[filter_id];
1645         if (f->valid)
1646                 return -EBUSY;
1647
1648         if (fs->prio) {
1649                 fidx = filter_id + adapter->tids.hpftid_base;
1650                 ret = cxgb4_set_hpftid(&adapter->tids, filter_id,
1651                                        fs->type ? PF_INET6 : PF_INET);
1652         } else {
1653                 fidx = filter_id + adapter->tids.ftid_base;
1654                 ret = cxgb4_set_ftid(&adapter->tids, filter_id,
1655                                      fs->type ? PF_INET6 : PF_INET,
1656                                      chip_ver);
1657         }
1658
1659         if (ret)
1660                 return ret;
1661
1662         /* Check to make sure the filter requested is writable ... */
1663         ret = writable_filter(f);
1664         if (ret)
1665                 goto free_tid;
1666
1667         if (is_t6(adapter->params.chip) && fs->type &&
1668             ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
1669             IPV6_ADDR_ANY) {
1670                 ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
1671                 if (ret)
1672                         goto free_tid;
1673         }
1674
1675         /* Convert the filter specification into our internal format.
1676          * We copy the PF/VF specification into the Outer VLAN field
1677          * here so the rest of the code -- including the interface to
1678          * the firmware -- doesn't have to constantly do these checks.
1679          */
1680         f->fs = *fs;
1681         f->fs.iq = iq;
1682         f->dev = dev;
1683
1684         iconf = adapter->params.tp.ingress_config;
1685         if (iconf & VNIC_F) {
1686                 f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
1687                 f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
1688                 f->fs.val.ovlan_vld = fs->val.pfvf_vld;
1689                 f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
1690         } else if (iconf & USE_ENC_IDX_F) {
1691                 if (f->fs.val.encap_vld) {
1692                         struct port_info *pi = netdev_priv(f->dev);
1693                         u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
1694
1695                         /* allocate MPS TCAM entry */
1696                         ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
1697                                                       match_all_mac,
1698                                                       match_all_mac,
1699                                                       f->fs.val.vni,
1700                                                       f->fs.mask.vni,
1701                                                       0, 1, 1);
1702                         if (ret < 0)
1703                                 goto free_tid;
1704
1705                         f->fs.val.ovlan = ret;
1706                         f->fs.mask.ovlan = 0x1ff;
1707                         f->fs.val.ovlan_vld = 1;
1708                         f->fs.mask.ovlan_vld = 1;
1709                 }
1710         }
1711
1712         /* Attempt to set the filter.  If we don't succeed, we clear
1713          * it and return the failure.
1714          */
1715         f->ctx = ctx;
1716         f->tid = fidx; /* Save the actual tid */
1717         ret = set_filter_wr(adapter, ftid);
1718         if (ret)
1719                 goto free_tid;
1720
1721         return ret;
1722
1723 free_tid:
1724         if (f->fs.prio)
1725                 cxgb4_clear_hpftid(&adapter->tids, filter_id,
1726                                    fs->type ? PF_INET6 : PF_INET);
1727         else
1728                 cxgb4_clear_ftid(&adapter->tids, filter_id,
1729                                  fs->type ? PF_INET6 : PF_INET,
1730                                  chip_ver);
1731
1732         clear_filter(adapter, f);
1733         return ret;
1734 }
1735
1736 static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
1737                                  struct filter_ctx *ctx)
1738 {
1739         struct adapter *adapter = netdev2adap(dev);
1740         struct tid_info *t = &adapter->tids;
1741         struct cpl_abort_req *abort_req;
1742         struct cpl_abort_rpl *abort_rpl;
1743         struct cpl_set_tcb_field *req;
1744         struct ulptx_idata *aligner;
1745         struct work_request_hdr *wr;
1746         struct filter_entry *f;
1747         struct sk_buff *skb;
1748         unsigned int wrlen;
1749         int ret;
1750
1751         netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
1752                    __func__, filter_id, adapter->tids.nftids);
1753
1754         if (tid_out_of_range(t, filter_id))
1755                 return -E2BIG;
1756
1757         f = lookup_tid(t, filter_id);
1758         if (!f) {
1759                 netdev_err(dev, "%s: no filter entry for filter_id = %d\n",
1760                            __func__, filter_id);
1761                 return -EINVAL;
1762         }
1763
1764         ret = writable_filter(f);
1765         if (ret)
1766                 return ret;
1767
1768         if (!f->valid)
1769                 return -EINVAL;
1770
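	/* Tear down the hash filter with a single work request chaining three
	 * messages: a SET_TCB_FIELD that points TCB_RSS_INFO at the firmware
	 * event queue (presumably so the abort completion is steered there),
	 * followed by an ABORT_REQ/ABORT_RPL pair for the filter's tid.
	 */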
1771         f->ctx = ctx;
1772         f->pending = 1;
1773         wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
1774                         + sizeof(*abort_req) + sizeof(*abort_rpl), 16);
1775         skb = alloc_skb(wrlen, GFP_KERNEL);
1776         if (!skb) {
1777                 netdev_err(dev, "%s: could not allocate skb\n", __func__);
1778                 return -ENOMEM;
1779         }
1780         set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
1781         req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
1782         INIT_ULPTX_WR(req, wrlen, 0, 0);
1783         wr = (struct work_request_hdr *)req;
1784         wr++;
1785         req = (struct cpl_set_tcb_field *)wr;
1786         mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
1787                        TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
1788         aligner = (struct ulptx_idata *)(req + 1);
1789         abort_req = (struct cpl_abort_req *)(aligner + 1);
1790         mk_abort_req_ulp(abort_req, f->tid);
1791         abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
1792         mk_abort_rpl_ulp(abort_rpl, f->tid);
1793         t4_ofld_send(adapter, skb);
1794         return 0;
1795 }
1796
1797 /* Check a delete filter request for validity and send it to the hardware.
1798  * Return 0 on success, an error number otherwise.  We attach any provided
1799  * filter operation context to the internal filter specification in order to
1800  * facilitate signaling completion of the operation.
1801  */
1802 int __cxgb4_del_filter(struct net_device *dev, int filter_id,
1803                        struct ch_filter_specification *fs,
1804                        struct filter_ctx *ctx)
1805 {
1806         struct adapter *adapter = netdev2adap(dev);
1807         unsigned int max_fidx, chip_ver;
1808         struct filter_entry *f;
1809         int ret;
1810
1811         chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
1812         if (fs && fs->hash) {
1813                 if (is_hashfilter(adapter))
1814                         return cxgb4_del_hash_filter(dev, filter_id, ctx);
1815                 netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
1816                            __func__);
1817                 return -EINVAL;
1818         }
1819
1820         max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
1821         if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
1822             filter_id >= max_fidx)
1823                 return -E2BIG;
1824
1825         if (filter_id < adapter->tids.nhpftids)
1826                 f = &adapter->tids.hpftid_tab[filter_id];
1827         else
1828                 f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids];
1829
1830         ret = writable_filter(f);
1831         if (ret)
1832                 return ret;
1833
1834         if (f->valid) {
1835                 f->ctx = ctx;
1836                 if (f->fs.prio)
1837                         cxgb4_clear_hpftid(&adapter->tids,
1838                                            f->tid - adapter->tids.hpftid_base,
1839                                            f->fs.type ? PF_INET6 : PF_INET);
1840                 else
1841                         cxgb4_clear_ftid(&adapter->tids,
1842                                          f->tid - adapter->tids.ftid_base,
1843                                          f->fs.type ? PF_INET6 : PF_INET,
1844                                          chip_ver);
1845                 return del_filter_wr(adapter, filter_id);
1846         }
1847
1848         /* If the caller has passed in a Completion Context then we need to
1849          * mark it as a successful completion so they don't stall waiting
1850          * for it.
1851          */
1852         if (ctx) {
1853                 ctx->result = 0;
1854                 complete(&ctx->completion);
1855         }
1856         return ret;
1857 }
1858
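/* Blocking variants of __cxgb4_set_filter()/__cxgb4_del_filter(): issue
 * the request and then wait up to 10 seconds for the reply handler to
 * post the completion carrying the operation's result.
 */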
1859 int cxgb4_set_filter(struct net_device *dev, int filter_id,
1860                      struct ch_filter_specification *fs)
1861 {
1862         struct filter_ctx ctx;
1863         int ret;
1864
1865         init_completion(&ctx.completion);
1866
1867         ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
1868         if (ret)
1869                 goto out;
1870
1871         /* Wait for reply */
1872         ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
1873         if (!ret)
1874                 return -ETIMEDOUT;
1875
1876         ret = ctx.result;
1877 out:
1878         return ret;
1879 }
1880
1881 int cxgb4_del_filter(struct net_device *dev, int filter_id,
1882                      struct ch_filter_specification *fs)
1883 {
1884         struct filter_ctx ctx;
1885         int ret;
1886
1887         if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
1888                 return 0;
1889
1890         init_completion(&ctx.completion);
1891
1892         ret = __cxgb4_del_filter(dev, filter_id, fs, &ctx);
1893         if (ret)
1894                 goto out;
1895
1896         /* Wait for reply */
1897         ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
1898         if (!ret)
1899                 return -ETIMEDOUT;
1900
1901         ret = ctx.result;
1902 out:
1903         return ret;
1904 }
1905
1906 static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
1907                                 struct filter_entry *f)
1908 {
1909         if (f->fs.hitcnts)
1910                 set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
1911                               TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) |
1912                               TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
1913                               TCB_TIMESTAMP_V(0ULL) |
1914                               TCB_RTT_TS_RECENT_AGE_V(0ULL),
1915                               1);
1916
1917         if (f->fs.newdmac)
1918                 set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
1919                               1);
1920
1921         if (f->fs.newvlan == VLAN_INSERT ||
1922             f->fs.newvlan == VLAN_REWRITE)
1923                 set_tcb_tflag(adap, f, tid, TF_CCTRL_RFR_S, 1,
1924                               1);
1925         if (f->fs.newsmac)
1926                 configure_filter_smac(adap, f);
1927
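	/* Each NAT mode selects which of the destination IP (dip), source IP
	 * (sip), destination L4 port (dp) and source L4 port (sp) get
	 * rewritten; set_nat_params() takes those four flags in that order.
	 */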
1928         if (f->fs.nat_mode) {
1929                 switch (f->fs.nat_mode) {
1930                 case NAT_MODE_DIP:
1931                         set_nat_params(adap, f, tid, true, false, false, false);
1932                         break;
1933
1934                 case NAT_MODE_DIP_DP:
1935                         set_nat_params(adap, f, tid, true, false, true, false);
1936                         break;
1937
1938                 case NAT_MODE_DIP_DP_SIP:
1939                         set_nat_params(adap, f, tid, true, true, true, false);
1940                         break;
1941                 case NAT_MODE_DIP_DP_SP:
1942                         set_nat_params(adap, f, tid, true, false, true, true);
1943                         break;
1944
1945                 case NAT_MODE_SIP_SP:
1946                         set_nat_params(adap, f, tid, false, true, false, true);
1947                         break;
1948
1949                 case NAT_MODE_DIP_SIP_SP:
1950                         set_nat_params(adap, f, tid, true, true, false, true);
1951                         break;
1952
1953                 case NAT_MODE_ALL:
1954                         set_nat_params(adap, f, tid, true, true, true, true);
1955                         break;
1956
1957                 default:
1958                         pr_err("%s: Invalid NAT mode: %d\n",
1959                                __func__, f->fs.nat_mode);
1960                         return -EINVAL;
1961                 }
1962         }
1963         return 0;
1964 }
1965
1966 void hash_del_filter_rpl(struct adapter *adap,
1967                          const struct cpl_abort_rpl_rss *rpl)
1968 {
1969         unsigned int status = rpl->status;
1970         struct tid_info *t = &adap->tids;
1971         unsigned int tid = GET_TID(rpl);
1972         struct filter_ctx *ctx = NULL;
1973         struct filter_entry *f;
1974
1975         dev_dbg(adap->pdev_dev, "%s: status = %u; tid = %u\n",
1976                 __func__, status, tid);
1977
1978         f = lookup_tid(t, tid);
1979         if (!f) {
1980                 dev_err(adap->pdev_dev, "%s: could not find filter entry\n",
1981                         __func__);
1982                 return;
1983         }
1984         ctx = f->ctx;
1985         f->ctx = NULL;
1986         clear_filter(adap, f);
1987         cxgb4_remove_tid(t, 0, tid, 0);
1988         kfree(f);
1989         if (ctx) {
1990                 ctx->result = 0;
1991                 complete(&ctx->completion);
1992         }
1993 }
1994
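/* Handle the CPL_ACT_OPEN_RPL for a hash filter request.  On success the
 * filter moves from its atid to the hardware-assigned tid and its TCB is
 * programmed; on any failure the filter is cleaned up and the caller is
 * completed with an error (-ENOSPC when the status is CPL_ERR_TCAM_FULL).
 */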
1995 void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
1996 {
1997         unsigned int ftid = TID_TID_G(AOPEN_ATID_G(ntohl(rpl->atid_status)));
1998         unsigned int status  = AOPEN_STATUS_G(ntohl(rpl->atid_status));
1999         struct tid_info *t = &adap->tids;
2000         unsigned int tid = GET_TID(rpl);
2001         struct filter_ctx *ctx = NULL;
2002         struct filter_entry *f;
2003
2004         dev_dbg(adap->pdev_dev, "%s: tid = %u; atid = %u; status = %u\n",
2005                 __func__, tid, ftid, status);
2006
2007         f = lookup_atid(t, ftid);
2008         if (!f) {
2009                 dev_err(adap->pdev_dev, "%s: could not find filter entry\n",
2010                         __func__);
2011                 return;
2012         }
2013         ctx = f->ctx;
2014         f->ctx = NULL;
2015
2016         switch (status) {
2017         case CPL_ERR_NONE:
2018                 f->tid = tid;
2019                 f->pending = 0;
2020                 f->valid = 1;
2021                 cxgb4_insert_tid(t, f, f->tid, 0);
2022                 cxgb4_free_atid(t, ftid);
2023                 if (ctx) {
2024                         ctx->tid = f->tid;
2025                         ctx->result = 0;
2026                 }
2027                 if (configure_filter_tcb(adap, tid, f)) {
2028                         clear_filter(adap, f);
2029                         cxgb4_remove_tid(t, 0, tid, 0);
2030                         kfree(f);
2031                         if (ctx) {
2032                                 ctx->result = -EINVAL;
2033                                 complete(&ctx->completion);
2034                         }
2035                         return;
2036                 }
2037                 break;
2038
2039         default:
2040                 if (status != CPL_ERR_TCAM_FULL)
2041                         dev_err(adap->pdev_dev, "%s: filter creation failed; status = %u\n",
2042                                 __func__, status);
2043
2044                 if (ctx) {
2045                         if (status == CPL_ERR_TCAM_FULL)
2046                                 ctx->result = -ENOSPC;
2047                         else
2048                                 ctx->result = -EINVAL;
2049                 }
2050                 clear_filter(adap, f);
2051                 cxgb4_free_atid(t, ftid);
2052                 kfree(f);
2053         }
2054         if (ctx)
2055                 complete(&ctx->completion);
2056 }
2057
2058 /* Handle a filter write/deletion reply. */
2059 void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
2060 {
2061         unsigned int tid = GET_TID(rpl);
2062         struct filter_entry *f = NULL;
2063         unsigned int max_fidx;
2064         int idx;
2065
2066         max_fidx = adap->tids.nftids + adap->tids.nsftids;
2067         /* Get the corresponding filter entry for this tid */
2068         if (adap->tids.ftid_tab) {
2069                 idx = tid - adap->tids.hpftid_base;
2070                 if (idx < adap->tids.nhpftids) {
2071                         f = &adap->tids.hpftid_tab[idx];
2072                 } else {
2073                         /* Check this in normal filter region */
2074                         idx = tid - adap->tids.ftid_base;
2075                         if (idx >= max_fidx)
2076                                 return;
2077                         f = &adap->tids.ftid_tab[idx];
2078                         idx += adap->tids.nhpftids;
2079                 }
2080
2081                 if (f->tid != tid)
2082                         return;
2083         }
2084
2085         /* We found the filter entry for this tid */
2086         if (f) {
2087                 unsigned int ret = TCB_COOKIE_G(rpl->cookie);
2088                 struct filter_ctx *ctx;
2089
2090                 /* Pull off any filter operation context attached to the
2091                  * filter.
2092                  */
2093                 ctx = f->ctx;
2094                 f->ctx = NULL;
2095
2096                 if (ret == FW_FILTER_WR_FLT_DELETED) {
2097                         /* Clear the filter when we get confirmation from the
2098                          * hardware that the filter has been deleted.
2099                          */
2100                         clear_filter(adap, f);
2101                         if (ctx)
2102                                 ctx->result = 0;
2103                 } else if (ret == FW_FILTER_WR_FLT_ADDED) {
2104                         int err = 0;
2105
2106                         if (f->fs.newsmac)
2107                                 err = configure_filter_smac(adap, f);
2108
2109                         if (!err) {
2110                                 f->pending = 0;  /* async setup completed */
2111                                 f->valid = 1;
2112                                 if (ctx) {
2113                                         ctx->result = 0;
2114                                         ctx->tid = idx;
2115                                 }
2116                         } else {
2117                                 clear_filter(adap, f);
2118                                 if (ctx)
2119                                         ctx->result = err;
2120                         }
2121                 } else {
2122                         /* Something went wrong.  Issue a warning about the
2123                          * problem and clear everything out.
2124                          */
2125                         dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
2126                                 idx, ret);
2127                         clear_filter(adap, f);
2128                         if (ctx)
2129                                 ctx->result = -EINVAL;
2130                 }
2131                 if (ctx)
2132                         complete(&ctx->completion);
2133         }
2134 }
2135
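/* Check the LE/TP register configuration before advertising hash filter
 * support: on T6 with offload enabled, TP_GLOBAL_CONFIG must have
 * ACTIVEFILTERCOUNTS set; without offload, the LE_DB_RSP_CODE_{0,1}
 * active-hit fields must both read back as 4.
 */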
2136 void init_hash_filter(struct adapter *adap)
2137 {
2138         u32 reg;
2139
2140         /* On T6, verify the necessary register configs and warn the user in
2141          * case of improper config
2142          */
2143         if (is_t6(adap->params.chip)) {
2144                 if (is_offload(adap)) {
2145                         if (!(t4_read_reg(adap, TP_GLOBAL_CONFIG_A)
2146                            & ACTIVEFILTERCOUNTS_F)) {
2147                                 dev_err(adap->pdev_dev, "Invalid hash filter + ofld config\n");
2148                                 return;
2149                         }
2150                 } else {
2151                         reg = t4_read_reg(adap, LE_DB_RSP_CODE_0_A);
2152                         if (TCAM_ACTV_HIT_G(reg) != 4) {
2153                                 dev_err(adap->pdev_dev, "Invalid hash filter config\n");
2154                                 return;
2155                         }
2156
2157                         reg = t4_read_reg(adap, LE_DB_RSP_CODE_1_A);
2158                         if (HASH_ACTV_HIT_G(reg) != 4) {
2159                                 dev_err(adap->pdev_dev, "Invalid hash filter config\n");
2160                                 return;
2161                         }
2162                 }
2163
2164         } else {
2165                 dev_err(adap->pdev_dev, "Hash filter supported only on T6\n");
2166                 return;
2167         }
2168
2169         adap->params.hash_filter = 1;
2170 }