/*
 * This file is part of the Chelsio T4 Ethernet driver for Linux.
 *
 * Copyright (c) 2003-2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "cxgb4.h"
#include "t4_regs.h"
#include "t4_tcb.h"
#include "t4_values.h"
#include "clip_tbl.h"
#include "l2t.h"
#include "smt.h"
#include "t4fw_api.h"
#include "cxgb4_filter.h"

static inline bool is_field_set(u32 val, u32 mask)
{
	return val || mask;
}

static inline bool unsupported(u32 conf, u32 conf_mask, u32 val, u32 mask)
{
	return !(conf & conf_mask) && is_field_set(val, mask);
}

/* Send a CPL_SET_TCB_FIELD message to modify one field of a filter's TCB. */
static int set_tcb_field(struct adapter *adap, struct filter_entry *f,
			 unsigned int ftid, u16 word, u64 mask, u64 val,
			 int no_reply)
{
	struct cpl_set_tcb_field *req;
	struct sk_buff *skb;

	skb = alloc_skb(sizeof(struct cpl_set_tcb_field), GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	req = (struct cpl_set_tcb_field *)__skb_put_zero(skb, sizeof(*req));
	INIT_TP_WR_CPL(req, CPL_SET_TCB_FIELD, ftid);
	req->reply_ctrl = htons(REPLY_CHAN_V(0) |
				QUEUENO_V(adap->sge.fw_evtq.abs_id) |
				NO_REPLY_V(no_reply));
	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(ftid));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adap, skb);
	return 0;
}

/* Set one of the t_flags bits in the TCB. */
static int set_tcb_tflag(struct adapter *adap, struct filter_entry *f,
			 unsigned int ftid, unsigned int bit_pos,
			 unsigned int val, int no_reply)
{
	return set_tcb_field(adap, f, ftid, TCB_T_FLAGS_W, 1ULL << bit_pos,
			     (unsigned long long)val << bit_pos, no_reply);
}
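
/* Usage sketch (illustrative; mirrors calls made later in this file): drop
 * all traffic matched by an installed hash filter by flipping a single
 * t_flags bit, without requesting a reply:
 *
 *	set_tcb_tflag(adap, f, f->tid, TF_DROP_S, 1, 1);
 *
 * Only bit TF_DROP_S of TCB word TCB_T_FLAGS_W is touched; every other
 * flag bit is preserved by the mask.
 */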
/* Build a CPL_ABORT_REQ message as an immediate ULP_TX packet. */
static void mk_abort_req_ulp(struct cpl_abort_req *abort_req, unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
	txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_req), 16));
	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	sc->len = htonl(sizeof(*abort_req) - sizeof(struct work_request_hdr));
	OPCODE_TID(abort_req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, tid));
	abort_req->rsvd0 = htonl(0);
	abort_req->rsvd1 = 0;
	abort_req->cmd = CPL_ABORT_NO_RST;
}

/* Build a CPL_ABORT_RPL message as an immediate ULP_TX packet. */
static void mk_abort_rpl_ulp(struct cpl_abort_rpl *abort_rpl, unsigned int tid)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)abort_rpl;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
	txpkt->len = htonl(DIV_ROUND_UP(sizeof(*abort_rpl), 16));
	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	sc->len = htonl(sizeof(*abort_rpl) - sizeof(struct work_request_hdr));
	OPCODE_TID(abort_rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
	abort_rpl->rsvd0 = htonl(0);
	abort_rpl->rsvd1 = 0;
	abort_rpl->cmd = CPL_ABORT_NO_RST;
}

/* Build a CPL_SET_TCB_FIELD message as an immediate ULP_TX packet. */
static void mk_set_tcb_ulp(struct filter_entry *f,
			   struct cpl_set_tcb_field *req,
			   unsigned int word, u64 mask, u64 val,
			   u8 cookie, int no_reply)
{
	struct ulp_txpkt *txpkt = (struct ulp_txpkt *)req;
	struct ulptx_idata *sc = (struct ulptx_idata *)(txpkt + 1);

	txpkt->cmd_dest = htonl(ULPTX_CMD_V(ULP_TX_PKT) | ULP_TXPKT_DEST_V(0));
	txpkt->len = htonl(DIV_ROUND_UP(sizeof(*req), 16));
	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_IMM));
	sc->len = htonl(sizeof(*req) - sizeof(struct work_request_hdr));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, f->tid));
	req->reply_ctrl = htons(NO_REPLY_V(no_reply) | REPLY_CHAN_V(0) |
				QUEUENO_V(0));
	req->word_cookie = htons(TCB_WORD_V(word) | TCB_COOKIE_V(cookie));
	req->mask = cpu_to_be64(mask);
	req->val = cpu_to_be64(val);
	sc = (struct ulptx_idata *)(req + 1);
	sc->cmd_more = htonl(ULPTX_CMD_V(ULP_TX_SC_NOOP));
	sc->len = htonl(0);
}

static int configure_filter_smac(struct adapter *adap, struct filter_entry *f)
{
	int err;

	/* do a set-tcb for smac-sel and CWR bit.. */
	err = set_tcb_field(adap, f, f->tid, TCB_SMAC_SEL_W,
			    TCB_SMAC_SEL_V(TCB_SMAC_SEL_M),
			    TCB_SMAC_SEL_V(f->smt->idx), 1);
	if (err)
		goto smac_err;

	err = set_tcb_tflag(adap, f, f->tid, TF_CCTRL_CWR_S, 1, 1);
	if (!err)
		return 0;

smac_err:
	dev_err(adap->pdev_dev, "filter %u smac config failed with error %u\n",
		f->tid, err);
	return err;
}

/* Program the NAT rewrite fields (IP addresses and ports) into the
 * filter's TCB.
 */
static void set_nat_params(struct adapter *adap, struct filter_entry *f,
			   unsigned int tid, bool dip, bool sip, bool dp,
			   bool sp)
{
	u8 *nat_lp = (u8 *)&f->fs.nat_lport;
	u8 *nat_fp = (u8 *)&f->fs.nat_fport;

	if (dip) {
		if (f->fs.type) {
			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W,
				      WORD_MASK, f->fs.nat_lip[15] |
				      f->fs.nat_lip[14] << 8 |
				      f->fs.nat_lip[13] << 16 |
				      (u64)f->fs.nat_lip[12] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 1,
				      WORD_MASK, f->fs.nat_lip[11] |
				      f->fs.nat_lip[10] << 8 |
				      f->fs.nat_lip[9] << 16 |
				      (u64)f->fs.nat_lip[8] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 2,
				      WORD_MASK, f->fs.nat_lip[7] |
				      f->fs.nat_lip[6] << 8 |
				      f->fs.nat_lip[5] << 16 |
				      (u64)f->fs.nat_lip[4] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_SND_UNA_RAW_W + 3,
				      WORD_MASK, f->fs.nat_lip[3] |
				      f->fs.nat_lip[2] << 8 |
				      f->fs.nat_lip[1] << 16 |
				      (u64)f->fs.nat_lip[0] << 24, 1);
		} else {
			set_tcb_field(adap, f, tid, TCB_RX_FRAG3_LEN_RAW_W,
				      WORD_MASK, f->fs.nat_lip[3] |
				      f->fs.nat_lip[2] << 8 |
				      f->fs.nat_lip[1] << 16 |
				      (u64)f->fs.nat_lip[0] << 24, 1);
		}
	}

	if (sip) {
		if (f->fs.type) {
			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W,
				      WORD_MASK, f->fs.nat_fip[15] |
				      f->fs.nat_fip[14] << 8 |
				      f->fs.nat_fip[13] << 16 |
				      (u64)f->fs.nat_fip[12] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 1,
				      WORD_MASK, f->fs.nat_fip[11] |
				      f->fs.nat_fip[10] << 8 |
				      f->fs.nat_fip[9] << 16 |
				      (u64)f->fs.nat_fip[8] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 2,
				      WORD_MASK, f->fs.nat_fip[7] |
				      f->fs.nat_fip[6] << 8 |
				      f->fs.nat_fip[5] << 16 |
				      (u64)f->fs.nat_fip[4] << 24, 1);

			set_tcb_field(adap, f, tid, TCB_RX_FRAG2_PTR_RAW_W + 3,
				      WORD_MASK, f->fs.nat_fip[3] |
				      f->fs.nat_fip[2] << 8 |
				      f->fs.nat_fip[1] << 16 |
				      (u64)f->fs.nat_fip[0] << 24, 1);
		} else {
			set_tcb_field(adap, f, tid,
				      TCB_RX_FRAG3_START_IDX_OFFSET_RAW_W,
				      WORD_MASK, f->fs.nat_fip[3] |
				      f->fs.nat_fip[2] << 8 |
				      f->fs.nat_fip[1] << 16 |
				      (u64)f->fs.nat_fip[0] << 24, 1);
		}
	}

	set_tcb_field(adap, f, tid, TCB_PDU_HDR_LEN_W, WORD_MASK,
		      (dp ? (nat_lp[1] | nat_lp[0] << 8) : 0) |
		      (sp ? (nat_fp[1] << 16 | (u64)nat_fp[0] << 24) : 0),
		      1);
}
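
/* Worked example (illustrative, little-endian host assumed): with
 * nat_lport = 8080 (0x1f90), nat_lp[0] = 0x90 and nat_lp[1] = 0x1f, so a
 * dp-only rewrite writes nat_lp[1] | nat_lp[0] << 8 == 0x901f into the
 * low 16 bits of TCB_PDU_HDR_LEN_W, i.e. the port in network byte order;
 * the sp half of the word stays zero.
 */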
/* Validate filter spec against configuration done on the card. */
static int validate_filter(struct net_device *dev,
			   struct ch_filter_specification *fs)
{
	struct adapter *adapter = netdev2adap(dev);
	u32 fconf, iconf;

	/* Check for unconfigured fields being used. */
	iconf = adapter->params.tp.ingress_config;
	fconf = fs->hash ? adapter->params.tp.filter_mask :
			   adapter->params.tp.vlan_pri_map;

	if (unsupported(fconf, FCOE_F, fs->val.fcoe, fs->mask.fcoe) ||
	    unsupported(fconf, PORT_F, fs->val.iport, fs->mask.iport) ||
	    unsupported(fconf, TOS_F, fs->val.tos, fs->mask.tos) ||
	    unsupported(fconf, ETHERTYPE_F, fs->val.ethtype,
			fs->mask.ethtype) ||
	    unsupported(fconf, MACMATCH_F, fs->val.macidx, fs->mask.macidx) ||
	    unsupported(fconf, MPSHITTYPE_F, fs->val.matchtype,
			fs->mask.matchtype) ||
	    unsupported(fconf, FRAGMENTATION_F, fs->val.frag, fs->mask.frag) ||
	    unsupported(fconf, PROTOCOL_F, fs->val.proto, fs->mask.proto) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.pfvf_vld,
			fs->mask.pfvf_vld) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
			fs->mask.ovlan_vld) ||
	    unsupported(fconf, VNIC_ID_F, fs->val.encap_vld,
			fs->mask.encap_vld) ||
	    unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
		return -EOPNOTSUPP;

	/* T4 inconveniently uses the same FT_VNIC_ID_W bits for both the Outer
	 * VLAN Tag and PF/VF/VFvld fields based on VNIC_F being set
	 * in TP_INGRESS_CONFIG. Hence the somewhat crazy checks
	 * below. Additionally, since the T4 firmware interface also
	 * carries that overlap, we need to translate any PF/VF
	 * specification into that internal format below.
	 */
	if ((is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
	     is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) ||
	    (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
	     is_field_set(fs->val.encap_vld, fs->mask.encap_vld)) ||
	    (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
	     is_field_set(fs->val.encap_vld, fs->mask.encap_vld)))
		return -EOPNOTSUPP;
	if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
	    (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
	     (iconf & VNIC_F)))
		return -EOPNOTSUPP;
	if (fs->val.pf > 0x7 || fs->val.vf > 0x7f)
		return -ERANGE;
	fs->mask.pf &= 0x7;
	fs->mask.vf &= 0x7f;

	/* If the user is requesting that the filter action loop
	 * matching packets back out one of our ports, make sure that
	 * the egress port is in range.
	 */
	if (fs->action == FILTER_SWITCH &&
	    fs->eport >= adapter->params.nports)
		return -ERANGE;

	/* Don't allow various trivially obvious bogus out-of-range values... */
	if (fs->val.iport >= adapter->params.nports)
		return -ERANGE;

	/* T4 doesn't support removing VLAN Tags for loop back filters. */
	if (is_t4(adapter->params.chip) &&
	    fs->action == FILTER_SWITCH &&
	    (fs->newvlan == VLAN_REMOVE ||
	     fs->newvlan == VLAN_REWRITE))
		return -EOPNOTSUPP;

	if (fs->val.encap_vld &&
	    CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
		return -EOPNOTSUPP;

	return 0;
}
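
/* Example (illustrative): a spec that sets both val.pfvf_vld and
 * val.ovlan_vld is rejected with -EOPNOTSUPP above, because both match
 * values land in the same FT_VNIC_ID_W bits and cannot be checked
 * simultaneously.
 */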
static int get_filter_steerq(struct net_device *dev,
			     struct ch_filter_specification *fs)
{
	struct adapter *adapter = netdev2adap(dev);
	int iq;

	/* If the user has requested steering matching Ingress Packets
	 * to a specific Queue Set, we need to make sure it's in range
	 * for the port and map that into the Absolute Queue ID of the
	 * Queue Set's Response Queue.
	 */
	if (!fs->dirsteer) {
		if (fs->iq)
			return -EINVAL;
		iq = 0;
	} else {
		struct port_info *pi = netdev_priv(dev);

		/* If the iq id is greater than the number of qsets,
		 * then assume it is an absolute qid.
		 */
		if (fs->iq < pi->nqsets)
			iq = adapter->sge.ethrxq[pi->first_qset +
						 fs->iq].rspq.abs_id;
		else
			iq = fs->iq;
	}

	return iq;
}

static int get_filter_count(struct adapter *adapter, unsigned int fidx,
			    u64 *pkts, u64 *bytes, bool hash)
{
	unsigned int tcb_base, tcbaddr;
	unsigned int word_offset;
	struct filter_entry *f;
	__be64 be64_byte_count;
	int ret;

	tcb_base = t4_read_reg(adapter, TP_CMM_TCB_BASE_A);
	if (is_hashfilter(adapter) && hash) {
		if (tid_out_of_range(&adapter->tids, fidx))
			return -E2BIG;
		f = adapter->tids.tid_tab[fidx - adapter->tids.tid_base];
		if (!f)
			return -EINVAL;
	} else {
		if ((fidx != (adapter->tids.nftids + adapter->tids.nsftids +
			      adapter->tids.nhpftids - 1)) &&
		    fidx >= (adapter->tids.nftids + adapter->tids.nhpftids))
			return -E2BIG;

		if (fidx < adapter->tids.nhpftids)
			f = &adapter->tids.hpftid_tab[fidx];
		else
			f = &adapter->tids.ftid_tab[fidx -
						    adapter->tids.nhpftids];
		if (!f->valid)
			return -EINVAL;
	}
	tcbaddr = tcb_base + f->tid * TCB_SIZE;

	spin_lock(&adapter->win0_lock);
	if (is_t4(adapter->params.chip)) {
		__be64 be64_count;

		/* T4 doesn't maintain byte counts in hw */
		*bytes = 0;

		/* Get pkts */
		word_offset = 4;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be64_count),
				   (__be32 *)&be64_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*pkts = be64_to_cpu(be64_count);
	} else {
		__be32 be32_count;

		/* Get bytes */
		word_offset = 4;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be64_byte_count),
				   (__be32 *)&be64_byte_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*bytes = be64_to_cpu(be64_byte_count);

		/* Get pkts */
		word_offset = 6;
		ret = t4_memory_rw(adapter, MEMWIN_NIC, MEM_EDC0,
				   tcbaddr + (word_offset * sizeof(__be32)),
				   sizeof(be32_count),
				   &be32_count,
				   T4_MEMORY_READ);
		if (ret < 0)
			goto out;
		*pkts = (u64)be32_to_cpu(be32_count);
	}

out:
	spin_unlock(&adapter->win0_lock);
	return ret;
}

int cxgb4_get_filter_counters(struct net_device *dev, unsigned int fidx,
			      u64 *hitcnt, u64 *bytecnt, bool hash)
{
	struct adapter *adapter = netdev2adap(dev);

	return get_filter_count(adapter, fidx, hitcnt, bytecnt, hash);
}
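
/* Usage sketch (illustrative): reading the hit counters of TCAM filter
 * index 0:
 *
 *	u64 pkts, bytes;
 *	int err = cxgb4_get_filter_counters(dev, 0, &pkts, &bytes, false);
 *
 * On T4 hardware bytes is always reported as 0 because the TCB keeps no
 * byte count.
 */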
static bool cxgb4_filter_prio_in_range(struct tid_info *t, u32 idx, u8 nslots,
				       u32 prio)
{
	struct filter_entry *prev_tab, *next_tab, *prev_fe, *next_fe;
	u32 prev_ftid, next_ftid;

	/* Only insert the rule if both of the following conditions
	 * are met:
	 * 1. The immediate previous rule has priority <= @prio.
	 * 2. The immediate next rule has priority >= @prio.
	 */

	/* High Priority (HPFILTER) region always has higher priority
	 * than normal FILTER region. So, all rules in HPFILTER region
	 * must have prio value <= rules in normal FILTER region.
	 */
	if (idx < t->nhpftids) {
		/* Don't insert if there's a rule already present at @idx
		 * in HPFILTER region.
		 */
		if (test_bit(idx, t->hpftid_bmap))
			return false;

		next_tab = t->hpftid_tab;
		next_ftid = find_next_bit(t->hpftid_bmap, t->nhpftids, idx);
		if (next_ftid >= t->nhpftids) {
			/* No next entry found in HPFILTER region.
			 * See if there's any next entry in normal
			 * FILTER region.
			 */
			next_ftid = find_first_bit(t->ftid_bmap, t->nftids);
			if (next_ftid >= t->nftids)
				next_ftid = idx;
			else
				next_tab = t->ftid_tab;
		}

		/* Search for the closest previous filter entry in HPFILTER
		 * region. No need to search in normal FILTER region because
		 * there can never be any entry in normal FILTER region whose
		 * prio value is < last entry in HPFILTER region.
		 */
		prev_ftid = find_last_bit(t->hpftid_bmap, idx);
		if (prev_ftid >= idx)
			prev_ftid = idx;

		prev_tab = t->hpftid_tab;
	} else {
		idx -= t->nhpftids;

		/* Don't insert if there's a rule already present at @idx
		 * in normal FILTER region.
		 */
		if (test_bit(idx, t->ftid_bmap))
			return false;

		prev_tab = t->ftid_tab;
		prev_ftid = find_last_bit(t->ftid_bmap, idx);
		if (prev_ftid >= idx) {
			/* No previous entry found in normal FILTER
			 * region. See if there's any previous entry
			 * in HPFILTER region.
			 */
			prev_ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
			if (prev_ftid >= t->nhpftids)
				prev_ftid = idx;
			else
				prev_tab = t->hpftid_tab;
		}

		/* Search for the closest next filter entry in normal
		 * FILTER region. No need to search in HPFILTER region
		 * because there can never be any entry in HPFILTER
		 * region whose prio value is > first entry in normal
		 * FILTER region.
		 */
		next_ftid = find_next_bit(t->ftid_bmap, t->nftids, idx);
		if (next_ftid >= t->nftids)
			next_ftid = idx;

		next_tab = t->ftid_tab;
	}

	next_fe = &next_tab[next_ftid];

	/* See if the filter entry belongs to an IPv6 rule, which
	 * occupy 4 slots on T5 and 2 slots on T6. Adjust the
	 * reference to the previously inserted filter entry
	 * accordingly.
	 */
	prev_fe = &prev_tab[prev_ftid & ~(nslots - 1)];
	if (!prev_fe->fs.type)
		prev_fe = &prev_tab[prev_ftid];

	if ((prev_fe->valid && prev_fe->fs.tc_prio > prio) ||
	    (next_fe->valid && next_fe->fs.tc_prio < prio))
		return false;

	return true;
}
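
/* Illustrative example (hypothetical state): with nhpftids == 0 and valid
 * rules at normal-region indices 2 (tc_prio 10) and 8 (tc_prio 30),
 * inserting at idx 5 is allowed only for 10 <= prio <= 30: prio 5 trips
 * the prev_fe check, prio 40 trips the next_fe check.
 */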
int cxgb4_get_free_ftid(struct net_device *dev, u8 family, bool hash_en,
			u32 tc_prio)
{
	struct adapter *adap = netdev2adap(dev);
	struct tid_info *t = &adap->tids;
	u32 bmap_ftid, max_ftid;
	struct filter_entry *f;
	unsigned long *bmap;
	bool found = false;
	u8 i, cnt, n;
	int ftid = 0;

	/* IPv4 occupy 1 slot. IPv6 occupy 2 slots on T6 and 4 slots
	 * on T5.
	 */
	n = 1;
	if (family == PF_INET6) {
		n += 1;
		if (CHELSIO_CHIP_VERSION(adap->params.chip) < CHELSIO_T6)
			n += 2;
	}

	/* There are 3 filter regions available in hardware in
	 * following order of priority:
	 *
	 * 1. High Priority (HPFILTER) region (Highest Priority).
	 * 2. HASH region.
	 * 3. Normal FILTER region (Lowest Priority).
	 *
	 * Entries in HPFILTER and normal FILTER region have index
	 * 0 as the highest priority and the rules will be scanned
	 * in ascending order until either a rule hits or end of
	 * the region is reached.
	 *
	 * All HASH region entries have same priority. The set of
	 * fields to match in headers are pre-determined. The same
	 * set of header match fields must be compulsorily specified
	 * in all the rules wanting to get inserted in HASH region.
	 * Hence, HASH region is an exact-match region. A HASH is
	 * generated for a rule based on the values in the
	 * pre-determined set of header match fields. The generated
	 * HASH serves as an index into the HASH region. There can
	 * never be 2 rules having the same HASH. Hardware will
	 * compute a HASH for every incoming packet based on the
	 * values in the pre-determined set of header match fields
	 * and uses it as an index to check if there's a rule
	 * inserted in the HASH region at the specified index. If
	 * there's a rule inserted, then it's considered as a filter
	 * hit. Otherwise, it's a filter miss and normal FILTER region
	 * is scanned afterwards.
	 */

	spin_lock_bh(&t->ftid_lock);

	ftid = (tc_prio <= t->nhpftids) ? 0 : t->nhpftids;
	max_ftid = t->nftids + t->nhpftids;
	while (ftid < max_ftid) {
		if (ftid < t->nhpftids) {
			/* If the new rule wants to get inserted into
			 * HPFILTER region, but its prio is greater
			 * than the rule with the highest prio in HASH
			 * region, or if there's not enough slots
			 * available in HPFILTER region, then skip
			 * trying to insert this rule into HPFILTER
			 * region and directly go to the next region.
			 */
			if ((t->tc_hash_tids_max_prio &&
			     tc_prio > t->tc_hash_tids_max_prio) ||
			    (ftid + n) > t->nhpftids) {
				ftid = t->nhpftids;
				continue;
			}

			bmap = t->hpftid_bmap;
			bmap_ftid = ftid;
		} else if (hash_en) {
			/* Ensure priority is >= last rule in HPFILTER
			 * region.
			 */
			ftid = find_last_bit(t->hpftid_bmap, t->nhpftids);
			if (ftid < t->nhpftids) {
				f = &t->hpftid_tab[ftid];
				if (f->valid && tc_prio < f->fs.tc_prio)
					break;
			}

			/* Ensure priority is <= first rule in normal
			 * FILTER region.
			 */
			ftid = find_first_bit(t->ftid_bmap, t->nftids);
			if (ftid < t->nftids) {
				f = &t->ftid_tab[ftid];
				if (f->valid && tc_prio > f->fs.tc_prio)
					break;
			}

			found = true;
			ftid = t->nhpftids;
			goto out_unlock;
		} else {
			/* If the new rule wants to get inserted into
			 * normal FILTER region, but its prio is less
			 * than the rule with the highest prio in HASH
			 * region, then reject the rule.
			 */
			if (t->tc_hash_tids_max_prio &&
			    tc_prio < t->tc_hash_tids_max_prio)
				break;

			if (ftid + n > max_ftid)
				break;

			bmap = t->ftid_bmap;
			bmap_ftid = ftid - t->nhpftids;
		}

		cnt = 0;
		for (i = 0; i < n; i++) {
			if (test_bit(bmap_ftid + i, bmap))
				break;
			cnt++;
		}

		if (cnt == n) {
			/* Ensure the new rule's prio doesn't conflict
			 * with existing rules.
			 */
			if (cxgb4_filter_prio_in_range(t, ftid, n,
						       tc_prio)) {
				ftid &= ~(n - 1);
				found = true;
				break;
			}
		}

		ftid += n;
	}

out_unlock:
	spin_unlock_bh(&t->ftid_lock);
	return found ? ftid : -ENOMEM;
}
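
/* Usage sketch (illustrative, hypothetical caller): reserving slots for an
 * IPv6 TC rule at priority 3 when hash filters are enabled:
 *
 *	int ftid = cxgb4_get_free_ftid(dev, PF_INET6, true, 3);
 *
 *	if (ftid < 0)
 *		return ftid;
 *
 * A negative return means -ENOMEM: no suitable run of free slots. On T6
 * an IPv6 rule needs 2 consecutive free slots (4 on T5) at a position
 * that keeps the region-wide priority ordering intact.
 */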
static int cxgb4_set_ftid(struct tid_info *t, int fidx, int family,
			  unsigned int chip_ver)
{
	spin_lock_bh(&t->ftid_lock);

	if (test_bit(fidx, t->ftid_bmap)) {
		spin_unlock_bh(&t->ftid_lock);
		return -EBUSY;
	}

	if (family == PF_INET) {
		__set_bit(fidx, t->ftid_bmap);
	} else {
		if (chip_ver < CHELSIO_T6)
			bitmap_allocate_region(t->ftid_bmap, fidx, 2);
		else
			bitmap_allocate_region(t->ftid_bmap, fidx, 1);
	}

	spin_unlock_bh(&t->ftid_lock);
	return 0;
}

static int cxgb4_set_hpftid(struct tid_info *t, int fidx, int family)
{
	spin_lock_bh(&t->ftid_lock);

	if (test_bit(fidx, t->hpftid_bmap)) {
		spin_unlock_bh(&t->ftid_lock);
		return -EBUSY;
	}

	if (family == PF_INET)
		__set_bit(fidx, t->hpftid_bmap);
	else
		bitmap_allocate_region(t->hpftid_bmap, fidx, 1);

	spin_unlock_bh(&t->ftid_lock);
	return 0;
}

static void cxgb4_clear_ftid(struct tid_info *t, int fidx, int family,
			     unsigned int chip_ver)
{
	spin_lock_bh(&t->ftid_lock);
	if (family == PF_INET) {
		__clear_bit(fidx, t->ftid_bmap);
	} else {
		if (chip_ver < CHELSIO_T6)
			bitmap_release_region(t->ftid_bmap, fidx, 2);
		else
			bitmap_release_region(t->ftid_bmap, fidx, 1);
	}
	spin_unlock_bh(&t->ftid_lock);
}

static void cxgb4_clear_hpftid(struct tid_info *t, int fidx, int family)
{
	spin_lock_bh(&t->ftid_lock);

	if (family == PF_INET)
		__clear_bit(fidx, t->hpftid_bmap);
	else
		bitmap_release_region(t->hpftid_bmap, fidx, 1);

	spin_unlock_bh(&t->ftid_lock);
}
/* Delete the filter at a specified index. */
static int del_filter_wr(struct adapter *adapter, int fidx)
{
	struct fw_filter_wr *fwr;
	struct filter_entry *f;
	struct sk_buff *skb;
	unsigned int len;

	if (fidx < adapter->tids.nhpftids)
		f = &adapter->tids.hpftid_tab[fidx];
	else
		f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];

	len = sizeof(*fwr);

	skb = alloc_skb(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	fwr = __skb_put(skb, len);
	t4_mk_filtdelwr(f->tid, fwr, adapter->sge.fw_evtq.abs_id);

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	t4_mgmt_tx(adapter, skb);
	return 0;
}
/* Send a Work Request to write the filter at a specified index. We construct
 * a Firmware Filter Work Request to have the work done and put the indicated
 * filter into "pending" mode which will prevent any further actions against
 * it till we get a reply from the firmware on the completion status of the
 * request.
 */
int set_filter_wr(struct adapter *adapter, int fidx)
{
	struct fw_filter2_wr *fwr;
	struct filter_entry *f;
	struct sk_buff *skb;

	if (fidx < adapter->tids.nhpftids)
		f = &adapter->tids.hpftid_tab[fidx];
	else
		f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];

	skb = alloc_skb(sizeof(*fwr), GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	/* If the new filter requires loopback Source MAC rewriting then
	 * we need to allocate a SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			if (f->l2t) {
				cxgb4_l2t_release(f->l2t);
				f->l2t = NULL;
			}
			kfree_skb(skb);
			return -ENOMEM;
		}
	}

	fwr = __skb_put_zero(skb, sizeof(*fwr));

	/* It would be nice to put most of the following in t4_hw.c but most
	 * of the work is translating the cxgbtool ch_filter_specification
	 * into the Work Request and the definition of that structure is
	 * currently in cxgbtool.h which isn't appropriate to pull into the
	 * common code. We may eventually try to come up with a more neutral
	 * filter specification structure but for now it's easiest to simply
	 * put this fairly direct code in line ...
	 */
	if (adapter->params.filter2_wr_support)
		fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER2_WR));
	else
		fwr->op_pkd = htonl(FW_WR_OP_V(FW_FILTER_WR));
	fwr->len16_pkd = htonl(FW_WR_LEN16_V(sizeof(*fwr) / 16));
	fwr->tid_to_iq =
		htonl(FW_FILTER_WR_TID_V(f->tid) |
		      FW_FILTER_WR_RQTYPE_V(f->fs.type) |
		      FW_FILTER_WR_NOREPLY_V(0) |
		      FW_FILTER_WR_IQ_V(f->fs.iq));
	fwr->del_filter_to_l2tix =
		htonl(FW_FILTER_WR_RPTTID_V(f->fs.rpttid) |
		      FW_FILTER_WR_DROP_V(f->fs.action == FILTER_DROP) |
		      FW_FILTER_WR_DIRSTEER_V(f->fs.dirsteer) |
		      FW_FILTER_WR_MASKHASH_V(f->fs.maskhash) |
		      FW_FILTER_WR_DIRSTEERHASH_V(f->fs.dirsteerhash) |
		      FW_FILTER_WR_LPBK_V(f->fs.action == FILTER_SWITCH) |
		      FW_FILTER_WR_DMAC_V(f->fs.newdmac) |
		      FW_FILTER_WR_SMAC_V(f->fs.newsmac) |
		      FW_FILTER_WR_INSVLAN_V(f->fs.newvlan == VLAN_INSERT ||
					     f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_RMVLAN_V(f->fs.newvlan == VLAN_REMOVE ||
					    f->fs.newvlan == VLAN_REWRITE) |
		      FW_FILTER_WR_HITCNTS_V(f->fs.hitcnts) |
		      FW_FILTER_WR_TXCHAN_V(f->fs.eport) |
		      FW_FILTER_WR_PRIO_V(f->fs.prio) |
		      FW_FILTER_WR_L2TIX_V(f->l2t ? f->l2t->idx : 0));
	fwr->ethtype = htons(f->fs.val.ethtype);
	fwr->ethtypem = htons(f->fs.mask.ethtype);
	fwr->frag_to_ovlan_vldm =
		(FW_FILTER_WR_FRAG_V(f->fs.val.frag) |
		 FW_FILTER_WR_FRAGM_V(f->fs.mask.frag) |
		 FW_FILTER_WR_IVLAN_VLD_V(f->fs.val.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLD_V(f->fs.val.ovlan_vld) |
		 FW_FILTER_WR_IVLAN_VLDM_V(f->fs.mask.ivlan_vld) |
		 FW_FILTER_WR_OVLAN_VLDM_V(f->fs.mask.ovlan_vld));
	if (f->fs.newsmac)
		fwr->smac_sel = f->smt->idx;
	fwr->rx_chan_rx_rpl_iq =
		htons(FW_FILTER_WR_RX_CHAN_V(0) |
		      FW_FILTER_WR_RX_RPL_IQ_V(adapter->sge.fw_evtq.abs_id));
	fwr->maci_to_matchtypem =
		htonl(FW_FILTER_WR_MACI_V(f->fs.val.macidx) |
		      FW_FILTER_WR_MACIM_V(f->fs.mask.macidx) |
		      FW_FILTER_WR_FCOE_V(f->fs.val.fcoe) |
		      FW_FILTER_WR_FCOEM_V(f->fs.mask.fcoe) |
		      FW_FILTER_WR_PORT_V(f->fs.val.iport) |
		      FW_FILTER_WR_PORTM_V(f->fs.mask.iport) |
		      FW_FILTER_WR_MATCHTYPE_V(f->fs.val.matchtype) |
		      FW_FILTER_WR_MATCHTYPEM_V(f->fs.mask.matchtype));
	fwr->ptcl = f->fs.val.proto;
	fwr->ptclm = f->fs.mask.proto;
	fwr->ttyp = f->fs.val.tos;
	fwr->ttypm = f->fs.mask.tos;
	fwr->ivlan = htons(f->fs.val.ivlan);
	fwr->ivlanm = htons(f->fs.mask.ivlan);
	fwr->ovlan = htons(f->fs.val.ovlan);
	fwr->ovlanm = htons(f->fs.mask.ovlan);
	memcpy(fwr->lip, f->fs.val.lip, sizeof(fwr->lip));
	memcpy(fwr->lipm, f->fs.mask.lip, sizeof(fwr->lipm));
	memcpy(fwr->fip, f->fs.val.fip, sizeof(fwr->fip));
	memcpy(fwr->fipm, f->fs.mask.fip, sizeof(fwr->fipm));
	fwr->lp = htons(f->fs.val.lport);
	fwr->lpm = htons(f->fs.mask.lport);
	fwr->fp = htons(f->fs.val.fport);
	fwr->fpm = htons(f->fs.mask.fport);

	if (adapter->params.filter2_wr_support) {
		u8 *nat_lp = (u8 *)&f->fs.nat_lport;
		u8 *nat_fp = (u8 *)&f->fs.nat_fport;

		fwr->natmode_to_ulp_type =
			FW_FILTER2_WR_ULP_TYPE_V(f->fs.nat_mode ?
						 ULP_MODE_TCPDDP :
						 ULP_MODE_NONE) |
			FW_FILTER2_WR_NATMODE_V(f->fs.nat_mode);
		memcpy(fwr->newlip, f->fs.nat_lip, sizeof(fwr->newlip));
		memcpy(fwr->newfip, f->fs.nat_fip, sizeof(fwr->newfip));
		fwr->newlport = htons(nat_lp[1] | nat_lp[0] << 8);
		fwr->newfport = htons(nat_fp[1] | nat_fp[0] << 8);
	}

	/* Mark the filter as "pending" and ship off the Filter Work Request.
	 * When we get the Work Request Reply we'll clear the pending status.
	 */
	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;
}
/* Return an error number if the indicated filter isn't writable ... */
int writable_filter(struct filter_entry *f)
{
	if (f->locked)
		return -EPERM;
	if (f->pending)
		return -EBUSY;

	return 0;
}

/* Delete the filter at the specified index (if valid), checking for all the
 * common problems with doing this, like the filter being locked, currently
 * pending in another operation, etc.
 */
int delete_filter(struct adapter *adapter, unsigned int fidx)
{
	struct filter_entry *f;
	int ret;

	if (fidx >= adapter->tids.nftids + adapter->tids.nsftids +
		    adapter->tids.nhpftids)
		return -EINVAL;

	if (fidx < adapter->tids.nhpftids)
		f = &adapter->tids.hpftid_tab[fidx];
	else
		f = &adapter->tids.ftid_tab[fidx - adapter->tids.nhpftids];
	ret = writable_filter(f);
	if (ret)
		return ret;
	if (f->valid)
		return del_filter_wr(adapter, fidx);

	return 0;
}

/* Clear a filter and release any of its resources that we own. This also
 * clears the filter's "pending" status.
 */
void clear_filter(struct adapter *adap, struct filter_entry *f)
{
	struct port_info *pi = netdev_priv(f->dev);

	/* If the new or old filter has loopback rewriting rules then we'll
	 * need to free any existing L2T, SMT or CLIP entries of the filter
	 * rule.
	 */
	if (f->l2t)
		cxgb4_l2t_release(f->l2t);

	if (f->smt)
		cxgb4_smt_release(f->smt);

	if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
		t4_free_encap_mac_filt(adap, pi->viid,
				       f->fs.val.ovlan & 0x1ff, 0);

	if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
		cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);

	/* The zeroing of the filter rule below clears the filter valid,
	 * pending, locked flags, l2t pointer, etc. so it's all we need for
	 * this operation.
	 */
	memset(f, 0, sizeof(*f));
}

void clear_all_filters(struct adapter *adapter)
{
	struct net_device *dev = adapter->port[0];
	unsigned int i;

	if (adapter->tids.hpftid_tab) {
		struct filter_entry *f = &adapter->tids.hpftid_tab[0];

		for (i = 0; i < adapter->tids.nhpftids; i++, f++)
			if (f->valid || f->pending)
				cxgb4_del_filter(dev, i, &f->fs);
	}

	if (adapter->tids.ftid_tab) {
		struct filter_entry *f = &adapter->tids.ftid_tab[0];
		unsigned int max_ftid = adapter->tids.nftids +
					adapter->tids.nsftids +
					adapter->tids.nhpftids;

		/* Clear all TCAM filters */
		for (i = adapter->tids.nhpftids; i < max_ftid; i++, f++)
			if (f->valid || f->pending)
				cxgb4_del_filter(dev, i, &f->fs);
	}

	/* Clear all hash filters */
	if (is_hashfilter(adapter) && adapter->tids.tid_tab) {
		struct filter_entry *f;
		unsigned int sb;

		for (i = adapter->tids.hash_base;
		     i <= adapter->tids.ntids; i++) {
			f = (struct filter_entry *)
				adapter->tids.tid_tab[i];

			if (f && (f->valid || f->pending))
				cxgb4_del_filter(dev, f->tid, &f->fs);
		}

		sb = adapter->tids.stid_base;
		for (i = 0; i < sb; i++) {
			f = (struct filter_entry *)adapter->tids.tid_tab[i];

			if (f && (f->valid || f->pending))
				cxgb4_del_filter(dev, f->tid, &f->fs);
		}
	}
}

/* Fill up default masks for set match fields. */
static void fill_default_mask(struct ch_filter_specification *fs)
{
	unsigned int lip = 0, lip_mask = 0;
	unsigned int fip = 0, fip_mask = 0;
	unsigned int i;

	if (fs->val.iport && !fs->mask.iport)
		fs->mask.iport |= ~0;
	if (fs->val.fcoe && !fs->mask.fcoe)
		fs->mask.fcoe |= ~0;
	if (fs->val.matchtype && !fs->mask.matchtype)
		fs->mask.matchtype |= ~0;
	if (fs->val.macidx && !fs->mask.macidx)
		fs->mask.macidx |= ~0;
	if (fs->val.ethtype && !fs->mask.ethtype)
		fs->mask.ethtype |= ~0;
	if (fs->val.ivlan && !fs->mask.ivlan)
		fs->mask.ivlan |= ~0;
	if (fs->val.ovlan && !fs->mask.ovlan)
		fs->mask.ovlan |= ~0;
	if (fs->val.frag && !fs->mask.frag)
		fs->mask.frag |= ~0;
	if (fs->val.tos && !fs->mask.tos)
		fs->mask.tos |= ~0;
	if (fs->val.proto && !fs->mask.proto)
		fs->mask.proto |= ~0;
	if (fs->val.pfvf_vld && !fs->mask.pfvf_vld)
		fs->mask.pfvf_vld |= ~0;
	if (fs->val.pf && !fs->mask.pf)
		fs->mask.pf |= ~0;
	if (fs->val.vf && !fs->mask.vf)
		fs->mask.vf |= ~0;

	for (i = 0; i < ARRAY_SIZE(fs->val.lip); i++) {
		lip |= fs->val.lip[i];
		lip_mask |= fs->mask.lip[i];
		fip |= fs->val.fip[i];
		fip_mask |= fs->mask.fip[i];
	}

	if (lip && !lip_mask)
		memset(fs->mask.lip, ~0, sizeof(fs->mask.lip));

	if (fip && !fip_mask)
		memset(fs->mask.fip, ~0, sizeof(fs->mask.fip));

	if (fs->val.lport && !fs->mask.lport)
		fs->mask.lport = ~0;
	if (fs->val.fport && !fs->mask.fport)
		fs->mask.fport = ~0;
}
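
/* Example (illustrative): a spec arriving with val.lport = 80 and
 * mask.lport = 0 leaves here with mask.lport = 0xffff, so a value given
 * without a mask is promoted to an exact match instead of silently
 * matching every port.
 */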
static bool is_addr_all_mask(u8 *ipmask, int family)
{
	if (family == AF_INET) {
		struct in_addr *addr;

		addr = (struct in_addr *)ipmask;
		if (addr->s_addr == htonl(0xffffffff))
			return true;
	} else if (family == AF_INET6) {
		struct in6_addr *addr6;

		addr6 = (struct in6_addr *)ipmask;
		if (addr6->s6_addr32[0] == htonl(0xffffffff) &&
		    addr6->s6_addr32[1] == htonl(0xffffffff) &&
		    addr6->s6_addr32[2] == htonl(0xffffffff) &&
		    addr6->s6_addr32[3] == htonl(0xffffffff))
			return true;
	}
	return false;
}

static bool is_inaddr_any(u8 *ip, int family)
{
	int addr_type;

	if (family == AF_INET) {
		struct in_addr *addr;

		addr = (struct in_addr *)ip;
		if (addr->s_addr == htonl(INADDR_ANY))
			return true;
	} else if (family == AF_INET6) {
		struct in6_addr *addr6;

		addr6 = (struct in6_addr *)ip;
		addr_type = ipv6_addr_type((const struct in6_addr *)
					   &addr6->s6_addr);
		if (addr_type == IPV6_ADDR_ANY)
			return true;
	}
	return false;
}

bool is_filter_exact_match(struct adapter *adap,
			   struct ch_filter_specification *fs)
{
	struct tp_params *tp = &adap->params.tp;
	u64 hash_filter_mask = tp->hash_filter_mask;
	u64 ntuple_mask = 0;

	if (!is_hashfilter(adap))
		return false;

	if ((atomic_read(&adap->tids.hash_tids_in_use) +
	     atomic_read(&adap->tids.tids_in_use)) >=
	    (adap->tids.nhash + (adap->tids.stid_base - adap->tids.tid_base)))
		return false;

	/* Keep tunnel VNI match disabled for hash-filters for now */
	if (fs->mask.encap_vld)
		return false;

	if (fs->type) {
		if (is_inaddr_any(fs->val.fip, AF_INET6) ||
		    !is_addr_all_mask(fs->mask.fip, AF_INET6))
			return false;

		if (is_inaddr_any(fs->val.lip, AF_INET6) ||
		    !is_addr_all_mask(fs->mask.lip, AF_INET6))
			return false;
	} else {
		if (is_inaddr_any(fs->val.fip, AF_INET) ||
		    !is_addr_all_mask(fs->mask.fip, AF_INET))
			return false;

		if (is_inaddr_any(fs->val.lip, AF_INET) ||
		    !is_addr_all_mask(fs->mask.lip, AF_INET))
			return false;
	}

	if (!fs->val.lport || fs->mask.lport != 0xffff)
		return false;

	if (!fs->val.fport || fs->mask.fport != 0xffff)
		return false;

	/* calculate tuple mask and compare with mask configured in hw */
	if (tp->fcoe_shift >= 0)
		ntuple_mask |= (u64)fs->mask.fcoe << tp->fcoe_shift;

	if (tp->port_shift >= 0)
		ntuple_mask |= (u64)fs->mask.iport << tp->port_shift;

	if (tp->vnic_shift >= 0) {
		if ((adap->params.tp.ingress_config & VNIC_F))
			ntuple_mask |= (u64)fs->mask.pfvf_vld << tp->vnic_shift;
		else
			ntuple_mask |= (u64)fs->mask.ovlan_vld <<
				tp->vnic_shift;
	}

	if (tp->vlan_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ivlan << tp->vlan_shift;

	if (tp->tos_shift >= 0)
		ntuple_mask |= (u64)fs->mask.tos << tp->tos_shift;

	if (tp->protocol_shift >= 0)
		ntuple_mask |= (u64)fs->mask.proto << tp->protocol_shift;

	if (tp->ethertype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.ethtype << tp->ethertype_shift;

	if (tp->macmatch_shift >= 0)
		ntuple_mask |= (u64)fs->mask.macidx << tp->macmatch_shift;

	if (tp->matchtype_shift >= 0)
		ntuple_mask |= (u64)fs->mask.matchtype << tp->matchtype_shift;

	if (tp->frag_shift >= 0)
		ntuple_mask |= (u64)fs->mask.frag << tp->frag_shift;

	if (ntuple_mask != hash_filter_mask)
		return false;

	return true;
}
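
/* Illustrative check (hypothetical shifts): if tp->protocol_shift == 0 and
 * tp->port_shift == 8 were the only valid shifts, a candidate spec with
 * mask.proto = 0xff and mask.iport = 0x7 contributes
 * 0xff | (u64)0x7 << 8 to ntuple_mask, and the spec qualifies as an exact
 * match only if that equals tp->hash_filter_mask bit-for-bit.
 */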
static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
			      struct net_device *dev)
{
	struct adapter *adap = netdev2adap(dev);
	struct tp_params *tp = &adap->params.tp;
	u64 ntuple = 0;

	/* Initialize each of the fields which we care about which are present
	 * in the Compressed Filter Tuple.
	 */
	if (tp->vlan_shift >= 0 && fs->mask.ivlan)
		ntuple |= (u64)(FT_VLAN_VLD_F | fs->val.ivlan) <<
			tp->vlan_shift;

	if (tp->port_shift >= 0 && fs->mask.iport)
		ntuple |= (u64)fs->val.iport << tp->port_shift;

	if (tp->protocol_shift >= 0) {
		if (!fs->val.proto)
			ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
		else
			ntuple |= (u64)fs->val.proto << tp->protocol_shift;
	}

	if (tp->tos_shift >= 0 && fs->mask.tos)
		ntuple |= (u64)(fs->val.tos) << tp->tos_shift;

	if (tp->vnic_shift >= 0) {
		if ((adap->params.tp.ingress_config & USE_ENC_IDX_F) &&
		    fs->mask.encap_vld)
			ntuple |= (u64)((fs->val.encap_vld << 16) |
					(fs->val.ovlan)) << tp->vnic_shift;
		else if ((adap->params.tp.ingress_config & VNIC_F) &&
			 fs->mask.pfvf_vld)
			ntuple |= (u64)((fs->val.pfvf_vld << 16) |
					(fs->val.pf << 13) |
					(fs->val.vf)) << tp->vnic_shift;
		else
			ntuple |= (u64)((fs->val.ovlan_vld << 16) |
					(fs->val.ovlan)) << tp->vnic_shift;
	}

	if (tp->macmatch_shift >= 0 && fs->mask.macidx)
		ntuple |= (u64)(fs->val.macidx) << tp->macmatch_shift;

	if (tp->ethertype_shift >= 0 && fs->mask.ethtype)
		ntuple |= (u64)(fs->val.ethtype) << tp->ethertype_shift;

	if (tp->matchtype_shift >= 0 && fs->mask.matchtype)
		ntuple |= (u64)(fs->val.matchtype) << tp->matchtype_shift;

	if (tp->frag_shift >= 0 && fs->mask.frag)
		ntuple |= (u64)(fs->val.frag) << tp->frag_shift;

	if (tp->fcoe_shift >= 0 && fs->mask.fcoe)
		ntuple |= (u64)(fs->val.fcoe) << tp->fcoe_shift;
	return ntuple;
}
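
/* Worked example (hypothetical shifts): with port_shift == 0 and
 * protocol_shift == 8, a TCP rule on ingress port 1 yields
 * ntuple = 1 | (u64)IPPROTO_TCP << 8 == 0x601. A spec that leaves
 * val.proto at 0 is defaulted to IPPROTO_TCP above.
 */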
static void mk_act_open_req6(struct filter_entry *f, struct sk_buff *skb,
			     unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req6 *t6req = NULL;
	struct cpl_act_open_req6 *req = NULL;

	t6req = (struct cpl_t6_act_open_req6 *)__skb_put(skb, sizeof(*t6req));
	INIT_TP_WR(t6req, 0);
	req = (struct cpl_act_open_req6 *)t6req;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6, qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	req->local_ip_hi = *(__be64 *)(&f->fs.val.lip);
	req->local_ip_lo = *(((__be64 *)&f->fs.val.lip) + 1);
	req->peer_ip_hi = *(__be64 *)(&f->fs.val.fip);
	req->peer_ip_lo = *(((__be64 *)&f->fs.val.fip) + 1);
	req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				DELACK_V(f->fs.hitcnts) |
				L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
				SMAC_SEL_V((cxgb4_port_viid(f->dev) &
					    0x7F) << 1) |
				TX_CHAN_V(f->fs.eport) |
				NO_CONG_V(f->fs.rpttid) |
				ULP_MODE_V(f->fs.nat_mode ?
					   ULP_MODE_TCPDDP : ULP_MODE_NONE) |
				TCAM_BYPASS_F | NON_OFFLOAD_F);
	t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
								      f->dev)));
	t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
			    RSS_QUEUE_V(f->fs.iq) |
			    TX_QUEUE_V(f->fs.nat_mode) |
			    T5_OPT_2_VALID_F |
			    RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
			    PACE_V((f->fs.maskhash) |
				   ((f->fs.dirsteerhash) << 1)));
}

static void mk_act_open_req(struct filter_entry *f, struct sk_buff *skb,
			    unsigned int qid_filterid, struct adapter *adap)
{
	struct cpl_t6_act_open_req *t6req = NULL;
	struct cpl_act_open_req *req = NULL;

	t6req = (struct cpl_t6_act_open_req *)__skb_put(skb, sizeof(*t6req));
	INIT_TP_WR(t6req, 0);
	req = (struct cpl_act_open_req *)t6req;
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, qid_filterid));
	req->local_port = cpu_to_be16(f->fs.val.lport);
	req->peer_port = cpu_to_be16(f->fs.val.fport);
	memcpy(&req->local_ip, f->fs.val.lip, 4);
	memcpy(&req->peer_ip, f->fs.val.fip, 4);
	req->opt0 = cpu_to_be64(NAGLE_V(f->fs.newvlan == VLAN_REMOVE ||
					f->fs.newvlan == VLAN_REWRITE) |
				DELACK_V(f->fs.hitcnts) |
				L2T_IDX_V(f->l2t ? f->l2t->idx : 0) |
				SMAC_SEL_V((cxgb4_port_viid(f->dev) &
					    0x7F) << 1) |
				TX_CHAN_V(f->fs.eport) |
				NO_CONG_V(f->fs.rpttid) |
				ULP_MODE_V(f->fs.nat_mode ?
					   ULP_MODE_TCPDDP : ULP_MODE_NONE) |
				TCAM_BYPASS_F | NON_OFFLOAD_F);

	t6req->params = cpu_to_be64(FILTER_TUPLE_V(hash_filter_ntuple(&f->fs,
								      f->dev)));
	t6req->opt2 = htonl(RSS_QUEUE_VALID_F |
			    RSS_QUEUE_V(f->fs.iq) |
			    TX_QUEUE_V(f->fs.nat_mode) |
			    T5_OPT_2_VALID_F |
			    RX_CHANNEL_V(cxgb4_port_e2cchan(f->dev)) |
			    PACE_V((f->fs.maskhash) |
				   ((f->fs.dirsteerhash) << 1)));
}
static int cxgb4_set_hash_filter(struct net_device *dev,
				 struct ch_filter_specification *fs,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	struct port_info *pi = netdev_priv(dev);
	struct tid_info *t = &adapter->tids;
	struct filter_entry *f;
	struct sk_buff *skb;
	int iq, atid, size;
	int ret = 0;
	u32 iconf;

	fill_default_mask(fs);
	ret = validate_filter(dev, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);
	if (iq < 0)
		return iq;

	f = kzalloc(sizeof(*f), GFP_KERNEL);
	if (!f)
		return -ENOMEM;

	f->fs = *fs;
	f->ctx = ctx;
	f->dev = dev;
	f->fs.iq = iq;

	/* If the new filter requires loopback Destination MAC and/or VLAN
	 * rewriting then we need to allocate a Layer 2 Table (L2T) entry for
	 * the filter.
	 */
	if (f->fs.newdmac || f->fs.newvlan) {
		/* allocate L2T entry for new filter */
		f->l2t = t4_l2t_alloc_switching(adapter, f->fs.vlan,
						f->fs.eport, f->fs.dmac);
		if (!f->l2t) {
			ret = -ENOMEM;
			goto out_err;
		}
	}

	/* If the new filter requires loopback Source MAC rewriting then
	 * we need to allocate a SMT entry for the filter.
	 */
	if (f->fs.newsmac) {
		f->smt = cxgb4_smt_alloc_switching(f->dev, f->fs.smac);
		if (!f->smt) {
			if (f->l2t) {
				cxgb4_l2t_release(f->l2t);
				f->l2t = NULL;
			}
			ret = -ENOMEM;
			goto out_err;
		}
	}

	atid = cxgb4_alloc_atid(t, f);
	if (atid < 0) {
		ret = atid;
		goto free_smt;
	}

	iconf = adapter->params.tp.ingress_config;
	if (iconf & VNIC_F) {
		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	} else if (iconf & USE_ENC_IDX_F) {
		if (f->fs.val.encap_vld) {
			struct port_info *pi = netdev_priv(f->dev);
			u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };

			/* allocate MPS TCAM entry */
			ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
						      match_all_mac,
						      match_all_mac,
						      f->fs.val.vni,
						      f->fs.mask.vni,
						      0, 1, 1);
			if (ret < 0)
				goto free_atid;

			f->fs.val.ovlan = ret;
			f->fs.mask.ovlan = 0xffff;
			f->fs.val.ovlan_vld = 1;
			f->fs.mask.ovlan_vld = 1;
		}
	}

	size = sizeof(struct cpl_t6_act_open_req);
	if (f->fs.type) {
		ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
		if (ret)
			goto free_atid;

		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto free_clip;
		}

		mk_act_open_req6(f, skb,
				 ((adapter->sge.fw_evtq.abs_id << 14) | atid),
				 adapter);
	} else {
		skb = alloc_skb(size, GFP_KERNEL);
		if (!skb) {
			ret = -ENOMEM;
			goto free_atid;
		}

		mk_act_open_req(f, skb,
				((adapter->sge.fw_evtq.abs_id << 14) | atid),
				adapter);
	}

	f->pending = 1;
	set_wr_txq(skb, CPL_PRIORITY_SETUP, f->fs.val.iport & 0x3);
	t4_ofld_send(adapter, skb);
	return 0;

free_clip:
	cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);

free_atid:
	if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
		t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1);

	cxgb4_free_atid(t, atid);

free_smt:
	if (f->smt) {
		cxgb4_smt_release(f->smt);
		f->smt = NULL;
	}

	if (f->l2t) {
		cxgb4_l2t_release(f->l2t);
		f->l2t = NULL;
	}

out_err:
	kfree(f);
	return ret;
}
/* Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware. Return 0 on success, an error number
 * otherwise. We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int __cxgb4_set_filter(struct net_device *dev, int ftid,
		       struct ch_filter_specification *fs,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int max_fidx, fidx, chip_ver;
	int iq, ret, filter_id = ftid;
	struct filter_entry *f, *tab;
	u32 iconf;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	if (fs->hash) {
		if (is_hashfilter(adapter))
			return cxgb4_set_hash_filter(dev, fs, ctx);
		netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
			   __func__);
		return -EINVAL;
	}

	max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	fill_default_mask(fs);

	ret = validate_filter(dev, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);
	if (iq < 0)
		return iq;

	if (fs->prio) {
		tab = &adapter->tids.hpftid_tab[0];
	} else {
		tab = &adapter->tids.ftid_tab[0];
		filter_id = ftid - adapter->tids.nhpftids;
	}

	/* IPv6 filters occupy four slots and must be aligned on
	 * four-slot boundaries. IPv4 filters only occupy a single
	 * slot and have no alignment requirements but writing a new
	 * IPv4 filter into the middle of an existing IPv6 filter
	 * requires clearing the old IPv6 filter and hence we prevent
	 * insertion.
	 */
	if (fs->type == 0) { /* IPv4 */
		/* For T6, if our IPv4 filter isn't being written to a
		 * multiple of two filter index and there's an IPv6
		 * filter at the multiple of 2 base slot, then we need
		 * to delete that IPv6 filter ...
		 * For adapters below T6, an IPv6 filter occupies 4 entries.
		 * Hence we need to delete the filter in multiples of 4 slots.
		 */
		if (chip_ver < CHELSIO_T6)
			fidx = filter_id & ~0x3;
		else
			fidx = filter_id & ~0x1;

		if (fidx != filter_id && tab[fidx].fs.type) {
			f = &tab[fidx];
			if (f->valid) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
					fidx, fidx + 3);
				return -EINVAL;
			}
		}
	} else { /* IPv6 */
		if (chip_ver < CHELSIO_T6) {
			/* Ensure that the IPv6 filter is aligned on a
			 * multiple of 4 boundary.
			 */
			if (filter_id & 0x3) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
				return -EINVAL;
			}

			/* Check all except the base overlapping IPv4 filter
			 * slots.
			 */
			for (fidx = filter_id + 1; fidx < filter_id + 4;
			     fidx++) {
				f = &tab[fidx];
				if (f->valid) {
					dev_err(adapter->pdev_dev,
						"Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
						fidx);
					return -EBUSY;
				}
			}
		} else {
			/* On T6, with CLIP enabled, an IPv6 filter occupies
			 * 2 entries.
			 */
			if (filter_id & 0x1)
				return -EINVAL;
			/* Check overlapping IPv4 filter slot */
			fidx = filter_id + 1;
			f = &tab[fidx];
			if (f->valid) {
				pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
				       __func__, fidx);
				return -EBUSY;
			}
		}
	}

	/* Check to make sure that the provided filter index is not
	 * already in use by someone else.
	 */
	f = &tab[filter_id];
	if (f->valid)
		return -EBUSY;

	if (fs->prio) {
		fidx = filter_id + adapter->tids.hpftid_base;
		ret = cxgb4_set_hpftid(&adapter->tids, filter_id,
				       fs->type ? PF_INET6 : PF_INET);
	} else {
		fidx = filter_id + adapter->tids.ftid_base;
		ret = cxgb4_set_ftid(&adapter->tids, filter_id,
				     fs->type ? PF_INET6 : PF_INET,
				     chip_ver);
	}

	if (ret)
		return ret;

	/* Check to make sure the filter requested is writable ... */
	ret = writable_filter(f);
	if (ret)
		goto free_tid;

	if (is_t6(adapter->params.chip) && fs->type &&
	    ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
	    IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
		if (ret)
			goto free_tid;
	}

	/* Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;

	iconf = adapter->params.tp.ingress_config;
	if (iconf & VNIC_F) {
		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	} else if (iconf & USE_ENC_IDX_F) {
		if (f->fs.val.encap_vld) {
			struct port_info *pi = netdev_priv(f->dev);
			u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };

			/* allocate MPS TCAM entry */
			ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
						      match_all_mac,
						      match_all_mac,
						      f->fs.val.vni,
						      f->fs.mask.vni,
						      0, 1, 1);
			if (ret < 0)
				goto free_tid;

			f->fs.val.ovlan = ret;
			f->fs.mask.ovlan = 0x1ff;
			f->fs.val.ovlan_vld = 1;
			f->fs.mask.ovlan_vld = 1;
		}
	}

	/* Attempt to set the filter. If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(adapter, ftid);
	if (ret)
		goto free_tid;

	return ret;

free_tid:
	if (f->fs.prio)
		cxgb4_clear_hpftid(&adapter->tids, filter_id,
				   fs->type ? PF_INET6 : PF_INET);
	else
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET,
				 chip_ver);

	clear_filter(adapter, f);
	return ret;
}
static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
				 struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	struct tid_info *t = &adapter->tids;
	struct cpl_abort_req *abort_req;
	struct cpl_abort_rpl *abort_rpl;
	struct cpl_set_tcb_field *req;
	struct ulptx_idata *aligner;
	struct work_request_hdr *wr;
	struct filter_entry *f;
	struct sk_buff *skb;
	unsigned int wrlen;
	int ret;

	netdev_dbg(dev, "%s: filter_id = %d ; nftids = %d\n",
		   __func__, filter_id, adapter->tids.nftids);

	if (tid_out_of_range(t, filter_id))
		return -E2BIG;

	f = lookup_tid(t, filter_id);
	if (!f) {
		netdev_err(dev, "%s: no filter entry for filter_id = %d",
			   __func__, filter_id);
		return -EINVAL;
	}

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (!f->valid)
		return -EINVAL;

	f->ctx = ctx;
	f->pending = 1;
	wrlen = roundup(sizeof(*wr) + (sizeof(*req) + sizeof(*aligner))
			+ sizeof(*abort_req) + sizeof(*abort_rpl), 16);
	skb = alloc_skb(wrlen, GFP_KERNEL);
	if (!skb) {
		netdev_err(dev, "%s: could not allocate skb ..\n", __func__);
		return -ENOMEM;
	}
	set_wr_txq(skb, CPL_PRIORITY_CONTROL, f->fs.val.iport & 0x3);
	req = (struct cpl_set_tcb_field *)__skb_put(skb, wrlen);
	INIT_ULPTX_WR(req, wrlen, 0, 0);
	wr = (struct work_request_hdr *)req;
	wr++;
	req = (struct cpl_set_tcb_field *)wr;
	mk_set_tcb_ulp(f, req, TCB_RSS_INFO_W, TCB_RSS_INFO_V(TCB_RSS_INFO_M),
		       TCB_RSS_INFO_V(adapter->sge.fw_evtq.abs_id), 0, 1);
	aligner = (struct ulptx_idata *)(req + 1);
	abort_req = (struct cpl_abort_req *)(aligner + 1);
	mk_abort_req_ulp(abort_req, f->tid);
	abort_rpl = (struct cpl_abort_rpl *)(abort_req + 1);
	mk_abort_rpl_ulp(abort_rpl, f->tid);
	t4_ofld_send(adapter, skb);
	return 0;
}
/* Check a delete filter request for validity and send it to the hardware.
 * Return 0 on success, an error number otherwise. We attach any provided
 * filter operation context to the internal filter specification in order to
 * facilitate signaling completion of the operation.
 */
int __cxgb4_del_filter(struct net_device *dev, int filter_id,
		       struct ch_filter_specification *fs,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int max_fidx, chip_ver;
	struct filter_entry *f;
	int ret;

	chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	if (fs && fs->hash) {
		if (is_hashfilter(adapter))
			return cxgb4_del_hash_filter(dev, filter_id, ctx);
		netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
			   __func__);
		return -EINVAL;
	}

	max_fidx = adapter->tids.nftids + adapter->tids.nhpftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	if (filter_id < adapter->tids.nhpftids)
		f = &adapter->tids.hpftid_tab[filter_id];
	else
		f = &adapter->tids.ftid_tab[filter_id - adapter->tids.nhpftids];

	ret = writable_filter(f);
	if (ret)
		return ret;

	if (f->valid) {
		f->ctx = ctx;
		if (f->fs.prio)
			cxgb4_clear_hpftid(&adapter->tids,
					   f->tid - adapter->tids.hpftid_base,
					   f->fs.type ? PF_INET6 : PF_INET);
		else
			cxgb4_clear_ftid(&adapter->tids,
					 f->tid - adapter->tids.ftid_base,
					 f->fs.type ? PF_INET6 : PF_INET,
					 chip_ver);
		return del_filter_wr(adapter, filter_id);
	}

	/* If the caller has passed in a Completion Context then we need to
	 * mark it as a successful completion so they don't stall waiting
	 * for it.
	 */
	if (ctx) {
		ctx->result = 0;
		complete(&ctx->completion);
	}
	return 0;
}
int cxgb4_set_filter(struct net_device *dev, int filter_id,
		     struct ch_filter_specification *fs)
{
	struct filter_ctx ctx;
	int ret;

	init_completion(&ctx.completion);

	ret = __cxgb4_set_filter(dev, filter_id, fs, &ctx);
	if (ret)
		goto out;

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	ret = ctx.result;
out:
	return ret;
}

int cxgb4_del_filter(struct net_device *dev, int filter_id,
		     struct ch_filter_specification *fs)
{
	struct filter_ctx ctx;
	int ret;

	/* Nothing to do if the adapter is already being shut down */
	if (netdev2adap(dev)->flags & CXGB4_SHUTTING_DOWN)
		return 0;

	init_completion(&ctx.completion);

	ret = __cxgb4_del_filter(dev, filter_id, fs, &ctx);
	if (ret)
		goto out;

	/* Wait for reply */
	ret = wait_for_completion_timeout(&ctx.completion, 10 * HZ);
	if (!ret)
		return -ETIMEDOUT;

	ret = ctx.result;
out:
	return ret;
}
static int configure_filter_tcb(struct adapter *adap, unsigned int tid,
				struct filter_entry *f)
{
	if (f->fs.hitcnts) {
		set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W,
			      TCB_TIMESTAMP_V(TCB_TIMESTAMP_M),
			      TCB_TIMESTAMP_V(0ULL),
			      1);
		set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W,
			      TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M),
			      TCB_RTT_TS_RECENT_AGE_V(0ULL),
			      1);
	}

	if (f->fs.newdmac)
		set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1,
			      1);

	if (f->fs.newvlan == VLAN_INSERT ||
	    f->fs.newvlan == VLAN_REWRITE)
		set_tcb_tflag(adap, f, tid, TF_CCTRL_RFR_S, 1,
			      1);
	if (f->fs.newsmac)
		configure_filter_smac(adap, f);

	if (f->fs.nat_mode) {
		switch (f->fs.nat_mode) {
		case NAT_MODE_DIP:
			set_nat_params(adap, f, tid, true, false, false, false);
			break;

		case NAT_MODE_DIP_DP:
			set_nat_params(adap, f, tid, true, false, true, false);
			break;

		case NAT_MODE_DIP_DP_SIP:
			set_nat_params(adap, f, tid, true, true, true, false);
			break;

		case NAT_MODE_DIP_DP_SP:
			set_nat_params(adap, f, tid, true, false, true, true);
			break;

		case NAT_MODE_SIP_SP:
			set_nat_params(adap, f, tid, false, true, false, true);
			break;

		case NAT_MODE_DIP_SIP_SP:
			set_nat_params(adap, f, tid, true, true, false, true);
			break;

		case NAT_MODE_ALL:
			set_nat_params(adap, f, tid, true, true, true, true);
			break;

		default:
			pr_err("%s: Invalid NAT mode: %d\n",
			       __func__, f->fs.nat_mode);
			return -EINVAL;
		}
	}
	return 0;
}
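
/* Mapping recap (follows the switch above): NAT_MODE_DIP_DP rewrites the
 * destination IP and destination port only, hence dip = true, dp = true
 * and sip = sp = false; NAT_MODE_ALL enables all four rewrites.
 */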
void hash_del_filter_rpl(struct adapter *adap,
			 const struct cpl_abort_rpl_rss *rpl)
{
	unsigned int status = rpl->status;
	struct tid_info *t = &adap->tids;
	unsigned int tid = GET_TID(rpl);
	struct filter_ctx *ctx = NULL;
	struct filter_entry *f;

	dev_dbg(adap->pdev_dev, "%s: status = %u; tid = %u\n",
		__func__, status, tid);

	f = lookup_tid(t, tid);
	if (!f) {
		dev_err(adap->pdev_dev, "%s:could not find filter entry",
			__func__);
		return;
	}

	ctx = f->ctx;
	f->ctx = NULL;
	clear_filter(adap, f);
	cxgb4_remove_tid(t, 0, tid, 0);
	kfree(f);
	if (ctx) {
		ctx->result = 0;
		complete(&ctx->completion);
	}
}
void hash_filter_rpl(struct adapter *adap, const struct cpl_act_open_rpl *rpl)
{
	unsigned int ftid = TID_TID_G(AOPEN_ATID_G(ntohl(rpl->atid_status)));
	unsigned int status = AOPEN_STATUS_G(ntohl(rpl->atid_status));
	struct tid_info *t = &adap->tids;
	unsigned int tid = GET_TID(rpl);
	struct filter_ctx *ctx = NULL;
	struct filter_entry *f;

	dev_dbg(adap->pdev_dev, "%s: tid = %u; atid = %u; status = %u\n",
		__func__, tid, ftid, status);

	f = lookup_atid(t, ftid);
	if (!f) {
		dev_err(adap->pdev_dev, "%s:could not find filter entry",
			__func__);
		return;
	}

	ctx = f->ctx;
	f->ctx = NULL;

	switch (status) {
	case CPL_ERR_NONE:
		f->tid = tid;
		f->pending = 0;
		f->valid = 1;
		cxgb4_insert_tid(t, f, f->tid, 0);
		cxgb4_free_atid(t, ftid);
		if (ctx) {
			ctx->tid = f->tid;
			ctx->result = 0;
		}
		if (configure_filter_tcb(adap, tid, f)) {
			clear_filter(adap, f);
			cxgb4_remove_tid(t, 0, tid, 0);
			kfree(f);
			if (ctx) {
				ctx->result = -EINVAL;
				complete(&ctx->completion);
			}
			return;
		}
		switch (f->fs.action) {
		case FILTER_PASS:
			if (f->fs.dirsteer)
				set_tcb_tflag(adap, f, tid,
					      TF_DIRECT_STEER_S, 1, 1);
			break;
		case FILTER_DROP:
			set_tcb_tflag(adap, f, tid, TF_DROP_S, 1, 1);
			break;
		case FILTER_SWITCH:
			set_tcb_tflag(adap, f, tid, TF_LPBK_S, 1, 1);
			break;
		}
		break;

	default:
		if (status != CPL_ERR_TCAM_FULL)
			dev_err(adap->pdev_dev, "%s: filter creation PROBLEM; status = %u\n",
				__func__, status);

		if (ctx) {
			if (status == CPL_ERR_TCAM_FULL)
				ctx->result = -ENOSPC;
			else
				ctx->result = -EINVAL;
		}
		clear_filter(adap, f);
		cxgb4_free_atid(t, ftid);
		kfree(f);
	}
	if (ctx)
		complete(&ctx->completion);
}
/* Handle a filter write/deletion reply. */
void filter_rpl(struct adapter *adap, const struct cpl_set_tcb_rpl *rpl)
{
	unsigned int tid = GET_TID(rpl);
	struct filter_entry *f = NULL;
	unsigned int max_fidx;
	int idx;

	max_fidx = adap->tids.nftids + adap->tids.nsftids;
	/* Get the corresponding filter entry for this tid */
	if (adap->tids.ftid_tab) {
		idx = tid - adap->tids.hpftid_base;
		if (idx < adap->tids.nhpftids) {
			f = &adap->tids.hpftid_tab[idx];
		} else {
			/* Check this in normal filter region */
			idx = tid - adap->tids.ftid_base;
			if (idx >= max_fidx)
				return;
			f = &adap->tids.ftid_tab[idx];
			idx += adap->tids.nhpftids;
		}

		if (f->tid != tid)
			return;
	}

	/* We found the filter entry for this tid */
	if (f) {
		unsigned int ret = TCB_COOKIE_G(rpl->cookie);
		struct filter_ctx *ctx;

		/* Pull off any filter operation context attached to the
		 * filter.
		 */
		ctx = f->ctx;
		f->ctx = NULL;

		if (ret == FW_FILTER_WR_FLT_DELETED) {
			/* Clear the filter when we get confirmation from the
			 * hardware that the filter has been deleted.
			 */
			clear_filter(adap, f);
			if (ctx)
				ctx->result = 0;
		} else if (ret == FW_FILTER_WR_FLT_ADDED) {
			f->pending = 0;  /* async setup completed */
			f->valid = 1;
			if (ctx) {
				ctx->result = 0;
				ctx->tid = idx;
			}
		} else {
			/* Something went wrong. Issue a warning about the
			 * problem and clear everything out.
			 */
			dev_err(adap->pdev_dev, "filter %u setup failed with error %u\n",
				idx, ret);
			clear_filter(adap, f);
			if (ctx)
				ctx->result = -EINVAL;
		}
		if (ctx)
			complete(&ctx->completion);
	}
}
void init_hash_filter(struct adapter *adap)
{
	u32 reg;

	/* On T6, verify the necessary register configs and warn the user in
	 * case of improper config
	 */
	if (is_t6(adap->params.chip)) {
		if (is_offload(adap)) {
			if (!(t4_read_reg(adap, TP_GLOBAL_CONFIG_A)
			   & ACTIVEFILTERCOUNTS_F)) {
				dev_err(adap->pdev_dev, "Invalid hash filter + ofld config\n");
				return;
			}
		} else {
			reg = t4_read_reg(adap, LE_DB_RSP_CODE_0_A);
			if (TCAM_ACTV_HIT_G(reg) != 4) {
				dev_err(adap->pdev_dev, "Invalid hash filter config\n");
				return;
			}

			reg = t4_read_reg(adap, LE_DB_RSP_CODE_1_A);
			if (HASH_ACTV_HIT_G(reg) != 4) {
				dev_err(adap->pdev_dev, "Invalid hash filter config\n");
				return;
			}
		}
	} else {
		dev_err(adap->pdev_dev, "Hash filter supported only on T6\n");
		return;
	}

	adap->params.hash_filter = 1;
}