cxgb4: add tc flower match support for tunnel VNI
author Kumar Sanghvi <kumaras@chelsio.com>
Mon, 14 May 2018 12:21:21 +0000 (17:51 +0530)
committer David S. Miller <davem@davemloft.net>
Tue, 15 May 2018 02:50:15 +0000 (22:50 -0400)
Adds support for matching flows based on the tunnel VNI value.
Introduces firmware APIs for allocating/removing MPS TCAM entries
related to encapsulation, and uses them when adding/deleting
filters that offload flows based on a tunnel VNI match.
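
For example (illustrative only, not part of this patch; the interface
name and VNI value below are placeholders), such a flow could be
requested from user space with a tc flower rule matching on the
tunnel key id:

  tc qdisc add dev <dev> ingress
  tc filter add dev <dev> ingress protocol ip \
      flower enc_key_id 100 \
      action drop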

Signed-off-by: Kumar Sanghvi <kumaras@chelsio.com>
Signed-off-by: Ganesh Goudar <ganeshgr@chelsio.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h

diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index 01e7aad..211086b 100644
@@ -1038,6 +1038,7 @@ struct ch_sched_queue {
 #define VF_BITWIDTH 8
 #define IVLAN_BITWIDTH 16
 #define OVLAN_BITWIDTH 16
+#define ENCAP_VNI_BITWIDTH 24
 
 /* Filter matching rules.  These consist of a set of ingress packet field
  * (value, mask) tuples.  The associated ingress packet field matches the
@@ -1068,6 +1069,7 @@ struct ch_filter_tuple {
        uint32_t ivlan_vld:1;                   /* inner VLAN valid */
        uint32_t ovlan_vld:1;                   /* outer VLAN valid */
        uint32_t pfvf_vld:1;                    /* PF/VF valid */
+       uint32_t encap_vld:1;                   /* Encapsulation valid */
        uint32_t macidx:MACIDX_BITWIDTH;        /* exact match MAC index */
        uint32_t fcoe:FCOE_BITWIDTH;            /* FCoE packet */
        uint32_t iport:IPORT_BITWIDTH;          /* ingress port */
@@ -1078,6 +1080,7 @@ struct ch_filter_tuple {
        uint32_t vf:VF_BITWIDTH;                /* PCI-E VF ID */
        uint32_t ivlan:IVLAN_BITWIDTH;          /* inner VLAN */
        uint32_t ovlan:OVLAN_BITWIDTH;          /* outer VLAN */
+       uint32_t vni:ENCAP_VNI_BITWIDTH;        /* VNI of tunnel */
 
        /* Uncompressed header matching field rules.  These are always
         * available for field rules.
@@ -1694,6 +1697,12 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
 int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
                         const u8 *addr, const u8 *mask, unsigned int idx,
                         u8 lookup_type, u8 port_id, bool sleep_ok);
+int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid, int idx,
+                          bool sleep_ok);
+int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
+                           const u8 *addr, const u8 *mask, unsigned int vni,
+                           unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
+                           bool sleep_ok);
 int t4_alloc_raw_mac_filt(struct adapter *adap, unsigned int viid,
                          const u8 *addr, const u8 *mask, unsigned int idx,
                          u8 lookup_type, u8 port_id, bool sleep_ok);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c
index aae9802..ac4d7f7 100644
@@ -265,6 +265,8 @@ static int validate_filter(struct net_device *dev,
                        fs->mask.pfvf_vld) ||
            unsupported(fconf, VNIC_ID_F, fs->val.ovlan_vld,
                        fs->mask.ovlan_vld) ||
+           unsupported(fconf, VNIC_ID_F, fs->val.encap_vld,
+                       fs->mask.encap_vld) ||
            unsupported(fconf, VLAN_F, fs->val.ivlan_vld, fs->mask.ivlan_vld))
                return -EOPNOTSUPP;
 
@@ -275,8 +277,12 @@ static int validate_filter(struct net_device *dev,
         * carries that overlap, we need to translate any PF/VF
         * specification into that internal format below.
         */
-       if (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
-           is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld))
+       if ((is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
+            is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld)) ||
+           (is_field_set(fs->val.pfvf_vld, fs->mask.pfvf_vld) &&
+            is_field_set(fs->val.encap_vld, fs->mask.encap_vld)) ||
+           (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
+            is_field_set(fs->val.encap_vld, fs->mask.encap_vld)))
                return -EOPNOTSUPP;
        if (unsupported(iconf, VNIC_F, fs->val.pfvf_vld, fs->mask.pfvf_vld) ||
            (is_field_set(fs->val.ovlan_vld, fs->mask.ovlan_vld) &&
@@ -306,6 +312,9 @@ static int validate_filter(struct net_device *dev,
             fs->newvlan == VLAN_REWRITE))
                return -EOPNOTSUPP;
 
+       if (fs->val.encap_vld &&
+           CHELSIO_CHIP_VERSION(adapter->params.chip) < CHELSIO_T6)
+               return -EOPNOTSUPP;
        return 0;
 }
 
@@ -705,6 +714,8 @@ int delete_filter(struct adapter *adapter, unsigned int fidx)
  */
 void clear_filter(struct adapter *adap, struct filter_entry *f)
 {
+       struct port_info *pi = netdev_priv(f->dev);
+
        /* If the new or old filter has loopback rewriting rules then we'll
         * need to free any existing L2T, SMT, CLIP entries of filter
         * rule.
@@ -715,6 +726,12 @@ void clear_filter(struct adapter *adap, struct filter_entry *f)
        if (f->smt)
                cxgb4_smt_release(f->smt);
 
+       if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
+               if (atomic_dec_and_test(&adap->mps_encap[f->fs.val.ovlan &
+                                                        0x1ff].refcnt))
+                       t4_free_encap_mac_filt(adap, pi->viid,
+                                              f->fs.val.ovlan & 0x1ff, 0);
+
        if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type)
                cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
 
@@ -840,6 +857,10 @@ bool is_filter_exact_match(struct adapter *adap,
        if (!is_hashfilter(adap))
                return false;
 
+       /* Keep tunnel VNI match disabled for hash-filters for now */
+       if (fs->mask.encap_vld)
+               return false;
+
        if (fs->type) {
                if (is_inaddr_any(fs->val.fip, AF_INET6) ||
                    !is_addr_all_mask(fs->mask.fip, AF_INET6))
@@ -961,8 +982,12 @@ static u64 hash_filter_ntuple(struct ch_filter_specification *fs,
                ntuple |= (u64)(fs->val.tos) << tp->tos_shift;
 
        if (tp->vnic_shift >= 0) {
-               if ((adap->params.tp.ingress_config & VNIC_F) &&
-                   fs->mask.pfvf_vld)
+               if ((adap->params.tp.ingress_config & USE_ENC_IDX_F) &&
+                   fs->mask.encap_vld)
+                       ntuple |= (u64)((fs->val.encap_vld << 16) |
+                                       (fs->val.ovlan)) << tp->vnic_shift;
+               else if ((adap->params.tp.ingress_config & VNIC_F) &&
+                        fs->mask.pfvf_vld)
                        ntuple |= (u64)((fs->val.pfvf_vld << 16) |
                                        (fs->val.pf << 13) |
                                        (fs->val.vf)) << tp->vnic_shift;
@@ -1076,6 +1101,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
                                 struct filter_ctx *ctx)
 {
        struct adapter *adapter = netdev2adap(dev);
+       struct port_info *pi = netdev_priv(dev);
        struct tid_info *t = &adapter->tids;
        struct filter_entry *f;
        struct sk_buff *skb;
@@ -1142,13 +1168,34 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
                f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
                f->fs.val.ovlan_vld = fs->val.pfvf_vld;
                f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+       } else if (iconf & USE_ENC_IDX_F) {
+               if (f->fs.val.encap_vld) {
+                       struct port_info *pi = netdev_priv(f->dev);
+                       u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+
+                       /* allocate MPS TCAM entry */
+                       ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
+                                                     match_all_mac,
+                                                     match_all_mac,
+                                                     f->fs.val.vni,
+                                                     f->fs.mask.vni,
+                                                     0, 1, 1);
+                       if (ret < 0)
+                               goto free_atid;
+
+                       atomic_inc(&adapter->mps_encap[ret].refcnt);
+                       f->fs.val.ovlan = ret;
+                       f->fs.mask.ovlan = 0xffff;
+                       f->fs.val.ovlan_vld = 1;
+                       f->fs.mask.ovlan_vld = 1;
+               }
        }
 
        size = sizeof(struct cpl_t6_act_open_req);
        if (f->fs.type) {
                ret = cxgb4_clip_get(f->dev, (const u32 *)&f->fs.val.lip, 1);
                if (ret)
-                       goto free_atid;
+                       goto free_mps;
 
                skb = alloc_skb(size, GFP_KERNEL);
                if (!skb) {
@@ -1163,7 +1210,7 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
                skb = alloc_skb(size, GFP_KERNEL);
                if (!skb) {
                        ret = -ENOMEM;
-                       goto free_atid;
+                       goto free_mps;
                }
 
                mk_act_open_req(f, skb,
@@ -1179,6 +1226,10 @@ static int cxgb4_set_hash_filter(struct net_device *dev,
 free_clip:
        cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
 
+free_mps:
+       if (f->fs.val.encap_vld && f->fs.val.ovlan_vld)
+               t4_free_encap_mac_filt(adapter, pi->viid, f->fs.val.ovlan, 1);
+
 free_atid:
        cxgb4_free_atid(t, atid);
 
@@ -1360,6 +1411,27 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
                f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
                f->fs.val.ovlan_vld = fs->val.pfvf_vld;
                f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
+       } else if (iconf & USE_ENC_IDX_F) {
+               if (f->fs.val.encap_vld) {
+                       struct port_info *pi = netdev_priv(f->dev);
+                       u8 match_all_mac[] = { 0, 0, 0, 0, 0, 0 };
+
+                       /* allocate MPS TCAM entry */
+                       ret = t4_alloc_encap_mac_filt(adapter, pi->viid,
+                                                     match_all_mac,
+                                                     match_all_mac,
+                                                     f->fs.val.vni,
+                                                     f->fs.mask.vni,
+                                                     0, 1, 1);
+                       if (ret < 0)
+                               goto free_clip;
+
+                       atomic_inc(&adapter->mps_encap[ret].refcnt);
+                       f->fs.val.ovlan = ret;
+                       f->fs.mask.ovlan = 0x1ff;
+                       f->fs.val.ovlan_vld = 1;
+                       f->fs.mask.ovlan_vld = 1;
+               }
        }
 
        /* Attempt to set the filter.  If we don't succeed, we clear
@@ -1376,6 +1448,13 @@ int __cxgb4_set_filter(struct net_device *dev, int filter_id,
        }
 
        return ret;
+
+free_clip:
+       if (is_t6(adapter->params.chip) && f->fs.type)
+               cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1);
+       cxgb4_clear_ftid(&adapter->tids, filter_id,
+                        fs->type ? PF_INET6 : PF_INET, chip_ver);
+       return ret;
 }
 
 static int cxgb4_del_hash_filter(struct net_device *dev, int filter_id,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c
index 3656336..3ddd2c4 100644
@@ -194,6 +194,23 @@ static void cxgb4_process_flow_match(struct net_device *dev,
                fs->mask.tos = mask->tos;
        }
 
+       if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
+               struct flow_dissector_key_keyid *key, *mask;
+
+               key = skb_flow_dissector_target(cls->dissector,
+                                               FLOW_DISSECTOR_KEY_ENC_KEYID,
+                                               cls->key);
+               mask = skb_flow_dissector_target(cls->dissector,
+                                                FLOW_DISSECTOR_KEY_ENC_KEYID,
+                                                cls->mask);
+               fs->val.vni = be32_to_cpu(key->keyid);
+               fs->mask.vni = be32_to_cpu(mask->keyid);
+               if (fs->mask.vni) {
+                       fs->val.encap_vld = 1;
+                       fs->mask.encap_vld = 1;
+               }
+       }
+
        if (dissector_uses_key(cls->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
                struct flow_dissector_key_vlan *key, *mask;
                u16 vlan_tci, vlan_tci_mask;
@@ -247,6 +264,7 @@ static int cxgb4_validate_flow_match(struct net_device *dev,
              BIT(FLOW_DISSECTOR_KEY_IPV4_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_IPV6_ADDRS) |
              BIT(FLOW_DISSECTOR_KEY_PORTS) |
+             BIT(FLOW_DISSECTOR_KEY_ENC_KEYID) |
              BIT(FLOW_DISSECTOR_KEY_VLAN) |
              BIT(FLOW_DISSECTOR_KEY_IP))) {
                netdev_warn(dev, "Unsupported key used: 0x%x\n",
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 1817a03..77c2c53 100644
@@ -491,7 +491,7 @@ u64 cxgb4_select_ntuple(struct net_device *dev,
        if (tp->protocol_shift >= 0)
                ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
 
-       if (tp->vnic_shift >= 0) {
+       if (tp->vnic_shift >= 0 && (tp->ingress_config & VNIC_F)) {
                u32 viid = cxgb4_port_viid(dev);
                u32 vf = FW_VIID_VIN_G(viid);
                u32 pf = FW_VIID_PFN_G(viid);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 7cb3ef4..df5e7c7 100644
@@ -7512,6 +7512,43 @@ int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
        return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
 }
 
+/**
+ *      t4_free_encap_mac_filt - frees MPS entry at given index
+ *      @adap: the adapter
+ *      @viid: the VI id
+ *      @idx: index of MPS entry to be freed
+ *      @sleep_ok: call is allowed to sleep
+ *
+ *      Frees the MPS entry at supplied index
+ *
+ *      Returns a negative error number or zero on success
+ */
+int t4_free_encap_mac_filt(struct adapter *adap, unsigned int viid,
+                          int idx, bool sleep_ok)
+{
+       struct fw_vi_mac_exact *p;
+       u8 addr[] = {0, 0, 0, 0, 0, 0};
+       struct fw_vi_mac_cmd c;
+       int ret = 0;
+       u32 exact;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+                                  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+                                  FW_CMD_EXEC_V(0) |
+                                  FW_VI_MAC_CMD_VIID_V(viid));
+       exact = FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC);
+       c.freemacs_to_len16 = cpu_to_be32(FW_VI_MAC_CMD_FREEMACS_V(0) |
+                                         exact |
+                                         FW_CMD_LEN16_V(1));
+       p = c.u.exact;
+       p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+                                     FW_VI_MAC_CMD_IDX_V(idx));
+       memcpy(p->macaddr, addr, sizeof(p->macaddr));
+       ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+       return ret;
+}
+
 /**
  *     t4_free_raw_mac_filt - Frees a raw mac entry in mps tcam
  *     @adap: the adapter
@@ -7562,6 +7599,55 @@ int t4_free_raw_mac_filt(struct adapter *adap, unsigned int viid,
        return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
 }
 
+/**
+ *      t4_alloc_encap_mac_filt - Adds a mac entry in mps tcam with VNI support
+ *      @adap: the adapter
+ *      @viid: the VI id
+ *      @addr: the MAC address
+ *      @mask: the mask
+ *      @vni: the VNI id for the tunnel protocol
+ *      @vni_mask: mask for the VNI id
+ *      @dip_hit: to enable DIP match for the MPS entry
+ *      @lookup_type: MAC address for inner (1) or outer (0) header
+ *      @sleep_ok: call is allowed to sleep
+ *
+ *      Allocates an MPS entry with specified MAC address and VNI value.
+ *
+ *      Returns a negative error number or the allocated index for this mac.
+ */
+int t4_alloc_encap_mac_filt(struct adapter *adap, unsigned int viid,
+                           const u8 *addr, const u8 *mask, unsigned int vni,
+                           unsigned int vni_mask, u8 dip_hit, u8 lookup_type,
+                           bool sleep_ok)
+{
+       struct fw_vi_mac_cmd c;
+       struct fw_vi_mac_vni *p = c.u.exact_vni;
+       int ret = 0;
+       u32 val;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_viid = cpu_to_be32(FW_CMD_OP_V(FW_VI_MAC_CMD) |
+                                  FW_CMD_REQUEST_F | FW_CMD_WRITE_F |
+                                  FW_VI_MAC_CMD_VIID_V(viid));
+       val = FW_CMD_LEN16_V(1) |
+             FW_VI_MAC_CMD_ENTRY_TYPE_V(FW_VI_MAC_TYPE_EXACTMAC_VNI);
+       c.freemacs_to_len16 = cpu_to_be32(val);
+       p->valid_to_idx = cpu_to_be16(FW_VI_MAC_CMD_VALID_F |
+                                     FW_VI_MAC_CMD_IDX_V(FW_VI_MAC_ADD_MAC));
+       memcpy(p->macaddr, addr, sizeof(p->macaddr));
+       memcpy(p->macaddr_mask, mask, sizeof(p->macaddr_mask));
+
+       p->lookup_type_to_vni =
+               cpu_to_be32(FW_VI_MAC_CMD_VNI_V(vni) |
+                           FW_VI_MAC_CMD_DIP_HIT_V(dip_hit) |
+                           FW_VI_MAC_CMD_LOOKUP_TYPE_V(lookup_type));
+       p->vni_mask_pkd = cpu_to_be32(FW_VI_MAC_CMD_VNI_MASK_V(vni_mask));
+       ret = t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, sleep_ok);
+       if (ret == 0)
+               ret = FW_VI_MAC_CMD_IDX_G(be16_to_cpu(p->valid_to_idx));
+       return ret;
+}
+
 /**
  *     t4_alloc_raw_mac_filt - Adds a mac entry in mps tcam
  *     @adap: the adapter
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index 276fdf2..843f8ca 100644
 #define VNIC_V(x) ((x) << VNIC_S)
 #define VNIC_F    VNIC_V(1U)
 
+#define USE_ENC_IDX_S          13
+#define USE_ENC_IDX_V(x)       ((x) << USE_ENC_IDX_S)
+#define USE_ENC_IDX_F          USE_ENC_IDX_V(1U)
+
 #define CSUM_HAS_PSEUDO_HDR_S    10
 #define CSUM_HAS_PSEUDO_HDR_V(x) ((x) << CSUM_HAS_PSEUDO_HDR_S)
 #define CSUM_HAS_PSEUDO_HDR_F    CSUM_HAS_PSEUDO_HDR_V(1U)
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 0e007ee..e6b2e95 100644
@@ -2158,6 +2158,14 @@ struct fw_vi_mac_cmd {
                        __be64 data0m_pkd;
                        __be32 data1m[2];
                } raw;
+               struct fw_vi_mac_vni {
+                       __be16 valid_to_idx;
+                       __u8 macaddr[6];
+                       __be16 r7;
+                       __u8 macaddr_mask[6];
+                       __be32 lookup_type_to_vni;
+                       __be32 vni_mask_pkd;
+               } exact_vni[2];
        } u;
 };
 
@@ -2205,6 +2213,32 @@ struct fw_vi_mac_cmd {
 #define FW_VI_MAC_CMD_RAW_IDX_G(x)      \
        (((x) >> FW_VI_MAC_CMD_RAW_IDX_S) & FW_VI_MAC_CMD_RAW_IDX_M)
 
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_S    31
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_M    0x1
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_V(x) ((x) << FW_VI_MAC_CMD_LOOKUP_TYPE_S)
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_G(x) \
+       (((x) >> FW_VI_MAC_CMD_LOOKUP_TYPE_S) & FW_VI_MAC_CMD_LOOKUP_TYPE_M)
+#define FW_VI_MAC_CMD_LOOKUP_TYPE_F    FW_VI_MAC_CMD_LOOKUP_TYPE_V(1U)
+
+#define FW_VI_MAC_CMD_DIP_HIT_S                30
+#define FW_VI_MAC_CMD_DIP_HIT_M                0x1
+#define FW_VI_MAC_CMD_DIP_HIT_V(x)     ((x) << FW_VI_MAC_CMD_DIP_HIT_S)
+#define FW_VI_MAC_CMD_DIP_HIT_G(x)     \
+       (((x) >> FW_VI_MAC_CMD_DIP_HIT_S) & FW_VI_MAC_CMD_DIP_HIT_M)
+#define FW_VI_MAC_CMD_DIP_HIT_F                FW_VI_MAC_CMD_DIP_HIT_V(1U)
+
+#define FW_VI_MAC_CMD_VNI_S            0
+#define FW_VI_MAC_CMD_VNI_M            0xffffff
+#define FW_VI_MAC_CMD_VNI_V(x)         ((x) << FW_VI_MAC_CMD_VNI_S)
+#define FW_VI_MAC_CMD_VNI_G(x)         \
+       (((x) >> FW_VI_MAC_CMD_VNI_S) & FW_VI_MAC_CMD_VNI_M)
+
+#define FW_VI_MAC_CMD_VNI_MASK_S       0
+#define FW_VI_MAC_CMD_VNI_MASK_M       0xffffff
+#define FW_VI_MAC_CMD_VNI_MASK_V(x)    ((x) << FW_VI_MAC_CMD_VNI_MASK_S)
+#define FW_VI_MAC_CMD_VNI_MASK_G(x)    \
+       (((x) >> FW_VI_MAC_CMD_VNI_MASK_S) & FW_VI_MAC_CMD_VNI_MASK_M)
+
 #define FW_RXMODE_MTU_NO_CHG   65535
 
 struct fw_vi_rxmode_cmd {