1 // SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTx2 RVU Physical Function ethernet driver
4 * Copyright (C) 2020 Marvell.
9 #include "otx2_common.h"
11 #define OTX2_DEFAULT_ACTION 0x1
14 struct ethtool_rx_flow_spec flow_spec;
15 struct list_head list;
22 int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
24 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
25 struct npc_mcam_alloc_entry_req *req;
26 struct npc_mcam_alloc_entry_rsp *rsp;
27 int vf_vlan_max_flows;
30 mutex_lock(&pfvf->mbox.lock);
32 req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
34 mutex_unlock(&pfvf->mbox.lock);
38 vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
40 req->count = OTX2_MCAM_COUNT + vf_vlan_max_flows;
42 /* Send message to AF */
43 if (otx2_sync_mbox_msg(&pfvf->mbox)) {
44 mutex_unlock(&pfvf->mbox.lock);
48 rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
49 (&pfvf->mbox.mbox, 0, &req->hdr);
51 if (rsp->count != req->count) {
52 netdev_info(pfvf->netdev,
53 "Unable to allocate %d MCAM entries, got %d\n",
54 req->count, rsp->count);
55 /* support only ntuples here */
56 flow_cfg->ntuple_max_flows = rsp->count;
57 flow_cfg->ntuple_offset = 0;
58 pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
60 flow_cfg->vf_vlan_offset = 0;
61 flow_cfg->ntuple_offset = flow_cfg->vf_vlan_offset +
63 flow_cfg->unicast_offset = flow_cfg->ntuple_offset +
64 OTX2_MAX_NTUPLE_FLOWS;
65 flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
66 OTX2_MAX_UNICAST_FLOWS;
67 pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
68 pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
69 pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
70 pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
73 for (i = 0; i < rsp->count; i++)
74 flow_cfg->entry[i] = rsp->entry_list[i];
76 pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
78 mutex_unlock(&pfvf->mbox.lock);
83 int otx2_mcam_flow_init(struct otx2_nic *pf)
87 pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
92 INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
94 pf->flow_cfg->ntuple_max_flows = OTX2_MAX_NTUPLE_FLOWS;
96 err = otx2_alloc_mcam_entries(pf);
100 pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
101 * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
108 void otx2_mcam_flow_del(struct otx2_nic *pf)
110 otx2_destroy_mcam_flows(pf);
113 /* On success adds mcam entry
114 * On failure enable promisous mode
116 static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
118 struct otx2_flow_config *flow_cfg = pf->flow_cfg;
119 struct npc_install_flow_req *req;
122 if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
125 /* dont have free mcam entries or uc list is greater than alloted */
126 if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
129 mutex_lock(&pf->mbox.lock);
130 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
132 mutex_unlock(&pf->mbox.lock);
136 /* unicast offset starts with 32 0..31 for ntuple */
137 for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
138 if (pf->mac_table[i].inuse)
140 ether_addr_copy(pf->mac_table[i].addr, mac);
141 pf->mac_table[i].inuse = true;
142 pf->mac_table[i].mcam_entry =
143 flow_cfg->entry[i + flow_cfg->unicast_offset];
144 req->entry = pf->mac_table[i].mcam_entry;
148 ether_addr_copy(req->packet.dmac, mac);
149 eth_broadcast_addr((u8 *)&req->mask.dmac);
150 req->features = BIT_ULL(NPC_DMAC);
151 req->channel = pf->hw.rx_chan_base;
152 req->intf = NIX_INTF_RX;
153 req->op = NIX_RX_ACTION_DEFAULT;
156 err = otx2_sync_mbox_msg(&pf->mbox);
157 mutex_unlock(&pf->mbox.lock);
162 int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
164 struct otx2_nic *pf = netdev_priv(netdev);
166 return otx2_do_add_macfilter(pf, mac);
169 static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
174 for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
175 if (!pf->mac_table[i].inuse)
178 if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
179 *mcam_entry = pf->mac_table[i].mcam_entry;
180 pf->mac_table[i].inuse = false;
187 int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
189 struct otx2_nic *pf = netdev_priv(netdev);
190 struct npc_delete_flow_req *req;
193 /* check does mcam entry exists for given mac */
194 if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
197 mutex_lock(&pf->mbox.lock);
198 req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
200 mutex_unlock(&pf->mbox.lock);
203 req->entry = mcam_entry;
204 /* Send message to AF */
205 err = otx2_sync_mbox_msg(&pf->mbox);
206 mutex_unlock(&pf->mbox.lock);
211 static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
213 struct otx2_flow *iter;
215 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
216 if (iter->location == location)
223 static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
225 struct list_head *head = &pfvf->flow_cfg->flow_list;
226 struct otx2_flow *iter;
228 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
229 if (iter->location > flow->location)
234 list_add(&flow->list, head);
237 int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
240 struct otx2_flow *iter;
242 if (location >= pfvf->flow_cfg->ntuple_max_flows)
245 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
246 if (iter->location == location) {
247 nfc->fs = iter->flow_spec;
255 int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
262 nfc->data = pfvf->flow_cfg->ntuple_max_flows;
263 while ((!err || err == -ENOENT) && idx < nfc->rule_cnt) {
264 err = otx2_get_flow(pfvf, nfc, location);
266 rule_locs[idx++] = location;
273 static void otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
274 struct npc_install_flow_req *req,
277 struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
278 struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
279 struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
280 struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
281 struct flow_msg *pmask = &req->mask;
282 struct flow_msg *pkt = &req->packet;
286 if (ipv4_usr_mask->ip4src) {
287 memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
288 sizeof(pkt->ip4src));
289 memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
290 sizeof(pmask->ip4src));
291 req->features |= BIT_ULL(NPC_SIP_IPV4);
293 if (ipv4_usr_mask->ip4dst) {
294 memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
295 sizeof(pkt->ip4dst));
296 memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
297 sizeof(pmask->ip4dst));
298 req->features |= BIT_ULL(NPC_DIP_IPV4);
304 if (ipv4_l4_mask->ip4src) {
305 memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
306 sizeof(pkt->ip4src));
307 memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
308 sizeof(pmask->ip4src));
309 req->features |= BIT_ULL(NPC_SIP_IPV4);
311 if (ipv4_l4_mask->ip4dst) {
312 memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
313 sizeof(pkt->ip4dst));
314 memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
315 sizeof(pmask->ip4dst));
316 req->features |= BIT_ULL(NPC_DIP_IPV4);
318 if (ipv4_l4_mask->psrc) {
319 memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
321 memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
322 sizeof(pmask->sport));
323 if (flow_type == UDP_V4_FLOW)
324 req->features |= BIT_ULL(NPC_SPORT_UDP);
325 else if (flow_type == TCP_V4_FLOW)
326 req->features |= BIT_ULL(NPC_SPORT_TCP);
328 req->features |= BIT_ULL(NPC_SPORT_SCTP);
330 if (ipv4_l4_mask->pdst) {
331 memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
333 memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
334 sizeof(pmask->dport));
335 if (flow_type == UDP_V4_FLOW)
336 req->features |= BIT_ULL(NPC_DPORT_UDP);
337 else if (flow_type == TCP_V4_FLOW)
338 req->features |= BIT_ULL(NPC_DPORT_TCP);
340 req->features |= BIT_ULL(NPC_DPORT_SCTP);
348 static void otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
349 struct npc_install_flow_req *req,
352 struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
353 struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
354 struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
355 struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
356 struct flow_msg *pmask = &req->mask;
357 struct flow_msg *pkt = &req->packet;
361 if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
362 memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
363 sizeof(pkt->ip6src));
364 memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
365 sizeof(pmask->ip6src));
366 req->features |= BIT_ULL(NPC_SIP_IPV6);
368 if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
369 memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
370 sizeof(pkt->ip6dst));
371 memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
372 sizeof(pmask->ip6dst));
373 req->features |= BIT_ULL(NPC_DIP_IPV6);
379 if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
380 memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
381 sizeof(pkt->ip6src));
382 memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
383 sizeof(pmask->ip6src));
384 req->features |= BIT_ULL(NPC_SIP_IPV6);
386 if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
387 memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
388 sizeof(pkt->ip6dst));
389 memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
390 sizeof(pmask->ip6dst));
391 req->features |= BIT_ULL(NPC_DIP_IPV6);
393 if (ipv6_l4_mask->psrc) {
394 memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
396 memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
397 sizeof(pmask->sport));
398 if (flow_type == UDP_V6_FLOW)
399 req->features |= BIT_ULL(NPC_SPORT_UDP);
400 else if (flow_type == TCP_V6_FLOW)
401 req->features |= BIT_ULL(NPC_SPORT_TCP);
403 req->features |= BIT_ULL(NPC_SPORT_SCTP);
405 if (ipv6_l4_mask->pdst) {
406 memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
408 memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
409 sizeof(pmask->dport));
410 if (flow_type == UDP_V6_FLOW)
411 req->features |= BIT_ULL(NPC_DPORT_UDP);
412 else if (flow_type == TCP_V6_FLOW)
413 req->features |= BIT_ULL(NPC_DPORT_TCP);
415 req->features |= BIT_ULL(NPC_DPORT_SCTP);
423 int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
424 struct npc_install_flow_req *req)
426 struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
427 struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
428 struct flow_msg *pmask = &req->mask;
429 struct flow_msg *pkt = &req->packet;
432 flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT);
434 /* bits not set in mask are don't care */
436 if (!is_zero_ether_addr(eth_mask->h_source)) {
437 ether_addr_copy(pkt->smac, eth_hdr->h_source);
438 ether_addr_copy(pmask->smac, eth_mask->h_source);
439 req->features |= BIT_ULL(NPC_SMAC);
441 if (!is_zero_ether_addr(eth_mask->h_dest)) {
442 ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
443 ether_addr_copy(pmask->dmac, eth_mask->h_dest);
444 req->features |= BIT_ULL(NPC_DMAC);
446 if (eth_mask->h_proto) {
447 memcpy(&pkt->etype, ð_hdr->h_proto,
449 memcpy(&pmask->etype, ð_mask->h_proto,
450 sizeof(pmask->etype));
451 req->features |= BIT_ULL(NPC_ETYPE);
458 otx2_prepare_ipv4_flow(fsp, req, flow_type);
464 otx2_prepare_ipv6_flow(fsp, req, flow_type);
469 if (fsp->flow_type & FLOW_EXT) {
470 if (fsp->m_ext.vlan_etype)
472 if (fsp->m_ext.vlan_tci) {
473 if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
475 if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
478 memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
479 sizeof(pkt->vlan_tci));
480 memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
481 sizeof(pmask->vlan_tci));
482 req->features |= BIT_ULL(NPC_OUTER_VID);
485 /* Not Drop/Direct to queue but use action in default entry */
486 if (fsp->m_ext.data[1] &&
487 fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
488 req->op = NIX_RX_ACTION_DEFAULT;
491 if (fsp->flow_type & FLOW_MAC_EXT &&
492 !is_zero_ether_addr(fsp->m_ext.h_dest)) {
493 ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
494 ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
495 req->features |= BIT_ULL(NPC_DMAC);
504 static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
506 u64 ring_cookie = flow->flow_spec.ring_cookie;
507 struct npc_install_flow_req *req;
510 mutex_lock(&pfvf->mbox.lock);
511 req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
513 mutex_unlock(&pfvf->mbox.lock);
517 err = otx2_prepare_flow_request(&flow->flow_spec, req);
519 /* free the allocated msg above */
520 otx2_mbox_reset(&pfvf->mbox.mbox, 0);
521 mutex_unlock(&pfvf->mbox.lock);
525 req->entry = flow->entry;
526 req->intf = NIX_INTF_RX;
528 req->channel = pfvf->hw.rx_chan_base;
529 if (ring_cookie == RX_CLS_FLOW_DISC) {
530 req->op = NIX_RX_ACTIONOP_DROP;
532 /* change to unicast only if action of default entry is not
535 if (req->op != NIX_RX_ACTION_DEFAULT)
536 req->op = NIX_RX_ACTIONOP_UCAST;
537 req->index = ethtool_get_flow_spec_ring(ring_cookie);
538 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
539 if (vf > pci_num_vf(pfvf->pdev)) {
540 mutex_unlock(&pfvf->mbox.lock);
545 /* ethtool ring_cookie has (VF + 1) for VF */
552 /* Send message to AF */
553 err = otx2_sync_mbox_msg(&pfvf->mbox);
554 mutex_unlock(&pfvf->mbox.lock);
558 int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rx_flow_spec *fsp)
560 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
561 u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
562 struct otx2_flow *flow;
566 if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
569 if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
572 if (fsp->location >= flow_cfg->ntuple_max_flows)
575 flow = otx2_find_flow(pfvf, fsp->location);
577 flow = kzalloc(sizeof(*flow), GFP_ATOMIC);
580 flow->location = fsp->location;
581 flow->entry = flow_cfg->entry[flow_cfg->ntuple_offset +
586 flow->flow_spec = *fsp;
588 err = otx2_add_flow_msg(pfvf, flow);
595 /* add the new flow installed to list */
597 otx2_add_flow_to_list(pfvf, flow);
598 flow_cfg->nr_flows++;
604 static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
606 struct npc_delete_flow_req *req;
609 mutex_lock(&pfvf->mbox.lock);
610 req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
612 mutex_unlock(&pfvf->mbox.lock);
620 /* Send message to AF */
621 err = otx2_sync_mbox_msg(&pfvf->mbox);
622 mutex_unlock(&pfvf->mbox.lock);
626 int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
628 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
629 struct otx2_flow *flow;
632 if (location >= flow_cfg->ntuple_max_flows)
635 flow = otx2_find_flow(pfvf, location);
639 err = otx2_remove_flow_msg(pfvf, flow->entry, false);
643 list_del(&flow->list);
645 flow_cfg->nr_flows--;
650 int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
652 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
653 struct npc_delete_flow_req *req;
654 struct otx2_flow *iter, *tmp;
657 if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
660 mutex_lock(&pfvf->mbox.lock);
661 req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
663 mutex_unlock(&pfvf->mbox.lock);
667 req->start = flow_cfg->entry[flow_cfg->ntuple_offset];
668 req->end = flow_cfg->entry[flow_cfg->ntuple_offset +
669 flow_cfg->ntuple_max_flows - 1];
670 err = otx2_sync_mbox_msg(&pfvf->mbox);
671 mutex_unlock(&pfvf->mbox.lock);
673 list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
674 list_del(&iter->list);
676 flow_cfg->nr_flows--;
681 int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
683 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
684 struct npc_mcam_free_entry_req *req;
685 struct otx2_flow *iter, *tmp;
688 if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
691 /* remove all flows */
692 err = otx2_remove_flow_msg(pfvf, 0, true);
696 list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
697 list_del(&iter->list);
699 flow_cfg->nr_flows--;
702 mutex_lock(&pfvf->mbox.lock);
703 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
705 mutex_unlock(&pfvf->mbox.lock);
710 /* Send message to AF to free MCAM entries */
711 err = otx2_sync_mbox_msg(&pfvf->mbox);
713 mutex_unlock(&pfvf->mbox.lock);
717 pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
718 mutex_unlock(&pfvf->mbox.lock);
723 int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
725 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
726 struct npc_install_flow_req *req;
729 mutex_lock(&pfvf->mbox.lock);
730 req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
732 mutex_unlock(&pfvf->mbox.lock);
736 req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
737 req->intf = NIX_INTF_RX;
738 ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
739 eth_broadcast_addr((u8 *)&req->mask.dmac);
740 req->channel = pfvf->hw.rx_chan_base;
741 req->op = NIX_RX_ACTION_DEFAULT;
742 req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
743 req->vtag0_valid = true;
744 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
746 /* Send message to AF */
747 err = otx2_sync_mbox_msg(&pfvf->mbox);
748 mutex_unlock(&pfvf->mbox.lock);
752 static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
754 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
755 struct npc_delete_flow_req *req;
758 mutex_lock(&pfvf->mbox.lock);
759 req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
761 mutex_unlock(&pfvf->mbox.lock);
765 req->entry = flow_cfg->entry[flow_cfg->rx_vlan_offset];
766 /* Send message to AF */
767 err = otx2_sync_mbox_msg(&pfvf->mbox);
768 mutex_unlock(&pfvf->mbox.lock);
772 int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
774 struct nix_vtag_config *req;
775 struct mbox_msghdr *rsp_hdr;
778 /* Dont have enough mcam entries */
779 if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
783 err = otx2_install_rxvlan_offload_flow(pf);
787 err = otx2_delete_rxvlan_offload_flow(pf);
792 mutex_lock(&pf->mbox.lock);
793 req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
795 mutex_unlock(&pf->mbox.lock);
799 /* config strip, capture and size */
800 req->vtag_size = VTAGSIZE_T4;
801 req->cfg_type = 1; /* rx vlan cfg */
802 req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
803 req->rx.strip_vtag = enable;
804 req->rx.capture_vtag = enable;
806 err = otx2_sync_mbox_msg(&pf->mbox);
808 mutex_unlock(&pf->mbox.lock);
812 rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
813 if (IS_ERR(rsp_hdr)) {
814 mutex_unlock(&pf->mbox.lock);
815 return PTR_ERR(rsp_hdr);
818 mutex_unlock(&pf->mbox.lock);