1 // SPDX-License-Identifier: GPL-2.0
2 /* Marvell OcteonTx2 RVU Physical Function ethernet driver
4 * Copyright (C) 2020 Marvell.
9 #include "otx2_common.h"
11 #define OTX2_DEFAULT_ACTION 0x1
14 struct ethtool_rx_flow_spec flow_spec;
15 struct list_head list;
/* Free the array holding ntuple MCAM entry numbers and zero the
 * ntuple/tc flow capacities so a subsequent allocation starts clean.
 */
29 static void otx2_clear_ntuple_flow_info(struct otx2_nic *pfvf, struct otx2_flow_config *flow_cfg)
31 devm_kfree(pfvf->dev, flow_cfg->flow_ent);
32 flow_cfg->flow_ent = NULL;
33 flow_cfg->ntuple_max_flows = 0;
34 flow_cfg->tc_max_flows = 0;
/* Return all previously allocated ntuple MCAM entries to the AF,
 * one free request per entry, then clear the local bookkeeping.
 * No-op when no ntuple entries are currently allocated.
 */
37 static int otx2_free_ntuple_mcam_entries(struct otx2_nic *pfvf)
39 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
40 struct npc_mcam_free_entry_req *req;
43 if (!flow_cfg->ntuple_max_flows)
46 mutex_lock(&pfvf->mbox.lock);
47 for (ent = 0; ent < flow_cfg->ntuple_max_flows; ent++) {
48 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
52 req->entry = flow_cfg->flow_ent[ent];
54 /* Send message to AF to free MCAM entries */
55 err = otx2_sync_mbox_msg(&pfvf->mbox);
59 mutex_unlock(&pfvf->mbox.lock);
60 otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
/* Allocate @count MCAM entries for ntuple filters, replacing any existing
 * allocation.  Entries are requested from the AF in chunks of at most
 * NPC_MAX_NONCONTIG_ENTRIES, at higher priority than the first default
 * entry so ntuple rules match before ucast/vlan rules.  A partial
 * allocation is accepted: the achieved count becomes the new capacity.
 */
64 static int otx2_alloc_ntuple_mcam_entries(struct otx2_nic *pfvf, u16 count)
66 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
67 struct npc_mcam_alloc_entry_req *req;
68 struct npc_mcam_alloc_entry_rsp *rsp;
69 int ent, allocated = 0;
71 /* Free current ones and allocate new ones with requested count */
72 otx2_free_ntuple_mcam_entries(pfvf);
77 flow_cfg->flow_ent = devm_kmalloc_array(pfvf->dev, count,
78 sizeof(u16), GFP_KERNEL);
79 if (!flow_cfg->flow_ent)
82 mutex_lock(&pfvf->mbox.lock);
84 /* In a single request a max of NPC_MAX_NONCONTIG_ENTRIES MCAM entries
85 * can only be allocated.
87 while (allocated < count) {
88 req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
93 req->count = (count - allocated) > NPC_MAX_NONCONTIG_ENTRIES ?
94 NPC_MAX_NONCONTIG_ENTRIES : count - allocated;
95 req->priority = NPC_MCAM_HIGHER_PRIO;
96 req->ref_entry = flow_cfg->def_ent[0];
98 /* Send message to AF */
99 if (otx2_sync_mbox_msg(&pfvf->mbox))
102 rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
103 (&pfvf->mbox.mbox, 0, &req->hdr);
105 for (ent = 0; ent < rsp->count; ent++)
106 flow_cfg->flow_ent[ent + allocated] = rsp->entry_list[ent];
108 allocated += rsp->count;
110 /* If this request is not fulfilled, no need to send
113 if (rsp->count != req->count)
118 mutex_unlock(&pfvf->mbox.lock);
120 flow_cfg->ntuple_offset = 0;
121 flow_cfg->ntuple_max_flows = allocated;
122 flow_cfg->tc_max_flows = allocated;
124 if (allocated != count)
125 netdev_info(pfvf->netdev,
126 "Unable to allocate %d MCAM entries for ntuple, got %d\n",
/* Allocate the default MCAM entries (per-VF VLAN, unicast and rx-vlan
 * slots) in one contiguous request, record the offsets of each group in
 * flow_cfg, then allocate the ntuple entries on top.  Feature support
 * flags are only set once the corresponding entries exist.
 */
132 int otx2_alloc_mcam_entries(struct otx2_nic *pfvf)
134 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
135 struct npc_mcam_alloc_entry_req *req;
136 struct npc_mcam_alloc_entry_rsp *rsp;
137 int vf_vlan_max_flows;
140 vf_vlan_max_flows = pfvf->total_vfs * OTX2_PER_VF_VLAN_FLOWS;
141 count = OTX2_MAX_UNICAST_FLOWS +
142 OTX2_MAX_VLAN_FLOWS + vf_vlan_max_flows;
144 flow_cfg->def_ent = devm_kmalloc_array(pfvf->dev, count,
145 sizeof(u16), GFP_KERNEL);
146 if (!flow_cfg->def_ent)
149 mutex_lock(&pfvf->mbox.lock);
151 req = otx2_mbox_alloc_msg_npc_mcam_alloc_entry(&pfvf->mbox);
153 mutex_unlock(&pfvf->mbox.lock);
160 /* Send message to AF */
161 if (otx2_sync_mbox_msg(&pfvf->mbox)) {
162 mutex_unlock(&pfvf->mbox.lock);
166 rsp = (struct npc_mcam_alloc_entry_rsp *)otx2_mbox_get_rsp
167 (&pfvf->mbox.mbox, 0, &req->hdr);
/* Default entries are all-or-nothing: a partial grant is released. */
169 if (rsp->count != req->count) {
170 netdev_info(pfvf->netdev,
171 "Unable to allocate MCAM entries for ucast, vlan and vf_vlan\n");
172 mutex_unlock(&pfvf->mbox.lock);
173 devm_kfree(pfvf->dev, flow_cfg->def_ent);
177 for (ent = 0; ent < rsp->count; ent++)
178 flow_cfg->def_ent[ent] = rsp->entry_list[ent];
/* Layout inside def_ent[]: [vf_vlan ...][unicast ...][rx_vlan ...] */
180 flow_cfg->vf_vlan_offset = 0;
181 flow_cfg->unicast_offset = vf_vlan_max_flows;
182 flow_cfg->rx_vlan_offset = flow_cfg->unicast_offset +
183 OTX2_MAX_UNICAST_FLOWS;
184 pfvf->flags |= OTX2_FLAG_UCAST_FLTR_SUPPORT;
185 pfvf->flags |= OTX2_FLAG_RX_VLAN_SUPPORT;
186 pfvf->flags |= OTX2_FLAG_VF_VLAN_SUPPORT;
188 pfvf->flags |= OTX2_FLAG_MCAM_ENTRIES_ALLOC;
189 mutex_unlock(&pfvf->mbox.lock);
191 /* Allocate entries for Ntuple filters */
192 count = otx2_alloc_ntuple_mcam_entries(pfvf, OTX2_DEFAULT_FLOWCOUNT);
194 otx2_clear_ntuple_flow_info(pfvf, flow_cfg);
198 pfvf->flags |= OTX2_FLAG_NTUPLE_SUPPORT;
199 pfvf->flags |= OTX2_FLAG_TC_FLOWER_SUPPORT;
/* One-time flow-management init: allocate flow_cfg, the MCAM entries,
 * the unicast MAC table, and (if the hardware provides any) the
 * CGX/RPM DMAC-filter index map.
 */
204 int otx2_mcam_flow_init(struct otx2_nic *pf)
208 pf->flow_cfg = devm_kzalloc(pf->dev, sizeof(struct otx2_flow_config),
213 INIT_LIST_HEAD(&pf->flow_cfg->flow_list);
215 err = otx2_alloc_mcam_entries(pf);
219 /* Check if MCAM entries are allocated or not */
220 if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
223 pf->mac_table = devm_kzalloc(pf->dev, sizeof(struct otx2_mac_table)
224 * OTX2_MAX_UNICAST_FLOWS, GFP_KERNEL);
228 otx2_dmacflt_get_max_cnt(pf);
230 /* DMAC filters are not allocated */
231 if (!pf->flow_cfg->dmacflt_max_flows)
234 pf->flow_cfg->bmap_to_dmacindex =
235 devm_kzalloc(pf->dev, sizeof(u8) *
236 pf->flow_cfg->dmacflt_max_flows,
239 if (!pf->flow_cfg->bmap_to_dmacindex)
242 pf->flags |= OTX2_FLAG_DMACFLTR_SUPPORT;
/* Teardown counterpart of otx2_mcam_flow_init(): remove all installed
 * MCAM flows and release the entries.
 */
247 void otx2_mcam_flow_del(struct otx2_nic *pf)
249 otx2_destroy_mcam_flows(pf);
252 /* On success adds mcam entry
253 * On failure enable promiscuous mode
 *
 * Finds a free slot in the unicast mac_table, installs an exact-match
 * DMAC rule for @mac in the corresponding default MCAM entry and records
 * the slot as in use.
 */
255 static int otx2_do_add_macfilter(struct otx2_nic *pf, const u8 *mac)
257 struct otx2_flow_config *flow_cfg = pf->flow_cfg;
258 struct npc_install_flow_req *req;
261 if (!(pf->flags & OTX2_FLAG_UCAST_FLTR_SUPPORT))
264 /* dont have free mcam entries or uc list is greater than alloted */
265 if (netdev_uc_count(pf->netdev) > OTX2_MAX_UNICAST_FLOWS)
268 mutex_lock(&pf->mbox.lock);
269 req = otx2_mbox_alloc_msg_npc_install_flow(&pf->mbox);
271 mutex_unlock(&pf->mbox.lock);
275 /* unicast offset starts with 32 0..31 for ntuple */
276 for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
277 if (pf->mac_table[i].inuse)
279 ether_addr_copy(pf->mac_table[i].addr, mac);
280 pf->mac_table[i].inuse = true;
281 pf->mac_table[i].mcam_entry =
282 flow_cfg->def_ent[i + flow_cfg->unicast_offset];
283 req->entry = pf->mac_table[i].mcam_entry;
/* Full 48-bit DMAC match; all other fields are don't-care. */
287 ether_addr_copy(req->packet.dmac, mac);
288 eth_broadcast_addr((u8 *)&req->mask.dmac);
289 req->features = BIT_ULL(NPC_DMAC);
290 req->channel = pf->hw.rx_chan_base;
291 req->intf = NIX_INTF_RX;
292 req->op = NIX_RX_ACTION_DEFAULT;
295 err = otx2_sync_mbox_msg(&pf->mbox);
296 mutex_unlock(&pf->mbox.lock);
/* Public entry point for adding a unicast MAC filter.  If CGX/RPM DMAC
 * filters are in use, remind the operator that the address must also be
 * added there, then install the NPC MCAM filter.
 */
301 int otx2_add_macfilter(struct net_device *netdev, const u8 *mac)
303 struct otx2_nic *pf = netdev_priv(netdev);
305 if (bitmap_weight(&pf->flow_cfg->dmacflt_bmap,
306 pf->flow_cfg->dmacflt_max_flows))
308 "Add %pM to CGX/RPM DMAC filters list as well\n",
311 return otx2_do_add_macfilter(pf, mac);
/* Look up the MCAM entry installed for @mac in the unicast table.
 * On a hit the entry number is returned via @mcam_entry and the table
 * slot is released (inuse = false) — this is a find-and-claim used by
 * the delete path, not a read-only lookup.
 */
314 static bool otx2_get_mcamentry_for_mac(struct otx2_nic *pf, const u8 *mac,
319 for (i = 0; i < OTX2_MAX_UNICAST_FLOWS; i++) {
320 if (!pf->mac_table[i].inuse)
323 if (ether_addr_equal(pf->mac_table[i].addr, mac)) {
324 *mcam_entry = pf->mac_table[i].mcam_entry;
325 pf->mac_table[i].inuse = false;
/* Remove the unicast MAC filter for @mac: release its mac_table slot
 * and ask the AF to delete the corresponding MCAM flow.
 */
332 int otx2_del_macfilter(struct net_device *netdev, const u8 *mac)
334 struct otx2_nic *pf = netdev_priv(netdev);
335 struct npc_delete_flow_req *req;
338 /* check does mcam entry exists for given mac */
339 if (!otx2_get_mcamentry_for_mac(pf, mac, &mcam_entry))
342 mutex_lock(&pf->mbox.lock);
343 req = otx2_mbox_alloc_msg_npc_delete_flow(&pf->mbox);
345 mutex_unlock(&pf->mbox.lock);
348 req->entry = mcam_entry;
349 /* Send message to AF */
350 err = otx2_sync_mbox_msg(&pf->mbox);
351 mutex_unlock(&pf->mbox.lock);
/* Linear search of the flow list for an exact @location match. */
356 static struct otx2_flow *otx2_find_flow(struct otx2_nic *pfvf, u32 location)
358 struct otx2_flow *iter;
360 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
361 if (iter->location == location)
/* Insert @flow into flow_list keeping the list sorted by ascending
 * location: walk until the first entry with a larger location and
 * add before it.
 */
368 static void otx2_add_flow_to_list(struct otx2_nic *pfvf, struct otx2_flow *flow)
370 struct list_head *head = &pfvf->flow_cfg->flow_list;
371 struct otx2_flow *iter;
373 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
374 if (iter->location > flow->location)
379 list_add(&flow->list, head);
/* Maximum rule locations currently addressable from ethtool: the ntuple
 * capacity, extended by the DMAC-filter range once ntuple slots are
 * exhausted or any DMAC filter is in use (DMAC rules live at locations
 * above ntuple_max_flows).
 */
382 static int otx2_get_maxflows(struct otx2_flow_config *flow_cfg)
384 if (flow_cfg->nr_flows == flow_cfg->ntuple_max_flows ||
385 bitmap_weight(&flow_cfg->dmacflt_bmap,
386 flow_cfg->dmacflt_max_flows))
387 return flow_cfg->ntuple_max_flows + flow_cfg->dmacflt_max_flows;
389 return flow_cfg->ntuple_max_flows;
/* ethtool ETHTOOL_GRXCLSRULE: copy the stored flow spec (and RSS
 * context) for the rule at @location into @nfc.
 */
392 int otx2_get_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
395 struct otx2_flow *iter;
397 if (location >= otx2_get_maxflows(pfvf->flow_cfg))
400 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
401 if (iter->location == location) {
402 nfc->fs = iter->flow_spec;
403 nfc->rss_context = iter->rss_ctx_id;
/* ethtool ETHTOOL_GRXCLSRLALL: enumerate installed rule locations into
 * @rule_locs, probing each location via otx2_get_flow() (-ENOENT means
 * an empty slot and is skipped).
 */
411 int otx2_get_all_flows(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc,
414 u32 rule_cnt = nfc->rule_cnt;
419 nfc->data = otx2_get_maxflows(pfvf->flow_cfg);
420 while ((!err || err == -ENOENT) && idx < rule_cnt) {
421 err = otx2_get_flow(pfvf, nfc, location);
423 rule_locs[idx++] = location;
426 nfc->rule_cnt = rule_cnt;
/* Translate the IPv4 portion of an ethtool flow spec into an NPC
 * install_flow request: for each field with a non-zero mask, copy the
 * value/mask pair into req->packet/req->mask and set the matching
 * NPC_* feature bit.  Handles IP_USER_FLOW, TCP/UDP/SCTP_V4_FLOW and
 * AH/ESP_V4_FLOW variants of the union.
 */
431 static int otx2_prepare_ipv4_flow(struct ethtool_rx_flow_spec *fsp,
432 struct npc_install_flow_req *req,
435 struct ethtool_usrip4_spec *ipv4_usr_mask = &fsp->m_u.usr_ip4_spec;
436 struct ethtool_usrip4_spec *ipv4_usr_hdr = &fsp->h_u.usr_ip4_spec;
437 struct ethtool_tcpip4_spec *ipv4_l4_mask = &fsp->m_u.tcp_ip4_spec;
438 struct ethtool_tcpip4_spec *ipv4_l4_hdr = &fsp->h_u.tcp_ip4_spec;
439 struct ethtool_ah_espip4_spec *ah_esp_hdr = &fsp->h_u.ah_ip4_spec;
440 struct ethtool_ah_espip4_spec *ah_esp_mask = &fsp->m_u.ah_ip4_spec;
441 struct flow_msg *pmask = &req->mask;
442 struct flow_msg *pkt = &req->packet;
446 if (ipv4_usr_mask->ip4src) {
447 memcpy(&pkt->ip4src, &ipv4_usr_hdr->ip4src,
448 sizeof(pkt->ip4src));
449 memcpy(&pmask->ip4src, &ipv4_usr_mask->ip4src,
450 sizeof(pmask->ip4src));
451 req->features |= BIT_ULL(NPC_SIP_IPV4);
453 if (ipv4_usr_mask->ip4dst) {
454 memcpy(&pkt->ip4dst, &ipv4_usr_hdr->ip4dst,
455 sizeof(pkt->ip4dst));
456 memcpy(&pmask->ip4dst, &ipv4_usr_mask->ip4dst,
457 sizeof(pmask->ip4dst));
458 req->features |= BIT_ULL(NPC_DIP_IPV4);
460 if (ipv4_usr_mask->tos) {
461 pkt->tos = ipv4_usr_hdr->tos;
462 pmask->tos = ipv4_usr_mask->tos;
463 req->features |= BIT_ULL(NPC_TOS);
465 if (ipv4_usr_mask->proto) {
/* Only protocols the NPC profile can classify are accepted. */
466 switch (ipv4_usr_hdr->proto) {
468 req->features |= BIT_ULL(NPC_IPPROTO_ICMP);
471 req->features |= BIT_ULL(NPC_IPPROTO_TCP);
474 req->features |= BIT_ULL(NPC_IPPROTO_UDP);
477 req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
480 req->features |= BIT_ULL(NPC_IPPROTO_AH);
483 req->features |= BIT_ULL(NPC_IPPROTO_ESP);
489 pkt->etype = cpu_to_be16(ETH_P_IP);
490 pmask->etype = cpu_to_be16(0xFFFF);
491 req->features |= BIT_ULL(NPC_ETYPE);
496 pkt->etype = cpu_to_be16(ETH_P_IP);
497 pmask->etype = cpu_to_be16(0xFFFF);
498 req->features |= BIT_ULL(NPC_ETYPE);
499 if (ipv4_l4_mask->ip4src) {
500 memcpy(&pkt->ip4src, &ipv4_l4_hdr->ip4src,
501 sizeof(pkt->ip4src));
502 memcpy(&pmask->ip4src, &ipv4_l4_mask->ip4src,
503 sizeof(pmask->ip4src));
504 req->features |= BIT_ULL(NPC_SIP_IPV4);
506 if (ipv4_l4_mask->ip4dst) {
507 memcpy(&pkt->ip4dst, &ipv4_l4_hdr->ip4dst,
508 sizeof(pkt->ip4dst));
509 memcpy(&pmask->ip4dst, &ipv4_l4_mask->ip4dst,
510 sizeof(pmask->ip4dst));
511 req->features |= BIT_ULL(NPC_DIP_IPV4);
513 if (ipv4_l4_mask->tos) {
514 pkt->tos = ipv4_l4_hdr->tos;
515 pmask->tos = ipv4_l4_mask->tos;
516 req->features |= BIT_ULL(NPC_TOS);
518 if (ipv4_l4_mask->psrc) {
519 memcpy(&pkt->sport, &ipv4_l4_hdr->psrc,
521 memcpy(&pmask->sport, &ipv4_l4_mask->psrc,
522 sizeof(pmask->sport));
/* Port feature bit depends on the L4 protocol of the flow type. */
523 if (flow_type == UDP_V4_FLOW)
524 req->features |= BIT_ULL(NPC_SPORT_UDP);
525 else if (flow_type == TCP_V4_FLOW)
526 req->features |= BIT_ULL(NPC_SPORT_TCP);
528 req->features |= BIT_ULL(NPC_SPORT_SCTP);
530 if (ipv4_l4_mask->pdst) {
531 memcpy(&pkt->dport, &ipv4_l4_hdr->pdst,
533 memcpy(&pmask->dport, &ipv4_l4_mask->pdst,
534 sizeof(pmask->dport));
535 if (flow_type == UDP_V4_FLOW)
536 req->features |= BIT_ULL(NPC_DPORT_UDP);
537 else if (flow_type == TCP_V4_FLOW)
538 req->features |= BIT_ULL(NPC_DPORT_TCP);
540 req->features |= BIT_ULL(NPC_DPORT_SCTP);
542 if (flow_type == UDP_V4_FLOW)
543 req->features |= BIT_ULL(NPC_IPPROTO_UDP);
544 else if (flow_type == TCP_V4_FLOW)
545 req->features |= BIT_ULL(NPC_IPPROTO_TCP);
547 req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
551 pkt->etype = cpu_to_be16(ETH_P_IP);
552 pmask->etype = cpu_to_be16(0xFFFF);
553 req->features |= BIT_ULL(NPC_ETYPE);
554 if (ah_esp_mask->ip4src) {
555 memcpy(&pkt->ip4src, &ah_esp_hdr->ip4src,
556 sizeof(pkt->ip4src));
557 memcpy(&pmask->ip4src, &ah_esp_mask->ip4src,
558 sizeof(pmask->ip4src));
559 req->features |= BIT_ULL(NPC_SIP_IPV4);
561 if (ah_esp_mask->ip4dst) {
562 memcpy(&pkt->ip4dst, &ah_esp_hdr->ip4dst,
563 sizeof(pkt->ip4dst));
564 memcpy(&pmask->ip4dst, &ah_esp_mask->ip4dst,
565 sizeof(pmask->ip4dst));
566 req->features |= BIT_ULL(NPC_DIP_IPV4);
568 if (ah_esp_mask->tos) {
569 pkt->tos = ah_esp_hdr->tos;
570 pmask->tos = ah_esp_mask->tos;
571 req->features |= BIT_ULL(NPC_TOS);
574 /* NPC profile doesn't extract AH/ESP header fields */
575 if (ah_esp_mask->spi & ah_esp_hdr->spi)
578 if (flow_type == AH_V4_FLOW)
579 req->features |= BIT_ULL(NPC_IPPROTO_AH);
581 req->features |= BIT_ULL(NPC_IPPROTO_ESP);
/* Translate the IPv6 portion of an ethtool flow spec into an NPC
 * install_flow request: for each field with a non-zero mask, copy the
 * value/mask pair into req->packet/req->mask and set the matching
 * NPC_* feature bit.  Handles IPV6_USER_FLOW, TCP/UDP/SCTP_V6_FLOW
 * and AH/ESP_V6_FLOW variants of the union.
 */
590 static int otx2_prepare_ipv6_flow(struct ethtool_rx_flow_spec *fsp,
591 struct npc_install_flow_req *req,
594 struct ethtool_usrip6_spec *ipv6_usr_mask = &fsp->m_u.usr_ip6_spec;
595 struct ethtool_usrip6_spec *ipv6_usr_hdr = &fsp->h_u.usr_ip6_spec;
596 struct ethtool_tcpip6_spec *ipv6_l4_mask = &fsp->m_u.tcp_ip6_spec;
597 struct ethtool_tcpip6_spec *ipv6_l4_hdr = &fsp->h_u.tcp_ip6_spec;
598 struct ethtool_ah_espip6_spec *ah_esp_hdr = &fsp->h_u.ah_ip6_spec;
599 struct ethtool_ah_espip6_spec *ah_esp_mask = &fsp->m_u.ah_ip6_spec;
600 struct flow_msg *pmask = &req->mask;
601 struct flow_msg *pkt = &req->packet;
605 if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6src)) {
606 memcpy(&pkt->ip6src, &ipv6_usr_hdr->ip6src,
607 sizeof(pkt->ip6src));
608 memcpy(&pmask->ip6src, &ipv6_usr_mask->ip6src,
609 sizeof(pmask->ip6src));
610 req->features |= BIT_ULL(NPC_SIP_IPV6);
612 if (!ipv6_addr_any((struct in6_addr *)ipv6_usr_mask->ip6dst)) {
613 memcpy(&pkt->ip6dst, &ipv6_usr_hdr->ip6dst,
614 sizeof(pkt->ip6dst));
615 memcpy(&pmask->ip6dst, &ipv6_usr_mask->ip6dst,
616 sizeof(pmask->ip6dst));
617 req->features |= BIT_ULL(NPC_DIP_IPV6);
619 pkt->etype = cpu_to_be16(ETH_P_IPV6);
620 pmask->etype = cpu_to_be16(0xFFFF);
621 req->features |= BIT_ULL(NPC_ETYPE);
626 pkt->etype = cpu_to_be16(ETH_P_IPV6);
627 pmask->etype = cpu_to_be16(0xFFFF);
628 req->features |= BIT_ULL(NPC_ETYPE);
629 if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6src)) {
630 memcpy(&pkt->ip6src, &ipv6_l4_hdr->ip6src,
631 sizeof(pkt->ip6src));
632 memcpy(&pmask->ip6src, &ipv6_l4_mask->ip6src,
633 sizeof(pmask->ip6src));
634 req->features |= BIT_ULL(NPC_SIP_IPV6);
636 if (!ipv6_addr_any((struct in6_addr *)ipv6_l4_mask->ip6dst)) {
637 memcpy(&pkt->ip6dst, &ipv6_l4_hdr->ip6dst,
638 sizeof(pkt->ip6dst));
639 memcpy(&pmask->ip6dst, &ipv6_l4_mask->ip6dst,
640 sizeof(pmask->ip6dst));
641 req->features |= BIT_ULL(NPC_DIP_IPV6);
643 if (ipv6_l4_mask->psrc) {
644 memcpy(&pkt->sport, &ipv6_l4_hdr->psrc,
646 memcpy(&pmask->sport, &ipv6_l4_mask->psrc,
647 sizeof(pmask->sport));
/* Port feature bit depends on the L4 protocol of the flow type. */
648 if (flow_type == UDP_V6_FLOW)
649 req->features |= BIT_ULL(NPC_SPORT_UDP);
650 else if (flow_type == TCP_V6_FLOW)
651 req->features |= BIT_ULL(NPC_SPORT_TCP);
653 req->features |= BIT_ULL(NPC_SPORT_SCTP);
655 if (ipv6_l4_mask->pdst) {
656 memcpy(&pkt->dport, &ipv6_l4_hdr->pdst,
658 memcpy(&pmask->dport, &ipv6_l4_mask->pdst,
659 sizeof(pmask->dport));
660 if (flow_type == UDP_V6_FLOW)
661 req->features |= BIT_ULL(NPC_DPORT_UDP);
662 else if (flow_type == TCP_V6_FLOW)
663 req->features |= BIT_ULL(NPC_DPORT_TCP);
665 req->features |= BIT_ULL(NPC_DPORT_SCTP);
667 if (flow_type == UDP_V6_FLOW)
668 req->features |= BIT_ULL(NPC_IPPROTO_UDP);
669 else if (flow_type == TCP_V6_FLOW)
670 req->features |= BIT_ULL(NPC_IPPROTO_TCP);
672 req->features |= BIT_ULL(NPC_IPPROTO_SCTP);
676 pkt->etype = cpu_to_be16(ETH_P_IPV6);
677 pmask->etype = cpu_to_be16(0xFFFF);
678 req->features |= BIT_ULL(NPC_ETYPE);
679 if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6src)) {
680 memcpy(&pkt->ip6src, &ah_esp_hdr->ip6src,
681 sizeof(pkt->ip6src));
682 memcpy(&pmask->ip6src, &ah_esp_mask->ip6src,
683 sizeof(pmask->ip6src));
684 req->features |= BIT_ULL(NPC_SIP_IPV6);
686 if (!ipv6_addr_any((struct in6_addr *)ah_esp_hdr->ip6dst)) {
687 memcpy(&pkt->ip6dst, &ah_esp_hdr->ip6dst,
688 sizeof(pkt->ip6dst));
689 memcpy(&pmask->ip6dst, &ah_esp_mask->ip6dst,
690 sizeof(pmask->ip6dst));
691 req->features |= BIT_ULL(NPC_DIP_IPV6);
694 /* NPC profile doesn't extract AH/ESP header fields */
/* Fix: compare the tclass mask against the header value, not the mask
 * against itself (which made the tclass check a pure mask test and
 * diverged from the spi check on the line above and the IPv4 path).
 */
695 if ((ah_esp_mask->spi & ah_esp_hdr->spi) ||
696 (ah_esp_mask->tclass & ah_esp_hdr->tclass))
699 if (flow_type == AH_V6_FLOW)
700 req->features |= BIT_ULL(NPC_IPPROTO_AH);
702 req->features |= BIT_ULL(NPC_IPPROTO_ESP);
/* Top-level ethtool-spec -> NPC request translator.  Handles the
 * Ethernet-header fields here, dispatches IPv4/IPv6 flow types to the
 * per-family helpers, then applies FLOW_EXT (VLAN tci, default-action
 * user data) and FLOW_MAC_EXT (extra DMAC) extensions.
 */
711 int otx2_prepare_flow_request(struct ethtool_rx_flow_spec *fsp,
712 struct npc_install_flow_req *req)
714 struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
715 struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
716 struct flow_msg *pmask = &req->mask;
717 struct flow_msg *pkt = &req->packet;
721 flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
723 /* bits not set in mask are don't care */
725 if (!is_zero_ether_addr(eth_mask->h_source)) {
726 ether_addr_copy(pkt->smac, eth_hdr->h_source);
727 ether_addr_copy(pmask->smac, eth_mask->h_source);
728 req->features |= BIT_ULL(NPC_SMAC);
730 if (!is_zero_ether_addr(eth_mask->h_dest)) {
731 ether_addr_copy(pkt->dmac, eth_hdr->h_dest);
732 ether_addr_copy(pmask->dmac, eth_mask->h_dest);
733 req->features |= BIT_ULL(NPC_DMAC);
735 if (eth_mask->h_proto) {
736 memcpy(&pkt->etype, &eth_hdr->h_proto,
738 memcpy(&pmask->etype, &eth_mask->h_proto,
739 sizeof(pmask->etype));
740 req->features |= BIT_ULL(NPC_ETYPE);
749 ret = otx2_prepare_ipv4_flow(fsp, req, flow_type);
759 ret = otx2_prepare_ipv6_flow(fsp, req, flow_type);
766 if (fsp->flow_type & FLOW_EXT) {
/* Only exact VID matches are supported (mask must be VLAN_VID_MASK). */
767 if (fsp->m_ext.vlan_etype)
769 if (fsp->m_ext.vlan_tci) {
770 if (fsp->m_ext.vlan_tci != cpu_to_be16(VLAN_VID_MASK))
772 if (be16_to_cpu(fsp->h_ext.vlan_tci) >= VLAN_N_VID)
775 memcpy(&pkt->vlan_tci, &fsp->h_ext.vlan_tci,
776 sizeof(pkt->vlan_tci));
777 memcpy(&pmask->vlan_tci, &fsp->m_ext.vlan_tci,
778 sizeof(pmask->vlan_tci));
779 req->features |= BIT_ULL(NPC_OUTER_VID);
782 /* Not Drop/Direct to queue but use action in default entry */
783 if (fsp->m_ext.data[1] &&
784 fsp->h_ext.data[1] == cpu_to_be32(OTX2_DEFAULT_ACTION))
785 req->op = NIX_RX_ACTION_DEFAULT;
788 if (fsp->flow_type & FLOW_MAC_EXT &&
789 !is_zero_ether_addr(fsp->m_ext.h_dest)) {
790 ether_addr_copy(pkt->dmac, fsp->h_ext.h_dest);
791 ether_addr_copy(pmask->dmac, fsp->m_ext.h_dest);
792 req->features |= BIT_ULL(NPC_DMAC);
/* Decide whether an ethtool rule should be installed as a CGX/RPM DMAC
 * filter instead of an NPC MCAM flow: an ETHER_FLOW that matches only a
 * valid destination MAC (no other ether fields masked), with a non-drop,
 * non-VF-redirect action.
 */
801 static int otx2_is_flow_rule_dmacfilter(struct otx2_nic *pfvf,
802 struct ethtool_rx_flow_spec *fsp)
804 struct ethhdr *eth_mask = &fsp->m_u.ether_spec;
805 struct ethhdr *eth_hdr = &fsp->h_u.ether_spec;
806 u64 ring_cookie = fsp->ring_cookie;
809 if (!(pfvf->flags & OTX2_FLAG_DMACFLTR_SUPPORT))
812 flow_type = fsp->flow_type & ~(FLOW_EXT | FLOW_MAC_EXT | FLOW_RSS);
814 /* CGX/RPM block dmac filtering configured for white listing
815 * check for action other than DROP
817 if (flow_type == ETHER_FLOW && ring_cookie != RX_CLS_FLOW_DISC &&
818 !ethtool_get_flow_spec_ring_vf(ring_cookie)) {
819 if (is_zero_ether_addr(eth_mask->h_dest) &&
820 is_valid_ether_addr(eth_hdr->h_dest))
/* Build and send the npc_install_flow request for @flow: translate the
 * ethtool spec, then derive the RX action from the ring cookie
 * (RX_CLS_FLOW_DISC -> drop, FLOW_RSS -> RSS group, otherwise unicast
 * to a queue, with VF redirection validated against pci_num_vf()).
 */
827 static int otx2_add_flow_msg(struct otx2_nic *pfvf, struct otx2_flow *flow)
829 u64 ring_cookie = flow->flow_spec.ring_cookie;
830 struct npc_install_flow_req *req;
833 mutex_lock(&pfvf->mbox.lock);
834 req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
836 mutex_unlock(&pfvf->mbox.lock);
840 err = otx2_prepare_flow_request(&flow->flow_spec, req);
842 /* free the allocated msg above */
843 otx2_mbox_reset(&pfvf->mbox.mbox, 0);
844 mutex_unlock(&pfvf->mbox.lock);
848 req->entry = flow->entry;
849 req->intf = NIX_INTF_RX;
851 req->channel = pfvf->hw.rx_chan_base;
852 if (ring_cookie == RX_CLS_FLOW_DISC) {
853 req->op = NIX_RX_ACTIONOP_DROP;
855 /* change to unicast only if action of default entry is not
858 if (flow->flow_spec.flow_type & FLOW_RSS) {
859 req->op = NIX_RX_ACTIONOP_RSS;
860 req->index = flow->rss_ctx_id;
862 req->op = NIX_RX_ACTIONOP_UCAST;
863 req->index = ethtool_get_flow_spec_ring(ring_cookie);
865 vf = ethtool_get_flow_spec_ring_vf(ring_cookie);
866 if (vf > pci_num_vf(pfvf->pdev)) {
867 mutex_unlock(&pfvf->mbox.lock);
872 /* ethtool ring_cookie has (VF + 1) for VF */
879 /* Send message to AF */
880 err = otx2_sync_mbox_msg(&pfvf->mbox);
881 mutex_unlock(&pfvf->mbox.lock);
/* When the first user DMAC filter is installed, also install a DMAC
 * filter (index 0) for the PF's own MAC so the interface keeps
 * receiving its traffic; the synthetic rule is tracked in flow_list at
 * location ntuple_max_flows.
 */
885 static int otx2_add_flow_with_pfmac(struct otx2_nic *pfvf,
886 struct otx2_flow *flow)
888 struct otx2_flow *pf_mac;
889 struct ethhdr *eth_hdr;
891 pf_mac = kzalloc(sizeof(*pf_mac), GFP_KERNEL);
896 pf_mac->dmac_filter = true;
897 pf_mac->location = pfvf->flow_cfg->ntuple_max_flows;
898 memcpy(&pf_mac->flow_spec, &flow->flow_spec,
899 sizeof(struct ethtool_rx_flow_spec));
900 pf_mac->flow_spec.location = pf_mac->location;
902 /* Copy PF mac address */
903 eth_hdr = &pf_mac->flow_spec.h_u.ether_spec;
904 ether_addr_copy(eth_hdr->h_dest, pfvf->netdev->dev_addr);
906 /* Install DMAC filter with PF mac address */
907 otx2_dmacflt_add(pfvf, eth_hdr->h_dest, 0);
909 otx2_add_flow_to_list(pfvf, pf_mac);
910 pfvf->flow_cfg->nr_flows++;
911 set_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
/* ethtool ETHTOOL_SRXCLSRLINS handler.  Validates the target ring and
 * location, reuses or allocates the otx2_flow node, and routes the rule
 * either to the CGX/RPM DMAC filter path (pure DMAC match) or to an
 * NPC MCAM entry taken from flow_ent[] for ntuple rules.
 */
916 int otx2_add_flow(struct otx2_nic *pfvf, struct ethtool_rxnfc *nfc)
918 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
919 struct ethtool_rx_flow_spec *fsp = &nfc->fs;
920 struct otx2_flow *flow;
921 struct ethhdr *eth_hdr;
926 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
927 if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
930 if (ring >= pfvf->hw.rx_queues && fsp->ring_cookie != RX_CLS_FLOW_DISC)
933 if (fsp->location >= otx2_get_maxflows(flow_cfg))
936 flow = otx2_find_flow(pfvf, fsp->location);
938 flow = kzalloc(sizeof(*flow), GFP_KERNEL);
941 flow->location = fsp->location;
945 flow->flow_spec = *fsp;
947 if (fsp->flow_type & FLOW_RSS)
948 flow->rss_ctx_id = nfc->rss_context;
950 if (otx2_is_flow_rule_dmacfilter(pfvf, &flow->flow_spec)) {
951 eth_hdr = &flow->flow_spec.h_u.ether_spec;
953 /* Sync dmac filter table with updated fields */
954 if (flow->dmac_filter)
955 return otx2_dmacflt_update(pfvf, eth_hdr->h_dest,
958 if (bitmap_full(&flow_cfg->dmacflt_bmap,
959 flow_cfg->dmacflt_max_flows)) {
960 netdev_warn(pfvf->netdev,
961 "Can't insert the rule %d as max allowed dmac filters are %d\n",
963 flow_cfg->dmacflt_max_flows,
964 flow_cfg->dmacflt_max_flows);
971 /* Install PF mac address to DMAC filter list */
972 if (!test_bit(0, &flow_cfg->dmacflt_bmap))
973 otx2_add_flow_with_pfmac(pfvf, flow)
975 flow->dmac_filter = true;
976 flow->entry = find_first_zero_bit(&flow_cfg->dmacflt_bmap,
977 flow_cfg->dmacflt_max_flows);
/* DMAC rules are relocated above the ntuple range. */
978 fsp->location = flow_cfg->ntuple_max_flows + flow->entry;
979 flow->flow_spec.location = fsp->location;
980 flow->location = fsp->location;
982 set_bit(flow->entry, &flow_cfg->dmacflt_bmap);
983 otx2_dmacflt_add(pfvf, eth_hdr->h_dest, flow->entry);
986 if (flow->location >= pfvf->flow_cfg->ntuple_max_flows) {
987 netdev_warn(pfvf->netdev,
988 "Can't insert non dmac ntuple rule at %d, allowed range %d-0\n",
990 flow_cfg->ntuple_max_flows - 1);
993 flow->entry = flow_cfg->flow_ent[flow->location];
994 err = otx2_add_flow_msg(pfvf, flow);
1004 /* add the new flow installed to list */
1006 otx2_add_flow_to_list(pfvf, flow);
1007 flow_cfg->nr_flows++;
/* Ask the AF to delete one MCAM flow (@entry), or all flows of this
 * PF/VF when @all is set.
 */
1013 static int otx2_remove_flow_msg(struct otx2_nic *pfvf, u16 entry, bool all)
1015 struct npc_delete_flow_req *req;
1018 mutex_lock(&pfvf->mbox.lock);
1019 req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
1021 mutex_unlock(&pfvf->mbox.lock);
1029 /* Send message to AF */
1030 err = otx2_sync_mbox_msg(&pfvf->mbox);
1031 mutex_unlock(&pfvf->mbox.lock);
/* Find the synthetic PF-MAC DMAC filter (always at filter index 0) and
 * either delete it (@req == DMAC_ADDR_DEL, also dropping its flow_list
 * node) or refresh it with the current netdev MAC address.
 */
1035 static void otx2_update_rem_pfmac(struct otx2_nic *pfvf, int req)
1037 struct otx2_flow *iter;
1038 struct ethhdr *eth_hdr;
1041 list_for_each_entry(iter, &pfvf->flow_cfg->flow_list, list) {
1042 if (iter->dmac_filter && iter->entry == 0) {
1043 eth_hdr = &iter->flow_spec.h_u.ether_spec;
1044 if (req == DMAC_ADDR_DEL) {
1045 otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
1047 clear_bit(0, &pfvf->flow_cfg->dmacflt_bmap);
1050 ether_addr_copy(eth_hdr->h_dest,
1051 pfvf->netdev->dev_addr);
1052 otx2_dmacflt_update(pfvf, eth_hdr->h_dest, 0);
1059 list_del(&iter->list);
1061 pfvf->flow_cfg->nr_flows--;
/* ethtool ETHTOOL_SRXCLSRLDEL handler: remove the rule at @location.
 * DMAC-filter rules are removed from the CGX/RPM list (refusing to drop
 * the interface's own MAC, and retiring the synthetic PF-MAC entry when
 * it becomes the last one); ntuple rules are deleted via the AF mbox.
 */
1065 int otx2_remove_flow(struct otx2_nic *pfvf, u32 location)
1067 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1068 struct otx2_flow *flow;
1071 if (location >= otx2_get_maxflows(flow_cfg))
1074 flow = otx2_find_flow(pfvf, location);
1078 if (flow->dmac_filter) {
1079 struct ethhdr *eth_hdr = &flow->flow_spec.h_u.ether_spec;
1081 /* user not allowed to remove dmac filter with interface mac */
1082 if (ether_addr_equal(pfvf->netdev->dev_addr, eth_hdr->h_dest))
1085 err = otx2_dmacflt_remove(pfvf, eth_hdr->h_dest,
1087 clear_bit(flow->entry, &flow_cfg->dmacflt_bmap);
1088 /* If all dmac filters are removed delete macfilter with
1089 * interface mac address and configure CGX/RPM block in
1092 if (bitmap_weight(&flow_cfg->dmacflt_bmap,
1093 flow_cfg->dmacflt_max_flows) == 1)
1094 otx2_update_rem_pfmac(pfvf, DMAC_ADDR_DEL);
1096 err = otx2_remove_flow_msg(pfvf, flow->entry, false);
1102 list_del(&flow->list);
1104 flow_cfg->nr_flows--;
/* Remove every flow rule that steers into RSS context @ctx_id; called
 * when the RSS context is being deleted.  Failures are logged, not
 * propagated.
 */
1109 void otx2_rss_ctx_flow_del(struct otx2_nic *pfvf, int ctx_id)
1111 struct otx2_flow *flow, *tmp;
1114 list_for_each_entry_safe(flow, tmp, &pfvf->flow_cfg->flow_list, list) {
1115 if (flow->rss_ctx_id != ctx_id)
1117 err = otx2_remove_flow(pfvf, flow->location);
1119 netdev_warn(pfvf->netdev,
1120 "Can't delete the rule %d associated with this rss group err:%d",
1121 flow->location, err);
/* Delete all ntuple flow rules in one ranged AF request (first to last
 * allocated entry) and free every node on the software flow list.
 */
1125 int otx2_destroy_ntuple_flows(struct otx2_nic *pfvf)
1127 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1128 struct npc_delete_flow_req *req;
1129 struct otx2_flow *iter, *tmp;
1132 if (!(pfvf->flags & OTX2_FLAG_NTUPLE_SUPPORT))
1135 mutex_lock(&pfvf->mbox.lock);
1136 req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
1138 mutex_unlock(&pfvf->mbox.lock);
1142 req->start = flow_cfg->flow_ent[0];
1143 req->end = flow_cfg->flow_ent[flow_cfg->ntuple_max_flows - 1];
1144 err = otx2_sync_mbox_msg(&pfvf->mbox);
1145 mutex_unlock(&pfvf->mbox.lock);
1147 list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
1148 list_del(&iter->list);
1150 flow_cfg->nr_flows--;
/* Full teardown: delete all installed flows, drop every flow_list node,
 * then ask the AF to free all MCAM entries and clear the
 * MCAM_ENTRIES_ALLOC flag.
 */
1155 int otx2_destroy_mcam_flows(struct otx2_nic *pfvf)
1157 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1158 struct npc_mcam_free_entry_req *req;
1159 struct otx2_flow *iter, *tmp;
1162 if (!(pfvf->flags & OTX2_FLAG_MCAM_ENTRIES_ALLOC))
1165 /* remove all flows */
1166 err = otx2_remove_flow_msg(pfvf, 0, true);
1170 list_for_each_entry_safe(iter, tmp, &flow_cfg->flow_list, list) {
1171 list_del(&iter->list);
1173 flow_cfg->nr_flows--;
1176 mutex_lock(&pfvf->mbox.lock);
1177 req = otx2_mbox_alloc_msg_npc_mcam_free_entry(&pfvf->mbox);
1179 mutex_unlock(&pfvf->mbox.lock);
1184 /* Send message to AF to free MCAM entries */
1185 err = otx2_sync_mbox_msg(&pfvf->mbox);
1187 mutex_unlock(&pfvf->mbox.lock);
1191 pfvf->flags &= ~OTX2_FLAG_MCAM_ENTRIES_ALLOC;
1192 mutex_unlock(&pfvf->mbox.lock);
/* Install the rx-vlan strip flow in its reserved default MCAM entry:
 * match outer VID + PF DMAC and enable vtag0 capture/strip via
 * NIX_AF_LFX_RX_VTAG_TYPE0.
 */
1197 int otx2_install_rxvlan_offload_flow(struct otx2_nic *pfvf)
1199 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1200 struct npc_install_flow_req *req;
1203 mutex_lock(&pfvf->mbox.lock);
1204 req = otx2_mbox_alloc_msg_npc_install_flow(&pfvf->mbox);
1206 mutex_unlock(&pfvf->mbox.lock);
1210 req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
1211 req->intf = NIX_INTF_RX;
1212 ether_addr_copy(req->packet.dmac, pfvf->netdev->dev_addr);
1213 eth_broadcast_addr((u8 *)&req->mask.dmac);
1214 req->channel = pfvf->hw.rx_chan_base;
1215 req->op = NIX_RX_ACTION_DEFAULT;
1216 req->features = BIT_ULL(NPC_OUTER_VID) | BIT_ULL(NPC_DMAC);
1217 req->vtag0_valid = true;
1218 req->vtag0_type = NIX_AF_LFX_RX_VTAG_TYPE0;
1220 /* Send message to AF */
1221 err = otx2_sync_mbox_msg(&pfvf->mbox);
1222 mutex_unlock(&pfvf->mbox.lock);
/* Delete the rx-vlan strip flow from its reserved default MCAM entry. */
1226 static int otx2_delete_rxvlan_offload_flow(struct otx2_nic *pfvf)
1228 struct otx2_flow_config *flow_cfg = pfvf->flow_cfg;
1229 struct npc_delete_flow_req *req;
1232 mutex_lock(&pfvf->mbox.lock);
1233 req = otx2_mbox_alloc_msg_npc_delete_flow(&pfvf->mbox);
1235 mutex_unlock(&pfvf->mbox.lock);
1239 req->entry = flow_cfg->def_ent[flow_cfg->rx_vlan_offset];
1240 /* Send message to AF */
1241 err = otx2_sync_mbox_msg(&pfvf->mbox);
1242 mutex_unlock(&pfvf->mbox.lock);
/* Toggle hardware rx-vlan offload: install or delete the MCAM flow,
 * then program the NIX vtag configuration (strip + capture) to match
 * @enable.
 */
1246 int otx2_enable_rxvlan(struct otx2_nic *pf, bool enable)
1248 struct nix_vtag_config *req;
1249 struct mbox_msghdr *rsp_hdr;
1252 /* Dont have enough mcam entries */
1253 if (!(pf->flags & OTX2_FLAG_RX_VLAN_SUPPORT))
1257 err = otx2_install_rxvlan_offload_flow(pf);
1261 err = otx2_delete_rxvlan_offload_flow(pf);
1266 mutex_lock(&pf->mbox.lock);
1267 req = otx2_mbox_alloc_msg_nix_vtag_cfg(&pf->mbox);
1269 mutex_unlock(&pf->mbox.lock);
1273 /* config strip, capture and size */
1274 req->vtag_size = VTAGSIZE_T4;
1275 req->cfg_type = 1; /* rx vlan cfg */
1276 req->rx.vtag_type = NIX_AF_LFX_RX_VTAG_TYPE0;
1277 req->rx.strip_vtag = enable;
1278 req->rx.capture_vtag = enable;
1280 err = otx2_sync_mbox_msg(&pf->mbox);
1282 mutex_unlock(&pf->mbox.lock);
1286 rsp_hdr = otx2_mbox_get_rsp(&pf->mbox.mbox, 0, &req->hdr);
1287 if (IS_ERR(rsp_hdr)) {
1288 mutex_unlock(&pf->mbox.lock);
1289 return PTR_ERR(rsp_hdr);
1292 mutex_unlock(&pf->mbox.lock);
/* Re-program every tracked DMAC filter into the CGX/RPM block; used
 * after an event that wipes the hardware filter table (e.g. link/reset
 * handling in the caller — TODO confirm exact trigger).
 */
1296 void otx2_dmacflt_reinstall_flows(struct otx2_nic *pf)
1298 struct otx2_flow *iter;
1299 struct ethhdr *eth_hdr;
1301 list_for_each_entry(iter, &pf->flow_cfg->flow_list, list) {
1302 if (iter->dmac_filter) {
1303 eth_hdr = &iter->flow_spec.h_u.ether_spec;
1304 otx2_dmacflt_add(pf, eth_hdr->h_dest,
/* Refresh the PF-MAC DMAC filter after the netdev MAC address changes. */
1310 void otx2_dmacflt_update_pfmac_flow(struct otx2_nic *pfvf)
1312 otx2_update_rem_pfmac(pfvf, DMAC_ADDR_UPDATE);