drivers/infiniband/core/lag.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020 Mellanox Technologies. All rights reserved.
 */

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/lag.h>

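/*
 * Build a dummy skb carrying only the headers a RoCEv2 packet would have
 * on the wire: Ethernet, IPv4 or IPv6, and UDP. The skb holds no payload
 * and is never transmitted; it exists solely so the bonding driver can
 * hash it when picking a transmit slave.
 */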
static struct sk_buff *rdma_build_skb(struct ib_device *device,
				      struct net_device *netdev,
				      struct rdma_ah_attr *ah_attr,
				      gfp_t flags)
{
	struct ipv6hdr *ip6h;
	struct sk_buff *skb;
	struct ethhdr *eth;
	struct iphdr *iph;
	struct udphdr *uh;
	u8 smac[ETH_ALEN];
	bool is_ipv4;
	int hdr_len;

	/* A v4-mapped destination GID means the flow runs over IPv4 */
	is_ipv4 = ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw);
	hdr_len = ETH_HLEN + sizeof(struct udphdr) + LL_RESERVED_SPACE(netdev);
	hdr_len += is_ipv4 ? sizeof(struct iphdr) : sizeof(struct ipv6hdr);

	skb = alloc_skb(hdr_len, flags);
	if (!skb)
		return NULL;

	/* Build the headers back to front: UDP, then IP, then Ethernet */
	skb->dev = netdev;
	skb_reserve(skb, hdr_len);
	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);
	uh = udp_hdr(skb);
	uh->source =
		htons(rdma_flow_label_to_udp_sport(ah_attr->grh.flow_label));
	uh->dest = htons(ROCE_V2_UDP_DPORT);
	uh->len = htons(sizeof(struct udphdr));

	if (is_ipv4) {
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		iph = ip_hdr(skb);
		iph->frag_off = 0;
		iph->version = 4;
		iph->protocol = IPPROTO_UDP;
		iph->ihl = 0x5;
		iph->tot_len = htons(sizeof(struct udphdr) +
				     sizeof(struct iphdr));
		/* IPv4 addresses sit in the last 4 bytes of v4-mapped GIDs */
		memcpy(&iph->saddr, ah_attr->grh.sgid_attr->gid.raw + 12,
		       sizeof(struct in_addr));
		memcpy(&iph->daddr, ah_attr->grh.dgid.raw + 12,
		       sizeof(struct in_addr));
	} else {
		skb_push(skb, sizeof(struct ipv6hdr));
		skb_reset_network_header(skb);
		ip6h = ipv6_hdr(skb);
		ip6h->version = 6;
		ip6h->nexthdr = IPPROTO_UDP;
		/* Write the 20-bit flow label into the header, network order */
		ip6h->flow_lbl[0] = (ah_attr->grh.flow_label >> 16) & 0xf;
		ip6h->flow_lbl[1] = (ah_attr->grh.flow_label >> 8) & 0xff;
		ip6h->flow_lbl[2] = ah_attr->grh.flow_label & 0xff;
		memcpy(&ip6h->saddr, ah_attr->grh.sgid_attr->gid.raw,
		       sizeof(struct in6_addr));
		memcpy(&ip6h->daddr, ah_attr->grh.dgid.raw,
		       sizeof(struct in6_addr));
	}

	/* L2 header: source MAC from the GID entry, destination from the AH */
	skb_push(skb, sizeof(struct ethhdr));
	skb_reset_mac_header(skb);
	eth = eth_hdr(skb);
	skb->protocol = eth->h_proto = htons(is_ipv4 ? ETH_P_IP : ETH_P_IPV6);
	rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr, NULL, smac);
	memcpy(eth->h_source, smac, ETH_ALEN);
	memcpy(eth->h_dest, ah_attr->roce.dmac, ETH_ALEN);

	return skb;
}

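/*
 * Ask the bonding master which slave it would use to transmit this flow,
 * feeding it a header-only dummy skb to hash. RDMA_LAG_FLAGS_HASH_ALL_SLAVES
 * makes the hash span all slaves instead of only the active ones. A
 * reference is taken on the returned slave.
 */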
static struct net_device *rdma_get_xmit_slave_udp(struct ib_device *device,
						  struct net_device *master,
						  struct rdma_ah_attr *ah_attr,
						  gfp_t flags)
{
	struct net_device *slave;
	struct sk_buff *skb;

	skb = rdma_build_skb(device, master, ah_attr, flags);
	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Hold the slave while still under RCU so it cannot go away */
	rcu_read_lock();
	slave = netdev_get_xmit_slave(master, skb,
				      !!(device->lag_flags &
					 RDMA_LAG_FLAGS_HASH_ALL_SLAVES));
	if (slave)
		dev_hold(slave);
	rcu_read_unlock();
	kfree_skb(skb);
	return slave;
}

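/**
 * rdma_lag_put_ah_roce_slave - Release the slave netdevice of an AH
 * @xmit_slave: The slave returned by rdma_lag_get_ah_roce_slave(), may be NULL
 *
 * Drops the reference taken when the transmit slave was resolved.
 */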
void rdma_lag_put_ah_roce_slave(struct net_device *xmit_slave)
{
	if (xmit_slave)
		dev_put(xmit_slave);
}

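/**
 * rdma_lag_get_ah_roce_slave - Get the bond slave that transmits for an AH
 * @device: The RDMA device
 * @ah_attr: The address handle attributes of the connection
 * @flags: GFP flags for the dummy skb allocation
 *
 * Only meaningful for RoCEv2 address handles that carry a flow label and
 * whose source GID is backed by a bonding master. Returns the selected
 * slave with a reference held (drop it with rdma_lag_put_ah_roce_slave()),
 * NULL when LAG does not apply, or an ERR_PTR() on failure.
 */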
struct net_device *rdma_lag_get_ah_roce_slave(struct ib_device *device,
					      struct rdma_ah_attr *ah_attr,
					      gfp_t flags)
{
	struct net_device *slave = NULL;
	struct net_device *master;

	/* LAG steering only applies to RoCEv2 AHs that carry a flow label */
	if (!(ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE &&
	      ah_attr->grh.sgid_attr->gid_type == IB_GID_TYPE_ROCE_UDP_ENCAP &&
	      ah_attr->grh.flow_label))
		return NULL;

	rcu_read_lock();
	master = rdma_read_gid_attr_ndev_rcu(ah_attr->grh.sgid_attr);
	if (IS_ERR(master)) {
		rcu_read_unlock();
		return master;
	}
	dev_hold(master);
	rcu_read_unlock();

	if (!netif_is_bond_master(master))
		goto put;

	slave = rdma_get_xmit_slave_udp(device, master, ah_attr, flags);
put:
	dev_put(master);
	return slave;
}
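
/*
 * Usage sketch (illustrative only, not part of this file): a caller such
 * as an AH creation path might resolve the slave once, pass it down so
 * the driver can pin the flow to that bond port, then drop the reference:
 *
 *	struct net_device *slave;
 *
 *	slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr, GFP_KERNEL);
 *	if (IS_ERR(slave))
 *		return ERR_CAST(slave);
 *	... hand @slave to the driver ...
 *	rdma_lag_put_ah_roce_slave(slave);
 */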