1 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
2 /* Copyright (c) 2016 - 2021 Intel Corporation */
13 * irdma_sc_access_ah() - Create, modify or delete AH
14 * @cqp: struct for cqp hw
15 * @info: ah information
17 * @scratch: u64 saved to be used during cqp completion
19 enum irdma_status_code irdma_sc_access_ah(struct irdma_sc_cqp *cqp,
20 struct irdma_ah_info *info,
/*
 * NOTE(review): this listing is elided (the embedded line numbers jump);
 * the remaining parameters (op, scratch), the opening brace and the local
 * declarations (wqe, qw1, qw2) are not visible here.
 */
26 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
/* CQP SQ ring is full — no WQE slot (the '!wqe' guard line is elided). */
28 return IRDMA_ERR_RING_FULL;
/* QW0: destination MAC address, shifted up 16 bits within the quadword. */
30 set_64bit_val(wqe, 0, ether_addr_to_u64(info->mac_addr) << 16);
/* QW1: low 16 bits of the PD index, traffic class/ToS and VLAN tag. */
31 qw1 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXLO, info->pd_idx) |
32 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_TC, info->tc_tos) |
33 FIELD_PREP(IRDMA_UDAQPC_VLANTAG, info->vlan_tag);
/*
 * QW2: ARP cache index of the destination, flow label, hop limit/TTL,
 * and the upper bits of the PD index (pd_idx >> 16 pairs with PDINDEXLO).
 */
35 qw2 = FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ARPINDEX, info->dst_arpindex) |
36 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_FLOWLABEL, info->flow_label) |
37 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_HOPLIMIT, info->hop_ttl) |
38 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_PDINDEXHI, info->pd_idx >> 16);
/* IPv6: 128-bit destination split across offsets 40 and 32. */
40 if (!info->ipv4_valid) {
41 set_64bit_val(wqe, 40,
42 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
43 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
44 set_64bit_val(wqe, 32,
45 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
46 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
/* IPv6: 128-bit source split across offsets 56 and 48. */
48 set_64bit_val(wqe, 56,
49 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->src_ip_addr[0]) |
50 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->src_ip_addr[1]));
51 set_64bit_val(wqe, 48,
52 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->src_ip_addr[2]) |
53 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[3]));
/*
 * IPv4 path (the 'else' line is elided): a single 32-bit address goes
 * into the ADDR3 slot of each address quadword.
 */
55 set_64bit_val(wqe, 32,
56 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
58 set_64bit_val(wqe, 48,
59 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->src_ip_addr[0]));
62 set_64bit_val(wqe, 8, qw1);
63 set_64bit_val(wqe, 16, qw2);
/* Order all payload writes before the header write that carries the
 * valid bit — HW must not see a valid header with stale payload.
 */
65 dma_wmb(); /* need write block before writing WQE header */
/*
 * WQE header fields (the enclosing set_64bit_val(wqe, 24, ...) call is
 * elided): valid bit is set from the ring polarity, plus opcode,
 * loopback, IPv4 flag, AH index and VLAN-insert flag.
 */
69 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_WQEVALID, cqp->polarity) |
70 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_OPCODE, op) |
71 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_DOLOOPBACKK, info->do_lpbk) |
72 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_IPV4VALID, info->ipv4_valid) |
73 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_AVIDX, info->ah_idx) |
74 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_INSERTVLANTAG, info->insert_vlan_tag));
76 print_hex_dump_debug("WQE: MANAGE_AH WQE", DUMP_PREFIX_OFFSET, 16, 8,
77 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
/* Hand the WQE to the CQP hardware. */
78 irdma_sc_cqp_post_sq(cqp);
84 * irdma_create_mg_ctx() - create a mcg context
85 * @info: multicast group context info
87 static enum irdma_status_code
88 irdma_create_mg_ctx(struct irdma_mcast_grp_info *info)
/* NOTE(review): listing is elided — the opening brace is not shown. */
90 struct irdma_mcast_grp_ctx_entry_info *entry_info = NULL;
91 u8 idx = 0; /* index in the array */
92 u8 ctx_idx = 0; /* index in the MG context */
/* Clear the DMA-visible context area before repacking valid entries. */
94 memset(info->dma_mem_mc.va, 0, IRDMA_MAX_MGS_PER_CTX * sizeof(u64));
/*
 * Compact pass: write each valid entry into consecutive u64 slots of the
 * DMA context. (The ctx_idx increment sits on an elided line — presumably
 * bumped once per valid entry; TODO confirm against the full source.)
 */
96 for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
97 entry_info = &info->mg_ctx_info[idx];
98 if (entry_info->valid_entry) {
/* One u64 per entry: dest port, valid bit and QP id packed together. */
99 set_64bit_val((__le64 *)info->dma_mem_mc.va,
100 ctx_idx * sizeof(u64),
101 FIELD_PREP(IRDMA_UDA_MGCTX_DESTPORT, entry_info->dest_port) |
102 FIELD_PREP(IRDMA_UDA_MGCTX_VALIDENT, entry_info->valid_entry) |
103 FIELD_PREP(IRDMA_UDA_MGCTX_QPID, entry_info->qp_id));
112 * irdma_access_mcast_grp() - Access mcast group based on op
114 * @info: multicast group context info
115 * @op: operation to perform
116 * @scratch: u64 saved to be used during cqp completion
118 enum irdma_status_code irdma_access_mcast_grp(struct irdma_sc_cqp *cqp,
119 struct irdma_mcast_grp_info *info,
/*
 * NOTE(review): listing is elided — remaining parameters, opening brace
 * and the 'wqe' declaration are not shown.
 */
123 enum irdma_status_code ret_code = 0;
/* Reject multicast group ids beyond what the FSI supports. */
125 if (info->mg_id >= IRDMA_UDA_MAX_FSI_MGS) {
126 ibdev_dbg(to_ibdev(cqp->dev), "WQE: mg_id out of range\n");
127 return IRDMA_ERR_PARAM;
130 wqe = irdma_sc_cqp_get_next_send_wqe(cqp, scratch);
/* CQP SQ ring is full (the '!wqe' guard line is elided above). */
132 ibdev_dbg(to_ibdev(cqp->dev), "WQE: ring full\n");
133 return IRDMA_ERR_RING_FULL;
/* Repack the group's member entries into the DMA context buffer.
 * (The error-return check on ret_code is on an elided line.)
 */
136 ret_code = irdma_create_mg_ctx(info);
/* Physical address of the member context the HW will DMA from. */
140 set_64bit_val(wqe, 32, info->dma_mem_mc.pa);
141 set_64bit_val(wqe, 16,
142 FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANID, info->vlan_id) |
143 FIELD_PREP(IRDMA_UDA_CQPSQ_QS_HANDLE, info->qs_handle));
/* QW0: destination multicast MAC address. */
144 set_64bit_val(wqe, 0, ether_addr_to_u64(info->dest_mac_addr));
145 set_64bit_val(wqe, 8,
146 FIELD_PREP(IRDMA_UDA_CQPSQ_MG_HMC_FCN_ID, info->hmc_fcn_id));
/* IPv6: 128-bit destination split across offsets 56 and 48. */
148 if (!info->ipv4_valid) {
149 set_64bit_val(wqe, 56,
150 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR0, info->dest_ip_addr[0]) |
151 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR1, info->dest_ip_addr[1]));
152 set_64bit_val(wqe, 48,
153 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR2, info->dest_ip_addr[2]) |
154 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[3]));
/* IPv4 path (the 'else' line is elided): single address in ADDR3. */
156 set_64bit_val(wqe, 48,
157 FIELD_PREP(IRDMA_UDA_CQPSQ_MAV_ADDR3, info->dest_ip_addr[0]));
/* Order payload writes before the header write carrying the valid bit. */
160 dma_wmb(); /* need write memory block before writing the WQE header. */
/* WQE header: valid bit from ring polarity, opcode, group index, flags. */
162 set_64bit_val(wqe, 24,
163 FIELD_PREP(IRDMA_UDA_CQPSQ_MG_WQEVALID, cqp->polarity) |
164 FIELD_PREP(IRDMA_UDA_CQPSQ_MG_OPCODE, op) |
165 FIELD_PREP(IRDMA_UDA_CQPSQ_MG_MGIDX, info->mg_id) |
166 FIELD_PREP(IRDMA_UDA_CQPSQ_MG_VLANVALID, info->vlan_valid) |
167 FIELD_PREP(IRDMA_UDA_CQPSQ_MG_IPV4VALID, info->ipv4_valid));
169 print_hex_dump_debug("WQE: MANAGE_MCG WQE", DUMP_PREFIX_OFFSET, 16, 8,
170 wqe, IRDMA_CQP_WQE_SIZE * 8, false);
171 print_hex_dump_debug("WQE: MCG_HOST CTX WQE", DUMP_PREFIX_OFFSET, 16,
172 8, info->dma_mem_mc.va,
173 IRDMA_MAX_MGS_PER_CTX * 8, false);
/* Hand the WQE to the CQP hardware. */
174 irdma_sc_cqp_post_sq(cqp);
180 * irdma_compare_mgs - Compares two multicast group structures
181 * @entry1: Multicast group info
182 * @entry2: Multicast group info in context
/*
 * Two entries match when both the destination port and the QP id are
 * equal. (The 'return true;' / 'return false;' lines are elided in this
 * listing.)
 */
184 static bool irdma_compare_mgs(struct irdma_mcast_grp_ctx_entry_info *entry1,
185 struct irdma_mcast_grp_ctx_entry_info *entry2)
187 if (entry1->dest_port == entry2->dest_port &&
188 entry1->qp_id == entry2->qp_id)
195 * irdma_sc_add_mcast_grp - Allocates mcast group entry in ctx
196 * @ctx: Multicast group context
197 * @mg: Multicast group info
199 enum irdma_status_code irdma_sc_add_mcast_grp(struct irdma_mcast_grp_info *ctx,
200 struct irdma_mcast_grp_ctx_entry_info *mg)
/* NOTE(review): listing is elided — opening brace and the 'idx'
 * declaration are not shown.
 */
203 bool free_entry_found = false;
204 u32 free_entry_idx = 0;
206 /* find either an identical or a free entry for a multicast group */
207 for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
208 if (ctx->mg_ctx_info[idx].valid_entry) {
/* Same (dest_port, qp_id) already present: just bump the refcount.
 * (The success 'return 0;' is on an elided line.)
 */
209 if (irdma_compare_mgs(&ctx->mg_ctx_info[idx], mg)) {
210 ctx->mg_ctx_info[idx].use_cnt++;
/* Remember only the FIRST free slot encountered. */
215 if (!free_entry_found) {
216 free_entry_found = true;
217 free_entry_idx = idx;
/* No identical entry: claim the free slot with refcount 1.
 * (The 'return 0;' after this assignment block is elided.)
 */
221 if (free_entry_found) {
222 ctx->mg_ctx_info[free_entry_idx] = *mg;
223 ctx->mg_ctx_info[free_entry_idx].valid_entry = true;
224 ctx->mg_ctx_info[free_entry_idx].use_cnt = 1;
/* Context is full: no identical entry and no free slot. */
229 return IRDMA_ERR_NO_MEMORY;
233 * irdma_sc_del_mcast_grp - Delete mcast group
234 * @ctx: Multicast group context
235 * @mg: Multicast group info
237 * Finds and removes a specific multicast group from context, all
238 * parameters must match to remove a multicast group.
240 enum irdma_status_code irdma_sc_del_mcast_grp(struct irdma_mcast_grp_info *ctx,
241 struct irdma_mcast_grp_ctx_entry_info *mg)
/* NOTE(review): listing is elided — opening brace and the 'idx'
 * declaration are not shown.
 */
245 /* find an entry in multicast group context */
246 for (idx = 0; idx < IRDMA_MAX_MGS_PER_CTX; idx++) {
/* Skip empty slots (the 'continue;' after this test is elided). */
247 if (!ctx->mg_ctx_info[idx].valid_entry)
250 if (irdma_compare_mgs(mg, &ctx->mg_ctx_info[idx])) {
/* Matching entry found: drop one reference. */
251 ctx->mg_ctx_info[idx].use_cnt--;
/* Last reference gone: invalidate the slot and keep the valid
 * entries contiguous by moving the tail entry into the gap.
 */
253 if (!ctx->mg_ctx_info[idx].use_cnt) {
254 ctx->mg_ctx_info[idx].valid_entry = false;
256 /* Remove gap if element was not the last */
257 if (idx != ctx->no_of_mgs &&
258 ctx->no_of_mgs > 0) {
259 memcpy(&ctx->mg_ctx_info[idx],
260 &ctx->mg_ctx_info[ctx->no_of_mgs - 1],
261 sizeof(ctx->mg_ctx_info[idx]));
262 ctx->mg_ctx_info[ctx->no_of_mgs - 1].valid_entry = false;
/* No entry matched all parameters — nothing was removed. */
270 return IRDMA_ERR_PARAM;