1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
3 * Copyright (c) 2004, 2011 Intel Corporation. All rights reserved.
4 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
5 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
6 * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
11 #include <rdma/ib_mad.h>
12 #include <rdma/ib_cm.h>
15 * Parameters to routines below should be in network-byte order, and values
16 * are returned in network-byte order.
19 #define IB_CM_CLASS_VERSION 2 /* IB specification 1.2 */
/*
 * NOTE(review): fragment of struct cm_req_msg — the on-the-wire layout of
 * an IB CM Connection Request (REQ) MAD.  Fields are raw big-endian bytes;
 * multi-field bytes/words are packed and unpacked by the cm_req_*()
 * accessors below.  This extraction dropped the struct header line,
 * several offsetNN field declarations, and the closing brace — restore
 * them from the original header before compiling.
 */
22 struct ib_mad_hdr hdr;
30 /* local QPN:24, responder resources:8 */
32 /* local EECN:24, initiator depth:8 */
35 * remote EECN:24, remote CM response timeout:5,
36 * transport service type:2, end-to-end flow control:1
39 /* starting PSN:24, local CM response timeout:5, retry count:3 */
42 /* path MTU:4, RDC exists:1, RNR retry count:3. */
44 /* max CM Retries:4, SRQ:1, extended transport type:3 */
47 __be16 primary_local_lid;
48 __be16 primary_remote_lid;
49 union ib_gid primary_local_gid;
50 union ib_gid primary_remote_gid;
51 /* flow label:20, rsvd:6, packet rate:6 */
52 __be32 primary_offset88;
53 u8 primary_traffic_class;
55 /* SL:4, subnet local:1, rsvd:3 */
57 /* local ACK timeout:5, rsvd:3 */
/* Alternate-path counterpart of the primary_* fields above. */
61 __be16 alt_remote_lid;
62 union ib_gid alt_local_gid;
63 union ib_gid alt_remote_gid;
64 /* flow label:20, rsvd:6, packet rate:6 */
68 /* SL:4, subnet local:1, rsvd:3 */
70 /* local ACK timeout:5, rsvd:3 */
73 u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
/*
 * NOTE(review): REQ accessors for the offset32/offset36/offset40 packed
 * words.  The extraction dropped every function's braces and some mask
 * continuation lines — the setters below are visibly truncated mid-
 * expression (the preserved-bits mask constants are missing).
 */
/* Extract the 24-bit local QPN from the top 3 bytes of offset32. */
77 static inline __be32 cm_req_get_local_qpn(struct cm_req_msg *req_msg)
79 return cpu_to_be32(be32_to_cpu(req_msg->offset32) >> 8);
/* Store QPN in the top 3 bytes, preserving the low byte (resp res). */
82 static inline void cm_req_set_local_qpn(struct cm_req_msg *req_msg, __be32 qpn)
84 req_msg->offset32 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
85 (be32_to_cpu(req_msg->offset32) &
/* Responder resources live in the low byte of offset32. */
89 static inline u8 cm_req_get_resp_res(struct cm_req_msg *req_msg)
91 return (u8) be32_to_cpu(req_msg->offset32);
94 static inline void cm_req_set_resp_res(struct cm_req_msg *req_msg, u8 resp_res)
96 req_msg->offset32 = cpu_to_be32(resp_res |
97 (be32_to_cpu(req_msg->offset32) &
/* Initiator depth lives in the low byte of offset36. */
101 static inline u8 cm_req_get_init_depth(struct cm_req_msg *req_msg)
103 return (u8) be32_to_cpu(req_msg->offset36);
/* NOTE(review): the "u8 init_depth" parameter line was dropped here. */
106 static inline void cm_req_set_init_depth(struct cm_req_msg *req_msg,
109 req_msg->offset36 = cpu_to_be32(init_depth |
110 (be32_to_cpu(req_msg->offset36) &
/* Remote CM response timeout: bits 3..7 of the low byte of offset40. */
114 static inline u8 cm_req_get_remote_resp_timeout(struct cm_req_msg *req_msg)
116 return (u8) ((be32_to_cpu(req_msg->offset40) & 0xF8) >> 3);
119 static inline void cm_req_set_remote_resp_timeout(struct cm_req_msg *req_msg,
122 req_msg->offset40 = cpu_to_be32((resp_timeout << 3) |
123 (be32_to_cpu(req_msg->offset40) &
/*
 * Map the REQ's transport service type (offset40 bits 1..2) to an
 * ib_qp_type: 0 -> RC, 1 -> UC; the XRC case additionally inspects the
 * extended transport type in offset51 (low 3 bits, 1 -> XRC_TGT).
 * NOTE(review): the extraction dropped the braces, the XRC case label,
 * and the default arms of both switches — restore from the original.
 */
127 static inline enum ib_qp_type cm_req_get_qp_type(struct cm_req_msg *req_msg)
/* (u8) binds to (x & 0x06) before the >> 1; result fits in u8 either way. */
129 u8 transport_type = (u8) (be32_to_cpu(req_msg->offset40) & 0x06) >> 1;
130 switch(transport_type) {
131 case 0: return IB_QPT_RC;
132 case 1: return IB_QPT_UC;
134 switch (req_msg->offset51 & 0x7) {
135 case 1: return IB_QPT_XRC_TGT;
/*
 * Inverse of the getter: write transport type bits 1..2 of offset40,
 * and for XRC_INI also set extended transport type 1 in offset51.
 * NOTE(review): case labels, masks/or-values, breaks and braces were
 * dropped by the extraction — only the assignment skeletons remain.
 */
142 static inline void cm_req_set_qp_type(struct cm_req_msg *req_msg,
143 enum ib_qp_type qp_type)
147 req_msg->offset40 = cpu_to_be32((be32_to_cpu(
152 req_msg->offset40 = cpu_to_be32((be32_to_cpu(
155 req_msg->offset51 = (req_msg->offset51 & 0xF8) | 1;
158 req_msg->offset40 = cpu_to_be32(be32_to_cpu(
/*
 * REQ accessors for offset40 (flow control), offset44 (starting PSN /
 * local response timeout / retry count) and the single-byte offset50 /
 * offset51 fields (path MTU, RNR retry, max CM retries, SRQ).
 * NOTE(review): braces and some parameter/mask lines dropped by the
 * extraction, as elsewhere in this file.
 */
/* End-to-end flow control: bit 0 of offset40. */
164 static inline u8 cm_req_get_flow_ctrl(struct cm_req_msg *req_msg)
166 return be32_to_cpu(req_msg->offset40) & 0x1;
169 static inline void cm_req_set_flow_ctrl(struct cm_req_msg *req_msg,
172 req_msg->offset40 = cpu_to_be32((flow_ctrl & 0x1) |
173 (be32_to_cpu(req_msg->offset40) &
/* Starting PSN: top 3 bytes of offset44. */
177 static inline __be32 cm_req_get_starting_psn(struct cm_req_msg *req_msg)
179 return cpu_to_be32(be32_to_cpu(req_msg->offset44) >> 8);
182 static inline void cm_req_set_starting_psn(struct cm_req_msg *req_msg,
185 req_msg->offset44 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
186 (be32_to_cpu(req_msg->offset44) & 0x000000FF));
/* Local CM response timeout: bits 3..7 of offset44's low byte. */
189 static inline u8 cm_req_get_local_resp_timeout(struct cm_req_msg *req_msg)
191 return (u8) ((be32_to_cpu(req_msg->offset44) & 0xF8) >> 3);
194 static inline void cm_req_set_local_resp_timeout(struct cm_req_msg *req_msg,
197 req_msg->offset44 = cpu_to_be32((resp_timeout << 3) |
198 (be32_to_cpu(req_msg->offset44) & 0xFFFFFF07));
/* Retry count: low 3 bits of offset44. */
201 static inline u8 cm_req_get_retry_count(struct cm_req_msg *req_msg)
203 return (u8) (be32_to_cpu(req_msg->offset44) & 0x7);
206 static inline void cm_req_set_retry_count(struct cm_req_msg *req_msg,
209 req_msg->offset44 = cpu_to_be32((retry_count & 0x7) |
210 (be32_to_cpu(req_msg->offset44) & 0xFFFFFFF8));
/* Path MTU: high nibble of offset50. */
213 static inline u8 cm_req_get_path_mtu(struct cm_req_msg *req_msg)
215 return req_msg->offset50 >> 4;
218 static inline void cm_req_set_path_mtu(struct cm_req_msg *req_msg, u8 path_mtu)
220 req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF) | (path_mtu << 4));
/* RNR retry count: low 3 bits of offset50. */
223 static inline u8 cm_req_get_rnr_retry_count(struct cm_req_msg *req_msg)
225 return req_msg->offset50 & 0x7;
228 static inline void cm_req_set_rnr_retry_count(struct cm_req_msg *req_msg,
231 req_msg->offset50 = (u8) ((req_msg->offset50 & 0xF8) |
232 (rnr_retry_count & 0x7));
/* Max CM retries: high nibble of offset51. */
235 static inline u8 cm_req_get_max_cm_retries(struct cm_req_msg *req_msg)
237 return req_msg->offset51 >> 4;
240 static inline void cm_req_set_max_cm_retries(struct cm_req_msg *req_msg,
243 req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF) | (retries << 4));
/* SRQ flag: bit 3 of offset51. */
246 static inline u8 cm_req_get_srq(struct cm_req_msg *req_msg)
248 return (req_msg->offset51 & 0x8) >> 3;
/* NOTE(review): the "((srq & 0x1) << 3));" continuation line was dropped. */
251 static inline void cm_req_set_srq(struct cm_req_msg *req_msg, u8 srq)
253 req_msg->offset51 = (u8) ((req_msg->offset51 & 0xF7) |
/*
 * Primary-path accessors: flow label / packet rate packed in
 * primary_offset88, SL + subnet-local in primary_offset94, local ACK
 * timeout in primary_offset95.  NOTE(review): braces, some parameter
 * lines and some mask constants were dropped by the extraction.
 */
/* Flow label: top 20 bits of primary_offset88. */
257 static inline __be32 cm_req_get_primary_flow_label(struct cm_req_msg *req_msg)
259 return cpu_to_be32(be32_to_cpu(req_msg->primary_offset88) >> 12);
262 static inline void cm_req_set_primary_flow_label(struct cm_req_msg *req_msg,
265 req_msg->primary_offset88 = cpu_to_be32(
266 (be32_to_cpu(req_msg->primary_offset88) &
268 (be32_to_cpu(flow_label) << 12));
/* Packet rate: low 6 bits of primary_offset88. */
271 static inline u8 cm_req_get_primary_packet_rate(struct cm_req_msg *req_msg)
273 return (u8) (be32_to_cpu(req_msg->primary_offset88) & 0x3F);
276 static inline void cm_req_set_primary_packet_rate(struct cm_req_msg *req_msg,
279 req_msg->primary_offset88 = cpu_to_be32(
280 (be32_to_cpu(req_msg->primary_offset88) &
281 0xFFFFFFC0) | (rate & 0x3F));
/* SL: high nibble of primary_offset94. */
284 static inline u8 cm_req_get_primary_sl(struct cm_req_msg *req_msg)
286 return (u8) (req_msg->primary_offset94 >> 4);
/* NOTE(review): the "(sl << 4));" continuation line was dropped. */
289 static inline void cm_req_set_primary_sl(struct cm_req_msg *req_msg, u8 sl)
291 req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0x0F) |
/* Subnet-local flag: bit 3 of primary_offset94. */
295 static inline u8 cm_req_get_primary_subnet_local(struct cm_req_msg *req_msg)
297 return (u8) ((req_msg->primary_offset94 & 0x08) >> 3);
300 static inline void cm_req_set_primary_subnet_local(struct cm_req_msg *req_msg,
303 req_msg->primary_offset94 = (u8) ((req_msg->primary_offset94 & 0xF7) |
304 ((subnet_local & 0x1) << 3));
/* Local ACK timeout: bits 3..7 of primary_offset95. */
307 static inline u8 cm_req_get_primary_local_ack_timeout(struct cm_req_msg *req_msg)
309 return (u8) (req_msg->primary_offset95 >> 3);
312 static inline void cm_req_set_primary_local_ack_timeout(struct cm_req_msg *req_msg,
313 u8 local_ack_timeout)
315 req_msg->primary_offset95 = (u8) ((req_msg->primary_offset95 & 0x07) |
316 (local_ack_timeout << 3));
/*
 * Alternate-path accessors — mirror of the primary-path helpers above,
 * operating on alt_offset132 / alt_offset138 / alt_offset139.
 * NOTE(review): braces, some parameter lines and the flow-label mask
 * constant (between original lines 328 and 330) were dropped.
 */
/* Flow label: top 20 bits of alt_offset132. */
319 static inline __be32 cm_req_get_alt_flow_label(struct cm_req_msg *req_msg)
321 return cpu_to_be32(be32_to_cpu(req_msg->alt_offset132) >> 12);
324 static inline void cm_req_set_alt_flow_label(struct cm_req_msg *req_msg,
327 req_msg->alt_offset132 = cpu_to_be32(
328 (be32_to_cpu(req_msg->alt_offset132) &
330 (be32_to_cpu(flow_label) << 12));
/* Packet rate: low 6 bits of alt_offset132. */
333 static inline u8 cm_req_get_alt_packet_rate(struct cm_req_msg *req_msg)
335 return (u8) (be32_to_cpu(req_msg->alt_offset132) & 0x3F);
338 static inline void cm_req_set_alt_packet_rate(struct cm_req_msg *req_msg,
341 req_msg->alt_offset132 = cpu_to_be32(
342 (be32_to_cpu(req_msg->alt_offset132) &
343 0xFFFFFFC0) | (rate & 0x3F));
/* SL: high nibble of alt_offset138. */
346 static inline u8 cm_req_get_alt_sl(struct cm_req_msg *req_msg)
348 return (u8) (req_msg->alt_offset138 >> 4);
/* NOTE(review): the "(sl << 4));" continuation line was dropped. */
351 static inline void cm_req_set_alt_sl(struct cm_req_msg *req_msg, u8 sl)
353 req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0x0F) |
/* Subnet-local flag: bit 3 of alt_offset138. */
357 static inline u8 cm_req_get_alt_subnet_local(struct cm_req_msg *req_msg)
359 return (u8) ((req_msg->alt_offset138 & 0x08) >> 3);
362 static inline void cm_req_set_alt_subnet_local(struct cm_req_msg *req_msg,
365 req_msg->alt_offset138 = (u8) ((req_msg->alt_offset138 & 0xF7) |
366 ((subnet_local & 0x1) << 3));
/* Local ACK timeout: bits 3..7 of alt_offset139. */
369 static inline u8 cm_req_get_alt_local_ack_timeout(struct cm_req_msg *req_msg)
371 return (u8) (req_msg->alt_offset139 >> 3);
374 static inline void cm_req_set_alt_local_ack_timeout(struct cm_req_msg *req_msg,
375 u8 local_ack_timeout)
377 req_msg->alt_offset139 = (u8) ((req_msg->alt_offset139 & 0x07) |
378 (local_ack_timeout << 3));
/*
 * Which CM message a REJ/MRA refers to (2-bit "message MRAed/REJected"
 * field used by the cm_mra_*/cm_rej_* accessors below).
 * NOTE(review): the closing "};" of this enum was dropped by the extraction.
 */
381 /* Message REJected or MRAed */
382 enum cm_msg_response {
383 CM_MSG_RESPONSE_REQ = 0x0,
384 CM_MSG_RESPONSE_REP = 0x1,
385 CM_MSG_RESPONSE_OTHER = 0x2
/*
 * NOTE(review): fragment of struct cm_mra_msg (Message Receipt Ack) —
 * the struct header, the offset8/offset9 field declarations and the
 * closing brace were dropped by the extraction.
 */
389 struct ib_mad_hdr hdr;
391 __be32 local_comm_id;
392 __be32 remote_comm_id;
393 /* message MRAed:2, rsvd:6 */
395 /* service timeout:5, rsvd:3 */
398 u8 private_data[IB_CM_MRA_PRIVATE_DATA_SIZE];
/* MRA accessors (braces dropped by the extraction, as elsewhere). */
/* Which message is being MRAed: top 2 bits of offset8 (enum cm_msg_response). */
402 static inline u8 cm_mra_get_msg_mraed(struct cm_mra_msg *mra_msg)
404 return (u8) (mra_msg->offset8 >> 6);
407 static inline void cm_mra_set_msg_mraed(struct cm_mra_msg *mra_msg, u8 msg)
409 mra_msg->offset8 = (u8) ((mra_msg->offset8 & 0x3F) | (msg << 6));
/* Service timeout: bits 3..7 of offset9. */
412 static inline u8 cm_mra_get_service_timeout(struct cm_mra_msg *mra_msg)
414 return (u8) (mra_msg->offset9 >> 3);
/* NOTE(review): the "u8 service_timeout" parameter line was dropped. */
417 static inline void cm_mra_set_service_timeout(struct cm_mra_msg *mra_msg,
420 mra_msg->offset9 = (u8) ((mra_msg->offset9 & 0x07) |
421 (service_timeout << 3));
/*
 * NOTE(review): fragment of struct cm_rej_msg (connection Reject) —
 * struct header, offset8/offset9 and reason field declarations, and
 * the closing brace were dropped by the extraction.
 */
425 struct ib_mad_hdr hdr;
427 __be32 local_comm_id;
428 __be32 remote_comm_id;
429 /* message REJected:2, rsvd:6 */
431 /* reject info length:7, rsvd:1. */
434 u8 ari[IB_CM_REJ_ARI_LENGTH];
436 u8 private_data[IB_CM_REJ_PRIVATE_DATA_SIZE];
/* REJ accessors (braces dropped by the extraction). */
/* Which message is being rejected: top 2 bits of offset8. */
440 static inline u8 cm_rej_get_msg_rejected(struct cm_rej_msg *rej_msg)
442 return (u8) (rej_msg->offset8 >> 6);
445 static inline void cm_rej_set_msg_rejected(struct cm_rej_msg *rej_msg, u8 msg)
447 rej_msg->offset8 = (u8) ((rej_msg->offset8 & 0x3F) | (msg << 6));
/* ARI (additional reject info) length: bits 1..7 of offset9. */
450 static inline u8 cm_rej_get_reject_info_len(struct cm_rej_msg *rej_msg)
452 return (u8) (rej_msg->offset9 >> 1);
/* NOTE(review): the "u8 len" parameter line was dropped. */
455 static inline void cm_rej_set_reject_info_len(struct cm_rej_msg *rej_msg,
458 rej_msg->offset9 = (u8) ((rej_msg->offset9 & 0x1) | (len << 1));
/*
 * NOTE(review): fragment of struct cm_rep_msg (connection Reply) —
 * struct header, the offset12/16/20/26/27 and resp_resources /
 * initiator_depth field declarations, and the closing brace were
 * dropped by the extraction.
 */
462 struct ib_mad_hdr hdr;
464 __be32 local_comm_id;
465 __be32 remote_comm_id;
467 /* local QPN:24, rsvd:8 */
469 /* local EECN:24, rsvd:8 */
471 /* starting PSN:24 rsvd:8 */
475 /* target ACK delay:5, failover accepted:2, end-to-end flow control:1 */
477 /* RNR retry count:3, SRQ:1, rsvd:5 */
479 __be64 local_ca_guid;
481 u8 private_data[IB_CM_REP_PRIVATE_DATA_SIZE];
/*
 * REP accessors (braces and some parameter/continuation lines dropped
 * by the extraction, as elsewhere in this file).
 */
/* Local QPN: top 3 bytes of offset12. */
485 static inline __be32 cm_rep_get_local_qpn(struct cm_rep_msg *rep_msg)
487 return cpu_to_be32(be32_to_cpu(rep_msg->offset12) >> 8);
490 static inline void cm_rep_set_local_qpn(struct cm_rep_msg *rep_msg, __be32 qpn)
492 rep_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
493 (be32_to_cpu(rep_msg->offset12) & 0x000000FF));
/* Local EECN: top 3 bytes of offset16 (used as the QPN for XRC INI). */
496 static inline __be32 cm_rep_get_local_eecn(struct cm_rep_msg *rep_msg)
498 return cpu_to_be32(be32_to_cpu(rep_msg->offset16) >> 8);
501 static inline void cm_rep_set_local_eecn(struct cm_rep_msg *rep_msg, __be32 eecn)
503 rep_msg->offset16 = cpu_to_be32((be32_to_cpu(eecn) << 8) |
504 (be32_to_cpu(rep_msg->offset16) & 0x000000FF));
/* QPN to connect to: the EECN field for XRC initiators, else the QPN field. */
507 static inline __be32 cm_rep_get_qpn(struct cm_rep_msg *rep_msg, enum ib_qp_type qp_type)
509 return (qp_type == IB_QPT_XRC_INI) ?
510 cm_rep_get_local_eecn(rep_msg) : cm_rep_get_local_qpn(rep_msg);
/* Starting PSN: top 3 bytes of offset20. */
513 static inline __be32 cm_rep_get_starting_psn(struct cm_rep_msg *rep_msg)
515 return cpu_to_be32(be32_to_cpu(rep_msg->offset20) >> 8);
518 static inline void cm_rep_set_starting_psn(struct cm_rep_msg *rep_msg,
521 rep_msg->offset20 = cpu_to_be32((be32_to_cpu(starting_psn) << 8) |
522 (be32_to_cpu(rep_msg->offset20) & 0x000000FF));
/* Target ACK delay: bits 3..7 of offset26. */
525 static inline u8 cm_rep_get_target_ack_delay(struct cm_rep_msg *rep_msg)
527 return (u8) (rep_msg->offset26 >> 3);
530 static inline void cm_rep_set_target_ack_delay(struct cm_rep_msg *rep_msg,
533 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0x07) |
534 (target_ack_delay << 3));
/* Failover accepted: bits 1..2 of offset26. */
537 static inline u8 cm_rep_get_failover(struct cm_rep_msg *rep_msg)
539 return (u8) ((rep_msg->offset26 & 0x06) >> 1);
542 static inline void cm_rep_set_failover(struct cm_rep_msg *rep_msg, u8 failover)
544 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xF9) |
545 ((failover & 0x3) << 1));
/* End-to-end flow control: bit 0 of offset26. */
548 static inline u8 cm_rep_get_flow_ctrl(struct cm_rep_msg *rep_msg)
550 return (u8) (rep_msg->offset26 & 0x01);
/* NOTE(review): the "(flow_ctrl & 0x1));" continuation line was dropped. */
553 static inline void cm_rep_set_flow_ctrl(struct cm_rep_msg *rep_msg,
556 rep_msg->offset26 = (u8) ((rep_msg->offset26 & 0xFE) |
/* RNR retry count: top 3 bits of offset27. */
560 static inline u8 cm_rep_get_rnr_retry_count(struct cm_rep_msg *rep_msg)
562 return (u8) (rep_msg->offset27 >> 5);
565 static inline void cm_rep_set_rnr_retry_count(struct cm_rep_msg *rep_msg,
568 rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0x1F) |
569 (rnr_retry_count << 5));
/* SRQ flag: bit 4 of offset27. */
572 static inline u8 cm_rep_get_srq(struct cm_rep_msg *rep_msg)
574 return (u8) ((rep_msg->offset27 >> 4) & 0x1);
/* NOTE(review): the "((srq & 0x1) << 4));" continuation line was dropped. */
577 static inline void cm_rep_set_srq(struct cm_rep_msg *rep_msg, u8 srq)
579 rep_msg->offset27 = (u8) ((rep_msg->offset27 & 0xEF) |
/*
 * NOTE(review): fragment of struct cm_rtu_msg (Ready To Use) — struct
 * header and closing brace dropped by the extraction.
 */
584 struct ib_mad_hdr hdr;
586 __be32 local_comm_id;
587 __be32 remote_comm_id;
589 u8 private_data[IB_CM_RTU_PRIVATE_DATA_SIZE];
/*
 * NOTE(review): fragment of struct cm_dreq_msg (Disconnect Request) —
 * struct header, the offset8 field declaration and the closing brace
 * were dropped by the extraction.
 */
594 struct ib_mad_hdr hdr;
596 __be32 local_comm_id;
597 __be32 remote_comm_id;
598 /* remote QPN/EECN:24, rsvd:8 */
601 u8 private_data[IB_CM_DREQ_PRIVATE_DATA_SIZE];
/* DREQ accessors: remote QPN/EECN in the top 3 bytes of offset8. */
605 static inline __be32 cm_dreq_get_remote_qpn(struct cm_dreq_msg *dreq_msg)
607 return cpu_to_be32(be32_to_cpu(dreq_msg->offset8) >> 8);
610 static inline void cm_dreq_set_remote_qpn(struct cm_dreq_msg *dreq_msg, __be32 qpn)
612 dreq_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
613 (be32_to_cpu(dreq_msg->offset8) & 0x000000FF));
/*
 * NOTE(review): fragment of struct cm_drep_msg (Disconnect Reply) —
 * struct header and closing brace dropped by the extraction.
 */
617 struct ib_mad_hdr hdr;
619 __be32 local_comm_id;
620 __be32 remote_comm_id;
622 u8 private_data[IB_CM_DREP_PRIVATE_DATA_SIZE];
/*
 * NOTE(review): fragment of struct cm_lap_msg (Load Alternate Path) —
 * struct header, the offset12/offset56/offset61/offset62/offset63
 * field declarations and the closing brace were dropped by the
 * extraction; only the layout comments and GID/LID fields remain.
 */
627 struct ib_mad_hdr hdr;
629 __be32 local_comm_id;
630 __be32 remote_comm_id;
633 /* remote QPN/EECN:24, remote CM response timeout:5, rsvd:3 */
637 __be16 alt_local_lid;
638 __be16 alt_remote_lid;
639 union ib_gid alt_local_gid;
640 union ib_gid alt_remote_gid;
641 /* flow label:20, rsvd:4, traffic class:8 */
644 /* rsvd:2, packet rate:6 */
646 /* SL:4, subnet local:1, rsvd:3 */
648 /* local ACK timeout:5, rsvd:3 */
651 u8 private_data[IB_CM_LAP_PRIVATE_DATA_SIZE];
/*
 * LAP accessors (braces, some parameter lines and some mask constants
 * dropped by the extraction).
 */
/* Remote QPN/EECN: top 3 bytes of offset12. */
654 static inline __be32 cm_lap_get_remote_qpn(struct cm_lap_msg *lap_msg)
656 return cpu_to_be32(be32_to_cpu(lap_msg->offset12) >> 8);
659 static inline void cm_lap_set_remote_qpn(struct cm_lap_msg *lap_msg, __be32 qpn)
661 lap_msg->offset12 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
662 (be32_to_cpu(lap_msg->offset12) &
/* Remote CM response timeout: bits 3..7 of offset12's low byte. */
666 static inline u8 cm_lap_get_remote_resp_timeout(struct cm_lap_msg *lap_msg)
668 return (u8) ((be32_to_cpu(lap_msg->offset12) & 0xF8) >> 3);
671 static inline void cm_lap_set_remote_resp_timeout(struct cm_lap_msg *lap_msg,
674 lap_msg->offset12 = cpu_to_be32((resp_timeout << 3) |
675 (be32_to_cpu(lap_msg->offset12) &
/* Flow label: top 20 bits of offset56. */
679 static inline __be32 cm_lap_get_flow_label(struct cm_lap_msg *lap_msg)
681 return cpu_to_be32(be32_to_cpu(lap_msg->offset56) >> 12);
684 static inline void cm_lap_set_flow_label(struct cm_lap_msg *lap_msg,
687 lap_msg->offset56 = cpu_to_be32(
688 (be32_to_cpu(lap_msg->offset56) & 0x00000FFF) |
689 (be32_to_cpu(flow_label) << 12));
/* Traffic class: low byte of offset56. */
692 static inline u8 cm_lap_get_traffic_class(struct cm_lap_msg *lap_msg)
694 return (u8) be32_to_cpu(lap_msg->offset56);
697 static inline void cm_lap_set_traffic_class(struct cm_lap_msg *lap_msg,
700 lap_msg->offset56 = cpu_to_be32(traffic_class |
701 (be32_to_cpu(lap_msg->offset56) &
/* Packet rate: low 6 bits of offset61. */
705 static inline u8 cm_lap_get_packet_rate(struct cm_lap_msg *lap_msg)
707 return lap_msg->offset61 & 0x3F;
710 static inline void cm_lap_set_packet_rate(struct cm_lap_msg *lap_msg,
713 lap_msg->offset61 = (packet_rate & 0x3F) | (lap_msg->offset61 & 0xC0);
/* SL: high nibble of offset62. */
716 static inline u8 cm_lap_get_sl(struct cm_lap_msg *lap_msg)
718 return lap_msg->offset62 >> 4;
721 static inline void cm_lap_set_sl(struct cm_lap_msg *lap_msg, u8 sl)
723 lap_msg->offset62 = (sl << 4) | (lap_msg->offset62 & 0x0F);
/* Subnet-local flag: bit 3 of offset62. */
726 static inline u8 cm_lap_get_subnet_local(struct cm_lap_msg *lap_msg)
728 return (lap_msg->offset62 >> 3) & 0x1;
731 static inline void cm_lap_set_subnet_local(struct cm_lap_msg *lap_msg,
734 lap_msg->offset62 = ((subnet_local & 0x1) << 3) |
735 (lap_msg->offset61 & 0xF7);
/* Local ACK timeout: bits 3..7 of offset63 (braces dropped by extraction). */
737 static inline u8 cm_lap_get_local_ack_timeout(struct cm_lap_msg *lap_msg)
739 return lap_msg->offset63 >> 3;
742 static inline void cm_lap_set_local_ack_timeout(struct cm_lap_msg *lap_msg,
743 u8 local_ack_timeout)
745 lap_msg->offset63 = (local_ack_timeout << 3) |
746 (lap_msg->offset63 & 0x07);
/*
 * NOTE(review): fragment of struct cm_apr_msg (Alternate Path Response)
 * — struct header, ap_status/info_length fields and closing brace were
 * dropped by the extraction.
 */
750 struct ib_mad_hdr hdr;
752 __be32 local_comm_id;
753 __be32 remote_comm_id;
758 u8 info[IB_CM_APR_INFO_LENGTH];
760 u8 private_data[IB_CM_APR_PRIVATE_DATA_SIZE];
/*
 * NOTE(review): fragment of struct cm_sidr_req_msg (Service ID
 * Resolution Request) — the request_id/pkey/service_id fields and the
 * closing brace were dropped by the extraction.
 */
763 struct cm_sidr_req_msg {
764 struct ib_mad_hdr hdr;
771 u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
/*
 * NOTE(review): fragment of struct cm_sidr_rep_msg (Service ID
 * Resolution Reply) — the request_id/status/offset8 (QPN:24, rsvd:8)
 * and service_id/qkey fields plus the closing brace were dropped by
 * the extraction.
 */
774 struct cm_sidr_rep_msg {
775 struct ib_mad_hdr hdr;
785 u8 info[IB_CM_SIDR_REP_INFO_LENGTH];
787 u8 private_data[IB_CM_SIDR_REP_PRIVATE_DATA_SIZE];
/*
 * SIDR REP QPN accessors: QPN in the top 3 bytes of offset8.
 * NOTE(review): braces, the "__be32 qpn" parameter line and the
 * trailing "0x000000FF));" mask line were dropped by the extraction.
 */
790 static inline __be32 cm_sidr_rep_get_qpn(struct cm_sidr_rep_msg *sidr_rep_msg)
792 return cpu_to_be32(be32_to_cpu(sidr_rep_msg->offset8) >> 8);
795 static inline void cm_sidr_rep_set_qpn(struct cm_sidr_rep_msg *sidr_rep_msg,
798 sidr_rep_msg->offset8 = cpu_to_be32((be32_to_cpu(qpn) << 8) |
799 (be32_to_cpu(sidr_rep_msg->offset8) &
803 #endif /* CM_MSGS_H */