1 /* SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB */
2 /* Copyright (c) 2015 - 2021 Intel Corporation */
/* Debug classification tags: each string names a driver subsystem and is
 * presumably passed to the driver's debug logging macros so trace output
 * can be labeled and filtered per area (error, init, connection manager,
 * verbs, queues, stats, ...) - confirm against the irdma debug macro users.
 */
12 #define IRDMA_DEBUG_ERR "ERR"
13 #define IRDMA_DEBUG_INIT "INIT"
14 #define IRDMA_DEBUG_DEV "DEV"
15 #define IRDMA_DEBUG_CM "CM"
16 #define IRDMA_DEBUG_VERBS "VERBS"
17 #define IRDMA_DEBUG_PUDA "PUDA"
18 #define IRDMA_DEBUG_ILQ "ILQ"
19 #define IRDMA_DEBUG_IEQ "IEQ"
20 #define IRDMA_DEBUG_QP "QP"
21 #define IRDMA_DEBUG_CQ "CQ"
22 #define IRDMA_DEBUG_MR "MR"
23 #define IRDMA_DEBUG_PBLE "PBLE"
24 #define IRDMA_DEBUG_WQE "WQE"
25 #define IRDMA_DEBUG_AEQ "AEQ"
26 #define IRDMA_DEBUG_CQP "CQP"
27 #define IRDMA_DEBUG_HMC "HMC"
28 #define IRDMA_DEBUG_USER "USER"
29 #define IRDMA_DEBUG_VIRT "VIRT"
30 #define IRDMA_DEBUG_DCB "DCB"
31 #define IRDMA_DEBUG_CQE "CQE"
32 #define IRDMA_DEBUG_CLNT "CLNT"
33 #define IRDMA_DEBUG_WS "WS"
34 #define IRDMA_DEBUG_STATS "STATS"
36 enum irdma_page_size {
37 IRDMA_PAGE_SIZE_4K = 0,
42 enum irdma_hdrct_flags {
48 enum irdma_term_layers {
54 enum irdma_term_error_types {
55 RDMAP_REMOTE_PROT = 1,
/* RDMAP-layer error codes reported in a TERMINATE message.
 * NOTE(review): encoding appears to follow the RDMAP spec (RFC 5040)
 * terminate error codes - confirm exact values against the spec.
 */
63 enum irdma_term_rdma_errors {
64 RDMAP_INV_STAG = 0x00,
65 RDMAP_INV_BOUNDS = 0x01,
67 RDMAP_UNASSOC_STAG = 0x03,
69 RDMAP_INV_RDMAP_VER = 0x05,
70 RDMAP_UNEXPECTED_OP = 0x06,
71 RDMAP_CATASTROPHIC_LOCAL = 0x07,
72 RDMAP_CATASTROPHIC_GLOBAL = 0x08,
73 RDMAP_CANT_INV_STAG = 0x09,
74 RDMAP_UNSPECIFIED = 0xff,
/* DDP-layer terminate error codes.  Tagged-buffer and untagged-buffer
 * errors form separate namespaces, so their numeric values intentionally
 * overlap (e.g. 0x01 is both DDP_TAGGED_BOUNDS and DDP_UNTAGGED_INV_QN).
 * NOTE(review): values appear to follow the DDP spec (RFC 5041) terminate
 * error codes - confirm exact encodings against the spec.
 */
77 enum irdma_term_ddp_errors {
78 DDP_CATASTROPHIC_LOCAL = 0x00,
79 DDP_TAGGED_INV_STAG = 0x00,
80 DDP_TAGGED_BOUNDS = 0x01,
81 DDP_TAGGED_UNASSOC_STAG = 0x02,
82 DDP_TAGGED_TO_WRAP = 0x03,
83 DDP_TAGGED_INV_DDP_VER = 0x04,
84 DDP_UNTAGGED_INV_QN = 0x01,
85 DDP_UNTAGGED_INV_MSN_NO_BUF = 0x02,
86 DDP_UNTAGGED_INV_MSN_RANGE = 0x03,
87 DDP_UNTAGGED_INV_MO = 0x04,
88 DDP_UNTAGGED_INV_TOO_LONG = 0x05,
89 DDP_UNTAGGED_INV_DDP_VER = 0x06,
92 enum irdma_term_mpa_errors {
99 enum irdma_qp_event_type {
100 IRDMA_QP_EVENT_CATASTROPHIC,
101 IRDMA_QP_EVENT_ACCESS_ERR,
/* Index map for the 32-bit hardware statistics counters.  These values
 * index and size the stats_offset_32[] / stats_val_32[] arrays declared
 * later in this file.  MAX_32_GEN_1 doubles as the counter count on gen-1
 * hardware; entries from RXVLANERR onward are presumably only present on
 * later hardware generations (names suggest so - confirm).
 */
104 enum irdma_hw_stats_index_32b {
105 IRDMA_HW_STAT_INDEX_IP4RXDISCARD = 0,
106 IRDMA_HW_STAT_INDEX_IP4RXTRUNC = 1,
107 IRDMA_HW_STAT_INDEX_IP4TXNOROUTE = 2,
108 IRDMA_HW_STAT_INDEX_IP6RXDISCARD = 3,
109 IRDMA_HW_STAT_INDEX_IP6RXTRUNC = 4,
110 IRDMA_HW_STAT_INDEX_IP6TXNOROUTE = 5,
111 IRDMA_HW_STAT_INDEX_TCPRTXSEG = 6,
112 IRDMA_HW_STAT_INDEX_TCPRXOPTERR = 7,
113 IRDMA_HW_STAT_INDEX_TCPRXPROTOERR = 8,
114 IRDMA_HW_STAT_INDEX_MAX_32_GEN_1 = 9, /* Must be same value as next entry */
115 IRDMA_HW_STAT_INDEX_RXVLANERR = 9,
116 IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED = 10,
117 IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED = 11,
118 IRDMA_HW_STAT_INDEX_TXNPCNPSENT = 12,
119 IRDMA_HW_STAT_INDEX_MAX_32, /* Must be last entry */
/* Index map for the 64-bit hardware statistics counters.  These values
 * index and size the stats_offset_64[] / stats_val_64[] arrays declared
 * later in this file.  MAX_64_GEN_1 doubles as the counter count on gen-1
 * hardware; entries from IP4RXMCOCTS onward are presumably only present
 * on later hardware generations (names suggest so - confirm).
 */
122 enum irdma_hw_stats_index_64b {
123 IRDMA_HW_STAT_INDEX_IP4RXOCTS = 0,
124 IRDMA_HW_STAT_INDEX_IP4RXPKTS = 1,
125 IRDMA_HW_STAT_INDEX_IP4RXFRAGS = 2,
126 IRDMA_HW_STAT_INDEX_IP4RXMCPKTS = 3,
127 IRDMA_HW_STAT_INDEX_IP4TXOCTS = 4,
128 IRDMA_HW_STAT_INDEX_IP4TXPKTS = 5,
129 IRDMA_HW_STAT_INDEX_IP4TXFRAGS = 6,
130 IRDMA_HW_STAT_INDEX_IP4TXMCPKTS = 7,
131 IRDMA_HW_STAT_INDEX_IP6RXOCTS = 8,
132 IRDMA_HW_STAT_INDEX_IP6RXPKTS = 9,
133 IRDMA_HW_STAT_INDEX_IP6RXFRAGS = 10,
134 IRDMA_HW_STAT_INDEX_IP6RXMCPKTS = 11,
135 IRDMA_HW_STAT_INDEX_IP6TXOCTS = 12,
136 IRDMA_HW_STAT_INDEX_IP6TXPKTS = 13,
137 IRDMA_HW_STAT_INDEX_IP6TXFRAGS = 14,
138 IRDMA_HW_STAT_INDEX_IP6TXMCPKTS = 15,
139 IRDMA_HW_STAT_INDEX_TCPRXSEGS = 16,
140 IRDMA_HW_STAT_INDEX_TCPTXSEG = 17,
141 IRDMA_HW_STAT_INDEX_RDMARXRDS = 18,
142 IRDMA_HW_STAT_INDEX_RDMARXSNDS = 19,
143 IRDMA_HW_STAT_INDEX_RDMARXWRS = 20,
144 IRDMA_HW_STAT_INDEX_RDMATXRDS = 21,
145 IRDMA_HW_STAT_INDEX_RDMATXSNDS = 22,
146 IRDMA_HW_STAT_INDEX_RDMATXWRS = 23,
147 IRDMA_HW_STAT_INDEX_RDMAVBND = 24,
148 IRDMA_HW_STAT_INDEX_RDMAVINV = 25,
149 IRDMA_HW_STAT_INDEX_MAX_64_GEN_1 = 26, /* Must be same value as next entry */
150 IRDMA_HW_STAT_INDEX_IP4RXMCOCTS = 26,
151 IRDMA_HW_STAT_INDEX_IP4TXMCOCTS = 27,
152 IRDMA_HW_STAT_INDEX_IP6RXMCOCTS = 28,
153 IRDMA_HW_STAT_INDEX_IP6TXMCOCTS = 29,
154 IRDMA_HW_STAT_INDEX_UDPRXPKTS = 30,
155 IRDMA_HW_STAT_INDEX_UDPTXPKTS = 31,
156 IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS = 32,
157 IRDMA_HW_STAT_INDEX_MAX_64, /* Must be last entry */
/* Feature identifiers; each indexes feature_info[] in struct irdma_sc_dev,
 * which is sized by IRDMA_MAX_FEATURES.
 */
160 enum irdma_feature_type {
161 IRDMA_FEATURE_FW_INFO = 0,
162 IRDMA_HW_VERSION_INFO = 1,
163 IRDMA_QSETS_MAX = 26,
164 IRDMA_MAX_FEATURES, /* Must be last entry */
/* Arbitration schemes for work-scheduler (WS) priority levels:
 * weighted round-robin, strict priority, or a combination of both.
 */
167 enum irdma_sched_prio_type {
168 IRDMA_PRIO_WEIGHTED_RR = 1,
169 IRDMA_PRIO_STRICT = 2,
170 IRDMA_PRIO_WEIGHTED_STRICT = 3,
173 enum irdma_vm_vf_type {
/* HMC resource-partitioning profiles selected at CQP setup.
 * NOTE(review): per-profile semantics (default split, favor virtual
 * functions, equal shares) inferred from the names - confirm.
 */
179 enum irdma_cqp_hmc_profile {
180 IRDMA_HMC_PROFILE_DEFAULT = 1,
181 IRDMA_HMC_PROFILE_FAVOR_VF = 2,
182 IRDMA_HMC_PROFILE_EQUAL = 3,
/* Connection classes for quad-hash table entries (presumably the HW
 * 4-tuple connection hash - confirm against the qhash CQP op users).
 */
185 enum irdma_quad_entry_type {
186 IRDMA_QHASH_TYPE_TCP_ESTABLISHED = 1,
187 IRDMA_QHASH_TYPE_TCP_SYN,
188 IRDMA_QHASH_TYPE_UDP_UNICAST,
189 IRDMA_QHASH_TYPE_UDP_MCAST,
190 IRDMA_QHASH_TYPE_ROCE_MCAST,
191 IRDMA_QHASH_TYPE_ROCEV2_HW,
/* Operation to apply to a quad-hash table entry; carried in the
 * 'manage' field of struct irdma_qhash_table_info.
 */
194 enum irdma_quad_hash_manage_type {
195 IRDMA_QHASH_MANAGE_TYPE_DELETE = 0,
196 IRDMA_QHASH_MANAGE_TYPE_ADD,
197 IRDMA_QHASH_MANAGE_TYPE_MODIFY,
/* Selects where TCP SYN/RST processing happens (hardware vs. firmware)
 * and whether the "secure" variant is used; see the syn_rst_handling
 * field in struct irdma_tcp_offload_info.
 */
200 enum irdma_syn_rst_handling {
201 IRDMA_SYN_RST_HANDLING_HW_TCP_SECURE = 0,
202 IRDMA_SYN_RST_HANDLING_HW_TCP,
203 IRDMA_SYN_RST_HANDLING_FW_TCP_SECURE,
204 IRDMA_SYN_RST_HANDLING_FW_TCP,
/* Queue categories: a QP's send/receive queues vs. the control QP. */
207 enum irdma_queue_type {
208 IRDMA_QUEUE_TYPE_SQ_RQ = 0,
209 IRDMA_QUEUE_TYPE_CQP,
213 struct irdma_vsi_pestat;
215 struct irdma_dcqcn_cc_params {
227 struct irdma_cqp_init_info {
231 struct irdma_sc_dev *dev;
232 struct irdma_cqp_quanta *sq;
233 struct irdma_dcqcn_cc_params dcqcn_params;
243 bool en_datacenter_tcp:1;
244 bool disable_packed:1;
245 bool rocev2_rto_policy:1;
246 enum irdma_protocol_used protocol_used;
249 struct irdma_terminate_hdr {
256 struct irdma_cqp_sq_wqe {
257 __le64 buf[IRDMA_CQP_WQE_SIZE];
260 struct irdma_sc_aeqe {
261 __le64 buf[IRDMA_AEQE_SIZE];
265 __le64 buf[IRDMA_CEQE_SIZE];
268 struct irdma_cqp_ctx {
269 __le64 buf[IRDMA_CQP_CTX_SIZE];
272 struct irdma_cq_shadow_area {
273 __le64 buf[IRDMA_SHADOW_AREA_SIZE];
276 struct irdma_dev_hw_stats_offsets {
277 u32 stats_offset_32[IRDMA_HW_STAT_INDEX_MAX_32];
278 u32 stats_offset_64[IRDMA_HW_STAT_INDEX_MAX_64];
281 struct irdma_dev_hw_stats {
282 u64 stats_val_32[IRDMA_HW_STAT_INDEX_MAX_32];
283 u64 stats_val_64[IRDMA_HW_STAT_INDEX_MAX_64];
286 struct irdma_gather_stats {
338 struct irdma_stats_gather_info {
339 bool use_hmc_fcn_index:1;
340 bool use_stats_inst:1;
343 struct irdma_dma_mem stats_buff_mem;
344 void *gather_stats_va;
345 void *last_gather_stats_va;
348 struct irdma_vsi_pestat {
350 struct irdma_dev_hw_stats hw_stats;
351 struct irdma_stats_gather_info gather_info;
352 struct timer_list stats_timer;
353 struct irdma_sc_vsi *vsi;
354 struct irdma_dev_hw_stats last_hw_stats;
355 spinlock_t lock; /* rdma stats lock */
360 u8 __iomem *priv_hw_addr;
361 struct device *device;
362 struct irdma_hmc_info hmc;
366 struct list_head rxlist;
383 struct irdma_sc_ah *ah;
384 struct irdma_puda_buf *ah_buf;
385 spinlock_t lock; /* fpdu processing lock */
386 struct irdma_puda_buf *lastrcv_buf;
390 struct irdma_sc_dev *dev;
395 struct irdma_cqp_quanta {
396 __le64 elem[IRDMA_CQP_WQE_SIZE];
399 struct irdma_sc_cqp {
404 struct irdma_sc_dev *dev;
405 enum irdma_status_code (*process_cqp_sds)(struct irdma_sc_dev *dev,
406 struct irdma_update_sds_info *info);
407 struct irdma_dma_mem sdbuf;
408 struct irdma_ring sq_ring;
409 struct irdma_cqp_quanta *sq_base;
410 struct irdma_dcqcn_cc_params dcqcn_params;
424 bool en_datacenter_tcp:1;
425 bool disable_packed:1;
426 bool rocev2_rto_policy:1;
427 enum irdma_protocol_used protocol_used;
430 struct irdma_sc_aeq {
433 struct irdma_sc_dev *dev;
434 struct irdma_sc_aeqe *aeqe_base;
437 struct irdma_ring aeq_ring;
439 u32 first_pm_pbl_idx;
445 struct irdma_sc_ceq {
448 struct irdma_sc_dev *dev;
449 struct irdma_ceqe *ceqe_base;
453 struct irdma_ring ceq_ring;
456 u32 first_pm_pbl_idx;
458 struct irdma_sc_vsi *vsi;
459 struct irdma_sc_cq **reg_cq;
461 spinlock_t req_cq_lock; /* protect access to reg_cq array */
464 bool itr_no_expire:1;
468 struct irdma_cq_uk cq_uk;
471 struct irdma_sc_dev *dev;
472 struct irdma_sc_vsi *vsi;
476 u32 shadow_read_threshold;
480 u32 first_pm_pbl_idx;
483 bool check_overflow:1;
489 struct irdma_qp_uk qp_uk;
495 struct irdma_sc_dev *dev;
496 struct irdma_sc_vsi *vsi;
497 struct irdma_sc_pd *pd;
499 void *llp_stream_handle;
500 struct irdma_pfpdu pfpdu;
515 bool ieq_pass_thru:1;
523 bool sq_flush_code:1;
524 bool rq_flush_code:1;
525 enum irdma_flush_opcode flush_code;
526 enum irdma_qp_event_type event_type;
529 struct list_head list;
532 struct irdma_stats_inst_info {
533 bool use_hmc_fcn_index;
538 struct irdma_up_info {
543 bool use_cnp_up_override:1;
546 #define IRDMA_MAX_WS_NODES 0x3FF
547 #define IRDMA_WS_NODE_INVALID 0xFFFF
549 struct irdma_ws_node_info {
561 struct irdma_hmc_fpm_misc {
569 u32 ooiscf_block_size;
572 #define IRDMA_LEAF_DEFAULT_REL_BW 64
573 #define IRDMA_PARENT_DEFAULT_REL_BW 1
576 struct list_head qplist;
577 struct mutex qos_mutex; /* protect QoS attributes per QoS level */
579 u32 l2_sched_node_id;
587 #define IRDMA_INVALID_FCN_ID 0xff
588 struct irdma_sc_vsi {
590 struct irdma_sc_dev *dev;
593 struct irdma_virt_mem ilq_mem;
594 struct irdma_puda_rsrc *ilq;
596 struct irdma_virt_mem ieq_mem;
597 struct irdma_puda_rsrc *ieq;
602 enum irdma_vm_vf_type vm_vf_type;
603 bool stats_fcn_id_alloc:1;
604 bool tc_change_pending:1;
605 struct irdma_qos qos[IRDMA_MAX_USER_PRIORITY];
606 struct irdma_vsi_pestat *pestat;
607 atomic_t qp_suspend_reqs;
608 enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
609 struct irdma_ws_node *tc_node);
610 void (*unregister_qset)(struct irdma_sc_vsi *vsi,
611 struct irdma_ws_node *tc_node);
616 struct irdma_sc_dev {
617 struct list_head cqp_cmd_head; /* head of the CQP command list */
618 spinlock_t cqp_lock; /* protect CQP list access */
619 bool fcn_id_array[IRDMA_MAX_STATS_COUNT];
620 struct irdma_dma_mem vf_fpm_query_buf[IRDMA_MAX_PE_ENA_VF_COUNT];
621 u64 fpm_query_buf_pa;
622 u64 fpm_commit_buf_pa;
623 __le64 *fpm_query_buf;
624 __le64 *fpm_commit_buf;
627 u32 __iomem *wqe_alloc_db;
628 u32 __iomem *cq_arm_db;
629 u32 __iomem *aeq_alloc_db;
631 u32 __iomem *cq_ack_db;
632 u32 __iomem *ceq_itr_mask_db;
633 u32 __iomem *aeq_itr_mask_db;
634 u32 __iomem *hw_regs[IRDMA_MAX_REGS];
635 u32 ceq_itr; /* Interrupt throttle, usecs between interrupts: 0 disabled. 2 - 8160 */
636 u64 hw_masks[IRDMA_MAX_MASKS];
637 u64 hw_shifts[IRDMA_MAX_SHIFTS];
638 u64 hw_stats_regs_32[IRDMA_HW_STAT_INDEX_MAX_32];
639 u64 hw_stats_regs_64[IRDMA_HW_STAT_INDEX_MAX_64];
640 u64 feature_info[IRDMA_MAX_FEATURES];
641 u64 cqp_cmd_stats[IRDMA_MAX_CQP_OPS];
642 struct irdma_hw_attrs hw_attrs;
643 struct irdma_hmc_info *hmc_info;
644 struct irdma_sc_cqp *cqp;
645 struct irdma_sc_aeq *aeq;
646 struct irdma_sc_ceq *ceq[IRDMA_CEQ_MAX_COUNT];
647 struct irdma_sc_cq *ccq;
648 const struct irdma_irq_ops *irq_ops;
649 struct irdma_hmc_fpm_misc hmc_fpm_misc;
650 struct irdma_ws_node *ws_tree_root;
651 struct mutex ws_mutex; /* ws tree mutex */
658 enum irdma_status_code (*ws_add)(struct irdma_sc_vsi *vsi, u8 user_pri);
659 void (*ws_remove)(struct irdma_sc_vsi *vsi, u8 user_pri);
660 void (*ws_reset)(struct irdma_sc_vsi *vsi);
663 struct irdma_modify_cq_info {
665 struct irdma_cqe *cq_base;
667 u32 shadow_read_threshold;
669 u32 first_pm_pbl_idx;
675 struct irdma_create_qp_info {
677 bool tcp_ctx_valid:1;
679 bool arp_cache_idx_valid:1;
685 struct irdma_modify_qp_info {
693 bool tcp_ctx_valid:1;
694 bool udp_ctx_valid:1;
696 bool arp_cache_idx_valid:1;
697 bool reset_tcp_conn:1;
698 bool remove_hash_idx:1;
699 bool dont_send_term:1;
700 bool dont_send_fin:1;
701 bool cached_var_valid:1;
707 struct irdma_ccq_cqe_info {
708 struct irdma_sc_cqp *cqp;
717 struct irdma_dcb_app_info {
723 struct irdma_qos_tc_info {
731 struct irdma_l2params {
732 struct irdma_qos_tc_info tc_info[IRDMA_MAX_USER_PRIORITY];
733 struct irdma_dcb_app_info apps[IRDMA_MAX_APPS];
735 u16 qs_handle_list[IRDMA_MAX_USER_PRIORITY];
737 u8 up2tc[IRDMA_MAX_USER_PRIORITY];
745 struct irdma_vsi_init_info {
746 struct irdma_sc_dev *dev;
748 struct irdma_l2params *params;
751 enum irdma_vm_vf_type vm_vf_type;
753 enum irdma_status_code (*register_qset)(struct irdma_sc_vsi *vsi,
754 struct irdma_ws_node *tc_node);
755 void (*unregister_qset)(struct irdma_sc_vsi *vsi,
756 struct irdma_ws_node *tc_node);
759 struct irdma_vsi_stats_info {
760 struct irdma_vsi_pestat *pestat;
765 struct irdma_device_init_info {
766 u64 fpm_query_buf_pa;
767 u64 fpm_commit_buf_pa;
768 __le64 *fpm_query_buf;
769 __le64 *fpm_commit_buf;
775 struct irdma_ceq_init_info {
777 struct irdma_sc_dev *dev;
784 bool itr_no_expire:1;
787 u32 first_pm_pbl_idx;
788 struct irdma_sc_vsi *vsi;
789 struct irdma_sc_cq **reg_cq;
793 struct irdma_aeq_init_info {
795 struct irdma_sc_dev *dev;
801 u32 first_pm_pbl_idx;
805 struct irdma_ccq_init_info {
808 struct irdma_sc_dev *dev;
809 struct irdma_cqe *cq_base;
814 u32 shadow_read_threshold;
817 bool avoid_mem_cflct:1;
822 u32 first_pm_pbl_idx;
823 struct irdma_sc_vsi *vsi;
826 struct irdma_udp_offload_info {
828 bool insert_vlan_tag:1;
850 struct irdma_roce_offload_info {
874 bool use_stats_inst:1;
878 u8 mac_addr[ETH_ALEN];
882 struct irdma_iwarp_offload_info {
894 bool rcv_no_mpa_crc:1;
895 bool err_rq_idx_valid:1;
904 bool use_stats_inst:1;
910 u8 mac_addr[ETH_ALEN];
914 struct irdma_tcp_offload_info {
917 bool insert_vlan_tag:1;
920 bool avoid_stretch_ack:1;
922 bool ignore_tcp_opt:1;
923 bool ignore_tcp_uns_opt:1;
937 u16 syn_rst_handling;
944 u32 time_stamp_recent;
963 struct irdma_qp_host_ctx_info {
966 struct irdma_tcp_offload_info *tcp_info;
967 struct irdma_udp_offload_info *udp_info;
970 struct irdma_iwarp_offload_info *iwarp_info;
971 struct irdma_roce_offload_info *roce_info;
975 u32 rem_endpoint_idx;
978 bool tcp_info_valid:1;
979 bool iwarp_info_valid:1;
980 bool stats_idx_valid:1;
984 struct irdma_aeqe_info {
997 bool aeqe_overflow:1;
1002 struct irdma_allocate_stag_info {
1004 u64 first_pm_pbl_idx;
1010 bool remote_access:1;
1011 bool use_hmc_fcn_index:1;
1016 struct irdma_mw_alloc_info {
1020 bool remote_access:1;
1022 bool mw1_bind_dont_vldt_key:1;
1025 struct irdma_reg_ns_stag_info {
1031 u32 first_pm_pbl_index;
1032 enum irdma_addressing_type addr_type;
1033 irdma_stag_index stag_idx;
1036 irdma_stag_key stag_key;
1037 bool use_hmc_fcn_index:1;
1042 struct irdma_fast_reg_stag_info {
1050 u32 first_pm_pbl_index;
1051 enum irdma_addressing_type addr_type;
1052 irdma_stag_index stag_idx;
1055 irdma_stag_key stag_key;
1060 bool use_hmc_fcn_index:1;
1066 struct irdma_dealloc_stag_info {
1073 struct irdma_register_shared_stag {
1075 enum irdma_addressing_type addr_type;
1076 irdma_stag_index new_stag_idx;
1077 irdma_stag_index parent_stag_idx;
1081 irdma_stag_key new_stag_key;
1084 struct irdma_qp_init_info {
1085 struct irdma_qp_uk_init_info qp_uk_init_info;
1086 struct irdma_sc_pd *pd;
1087 struct irdma_sc_vsi *vsi;
1104 struct irdma_cq_init_info {
1105 struct irdma_sc_dev *dev;
1109 u32 shadow_read_threshold;
1111 u32 first_pm_pbl_idx;
1114 bool ceq_id_valid:1;
1118 struct irdma_cq_uk_init_info cq_uk_init_info;
1119 struct irdma_sc_vsi *vsi;
1122 struct irdma_upload_context_info {
1130 struct irdma_local_mac_entry_info {
1135 struct irdma_add_arp_cache_entry_info {
1136 u8 mac_addr[ETH_ALEN];
1142 struct irdma_apbvt_info {
1147 struct irdma_qhash_table_info {
1148 struct irdma_sc_vsi *vsi;
1149 enum irdma_quad_hash_manage_type manage;
1150 enum irdma_quad_entry_type entry_type;
1153 u8 mac_addr[ETH_ALEN];
1163 struct irdma_cqp_manage_push_page_info {
1170 struct irdma_qp_flush_info {
1179 bool userflushcode:1;
1183 struct irdma_gen_ae_info {
1188 struct irdma_cqp_timeout {
1193 struct irdma_irq_ops {
1194 void (*irdma_cfg_aeq)(struct irdma_sc_dev *dev, u32 idx, bool enable);
1195 void (*irdma_cfg_ceq)(struct irdma_sc_dev *dev, u32 ceq_id, u32 idx,
1197 void (*irdma_dis_irq)(struct irdma_sc_dev *dev, u32 idx);
1198 void (*irdma_en_irq)(struct irdma_sc_dev *dev, u32 idx);
/* Control-path API for the CCQ (CQP completion queue): arm for
 * notifications, create/destroy, init, and completion retrieval.
 * NOTE(review): 'scratch' is presumably an opaque caller cookie echoed
 * back in the CQP completion - confirm against the implementations.
 */
1201 void irdma_sc_ccq_arm(struct irdma_sc_cq *ccq);
1202 enum irdma_status_code irdma_sc_ccq_create(struct irdma_sc_cq *ccq, u64 scratch,
1203 bool check_overflow, bool post_sq);
1204 enum irdma_status_code irdma_sc_ccq_destroy(struct irdma_sc_cq *ccq, u64 scratch,
1206 enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq,
1207 struct irdma_ccq_cqe_info *info);
1208 enum irdma_status_code irdma_sc_ccq_init(struct irdma_sc_cq *ccq,
1209 struct irdma_ccq_init_info *info);
1211 enum irdma_status_code irdma_sc_cceq_create(struct irdma_sc_ceq *ceq, u64 scratch);
1212 enum irdma_status_code irdma_sc_cceq_destroy_done(struct irdma_sc_ceq *ceq);
1214 enum irdma_status_code irdma_sc_ceq_destroy(struct irdma_sc_ceq *ceq, u64 scratch,
1216 enum irdma_status_code irdma_sc_ceq_init(struct irdma_sc_ceq *ceq,
1217 struct irdma_ceq_init_info *info);
1218 void irdma_sc_cleanup_ceqes(struct irdma_sc_cq *cq, struct irdma_sc_ceq *ceq);
1219 void *irdma_sc_process_ceq(struct irdma_sc_dev *dev, struct irdma_sc_ceq *ceq);
1221 enum irdma_status_code irdma_sc_aeq_init(struct irdma_sc_aeq *aeq,
1222 struct irdma_aeq_init_info *info);
1223 enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq,
1224 struct irdma_aeqe_info *info);
1225 void irdma_sc_repost_aeq_entries(struct irdma_sc_dev *dev, u32 count);
1227 void irdma_sc_pd_init(struct irdma_sc_dev *dev, struct irdma_sc_pd *pd, u32 pd_id,
1229 void irdma_cfg_aeq(struct irdma_sc_dev *dev, u32 idx, bool enable);
1230 void irdma_check_cqp_progress(struct irdma_cqp_timeout *cqp_timeout,
1231 struct irdma_sc_dev *dev);
1232 enum irdma_status_code irdma_sc_cqp_create(struct irdma_sc_cqp *cqp, u16 *maj_err,
1234 enum irdma_status_code irdma_sc_cqp_destroy(struct irdma_sc_cqp *cqp);
1235 enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp,
1236 struct irdma_cqp_init_info *info);
1237 void irdma_sc_cqp_post_sq(struct irdma_sc_cqp *cqp);
1238 enum irdma_status_code irdma_sc_poll_for_cqp_op_done(struct irdma_sc_cqp *cqp, u8 opcode,
1239 struct irdma_ccq_cqe_info *cmpl_info);
1240 enum irdma_status_code irdma_sc_fast_register(struct irdma_sc_qp *qp,
1241 struct irdma_fast_reg_stag_info *info,
1243 enum irdma_status_code irdma_sc_qp_create(struct irdma_sc_qp *qp,
1244 struct irdma_create_qp_info *info,
1245 u64 scratch, bool post_sq);
1246 enum irdma_status_code irdma_sc_qp_destroy(struct irdma_sc_qp *qp,
1247 u64 scratch, bool remove_hash_idx,
1248 bool ignore_mw_bnd, bool post_sq);
1249 enum irdma_status_code irdma_sc_qp_flush_wqes(struct irdma_sc_qp *qp,
1250 struct irdma_qp_flush_info *info,
1251 u64 scratch, bool post_sq);
1252 enum irdma_status_code irdma_sc_qp_init(struct irdma_sc_qp *qp,
1253 struct irdma_qp_init_info *info);
1254 enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
1255 struct irdma_modify_qp_info *info,
1256 u64 scratch, bool post_sq);
1257 void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
1259 void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
1260 void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
1261 void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx,
1262 struct irdma_qp_host_ctx_info *info);
1263 void irdma_sc_qp_setctx_roce(struct irdma_sc_qp *qp, __le64 *qp_ctx,
1264 struct irdma_qp_host_ctx_info *info);
1265 enum irdma_status_code irdma_sc_cq_destroy(struct irdma_sc_cq *cq, u64 scratch,
1267 enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
1268 struct irdma_cq_init_info *info);
1269 void irdma_sc_cq_resize(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info);
1270 enum irdma_status_code irdma_sc_static_hmc_pages_allocated(struct irdma_sc_cqp *cqp,
1271 u64 scratch, u8 hmc_fn_id,
1272 bool post_sq, bool poll_registers);
1274 void sc_vsi_update_stats(struct irdma_sc_vsi *vsi);
1278 struct irdma_sc_qp *qp;
1279 struct irdma_create_qp_info info;
1284 struct irdma_sc_qp *qp;
1285 struct irdma_modify_qp_info info;
1290 struct irdma_sc_qp *qp;
1292 bool remove_hash_idx;
1297 struct irdma_sc_cq *cq;
1299 bool check_overflow;
1303 struct irdma_sc_cq *cq;
1304 struct irdma_modify_cq_info info;
1309 struct irdma_sc_cq *cq;
1314 struct irdma_sc_dev *dev;
1315 struct irdma_allocate_stag_info info;
1320 struct irdma_sc_dev *dev;
1321 struct irdma_mw_alloc_info info;
1326 struct irdma_sc_dev *dev;
1327 struct irdma_reg_ns_stag_info info;
1329 } mr_reg_non_shared;
1332 struct irdma_sc_dev *dev;
1333 struct irdma_dealloc_stag_info info;
1338 struct irdma_sc_cqp *cqp;
1339 struct irdma_add_arp_cache_entry_info info;
1341 } add_arp_cache_entry;
1344 struct irdma_sc_cqp *cqp;
1347 } del_arp_cache_entry;
1350 struct irdma_sc_cqp *cqp;
1351 struct irdma_local_mac_entry_info info;
1353 } add_local_mac_entry;
1356 struct irdma_sc_cqp *cqp;
1359 u8 ignore_ref_count;
1360 } del_local_mac_entry;
1363 struct irdma_sc_cqp *cqp;
1365 } alloc_local_mac_entry;
1368 struct irdma_sc_cqp *cqp;
1369 struct irdma_cqp_manage_push_page_info info;
1374 struct irdma_sc_dev *dev;
1375 struct irdma_upload_context_info info;
1377 } qp_upload_context;
1380 struct irdma_sc_dev *dev;
1381 struct irdma_hmc_fcn_info info;
1386 struct irdma_sc_ceq *ceq;
1391 struct irdma_sc_ceq *ceq;
1396 struct irdma_sc_aeq *aeq;
1401 struct irdma_sc_aeq *aeq;
1406 struct irdma_sc_qp *qp;
1407 struct irdma_qp_flush_info info;
1412 struct irdma_sc_qp *qp;
1413 struct irdma_gen_ae_info info;
1418 struct irdma_sc_cqp *cqp;
1426 struct irdma_sc_cqp *cqp;
1434 struct irdma_sc_cqp *cqp;
1435 struct irdma_apbvt_info info;
1437 } manage_apbvt_entry;
1440 struct irdma_sc_cqp *cqp;
1441 struct irdma_qhash_table_info info;
1443 } manage_qhash_table_entry;
1446 struct irdma_sc_dev *dev;
1447 struct irdma_update_sds_info info;
1452 struct irdma_sc_cqp *cqp;
1453 struct irdma_sc_qp *qp;
1458 struct irdma_sc_cqp *cqp;
1459 struct irdma_ah_info info;
1464 struct irdma_sc_cqp *cqp;
1465 struct irdma_ah_info info;
1470 struct irdma_sc_cqp *cqp;
1471 struct irdma_mcast_grp_info info;
1476 struct irdma_sc_cqp *cqp;
1477 struct irdma_mcast_grp_info info;
1482 struct irdma_sc_cqp *cqp;
1483 struct irdma_mcast_grp_info info;
1488 struct irdma_sc_cqp *cqp;
1489 struct irdma_stats_inst_info info;
1494 struct irdma_sc_cqp *cqp;
1495 struct irdma_stats_gather_info info;
1500 struct irdma_sc_cqp *cqp;
1501 struct irdma_ws_node_info info;
1506 struct irdma_sc_cqp *cqp;
1507 struct irdma_up_info info;
1512 struct irdma_sc_cqp *cqp;
1513 struct irdma_dma_mem query_buff_mem;
1519 struct cqp_cmds_info {
1520 struct list_head cqp_cmd_entry;
1526 __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch,
1530 * irdma_sc_cqp_get_next_send_wqe - get next wqe on cqp sq
1531 * @cqp: struct for cqp hw
1532 * @scratch: private data for CQP WQE
1534 static inline __le64 *irdma_sc_cqp_get_next_send_wqe(struct irdma_sc_cqp *cqp, u64 scratch)
1538 return irdma_sc_cqp_get_next_send_wqe_idx(cqp, scratch, &wqe_idx);
1540 #endif /* IRDMA_TYPE_H */