/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2013-2020, Mellanox Technologies inc. All rights reserved.
 * Copyright (c) 2020, Intel Corporation. All rights reserved.
 */

#ifndef MLX5_IB_H
#define MLX5_IB_H

#include <linux/kernel.h>
#include <linux/sched.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_smi.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cq.h>
#include <linux/mlx5/fs.h>
#include <linux/mlx5/qp.h>
#include <linux/types.h>
#include <linux/mlx5/transobj.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/mlx5-abi.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>

#include "srq.h"
#include "qp.h"
#define mlx5_ib_dbg(_dev, format, arg...)                                      \
	dev_dbg(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_err(_dev, format, arg...)                                      \
	dev_err(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,      \
		__LINE__, current->pid, ##arg)

#define mlx5_ib_warn(_dev, format, arg...)                                     \
	dev_warn(&(_dev)->ib_dev.dev, "%s:%d:(pid %d): " format, __func__,     \
		 __LINE__, current->pid, ##arg)
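/*
 * Illustrative use (editorial note, not from the original header): the three
 * wrappers take the same arguments as dev_dbg()/dev_err()/dev_warn() plus the
 * mlx5_ib device, and prefix each message with function, line and pid, e.g.
 *
 *	mlx5_ib_warn(dev, "umem get failed (%ld)\n", PTR_ERR(umem));
 *
 * is emitted as "<function>:<line>:(pid <pid>): umem get failed (...)".
 */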
#define MLX5_IB_DEFAULT_UIDX 0xffffff
#define MLX5_USER_ASSIGNED_UIDX_MASK __mlx5_mask(qpc, user_index)
static __always_inline unsigned long
__mlx5_log_page_size_to_bitmap(unsigned int log_pgsz_bits,
			       unsigned int pgsz_shift)
{
	unsigned int largest_pg_shift =
		min_t(unsigned long, (1ULL << log_pgsz_bits) - 1 + pgsz_shift,
		      BITS_PER_LONG - 1);

	/*
	 * Despite a command allowing it, the device does not support lower than
	 * 4k page size.
	 */
	pgsz_shift = max_t(unsigned int, MLX5_ADAPTER_PAGE_SHIFT, pgsz_shift);
	return GENMASK(largest_pg_shift, pgsz_shift);
}
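/*
 * Worked example (illustrative): for a 5 bit log_page_size field and
 * pgsz_shift = 12, the largest encodable shift is (2^5 - 1) + 12 = 43, so the
 * function returns GENMASK(43, 12), i.e. every power-of-two page size from 4K
 * up to 2^43 bytes that the field can express.
 */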
/*
 * For mkc users, instead of a page_offset the command has a start_iova which
 * specifies both the page_offset and the on-the-wire IOVA
 */
#define mlx5_umem_find_best_pgsz(umem, typ, log_pgsz_fld, pgsz_shift, iova)    \
	ib_umem_find_best_pgsz(umem,                                           \
			       __mlx5_log_page_size_to_bitmap(                 \
				       __mlx5_bit_sz(typ, log_pgsz_fld),       \
				       pgsz_shift),                            \
			       iova)
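/*
 * Typical use (illustrative sketch): MR registration asks for the best page
 * size the mkey context can encode for a umem mapped at @iova:
 *
 *	page_size = mlx5_umem_find_best_pgsz(umem, mkc, log_page_size, 0, iova);
 */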
static __always_inline unsigned long
__mlx5_page_offset_to_bitmask(unsigned int page_offset_bits,
			      unsigned int offset_shift)
{
	unsigned int largest_offset_shift =
		min_t(unsigned long, page_offset_bits - 1 + offset_shift,
		      BITS_PER_LONG - 1);

	return GENMASK(largest_offset_shift, offset_shift);
}

/*
 * QP/CQ/WQ/etc type commands take a page offset that satisfies:
 *   page_offset_quantized * (page_size/scale) = page_offset
 * Which restricts allowed page sizes to ones that satisfy the above.
 */
unsigned long __mlx5_umem_find_best_quantized_pgoff(
	struct ib_umem *umem, unsigned long pgsz_bitmap,
	unsigned int page_offset_bits, u64 pgoff_bitmask, unsigned int scale,
	unsigned int *page_offset_quantized);
#define mlx5_umem_find_best_quantized_pgoff(umem, typ, log_pgsz_fld,           \
					    pgsz_shift, page_offset_fld,       \
					    scale, page_offset_quantized)      \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld),                           \
		GENMASK(31, order_base_2(scale)), scale,                       \
		page_offset_quantized)
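/*
 * Worked example (illustrative): with page_size = 4096 and scale = 64 the
 * device expresses the offset in units of page_size/scale = 64 bytes, so a
 * byte offset of 256 into the first page quantizes to
 * page_offset_quantized = 4, satisfying 4 * (4096/64) = 256.
 */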
#define mlx5_umem_find_best_cq_quantized_pgoff(umem, typ, log_pgsz_fld,        \
					       pgsz_shift, page_offset_fld,    \
					       scale, page_offset_quantized)   \
	__mlx5_umem_find_best_quantized_pgoff(                                 \
		umem,                                                          \
		__mlx5_log_page_size_to_bitmap(                                \
			__mlx5_bit_sz(typ, log_pgsz_fld), pgsz_shift),         \
		__mlx5_bit_sz(typ, page_offset_fld), 0, scale,                 \
		page_offset_quantized)
enum {
	MLX5_IB_MMAP_OFFSET_START = 9,
	MLX5_IB_MMAP_OFFSET_END = 255,
};

enum {
	MLX5_IB_MMAP_CMD_SHIFT	= 8,
	MLX5_IB_MMAP_CMD_MASK	= 0xff,
};

enum {
	MLX5_RES_SCAT_DATA32_CQE	= 0x1,
	MLX5_RES_SCAT_DATA64_CQE	= 0x2,
	MLX5_REQ_SCAT_DATA32_CQE	= 0x11,
	MLX5_REQ_SCAT_DATA64_CQE	= 0x22,
};

enum mlx5_ib_mad_ifc_flags {
	MLX5_MAD_IFC_IGNORE_MKEY	= 1,
	MLX5_MAD_IFC_IGNORE_BKEY	= 2,
	MLX5_MAD_IFC_NET_VIEW		= 4,
};

enum {
	MLX5_CROSS_CHANNEL_BFREG = 0,
};

enum {
	MLX5_TM_MAX_RNDV_MSG_SIZE	= 64,
	MLX5_TM_MAX_SGE			= 1,
};

enum {
	MLX5_IB_INVALID_UAR_INDEX	= BIT(31),
	MLX5_IB_INVALID_BFREG		= BIT(31),
};

enum {
	MLX5_MAX_MEMIC_PAGES = 0x100,
	MLX5_MEMIC_ALLOC_SIZE_MASK = 0x3f,
};

enum {
	MLX5_MEMIC_BASE_ALIGN	= 6,
	MLX5_MEMIC_BASE_SIZE	= 1 << MLX5_MEMIC_BASE_ALIGN,
};

enum mlx5_ib_mmap_type {
	MLX5_IB_MMAP_TYPE_MEMIC = 1,
	MLX5_IB_MMAP_TYPE_VAR = 2,
	MLX5_IB_MMAP_TYPE_UAR_WC = 3,
	MLX5_IB_MMAP_TYPE_UAR_NC = 4,
	MLX5_IB_MMAP_TYPE_MEMIC_OP = 5,
};
struct mlx5_bfreg_info {
	int			num_low_latency_bfregs;

	/*
	 * protect bfreg allocation data structs
	 */
	struct mutex		lock;
	u32			num_static_sys_pages;
	u32			total_num_bfregs;
};

struct mlx5_ib_ucontext {
	struct ib_ucontext	ibucontext;
	struct list_head	db_page_list;

	/* protect doorbell record alloc/free
	 */
	struct mutex		db_page_mutex;
	struct mlx5_bfreg_info	bfregi;
	u8			cqe_version;
	/* Transport Domain number */
	u32			tdn;

	u64			lib_caps;
	u16			devx_uid;
	/* For RoCE LAG TX affinity */
	atomic_t		tx_port_affinity;
};

static inline struct mlx5_ib_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
{
	return container_of(ibucontext, struct mlx5_ib_ucontext, ibucontext);
}
struct mlx5_ib_pd {
	struct ib_pd		ibpd;
	u32			pdn;
	u16			uid;
};

enum {
	MLX5_IB_FLOW_ACTION_MODIFY_HEADER,
	MLX5_IB_FLOW_ACTION_PACKET_REFORMAT,
	MLX5_IB_FLOW_ACTION_DECAP,
};

#define MLX5_IB_FLOW_MCAST_PRIO		(MLX5_BY_PASS_NUM_PRIOS - 1)
#define MLX5_IB_FLOW_LAST_PRIO		(MLX5_BY_PASS_NUM_REGULAR_PRIOS - 1)
#if (MLX5_IB_FLOW_LAST_PRIO <= 0)
#error "Invalid number of bypass priorities"
#endif
#define MLX5_IB_FLOW_LEFTOVERS_PRIO	(MLX5_IB_FLOW_MCAST_PRIO + 1)

#define MLX5_IB_NUM_FLOW_FT		(MLX5_IB_FLOW_LEFTOVERS_PRIO + 1)
#define MLX5_IB_NUM_SNIFFER_FTS		2
#define MLX5_IB_NUM_EGRESS_FTS		1
#define MLX5_IB_NUM_FDB_FTS		MLX5_BY_PASS_NUM_REGULAR_PRIOS
struct mlx5_ib_flow_prio {
	struct mlx5_flow_table		*flow_table;
	unsigned int			refcount;
};

struct mlx5_ib_flow_handler {
	struct list_head		list;
	struct ib_flow			ibflow;
	struct mlx5_ib_flow_prio	*prio;
	struct mlx5_flow_handle		*rule;
	struct ib_counters		*ibcounters;
	struct mlx5_ib_dev		*dev;
	struct mlx5_ib_flow_matcher	*flow_matcher;
};

struct mlx5_ib_flow_matcher {
	struct mlx5_ib_match_params matcher_mask;
	int			mask_len;
	enum mlx5_ib_flow_type	flow_type;
	enum mlx5_flow_namespace_type ns_type;
	u16			priority;
	struct mlx5_core_dev	*mdev;
	atomic_t		usecnt;
	u8			match_criteria_enable;
};

struct mlx5_ib_pp {
	u16 index;
	struct mlx5_core_dev *mdev;
};

enum mlx5_ib_optional_counter_type {
	MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS,
	MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS,
	MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS,

	MLX5_IB_OPCOUNTER_MAX,
};
struct mlx5_ib_flow_db {
	struct mlx5_ib_flow_prio	prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	egress_prios[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	sniffer[MLX5_IB_NUM_SNIFFER_FTS];
	struct mlx5_ib_flow_prio	egress[MLX5_IB_NUM_EGRESS_FTS];
	struct mlx5_ib_flow_prio	fdb[MLX5_IB_NUM_FDB_FTS];
	struct mlx5_ib_flow_prio	rdma_rx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	rdma_tx[MLX5_IB_NUM_FLOW_FT];
	struct mlx5_ib_flow_prio	opfcs[MLX5_IB_OPCOUNTER_MAX];
	struct mlx5_flow_table		*lag_demux_ft;
	/* Protect flow steering bypass flow tables
	 * when add/del flow rules.
	 * only single add/removal of flow steering rule could be done
	 * simultaneously.
	 */
	struct mutex			lock;
};

/* Use macros here so that we don't have to duplicate
 * enum ib_send_flags and enum ib_qp_type for low-level driver
 */
#define MLX5_IB_SEND_UMR_ENABLE_MR	       (IB_SEND_RESERVED_START << 0)
#define MLX5_IB_SEND_UMR_DISABLE_MR	       (IB_SEND_RESERVED_START << 1)
#define MLX5_IB_SEND_UMR_FAIL_IF_FREE	       (IB_SEND_RESERVED_START << 2)
#define MLX5_IB_SEND_UMR_UPDATE_XLT	       (IB_SEND_RESERVED_START << 3)
#define MLX5_IB_SEND_UMR_UPDATE_TRANSLATION    (IB_SEND_RESERVED_START << 4)
#define MLX5_IB_SEND_UMR_UPDATE_PD_ACCESS	IB_SEND_RESERVED_END

#define MLX5_IB_QPT_REG_UMR	IB_QPT_RESERVED1
/*
 * IB_QPT_GSI creates the software wrapper around GSI, and MLX5_IB_QPT_HW_GSI
 * creates the actual hardware QP.
 */
#define MLX5_IB_QPT_HW_GSI	IB_QPT_RESERVED2
#define MLX5_IB_QPT_DCI		IB_QPT_RESERVED3
#define MLX5_IB_QPT_DCT		IB_QPT_RESERVED4
#define MLX5_IB_WR_UMR		IB_WR_RESERVED1

#define MLX5_IB_UMR_OCTOWORD	       16
#define MLX5_IB_UMR_XLT_ALIGNMENT      64

#define MLX5_IB_UPD_XLT_ZAP	      BIT(0)
#define MLX5_IB_UPD_XLT_ENABLE	      BIT(1)
#define MLX5_IB_UPD_XLT_ATOMIC	      BIT(2)
#define MLX5_IB_UPD_XLT_ADDR	      BIT(3)
#define MLX5_IB_UPD_XLT_PD	      BIT(4)
#define MLX5_IB_UPD_XLT_ACCESS	      BIT(5)
#define MLX5_IB_UPD_XLT_INDIRECT      BIT(6)

/* Private QP creation flags to be passed in ib_qp_init_attr.create_flags.
 *
 * These flags are intended for internal use by the mlx5_ib driver, and they
 * rely on the range reserved for that use in the ib_qp_create_flags enum.
 */
#define MLX5_IB_QP_CREATE_SQPN_QP1	IB_QP_CREATE_RESERVED_START
#define MLX5_IB_QP_CREATE_WC_TEST	(IB_QP_CREATE_RESERVED_START << 1)
struct wr_list {
	u16	opcode;
	u16	next;
};

enum mlx5_ib_rq_flags {
	MLX5_IB_RQ_CVLAN_STRIPPING	= 1 << 0,
	MLX5_IB_RQ_PCI_WRITE_END_PADDING = 1 << 1,
};

struct mlx5_ib_wq {
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	u32		       *wr_data;
	struct wr_list	       *w_list;
	unsigned	       *wqe_head;
	u16			unsig_count;

	/* serialize post to the work queue
	 */
	spinlock_t		lock;
};
enum mlx5_ib_wq_flags {
	MLX5_IB_WQ_FLAGS_DELAY_DROP = 0x1,
	MLX5_IB_WQ_FLAGS_STRIDING_RQ = 0x2,
};

#define MLX5_MIN_SINGLE_WQE_LOG_NUM_STRIDES 9
#define MLX5_MAX_SINGLE_WQE_LOG_NUM_STRIDES 16
#define MLX5_MIN_SINGLE_STRIDE_LOG_NUM_BYTES 6
#define MLX5_MAX_SINGLE_STRIDE_LOG_NUM_BYTES 13
#define MLX5_EXT_MIN_SINGLE_WQE_LOG_NUM_STRIDES 3
struct mlx5_ib_rwq {
	struct ib_wq		ibwq;
	struct mlx5_core_qp	core_qp;
	u32			two_byte_shift_en;
	u32			single_stride_log_num_of_bytes;
	struct ib_umem		*umem;
	unsigned int		page_shift;
	u32			create_flags; /* Use enum mlx5_ib_wq_flags */
};

struct mlx5_ib_rwq_ind_table {
	struct ib_rwq_ind_table ib_rwq_ind_tbl;
	u32			rqtn;
	u16			uid;
};

struct mlx5_ib_ubuffer {
	struct ib_umem	       *umem;
	int			buf_size;
	u64			buf_addr;
};

struct mlx5_ib_qp_base {
	struct mlx5_ib_qp	*container_mibqp;
	struct mlx5_core_qp	mqp;
	struct mlx5_ib_ubuffer	ubuffer;
};

struct mlx5_ib_qp_trans {
	struct mlx5_ib_qp_base	base;
	u16			xrcdn;
	u32			alt_port;
	u8			atomic_rd_en;
	u8			resp_depth;
};
struct mlx5_ib_rss_qp {
	u32	tirn;
};

struct mlx5_ib_rq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*rq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	u32			tirn;
	u8			state;
	u32			flags;
};

struct mlx5_ib_sq {
	struct mlx5_ib_qp_base base;
	struct mlx5_ib_wq	*sq;
	struct mlx5_ib_ubuffer	ubuffer;
	struct mlx5_db		*doorbell;
	struct mlx5_flow_handle	*flow_rule;
	u32			tisn;
	u8			state;
};

struct mlx5_ib_raw_packet_qp {
	struct mlx5_ib_sq sq;
	struct mlx5_ib_rq rq;
};

struct mlx5_bf {
	int			buf_size;
	unsigned long		offset;
	struct mlx5_sq_bfreg   *bfreg;
};

struct mlx5_ib_dct {
	struct mlx5_core_dct	mdct;
	u32			*in;
};
struct mlx5_ib_gsi_qp {
	struct ib_qp *rx_qp;
	u32 port_num;
	struct ib_qp_cap cap;
	struct ib_cq *cq;
	struct mlx5_ib_gsi_wr *outstanding_wrs;
	u32 outstanding_pi, outstanding_ci;
	int num_qps;
	/* Protects access to the tx_qps. Post send operations synchronize
	 * with tx_qp creation in setup_qp(). Also protects the
	 * outstanding_wrs array and indices.
	 */
	spinlock_t lock;
	struct ib_qp **tx_qps;
};
struct mlx5_ib_qp {
	struct ib_qp		ibqp;
	union {
		struct mlx5_ib_qp_trans trans_qp;
		struct mlx5_ib_raw_packet_qp raw_packet_qp;
		struct mlx5_ib_rss_qp rss_qp;
		struct mlx5_ib_dct dct;
		struct mlx5_ib_gsi_qp gsi;
	};
	struct mlx5_frag_buf	buf;

	struct mlx5_db		db;
	struct mlx5_ib_wq	rq;

	struct mlx5_ib_wq	sq;

	/* serialize qp state modifications
	 */
	struct mutex		mutex;
	/* cached variant of create_flags from struct ib_qp_init_attr */
	u32			flags;
	struct mlx5_bf		bf;

	/* only for user space QPs. For kernel
	 * we have it from the bf object
	 */
	int			bfregn;

	struct list_head	qps_list;
	struct list_head	cq_recv_list;
	struct list_head	cq_send_list;
	struct mlx5_rate_limit	rl;
	/*
	 * IB/core doesn't store low-level QP types, so
	 * store both MLX and IBTA types in the field below.
	 */
	enum ib_qp_type		type;
	/* A flag to indicate that a new counter was configured
	 * but has not yet taken effect
	 */
	u32			counter_pending;
};
struct mlx5_ib_cq_buf {
	struct mlx5_frag_buf_ctrl fbc;
	struct mlx5_frag_buf	frag_buf;
	struct ib_umem		*umem;
	int			cqe_size;
	int			nent;
};

struct mlx5_umr_wr {
	struct ib_send_wr		wr;
	u64				virt_addr;
	u64				offset;
	struct ib_pd			*pd;
	unsigned int			page_shift;
	unsigned int			xlt_size;
	u64				length;
	int				access_flags;
	u32				mkey;
	u8				ignore_free_state:1;
};

static inline const struct mlx5_umr_wr *umr_wr(const struct ib_send_wr *wr)
{
	return container_of(wr, struct mlx5_umr_wr, wr);
}

enum mlx5_ib_cq_pr_flags {
	MLX5_IB_CQ_PR_FLAGS_CQE_128_PAD	= 1 << 0,
	MLX5_IB_CQ_PR_FLAGS_REAL_TIME_TS = 1 << 1,
};
struct mlx5_ib_cq {
	struct ib_cq		ibcq;
	struct mlx5_core_cq	mcq;
	struct mlx5_ib_cq_buf	buf;
	struct mlx5_db		db;

	/* serialize access to the CQ
	 */
	spinlock_t		lock;

	/* protect resize cq
	 */
	struct mutex		resize_mutex;
	struct mlx5_ib_cq_buf  *resize_buf;
	struct ib_umem	       *resize_umem;
	int			cqe_size;
	struct list_head	list_send_qp;
	struct list_head	list_recv_qp;
	u32			create_flags;
	struct list_head	wc_list;
	enum ib_cq_notify_flags notify_flags;
	struct work_struct	notify_work;
	u16			private_flags; /* Use mlx5_ib_cq_pr_flags */
};

struct mlx5_ib_wc {
	struct ib_wc wc;
	struct list_head list;
};
struct mlx5_ib_srq {
	struct ib_srq		ibsrq;
	struct mlx5_core_srq	msrq;
	struct mlx5_frag_buf	buf;
	struct mlx5_db		db;
	struct mlx5_frag_buf_ctrl fbc;
	u64		       *wrid;
	/* protect SRQ handling
	 */
	spinlock_t		lock;
	int			head;
	int			tail;
	u16			wqe_ctr;
	struct ib_umem	       *umem;
	/* serialize arming a SRQ
	 */
	struct mutex		mutex;
	int			wq_sig;
};

struct mlx5_ib_xrcd {
	struct ib_xrcd		ibxrcd;
	u32			xrcdn;
};

enum mlx5_ib_mtt_access_flags {
	MLX5_IB_MTT_READ  = (1 << 0),
	MLX5_IB_MTT_WRITE = (1 << 1),
};

struct mlx5_user_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	u8 mmap_flag;
	u64 address;
	u32 page_idx;
};

enum mlx5_mkey_type {
	MLX5_MKEY_MR = 1,
	MLX5_MKEY_MW,
	MLX5_MKEY_INDIRECT_DEVX,
};

struct mlx5_ib_mkey {
	u32 key;
	enum mlx5_mkey_type type;
	unsigned int ndescs;
	struct wait_queue_head wait;
	refcount_t usecount;
};
#define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE)

#define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					 IB_ACCESS_REMOTE_WRITE  |\
					 IB_ACCESS_REMOTE_READ   |\
					 IB_ACCESS_REMOTE_ATOMIC |\
					 IB_ZERO_BASED)

#define MLX5_IB_DM_SW_ICM_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE   |\
					  IB_ACCESS_REMOTE_WRITE  |\
					  IB_ACCESS_REMOTE_READ   |\
					  IB_ZERO_BASED)

#define mlx5_update_odp_stats(mr, counter_name, value)		\
	atomic64_add(value, &((mr)->odp_stats.counter_name))
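/*
 * Illustrative use (assumes the faults counter in struct ib_odp_counters):
 * the ODP page-fault path bumps the per-MR statistic with
 *
 *	mlx5_update_odp_stats(mr, faults, npages);
 */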
struct mlx5_ib_mr {
	struct ib_mr ibmr;
	struct mlx5_ib_mkey mmkey;

	struct mlx5_cache_ent *cache_ent;
	/* Everything after cache_ent is zero'd when MR allocated */
	struct ib_umem *umem;

	union {
		/* Used only while the MR is in the cache */
		struct {
			u32 out[MLX5_ST_SZ_DW(create_mkey_out)];
			struct mlx5_async_work cb_work;
			/* Cache list element */
			struct list_head list;
		};

		/* Used only by kernel MRs (umem == NULL) */
		struct {
			void *descs;
			dma_addr_t desc_map;
			int max_descs;
			int desc_size;
			int access_mode;

			/* For Kernel IB_MR_TYPE_INTEGRITY */
			struct mlx5_core_sig_ctx *sig;
			struct mlx5_ib_mr *pi_mr;
			struct mlx5_ib_mr *klm_mr;
			struct mlx5_ib_mr *mtt_mr;
		};

		/* Used only by User MRs (umem != NULL) */
		struct {
			unsigned int page_shift;
			/* Current access_flags */
			int access_flags;

			/* For User ODP */
			struct mlx5_ib_mr *parent;
			struct xarray implicit_children;
			struct work_struct work;
			struct ib_odp_counters odp_stats;
			bool is_odp_implicit;
		};
	};
};
/* Zero the fields in the mr that are variant depending on usage */
static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr)
{
	memset_after(mr, 0, cache_ent);
}
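/*
 * Note (editorial): memset_after() zeroes every byte of *mr that follows the
 * cache_ent member, so ibmr, mmkey and cache_ent survive while umem and the
 * per-usage union above are reset, matching the "Everything after cache_ent
 * is zero'd" comment in struct mlx5_ib_mr.
 */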
static inline bool is_odp_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_odp;
}

static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING) && mr->umem &&
	       mr->umem->is_dmabuf;
}

struct mlx5_ib_mw {
	struct ib_mw		ibmw;
	struct mlx5_ib_mkey	mmkey;
};

struct mlx5_ib_umr_context {
	struct ib_cqe		cqe;
	enum ib_wc_status	status;
	struct completion	done;
};

struct umr_common {
	struct ib_pd	*pd;
	struct ib_cq	*cq;
	struct ib_qp	*qp;
	/* control access to UMR QP
	 */
	struct semaphore	sem;
};
struct mlx5_cache_ent {
	struct list_head	head;
	/* sync access to the cache entry
	 */
	spinlock_t		lock;

	char			name[4];
	u32			order;
	u32			xlt;
	u32			access_mode;
	u32			page;

	u8 disabled:1;
	u8 fill_to_high_water:1;

	/*
	 * - available_mrs is the length of list head, ie the number of MRs
	 *   available for immediate allocation.
	 * - total_mrs is available_mrs plus all in use MRs that could be
	 *   returned to the cache.
	 * - limit is the low water mark for available_mrs, 2* limit is the
	 *   upper water mark.
	 * - pending is the number of MRs currently being created
	 */
	u32 total_mrs;
	u32 available_mrs;
	u32 limit;
	u32 pending;

	struct mlx5_ib_dev *dev;
	struct delayed_work dwork;
};

struct mlx5_mr_cache {
	struct workqueue_struct *wq;
	struct mlx5_cache_ent	ent[MAX_MR_CACHE_ENTRIES];
	struct dentry		*root;
	unsigned long		last_add;
};
struct mlx5_ib_port_resources {
	struct mlx5_ib_gsi_qp *gsi;
	struct work_struct pkey_change_work;
};

struct mlx5_ib_resources {
	struct ib_cq	*c0;
	u32 xrcdn0;
	u32 xrcdn1;
	struct ib_pd	*p0;
	struct ib_srq	*s0;
	struct ib_srq	*s1;
	struct mlx5_ib_port_resources ports[2];
};

#define MAX_OPFC_RULES 2

struct mlx5_ib_op_fc {
	struct mlx5_fc *fc;
	struct mlx5_flow_handle *rule[MAX_OPFC_RULES];
};

struct mlx5_ib_counters {
	struct rdma_stat_desc *descs;
	size_t *offsets;
	u32 num_q_counters;
	u32 num_cong_counters;
	u32 num_ext_ppcnt_counters;
	u32 num_op_counters;
	u16 set_id;
	struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX];
};
int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num,
			 struct mlx5_ib_op_fc *opfc,
			 enum mlx5_ib_optional_counter_type type);

void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev,
			     struct mlx5_ib_op_fc *opfc,
			     enum mlx5_ib_optional_counter_type type);
struct mlx5_ib_multiport_info;

struct mlx5_ib_multiport {
	struct mlx5_ib_multiport_info *mpi;
	/* To be held when accessing the multiport info */
	spinlock_t mpi_lock;
};

struct mlx5_roce {
	/* Protect mlx5_ib_get_netdev from invoking dev_hold() with a NULL
	 * netdev pointer
	 */
	rwlock_t		netdev_lock;
	struct net_device	*netdev;
	struct notifier_block	nb;
	atomic_t		tx_port_affinity;
	enum ib_port_state last_port_state;
	struct mlx5_ib_dev	*dev;
	u32			native_port_num;
};

struct mlx5_ib_port {
	struct mlx5_ib_counters cnts;
	struct mlx5_ib_multiport mp;
	struct mlx5_ib_dbg_cc_params *dbg_cc_params;
	struct mlx5_roce roce;
	struct mlx5_eswitch_rep		*rep;
};

struct mlx5_ib_dbg_param {
	int			offset;
	struct mlx5_ib_dev	*dev;
	struct dentry		*dentry;
	u32			port_num;
};
enum mlx5_ib_dbg_cc_types {
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE,
	MLX5_IB_DBG_CC_RP_CLAMP_TGT_RATE_ATI,
	MLX5_IB_DBG_CC_RP_TIME_RESET,
	MLX5_IB_DBG_CC_RP_BYTE_RESET,
	MLX5_IB_DBG_CC_RP_THRESHOLD,
	MLX5_IB_DBG_CC_RP_AI_RATE,
	MLX5_IB_DBG_CC_RP_MAX_RATE,
	MLX5_IB_DBG_CC_RP_HAI_RATE,
	MLX5_IB_DBG_CC_RP_MIN_DEC_FAC,
	MLX5_IB_DBG_CC_RP_MIN_RATE,
	MLX5_IB_DBG_CC_RP_RATE_TO_SET_ON_FIRST_CNP,
	MLX5_IB_DBG_CC_RP_DCE_TCP_G,
	MLX5_IB_DBG_CC_RP_DCE_TCP_RTT,
	MLX5_IB_DBG_CC_RP_RATE_REDUCE_MONITOR_PERIOD,
	MLX5_IB_DBG_CC_RP_INITIAL_ALPHA_VALUE,
	MLX5_IB_DBG_CC_RP_GD,
	MLX5_IB_DBG_CC_NP_MIN_TIME_BETWEEN_CNPS,
	MLX5_IB_DBG_CC_NP_CNP_DSCP,
	MLX5_IB_DBG_CC_NP_CNP_PRIO_MODE,
	MLX5_IB_DBG_CC_NP_CNP_PRIO,
	MLX5_IB_DBG_CC_MAX,
};

struct mlx5_ib_dbg_cc_params {
	struct dentry			*root;
	struct mlx5_ib_dbg_param	params[MLX5_IB_DBG_CC_MAX];
};

enum {
	MLX5_MAX_DELAY_DROP_TIMEOUT_MS = 100,
};

struct mlx5_ib_delay_drop {
	struct mlx5_ib_dev     *dev;
	struct work_struct	delay_drop_work;
	/* serialize setting of delay drop */
	struct mutex		lock;
	u32			timeout;
	bool			activate;
	atomic_t		events_cnt;
	atomic_t		rqs_cnt;
	struct dentry		*dir_debugfs;
};
enum mlx5_ib_stages {
	MLX5_IB_STAGE_INIT,
	MLX5_IB_STAGE_FS,
	MLX5_IB_STAGE_CAPS,
	MLX5_IB_STAGE_NON_DEFAULT_CB,
	MLX5_IB_STAGE_ROCE,
	MLX5_IB_STAGE_QP,
	MLX5_IB_STAGE_SRQ,
	MLX5_IB_STAGE_DEVICE_RESOURCES,
	MLX5_IB_STAGE_DEVICE_NOTIFIER,
	MLX5_IB_STAGE_ODP,
	MLX5_IB_STAGE_COUNTERS,
	MLX5_IB_STAGE_CONG_DEBUGFS,
	MLX5_IB_STAGE_UAR,
	MLX5_IB_STAGE_BFREG,
	MLX5_IB_STAGE_PRE_IB_REG_UMR,
	MLX5_IB_STAGE_WHITELIST_UID,
	MLX5_IB_STAGE_IB_REG,
	MLX5_IB_STAGE_POST_IB_REG_UMR,
	MLX5_IB_STAGE_DELAY_DROP,
	MLX5_IB_STAGE_RESTRACK,
	MLX5_IB_STAGE_MAX,
};

struct mlx5_ib_stage {
	int (*init)(struct mlx5_ib_dev *dev);
	void (*cleanup)(struct mlx5_ib_dev *dev);
};

#define STAGE_CREATE(_stage, _init, _cleanup) \
	.stage[_stage] = {.init = _init, .cleanup = _cleanup}

struct mlx5_ib_profile {
	struct mlx5_ib_stage stage[MLX5_IB_STAGE_MAX];
};
struct mlx5_ib_multiport_info {
	struct list_head list;
	struct mlx5_ib_dev *ibdev;
	struct mlx5_core_dev *mdev;
	struct notifier_block mdev_events;
	struct completion unref_comp;
	u64 sys_image_guid;
	u32 mdev_refcnt;
	bool is_master;
	bool unaffiliate;
};

struct mlx5_ib_flow_action {
	struct ib_flow_action		ib_action;
	union {
		struct {
			u64			    ib_flags;
			struct mlx5_accel_esp_xfrm *ctx;
		} esp_aes_gcm;
		struct {
			struct mlx5_ib_dev *dev;
			u32 sub_type;
			union {
				struct mlx5_modify_hdr *modify_hdr;
				struct mlx5_pkt_reformat *pkt_reformat;
			};
		} flow_action_raw;
	};
};

struct mlx5_dm {
	struct mlx5_core_dev *dev;
	/* This lock is used to protect the access to the shared
	 * allocation map when concurrent requests by different
	 * processes are handled.
	 */
	spinlock_t lock;
	DECLARE_BITMAP(memic_alloc_pages, MLX5_MAX_MEMIC_PAGES);
};

struct mlx5_read_counters_attr {
	struct mlx5_fc *hw_cntrs_hndl;
	u64 *out;
	u32 flags;
};

enum mlx5_ib_counters_type {
	MLX5_IB_COUNTERS_FLOW,
};
struct mlx5_ib_mcounters {
	struct ib_counters ibcntrs;
	enum mlx5_ib_counters_type type;
	/* number of counters supported for this counters type */
	u32 counters_num;
	struct mlx5_fc *hw_cntrs_hndl;
	/* read function for this counters type */
	int (*read_counters)(struct ib_device *ibdev,
			     struct mlx5_read_counters_attr *read_attr);
	/* max index set as part of create_flow */
	u32 cntrs_max_index;
	/* number of counters data entries (<description,index> pair) */
	u32 ncounters;
	/* counters data array for descriptions and indexes */
	struct mlx5_ib_flow_counters_desc *counters_data;
	/* protects access to mcounters internal data */
	struct mutex mcntrs_mutex;
};

static inline struct mlx5_ib_mcounters *
to_mcounters(struct ib_counters *ibcntrs)
{
	return container_of(ibcntrs, struct mlx5_ib_mcounters, ibcntrs);
}

int parse_flow_flow_action(struct mlx5_ib_flow_action *maction,
			   bool is_egress,
			   struct mlx5_flow_act *action);

struct mlx5_ib_lb_state {
	/* protect the user_td */
	struct mutex		mutex;
	u32			user_td;
	int			qps;
	bool			enabled;
};
struct mlx5_ib_pf_eq {
	struct notifier_block irq_nb;
	struct mlx5_ib_dev *dev;
	struct mlx5_eq *core;
	struct work_struct work;
	spinlock_t lock; /* Pagefaults spinlock */
	struct workqueue_struct *wq;
	mempool_t *pool;
};

struct mlx5_devx_event_table {
	struct mlx5_nb devx_nb;
	/* serialize updating the event_xa */
	struct mutex event_xa_lock;
	struct xarray event_xa;
};

struct mlx5_var_table {
	/* serialize updating the bitmap */
	struct mutex bitmap_lock;
	unsigned long *bitmap;
	u64 hw_start_addr;
	u32 stride_size;
	u64 num_var_hw_entries;
};

struct mlx5_port_caps {
	bool has_smi;
	u8 ext_port_cap;
};
struct mlx5_ib_dev {
	struct ib_device		ib_dev;
	struct mlx5_core_dev		*mdev;
	struct notifier_block		mdev_events;
	int				num_ports;
	/* serialize update of capability mask
	 */
	struct mutex			cap_mask_mutex;
	u8				ib_active:1;
	u8				is_rep:1;
	u8				lag_active:1;
	u8				wc_support:1;
	u8				fill_delay;
	struct umr_common		umrc;
	/* sync used page count stats
	 */
	struct mlx5_ib_resources	devr;

	atomic_t			mkey_var;
	struct mlx5_mr_cache		cache;
	struct timer_list		delay_timer;
	/* Prevents soft lock on massive reg MRs */
	struct mutex			slow_path_mutex;
	struct ib_odp_caps	odp_caps;
	u64			odp_max_size;
	struct mutex		odp_eq_mutex;
	struct mlx5_ib_pf_eq	odp_pf_eq;

	struct xarray		odp_mkeys;

	struct mlx5_ib_flow_db	*flow_db;
	/* protect resources needed as part of reset flow */
	spinlock_t		reset_flow_resource_lock;
	struct list_head	qp_list;
	/* Array with num_ports elements */
	struct mlx5_ib_port	*port;
	struct mlx5_sq_bfreg	bfreg;
	struct mlx5_sq_bfreg	wc_bfreg;
	struct mlx5_sq_bfreg	fp_bfreg;
	struct mlx5_ib_delay_drop	delay_drop;
	const struct mlx5_ib_profile	*profile;

	struct mlx5_ib_lb_state		lb;
	u8				umr_fence;
	struct list_head		ib_dev_list;
	u64				sys_image_guid;
	struct mlx5_dm			dm;
	u16				devx_whitelist_uid;
	struct mlx5_srq_table		srq_table;
	struct mlx5_qp_table		qp_table;
	struct mlx5_async_ctx		async_ctx;
	struct mlx5_devx_event_table	devx_event_table;
	struct mlx5_var_table		var_table;

	struct xarray			sig_mrs;
	struct mlx5_port_caps		port_caps[MLX5_MAX_PORTS];
	u16				pkey_table_len;
};
static inline struct mlx5_ib_cq *to_mibcq(struct mlx5_core_cq *mcq)
{
	return container_of(mcq, struct mlx5_ib_cq, mcq);
}

static inline struct mlx5_ib_xrcd *to_mxrcd(struct ib_xrcd *ibxrcd)
{
	return container_of(ibxrcd, struct mlx5_ib_xrcd, ibxrcd);
}

static inline struct mlx5_ib_dev *to_mdev(struct ib_device *ibdev)
{
	return container_of(ibdev, struct mlx5_ib_dev, ib_dev);
}

static inline struct mlx5_ib_dev *mr_to_mdev(struct mlx5_ib_mr *mr)
{
	return to_mdev(mr->ibmr.device);
}

static inline struct mlx5_ib_dev *mlx5_udata_to_mdev(struct ib_udata *udata)
{
	struct mlx5_ib_ucontext *context = rdma_udata_to_drv_context(
		udata, struct mlx5_ib_ucontext, ibucontext);

	return to_mdev(context->ibucontext.device);
}

static inline struct mlx5_ib_cq *to_mcq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct mlx5_ib_cq, ibcq);
}

static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
{
	return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
}

static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
	return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}

static inline struct mlx5_ib_pd *to_mpd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct mlx5_ib_pd, ibpd);
}

static inline struct mlx5_ib_srq *to_msrq(struct ib_srq *ibsrq)
{
	return container_of(ibsrq, struct mlx5_ib_srq, ibsrq);
}

static inline struct mlx5_ib_qp *to_mqp(struct ib_qp *ibqp)
{
	return container_of(ibqp, struct mlx5_ib_qp, ibqp);
}

static inline struct mlx5_ib_rwq *to_mrwq(struct ib_wq *ibwq)
{
	return container_of(ibwq, struct mlx5_ib_rwq, ibwq);
}

static inline struct mlx5_ib_rwq_ind_table *to_mrwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_tbl)
{
	return container_of(ib_rwq_ind_tbl, struct mlx5_ib_rwq_ind_table, ib_rwq_ind_tbl);
}

static inline struct mlx5_ib_srq *to_mibsrq(struct mlx5_core_srq *msrq)
{
	return container_of(msrq, struct mlx5_ib_srq, msrq);
}

static inline struct mlx5_ib_mr *to_mmr(struct ib_mr *ibmr)
{
	return container_of(ibmr, struct mlx5_ib_mr, ibmr);
}

static inline struct mlx5_ib_mw *to_mmw(struct ib_mw *ibmw)
{
	return container_of(ibmw, struct mlx5_ib_mw, ibmw);
}

static inline struct mlx5_ib_flow_action *
to_mflow_act(struct ib_flow_action *ibact)
{
	return container_of(ibact, struct mlx5_ib_flow_action, ib_action);
}

static inline struct mlx5_user_mmap_entry *
to_mmmap(struct rdma_user_mmap_entry *rdma_entry)
{
	return container_of(rdma_entry,
			    struct mlx5_user_mmap_entry, rdma_entry);
}
int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
			struct mlx5_db *db);
void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
int mlx5_ib_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_query_ah(struct ib_ah *ibah, struct rdma_ah_attr *ah_attr);
static inline int mlx5_ib_destroy_ah(struct ib_ah *ah, u32 flags)
{
	return 0;
}
int mlx5_ib_create_srq(struct ib_srq *srq, struct ib_srq_init_attr *init_attr,
		       struct ib_udata *udata);
int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
		       enum ib_srq_attr_mask attr_mask, struct ib_udata *udata);
int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr);
int mlx5_ib_destroy_srq(struct ib_srq *srq, struct ib_udata *udata);
int mlx5_ib_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
void mlx5_ib_disable_lb(struct mlx5_ib_dev *dev, bool td, bool qp);
int mlx5_ib_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *init_attr,
		      struct ib_udata *udata);
int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
		      int attr_mask, struct ib_udata *udata);
int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		     struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_destroy_qp(struct ib_qp *qp, struct ib_udata *udata);
void mlx5_ib_drain_sq(struct ib_qp *qp);
void mlx5_ib_drain_rq(struct ib_qp *qp);
int mlx5_ib_read_wqe_sq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_rq(struct mlx5_ib_qp *qp, int wqe_index, void *buffer,
			size_t buflen, size_t *bc);
int mlx5_ib_read_wqe_srq(struct mlx5_ib_srq *srq, int wqe_index, void *buffer,
			 size_t buflen, size_t *bc);
int mlx5_ib_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
		      struct ib_udata *udata);
int mlx5_ib_destroy_cq(struct ib_cq *cq, struct ib_udata *udata);
int mlx5_ib_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period);
int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata);
struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc);
struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
				  u64 virt_addr, int access_flags,
				  struct ib_udata *udata);
struct ib_mr *mlx5_ib_reg_user_mr_dmabuf(struct ib_pd *pd, u64 start,
					 u64 length, u64 virt_addr,
					 int fd, int access_flags,
					 struct ib_udata *udata);
int mlx5_ib_advise_mr(struct ib_pd *pd,
		      enum ib_uverbs_advise_mr_advice advice,
		      u32 flags,
		      struct ib_sge *sg_list,
		      u32 num_sge,
		      struct uverbs_attr_bundle *attrs);
int mlx5_ib_alloc_mw(struct ib_mw *mw, struct ib_udata *udata);
int mlx5_ib_dealloc_mw(struct ib_mw *mw);
int mlx5_ib_update_xlt(struct mlx5_ib_mr *mr, u64 idx, int npages,
		       int page_shift, int flags);
int mlx5_ib_update_mr_pas(struct mlx5_ib_mr *mr, unsigned int flags);
struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
					     int access_flags);
void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *mr);
void mlx5_ib_free_odp_mr(struct mlx5_ib_mr *mr);
struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start,
				    u64 length, u64 virt_addr, int access_flags,
				    struct ib_pd *pd, struct ib_udata *udata);
int mlx5_ib_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata);
struct ib_mr *mlx5_ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
			       u32 max_num_sg);
struct ib_mr *mlx5_ib_alloc_mr_integrity(struct ib_pd *pd,
					 u32 max_num_sg,
					 u32 max_num_meta_sg);
int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents,
		      unsigned int *sg_offset);
int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg,
			 int data_sg_nents, unsigned int *data_sg_offset,
			 struct scatterlist *meta_sg, int meta_sg_nents,
			 unsigned int *meta_sg_offset);
int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u32 port_num,
			const struct ib_wc *in_wc, const struct ib_grh *in_grh,
			const struct ib_mad *in, struct ib_mad *out,
			size_t *out_mad_size, u16 *out_mad_pkey_index);
int mlx5_ib_alloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd, struct ib_udata *udata);
int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port);
int mlx5_query_mad_ifc_system_image_guid(struct ib_device *ibdev,
					 __be64 *sys_image_guid);
int mlx5_query_mad_ifc_max_pkeys(struct ib_device *ibdev,
				 u16 *max_pkeys);
int mlx5_query_mad_ifc_vendor_id(struct ib_device *ibdev,
				 u32 *vendor_id);
int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc);
int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid);
int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index,
			    u16 *pkey);
int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index,
			    union ib_gid *gid);
int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port,
			    struct ib_port_attr *props);
int mlx5_ib_query_port(struct ib_device *ibdev, u32 port,
		       struct ib_port_attr *props);
void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas,
			  u64 access_flags);
void mlx5_ib_copy_pas(u64 *old, u64 *new, int step, int num);
int mlx5_ib_get_cqe_size(struct ib_cq *ibcq);
int mlx5_mr_cache_init(struct mlx5_ib_dev *dev);
int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev);

struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev,
				       struct mlx5_cache_ent *ent,
				       unsigned int access_flags);

int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask,
			    struct ib_mr_status *mr_status);
struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
				struct ib_wq_init_attr *init_attr,
				struct ib_udata *udata);
int mlx5_ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata);
int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		      u32 wq_attr_mask, struct ib_udata *udata);
int mlx5_ib_create_rwq_ind_table(struct ib_rwq_ind_table *ib_rwq_ind_table,
				 struct ib_rwq_ind_table_init_attr *init_attr,
				 struct ib_udata *udata);
int mlx5_ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *wq_ind_table);
struct ib_mr *mlx5_ib_reg_dm_mr(struct ib_pd *pd, struct ib_dm *dm,
				struct ib_dm_mr_attr *attr,
				struct uverbs_attr_bundle *attrs);

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev);
int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq);
void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev);
int __init mlx5_ib_odp_init(void);
void mlx5_ib_odp_cleanup(void);
void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent);
void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
			   struct mlx5_ib_mr *mr, int flags);

int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			       enum ib_uverbs_advise_mr_advice advice,
			       u32 flags, struct ib_sge *sg_list, u32 num_sge);
int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr);
int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr);
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int mlx5_ib_odp_init_one(struct mlx5_ib_dev *ibdev) { return 0; }
static inline int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev,
				      struct mlx5_ib_pf_eq *eq)
{
	return 0;
}
static inline void mlx5_ib_odp_cleanup_one(struct mlx5_ib_dev *ibdev) {}
static inline int mlx5_ib_odp_init(void) { return 0; }
static inline void mlx5_ib_odp_cleanup(void) {}
static inline void mlx5_odp_init_mr_cache_entry(struct mlx5_cache_ent *ent) {}
static inline void mlx5_odp_populate_xlt(void *xlt, size_t idx, size_t nentries,
					 struct mlx5_ib_mr *mr, int flags) {}

static inline int
mlx5_ib_advise_mr_prefetch(struct ib_pd *pd,
			   enum ib_uverbs_advise_mr_advice advice, u32 flags,
			   struct ib_sge *sg_list, u32 num_sge)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_odp_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
static inline int mlx5_ib_init_dmabuf_mr(struct mlx5_ib_mr *mr)
{
	return -EOPNOTSUPP;
}
#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
extern const struct mmu_interval_notifier_ops mlx5_mn_ops;

/* Needed for rep profile */
void __mlx5_ib_remove(struct mlx5_ib_dev *dev,
		      const struct mlx5_ib_profile *profile,
		      int stage);
int __mlx5_ib_add(struct mlx5_ib_dev *dev,
		  const struct mlx5_ib_profile *profile);

int mlx5_ib_get_vf_config(struct ib_device *device, int vf,
			  u32 port, struct ifla_vf_info *info);
int mlx5_ib_set_vf_link_state(struct ib_device *device, int vf,
			      u32 port, int state);
int mlx5_ib_get_vf_stats(struct ib_device *device, int vf,
			 u32 port, struct ifla_vf_stats *stats);
int mlx5_ib_get_vf_guid(struct ib_device *device, int vf, u32 port,
			struct ifla_vf_guid *node_guid,
			struct ifla_vf_guid *port_guid);
int mlx5_ib_set_vf_guid(struct ib_device *device, int vf, u32 port,
			u64 guid, int type);
__be16 mlx5_get_roce_udp_sport_min(const struct mlx5_ib_dev *dev,
				   const struct ib_gid_attr *attr);

void mlx5_ib_cleanup_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);
void mlx5_ib_init_cong_debugfs(struct mlx5_ib_dev *dev, u32 port_num);

/* GSI QP helper functions */
int mlx5_ib_create_gsi(struct ib_pd *pd, struct mlx5_ib_qp *mqp,
		       struct ib_qp_init_attr *attr);
int mlx5_ib_destroy_gsi(struct mlx5_ib_qp *mqp);
int mlx5_ib_gsi_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
			  int attr_mask);
int mlx5_ib_gsi_query_qp(struct ib_qp *qp, struct ib_qp_attr *qp_attr,
			 int qp_attr_mask,
			 struct ib_qp_init_attr *qp_init_attr);
int mlx5_ib_gsi_post_send(struct ib_qp *qp, const struct ib_send_wr *wr,
			  const struct ib_send_wr **bad_wr);
int mlx5_ib_gsi_post_recv(struct ib_qp *qp, const struct ib_recv_wr *wr,
			  const struct ib_recv_wr **bad_wr);
void mlx5_ib_gsi_pkey_change(struct mlx5_ib_gsi_qp *gsi);

int mlx5_ib_generate_wc(struct ib_cq *ibcq, struct ib_wc *wc);

void mlx5_ib_free_bfreg(struct mlx5_ib_dev *dev, struct mlx5_bfreg_info *bfregi,
			int bfregn);
struct mlx5_ib_dev *mlx5_ib_get_ibdev_from_mpi(struct mlx5_ib_multiport_info *mpi);
struct mlx5_core_dev *mlx5_ib_get_native_port_mdev(struct mlx5_ib_dev *dev,
						   u32 ib_port_num,
						   u32 *native_port_num);
void mlx5_ib_put_native_port_mdev(struct mlx5_ib_dev *dev,
				  u32 port_num);

extern const struct uapi_definition mlx5_ib_devx_defs[];
extern const struct uapi_definition mlx5_ib_flow_defs[];
extern const struct uapi_definition mlx5_ib_qos_defs[];
extern const struct uapi_definition mlx5_ib_std_types_defs[];
static inline int is_qp1(enum ib_qp_type qp_type)
{
	return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI;
}

#define MLX5_MAX_UMR_SHIFT 16
#define MLX5_MAX_UMR_PAGES (1 << MLX5_MAX_UMR_SHIFT)

static inline u32 check_cq_create_flags(u32 flags)
{
	/*
	 * Returns a non-zero value for unsupported CQ
	 * create flags, otherwise returns zero.
	 */
	return (flags & ~(IB_UVERBS_CQ_FLAGS_IGNORE_OVERRUN |
			  IB_UVERBS_CQ_FLAGS_TIMESTAMP_COMPLETION));
}

static inline int verify_assign_uidx(u8 cqe_version, u32 cmd_uidx,
				     u32 *user_index)
{
	if (cqe_version) {
		if ((cmd_uidx == MLX5_IB_DEFAULT_UIDX) ||
		    (cmd_uidx & ~MLX5_USER_ASSIGNED_UIDX_MASK))
			return -EINVAL;
		*user_index = cmd_uidx;
	} else {
		*user_index = MLX5_IB_DEFAULT_UIDX;
	}

	return 0;
}
static inline int get_qp_user_index(struct mlx5_ib_ucontext *ucontext,
				    struct mlx5_ib_create_qp *ucmd,
				    int inlen,
				    u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}

static inline int get_srq_user_index(struct mlx5_ib_ucontext *ucontext,
				     struct mlx5_ib_create_srq *ucmd,
				     int inlen,
				     u32 *user_index)
{
	u8 cqe_version = ucontext->cqe_version;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) && !cqe_version &&
	    (ucmd->uidx == MLX5_IB_DEFAULT_UIDX))
		return 0;

	if ((offsetofend(typeof(*ucmd), uidx) <= inlen) != !!cqe_version)
		return -EINVAL;

	return verify_assign_uidx(cqe_version, ucmd->uidx, user_index);
}
static inline int get_uars_per_sys_page(struct mlx5_ib_dev *dev, bool lib_support)
{
	return lib_support && MLX5_CAP_GEN(dev->mdev, uar_4k) ?
				MLX5_UARS_IN_PAGE : 1;
}
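/*
 * Illustrative note: MLX5_UARS_IN_PAGE is the number of 4K device UARs that
 * fit in one kernel page, so on a 64K-page arm64 kernel with uar_4k support
 * a single system page backs 16 UARs; without the capability (or without
 * userspace support) each UAR consumes a full system page.
 */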
extern void *xlt_emergency_page;

int bfregn_to_uar_index(struct mlx5_ib_dev *dev,
			struct mlx5_bfreg_info *bfregi, u32 bfregn,
			bool dyn_bfreg);

static inline bool mlx5_ib_can_load_pas_with_umr(struct mlx5_ib_dev *dev,
						 size_t length)
{
	/*
	 * umr_check_mkey_mask() rejects MLX5_MKEY_MASK_PAGE_SIZE which is
	 * always set if MLX5_IB_SEND_UMR_UPDATE_TRANSLATION (aka
	 * MLX5_IB_UPD_XLT_ADDR and MLX5_IB_UPD_XLT_ENABLE) is set. Thus, a mkey
	 * can never be enabled without this capability. Simplify this weird
	 * quirky hardware by just saying it can't use PAS lists with UMR at
	 * all.
	 */
	if (MLX5_CAP_GEN(dev->mdev, umr_modify_entity_size_disabled))
		return false;

	/*
	 * length is the size of the MR in bytes when mlx5_ib_update_xlt() is
	 * used.
	 */
	if (!MLX5_CAP_GEN(dev->mdev, umr_extended_translation_offset) &&
	    length >= MLX5_MAX_UMR_PAGES * PAGE_SIZE)
		return false;
	return true;
}
/*
 * true if an existing MR can be reconfigured to new access_flags using UMR.
 * Older HW cannot use UMR to update certain elements of the MKC. See
 * umr_check_mkey_mask() and get_umr_update_access_mask().
 */
static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev,
						 unsigned int current_access_flags,
						 unsigned int target_access_flags)
{
	unsigned int diffs = current_access_flags ^ target_access_flags;

	if ((diffs & IB_ACCESS_REMOTE_ATOMIC) &&
	    MLX5_CAP_GEN(dev->mdev, atomic) &&
	    MLX5_CAP_GEN(dev->mdev, umr_modify_atomic_disabled))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_write_umr))
		return false;

	if ((diffs & IB_ACCESS_RELAXED_ORDERING) &&
	    MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read) &&
	    !MLX5_CAP_GEN(dev->mdev, relaxed_ordering_read_umr))
		return false;

	return true;
}

static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev,
				       struct mlx5_ib_mkey *mmkey)
{
	refcount_set(&mmkey->usecount, 1);

	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mmkey->key),
			       mmkey, GFP_KERNEL));
}
/* deref an mkey that can participate in ODP flow */
static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	if (refcount_dec_and_test(&mmkey->usecount))
		wake_up(&mmkey->wait);
}

/* deref an mkey that can participate in ODP flow and wait for release */
static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey)
{
	mlx5r_deref_odp_mkey(mmkey);
	wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0);
}
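/*
 * Illustrative lifecycle (editorial sketch, not from the original header):
 * an ODP mkey is published with mlx5r_store_odp_mkey(), which initializes
 * usecount to 1; lookups take additional references, and teardown drops the
 * initial reference and blocks until all users are gone:
 *
 *	err = mlx5r_store_odp_mkey(dev, &mr->mmkey);
 *	...
 *	mlx5r_deref_wait_odp_mkey(&mr->mmkey);
 */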
int mlx5_ib_test_wc(struct mlx5_ib_dev *dev);

static inline bool mlx5_ib_lag_should_assign_affinity(struct mlx5_ib_dev *dev)
{
	return dev->lag_active ||
		(MLX5_CAP_GEN(dev->mdev, num_lag_ports) > 1 &&
		 MLX5_CAP_GEN(dev->mdev, lag_tx_port_affinity));
}

static inline bool rt_supported(int ts_cap)
{
	return ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_REAL_TIME ||
	       ts_cap == MLX5_TIMESTAMP_FORMAT_CAP_FREE_RUNNING_AND_REAL_TIME;
}
#endif /* MLX5_IB_H */