/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#ifndef MLX5_DEVICE_H
#define MLX5_DEVICE_H

#include <linux/types.h>
#include <rdma/ib_verbs.h>
#include <linux/mlx5/mlx5_ifc.h>
#include <linux/bitfield.h>
#if defined(__LITTLE_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0
#elif defined(__BIG_ENDIAN)
#define MLX5_SET_HOST_ENDIANNESS 0x80
#else
#error Host endianness not defined
#endif
#define __mlx5_nullp(typ) ((struct mlx5_ifc_##typ##_bits *)0)
#define __mlx5_bit_sz(typ, fld) sizeof(__mlx5_nullp(typ)->fld)
#define __mlx5_bit_off(typ, fld) (offsetof(struct mlx5_ifc_##typ##_bits, fld))
#define __mlx5_16_off(typ, fld) (__mlx5_bit_off(typ, fld) / 16)
#define __mlx5_dw_off(typ, fld) (__mlx5_bit_off(typ, fld) / 32)
#define __mlx5_64_off(typ, fld) (__mlx5_bit_off(typ, fld) / 64)
#define __mlx5_16_bit_off(typ, fld) (16 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0xf))
#define __mlx5_dw_bit_off(typ, fld) (32 - __mlx5_bit_sz(typ, fld) - (__mlx5_bit_off(typ, fld) & 0x1f))
#define __mlx5_mask(typ, fld) ((u32)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_dw_mask(typ, fld) (__mlx5_mask(typ, fld) << __mlx5_dw_bit_off(typ, fld))
#define __mlx5_mask16(typ, fld) ((u16)((1ull << __mlx5_bit_sz(typ, fld)) - 1))
#define __mlx5_16_mask(typ, fld) (__mlx5_mask16(typ, fld) << __mlx5_16_bit_off(typ, fld))
#define __mlx5_st_sz_bits(typ) sizeof(struct mlx5_ifc_##typ##_bits)
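/*
 * Worked example (illustrative only; "foo" and "bar" are hypothetical, not
 * real mlx5_ifc layouts): for a 12-bit field "bar" that starts 40 bits into
 * "struct mlx5_ifc_foo_bits" (each u8 array element of an *_bits struct
 * stands for one bit, so offsetof() yields a bit offset):
 *
 *	__mlx5_bit_sz(foo, bar)     = 12
 *	__mlx5_bit_off(foo, bar)    = 40
 *	__mlx5_dw_off(foo, bar)     = 40 / 32 = 1 (second dword)
 *	__mlx5_dw_bit_off(foo, bar) = 32 - 12 - (40 & 0x1f) = 12
 *	__mlx5_mask(foo, bar)       = 0xfff
 *	__mlx5_dw_mask(foo, bar)    = 0xfff << 12
 *
 * i.e. the field occupies bits [23:12] of the big-endian dword at index 1.
 */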
#define MLX5_FLD_SZ_BYTES(typ, fld) (__mlx5_bit_sz(typ, fld) / 8)
#define MLX5_ST_SZ_BYTES(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 8)
#define MLX5_ST_SZ_DW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 32)
#define MLX5_ST_SZ_QW(typ) (sizeof(struct mlx5_ifc_##typ##_bits) / 64)
#define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8)
#define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32)
#define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8)
#define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld)))
/* insert a value into a struct */
#define MLX5_SET(typ, p, fld, v) do { \
	u32 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
		cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
			     (~__mlx5_dw_mask(typ, fld))) | (((_v) & __mlx5_mask(typ, fld)) \
			     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)
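/*
 * Usage sketch (hedged: "query_hca_cap_out", "capability", "cmd_hca_cap" and
 * "log_max_qp" are the mlx5_ifc.h names this example assumes; the surrounding
 * code is illustrative, not a prescribed flow):
 *
 *	u32 out[MLX5_ST_SZ_DW(query_hca_cap_out)] = {};
 *	void *cap = MLX5_ADDR_OF(query_hca_cap_out, out, capability);
 *	u8 log_max_qp;
 *
 *	MLX5_SET(cmd_hca_cap, cap, log_max_qp, 17);
 *	log_max_qp = MLX5_GET(cmd_hca_cap, cap, log_max_qp);
 *
 * MLX5_SET() read-modify-writes only the big-endian dword that holds the
 * field, so neighbouring fields in the same dword are preserved.
 */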
#define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \
	MLX5_SET(typ, p, fld[idx], v); \
} while (0)
#define MLX5_SET_TO_ONES(typ, p, fld) do { \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \
	*((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \
		cpu_to_be32((be32_to_cpu(*((__be32 *)(p) + __mlx5_dw_off(typ, fld))) & \
			     (~__mlx5_dw_mask(typ, fld))) | ((__mlx5_mask(typ, fld)) \
			     << __mlx5_dw_bit_off(typ, fld))); \
} while (0)
#define MLX5_GET(typ, p, fld) ((be32_to_cpu(*((__be32 *)(p) + \
	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
	__mlx5_mask(typ, fld))
#define MLX5_GET_PR(typ, p, fld) ({ \
	u32 ___t = MLX5_GET(typ, p, fld); \
	pr_debug(#fld " = 0x%x\n", ___t); \
	___t; \
})
#define __MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_sz(typ, fld) != 64); \
	*((__be64 *)(p) + __mlx5_64_off(typ, fld)) = cpu_to_be64(v); \
} while (0)

#define MLX5_SET64(typ, p, fld, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld, v); \
} while (0)

#define MLX5_ARRAY_SET64(typ, p, fld, idx, v) do { \
	BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 64); \
	__MLX5_SET64(typ, p, fld[idx], v); \
} while (0)

#define MLX5_GET64(typ, p, fld) be64_to_cpu(*((__be64 *)(p) + __mlx5_64_off(typ, fld)))

#define MLX5_GET64_PR(typ, p, fld) ({ \
	u64 ___t = MLX5_GET64(typ, p, fld); \
	pr_debug(#fld " = 0x%llx\n", ___t); \
	___t; \
})
#define MLX5_GET16(typ, p, fld) ((be16_to_cpu(*((__be16 *)(p) + \
	__mlx5_16_off(typ, fld))) >> __mlx5_16_bit_off(typ, fld)) & \
	__mlx5_mask16(typ, fld))
#define MLX5_SET16(typ, p, fld, v) do { \
	u16 _v = v; \
	BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 16); \
	*((__be16 *)(p) + __mlx5_16_off(typ, fld)) = \
		cpu_to_be16((be16_to_cpu(*((__be16 *)(p) + __mlx5_16_off(typ, fld))) & \
			     (~__mlx5_16_mask(typ, fld))) | (((_v) & __mlx5_mask16(typ, fld)) \
			     << __mlx5_16_bit_off(typ, fld))); \
} while (0)
/* Big endian getters */
#define MLX5_GET64_BE(typ, p, fld) (*((__be64 *)(p) + \
	__mlx5_64_off(typ, fld)))
#define MLX5_GET_BE(type_t, typ, p, fld) ({ \
	type_t tmp; \
	switch (sizeof(tmp)) { \
	case sizeof(u8): \
		tmp = (__force type_t)MLX5_GET(typ, p, fld); \
		break; \
	case sizeof(u16): \
		tmp = (__force type_t)cpu_to_be16(MLX5_GET(typ, p, fld)); \
		break; \
	case sizeof(u32): \
		tmp = (__force type_t)cpu_to_be32(MLX5_GET(typ, p, fld)); \
		break; \
	case sizeof(u64): \
		tmp = (__force type_t)MLX5_GET64_BE(typ, p, fld); \
		break; \
	} \
	tmp; \
})
enum mlx5_inline_modes {
	MLX5_INLINE_MODE_NONE,
	MLX5_INLINE_MODE_L2,
	MLX5_INLINE_MODE_IP,
	MLX5_INLINE_MODE_TCP_UDP,
};

	MLX5_MAX_COMMANDS = 32,
	MLX5_CMD_DATA_BLOCK_SIZE = 512,
	MLX5_PCI_CMD_XPORT = 7,
	MLX5_MKEY_BSF_OCTO_SIZE = 4,

	MLX5_EXTENDED_UD_AV = 0x80000000,

	MLX5_CQ_STATE_ARMED = 9,
	MLX5_CQ_STATE_ALWAYS_ARMED = 0xb,
	MLX5_CQ_STATE_FIRED = 0xa,

	MLX5_STAT_RATE_OFFSET = 5,

	MLX5_INLINE_SEG = 0x80000000,

	MLX5_HW_START_PADDING = MLX5_INLINE_SEG,

	MLX5_MIN_PKEY_TABLE_SIZE = 128,
	MLX5_MAX_LOG_PKEY_TABLE = 5,

	MLX5_MKEY_INBOX_PG_ACCESS = 1 << 31

	MLX5_PFAULT_SUBTYPE_WQE = 0,
	MLX5_PFAULT_SUBTYPE_RDMA = 1,

enum wqe_page_fault_type {
	MLX5_WQE_PF_TYPE_RMP = 0,
	MLX5_WQE_PF_TYPE_REQ_SEND_OR_WRITE = 1,
	MLX5_WQE_PF_TYPE_RESP = 2,
	MLX5_WQE_PF_TYPE_REQ_READ_OR_ATOMIC = 3,
};

	MLX5_PERM_LOCAL_READ = 1 << 2,
	MLX5_PERM_LOCAL_WRITE = 1 << 3,
	MLX5_PERM_REMOTE_READ = 1 << 4,
	MLX5_PERM_REMOTE_WRITE = 1 << 5,
	MLX5_PERM_ATOMIC = 1 << 6,
	MLX5_PERM_UMR_EN = 1 << 7,

	MLX5_PCIE_CTRL_SMALL_FENCE = 1 << 0,
	MLX5_PCIE_CTRL_RELAXED_ORDERING = 1 << 2,
	MLX5_PCIE_CTRL_NO_SNOOP = 1 << 3,
	MLX5_PCIE_CTRL_TLP_PROCE_EN = 1 << 6,
	MLX5_PCIE_CTRL_TPH_MASK = 3 << 4,

	MLX5_ADAPTER_PAGE_SHIFT = 12,
	MLX5_ADAPTER_PAGE_SIZE = 1 << MLX5_ADAPTER_PAGE_SHIFT,

	MLX5_BFREGS_PER_UAR = 4,
	MLX5_MAX_UARS = 1 << 8,
	MLX5_NON_FP_BFREGS_PER_UAR = 2,
	MLX5_FP_BFREGS_PER_UAR = MLX5_BFREGS_PER_UAR -
				 MLX5_NON_FP_BFREGS_PER_UAR,
	MLX5_MAX_BFREGS = MLX5_MAX_UARS *
			  MLX5_NON_FP_BFREGS_PER_UAR,
	MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
	MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE,
	MLX5_MIN_DYN_BFREGS = 512,
	MLX5_MAX_DYN_BFREGS = 1024,
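/*
 * Worked numbers (illustrative; assumes a 4KB kernel PAGE_SIZE, so exactly
 * one 4KB UAR fits in a page):
 *
 *	MLX5_UARS_IN_PAGE          = 4096 / 4096 = 1
 *	MLX5_NON_FP_BFREGS_IN_PAGE = 2 * 1       = 2
 *	MLX5_FP_BFREGS_PER_UAR     = 4 - 2       = 2
 *	MLX5_MAX_BFREGS            = 256 * 2     = 512
 *
 * On architectures with larger pages, MLX5_UARS_IN_PAGE scales up
 * accordingly.
 */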
	MLX5_MKEY_MASK_LEN = 1ull << 0,
	MLX5_MKEY_MASK_PAGE_SIZE = 1ull << 1,
	MLX5_MKEY_MASK_START_ADDR = 1ull << 6,
	MLX5_MKEY_MASK_PD = 1ull << 7,
	MLX5_MKEY_MASK_EN_RINVAL = 1ull << 8,
	MLX5_MKEY_MASK_EN_SIGERR = 1ull << 9,
	MLX5_MKEY_MASK_BSF_EN = 1ull << 12,
	MLX5_MKEY_MASK_KEY = 1ull << 13,
	MLX5_MKEY_MASK_QPN = 1ull << 14,
	MLX5_MKEY_MASK_LR = 1ull << 17,
	MLX5_MKEY_MASK_LW = 1ull << 18,
	MLX5_MKEY_MASK_RR = 1ull << 19,
	MLX5_MKEY_MASK_RW = 1ull << 20,
	MLX5_MKEY_MASK_A = 1ull << 21,
	MLX5_MKEY_MASK_SMALL_FENCE = 1ull << 23,
	MLX5_MKEY_MASK_RELAXED_ORDERING_WRITE = 1ull << 25,
	MLX5_MKEY_MASK_FREE = 1ull << 29,
	MLX5_MKEY_MASK_RELAXED_ORDERING_READ = 1ull << 47,

	MLX5_UMR_TRANSLATION_OFFSET_EN = (1 << 4),

	MLX5_UMR_CHECK_NOT_FREE = (1 << 5),
	MLX5_UMR_CHECK_FREE = (2 << 5),

	MLX5_UMR_INLINE = (1 << 7),

#define MLX5_UMR_FLEX_ALIGNMENT 0x40
#define MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_mtt))
#define MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT (MLX5_UMR_FLEX_ALIGNMENT / sizeof(struct mlx5_klm))
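/*
 * Worked numbers (illustrative; assumes the usual 8-byte struct mlx5_mtt
 * and 16-byte struct mlx5_klm layouts):
 *
 *	MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT = 0x40 / 8  = 8 entries
 *	MLX5_UMR_KLM_NUM_ENTRIES_ALIGNMENT = 0x40 / 16 = 4 entries
 *
 * i.e. UMR translation lists are sized in 64-byte chunks.
 */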
#define MLX5_USER_INDEX_LEN (MLX5_FLD_SZ_BYTES(qpc, user_index) * 8)

	MLX5_EVENT_QUEUE_TYPE_QP = 0,
	MLX5_EVENT_QUEUE_TYPE_RQ = 1,
	MLX5_EVENT_QUEUE_TYPE_SQ = 2,
	MLX5_EVENT_QUEUE_TYPE_DCT = 6,
/* mlx5 components can subscribe to any one of these events via the
 * mlx5_eq_notifier_register API.
 */
enum mlx5_event_type {
	/* Special value to subscribe to any event */
	MLX5_EVENT_TYPE_NOTIFY_ANY = 0x0,
	/* HW events enum start: comp events are not subscribable */
	MLX5_EVENT_TYPE_COMP = 0x0,
	/* HW Async events enum start: subscribable events */
	MLX5_EVENT_TYPE_PATH_MIG = 0x01,
	MLX5_EVENT_TYPE_COMM_EST = 0x02,
	MLX5_EVENT_TYPE_SQ_DRAINED = 0x03,
	MLX5_EVENT_TYPE_SRQ_LAST_WQE = 0x13,
	MLX5_EVENT_TYPE_SRQ_RQ_LIMIT = 0x14,

	MLX5_EVENT_TYPE_CQ_ERROR = 0x04,
	MLX5_EVENT_TYPE_WQ_CATAS_ERROR = 0x05,
	MLX5_EVENT_TYPE_PATH_MIG_FAILED = 0x07,
	MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10,
	MLX5_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11,
	MLX5_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12,
	MLX5_EVENT_TYPE_OBJECT_CHANGE = 0x27,

	MLX5_EVENT_TYPE_INTERNAL_ERROR = 0x08,
	MLX5_EVENT_TYPE_PORT_CHANGE = 0x09,
	MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
	MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
	MLX5_EVENT_TYPE_TEMP_WARN_EVENT = 0x17,
	MLX5_EVENT_TYPE_XRQ_ERROR = 0x18,
	MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
	MLX5_EVENT_TYPE_GENERAL_EVENT = 0x22,
	MLX5_EVENT_TYPE_MONITOR_COUNTER = 0x24,
	MLX5_EVENT_TYPE_PPS_EVENT = 0x25,

	MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
	MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,

	MLX5_EVENT_TYPE_CMD = 0x0a,
	MLX5_EVENT_TYPE_PAGE_REQUEST = 0xb,

	MLX5_EVENT_TYPE_PAGE_FAULT = 0xc,
	MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd,

	MLX5_EVENT_TYPE_ESW_FUNCTIONS_CHANGED = 0xe,
	MLX5_EVENT_TYPE_VHCA_STATE_CHANGE = 0xf,

	MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c,
	MLX5_EVENT_TYPE_DCT_KEY_VIOLATION = 0x1d,

	MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
	MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,

	MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,

	MLX5_EVENT_TYPE_MAX = 0x100,
};
enum mlx5_driver_event {
	MLX5_DRIVER_EVENT_TYPE_TRAP = 0,
	MLX5_DRIVER_EVENT_UPLINK_NETDEV,
};

	MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
	MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
	MLX5_TRACER_SUBTYPE_STRINGS_DB_UPDATE = 0x2,

	MLX5_GENERAL_SUBTYPE_DELAY_DROP_TIMEOUT = 0x1,
	MLX5_GENERAL_SUBTYPE_PCI_POWER_CHANGE_EVENT = 0x5,
	MLX5_GENERAL_SUBTYPE_FW_LIVE_PATCH_EVENT = 0x7,
	MLX5_GENERAL_SUBTYPE_PCI_SYNC_FOR_FW_UPDATE_EVENT = 0x8,

	MLX5_PORT_CHANGE_SUBTYPE_DOWN = 1,
	MLX5_PORT_CHANGE_SUBTYPE_ACTIVE = 4,
	MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED = 5,
	MLX5_PORT_CHANGE_SUBTYPE_LID = 6,
	MLX5_PORT_CHANGE_SUBTYPE_PKEY = 7,
	MLX5_PORT_CHANGE_SUBTYPE_GUID = 8,
	MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG = 9,

	MLX5_ROCE_VERSION_1 = 0,
	MLX5_ROCE_VERSION_2 = 2,

	MLX5_ROCE_VERSION_1_CAP = 1 << MLX5_ROCE_VERSION_1,
	MLX5_ROCE_VERSION_2_CAP = 1 << MLX5_ROCE_VERSION_2,

	MLX5_ROCE_L3_TYPE_IPV4 = 0,
	MLX5_ROCE_L3_TYPE_IPV6 = 1,

	MLX5_ROCE_L3_TYPE_IPV4_CAP = 1 << 1,
	MLX5_ROCE_L3_TYPE_IPV6_CAP = 1 << 2,
	MLX5_OPCODE_NOP = 0x00,
	MLX5_OPCODE_SEND_INVAL = 0x01,
	MLX5_OPCODE_RDMA_WRITE = 0x08,
	MLX5_OPCODE_RDMA_WRITE_IMM = 0x09,
	MLX5_OPCODE_SEND = 0x0a,
	MLX5_OPCODE_SEND_IMM = 0x0b,
	MLX5_OPCODE_LSO = 0x0e,
	MLX5_OPCODE_RDMA_READ = 0x10,
	MLX5_OPCODE_ATOMIC_CS = 0x11,
	MLX5_OPCODE_ATOMIC_FA = 0x12,
	MLX5_OPCODE_ATOMIC_MASKED_CS = 0x14,
	MLX5_OPCODE_ATOMIC_MASKED_FA = 0x15,
	MLX5_OPCODE_BIND_MW = 0x18,
	MLX5_OPCODE_CONFIG_CMD = 0x1f,
	MLX5_OPCODE_ENHANCED_MPSW = 0x29,

	MLX5_RECV_OPCODE_RDMA_WRITE_IMM = 0x00,
	MLX5_RECV_OPCODE_SEND = 0x01,
	MLX5_RECV_OPCODE_SEND_IMM = 0x02,
	MLX5_RECV_OPCODE_SEND_INVAL = 0x03,

	MLX5_CQE_OPCODE_ERROR = 0x1e,
	MLX5_CQE_OPCODE_RESIZE = 0x16,

	MLX5_OPCODE_SET_PSV = 0x20,
	MLX5_OPCODE_GET_PSV = 0x21,
	MLX5_OPCODE_CHECK_PSV = 0x22,
	MLX5_OPCODE_DUMP = 0x23,
	MLX5_OPCODE_RGET_PSV = 0x26,
	MLX5_OPCODE_RCHECK_PSV = 0x27,

	MLX5_OPCODE_UMR = 0x25,

	MLX5_OPCODE_FLOW_TBL_ACCESS = 0x2c,

	MLX5_OPCODE_ACCESS_ASO = 0x2d,

	MLX5_OPC_MOD_TLS_TIS_STATIC_PARAMS = 0x1,
	MLX5_OPC_MOD_TLS_TIR_STATIC_PARAMS = 0x2,

	MLX5_OPC_MOD_TLS_TIS_PROGRESS_PARAMS = 0x1,
	MLX5_OPC_MOD_TLS_TIR_PROGRESS_PARAMS = 0x2,
struct mlx5_wqe_tls_static_params_seg {
	u8 ctx[MLX5_ST_SZ_BYTES(tls_static_params)];
};

struct mlx5_wqe_tls_progress_params_seg {
	__be32 tis_tir_num;
	u8 ctx[MLX5_ST_SZ_BYTES(tls_progress_params)];
};
	MLX5_SET_PORT_RESET_QKEY = 0,
	MLX5_SET_PORT_GUID0 = 16,
	MLX5_SET_PORT_NODE_GUID = 17,
	MLX5_SET_PORT_SYS_GUID = 18,
	MLX5_SET_PORT_GID_TABLE = 19,
	MLX5_SET_PORT_PKEY_TABLE = 20,

	MLX5_BW_NO_LIMIT = 0,
	MLX5_100_MBPS_UNIT = 3,

	MLX5_MAX_PAGE_SHIFT = 31
	/*
	 * Max wqe size for rdma read is 512 bytes, so this
	 * limits our max_sge_rd as the wqe needs to fit:
	 * - ctrl segment (16 bytes)
	 * - rdma segment (16 bytes)
	 * - scatter elements (16 bytes each)
	 */
	MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
enum mlx5_odp_transport_cap_bits {
	MLX5_ODP_SUPPORT_SEND = 1 << 31,
	MLX5_ODP_SUPPORT_RECV = 1 << 30,
	MLX5_ODP_SUPPORT_WRITE = 1 << 29,
	MLX5_ODP_SUPPORT_READ = 1 << 28,
};

struct mlx5_odp_caps {
	char reserved[0x10];
	struct {
		__be32 rc_odp_caps;
		__be32 uc_odp_caps;
		__be32 ud_odp_caps;
	} per_transport_caps;
	char reserved2[0xe4];
};
struct mlx5_cmd_layout {

enum mlx5_rfr_severity_bit_offsets {
	MLX5_RFR_BIT_OFFSET = 0x7,
};

struct health_buffer {
	__be32 assert_var[6];
	__be32 assert_exit_ptr;
	__be32 assert_callra;
enum mlx5_initializing_bit_offsets {
	MLX5_FW_RESET_SUPPORTED_OFFSET = 30,
};

enum mlx5_cmd_addr_l_sz_offset {
	MLX5_NIC_IFC_OFFSET = 8,
};

struct mlx5_init_seg {
	__be32 cmdif_rev_fw_sub;
	__be32 cmdq_addr_l_sz;
	struct health_buffer health;
	__be32 cmd_q_init_to;
	__be32 internal_timer_h;
	__be32 internal_timer_l;
	__be32 health_counter;
	__be32 ieee1588_clk_type;
struct mlx5_eqe_comp {

struct mlx5_eqe_qp_srq {

struct mlx5_eqe_cq_err {

struct mlx5_eqe_xrq_err {

struct mlx5_eqe_port_state {

struct mlx5_eqe_gpio {

struct mlx5_eqe_congestion {

struct mlx5_eqe_stall_vl {

struct mlx5_eqe_cmd {

struct mlx5_eqe_page_req {

struct mlx5_eqe_page_fault {
	__be32 bytes_committed;
	__be16 packet_length;
	__be16 packet_length;

struct mlx5_eqe_vport_change {

struct mlx5_eqe_port_module {

struct mlx5_eqe_pps {

struct mlx5_eqe_dct {

struct mlx5_eqe_temp_warning {
	__be64 sensor_warning_msb;
	__be64 sensor_warning_lsb;

struct mlx5_eqe_obj_change {
#define SYNC_RST_STATE_MASK 0xf

enum sync_rst_state_type {
	MLX5_SYNC_RST_STATE_RESET_REQUEST = 0x0,
	MLX5_SYNC_RST_STATE_RESET_NOW = 0x1,
	MLX5_SYNC_RST_STATE_RESET_ABORT = 0x2,
	MLX5_SYNC_RST_STATE_RESET_UNLOAD = 0x3,
};

struct mlx5_eqe_sync_fw_update {

struct mlx5_eqe_vhca_state {
	struct mlx5_eqe_cmd cmd;
	struct mlx5_eqe_comp comp;
	struct mlx5_eqe_qp_srq qp_srq;
	struct mlx5_eqe_cq_err cq_err;
	struct mlx5_eqe_port_state port;
	struct mlx5_eqe_gpio gpio;
	struct mlx5_eqe_congestion cong;
	struct mlx5_eqe_stall_vl stall_vl;
	struct mlx5_eqe_page_req req_pages;
	struct mlx5_eqe_page_fault page_fault;
	struct mlx5_eqe_vport_change vport_change;
	struct mlx5_eqe_port_module port_module;
	struct mlx5_eqe_pps pps;
	struct mlx5_eqe_dct dct;
	struct mlx5_eqe_temp_warning temp_warning;
	struct mlx5_eqe_xrq_err xrq_err;
	struct mlx5_eqe_sync_fw_update sync_fw_update;
	struct mlx5_eqe_vhca_state vhca_state;
	struct mlx5_eqe_obj_change obj_change;
struct mlx5_cmd_prot_block {
	u8 data[MLX5_CMD_DATA_BLOCK_SIZE];

	MLX5_CQE_SYND_FLUSHED_IN_ERROR = 5,
struct mlx5_err_cqe {
	__be32 s_wqe_opcode_qpn;

struct mlx5_cqe64 {
	u8 tls_outer_l3_tunneled;
	u8 tcppsh_abort_dupack;
	__be16 header_entry_index;
	__be32 rss_hash_result;
	__be32 srqn; /* [31:24]: lro_num_seg, [23:0]: srqn */
	u8 validity_iteration_count;

struct mlx5_mini_cqe8 {
	__be32 rx_hash_result;
	MLX5_INLINE_DATA32_SEG,
	MLX5_INLINE_DATA64_SEG,

	MLX5_CQE_FORMAT_CSUM = 0x1,
	MLX5_CQE_FORMAT_CSUM_STRIDX = 0x3,

	MLX5_CQE_COMPRESS_LAYOUT_BASIC = 0,
	MLX5_CQE_COMPRESS_LAYOUT_ENHANCED = 1,

#define MLX5_MINI_CQE_ARRAY_SIZE 8
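/*
 * The helpers below decode the CQE op_own byte. As used by these helpers,
 * the opcode lives in bits [7:4] and the compressed-CQE format in bits
 * [3:2]; bit 0 is the hardware ownership bit (not decoded in this excerpt).
 */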
static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
	return (cqe->op_own >> 2) & 0x3;
}

static inline u8 get_cqe_opcode(struct mlx5_cqe64 *cqe)
{
	return cqe->op_own >> 4;
}

static inline u8 get_cqe_enhanced_num_mini_cqes(struct mlx5_cqe64 *cqe)
{
	/* num_of_mini_cqes is zero based */
	return get_cqe_opcode(cqe) + 1;
}

static inline u8 get_cqe_lro_tcppsh(struct mlx5_cqe64 *cqe)
{
	return (cqe->lro.tcppsh_abort_dupack >> 6) & 1;
}

static inline u8 get_cqe_l4_hdr_type(struct mlx5_cqe64 *cqe)
{
	return (cqe->l4_l3_hdr_type >> 4) & 0x7;
}

static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
	return cqe->tls_outer_l3_tunneled & 0x1;
}

static inline u8 get_cqe_tls_offload(struct mlx5_cqe64 *cqe)
{
	return (cqe->tls_outer_l3_tunneled >> 3) & 0x3;
}

static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
	return cqe->l4_l3_hdr_type & 0x1;
}

static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
{
	u32 hi, lo;

	hi = be32_to_cpu(cqe->timestamp_h);
	lo = be32_to_cpu(cqe->timestamp_l);

	return (u64)lo | ((u64)hi << 32);
}

static inline u16 get_cqe_flow_tag(struct mlx5_cqe64 *cqe)
{
	return be32_to_cpu(cqe->sop_drop_qpn) & 0xFFF;
}
#define MLX5_MPWQE_LOG_NUM_STRIDES_EXT_BASE 3
#define MLX5_MPWQE_LOG_NUM_STRIDES_BASE 9
#define MLX5_MPWQE_LOG_NUM_STRIDES_MAX 16
#define MLX5_MPWQE_LOG_STRIDE_SZ_BASE 6
#define MLX5_MPWQE_LOG_STRIDE_SZ_MAX 13
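/*
 * Illustrative ranges implied by the bases/maxima above: the log stride
 * size runs from 6 to 13 (64B to 8KB strides), and the log number of
 * strides from 9 to 16 (512 to 65536 strides per multi-packet WQE).
 */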
struct mpwrq_cqe_bc {
	__be16 filler_consumed_strides;
	__be16 byte_cnt;
};

static inline u16 mpwrq_get_cqe_byte_cnt(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return be16_to_cpu(bc->byte_cnt);
}

static inline u16 mpwrq_get_cqe_bc_consumed_strides(struct mpwrq_cqe_bc *bc)
{
	return 0x7fff & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_consumed_strides(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return mpwrq_get_cqe_bc_consumed_strides(bc);
}

static inline bool mpwrq_is_filler_cqe(struct mlx5_cqe64 *cqe)
{
	struct mpwrq_cqe_bc *bc = (struct mpwrq_cqe_bc *)&cqe->byte_cnt;

	return 0x8000 & be16_to_cpu(bc->filler_consumed_strides);
}

static inline u16 mpwrq_get_cqe_stride_index(struct mlx5_cqe64 *cqe)
{
	return be16_to_cpu(cqe->wqe_counter);
}
	CQE_L4_HDR_TYPE_NONE = 0x0,
	CQE_L4_HDR_TYPE_TCP_NO_ACK = 0x1,
	CQE_L4_HDR_TYPE_UDP = 0x2,
	CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA = 0x3,
	CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA = 0x4,
	CQE_RSS_HTYPE_IP = GENMASK(3, 2),
	/* cqe->rss_hash_type[3:2] - IP destination selected for hash
	 * (00 = none, 01 = IPv4, 10 = IPv6, 11 = Reserved)
	 */
	CQE_RSS_IP_NONE = 0x0,
	CQE_RSS_IPV4 = 0x1,
	CQE_RSS_IPV6 = 0x2,
	CQE_RSS_RESERVED = 0x3,

	CQE_RSS_HTYPE_L4 = GENMASK(7, 6),
	/* cqe->rss_hash_type[7:6] - L4 destination selected for hash
	 * (00 = none, 01 = TCP, 10 = UDP, 11 = IPSEC.SPI)
	 */
	CQE_RSS_L4_NONE = 0x0,
	CQE_RSS_L4_TCP = 0x1,
	CQE_RSS_L4_UDP = 0x2,
	CQE_RSS_L4_IPSEC = 0x3,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_GRH = 0x0,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV6 = 0x1,
	MLX5_CQE_ROCE_L3_HEADER_TYPE_IPV4 = 0x2,

	CQE_TLS_OFFLOAD_NOT_DECRYPTED = 0x0,
	CQE_TLS_OFFLOAD_DECRYPTED = 0x1,
	CQE_TLS_OFFLOAD_RESYNC = 0x2,
	CQE_TLS_OFFLOAD_ERROR = 0x3,
struct mlx5_sig_err_cqe {
	__be32 expected_trans_sig;
	__be32 actual_trans_sig;
	__be32 expected_reftag;
	__be32 actual_reftag;

struct mlx5_wqe_srq_next_seg {
	__be16 next_wqe_index;
union mlx5_ext_cqe {

struct mlx5_cqe128 {
	union mlx5_ext_cqe inl_grh;
	struct mlx5_cqe64 cqe64;
};

	MLX5_MKEY_STATUS_FREE = 1 << 6,

	MLX5_MKEY_REMOTE_INVAL = 1 << 24,
	MLX5_MKEY_FLAG_SYNC_UMR = 1 << 29,
	MLX5_MKEY_BSF_EN = 1 << 30,
struct mlx5_mkey_seg {
	/* This is a two bit field occupying bits 31-30.
	 * bit 31 is always 0,
	 * bit 30 is zero for regular MRs and 1 (e.g. free) for UMRs that do
	 * not have translation
	 */
	u8 status;
	__be32 bsfs_octo_size;
	__be32 xlt_oct_size;
#define MLX5_ATTR_EXTENDED_PORT_INFO cpu_to_be16(0xff90)

	MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO = 1 << 0

	VPORT_STATE_DOWN = 0x0,
	VPORT_STATE_UP = 0x1,

	MLX5_VPORT_ADMIN_STATE_DOWN = 0x0,
	MLX5_VPORT_ADMIN_STATE_UP = 0x1,
	MLX5_VPORT_ADMIN_STATE_AUTO = 0x2,

	MLX5_VPORT_CVLAN_INSERT_WHEN_NO_CVLAN = 0x1,
	MLX5_VPORT_CVLAN_INSERT_ALWAYS = 0x3,

	MLX5_L3_PROT_TYPE_IPV4 = 0,
	MLX5_L3_PROT_TYPE_IPV6 = 1,

	MLX5_L4_PROT_TYPE_TCP = 0,
	MLX5_L4_PROT_TYPE_UDP = 1,

	MLX5_HASH_FIELD_SEL_SRC_IP = 1 << 0,
	MLX5_HASH_FIELD_SEL_DST_IP = 1 << 1,
	MLX5_HASH_FIELD_SEL_L4_SPORT = 1 << 2,
	MLX5_HASH_FIELD_SEL_L4_DPORT = 1 << 3,
	MLX5_HASH_FIELD_SEL_IPSEC_SPI = 1 << 4,

	MLX5_MATCH_OUTER_HEADERS = 1 << 0,
	MLX5_MATCH_MISC_PARAMETERS = 1 << 1,
	MLX5_MATCH_INNER_HEADERS = 1 << 2,
	MLX5_MATCH_MISC_PARAMETERS_2 = 1 << 3,
	MLX5_MATCH_MISC_PARAMETERS_3 = 1 << 4,
	MLX5_MATCH_MISC_PARAMETERS_4 = 1 << 5,
	MLX5_MATCH_MISC_PARAMETERS_5 = 1 << 6,

	MLX5_FLOW_TABLE_TYPE_NIC_RCV = 0,
	MLX5_FLOW_TABLE_TYPE_ESWITCH = 4,

	MLX5_FLOW_CONTEXT_DEST_TYPE_VPORT = 0,
	MLX5_FLOW_CONTEXT_DEST_TYPE_FLOW_TABLE = 1,
	MLX5_FLOW_CONTEXT_DEST_TYPE_TIR = 2,

enum mlx5_list_type {
	MLX5_NVPRT_LIST_TYPE_UC = 0x0,
	MLX5_NVPRT_LIST_TYPE_MC = 0x1,
	MLX5_NVPRT_LIST_TYPE_VLAN = 0x2,
};

	MLX5_RQC_RQ_TYPE_MEMORY_RQ_INLINE = 0x0,
	MLX5_RQC_RQ_TYPE_MEMORY_RQ_RPM = 0x1,

enum mlx5_wol_mode {
	MLX5_WOL_DISABLE = 0,
	MLX5_WOL_SECURED_MAGIC = 1 << 1,
	MLX5_WOL_MAGIC = 1 << 2,
	MLX5_WOL_ARP = 1 << 3,
	MLX5_WOL_BROADCAST = 1 << 4,
	MLX5_WOL_MULTICAST = 1 << 5,
	MLX5_WOL_UNICAST = 1 << 6,
	MLX5_WOL_PHY_ACTIVITY = 1 << 7,
};

enum mlx5_mpls_supported_fields {
	MLX5_FIELD_SUPPORT_MPLS_LABEL = 1 << 0,
	MLX5_FIELD_SUPPORT_MPLS_EXP = 1 << 1,
	MLX5_FIELD_SUPPORT_MPLS_S_BOS = 1 << 2,
	MLX5_FIELD_SUPPORT_MPLS_TTL = 1 << 3
};

enum mlx5_flex_parser_protos {
	MLX5_FLEX_PROTO_GENEVE = 1 << 3,
	MLX5_FLEX_PROTO_CW_MPLS_GRE = 1 << 4,
	MLX5_FLEX_PROTO_CW_MPLS_UDP = 1 << 5,
	MLX5_FLEX_PROTO_ICMP = 1 << 8,
	MLX5_FLEX_PROTO_ICMPV6 = 1 << 9,
};
enum mlx5_cap_mode {
	HCA_CAP_OPMOD_GET_MAX = 0,
	HCA_CAP_OPMOD_GET_CUR = 1,
};
/* Any new cap addition must update mlx5_hca_caps_alloc() to allocate
 * capability memory.
 */
enum mlx5_cap_type {
	MLX5_CAP_GENERAL = 0,
	MLX5_CAP_ETHERNET_OFFLOADS,
	MLX5_CAP_IPOIB_OFFLOADS,
	MLX5_CAP_IPOIB_ENHANCED_OFFLOADS,
	MLX5_CAP_FLOW_TABLE,
	MLX5_CAP_ESWITCH_FLOW_TABLE,
	MLX5_CAP_VECTOR_CALC,
	MLX5_CAP_RESERVED_14,
	MLX5_CAP_RESERVED_16,
	MLX5_CAP_VDPA_EMULATION = 0x13,
	MLX5_CAP_DEV_EVENT = 0x14,
	MLX5_CAP_CRYPTO = 0x1a,
	MLX5_CAP_DEV_SHAMPO = 0x1d,
	MLX5_CAP_MACSEC = 0x1f,
	MLX5_CAP_GENERAL_2 = 0x20,
	MLX5_CAP_PORT_SELECTION = 0x25,
	MLX5_CAP_ADV_VIRTUALIZATION = 0x26,
	/* NUM OF CAP Types */
	MLX5_CAP_NUM
};
enum mlx5_pcam_reg_groups {
	MLX5_PCAM_REGS_5000_TO_507F = 0x0,
};

enum mlx5_pcam_feature_groups {
	MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0,
};

enum mlx5_mcam_reg_groups {
	MLX5_MCAM_REGS_FIRST_128 = 0x0,
	MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
	MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
	MLX5_MCAM_REGS_NUM = 0x3,
};

enum mlx5_mcam_feature_groups {
	MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
};

enum mlx5_qcam_reg_groups {
	MLX5_QCAM_REGS_FIRST_128 = 0x0,
};

enum mlx5_qcam_feature_groups {
	MLX5_QCAM_FEATURE_ENHANCED_FEATURES = 0x0,
};
/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)

#define MLX5_CAP_GEN_64(mdev, cap) \
	MLX5_GET64(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->cur, cap)

#define MLX5_CAP_GEN_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap, mdev->caps.hca[MLX5_CAP_GENERAL]->max, cap)

#define MLX5_CAP_GEN_2(mdev, cap) \
	MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)

#define MLX5_CAP_GEN_2_64(mdev, cap) \
	MLX5_GET64(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->cur, cap)

#define MLX5_CAP_GEN_2_MAX(mdev, cap) \
	MLX5_GET(cmd_hca_cap_2, mdev->caps.hca[MLX5_CAP_GENERAL_2]->max, cap)
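/*
 * Usage sketch: "mdev" is a struct mlx5_core_dev pointer and "log_max_qp"
 * is a cmd_hca_cap field:
 *
 *	u8 log_max_qp = MLX5_CAP_GEN(mdev, log_max_qp);
 *
 * The *_MAX variants read the firmware's advertised maximum capability,
 * while the plain variants read the currently enabled ("cur") values.
 */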
#define MLX5_CAP_ETH(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps, \
		 mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->cur, cap)

#define MLX5_CAP_ETH_MAX(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps, \
		 mdev->caps.hca[MLX5_CAP_ETHERNET_OFFLOADS]->max, cap)

#define MLX5_CAP_IPOIB_ENHANCED(mdev, cap) \
	MLX5_GET(per_protocol_networking_offload_caps, \
		 mdev->caps.hca[MLX5_CAP_IPOIB_ENHANCED_OFFLOADS]->cur, cap)

#define MLX5_CAP_ROCE(mdev, cap) \
	MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->cur, cap)

#define MLX5_CAP_ROCE_MAX(mdev, cap) \
	MLX5_GET(roce_cap, mdev->caps.hca[MLX5_CAP_ROCE]->max, cap)

#define MLX5_CAP_ATOMIC(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->cur, cap)

#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
	MLX5_GET(atomic_caps, mdev->caps.hca[MLX5_CAP_ATOMIC]->max, cap)

#define MLX5_CAP_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)

#define MLX5_CAP64_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_nic_cap, (mdev)->caps.hca[MLX5_CAP_FLOW_TABLE]->cur, cap)

#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_nic_cap, mdev->caps.hca[MLX5_CAP_FLOW_TABLE]->max, cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_NIC_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_SNIFFER_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_sniffer.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_RX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_RX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_receive_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX(mdev, cap) \
	MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_transmit_rdma.cap)

#define MLX5_CAP_FLOWTABLE_RDMA_TX_MAX(mdev, cap) \
	MLX5_CAP_FLOWTABLE_MAX(mdev, flow_table_properties_nic_transmit_rdma.cap)

#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)

#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
	MLX5_GET(flow_table_eswitch_cap, \
		 mdev->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->max, cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_FLOWTABLE_FDB_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_nic_esw_fdb.cap)

#define MLX5_CAP_ESW_EGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_EGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_egress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW_INGRESS_ACL_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, flow_table_properties_esw_acl_ingress.cap)

#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE(mdev, ft_field_support_2_esw_fdb.cap)

#define MLX5_CAP_ESW_FT_FIELD_SUPPORT_2_MAX(mdev, cap) \
	MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, ft_field_support_2_esw_fdb.cap)

#define MLX5_CAP_ESW(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->caps.hca[MLX5_CAP_ESWITCH]->cur, cap)

#define MLX5_CAP64_ESW_FLOWTABLE(mdev, cap) \
	MLX5_GET64(flow_table_eswitch_cap, \
		   (mdev)->caps.hca[MLX5_CAP_ESWITCH_FLOW_TABLE]->cur, cap)

#define MLX5_CAP_ESW_MAX(mdev, cap) \
	MLX5_GET(e_switch_cap, \
		 mdev->caps.hca[MLX5_CAP_ESWITCH]->max, cap)

#define MLX5_CAP_PORT_SELECTION(mdev, cap) \
	MLX5_GET(port_selection_cap, \
		 mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->cur, cap)

#define MLX5_CAP_PORT_SELECTION_MAX(mdev, cap) \
	MLX5_GET(port_selection_cap, \
		 mdev->caps.hca[MLX5_CAP_PORT_SELECTION]->max, cap)

#define MLX5_CAP_ADV_VIRTUALIZATION(mdev, cap) \
	MLX5_GET(adv_virtualization_cap, \
		 mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->cur, cap)

#define MLX5_CAP_ADV_VIRTUALIZATION_MAX(mdev, cap) \
	MLX5_GET(adv_virtualization_cap, \
		 mdev->caps.hca[MLX5_CAP_ADV_VIRTUALIZATION]->max, cap)

#define MLX5_CAP_FLOWTABLE_PORT_SELECTION(mdev, cap) \
	MLX5_CAP_PORT_SELECTION(mdev, flow_table_properties_port_selection.cap)

#define MLX5_CAP_FLOWTABLE_PORT_SELECTION_MAX(mdev, cap) \
	MLX5_CAP_PORT_SELECTION_MAX(mdev, flow_table_properties_port_selection.cap)

#define MLX5_CAP_ODP(mdev, cap) \
	MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->cur, cap)

#define MLX5_CAP_ODP_MAX(mdev, cap) \
	MLX5_GET(odp_cap, mdev->caps.hca[MLX5_CAP_ODP]->max, cap)

#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
	MLX5_GET(vector_calc_cap, \
		 mdev->caps.hca[MLX5_CAP_VECTOR_CALC]->cur, cap)

#define MLX5_CAP_QOS(mdev, cap) \
	MLX5_GET(qos_cap, mdev->caps.hca[MLX5_CAP_QOS]->cur, cap)

#define MLX5_CAP_DEBUG(mdev, cap) \
	MLX5_GET(debug_cap, mdev->caps.hca[MLX5_CAP_DEBUG]->cur, cap)

#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_PCAM_REG(mdev, reg) \
	MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)

#define MLX5_CAP_MCAM_REG(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
		 mng_access_reg_cap_mask.access_regs.reg)

#define MLX5_CAP_MCAM_REG1(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
		 mng_access_reg_cap_mask.access_regs1.reg)

#define MLX5_CAP_MCAM_REG2(mdev, reg) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
		 mng_access_reg_cap_mask.access_regs2.reg)

#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
	MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)

#define MLX5_CAP_QCAM_REG(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_access_reg_cap_mask.reg_cap.fld)

#define MLX5_CAP_QCAM_FEATURE(mdev, fld) \
	MLX5_GET(qcam_reg, (mdev)->caps.qcam, qos_feature_cap_mask.feature_cap.fld)

#define MLX5_CAP_FPGA(mdev, cap) \
	MLX5_GET(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP64_FPGA(mdev, cap) \
	MLX5_GET64(fpga_cap, (mdev)->caps.fpga, cap)

#define MLX5_CAP_DEV_MEM(mdev, cap) \
	MLX5_GET(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)

#define MLX5_CAP64_DEV_MEM(mdev, cap) \
	MLX5_GET64(device_mem_cap, mdev->caps.hca[MLX5_CAP_DEV_MEM]->cur, cap)

#define MLX5_CAP_TLS(mdev, cap) \
	MLX5_GET(tls_cap, (mdev)->caps.hca[MLX5_CAP_TLS]->cur, cap)

#define MLX5_CAP_DEV_EVENT(mdev, cap) \
	MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca[MLX5_CAP_DEV_EVENT]->cur, cap)

#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap) \
	MLX5_GET(virtio_emulation_cap, \
		 (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)

#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap) \
	MLX5_GET64(virtio_emulation_cap, \
		   (mdev)->caps.hca[MLX5_CAP_VDPA_EMULATION]->cur, cap)

#define MLX5_CAP_IPSEC(mdev, cap) \
	MLX5_GET(ipsec_cap, (mdev)->caps.hca[MLX5_CAP_IPSEC]->cur, cap)

#define MLX5_CAP_CRYPTO(mdev, cap) \
	MLX5_GET(crypto_cap, (mdev)->caps.hca[MLX5_CAP_CRYPTO]->cur, cap)
#define MLX5_CAP_DEV_SHAMPO(mdev, cap) \
	MLX5_GET(shampo_cap, mdev->caps.hca[MLX5_CAP_DEV_SHAMPO]->cur, cap)
#define MLX5_CAP_MACSEC(mdev, cap) \
	MLX5_GET(macsec_cap, (mdev)->caps.hca[MLX5_CAP_MACSEC]->cur, cap)
	MLX5_CMD_STAT_OK = 0x0,
	MLX5_CMD_STAT_INT_ERR = 0x1,
	MLX5_CMD_STAT_BAD_OP_ERR = 0x2,
	MLX5_CMD_STAT_BAD_PARAM_ERR = 0x3,
	MLX5_CMD_STAT_BAD_SYS_STATE_ERR = 0x4,
	MLX5_CMD_STAT_BAD_RES_ERR = 0x5,
	MLX5_CMD_STAT_RES_BUSY = 0x6,
	MLX5_CMD_STAT_LIM_ERR = 0x8,
	MLX5_CMD_STAT_BAD_RES_STATE_ERR = 0x9,
	MLX5_CMD_STAT_IX_ERR = 0xa,
	MLX5_CMD_STAT_NO_RES_ERR = 0xf,
	MLX5_CMD_STAT_BAD_INP_LEN_ERR = 0x50,
	MLX5_CMD_STAT_BAD_OUTP_LEN_ERR = 0x51,
	MLX5_CMD_STAT_BAD_QP_STATE_ERR = 0x10,
	MLX5_CMD_STAT_BAD_PKT_ERR = 0x30,
	MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR = 0x40,

	MLX5_IEEE_802_3_COUNTERS_GROUP = 0x0,
	MLX5_RFC_2863_COUNTERS_GROUP = 0x1,
	MLX5_RFC_2819_COUNTERS_GROUP = 0x2,
	MLX5_RFC_3635_COUNTERS_GROUP = 0x3,
	MLX5_ETHERNET_EXTENDED_COUNTERS_GROUP = 0x5,
	MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
	MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
	MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
	MLX5_PER_TRAFFIC_CLASS_CONGESTION_GROUP = 0x13,
	MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
	MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,

	MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
	if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
		return 0;
	return MLX5_MIN_PKEY_TABLE_SIZE << pkey_sz;
}
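/*
 * Worked examples: mlx5_to_sw_pkey_sz(0) = 128 and mlx5_to_sw_pkey_sz(5) =
 * 128 << 5 = 4096 entries; values above MLX5_MAX_LOG_PKEY_TABLE (5) are
 * invalid and yield 0.
 */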
#define MLX5_RDMA_RX_NUM_COUNTERS_PRIOS 2
#define MLX5_RDMA_TX_NUM_COUNTERS_PRIOS 1
#define MLX5_BY_PASS_NUM_REGULAR_PRIOS 16
#define MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS 16
#define MLX5_BY_PASS_NUM_MULTICAST_PRIOS 1
#define MLX5_BY_PASS_NUM_PRIOS (MLX5_BY_PASS_NUM_REGULAR_PRIOS + \
				MLX5_BY_PASS_NUM_DONT_TRAP_PRIOS + \
				MLX5_BY_PASS_NUM_MULTICAST_PRIOS)

#endif /* MLX5_DEVICE_H */